Snap for 8426163 from 384c8c48399de6c9fd2dd0247084eda6d12c748d to mainline-tzdata2-release Change-Id: I754aeb7d4c57115b39f0341d750e8fbf0e6ade07
diff --git a/Android.bp b/Android.bp index 3ea7727..904718a 100644 --- a/Android.bp +++ b/Android.bp
@@ -14,37 +14,6 @@ * limitations under the License. */ -package { - default_applicable_licenses: ["packages_modules_NeuralNetworks_license"], -} - -// Added automatically by a large-scale-change that took the approach of -// 'apply every license found to every target'. While this makes sure we respect -// every license restriction, it may not be entirely correct. -// -// e.g. GPL in an MIT project might only apply to the contrib/ directory. -// -// Please consider splitting the single license below into multiple licenses, -// taking care not to lose any license_kind information, and overriding the -// default license using the 'licenses: [...]' property on targets as needed. -// -// For unused files, consider creating a 'fileGroup' with "//visibility:private" -// to attach the license to, and including a comment whether the files may be -// used in the current project. -// See: http://go/android-license-faq -license { - name: "packages_modules_NeuralNetworks_license", - visibility: [":__subpackages__"], - license_kinds: [ - "SPDX-license-identifier-Apache-2.0", - "SPDX-license-identifier-Artistic", - "SPDX-license-identifier-BSD", - "SPDX-license-identifier-MPL", - "SPDX-license-identifier-MPL-2.0", - ], - // large-scale-change unable to identify any license_text files -} - cc_defaults { name: "neuralnetworks_float16", arch: {
diff --git a/METADATA b/METADATA deleted file mode 100644 index 6111a0f..0000000 --- a/METADATA +++ /dev/null
@@ -1,4 +0,0 @@ -third_party { - # Would be NOTICE save for MPL in runtime/NOTICE - license_type: RECIPROCAL -}
diff --git a/OWNERS b/OWNERS index 630f200..68cef7c 100644 --- a/OWNERS +++ b/OWNERS
@@ -9,5 +9,3 @@ [email protected] [email protected] [email protected] - -include platform/packages/modules/common:/MODULES_OWNERS # see go/mainline-owners-policy
diff --git a/PREUPLOAD.cfg b/PREUPLOAD.cfg index 5582731..3c906e2 100644 --- a/PREUPLOAD.cfg +++ b/PREUPLOAD.cfg
@@ -2,8 +2,8 @@ ignore_merged_commits = true [Hook Scripts] -generated_tests = runtime/test/specs/generate_all_tests.sh --hook -generate_apis = tools/api/generate_api.sh --mode=hook +generated_tests = nn/runtime/test/specs/generate_all_tests.sh --hook +generate_apis = nn/tools/api/generate_api.sh --mode=ndk_hook [Builtin Hooks] bpfmt = true
diff --git a/TEST_MAPPING b/TEST_MAPPING index 63d3aed..bbe3960 100644 --- a/TEST_MAPPING +++ b/TEST_MAPPING
@@ -1,19 +1,9 @@ { - "mainline-presubmit": [ - { - "name": "CtsNNAPITestCases[com.google.android.neuralnetworks.apex]" - } - ], "presubmit": [ { - "name": "NeuralNetworksTest_utils" + "name": "CtsNNAPITestCases" }, { - "name": "CtsNNAPIJavaTestCases" - } - ], - "presubmit-large": [ - { "name": "NeuralNetworksTest_static", "options": [ { @@ -21,16 +11,17 @@ // pass consisting of: // * useCpuOnly = 0 // * computeMode = ComputeMode::ASYNC + // * allowSyncExecHal = 1 // - // The value here is a bitmask indicating only "pass 2" - // should be run (4 = 2^2). The bit conversions can be + // The value here is a bitmask indicating only "pass 10" + // should be run (1024 = 2^10). The bit conversions can be // found in packages/modules/NeuralNetworks/runtime/test/TestMain.cpp. - "native-test-flag": "4" + "native-test-flag": "1024" } ] }, { - "name": "CtsNNAPITestCases" + "name": "NeuralNetworksTest_utils" } ], "imports": [
diff --git a/apex/Android.bp b/apex/Android.bp index f99dd5b..42391eb 100644 --- a/apex/Android.bp +++ b/apex/Android.bp
@@ -14,10 +14,6 @@ * limitations under the License. */ -package { - default_applicable_licenses: ["Android-Apache-2.0"], -} - apex_key { name: "com.android.neuralnetworks.key", public_key: "com.android.neuralnetworks.avbpubkey", @@ -37,17 +33,12 @@ apex_defaults { name: "com.android.neuralnetworks-defaults", updatable: true, - min_sdk_version: "30", + min_sdk_version: "R", androidManifest: ":com.android.neuralnetworks-androidManifest", - native_shared_libs: [ - "libneuralnetworks", - ], + native_shared_libs: ["libneuralnetworks"], compile_multilib: "both", key: "com.android.neuralnetworks.key", certificate: ":com.android.neuralnetworks.certificate", - // Indicates that pre-installed version of this apex can be compressed. - // Whether it actually will be compressed is controlled on per-device basis. - compressible: true, } apex {
diff --git a/apex/manifest.json b/apex/manifest.json index 9690010..acbd048 100644 --- a/apex/manifest.json +++ b/apex/manifest.json
@@ -1,4 +1,4 @@ { "name": "com.android.neuralnetworks", - "version": 319999900 + "version": 309999900 }
diff --git a/apex/testing/Android.bp b/apex/testing/Android.bp index e2c41d2..226f040 100644 --- a/apex/testing/Android.bp +++ b/apex/testing/Android.bp
@@ -12,10 +12,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -package { - default_applicable_licenses: ["Android-Apache-2.0"], -} - apex_test { name: "test_com.android.neuralnetworks", visibility: [
diff --git a/common/AidlBufferTracker.cpp b/common/AidlBufferTracker.cpp deleted file mode 100644 index fae1620..0000000 --- a/common/AidlBufferTracker.cpp +++ /dev/null
@@ -1,228 +0,0 @@ -/* - * Copyright (C) 2020 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "AidlBufferTracker.h" - -#include <android-base/macros.h> - -#include <memory> -#include <mutex> -#include <set> -#include <stack> -#include <utility> -#include <vector> - -#include "AidlHalInterfaces.h" -#include "CpuExecutor.h" -#include "nnapi/TypeUtils.h" - -namespace android::nn { - -std::shared_ptr<AidlManagedBuffer> AidlManagedBuffer::create( - uint32_t size, std::set<AidlHalPreparedModelRole> roles, const Operand& operand) { - std::unique_ptr<uint8_t[]> buffer(new (std::nothrow) uint8_t[size]); - if (buffer == nullptr) { - return nullptr; - } - if (isExtension(operand.type)) { - LOG(ERROR) << "AidlManagedBuffer cannot handle extension operands."; - return nullptr; - } - return std::make_shared<AidlManagedBuffer>(std::move(buffer), size, std::move(roles), operand); -} - -AidlManagedBuffer::AidlManagedBuffer(std::unique_ptr<uint8_t[]> buffer, uint32_t size, - std::set<AidlHalPreparedModelRole> roles, - const Operand& operand) - : kBuffer(std::move(buffer)), - kSize(size), - kRoles(std::move(roles)), - kOperandType(operand.type), - kInitialDimensions(operand.dimensions), - mUpdatedDimensions(operand.dimensions) { - CHECK(!isExtension(kOperandType)); -} - -ErrorStatus AidlManagedBuffer::validateRequest( - uint32_t poolIndex, const Request& request, - const aidl_hal::IPreparedModel* preparedModel) const { - 
CHECK_LT(poolIndex, request.pools.size()); - CHECK(std::holds_alternative<Request::MemoryDomainToken>(request.pools[poolIndex])); - std::lock_guard<std::mutex> guard(mMutex); - - bool usedAsInput = false, usedAsOutput = false; - for (uint32_t i = 0; i < request.inputs.size(); i++) { - if (request.inputs[i].lifetime != Request::Argument::LifeTime::POOL) continue; - if (request.inputs[i].location.poolIndex != poolIndex) continue; - // Validate if the input role is specified during allocation. - if (kRoles.count({preparedModel, IOType::INPUT, i}) == 0) { - LOG(ERROR) << "AidlManagedBuffer::validateRequest -- invalid buffer role."; - return ErrorStatus::INVALID_ARGUMENT; - } - if (!mInitialized) { - LOG(ERROR) - << "AidlManagedBuffer::validateRequest -- using uninitialized buffer as input " - "request."; - return ErrorStatus::GENERAL_FAILURE; - } - auto combined = combineDimensions(mUpdatedDimensions, request.inputs[i].dimensions); - if (!combined.has_value()) { - LOG(ERROR) << "AidlManagedBuffer::validateRequest -- incompatible dimensions (" - << toString(mUpdatedDimensions) << " vs " - << toString(request.inputs[i].dimensions) << ")"; - return ErrorStatus::INVALID_ARGUMENT; - } - usedAsInput = true; - } - for (uint32_t i = 0; i < request.outputs.size(); i++) { - if (request.outputs[i].lifetime != Request::Argument::LifeTime::POOL) continue; - if (request.outputs[i].location.poolIndex != poolIndex) continue; - if (usedAsInput || usedAsOutput) { - LOG(ERROR) << "AidlManagedBuffer::validateRequest -- using the same device memory for " - "input/output or multiple outputs"; - return ErrorStatus::INVALID_ARGUMENT; - } - // Validate if the output role is specified during allocation. 
- if (kRoles.count({preparedModel, IOType::OUTPUT, i}) == 0) { - LOG(ERROR) << "AidlManagedBuffer::validateRequest -- invalid buffer role."; - return ErrorStatus::INVALID_ARGUMENT; - } - auto combined = combineDimensions(kInitialDimensions, request.outputs[i].dimensions); - if (!combined.has_value()) { - LOG(ERROR) << "AidlManagedBuffer::validateRequest -- incompatible dimensions (" - << toString(kInitialDimensions) << " vs " - << toString(request.outputs[i].dimensions) << ")"; - return ErrorStatus::INVALID_ARGUMENT; - } - usedAsOutput = true; - } - return ErrorStatus::NONE; -} - -ErrorStatus AidlManagedBuffer::validateCopyFrom(const std::vector<uint32_t>& dimensions, - uint32_t size) const { - if (size != kSize) { - LOG(ERROR) << "AidlManagedBuffer::validateCopyFrom -- invalid memory size: " << kSize - << " vs " << size; - return ErrorStatus::INVALID_ARGUMENT; - } - - if (nonExtensionOperandTypeIsScalar(static_cast<int>(kOperandType))) { - if (!dimensions.empty()) { - LOG(ERROR) << "AidlManagedBuffer::validateCopyFrom -- invalid dimensions for scalar " - "operand: " - << toString(dimensions); - return ErrorStatus::INVALID_ARGUMENT; - } - return ErrorStatus::NONE; - } - - if (dimensions.empty()) { - if (tensorHasUnspecifiedDimensions(kOperandType, kInitialDimensions)) { - LOG(ERROR) << "AidlManagedBuffer::validateCopyFrom -- the initial dimensions are not " - "fully " - "specified and no dimension update is provided: " - << toString(kInitialDimensions); - return ErrorStatus::INVALID_ARGUMENT; - } - } else { - if (tensorHasUnspecifiedDimensions(kOperandType, dimensions)) { - LOG(ERROR) << "AidlManagedBuffer::validateCopyFrom -- the updated dimensions are not " - "fully " - "specified: " - << toString(dimensions); - return ErrorStatus::INVALID_ARGUMENT; - } - } - - const auto combined = combineDimensions(kInitialDimensions, dimensions); - if (!combined.has_value()) { - LOG(ERROR) << "AidlManagedBuffer::validateCopyFrom -- incompatible dimensions (" - << 
toString(kInitialDimensions) << " vs " << toString(dimensions) << ")"; - return ErrorStatus::INVALID_ARGUMENT; - } - return ErrorStatus::NONE; -} - -ErrorStatus AidlManagedBuffer::validateCopyTo(uint32_t size) const { - if (size != kSize) { - LOG(ERROR) << "AidlManagedBuffer::validateCopyTo -- invalid memory size: " << kSize - << " vs " << size; - return ErrorStatus::INVALID_ARGUMENT; - } - std::lock_guard<std::mutex> guard(mMutex); - if (!mInitialized) { - LOG(ERROR) << "AidlManagedBuffer::validateCopyTo -- using uninitialized buffer as source."; - return ErrorStatus::GENERAL_FAILURE; - } - return ErrorStatus::NONE; -} - -bool AidlManagedBuffer::updateDimensions(const std::vector<uint32_t>& dimensions) { - auto combined = combineDimensions(kInitialDimensions, dimensions); - if (!combined.has_value()) { - LOG(ERROR) << "AidlManagedBuffer::updateDimensions -- incompatible dimensions (" - << toString(kInitialDimensions) << " vs " << toString(dimensions) << ")"; - return false; - } - std::lock_guard<std::mutex> guard(mMutex); - mUpdatedDimensions = std::move(combined).value(); - return true; -} - -void AidlManagedBuffer::setInitialized(bool initialized) { - std::lock_guard<std::mutex> guard(mMutex); - mInitialized = initialized; -} - -std::unique_ptr<AidlBufferTracker::Token> AidlBufferTracker::add( - std::shared_ptr<AidlManagedBuffer> buffer) { - if (buffer == nullptr) { - return nullptr; - } - std::lock_guard<std::mutex> guard(mMutex); - uint32_t token = 0; - if (mFreeTokens.empty()) { - token = mTokenToBuffers.size(); - mTokenToBuffers.push_back(std::move(buffer)); - } else { - token = mFreeTokens.top(); - mFreeTokens.pop(); - mTokenToBuffers[token] = std::move(buffer); - } - VLOG(MEMORY) << "AidlBufferTracker::add -- new token = " << token; - return std::make_unique<Token>(token, shared_from_this()); -} - -std::shared_ptr<AidlManagedBuffer> AidlBufferTracker::get(uint32_t token) const { - std::lock_guard<std::mutex> guard(mMutex); - if (mTokenToBuffers.size() <= 
token || mTokenToBuffers[token] == nullptr) { - LOG(ERROR) << "AidlBufferTracker::get -- unknown token " << token; - return nullptr; - } - return mTokenToBuffers[token]; -} - -void AidlBufferTracker::free(uint32_t token) { - std::lock_guard<std::mutex> guard(mMutex); - CHECK_LT(token, mTokenToBuffers.size()); - CHECK(mTokenToBuffers[token] != nullptr); - VLOG(MEMORY) << "AidlBufferTracker::free -- release token = " << token; - mTokenToBuffers[token] = nullptr; - mFreeTokens.push(token); -} - -} // namespace android::nn
diff --git a/common/AidlHalUtils.cpp b/common/AidlHalUtils.cpp deleted file mode 100644 index c99803b..0000000 --- a/common/AidlHalUtils.cpp +++ /dev/null
@@ -1,75 +0,0 @@ -/* - * Copyright (C) 2021 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -// This file contains pre-canonical-types utility code and includes HAL -// utilities. LegacyUtils.h is the subset of these utilities that do not touch -// HAL. - -#include "AidlHalUtils.h" - -#include <android-base/logging.h> -#include <nnapi/hal/aidl/Conversions.h> - -#include <algorithm> -#include <iterator> -#include <type_traits> -#include <vector> - -#include "AidlHalInterfaces.h" -#include "LegacyUtils.h" - -namespace android::nn { - -std::vector<aidl_hal::OperandPerformance> nonExtensionOperandPerformance( - aidl_hal::PerformanceInfo perf) { - static constexpr ndk::enum_range<aidl_hal::OperandType> kOperandTypeRange; - std::vector<aidl_hal::OperandPerformance> ret; - ret.reserve(std::distance(kOperandTypeRange.begin(), kOperandTypeRange.end())); - for (aidl_hal::OperandType type : kOperandTypeRange) { - if (type != aidl_hal::OperandType::SUBGRAPH) { - ret.push_back(aidl_hal::OperandPerformance{type, perf}); - } - } - std::sort(ret.begin(), ret.end(), - [](const aidl_hal::OperandPerformance& a, const aidl_hal::OperandPerformance& b) { - return a.type < b.type; - }); - - return ret; -} - -void update(std::vector<aidl_hal::OperandPerformance>* operandPerformance, - aidl_hal::OperandType type, aidl_hal::PerformanceInfo perf) { - CHECK(operandPerformance != nullptr); - const auto it = 
std::lower_bound(operandPerformance->begin(), operandPerformance->end(), type, - [](const aidl_hal::OperandPerformance& perf, - aidl_hal::OperandType type) { return perf.type < type; }); - CHECK(it != operandPerformance->end()) - << toString(type) << " not in operand performance vector"; - it->info = perf; -} - -bool isExtensionOperandType(aidl_hal::OperandType type) { - return isExtensionOperandType(convert(type).value()); -} - -aidl_hal::ErrorStatus convertResultCodeToAidlErrorStatus(int resultCode) { - const auto errorStatus = aidl_hal::utils::convert(convertResultCodeToErrorStatus(resultCode)); - CHECK(errorStatus.has_value()) << "Unhandled error (" << errorStatus.error().code - << "): " << errorStatus.error().message; - return errorStatus.value(); -} - -} // namespace android::nn
diff --git a/common/AidlValidateHal.cpp b/common/AidlValidateHal.cpp deleted file mode 100644 index 913d9dc..0000000 --- a/common/AidlValidateHal.cpp +++ /dev/null
@@ -1,133 +0,0 @@ -/* - * Copyright (C) 2021 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#define LOG_TAG "ValidateHal" - -#include "AidlValidateHal.h" - -#include <android-base/logging.h> -#include <nnapi/hal/aidl/Conversions.h> - -#include <algorithm> -#include <memory> -#include <set> -#include <utility> -#include <vector> - -#include "LegacyUtils.h" -#include "nnapi/TypeUtils.h" - -namespace android { -namespace nn { - -bool validateMemoryDesc( - const aidl_hal::BufferDesc& desc, - const std::vector<std::shared_ptr<aidl_hal::IPreparedModel>>& preparedModels, - const std::vector<aidl_hal::BufferRole>& inputRoles, - const std::vector<aidl_hal::BufferRole>& outputRoles, - std::function<const aidl_hal::Model*(const std::shared_ptr<aidl_hal::IPreparedModel>&)> - getModel, - std::set<AidlHalPreparedModelRole>* preparedModelRoles, - aidl_hal::Operand* combinedOperand) { - NN_RET_CHECK(preparedModels.size() != 0); - NN_RET_CHECK(inputRoles.size() != 0 || outputRoles.size() != 0); - - std::set<AidlHalPreparedModelRole> roles; - std::vector<aidl_hal::Operand> operands; - operands.reserve(inputRoles.size() + outputRoles.size()); - for (const auto& role : inputRoles) { - NN_RET_CHECK_LT(role.modelIndex, preparedModels.size()); - const auto& preparedModel = preparedModels[role.modelIndex]; - NN_RET_CHECK(preparedModel != nullptr); - const auto* model = getModel(preparedModel); - NN_RET_CHECK(model != nullptr); - const auto& 
inputIndexes = model->main.inputIndexes; - NN_RET_CHECK_LT(role.ioIndex, inputIndexes.size()); - NN_RET_CHECK_GT(role.probability, 0.0f); - NN_RET_CHECK_LE(role.probability, 1.0f); - const auto [it, success] = roles.emplace(preparedModel.get(), IOType::INPUT, role.ioIndex); - NN_RET_CHECK(success); - operands.push_back(model->main.operands[inputIndexes[role.ioIndex]]); - } - for (const auto& role : outputRoles) { - NN_RET_CHECK_LT(role.modelIndex, preparedModels.size()); - const auto& preparedModel = preparedModels[role.modelIndex]; - NN_RET_CHECK(preparedModel != nullptr); - const auto* model = getModel(preparedModel); - NN_RET_CHECK(model != nullptr); - const auto& outputIndexes = model->main.outputIndexes; - NN_RET_CHECK_LT(role.ioIndex, outputIndexes.size()); - NN_RET_CHECK_GT(role.probability, 0.0f); - NN_RET_CHECK_LE(role.probability, 1.0f); - const auto [it, success] = roles.emplace(preparedModel.get(), IOType::OUTPUT, role.ioIndex); - NN_RET_CHECK(success); - operands.push_back(model->main.operands[outputIndexes[role.ioIndex]]); - } - - CHECK(!operands.empty()); - const auto opType = operands[0].type; - const auto canonicalOperandType = convert(opType); - NN_RET_CHECK(canonicalOperandType.has_value()) << canonicalOperandType.error().message; - const bool isExtensionOperand = isExtension(canonicalOperandType.value()); - - auto maybeDimensions = toUnsigned(desc.dimensions); - NN_RET_CHECK(maybeDimensions.has_value()) << maybeDimensions.error().message; - std::vector<uint32_t> dimensions = std::move(maybeDimensions).value(); - - for (const auto& operand : operands) { - NN_RET_CHECK(operand.type == operands[0].type) - << toString(operand.type) << " vs " << toString(operands[0].type); - NN_RET_CHECK_EQ(operand.scale, operands[0].scale); - NN_RET_CHECK_EQ(operand.zeroPoint, operands[0].zeroPoint); - // NOTE: validateMemoryDesc cannot validate extra parameters for extension operand type. 
- if (!isExtensionOperand) { - const auto& lhsExtraParams = operand.extraParams; - const auto& rhsExtraParams = operands[0].extraParams; - NN_RET_CHECK(lhsExtraParams == rhsExtraParams) - << (lhsExtraParams.has_value() ? lhsExtraParams.value().toString() - : "std::nullopt") - << " vs " - << (rhsExtraParams.has_value() ? rhsExtraParams.value().toString() - : "std::nullopt"); - } - const auto maybeRhsDimensions = toUnsigned(operand.dimensions); - NN_RET_CHECK(maybeRhsDimensions.has_value()) << maybeRhsDimensions.error().message; - const auto combined = combineDimensions(dimensions, maybeRhsDimensions.value()); - NN_RET_CHECK(combined.has_value()); - dimensions = combined.value(); - } - - // NOTE: validateMemoryDesc cannot validate scalar dimensions with extension operand type. - if (!isExtensionOperand) { - NN_RET_CHECK(!nonExtensionOperandTypeIsScalar(static_cast<int>(opType)) || - dimensions.empty()) - << "invalid dimensions with scalar operand type."; - } - - if (preparedModelRoles != nullptr) { - *preparedModelRoles = std::move(roles); - } - if (combinedOperand != nullptr) { - *combinedOperand = operands[0]; - // No need to check that values fit int32_t here, since the original values are obtained - // from int32_t. - combinedOperand->dimensions = aidl_hal::utils::toSigned(dimensions).value(); - } - return true; -} - -} // namespace nn -} // namespace android
diff --git a/common/Android.bp b/common/Android.bp index 44180e3..7680eb1 100644 --- a/common/Android.bp +++ b/common/Android.bp
@@ -14,10 +14,6 @@ * limitations under the License. */ -package { - default_applicable_licenses: ["Android-Apache-2.0"], -} - cc_library_headers { name: "libneuralnetworks_common_headers", host_supported: false, @@ -88,22 +84,18 @@ ], export_include_dirs: ["include"], srcs: [ - "AidlHalUtils.cpp", "ExecutionBurstController.cpp", "ExecutionBurstServer.cpp", - "LegacyHalUtils.cpp", - "LegacyUtils.cpp", "MemoryUtils.cpp", + "Utils.cpp", ], header_libs: [ "gemmlowp_headers", "libeigen", "libneuralnetworks_headers", - "libruy_headers", "tensorflow_headers", ], shared_libs: [ - "android.hardware.neuralnetworks-V1-ndk_platform", "[email protected]", "[email protected]", "[email protected]", @@ -117,13 +109,6 @@ ], whole_static_libs: [ "libarect", - "neuralnetworks_types", - "neuralnetworks_utils_hal_1_0", // TODO(b/160669116): Remove VNDK dependencies. - "neuralnetworks_utils_hal_1_1", - "neuralnetworks_utils_hal_1_2", - "neuralnetworks_utils_hal_1_3", - "neuralnetworks_utils_hal_aidl", - "neuralnetworks_utils_hal_common", ], cflags: [ "-DTF_LITE_DISABLE_X86_NEON", @@ -159,23 +144,18 @@ "include", ], srcs: [ - "AidlBufferTracker.cpp", - "AidlHalUtils.cpp", - "AidlValidateHal.cpp", "BufferTracker.cpp", "CpuExecutor.cpp", "ExecutionBurstController.cpp", "ExecutionBurstServer.cpp", "GraphDump.cpp", - "HalBufferTracker.cpp", "IndexedShapeWrapper.cpp", - "LegacyHalUtils.cpp", - "LegacyUtils.cpp", "MemoryUtils.cpp", "MetaModel.cpp", "OperationsUtils.cpp", "QuantUtils.cpp", "TokenHasher.cpp", + "Utils.cpp", "ValidateHal.cpp", "operations/ArgMinMax.cpp", "operations/BidirectionalSequenceLSTM.cpp", @@ -198,7 +178,6 @@ "operations/Tile.cpp", ], shared_libs: [ - "android.hardware.neuralnetworks-V1-ndk_platform", "[email protected]", "[email protected]", "[email protected]", @@ -225,19 +204,10 @@ whole_static_libs: [ "libarect", "libtflite_kernel_utils", - "neuralnetworks_types", - "neuralnetworks_utils_hal_1_0", // TODO(b/160669116): Remove VNDK dependencies. 
- "neuralnetworks_utils_hal_1_1", - "neuralnetworks_utils_hal_1_2", - "neuralnetworks_utils_hal_1_3", - "neuralnetworks_utils_hal_aidl", - "neuralnetworks_utils_hal_common", - "neuralnetworks_utils_hal_service", "philox_random", ], static_libs: [ "libcrypto_static", - "libruy_static", "libtextclassifier_hash_static", ], cflags: [ @@ -259,219 +229,6 @@ } cc_defaults { - name: "neuralnetworks_cl_defaults", - host_supported: false, - vendor_available: false, - stl: "libc++_static", - sdk_version: "current", - min_sdk_version: "29", - cflags: [ - "-DNN_COMPATIBILITY_LIBRARY_BUILD", - ], -} - -cc_defaults { - name: "libneuralnetworks_common_cl_defaults", - defaults: [ - "neuralnetworks_cl_defaults", - "neuralnetworks_defaults", - "neuralnetworks_operations", - ], - apex_available: [ - "//apex_available:platform", - "com.android.neuralnetworks", - "test_com.android.neuralnetworks", - ], - // b/109953668, disable OpenMP - // openmp: true, - export_include_dirs: [ - "include", - ], - srcs: [ - "BufferTracker.cpp", - "CpuExecutor.cpp", - "GraphDump.cpp", - "IndexedShapeWrapper.cpp", - "LegacyUtils.cpp", - "MetaModel.cpp", - "OperationsUtils.cpp", - "TokenHasher.cpp", - ], - header_libs: [ - "libneuralnetworks_headers_ndk", - ], - static_libs: [ - "libbase_ndk", - "libcrypto_static", - ], - shared_libs: [ - "libnativewindow", - ], - cflags: [ - "-DNAMESPACE_FOR_HASH_FUNCTIONS=farmhash", - "-DTF_LITE_DISABLE_X86_NEON", - "-Wall", - "-Werror", - "-Wextra", - "-Wno-array-bounds", - "-Wno-extern-c-compat", - "-Wno-invalid-partial-specialization", - "-Wno-sign-compare", - "-Wno-unused-local-typedef", - "-Wno-unused-parameter", - "-Wno-unused-private-field", - "-Wno-unused-variable", - ], -} - -cc_library_static { - name: "libneuralnetworks_common_cl", - defaults: ["libneuralnetworks_common_cl_defaults"], -} - -cc_library_static { - name: "libneuralnetworks_common_cl_cpu", - defaults: ["libneuralnetworks_common_cl_defaults"], - sdk_version: "", // Override 
neuralnetworks_cl_defaults - min_sdk_version: "", - header_libs: [ - "gemmlowp_headers", - "libeigen", - "libruy_headers", - "libtextclassifier_hash_headers", - "philox_random_headers", - "tensorflow_headers", - ], - whole_static_libs: [ - "libarect", - "libtflite_kernel_utils", - "neuralnetworks_types_cl", - "philox_random", - ], - static_libs: [ - "libcrypto_static", - "libtextclassifier_hash_static", - ], - srcs: [ - "QuantUtils.cpp", - "operations/ArgMinMax.cpp", - "operations/BidirectionalSequenceLSTM.cpp", - "operations/Cast.cpp", - "operations/EmbeddingLookup.cpp", - "operations/ExpandDims.cpp", - "operations/GroupedConv2D.cpp", - "operations/HashtableLookup.cpp", - "operations/LSHProjection.cpp", - "operations/LSTM.cpp", - "operations/MaximumMinimum.cpp", - "operations/Multinomial.cpp", - "operations/Pow.cpp", - "operations/QuantizedLSTM.cpp", - "operations/RNN.cpp", - "operations/Reshape.cpp", - "operations/SVDF.cpp", - "operations/SimpleMath.cpp", - "operations/Split.cpp", - "operations/Tile.cpp", - ], - cflags: [ - "-DNAMESPACE_FOR_HASH_FUNCTIONS=farmhash", - "-DNN_COMPATIBILITY_LIBRARY_BUILD", - "-DNN_INCLUDE_CPU_IMPLEMENTATION", - "-DTF_LITE_DISABLE_X86_NEON", - "-Wno-array-bounds", - "-Wno-extern-c-compat", - "-Wno-invalid-partial-specialization", - "-Wno-sign-compare", - "-Wno-unused-local-typedef", - "-Wno-unused-parameter", - "-Wno-unused-private-field", - "-Wno-unused-variable", - ], -} - -cc_defaults { - name: "neuralnetworks_utils_defaults", - // b/146324523, NNAPI host build capability - host_supported: false, - vendor_available: true, - apex_available: [ - "//apex_available:platform", - "com.android.neuralnetworks", - "test_com.android.neuralnetworks", - ], - min_sdk_version: "30", - cflags: [ - "-Wall", - "-Werror", - "-Wextra", - ], -} - -cc_library_static { - name: "neuralnetworks_types", - defaults: ["neuralnetworks_utils_defaults"], - srcs: [ - "SharedMemory.cpp", - "TypeUtils.cpp", - "Types.cpp", - "Validation.cpp", - ], - target: { - 
android: { - srcs: ["SharedMemoryAndroid.cpp"], - shared_libs: [ - "[email protected]", - "[email protected]", - "libhidlbase", - "libhidlmemory", - "libnativewindow", - ], - static_libs: ["libarect"], - }, - }, - local_include_dirs: ["include/nnapi"], - export_include_dirs: ["include"], - shared_libs: [ - "libbase", - "libcutils", - "libutils", - ], - export_shared_lib_headers: [ - "libbase", - "libcutils", - "libutils", - ], -} - -cc_library_static { - name: "neuralnetworks_types_cl", - defaults: [ - "neuralnetworks_cl_defaults", - "neuralnetworks_utils_defaults", - ], - srcs: [ - "DynamicCLDeps.cpp", - "SharedMemory.cpp", - "SharedMemoryAndroid.cpp", - "TypeUtils.cpp", - "Types.cpp", - "Validation.cpp", - ], - local_include_dirs: ["include/nnapi"], - export_include_dirs: ["include"], - static_libs: [ - "libbase_ndk", - ], - export_static_lib_headers: [ - "libbase_ndk", - ], - shared_libs: [ - "libandroid", - ], -} - -cc_defaults { name: "NeuralNetworksTest_common", defaults: ["neuralnetworks_float16"], shared_libs: [ @@ -482,10 +239,8 @@ "libnativewindow", "libneuralnetworks", "libneuralnetworks_packageinfo", - "libutils", ], static_libs: [ - "android.hardware.neuralnetworks-V1-ndk_platform", "[email protected]", "[email protected]", "[email protected]",
diff --git a/common/BufferTracker.cpp b/common/BufferTracker.cpp index a27af51..e6b8d94 100644 --- a/common/BufferTracker.cpp +++ b/common/BufferTracker.cpp
@@ -26,12 +26,13 @@ #include <vector> #include "CpuExecutor.h" -#include "LegacyUtils.h" -#include "nnapi/TypeUtils.h" -#include "nnapi/Validation.h" +#include "HalInterfaces.h" +#include "Utils.h" namespace android::nn { +using namespace hal; + std::shared_ptr<ManagedBuffer> ManagedBuffer::create(uint32_t size, std::set<PreparedModelRole> roles, const Operand& operand) { @@ -39,7 +40,7 @@ if (buffer == nullptr) { return nullptr; } - if (isExtension(operand.type)) { + if (isExtensionOperandType(operand.type)) { LOG(ERROR) << "ManagedBuffer cannot handle extension operands."; return nullptr; } @@ -54,18 +55,19 @@ kOperandType(operand.type), kInitialDimensions(operand.dimensions), mUpdatedDimensions(operand.dimensions) { - CHECK(!isExtension(kOperandType)); + CHECK(!isExtensionOperandType(kOperandType)); } ErrorStatus ManagedBuffer::validateRequest(uint32_t poolIndex, const Request& request, const IPreparedModel* preparedModel) const { CHECK_LT(poolIndex, request.pools.size()); - CHECK(std::holds_alternative<Request::MemoryDomainToken>(request.pools[poolIndex])); + CHECK(request.pools[poolIndex].getDiscriminator() == + Request::MemoryPool::hidl_discriminator::token); std::lock_guard<std::mutex> guard(mMutex); bool usedAsInput = false, usedAsOutput = false; for (uint32_t i = 0; i < request.inputs.size(); i++) { - if (request.inputs[i].lifetime != Request::Argument::LifeTime::POOL) continue; + if (request.inputs[i].hasNoValue) continue; if (request.inputs[i].location.poolIndex != poolIndex) continue; // Validate if the input role is specified during allocation. 
if (kRoles.count({preparedModel, IOType::INPUT, i}) == 0) { @@ -87,7 +89,7 @@ usedAsInput = true; } for (uint32_t i = 0; i < request.outputs.size(); i++) { - if (request.outputs[i].lifetime != Request::Argument::LifeTime::POOL) continue; + if (request.outputs[i].hasNoValue) continue; if (request.outputs[i].location.poolIndex != poolIndex) continue; if (usedAsInput || usedAsOutput) { LOG(ERROR) << "ManagedBuffer::validateRequest -- using the same device memory for " @@ -170,7 +172,7 @@ bool ManagedBuffer::updateDimensions(const std::vector<uint32_t>& dimensions) { auto combined = combineDimensions(kInitialDimensions, dimensions); - if (!combined.has_value()) { + if (!combined) { LOG(ERROR) << "ManagedBuffer::updateDimensions -- incompatible dimensions (" << toString(kInitialDimensions) << " vs " << toString(dimensions) << ")"; return false; @@ -185,53 +187,39 @@ mInitialized = initialized; } -BufferTracker::BufferTracker() { - constexpr size_t kPreallocatedElements = 1024; - using StackSpace = std::vector<Request::MemoryDomainToken>; - using Stack = std::stack<Request::MemoryDomainToken, StackSpace>; - StackSpace stackSpace; - stackSpace.reserve(kPreallocatedElements); - mFreeTokens = Stack(std::move(stackSpace)); - mTokenToBuffers.reserve(kPreallocatedElements); - mTokenToBuffers.emplace_back(); -} - std::unique_ptr<BufferTracker::Token> BufferTracker::add(std::shared_ptr<ManagedBuffer> buffer) { if (buffer == nullptr) { return nullptr; } std::lock_guard<std::mutex> guard(mMutex); - auto token = Request::MemoryDomainToken{0}; + uint32_t token = 0; if (mFreeTokens.empty()) { - token = static_cast<Request::MemoryDomainToken>(mTokenToBuffers.size()); + token = mTokenToBuffers.size(); mTokenToBuffers.push_back(std::move(buffer)); } else { token = mFreeTokens.top(); mFreeTokens.pop(); - const auto index = static_cast<uint32_t>(token); - mTokenToBuffers[index] = std::move(buffer); + mTokenToBuffers[token] = std::move(buffer); } VLOG(MEMORY) << "BufferTracker::add -- new 
token = " << token; return std::make_unique<Token>(token, shared_from_this()); } -std::shared_ptr<ManagedBuffer> BufferTracker::get(Request::MemoryDomainToken token) const { +std::shared_ptr<ManagedBuffer> BufferTracker::get(uint32_t token) const { std::lock_guard<std::mutex> guard(mMutex); - const auto index = static_cast<uint32_t>(token); - if (mTokenToBuffers.size() <= index || mTokenToBuffers[index] == nullptr) { + if (mTokenToBuffers.size() <= token || mTokenToBuffers[token] == nullptr) { LOG(ERROR) << "BufferTracker::get -- unknown token " << token; return nullptr; } - return mTokenToBuffers[index]; + return mTokenToBuffers[token]; } -void BufferTracker::free(Request::MemoryDomainToken token) { +void BufferTracker::free(uint32_t token) { std::lock_guard<std::mutex> guard(mMutex); - const auto index = static_cast<uint32_t>(token); - CHECK_LT(index, mTokenToBuffers.size()); - CHECK(mTokenToBuffers[index] != nullptr); + CHECK_LT(token, mTokenToBuffers.size()); + CHECK(mTokenToBuffers[token] != nullptr); VLOG(MEMORY) << "BufferTracker::free -- release token = " << token; - mTokenToBuffers[index] = nullptr; + mTokenToBuffers[token] = nullptr; mFreeTokens.push(token); }
diff --git a/common/CpuExecutor.cpp b/common/CpuExecutor.cpp index 31b3ebc..9373d59 100644 --- a/common/CpuExecutor.cpp +++ b/common/CpuExecutor.cpp
@@ -18,15 +18,20 @@ #include "CpuExecutor.h" -#include <android-base/scopeguard.h> -#include <nnapi/SharedMemory.h> -#include <nnapi/TypeUtils.h> +#include <android/hardware_buffer.h> +#include <sys/mman.h> +#include <vndk/hardware_buffer.h> -#include <limits> +#include <Eigen/Core> #include <memory> #include <utility> #include <vector> +// b/109953668, disable OpenMP +#ifdef NNAPI_OPENMP +#include <omp.h> +#endif // NNAPI_OPENMP + #include "ControlFlow.h" #include "NeuralNetworks.h" #include "OperationResolver.h" @@ -34,34 +39,13 @@ #include "OperationsUtils.h" #include "Tracing.h" -// b/109953668, disable OpenMP -#ifdef NNAPI_OPENMP -#include <omp.h> - -#include <Eigen/Core> -#endif // NNAPI_OPENMP - -#ifdef NN_INCLUDE_CPU_IMPLEMENTATION -#include "operations/BidirectionalSequenceLSTM.h" -#include "operations/Cast.h" -#include "operations/EmbeddingLookup.h" -#include "operations/ExpandDims.h" -#include "operations/HashtableLookup.h" -#include "operations/LSHProjection.h" -#include "operations/LSTM.h" -#include "operations/MaximumMinimum.h" -#include "operations/Multinomial.h" -#include "operations/Pow.h" -#include "operations/QuantizedLSTM.h" -#include "operations/RNN.h" -#include "operations/SVDF.h" -#include "operations/Tile.h" -#endif // NN_INCLUDE_CPU_IMPLEMENTATION - namespace android { namespace nn { + namespace { +using namespace hal; + class OperationExecutionContext : public IOperationExecutionContext { DISALLOW_IMPLICIT_CONSTRUCTORS(OperationExecutionContext); @@ -73,7 +57,7 @@ OperandType getInputType(uint32_t index) const override; Shape getInputShape(uint32_t index) const override; const void* getInputBuffer(uint32_t index) const override; - const Operand::ExtraParams& getInputExtraParams(uint32_t index) const override; + const OperandExtraParams getInputExtraParams(uint32_t index) const override; uint32_t getNumOutputs() const override; OperandType getOutputType(uint32_t index) const override; @@ -131,7 +115,7 @@ return getInputInfo(index)->buffer; 
} -const Operand::ExtraParams& OperationExecutionContext::getInputExtraParams(uint32_t index) const { +const OperandExtraParams OperationExecutionContext::getInputExtraParams(uint32_t index) const { return getInputInfo(index)->extraParams; } @@ -168,7 +152,7 @@ bool setInfoAndAllocateIfNeeded(RunTimeOperandInfo* info, const Shape& shape, int* result) { // For user-provided model output operands, the parameters must match the Shape // calculated from the preparation step. - if (info->lifetime == Operand::LifeTime::SUBGRAPH_OUTPUT) { + if (info->lifetime == OperandLifeTime::SUBGRAPH_OUTPUT) { if (info->type != shape.type) { LOG(ERROR) << "Invalid type for model output"; *result = ANEURALNETWORKS_OP_FAILED; @@ -193,7 +177,7 @@ auto combined = combineDimensions(shape.dimensions, info->dimensions); if (!combined.has_value()) { - LOG(ERROR) << "Invalid dimensions for model operand: " << combined.error(); + LOG(ERROR) << "Invalid dimensions for model operand"; *result = ANEURALNETWORKS_OP_FAILED; return false; } @@ -205,7 +189,7 @@ // TODO(b/153081229): We bypass the overflow check on extension operands because we do not know // the sizes of extension types. 
- if (!isExtension(info->type) && + if (!isExtensionOperandType(info->type) && nonExtensionOperandSizeOfDataOverflowsUInt32(info->type, info->dimensions)) { LOG(ERROR) << "Operand data size overflows uint32_t"; *result = ANEURALNETWORKS_OP_FAILED; @@ -213,9 +197,9 @@ } // Allocate the buffer only if the combined dimension is fully specified - if (info->buffer == nullptr && (info->lifetime == Operand::LifeTime::TEMPORARY_VARIABLE || - info->lifetime == Operand::LifeTime::SUBGRAPH_OUTPUT)) { - if (isExtension(info->type)) { + if (info->buffer == nullptr && (info->lifetime == OperandLifeTime::TEMPORARY_VARIABLE || + info->lifetime == OperandLifeTime::SUBGRAPH_OUTPUT)) { + if (isExtensionOperandType(info->type)) { LOG(ERROR) << "Cannot allocate a variable of an extension type"; *result = ANEURALNETWORKS_OP_FAILED; return false; @@ -246,21 +230,21 @@ } bool OperationExecutionContext::isOmittedInput(uint32_t index) const { - return getInputInfo(index)->lifetime == Operand::LifeTime::NO_VALUE; + return getInputInfo(index)->lifetime == OperandLifeTime::NO_VALUE; } bool OperationExecutionContext::isOmittedOutput(uint32_t index) const { - return getOutputInfo(index)->lifetime == Operand::LifeTime::NO_VALUE; + return getOutputInfo(index)->lifetime == OperandLifeTime::NO_VALUE; } bool OperationExecutionContext::checkNoOmittedOperand() const { for (uint32_t i = 0; i < operation->inputs.size(); i++) { - NN_RET_CHECK(!isOmittedInput(i)) - << operation->type << " input operand " << i << " is required but missing."; + NN_RET_CHECK(!isOmittedInput(i)) << getOperationName(operation->type) << " input operand " + << i << " is required but missing."; } for (uint32_t i = 0; i < operation->outputs.size(); i++) { - NN_RET_CHECK(!isOmittedOutput(i)) - << operation->type << " output operand " << i << " is required but missing."; + NN_RET_CHECK(!isOmittedOutput(i)) << getOperationName(operation->type) << " output operand " + << i << " is required but missing."; } return true; } @@ -270,8 
+254,9 @@ if (isOmittedInput(i)) continue; for (uint32_t j = 0; j < getInputInfo(i)->dimensions.size(); j++) { NN_RET_CHECK_NE(getInputInfo(i)->dimensions[j], 0) - << operation->type << " does not support zero-sized tensor, but input " << i - << " dimension " << j << " is 0."; + << getOperationName(operation->type) + << " does not support zero-sized tensor, but input " << i << " dimension " << j + << " is 0."; } } return true; @@ -286,61 +271,158 @@ // when the RunTimePoolInfo is destroyed or is assigned to. class RunTimePoolInfo::RunTimePoolInfoImpl { public: - RunTimePoolInfoImpl(SharedMemory memory, Mapping mapping); + RunTimePoolInfoImpl(const hidl_memory& hidlMemory, uint8_t* buffer, const sp<IMemory>& memory, + AHardwareBuffer* hardwareBuffer, uint32_t size); - uint8_t* getBuffer() const; - uint32_t getSize() const; + // rule of five... + ~RunTimePoolInfoImpl(); + RunTimePoolInfoImpl(const RunTimePoolInfoImpl&) = delete; + RunTimePoolInfoImpl(RunTimePoolInfoImpl&&) noexcept = delete; + RunTimePoolInfoImpl& operator=(const RunTimePoolInfoImpl&) = delete; + RunTimePoolInfoImpl& operator=(RunTimePoolInfoImpl&&) noexcept = delete; + + uint8_t* getBuffer() const { return mBuffer; } + uint32_t getSize() const { return mSize; } bool flush() const; - const SharedMemory& getMemory() const { return mMemory; } + const hidl_memory& getHidlMemory() const { return mHidlMemory; } private: - const SharedMemory mMemory; - const Mapping mMapping; + const hidl_memory mHidlMemory; // always used + uint8_t* const mBuffer = nullptr; // always used + const sp<IMemory> mMemory; // only used when hidlMemory.name() == "ashmem" + AHardwareBuffer* + mAHardwareBuffer; // only used when hidlMemory.name() == "hardware_buffer_blob" + const uint32_t mSize; }; -RunTimePoolInfo::RunTimePoolInfoImpl::RunTimePoolInfoImpl(SharedMemory memory, Mapping mapping) - : mMemory(std::move(memory)), mMapping(std::move(mapping)) {} +RunTimePoolInfo::RunTimePoolInfoImpl::RunTimePoolInfoImpl(const 
hidl_memory& hidlMemory, + uint8_t* buffer, + const sp<IMemory>& memory, + AHardwareBuffer* hardwareBuffer, + uint32_t size) + : mHidlMemory(hidlMemory), + mBuffer(buffer), + mMemory(memory), + mAHardwareBuffer(hardwareBuffer), + mSize(size) {} -uint8_t* RunTimePoolInfo::RunTimePoolInfoImpl::getBuffer() const { - return std::visit( - [](auto* pointer) { - // Writing to a const buffer may lead to undefined behavior. - // TODO: Refactor the code to avoid the const_cast. - return static_cast<uint8_t*>(const_cast<void*>(pointer)); - }, - mMapping.pointer); -} +RunTimePoolInfo::RunTimePoolInfoImpl::~RunTimePoolInfoImpl() { + if (mBuffer == nullptr) { + return; + } -uint32_t RunTimePoolInfo::RunTimePoolInfoImpl::getSize() const { - CHECK_LE(mMapping.size, std::numeric_limits<uint32_t>::max()); - return static_cast<uint32_t>(mMapping.size); + const auto& memType = mHidlMemory.name(); + if (memType == "ashmem") { + // nothing to do + } else if (memType == "mmap_fd") { + const size_t size = mHidlMemory.size(); + if (munmap(mBuffer, size)) { + LOG(ERROR) << "RunTimePoolInfoImpl::~RunTimePoolInfo(): Can't munmap"; + } + } else if (memType == "hardware_buffer_blob") { + AHardwareBuffer_unlock(mAHardwareBuffer, nullptr); + } else if (memType == "") { + // Represents a POINTER argument; nothing to do + } else { + LOG(ERROR) << "RunTimePoolInfoImpl::~RunTimePoolInfoImpl(): unsupported hidl_memory type"; + } + + if (mAHardwareBuffer != nullptr) { + AHardwareBuffer_release(mAHardwareBuffer); + } } // Making sure the output data are correctly updated after execution. bool RunTimePoolInfo::RunTimePoolInfoImpl::flush() const { - return nn::flush(mMapping); + const auto& memType = mHidlMemory.name(); + if (memType == "mmap_fd") { + const int prot = mHidlMemory.handle()->data[1]; + if (prot & PROT_WRITE) { + const size_t size = mHidlMemory.size(); + return msync(mBuffer, size, MS_SYNC) == 0; + } + } + // No-op for other types of memory. 
+ return true; } // TODO: short term, make share memory mapping and updating a utility function. // TODO: long term, implement mmap_fd as a hidl IMemory service. -std::optional<RunTimePoolInfo> RunTimePoolInfo::createFromMemory(const SharedMemory& memory) { - auto mapping = map(memory); - if (!mapping.has_value()) { - LOG(ERROR) << "Can't map shared memory: " << mapping.error().message; +std::optional<RunTimePoolInfo> RunTimePoolInfo::createFromHidlMemory( + const hidl_memory& hidlMemory) { + uint8_t* buffer = nullptr; + sp<IMemory> memory; + AHardwareBuffer* hardwareBuffer = nullptr; + + const auto& memType = hidlMemory.name(); + if (memType == "ashmem") { + memory = mapMemory(hidlMemory); + if (memory == nullptr) { + LOG(ERROR) << "Can't map shared memory."; + return std::nullopt; + } + buffer = static_cast<uint8_t*>(static_cast<void*>(memory->getPointer())); + if (buffer == nullptr) { + LOG(ERROR) << "Can't access shared memory."; + return std::nullopt; + } + } else if (memType == "mmap_fd") { + size_t size = hidlMemory.size(); + int fd = hidlMemory.handle()->data[0]; + int prot = hidlMemory.handle()->data[1]; + size_t offset = getSizeFromInts(hidlMemory.handle()->data[2], hidlMemory.handle()->data[3]); + buffer = static_cast<uint8_t*>(mmap(nullptr, size, prot, MAP_SHARED, fd, offset)); + if (buffer == MAP_FAILED) { + LOG(ERROR) << "RunTimePoolInfo::set(): Can't mmap the file descriptor."; + return std::nullopt; + } + } else if (memType == "hardware_buffer_blob") { + auto handle = hidlMemory.handle(); + auto format = AHARDWAREBUFFER_FORMAT_BLOB; + auto usage = AHARDWAREBUFFER_USAGE_CPU_READ_OFTEN | AHARDWAREBUFFER_USAGE_CPU_WRITE_OFTEN; + const uint32_t width = hidlMemory.size(); + const uint32_t height = 1; // height is always 1 for BLOB mode AHardwareBuffer. + const uint32_t layers = 1; // layers is always 1 for BLOB mode AHardwareBuffer. 
+ const uint32_t stride = hidlMemory.size(); + + AHardwareBuffer_Desc desc{ + .width = width, + .format = format, + .height = height, + .layers = layers, + .usage = usage, + .stride = stride, + }; + status_t status = AHardwareBuffer_createFromHandle( + &desc, handle, AHARDWAREBUFFER_CREATE_FROM_HANDLE_METHOD_CLONE, &hardwareBuffer); + if (status != NO_ERROR) { + LOG(ERROR) << "RunTimePoolInfo Can't create AHardwareBuffer from handle. Error: " + << status; + return std::nullopt; + } + void* gBuffer = nullptr; + status = AHardwareBuffer_lock(hardwareBuffer, usage, -1, nullptr, &gBuffer); + if (status != NO_ERROR) { + LOG(ERROR) << "RunTimePoolInfo Can't lock the AHardwareBuffer. Error: " << status; + return std::nullopt; + } + buffer = static_cast<uint8_t*>(gBuffer); + } else { + LOG(ERROR) << "RunTimePoolInfo::set(): unsupported hidl_memory type"; return std::nullopt; } - const auto impl = - std::make_shared<const RunTimePoolInfoImpl>(memory, std::move(mapping).value()); - return RunTimePoolInfo(impl); + + const auto impl = std::make_shared<const RunTimePoolInfoImpl>( + hidlMemory, buffer, memory, hardwareBuffer, hidlMemory.size()); + return {RunTimePoolInfo(impl)}; } RunTimePoolInfo RunTimePoolInfo::createFromExistingBuffer(uint8_t* buffer, uint32_t size) { - auto mapping = Mapping{.pointer = buffer, .size = size}; - const auto impl = std::make_shared<const RunTimePoolInfoImpl>(std::make_shared<const Memory>(), - std::move(mapping)); - return RunTimePoolInfo(impl); + const auto impl = std::make_shared<const RunTimePoolInfoImpl>(hidl_memory{}, buffer, nullptr, + nullptr, size); + return {impl}; } RunTimePoolInfo::RunTimePoolInfo(const std::shared_ptr<const RunTimePoolInfoImpl>& impl) @@ -358,17 +440,17 @@ return mImpl->flush(); } -const SharedMemory& RunTimePoolInfo::getMemory() const { - return mImpl->getMemory(); +const hidl_memory& RunTimePoolInfo::getHidlMemory() const { + return mImpl->getHidlMemory(); } -bool 
setRunTimePoolInfosFromCanonicalMemories(std::vector<RunTimePoolInfo>* poolInfos, - const std::vector<SharedMemory>& pools) { +bool setRunTimePoolInfosFromHidlMemories(std::vector<RunTimePoolInfo>* poolInfos, + const hidl_vec<hidl_memory>& pools) { CHECK(poolInfos != nullptr); poolInfos->clear(); poolInfos->reserve(pools.size()); for (const auto& pool : pools) { - if (std::optional<RunTimePoolInfo> poolInfo = RunTimePoolInfo::createFromMemory(pool)) { + if (std::optional<RunTimePoolInfo> poolInfo = RunTimePoolInfo::createFromHidlMemory(pool)) { poolInfos->push_back(*poolInfo); } else { LOG(ERROR) << "Could not map pools"; @@ -380,18 +462,18 @@ } bool setRunTimePoolInfosFromMemoryPools(std::vector<RunTimePoolInfo>* poolInfos, - const std::vector<Request::MemoryPool>& pools) { + const hidl_vec<Request::MemoryPool>& pools) { CHECK(poolInfos != nullptr); poolInfos->clear(); poolInfos->reserve(pools.size()); for (const auto& pool : pools) { - if (!std::holds_alternative<SharedMemory>(pool)) { + if (pool.getDiscriminator() != Request::MemoryPool::hidl_discriminator::hidlMemory) { LOG(ERROR) << "Unknown memory token"; poolInfos->clear(); return false; } if (std::optional<RunTimePoolInfo> poolInfo = - RunTimePoolInfo::createFromMemory(std::get<SharedMemory>(pool))) { + RunTimePoolInfo::createFromHidlMemory(pool.hidlMemory())) { poolInfos->push_back(*poolInfo); } else { LOG(ERROR) << "Could not map pools"; @@ -402,7 +484,6 @@ return true; } -#ifdef NN_INCLUDE_CPU_IMPLEMENTATION template <typename T> inline bool convertToNhwcImpl(T* to, const T* from, const std::vector<uint32_t>& fromDim) { uint32_t spatialSize = fromDim[2] * fromDim[3]; @@ -438,7 +519,7 @@ LOG(ERROR) << "Error converting a non-4-D tensor to NHWC layout"; return false; } - to.lifetime = Operand::LifeTime::TEMPORARY_VARIABLE; + to.lifetime = OperandLifeTime::TEMPORARY_VARIABLE; if (data_layout) { // convert dimensions Shape inShape = from.shape(); @@ -520,7 +601,6 @@ } return true; } -#endif // 
NN_INCLUDE_CPU_IMPLEMENTATION // Decrements the usage count for the operands listed. Frees the memory // allocated for any temporary variable with a count of zero. @@ -545,7 +625,7 @@ // that are inputs to an operation. static void freeUnusedSubgraphOperands(std::vector<RunTimeOperandInfo>* operands) { for (auto& info : *operands) { - if (info.lifetime == Operand::LifeTime::TEMPORARY_VARIABLE && info.numberOfUsesLeft == 0 && + if (info.lifetime == OperandLifeTime::TEMPORARY_VARIABLE && info.numberOfUsesLeft == 0 && info.buffer != nullptr) { delete[] info.buffer; info.buffer = nullptr; @@ -559,8 +639,8 @@ const std::vector<RunTimePoolInfo>& modelPoolInfos, const std::vector<RunTimePoolInfo>& requestPoolInfos) { NNTRACE_CPU(NNTRACE_PHASE_EXECUTION, "run"); - VLOG(CPUEXE) << "CpuExecutor::run() with request(" << SHOW_IF_DEBUG(request) << ")"; - mModelOperandValues = model.operandValues.data(); + VLOG(CPUEXE) << "CpuExecutor::run() with request(" << SHOW_IF_DEBUG(toString(request)) << ")"; + mModelOperandValues = &model.operandValues; mModelPoolInfos = &modelPoolInfos; mReferencedSubgraphs = &model.referenced; @@ -597,8 +677,8 @@ return result; } -int CpuExecutor::executeSubgraph(const Model::Subgraph& subgraph, RunTimeOperandInfo* operands) { - VLOG(CPUEXE) << "CpuExecutor::executeSubgraph " << subgraph; +int CpuExecutor::executeSubgraph(const Subgraph& subgraph, RunTimeOperandInfo* operands) { + VLOG(CPUEXE) << "CpuExecutor::executeSubgraph " << toString(subgraph); // The graph has serialized the operation in execution order. 
for (const auto& operation : subgraph.operations) { NN_RETURN_IF_ERROR(executeOperation(operation, operands)); @@ -606,13 +686,10 @@ return ANEURALNETWORKS_NO_ERROR; } -std::vector<RunTimeOperandInfo> CpuExecutor::initializeRunTimeInfo( - const Model::Subgraph& subgraph) { +std::vector<RunTimeOperandInfo> CpuExecutor::initializeRunTimeInfo(const Subgraph& subgraph) { VLOG(CPUEXE) << "CpuExecutor::initializeRunTimeInfo"; const size_t count = subgraph.operands.size(); std::vector<RunTimeOperandInfo> operands(count); - std::vector<uint32_t> numberOfConsumers = - countNumberOfConsumers(count, subgraph.operations).value(); for (size_t i = 0; i < count; i++) { const Operand& from = subgraph.operands[i]; RunTimeOperandInfo& to = operands[i]; @@ -624,15 +701,15 @@ to.lifetime = from.lifetime; to.extraParams = from.extraParams; switch (from.lifetime) { - case Operand::LifeTime::TEMPORARY_VARIABLE: + case OperandLifeTime::TEMPORARY_VARIABLE: to.buffer = nullptr; - to.numberOfUsesLeft = numberOfConsumers[i]; + to.numberOfUsesLeft = from.numberOfConsumers; break; - case Operand::LifeTime::CONSTANT_COPY: - to.buffer = const_cast<uint8_t*>(mModelOperandValues + from.location.offset); + case OperandLifeTime::CONSTANT_COPY: + to.buffer = const_cast<uint8_t*>(&(*mModelOperandValues)[from.location.offset]); to.numberOfUsesLeft = 0; break; - case Operand::LifeTime::CONSTANT_REFERENCE: { + case OperandLifeTime::CONSTANT_REFERENCE: { auto poolIndex = from.location.poolIndex; CHECK_LT(poolIndex, mModelPoolInfos->size()); auto& r = (*mModelPoolInfos)[poolIndex]; @@ -640,21 +717,16 @@ to.numberOfUsesLeft = 0; break; } - case Operand::LifeTime::SUBGRAPH: { + case OperandLifeTime::SUBGRAPH: { auto subgraphIndex = from.location.offset; CHECK_LT(subgraphIndex, mReferencedSubgraphs->size()); to.buffer = reinterpret_cast<uint8_t*>( - const_cast<Model::Subgraph*>(&(*mReferencedSubgraphs)[subgraphIndex])); + const_cast<Subgraph*>(&(*mReferencedSubgraphs)[subgraphIndex])); to.numberOfUsesLeft = 0; 
} break; - case Operand::LifeTime::POINTER: { - to.buffer = reinterpret_cast<uint8_t*>( - const_cast<void*>(std::get<const void*>(from.location.pointer))); - to.numberOfUsesLeft = 0; - } break; - case Operand::LifeTime::SUBGRAPH_INPUT: - case Operand::LifeTime::SUBGRAPH_OUTPUT: - case Operand::LifeTime::NO_VALUE: + case OperandLifeTime::SUBGRAPH_INPUT: + case OperandLifeTime::SUBGRAPH_OUTPUT: + case OperandLifeTime::NO_VALUE: to.buffer = nullptr; to.numberOfUsesLeft = 0; break; @@ -664,15 +736,15 @@ } void CpuExecutor::updateForArguments(const std::vector<uint32_t>& indexes, - const std::vector<Request::Argument>& arguments, + const hal::hidl_vec<hal::RequestArgument>& arguments, const std::vector<RunTimePoolInfo>& requestPoolInfos, RunTimeOperandInfo* operands) { CHECK_EQ(indexes.size(), arguments.size()); for (size_t i = 0; i < indexes.size(); i++) { const uint32_t operandIndex = indexes[i]; - const Request::Argument& from = arguments[i]; + const RequestArgument& from = arguments[i]; RunTimeOperandInfo& to = operands[operandIndex]; - if (!from.dimensions.empty()) { + if (from.dimensions.size() > 0) { // It's the responsibility of the caller to validate that // from.dimensions only modifies the dimensions that were // unspecified in the model. That's the case in SampleDriver.cpp @@ -680,43 +752,26 @@ // TODO make sure that's the case for the default CPU path. to.dimensions = from.dimensions; } - switch (from.lifetime) { - case Request::Argument::LifeTime::NO_VALUE: { - to.lifetime = Operand::LifeTime::NO_VALUE; - CHECK(to.buffer == nullptr); - to.length = 0; - break; - } - case Request::Argument::LifeTime::POOL: { - auto poolIndex = from.location.poolIndex; - CHECK_LT(poolIndex, requestPoolInfos.size()); - auto& r = requestPoolInfos[poolIndex]; - to.buffer = r.getBuffer() + from.location.offset; - if (from.location.offset == 0 && from.location.length == 0) { - // Use the entire memory region. 
- to.length = r.getSize(); - } else { - to.length = from.location.length; - } - break; - } - case Request::Argument::LifeTime::POINTER: { - constexpr auto fn = [](const void* ptr) { - return static_cast<const uint8_t*>(ptr); - }; - auto ptr = std::visit(fn, from.location.pointer); - // Writing to a const buffer may lead to undefined behavior. - // TODO: Refactor the code to avoid the const_cast. - to.buffer = const_cast<uint8_t*>(ptr); + if (from.hasNoValue) { + to.lifetime = OperandLifeTime::NO_VALUE; + CHECK(to.buffer == nullptr); + to.length = 0; + } else { + auto poolIndex = from.location.poolIndex; + CHECK_LT(poolIndex, requestPoolInfos.size()); + auto& r = requestPoolInfos[poolIndex]; + to.buffer = r.getBuffer() + from.location.offset; + if (from.location.offset == 0 && from.location.length == 0) { + // Use the entire memory region. + to.length = r.getSize(); + } else { to.length = from.location.length; - break; } } } } int CpuExecutor::executeOperation(const Operation& operation, RunTimeOperandInfo* operands) { -#ifdef NN_INCLUDE_CPU_IMPLEMENTATION if (hasDeadlinePassed(mDeadline)) { return ANEURALNETWORKS_MISSED_DEADLINE_TRANSIENT; } @@ -735,9 +790,9 @@ return result; } - // VLOG(CPUEXE) << "CpuExecutor::executeOperation(" << operation << ")"; - const std::vector<uint32_t>& ins = operation.inputs; - const std::vector<uint32_t>& outs = operation.outputs; + // VLOG(CPUEXE) << "CpuExecutor::executeOperation(" << toString(operation) << ")"; + const hidl_vec<uint32_t>& ins = operation.inputs; + const hidl_vec<uint32_t>& outs = operation.outputs; bool success = false; int result = ANEURALNETWORKS_NO_ERROR; @@ -749,30 +804,29 @@ auto allParametersPresent = [&operation, &operands, &ins, &outs](size_t requiredIns, size_t requiredOuts) -> bool { auto verify = [&operation, &operands](size_t requiredCount, - const std::vector<uint32_t>& indexes, + const hidl_vec<uint32_t>& indexes, const char* type) -> bool { size_t actualCount = indexes.size(); if (actualCount != 
requiredCount) { - LOG(ERROR) << operation.type << ": Invalid number of " << type << " operands. Got " - << actualCount << " of " << requiredCount; + LOG(ERROR) << getOperationName(operation.type) << ": Invalid number of " << type + << " operands. Got " << actualCount << " of " << requiredCount; return false; } for (size_t i = 0; i < actualCount; i++) { - if (operands[indexes[i]].lifetime == Operand::LifeTime::NO_VALUE) { - LOG(ERROR) << operation.type << " " << type << " operand " << i - << " is required but missing."; + if (operands[indexes[i]].lifetime == OperandLifeTime::NO_VALUE) { + LOG(ERROR) << getOperationName(operation.type) << " " << type << " operand " + << i << " is required but missing."; return false; } } return true; }; - auto verifyNoZeroSizedInputs = [&operation, - &operands](const std::vector<uint32_t>& indexes) { + auto verifyNoZeroSizedInputs = [&operation, &operands](const hidl_vec<uint32_t>& indexes) { for (size_t i = 0; i < indexes.size(); i++) { for (size_t j = 0; j < operands[indexes[i]].dimensions.size(); j++) { if (operands[indexes[i]].dimensions[j] == 0) { - LOG(ERROR) << operation.type + LOG(ERROR) << getOperationName(operation.type) << " does not support zero-sized tensor, but input " << i << " dimension " << j << " is zero."; return false; @@ -825,7 +879,7 @@ success = false; break; } - output_tmp.lifetime = Operand::LifeTime::TEMPORARY_VARIABLE; + output_tmp.lifetime = OperandLifeTime::TEMPORARY_VARIABLE; output_tmp.buffer = data_layout ? nullptr : output.buffer; output_tmp.length = data_layout ? 0 : output.length; if (!depthToSpacePrepare(input_tmp.shape(), blockSize, &outShape) || @@ -889,7 +943,7 @@ success = false; break; } - output_tmp.lifetime = Operand::LifeTime::TEMPORARY_VARIABLE; + output_tmp.lifetime = OperandLifeTime::TEMPORARY_VARIABLE; output_tmp.buffer = data_layout ? nullptr : output.buffer; output_tmp.length = data_layout ? 
0 : output.length; @@ -1057,6 +1111,9 @@ if (!allParametersPresent(3, 1)) { return ANEURALNETWORKS_BAD_DATA; } + const RunTimeOperandInfo& lookups = operands[ins[HashtableLookup::kLookupTensor]]; + const RunTimeOperandInfo& keys = operands[ins[HashtableLookup::kKeyTensor]]; + const RunTimeOperandInfo& values = operands[ins[HashtableLookup::kValueTensor]]; RunTimeOperandInfo& output = operands[outs[Multinomial::kOutputTensor]]; Shape outputShape; @@ -1110,7 +1167,7 @@ success = false; break; } - output_tmp.lifetime = Operand::LifeTime::TEMPORARY_VARIABLE; + output_tmp.lifetime = OperandLifeTime::TEMPORARY_VARIABLE; output_tmp.buffer = data_layout ? nullptr : output.buffer; output_tmp.length = data_layout ? 0 : output.length; @@ -1182,7 +1239,7 @@ success = false; break; } - output_tmp.lifetime = Operand::LifeTime::TEMPORARY_VARIABLE; + output_tmp.lifetime = OperandLifeTime::TEMPORARY_VARIABLE; output_tmp.buffer = data_layout ? nullptr : output.buffer; output_tmp.length = data_layout ? 0 : output.length; @@ -1501,7 +1558,7 @@ success = false; break; } - output_tmp.lifetime = Operand::LifeTime::TEMPORARY_VARIABLE; + output_tmp.lifetime = OperandLifeTime::TEMPORARY_VARIABLE; output_tmp.buffer = data_layout ? nullptr : output.buffer; output_tmp.length = data_layout ? 
0 : output.length; @@ -1548,8 +1605,7 @@ success = groupedConvQuant8PerChannel( reinterpret_cast<const uint8_t*>(input_tmp.buffer), input_tmp.shape(), reinterpret_cast<const int8_t*>(filter.buffer), filter.shape(), - std::get<Operand::SymmPerChannelQuantParams>(filter.extraParams) - .scales.data(), + filter.extraParams.channelQuant().scales.data(), reinterpret_cast<const int32_t*>(bias.buffer), bias.shape(), padding_left, padding_right, padding_top, padding_bottom, stride_width, stride_height, numGroups, activation, @@ -1568,8 +1624,7 @@ success = groupedConvQuant8PerChannel( reinterpret_cast<const int8_t*>(input_tmp.buffer), input_tmp.shape(), reinterpret_cast<const int8_t*>(filter.buffer), filter.shape(), - std::get<Operand::SymmPerChannelQuantParams>(filter.extraParams) - .scales.data(), + filter.extraParams.channelQuant().scales.data(), reinterpret_cast<const int32_t*>(bias.buffer), bias.shape(), padding_left, padding_right, padding_top, padding_bottom, stride_width, stride_height, numGroups, activation, @@ -1648,10 +1703,11 @@ const OperationRegistration* operationRegistration = mOperationResolver->findOperation(operation.type); if (operationRegistration == nullptr) { - LOG(ERROR) << operation.type << " not registered"; + LOG(ERROR) << getOperationName(operation.type) << " not registered"; } else if (operationRegistration->prepare == nullptr || operationRegistration->execute == nullptr) { - LOG(ERROR) << "Incomplete operation registration: " << operation.type; + LOG(ERROR) << "Incomplete operation registration: " + << getOperationName(operation.type); } else { OperationExecutionContext context(&operation, operands); success = operationRegistration->flags.allowOmittedOperand || @@ -1668,15 +1724,12 @@ result = ANEURALNETWORKS_OP_FAILED; } if (result != ANEURALNETWORKS_NO_ERROR) { - LOG(ERROR) << operation.type << " failed."; + LOG(ERROR) << getOperationName(operation.type) << " failed."; + return result; } consumeOperationInputs(ins, operands); - return result; 
-#else - LOG(ERROR) << "Built without CPU execution support"; - return ANEURALNETWORKS_OP_FAILED; -#endif // NN_INCLUDE_CPU_IMPLEMENTATION + return ANEURALNETWORKS_NO_ERROR; } // Copies RunTimeOperandInfo, preserving the original lifetime and numberOfUsesLeft @@ -1701,8 +1754,7 @@ const uint32_t branchInputIndex = condValue ? op::kThenModelOperand : op::kElseModelOperand; const RunTimeOperandInfo& branchOperand = operands[operation.inputs[branchInputIndex]]; - const Model::Subgraph& branchSubgraph = - *reinterpret_cast<const Model::Subgraph*>(branchOperand.buffer); + const Subgraph& branchSubgraph = *reinterpret_cast<const Subgraph*>(branchOperand.buffer); std::vector<RunTimeOperandInfo> branchOperands = initializeRunTimeInfo(branchSubgraph); // Initialize inner input and output operands from outer operands. @@ -1732,10 +1784,8 @@ namespace op = operation_while; const RunTimeOperandInfo& condModelOperand = operands[operation.inputs[op::kCondModelOperand]]; const RunTimeOperandInfo& bodyModelOperand = operands[operation.inputs[op::kBodyModelOperand]]; - const Model::Subgraph& condSubgraph = - *reinterpret_cast<const Model::Subgraph*>(condModelOperand.buffer); - const Model::Subgraph& bodySubgraph = - *reinterpret_cast<const Model::Subgraph*>(bodyModelOperand.buffer); + const Subgraph& condSubgraph = *reinterpret_cast<const Subgraph*>(condModelOperand.buffer); + const Subgraph& bodySubgraph = *reinterpret_cast<const Subgraph*>(bodyModelOperand.buffer); std::vector<RunTimeOperandInfo> condOperands = initializeRunTimeInfo(condSubgraph); std::vector<RunTimeOperandInfo> bodyOperands = initializeRunTimeInfo(bodySubgraph); @@ -1750,24 +1800,6 @@ std::vector<uint8_t*> tmp1(bodySubgraph.outputIndexes.size()); std::vector<uint8_t*> tmp2(bodySubgraph.outputIndexes.size()); - // Ensure objects are freed - auto cleanupGuard = base::make_scope_guard( - [&tmp1, &tmp2, &condOperands, &bodyOperands, &operation, &operands] { - auto freeLoopOutputs = [](const std::vector<uint8_t*>& 
tmp) { - for (auto buffer : tmp) { - if (buffer != nullptr) { - delete[] buffer; - } - } - }; - - freeLoopOutputs(tmp1); - freeLoopOutputs(tmp2); - freeUnusedSubgraphOperands(&condOperands); - freeUnusedSubgraphOperands(&bodyOperands); - consumeOperationInputs(operation.inputs, operands); - }); - // For body outputs with unknown shape, we skip double buffering and // allocate on each iteration instead. This allows growing output tensors // inside a WHILE loop. @@ -1790,7 +1822,7 @@ condOutput.length = sizeof(condValue); std::chrono::nanoseconds timeoutDuration(mLoopTimeoutDuration); - const auto startTime = Clock::now(); + const auto startTime = std::chrono::steady_clock::now(); for (uint32_t iteration = 0;; ++iteration) { VLOG(CPUEXE) << "CpuExecutor::executeWhileOperation: iteration " << iteration; if (iteration != 0) { @@ -1807,7 +1839,7 @@ break; } - const auto duration = Clock::now() - startTime; + const auto duration = std::chrono::steady_clock::now() - startTime; if (duration > timeoutDuration) { LOG(ERROR) << "CpuExecutor::executeWhileOperation: timed out after " << std::chrono::duration_cast<std::chrono::milliseconds>(duration).count() @@ -1854,6 +1886,19 @@ std::memcpy(outerOperand.buffer, innerOperand.buffer, innerOperand.length); } + auto freeLoopOutputs = [](const std::vector<uint8_t*>& tmp) { + for (auto buffer : tmp) { + if (buffer != nullptr) { + delete[] buffer; + } + } + }; + freeLoopOutputs(tmp1); + freeLoopOutputs(tmp2); + freeUnusedSubgraphOperands(&condOperands); + freeUnusedSubgraphOperands(&bodyOperands); + consumeOperationInputs(operation.inputs, operands); + return ANEURALNETWORKS_NO_ERROR; } @@ -1865,8 +1910,6 @@ const RunTimeOperandInfo& from = operands[operandIndex]; mOutputShapes[i].dimensions = from.dimensions; mOutputShapes[i].isSufficient = from.isSufficient(); - VLOG(EXECUTION) << "CpuExecutor::setOutputShapes: mOutputShapes[" << i - << "] = " << mOutputShapes[i]; } }
diff --git a/common/DynamicCLDeps.cpp b/common/DynamicCLDeps.cpp deleted file mode 100644 index f7cfc25..0000000 --- a/common/DynamicCLDeps.cpp +++ /dev/null
@@ -1,67 +0,0 @@ -/* - * Copyright (C) 2021 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#define LOG_TAG "DynamicCLDeps" - -#include "DynamicCLDeps.h" - -#include <android-base/logging.h> -#include <dlfcn.h> - -namespace android::nn { -namespace { - -void* loadFunction(void* handle, const char* name) { - CHECK(handle != nullptr); - void* fn = dlsym(handle, name); - CHECK(fn != nullptr) << "Unable to open function " << name << ": " << dlerror(); - return fn; -} - -#define NN_LOAD_FUNCTION(name, symbol) \ - impl.name = reinterpret_cast<decltype(impl.name)>(loadFunction(handle, #symbol)); - -const CompatibilityLayerMemory loadCompatibilityLayerMemoryHelper() { - CompatibilityLayerMemory impl = {}; - - // libandroid.so is NOT accessible for non-NDK apps and - // libcutils.so is NOT accessible for NDK apps. - // Hence we try to load one or the other in order to cover both cases. 
- void* handle = dlopen("libandroid.so", RTLD_LAZY | RTLD_LOCAL); - if (handle != nullptr) { - NN_LOAD_FUNCTION(create, ASharedMemory_create); - NN_LOAD_FUNCTION(getSize, ASharedMemory_getSize); - } else { - handle = dlopen("libcutils.so", RTLD_LAZY | RTLD_LOCAL); - CHECK(handle != nullptr) << "Unable to open either libandroid.so or libcutils.so: " - << dlerror(); - NN_LOAD_FUNCTION(create, ashmem_create_region); - NN_LOAD_FUNCTION(getSize, ashmem_get_size_region); - } - - return impl; -} - -#undef NN_LOAD_FUNCTION - -} // namespace - -const CompatibilityLayerMemory& loadCompatibilityLayerMemory() { - static const CompatibilityLayerMemory impl = loadCompatibilityLayerMemoryHelper(); - return impl; -} - -} // namespace android::nn
diff --git a/common/DynamicCLDeps.h b/common/DynamicCLDeps.h deleted file mode 100644 index 358bd4a..0000000 --- a/common/DynamicCLDeps.h +++ /dev/null
@@ -1,33 +0,0 @@ -/* - * Copyright (C) 2021 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef ANDROID_FRAMEWORKS_ML_NN_COMMON_DYNAMIC_CL_DEPS_H -#define ANDROID_FRAMEWORKS_ML_NN_COMMON_DYNAMIC_CL_DEPS_H - -#include <cstdint> - -namespace android::nn { - -struct CompatibilityLayerMemory { - int (*create)(const char* name, size_t size); - size_t (*getSize)(int fd); -}; - -const CompatibilityLayerMemory& loadCompatibilityLayerMemory(); - -} // namespace android::nn - -#endif // ANDROID_FRAMEWORKS_ML_NN_COMMON_DYNAMIC_CL_DEPS_H
diff --git a/common/ExecutionBurstController.cpp b/common/ExecutionBurstController.cpp index ac49448..8463df8 100644 --- a/common/ExecutionBurstController.cpp +++ b/common/ExecutionBurstController.cpp
@@ -25,7 +25,6 @@ #include <limits> #include <memory> #include <string> -#include <thread> #include <tuple> #include <utility> #include <vector> @@ -37,6 +36,8 @@ namespace android::nn { namespace { +using namespace hal; + using V1_2::FmqRequestDatum; using V1_2::FmqResultDatum; using V1_2::IBurstCallback; @@ -44,10 +45,10 @@ using FmqRequestDescriptor = hardware::MQDescriptorSync<FmqRequestDatum>; using FmqResultDescriptor = hardware::MQDescriptorSync<FmqResultDatum>; -constexpr V1_2::Timing kNoTiming12 = {std::numeric_limits<uint64_t>::max(), - std::numeric_limits<uint64_t>::max()}; +constexpr Timing kNoTiming = {std::numeric_limits<uint64_t>::max(), + std::numeric_limits<uint64_t>::max()}; -class BurstContextDeathHandler : public hardware::hidl_death_recipient { +class BurstContextDeathHandler : public hidl_death_recipient { public: using Callback = std::function<void()>; @@ -67,7 +68,7 @@ } // anonymous namespace // serialize a request into a packet -std::vector<FmqRequestDatum> serialize(const V1_0::Request& request, V1_2::MeasureTiming measure, +std::vector<FmqRequestDatum> serialize(const V1_0::Request& request, MeasureTiming measure, const std::vector<int32_t>& slots) { // count how many elements need to be sent for a request size_t count = 2 + request.inputs.size() + request.outputs.size() + request.pools.size(); @@ -148,11 +149,11 @@ } // deserialize a packet into the result -std::optional<std::tuple<V1_0::ErrorStatus, std::vector<V1_2::OutputShape>, V1_2::Timing>> -deserialize(const std::vector<FmqResultDatum>& data) { +std::optional<std::tuple<V1_0::ErrorStatus, std::vector<OutputShape>, Timing>> deserialize( + const std::vector<FmqResultDatum>& data) { using discriminator = FmqResultDatum::hidl_discriminator; - std::vector<V1_2::OutputShape> outputShapes; + std::vector<OutputShape> outputShapes; size_t index = 0; // validate packet information @@ -217,7 +218,7 @@ } // unpackage execution timing - const V1_2::Timing timing = 
data[index].executionTiming(); + const Timing timing = data[index].executionTiming(); index++; // validate packet information @@ -253,7 +254,7 @@ std::chrono::microseconds pollingTimeWindow) : mFmqResultChannel(std::move(fmqResultChannel)), kPollingTimeWindow(pollingTimeWindow) {} -std::optional<std::tuple<V1_0::ErrorStatus, std::vector<V1_2::OutputShape>, V1_2::Timing>> +std::optional<std::tuple<V1_0::ErrorStatus, std::vector<OutputShape>, Timing>> ResultChannelReceiver::getBlocking() { const auto packet = getPacketBlocking(); if (!packet) { @@ -274,13 +275,14 @@ // TODO: look for a different/better way to signal/notify the futex to // wake up any thread waiting on it FmqResultDatum datum; - datum.packetInformation({/*.packetSize=*/0, - /*.errorStatus=*/V1_0::ErrorStatus::GENERAL_FAILURE, + datum.packetInformation({/*.packetSize=*/0, /*.errorStatus=*/V1_0::ErrorStatus::GENERAL_FAILURE, /*.numberOfOperands=*/0}); mFmqResultChannel->writeBlocking(&datum, 1); } std::optional<std::vector<FmqResultDatum>> ResultChannelReceiver::getPacketBlocking() { + using discriminator = FmqResultDatum::hidl_discriminator; + if (!mValid) { return std::nullopt; } @@ -311,8 +313,6 @@ } return std::make_optional(std::move(packet)); } - - std::this_thread::yield(); } // If we get to this point, we either stopped polling because it was taking @@ -365,7 +365,7 @@ RequestChannelSender::RequestChannelSender(std::unique_ptr<FmqRequestChannel> fmqRequestChannel) : mFmqRequestChannel(std::move(fmqRequestChannel)) {} -bool RequestChannelSender::send(const V1_0::Request& request, V1_2::MeasureTiming measure, +bool RequestChannelSender::send(const V1_0::Request& request, MeasureTiming measure, const std::vector<int32_t>& slots) { const std::vector<FmqRequestDatum> serialized = serialize(request, measure, slots); return sendPacket(serialized); @@ -391,31 +391,30 @@ mValid = false; } -hardware::Return<void> ExecutionBurstController::ExecutionBurstCallback::getMemories( - const 
hardware::hidl_vec<int32_t>& slots, getMemories_cb cb) { +Return<void> ExecutionBurstController::ExecutionBurstCallback::getMemories( + const hidl_vec<int32_t>& slots, getMemories_cb cb) { std::lock_guard<std::mutex> guard(mMutex); // get all memories - hardware::hidl_vec<hardware::hidl_memory> memories(slots.size()); + hidl_vec<hidl_memory> memories(slots.size()); std::transform(slots.begin(), slots.end(), memories.begin(), [this](int32_t slot) { - return slot < mMemoryCache.size() ? mMemoryCache[slot] : hardware::hidl_memory{}; + return slot < mMemoryCache.size() ? mMemoryCache[slot] : hidl_memory{}; }); // ensure all memories are valid if (!std::all_of(memories.begin(), memories.end(), - [](const hardware::hidl_memory& memory) { return memory.valid(); })) { + [](const hidl_memory& memory) { return memory.valid(); })) { cb(V1_0::ErrorStatus::INVALID_ARGUMENT, {}); - return hardware::Void(); + return Void(); } // return successful cb(V1_0::ErrorStatus::NONE, std::move(memories)); - return hardware::Void(); + return Void(); } std::vector<int32_t> ExecutionBurstController::ExecutionBurstCallback::getSlots( - const hardware::hidl_vec<hardware::hidl_memory>& memories, - const std::vector<intptr_t>& keys) { + const hidl_vec<hidl_memory>& memories, const std::vector<intptr_t>& keys) { std::lock_guard<std::mutex> guard(mMutex); // retrieve (or bind) all slots corresponding to memories @@ -442,8 +441,8 @@ return {true, slot}; } -int32_t ExecutionBurstController::ExecutionBurstCallback::getSlotLocked( - const hardware::hidl_memory& memory, intptr_t key) { +int32_t ExecutionBurstController::ExecutionBurstCallback::getSlotLocked(const hidl_memory& memory, + intptr_t key) { auto iter = mMemoryIdToSlot.find(key); if (iter == mMemoryIdToSlot.end()) { const int32_t slot = allocateSlotLocked(); @@ -506,7 +505,7 @@ // configure burst V1_0::ErrorStatus errorStatus; sp<IBurstContext> burstContext; - const hardware::Return<void> ret = preparedModel->configureExecutionBurst( + const 
Return<void> ret = preparedModel->configureExecutionBurst( callback, *requestChannelDescriptor, *resultChannelDescriptor, [&errorStatus, &burstContext](V1_0::ErrorStatus status, const sp<IBurstContext>& context) { @@ -542,7 +541,7 @@ // proactively handle service crashes. If the linkToDeath call fails, // asynchronous calls are susceptible to hangs if the service crashes before // providing the response. - const hardware::Return<bool> deathHandlerRet = burstContext->linkToDeath(deathHandler, 0); + const Return<bool> deathHandlerRet = burstContext->linkToDeath(deathHandler, 0); if (!deathHandlerRet.isOk() || deathHandlerRet != true) { LOG(ERROR) << "ExecutionBurstController::create -- Failed to register a death recipient " "for the IBurstContext object."; @@ -558,7 +557,7 @@ const std::shared_ptr<RequestChannelSender>& requestChannelSender, const std::shared_ptr<ResultChannelReceiver>& resultChannelReceiver, const sp<IBurstContext>& burstContext, const sp<ExecutionBurstCallback>& callback, - const sp<hardware::hidl_death_recipient>& deathHandler) + const sp<hidl_death_recipient>& deathHandler) : mRequestChannelSender(requestChannelSender), mResultChannelReceiver(resultChannelReceiver), mBurstContext(burstContext), @@ -575,17 +574,17 @@ } } -static std::tuple<int, std::vector<V1_2::OutputShape>, V1_2::Timing, bool> getExecutionResult( - V1_0::ErrorStatus status, std::vector<V1_2::OutputShape> outputShapes, V1_2::Timing timing, +static std::tuple<int, std::vector<OutputShape>, Timing, bool> getExecutionResult( + V1_0::ErrorStatus status, std::vector<OutputShape> outputShapes, Timing timing, bool fallback) { auto [n, checkedOutputShapes, checkedTiming] = getExecutionResult(convertToV1_3(status), std::move(outputShapes), timing); - return {n, convertToV1_2(checkedOutputShapes), convertToV1_2(checkedTiming), fallback}; + return {n, std::move(checkedOutputShapes), checkedTiming, fallback}; } -std::tuple<int, std::vector<V1_2::OutputShape>, V1_2::Timing, bool> 
-ExecutionBurstController::compute(const V1_0::Request& request, V1_2::MeasureTiming measure, - const std::vector<intptr_t>& memoryIds) { +std::tuple<int, std::vector<OutputShape>, Timing, bool> ExecutionBurstController::compute( + const V1_0::Request& request, MeasureTiming measure, + const std::vector<intptr_t>& memoryIds) { // This is the first point when we know an execution is occurring, so begin // to collect systraces. Note that the first point we can begin collecting // systraces in ExecutionBurstServer is when the RequestChannelReceiver @@ -601,7 +600,7 @@ if (!success) { LOG(ERROR) << "Error sending FMQ packet"; // only use fallback execution path if the packet could not be sent - return getExecutionResult(V1_0::ErrorStatus::GENERAL_FAILURE, {}, kNoTiming12, + return getExecutionResult(V1_0::ErrorStatus::GENERAL_FAILURE, {}, kNoTiming, /*fallback=*/true); } @@ -610,7 +609,7 @@ if (!result) { LOG(ERROR) << "Error retrieving FMQ packet"; // only use fallback execution path if the packet could not be sent - return getExecutionResult(V1_0::ErrorStatus::GENERAL_FAILURE, {}, kNoTiming12, + return getExecutionResult(V1_0::ErrorStatus::GENERAL_FAILURE, {}, kNoTiming, /*fallback=*/false); }
diff --git a/common/ExecutionBurstServer.cpp b/common/ExecutionBurstServer.cpp index eab8e68..583ebf5 100644 --- a/common/ExecutionBurstServer.cpp +++ b/common/ExecutionBurstServer.cpp
@@ -25,26 +25,26 @@ #include <limits> #include <map> #include <memory> -#include <thread> #include <tuple> #include <utility> #include <vector> #include "HalInterfaces.h" #include "Tracing.h" -#include "Utils.h" namespace android::nn { namespace { +using namespace hal; + using hardware::MQDescriptorSync; using V1_2::FmqRequestDatum; using V1_2::FmqResultDatum; using V1_2::IBurstCallback; using V1_2::IBurstContext; -constexpr V1_2::Timing kNoTiming = {std::numeric_limits<uint64_t>::max(), - std::numeric_limits<uint64_t>::max()}; +constexpr Timing kNoTiming = {std::numeric_limits<uint64_t>::max(), + std::numeric_limits<uint64_t>::max()}; // DefaultBurstExecutorWithCache adapts an IPreparedModel so that it can be // used as an IBurstExecutorWithCache. Specifically, the cache simply stores the @@ -61,17 +61,17 @@ return (it != mMemoryCache.end()) && it->second.valid(); } - void addCacheEntry(const hardware::hidl_memory& memory, int32_t slot) override { + void addCacheEntry(const hidl_memory& memory, int32_t slot) override { mMemoryCache[slot] = memory; } void removeCacheEntry(int32_t slot) override { mMemoryCache.erase(slot); } - std::tuple<V1_0::ErrorStatus, hardware::hidl_vec<V1_2::OutputShape>, V1_2::Timing> execute( + std::tuple<V1_0::ErrorStatus, hidl_vec<OutputShape>, Timing> execute( const V1_0::Request& request, const std::vector<int32_t>& slots, - V1_2::MeasureTiming measure) override { + MeasureTiming measure) override { // convert slots to pools - hardware::hidl_vec<hardware::hidl_memory> pools(slots.size()); + hidl_vec<hidl_memory> pools(slots.size()); std::transform(slots.begin(), slots.end(), pools.begin(), [this](int32_t slot) { return mMemoryCache[slot]; }); @@ -81,20 +81,18 @@ // setup execution V1_0::ErrorStatus returnedStatus = V1_0::ErrorStatus::GENERAL_FAILURE; - hardware::hidl_vec<V1_2::OutputShape> returnedOutputShapes; - V1_2::Timing returnedTiming; + hidl_vec<OutputShape> returnedOutputShapes; + Timing returnedTiming; auto cb = 
[&returnedStatus, &returnedOutputShapes, &returnedTiming]( - V1_0::ErrorStatus status, - const hardware::hidl_vec<V1_2::OutputShape>& outputShapes, - const V1_2::Timing& timing) { + V1_0::ErrorStatus status, const hidl_vec<OutputShape>& outputShapes, + const Timing& timing) { returnedStatus = status; returnedOutputShapes = outputShapes; returnedTiming = timing; }; // execute - const hardware::Return<void> ret = - mpPreparedModel->executeSynchronously(fullRequest, measure, cb); + const Return<void> ret = mpPreparedModel->executeSynchronously(fullRequest, measure, cb); if (!ret.isOk() || returnedStatus != V1_0::ErrorStatus::NONE) { LOG(ERROR) << "IPreparedModelAdapter::execute -- Error executing"; return {returnedStatus, std::move(returnedOutputShapes), kNoTiming}; @@ -105,15 +103,14 @@ private: V1_2::IPreparedModel* const mpPreparedModel; - std::map<int32_t, hardware::hidl_memory> mMemoryCache; + std::map<int32_t, hidl_memory> mMemoryCache; }; } // anonymous namespace // serialize result std::vector<FmqResultDatum> serialize(V1_0::ErrorStatus errorStatus, - const std::vector<V1_2::OutputShape>& outputShapes, - V1_2::Timing timing) { + const std::vector<OutputShape>& outputShapes, Timing timing) { // count how many elements need to be sent for a request size_t count = 2 + outputShapes.size(); for (const auto& outputShape : outputShapes) { @@ -164,7 +161,7 @@ } // deserialize request -std::optional<std::tuple<V1_0::Request, std::vector<int32_t>, V1_2::MeasureTiming>> deserialize( +std::optional<std::tuple<V1_0::Request, std::vector<int32_t>, MeasureTiming>> deserialize( const std::vector<FmqRequestDatum>& data) { using discriminator = FmqRequestDatum::hidl_discriminator; @@ -191,7 +188,7 @@ } // unpackage input operands - std::vector<V1_0::RequestArgument> inputs; + std::vector<RequestArgument> inputs; inputs.reserve(numberOfInputOperands); for (size_t operand = 0; operand < numberOfInputOperands; ++operand) { // validate input operand information @@ -205,7 +202,7 @@ 
data[index].inputOperandInformation(); index++; const bool hasNoValue = operandInfo.hasNoValue; - const V1_0::DataLocation location = operandInfo.location; + const DataLocation location = operandInfo.location; const uint32_t numberOfDimensions = operandInfo.numberOfDimensions; // unpackage operand dimensions @@ -232,7 +229,7 @@ } // unpackage output operands - std::vector<V1_0::RequestArgument> outputs; + std::vector<RequestArgument> outputs; outputs.reserve(numberOfOutputOperands); for (size_t operand = 0; operand < numberOfOutputOperands; ++operand) { // validate output operand information @@ -246,7 +243,7 @@ data[index].outputOperandInformation(); index++; const bool hasNoValue = operandInfo.hasNoValue; - const V1_0::DataLocation location = operandInfo.location; + const DataLocation location = operandInfo.location; const uint32_t numberOfDimensions = operandInfo.numberOfDimensions; // unpackage operand dimensions @@ -297,7 +294,7 @@ } // unpackage measureTiming - const V1_2::MeasureTiming measure = data[index].measureTiming(); + const MeasureTiming measure = data[index].measureTiming(); index++; // validate packet information @@ -336,7 +333,7 @@ std::chrono::microseconds pollingTimeWindow) : mFmqRequestChannel(std::move(fmqRequestChannel)), kPollingTimeWindow(pollingTimeWindow) {} -std::optional<std::tuple<V1_0::Request, std::vector<int32_t>, V1_2::MeasureTiming>> +std::optional<std::tuple<V1_0::Request, std::vector<int32_t>, MeasureTiming>> RequestChannelReceiver::getBlocking() { const auto packet = getPacketBlocking(); if (!packet) { @@ -363,6 +360,7 @@ } std::optional<std::vector<FmqRequestDatum>> RequestChannelReceiver::getPacketBlocking() { + using discriminator = FmqRequestDatum::hidl_discriminator; if (mTeardown) { return std::nullopt; @@ -401,8 +399,6 @@ } return std::make_optional(std::move(packet)); } - - std::this_thread::yield(); } // If we get to this point, we either stopped polling because it was taking @@ -468,8 +464,7 @@ : 
mFmqResultChannel(std::move(fmqResultChannel)) {} bool ResultChannelSender::send(V1_0::ErrorStatus errorStatus, - const std::vector<V1_2::OutputShape>& outputShapes, - V1_2::Timing timing) { + const std::vector<OutputShape>& outputShapes, Timing timing) { const std::vector<FmqResultDatum> serialized = serialize(errorStatus, outputShapes, timing); return sendPacket(serialized); } @@ -561,10 +556,10 @@ mWorker.join(); } -hardware::Return<void> ExecutionBurstServer::freeMemory(int32_t slot) { +Return<void> ExecutionBurstServer::freeMemory(int32_t slot) { std::lock_guard<std::mutex> hold(mMutex); mExecutorWithCache->removeCacheEntry(slot); - return hardware::Void(); + return Void(); } void ExecutionBurstServer::ensureCacheEntriesArePresentLocked(const std::vector<int32_t>& slots) { @@ -586,15 +581,14 @@ } V1_0::ErrorStatus errorStatus = V1_0::ErrorStatus::GENERAL_FAILURE; - std::vector<hardware::hidl_memory> returnedMemories; - auto cb = [&errorStatus, &returnedMemories]( - V1_0::ErrorStatus status, - const hardware::hidl_vec<hardware::hidl_memory>& memories) { + std::vector<hidl_memory> returnedMemories; + auto cb = [&errorStatus, &returnedMemories](V1_0::ErrorStatus status, + const hidl_vec<hidl_memory>& memories) { errorStatus = status; returnedMemories = memories; }; - const hardware::Return<void> ret = mCallback->getMemories(unknownSlots, cb); + const Return<void> ret = mCallback->getMemories(unknownSlots, cb); if (!ret.isOk() || errorStatus != V1_0::ErrorStatus::NONE || returnedMemories.size() != unknownSlots.size()) {
diff --git a/common/GraphDump.cpp b/common/GraphDump.cpp index 604bdd3..3c208cd 100644 --- a/common/GraphDump.cpp +++ b/common/GraphDump.cpp
@@ -18,8 +18,9 @@ #include "GraphDump.h" -#include <android-base/logging.h> +#include "HalInterfaces.h" +#include <android-base/logging.h> #include <algorithm> #include <iostream> #include <map> @@ -27,11 +28,11 @@ #include <string> #include <utility> -#include "LegacyUtils.h" - namespace android { namespace nn { +using namespace hal; + // class Dumper is a wrapper around an std::ostream (if instantiated // with a pointer to a stream) or around LOG(INFO) (otherwise). // @@ -111,40 +112,25 @@ return "OEM"; case OperandType::TENSOR_OEM_BYTE: return "TOEMB"; - default: { - std::ostringstream oss; - oss << type; - return oss.str(); - } + default: + return toString(type); } } // If the specified Operand of the specified Model has OperandType // nnType corresponding to C++ type cppType and is of -// Operand::LifeTime::CONSTANT_COPY, then write the Operand's value to +// OperandLifeTime::CONSTANT_COPY, then write the Operand's value to // the Dumper. namespace { template <OperandType nnType, typename cppType> void tryValueDump(Dumper& dump, const Model& model, const Operand& opnd) { - if (opnd.type != nnType) { - return; - } - - const void* pointer = nullptr; - if (opnd.lifetime == Operand::LifeTime::CONSTANT_COPY) { - pointer = model.operandValues.data() + opnd.location.offset; - } else if (opnd.lifetime == Operand::LifeTime::POINTER) { - pointer = std::get<const void*>(opnd.location.pointer); - } else { - return; - } - - if (opnd.location.length != sizeof(cppType)) { + if (opnd.type != nnType || opnd.lifetime != OperandLifeTime::CONSTANT_COPY || + opnd.location.length != sizeof(cppType)) { return; } cppType val; - memcpy(&val, pointer, sizeof(cppType)); + memcpy(&val, &model.operandValues[opnd.location.offset], sizeof(cppType)); dump << " = " << val; } } // namespace @@ -186,28 +172,25 @@ const char* kind = nullptr; const char* io = nullptr; switch (opnd.lifetime) { - case Operand::LifeTime::CONSTANT_COPY: + case OperandLifeTime::CONSTANT_COPY: kind = "COPY"; break; - 
case Operand::LifeTime::CONSTANT_REFERENCE: + case OperandLifeTime::CONSTANT_REFERENCE: kind = "REF"; break; - case Operand::LifeTime::SUBGRAPH_INPUT: + case OperandLifeTime::SUBGRAPH_INPUT: io = "input"; break; - case Operand::LifeTime::SUBGRAPH_OUTPUT: + case OperandLifeTime::SUBGRAPH_OUTPUT: io = "output"; break; - case Operand::LifeTime::NO_VALUE: + case OperandLifeTime::NO_VALUE: kind = "NO"; break; - case Operand::LifeTime::SUBGRAPH: + case OperandLifeTime::SUBGRAPH: kind = "SUBGRAPH"; break; - case Operand::LifeTime::POINTER: - kind = "POINTER"; - break; - case Operand::LifeTime::TEMPORARY_VARIABLE: + default: // nothing interesting break; } @@ -222,7 +205,7 @@ tryValueDump<OperandType::FLOAT32, float>(dump, model, opnd); tryValueDump<OperandType::INT32, int>(dump, model, opnd); tryValueDump<OperandType::UINT32, unsigned>(dump, model, opnd); - if (!opnd.dimensions.empty()) { + if (opnd.dimensions.size()) { dump << "("; for (unsigned i = 0, e = opnd.dimensions.size(); i < e; i++) { if (i > 0) { @@ -247,7 +230,7 @@ dump << " ordering=out"; } } - dump << " label=\"" << i << ": " << operation.type << "\"]" << Dumper::endl; + dump << " label=\"" << i << ": " << toString(operation.type) << "\"]" << Dumper::endl; { // operation inputs for (unsigned in = 0, inE = operation.inputs.size(); in < inE; in++) {
diff --git a/common/HalBufferTracker.cpp b/common/HalBufferTracker.cpp deleted file mode 100644 index 00caf4b..0000000 --- a/common/HalBufferTracker.cpp +++ /dev/null
@@ -1,228 +0,0 @@ -/* - * Copyright (C) 2020 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "HalBufferTracker.h" - -#include <android-base/macros.h> - -#include <memory> -#include <mutex> -#include <set> -#include <stack> -#include <utility> -#include <vector> - -#include "CpuExecutor.h" -#include "HalInterfaces.h" -#include "Utils.h" -#include "nnapi/TypeUtils.h" - -namespace android::nn { - -std::shared_ptr<HalManagedBuffer> HalManagedBuffer::create(uint32_t size, - std::set<HalPreparedModelRole> roles, - const Operand& operand) { - std::unique_ptr<uint8_t[]> buffer(new (std::nothrow) uint8_t[size]); - if (buffer == nullptr) { - return nullptr; - } - if (isExtension(operand.type)) { - LOG(ERROR) << "HalManagedBuffer cannot handle extension operands."; - return nullptr; - } - return std::make_shared<HalManagedBuffer>(std::move(buffer), size, std::move(roles), operand); -} - -HalManagedBuffer::HalManagedBuffer(std::unique_ptr<uint8_t[]> buffer, uint32_t size, - std::set<HalPreparedModelRole> roles, const Operand& operand) - : kBuffer(std::move(buffer)), - kSize(size), - kRoles(std::move(roles)), - kOperandType(operand.type), - kInitialDimensions(operand.dimensions), - mUpdatedDimensions(operand.dimensions) { - CHECK(!isExtension(kOperandType)); -} - -ErrorStatus HalManagedBuffer::validateRequest(uint32_t poolIndex, const Request& request, - const V1_3::IPreparedModel* preparedModel) const { - 
CHECK_LT(poolIndex, request.pools.size()); - CHECK(std::holds_alternative<Request::MemoryDomainToken>(request.pools[poolIndex])); - std::lock_guard<std::mutex> guard(mMutex); - - bool usedAsInput = false, usedAsOutput = false; - for (uint32_t i = 0; i < request.inputs.size(); i++) { - if (request.inputs[i].lifetime != Request::Argument::LifeTime::POOL) continue; - if (request.inputs[i].location.poolIndex != poolIndex) continue; - // Validate if the input role is specified during allocation. - if (kRoles.count({preparedModel, IOType::INPUT, i}) == 0) { - LOG(ERROR) << "HalManagedBuffer::validateRequest -- invalid buffer role."; - return ErrorStatus::INVALID_ARGUMENT; - } - if (!mInitialized) { - LOG(ERROR) - << "HalManagedBuffer::validateRequest -- using uninitialized buffer as input " - "request."; - return ErrorStatus::GENERAL_FAILURE; - } - auto combined = combineDimensions(mUpdatedDimensions, request.inputs[i].dimensions); - if (!combined.has_value()) { - LOG(ERROR) << "HalManagedBuffer::validateRequest -- incompatible dimensions (" - << toString(mUpdatedDimensions) << " vs " - << toString(request.inputs[i].dimensions) << ")"; - return ErrorStatus::INVALID_ARGUMENT; - } - usedAsInput = true; - } - for (uint32_t i = 0; i < request.outputs.size(); i++) { - if (request.outputs[i].lifetime != Request::Argument::LifeTime::POOL) continue; - if (request.outputs[i].location.poolIndex != poolIndex) continue; - if (usedAsInput || usedAsOutput) { - LOG(ERROR) << "HalManagedBuffer::validateRequest -- using the same device memory for " - "input/output or multiple outputs"; - return ErrorStatus::INVALID_ARGUMENT; - } - // Validate if the output role is specified during allocation. 
- if (kRoles.count({preparedModel, IOType::OUTPUT, i}) == 0) { - LOG(ERROR) << "HalManagedBuffer::validateRequest -- invalid buffer role."; - return ErrorStatus::INVALID_ARGUMENT; - } - auto combined = combineDimensions(kInitialDimensions, request.outputs[i].dimensions); - if (!combined.has_value()) { - LOG(ERROR) << "HalManagedBuffer::validateRequest -- incompatible dimensions (" - << toString(kInitialDimensions) << " vs " - << toString(request.outputs[i].dimensions) << ")"; - return ErrorStatus::INVALID_ARGUMENT; - } - usedAsOutput = true; - } - return ErrorStatus::NONE; -} - -ErrorStatus HalManagedBuffer::validateCopyFrom(const std::vector<uint32_t>& dimensions, - uint32_t size) const { - if (size != kSize) { - LOG(ERROR) << "HalManagedBuffer::validateCopyFrom -- invalid memory size: " << kSize - << " vs " << size; - return ErrorStatus::INVALID_ARGUMENT; - } - - if (nonExtensionOperandTypeIsScalar(static_cast<int>(kOperandType))) { - if (!dimensions.empty()) { - LOG(ERROR) << "HalManagedBuffer::validateCopyFrom -- invalid dimensions for scalar " - "operand: " - << toString(dimensions); - return ErrorStatus::INVALID_ARGUMENT; - } - return ErrorStatus::NONE; - } - - if (dimensions.empty()) { - if (tensorHasUnspecifiedDimensions(kOperandType, kInitialDimensions)) { - LOG(ERROR) - << "HalManagedBuffer::validateCopyFrom -- the initial dimensions are not fully " - "specified and no dimension update is provided: " - << toString(kInitialDimensions); - return ErrorStatus::INVALID_ARGUMENT; - } - } else { - if (tensorHasUnspecifiedDimensions(kOperandType, dimensions)) { - LOG(ERROR) - << "HalManagedBuffer::validateCopyFrom -- the updated dimensions are not fully " - "specified: " - << toString(dimensions); - return ErrorStatus::INVALID_ARGUMENT; - } - } - - const auto combined = combineDimensions(kInitialDimensions, dimensions); - if (!combined.has_value()) { - LOG(ERROR) << "HalManagedBuffer::validateCopyFrom -- incompatible dimensions (" - << 
toString(kInitialDimensions) << " vs " << toString(dimensions) << ")"; - return ErrorStatus::INVALID_ARGUMENT; - } - return ErrorStatus::NONE; -} - -ErrorStatus HalManagedBuffer::validateCopyTo(uint32_t size) const { - if (size != kSize) { - LOG(ERROR) << "HalManagedBuffer::validateCopyTo -- invalid memory size: " << kSize << " vs " - << size; - return ErrorStatus::INVALID_ARGUMENT; - } - std::lock_guard<std::mutex> guard(mMutex); - if (!mInitialized) { - LOG(ERROR) << "HalManagedBuffer::validateCopyTo -- using uninitialized buffer as source."; - return ErrorStatus::GENERAL_FAILURE; - } - return ErrorStatus::NONE; -} - -bool HalManagedBuffer::updateDimensions(const std::vector<uint32_t>& dimensions) { - auto combined = combineDimensions(kInitialDimensions, dimensions); - if (!combined.has_value()) { - LOG(ERROR) << "HalManagedBuffer::updateDimensions -- incompatible dimensions (" - << toString(kInitialDimensions) << " vs " << toString(dimensions) << ")"; - return false; - } - std::lock_guard<std::mutex> guard(mMutex); - mUpdatedDimensions = std::move(combined.value()); - return true; -} - -void HalManagedBuffer::setInitialized(bool initialized) { - std::lock_guard<std::mutex> guard(mMutex); - mInitialized = initialized; -} - -std::unique_ptr<HalBufferTracker::Token> HalBufferTracker::add( - std::shared_ptr<HalManagedBuffer> buffer) { - if (buffer == nullptr) { - return nullptr; - } - std::lock_guard<std::mutex> guard(mMutex); - uint32_t token = 0; - if (mFreeTokens.empty()) { - token = mTokenToBuffers.size(); - mTokenToBuffers.push_back(std::move(buffer)); - } else { - token = mFreeTokens.top(); - mFreeTokens.pop(); - mTokenToBuffers[token] = std::move(buffer); - } - VLOG(MEMORY) << "HalBufferTracker::add -- new token = " << token; - return std::make_unique<Token>(token, shared_from_this()); -} - -std::shared_ptr<HalManagedBuffer> HalBufferTracker::get(uint32_t token) const { - std::lock_guard<std::mutex> guard(mMutex); - if (mTokenToBuffers.size() <= token || 
mTokenToBuffers[token] == nullptr) { - LOG(ERROR) << "HalBufferTracker::get -- unknown token " << token; - return nullptr; - } - return mTokenToBuffers[token]; -} - -void HalBufferTracker::free(uint32_t token) { - std::lock_guard<std::mutex> guard(mMutex); - CHECK_LT(token, mTokenToBuffers.size()); - CHECK(mTokenToBuffers[token] != nullptr); - VLOG(MEMORY) << "HalBufferTracker::free -- release token = " << token; - mTokenToBuffers[token] = nullptr; - mFreeTokens.push(token); -} - -} // namespace android::nn
diff --git a/common/IndexedShapeWrapper.cpp b/common/IndexedShapeWrapper.cpp index 675518c..e906659 100644 --- a/common/IndexedShapeWrapper.cpp +++ b/common/IndexedShapeWrapper.cpp
@@ -18,10 +18,6 @@ #include "IndexedShapeWrapper.h" -#include <vector> - -#include "LegacyUtils.h" - namespace android { namespace nn {
diff --git a/common/LegacyHalUtils.cpp b/common/LegacyHalUtils.cpp deleted file mode 100644 index b46560d..0000000 --- a/common/LegacyHalUtils.cpp +++ /dev/null
@@ -1,1791 +0,0 @@ -/* - * Copyright (C) 2017 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#define LOG_TAG "Utils" - -#include "LegacyHalUtils.h" - -#include <nnapi/TypeUtils.h> -#include <nnapi/hal/1.0/Conversions.h> -#include <nnapi/hal/1.1/Conversions.h> -#include <nnapi/hal/1.2/Conversions.h> -#include <nnapi/hal/1.3/Conversions.h> -#include <nnapi/hal/aidl/Conversions.h> - -#include <algorithm> -#include <limits> -#include <set> -#include <string> -#include <tuple> -#include <type_traits> -#include <utility> -#include <vector> - -#include "CpuExecutor.h" -#include "NeuralNetworks.h" -#include "ValidateHal.h" - -namespace android { -namespace nn { - -constexpr V1_0::PerformanceInfo kNoPerformanceInfo = {.execTime = FLT_MAX, .powerUsage = FLT_MAX}; - -template <typename Type> -static Type handleError(GeneralResult<Type> result) { - CHECK(result.has_value()) << "Unhandled error (" << result.error().code - << "): " << result.error().message; - return std::move(result).value(); -} - -LegacyOptionalTimePoint makeDeadline(const V1_3::OptionalTimePoint& timePoint) { - using Disc = V1_3::OptionalTimePoint::hidl_discriminator; - if (timePoint.getDiscriminator() == Disc::none) { - return LegacyOptionalTimePoint{}; - } - const uint64_t count = timePoint.nanosecondsSinceEpoch(); - return LegacyTimePoint{LegacyDuration{count}}; -} - -LegacyOptionalTimePoint makeDeadline(const V1_3::OptionalTimeoutDuration& optionalDuration) 
{ - if (optionalDuration.getDiscriminator() == - V1_3::OptionalTimeoutDuration::hidl_discriminator::none) { - return LegacyOptionalTimePoint{}; - } - - const auto duration = LegacyDuration{optionalDuration.nanoseconds()}; - constexpr auto kMaxTime = LegacyTimePoint::max(); - const auto currentTime = LegacyClock::now(); - - // If there would be an overflow, use the max value. - if (duration > kMaxTime - currentTime) { - return kMaxTime; - } - return currentTime + duration; -} - -bool hasDeadlinePassed(const LegacyOptionalTimePoint& deadline) { - if (!deadline.has_value()) { - return false; - } - return LegacyClock::now() >= *deadline; -} - -bool isExtensionOperandType(V1_3::OperandType type) { - return isExtensionOperandType(static_cast<OperandType>(type)); -} - -bool isExtensionOperationType(V1_3::OperationType type) { - return isExtensionOperationType(static_cast<OperationType>(type)); -} - -std::string getOperandTypeName(V1_3::OperandType type) { - return toString(type); -} - -std::string getOperationName(V1_3::OperationType type) { - return toString(type); -} - -uint32_t nonExtensionOperandSizeOfData(V1_3::OperandType type, - const std::vector<uint32_t>& dimensions) { - return nonExtensionOperandSizeOfData(uncheckedConvert(type), dimensions); -} - -bool nonExtensionOperandSizeOfDataOverflowsUInt32(V1_3::OperandType type, - const std::vector<uint32_t>& dimensions) { - return nonExtensionOperandSizeOfDataOverflowsUInt32(uncheckedConvert(type), dimensions); -} - -bool tensorHasUnspecifiedDimensions(V1_3::OperandType type, - const std::vector<uint32_t>& dimensions) { - return tensorHasUnspecifiedDimensions(static_cast<int>(type), dimensions.data(), - dimensions.size()); -} - -bool tensorHasUnspecifiedDimensions(const V1_3::Operand& operand) { - return tensorHasUnspecifiedDimensions(static_cast<int>(operand.type), operand.dimensions.data(), - operand.dimensions.size()); -} - -void logModelToInfo(const V1_0::Model& model) { - LOG(INFO) << "V1_0::Model start"; - 
LOG(INFO) << "operands" << toString(model.operands); - LOG(INFO) << "operations" << toString(model.operations); - LOG(INFO) << "inputIndexes" << toString(model.inputIndexes); - LOG(INFO) << "outputIndexes" << toString(model.outputIndexes); - LOG(INFO) << "operandValues size" << model.operandValues.size(); - LOG(INFO) << "pools" << SHOW_IF_DEBUG(toString(model.pools)); -} - -void logModelToInfo(const V1_1::Model& model) { - LOG(INFO) << "V1_1::Model start"; - LOG(INFO) << "operands" << toString(model.operands); - LOG(INFO) << "operations" << toString(model.operations); - LOG(INFO) << "inputIndexes" << toString(model.inputIndexes); - LOG(INFO) << "outputIndexes" << toString(model.outputIndexes); - LOG(INFO) << "operandValues size " << model.operandValues.size(); - LOG(INFO) << "pools" << SHOW_IF_DEBUG(toString(model.pools)); -} - -void logModelToInfo(const V1_2::Model& model) { - LOG(INFO) << "V1_2::Model start"; - LOG(INFO) << "operands" << toString(model.operands); - LOG(INFO) << "operations" << toString(model.operations); - LOG(INFO) << "inputIndexes" << toString(model.inputIndexes); - LOG(INFO) << "outputIndexes" << toString(model.outputIndexes); - LOG(INFO) << "operandValues size" << model.operandValues.size(); - LOG(INFO) << "pools" << SHOW_IF_DEBUG(toString(model.pools)); - LOG(INFO) << "relaxComputationFloat32toFloat16" << model.relaxComputationFloat32toFloat16; - LOG(INFO) << "extensionNameToPrefix" << toString(model.extensionNameToPrefix); -} - -static void logSubgraphToInfo(std::string label, const V1_3::Subgraph& subgraph) { - LOG(INFO) << label << ".operands" << toString(subgraph.operands); - LOG(INFO) << label << ".operations" << toString(subgraph.operations); - LOG(INFO) << label << ".inputIndexes" << toString(subgraph.inputIndexes); - LOG(INFO) << label << ".outputIndexes" << toString(subgraph.outputIndexes); -} - -void logModelToInfo(const V1_3::Model& model) { - LOG(INFO) << "V1_3::Model start"; - logSubgraphToInfo("main", model.main); - for 
(uint32_t i = 0, n = model.referenced.size(); i < n; ++i) { - logSubgraphToInfo("referenced[" + std::to_string(i) + "]", model.referenced[i]); - } - LOG(INFO) << "operandValues size " << model.operandValues.size(); - LOG(INFO) << "pools" << SHOW_IF_DEBUG(toString(model.pools)); - LOG(INFO) << "relaxComputationFloat32toFloat16 " << model.relaxComputationFloat32toFloat16; - LOG(INFO) << "extensionNameToPrefix" << toString(model.extensionNameToPrefix); -} - -bool validateOperandSymmPerChannelQuantParams( - const V1_3::Operand& halOperand, - const ANeuralNetworksSymmPerChannelQuantParams& channelQuant, const char* tag) { - if (halOperand.type != V1_3::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL) { - return false; - } - - NN_RET_CHECK_LT(channelQuant.channelDim, halOperand.dimensions.size()) << tag; - NN_RET_CHECK(channelQuant.scales != nullptr) << tag; - NN_RET_CHECK_EQ(channelQuant.scaleCount, halOperand.dimensions[channelQuant.channelDim]) << tag; - NN_RET_CHECK_NE(halOperand.dimensions[channelQuant.channelDim], 0u) - << tag << " channel dimension " << channelQuant.channelDim << " is underspecified"; - for (uint32_t i = 0; i < halOperand.dimensions[channelQuant.channelDim]; i++) { - NN_RET_CHECK_GT(channelQuant.scales[i], 0.0f) << tag << " invalid scaleArray[" << i << "]"; - } - return true; -} - -static int validateHalVersion(ANeuralNetworksOperationType opType, HalVersion halVersion, - HalVersion minSupportedHalVersion) { - if (halVersion < minSupportedHalVersion) { - LOG(ERROR) << "The given inputs and outputs for operation " << opType - << " are only supported in " << minSupportedHalVersion - << " and later (validating using " << halVersion << ")"; - return ANEURALNETWORKS_BAD_DATA; - } - return ANEURALNETWORKS_NO_ERROR; -} - -static inline int validateOperation(ANeuralNetworksOperationType opType, uint32_t inputCount, - const uint32_t* inputIndexes, uint32_t outputCount, - const uint32_t* outputIndexes, - const std::vector<Operand>& operands, HalVersion 
halVersion) { - if (opType == ANEURALNETWORKS_IF || opType == ANEURALNETWORKS_WHILE) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3)); - LOG(ERROR) << "This validateOperation() overload does not support control flow"; - return ANEURALNETWORKS_BAD_DATA; - } - return validateOperation(opType, inputCount, inputIndexes, outputCount, outputIndexes, operands, - halVersion, {}); -} - -V1_3::ErrorStatus convertResultCodeToHalErrorStatus(int resultCode) { - return convertToV1_3(convertResultCodeToErrorStatus(resultCode)); -} - -int convertErrorStatusToResultCode(V1_3::ErrorStatus status) { - return convertErrorStatusToResultCode(uncheckedConvert(status)); -} - -std::tuple<int, std::vector<OutputShape>, Timing> getExecutionResult( - V1_3::ErrorStatus status, const hardware::hidl_vec<V1_2::OutputShape>& outputShapes, - const V1_2::Timing& timing) { - return getExecutionResult(uncheckedConvert(status), uncheckedConvert(outputShapes), - uncheckedConvert(timing)); -} - -// Capabilities::operandPerformance utilities. -// The field Capabilities::operandPerformance is a vector sorted by the field -// Capabilities::OperandPerformance::type. - -template <HalVersion version> -hardware::hidl_vec<VersionedOperandPerformance<version>> nonExtensionOperandPerformance( - V1_0::PerformanceInfo perf) { - using OpPerf = VersionedOperandPerformance<version>; - - // Note: range presents enumerators in declaration order, not in numerical order. 
- static constexpr hardware::hidl_enum_range<VersionedOperandType<version>> kOperandTypeRange; - - std::vector<OpPerf> ret; - ret.reserve(kOperandTypeRange.end() - kOperandTypeRange.begin()); - for (VersionedOperandType<version> type : kOperandTypeRange) { - if (static_cast<V1_3::OperandType>(type) != V1_3::OperandType::SUBGRAPH) { - ret.push_back(OpPerf{type, perf}); - } - } - std::sort(ret.begin(), ret.end(), - [](const OpPerf& a, const OpPerf& b) { return a.type < b.type; }); - - return ret; -} - -template hardware::hidl_vec<V1_2::Capabilities::OperandPerformance> -nonExtensionOperandPerformance<HalVersion::V1_2>(V1_0::PerformanceInfo perf); -template hardware::hidl_vec<V1_3::Capabilities::OperandPerformance> -nonExtensionOperandPerformance<HalVersion::V1_3>(V1_0::PerformanceInfo perf); - -template <HalVersion version> -void update(hardware::hidl_vec<VersionedOperandPerformance<version>>* operandPerformance, - VersionedOperandType<version> type, V1_0::PerformanceInfo perf) { - CHECK(operandPerformance != nullptr); - const auto it = - std::lower_bound(operandPerformance->begin(), operandPerformance->end(), type, - [](const VersionedOperandPerformance<version>& perf, - VersionedOperandType<version> type) { return perf.type < type; }); - CHECK(it != operandPerformance->end()) - << toString(type) << " not in " << toString(*operandPerformance); - it->info = perf; -} - -void update(hardware::hidl_vec<V1_2::Capabilities::OperandPerformance>* operandPerformance, - V1_2::OperandType type, V1_0::PerformanceInfo perf) { - update<HalVersion::V1_2>(operandPerformance, type, perf); -} -void update(hardware::hidl_vec<V1_3::Capabilities::OperandPerformance>* operandPerformance, - V1_3::OperandType type, V1_0::PerformanceInfo perf) { - update<HalVersion::V1_3>(operandPerformance, type, perf); -} - -template <HalVersion version> -V1_0::PerformanceInfo lookup( - const hardware::hidl_vec<VersionedOperandPerformance<version>>& operandPerformance, - VersionedOperandType<version> 
type) { - const auto it = std::lower_bound(operandPerformance.begin(), operandPerformance.end(), type, - [](const VersionedOperandPerformance<version>& perf, - VersionedOperandType<version> type) { - return static_cast<V1_3::OperandType>(perf.type) < - static_cast<V1_3::OperandType>(type); - }); - if (it == operandPerformance.end()) { - LOG(WARNING) << "No PerformanceInfo for " << toString(type); - return kNoPerformanceInfo; - } else { - return it->info; - } -} - -V1_0::PerformanceInfo lookup( - const hardware::hidl_vec<V1_2::Capabilities::OperandPerformance>& operandPerformance, - V1_2::OperandType type) { - return lookup<HalVersion::V1_2>(operandPerformance, type); -} -V1_0::PerformanceInfo lookup( - const hardware::hidl_vec<V1_3::Capabilities::OperandPerformance>& operandPerformance, - V1_3::OperandType type) { - CHECK(type != V1_3::OperandType::SUBGRAPH) - << "Use Capabilities::ifPerformance or Capabilities::whilePerformance"; - return lookup<HalVersion::V1_3>(operandPerformance, type); -} - -bool setRunTimePoolInfosFromHidlMemories(std::vector<RunTimePoolInfo>* poolInfos, - const hardware::hidl_vec<hardware::hidl_memory>& pools) { - return setRunTimePoolInfosFromCanonicalMemories(poolInfos, uncheckedConvert(pools)); -} - -// Versioning - -// In Android P, most data types are treated as having the same performance as TENSOR_QUANT8_ASYMM. -// This array must be in sorted order. 
-static const V1_3::OperandType kQuantized8PerformanceConsistentWithP[] = { - V1_3::OperandType::INT32, V1_3::OperandType::UINT32, V1_3::OperandType::TENSOR_INT32, - V1_3::OperandType::OEM, V1_3::OperandType::TENSOR_OEM_BYTE}; - -static bool isQuantized8PerformanceConsistentWithP(const V1_2::Capabilities& capabilities) { - const V1_0::PerformanceInfo quantized8Performance = - lookup(capabilities.operandPerformance, V1_2::OperandType::TENSOR_QUANT8_ASYMM); - return std::all_of(std::begin(kQuantized8PerformanceConsistentWithP), - std::end(kQuantized8PerformanceConsistentWithP), - [quantized8Performance, &capabilities](V1_3::OperandType type) { - return quantized8Performance == - lookup(capabilities.operandPerformance, - static_cast<V1_2::OperandType>(type)); - }); -} - -static bool isQuantized8PerformanceConsistentWithP(const V1_3::Capabilities& capabilities) { - const V1_0::PerformanceInfo quantized8Performance = - lookup(capabilities.operandPerformance, V1_3::OperandType::TENSOR_QUANT8_ASYMM); - return std::all_of(std::begin(kQuantized8PerformanceConsistentWithP), - std::end(kQuantized8PerformanceConsistentWithP), - [quantized8Performance, &capabilities](V1_3::OperandType type) { - return quantized8Performance == - lookup(capabilities.operandPerformance, type); - }); -} - -static hardware::hidl_vec<V1_2::Capabilities::OperandPerformance> -makeQuantized8PerformanceConsistentWithP(V1_0::PerformanceInfo quantized8Performance) { - hardware::hidl_vec<V1_2::Capabilities::OperandPerformance> ret( - std::size(kQuantized8PerformanceConsistentWithP)); - std::transform(std::begin(kQuantized8PerformanceConsistentWithP), - std::end(kQuantized8PerformanceConsistentWithP), ret.begin(), - [quantized8Performance]( - V1_3::OperandType type) -> V1_2::Capabilities::OperandPerformance { - return {static_cast<V1_2::OperandType>(type), quantized8Performance}; - }); - return ret; -} - -bool compliantWithV1_0(const V1_0::Capabilities&) { - return true; -} - -bool compliantWithV1_0(const 
V1_1::Capabilities& capabilities) { - return capabilities.relaxedFloat32toFloat16Performance == capabilities.float32Performance; -} - -bool compliantWithV1_0(const V1_2::Capabilities& capabilities) { - const V1_0::PerformanceInfo perfTensorFloat32 = - lookup(capabilities.operandPerformance, V1_2::OperandType::TENSOR_FLOAT32); - const V1_0::PerformanceInfo perfFloat32 = - lookup(capabilities.operandPerformance, V1_2::OperandType::FLOAT32); - if (perfTensorFloat32 != perfFloat32 || - perfTensorFloat32 != capabilities.relaxedFloat32toFloat16PerformanceTensor || - perfFloat32 != capabilities.relaxedFloat32toFloat16PerformanceScalar) { - return false; - } - - return isQuantized8PerformanceConsistentWithP(capabilities); -} - -bool compliantWithV1_0(const V1_3::Capabilities& capabilities) { - const V1_0::PerformanceInfo perfTensorFloat32 = - lookup(capabilities.operandPerformance, V1_3::OperandType::TENSOR_FLOAT32); - const V1_0::PerformanceInfo perfFloat32 = - lookup(capabilities.operandPerformance, V1_3::OperandType::FLOAT32); - if (perfTensorFloat32 != perfFloat32 || - perfTensorFloat32 != capabilities.relaxedFloat32toFloat16PerformanceTensor || - perfFloat32 != capabilities.relaxedFloat32toFloat16PerformanceScalar) { - return false; - } - - return isQuantized8PerformanceConsistentWithP(capabilities); -} - -bool compliantWithV1_1(const V1_0::Capabilities&) { - return true; -} - -bool compliantWithV1_1(const V1_1::Capabilities&) { - return true; -} - -bool compliantWithV1_1(const V1_2::Capabilities& capabilities) { - if ((capabilities.relaxedFloat32toFloat16PerformanceTensor != - capabilities.relaxedFloat32toFloat16PerformanceScalar) || - (lookup(capabilities.operandPerformance, V1_2::OperandType::TENSOR_FLOAT32) != - lookup(capabilities.operandPerformance, V1_2::OperandType::FLOAT32))) { - return false; - } - - return isQuantized8PerformanceConsistentWithP(capabilities); -} - -bool compliantWithV1_1(const V1_3::Capabilities& capabilities) { - if 
((capabilities.relaxedFloat32toFloat16PerformanceTensor != - capabilities.relaxedFloat32toFloat16PerformanceScalar) || - (lookup(capabilities.operandPerformance, V1_3::OperandType::TENSOR_FLOAT32) != - lookup(capabilities.operandPerformance, V1_3::OperandType::FLOAT32))) { - return false; - } - - return isQuantized8PerformanceConsistentWithP(capabilities); -} - -bool compliantWithV1_2(const V1_0::Capabilities&) { - return true; -} - -bool compliantWithV1_2(const V1_1::Capabilities&) { - return true; -} - -bool compliantWithV1_2(const V1_2::Capabilities&) { - return true; -} - -bool compliantWithV1_2(const V1_3::Capabilities&) { - return true; -} - -bool compliantWithV1_3(const V1_0::Capabilities&) { - return true; -} - -bool compliantWithV1_3(const V1_1::Capabilities&) { - return true; -} - -bool compliantWithV1_3(const V1_2::Capabilities&) { - return true; -} - -bool compliantWithV1_3(const V1_3::Capabilities&) { - return true; -} - -V1_0::ErrorStatus convertToV1_0(V1_0::ErrorStatus status) { - return status; -} - -V1_0::ErrorStatus convertToV1_0(V1_3::ErrorStatus status) { - switch (status) { - case V1_3::ErrorStatus::NONE: - return V1_0::ErrorStatus::NONE; - case V1_3::ErrorStatus::DEVICE_UNAVAILABLE: - return V1_0::ErrorStatus::DEVICE_UNAVAILABLE; - case V1_3::ErrorStatus::GENERAL_FAILURE: - return V1_0::ErrorStatus::GENERAL_FAILURE; - case V1_3::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE: - return V1_0::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE; - case V1_3::ErrorStatus::INVALID_ARGUMENT: - return V1_0::ErrorStatus::INVALID_ARGUMENT; - case V1_3::ErrorStatus::MISSED_DEADLINE_TRANSIENT: - return V1_0::ErrorStatus::GENERAL_FAILURE; - case V1_3::ErrorStatus::MISSED_DEADLINE_PERSISTENT: - return V1_0::ErrorStatus::GENERAL_FAILURE; - case V1_3::ErrorStatus::RESOURCE_EXHAUSTED_TRANSIENT: - return V1_0::ErrorStatus::GENERAL_FAILURE; - case V1_3::ErrorStatus::RESOURCE_EXHAUSTED_PERSISTENT: - return V1_0::ErrorStatus::GENERAL_FAILURE; - } - LOG(ERROR) << "Unknown ErrorStatus: " 
<< toString(status) << " mapped to GENERAL_FAILURE"; - return V1_0::ErrorStatus::GENERAL_FAILURE; -} - -V1_3::ErrorStatus convertToV1_3(V1_0::ErrorStatus status) { - return static_cast<V1_3::ErrorStatus>(status); -} - -V1_3::ErrorStatus convertToV1_3(V1_3::ErrorStatus status) { - return status; -} - -static V1_0::OperationType uncheckedConvertToV1_0(V1_1::OperationType type) { - return static_cast<V1_0::OperationType>(type); -} - -static V1_0::OperationType uncheckedConvertToV1_0(V1_2::OperationType type) { - return static_cast<V1_0::OperationType>(type); -} - -V1_0::OperationType uncheckedConvertToV1_0(V1_3::OperationType type) { - return static_cast<V1_0::OperationType>(type); -} - -static V1_1::OperationType convertToV1_1(V1_0::OperationType type) { - return static_cast<V1_1::OperationType>(type); -} - -static V1_1::OperationType uncheckedConvertToV1_1(V1_2::OperationType type) { - return static_cast<V1_1::OperationType>(type); -} - -V1_1::OperationType uncheckedConvertToV1_1(V1_3::OperationType type) { - return static_cast<V1_1::OperationType>(type); -} - -static V1_2::OperationType convertToV1_2(V1_0::OperationType type) { - return static_cast<V1_2::OperationType>(type); -} - -static V1_2::OperationType convertToV1_2(V1_1::OperationType type) { - return static_cast<V1_2::OperationType>(type); -} - -V1_2::OperationType uncheckedConvertToV1_2(V1_3::OperationType type) { - return static_cast<V1_2::OperationType>(type); -} - -static V1_3::OperationType convertToV1_3(V1_0::OperationType type) { - return static_cast<V1_3::OperationType>(type); -} - -static V1_3::OperationType convertToV1_3(V1_1::OperationType type) { - return static_cast<V1_3::OperationType>(type); -} - -static V1_3::OperationType convertToV1_3(V1_2::OperationType type) { - return static_cast<V1_3::OperationType>(type); -} - -V1_0::Capabilities convertToV1_0(const V1_0::Capabilities& capabilities) { - return capabilities; -} - -V1_0::Capabilities convertToV1_0(const V1_1::Capabilities& capabilities) 
{ - if (!compliantWithV1_0(capabilities)) { - LOG(ERROR) << "Upcasting non-compliant capabilities " << toString(capabilities) - << " from V1_1::Capabilities to V1_0::Capabilities"; - } - return {.float32Performance = capabilities.float32Performance, - .quantized8Performance = capabilities.quantized8Performance}; -} - -V1_0::Capabilities convertToV1_0(const V1_2::Capabilities& capabilities) { - if (!compliantWithV1_0(capabilities)) { - LOG(ERROR) << "Upcasting non-compliant capabilities " << toString(capabilities) - << " from V1_2::Capabilities to V1_0::Capabilities"; - } - return {.float32Performance = - lookup(capabilities.operandPerformance, V1_2::OperandType::TENSOR_FLOAT32), - .quantized8Performance = lookup(capabilities.operandPerformance, - V1_2::OperandType::TENSOR_QUANT8_ASYMM)}; -} - -V1_0::Capabilities convertToV1_0(const V1_3::Capabilities& capabilities) { - if (!compliantWithV1_0(capabilities)) { - LOG(ERROR) << "Upcasting non-compliant capabilities " << toString(capabilities) - << " from V1_3::Capabilities to V1_0::Capabilities"; - } - return {.float32Performance = - lookup(capabilities.operandPerformance, V1_3::OperandType::TENSOR_FLOAT32), - .quantized8Performance = lookup(capabilities.operandPerformance, - V1_3::OperandType::TENSOR_QUANT8_ASYMM)}; -} - -V1_1::Capabilities convertToV1_1(const V1_0::Capabilities& capabilities) { - return {.float32Performance = capabilities.float32Performance, - .quantized8Performance = capabilities.quantized8Performance, - .relaxedFloat32toFloat16Performance = capabilities.float32Performance}; -} - -V1_1::Capabilities convertToV1_1(const V1_1::Capabilities& capabilities) { - return capabilities; -} - -V1_1::Capabilities convertToV1_1(const V1_2::Capabilities& capabilities) { - if (!compliantWithV1_1(capabilities)) { - LOG(ERROR) << "Upcasting non-compliant capabilities " << toString(capabilities) - << " from V1_2::Capabilities to V1_1::Capabilities"; - } - return {.float32Performance = - 
lookup(capabilities.operandPerformance, V1_2::OperandType::TENSOR_FLOAT32), - .quantized8Performance = - lookup(capabilities.operandPerformance, V1_2::OperandType::TENSOR_QUANT8_ASYMM), - .relaxedFloat32toFloat16Performance = - capabilities.relaxedFloat32toFloat16PerformanceTensor}; -} - -V1_1::Capabilities convertToV1_1(const V1_3::Capabilities& capabilities) { - if (!compliantWithV1_1(capabilities)) { - LOG(ERROR) << "Upcasting non-compliant capabilities " << toString(capabilities) - << " from V1_3::Capabilities to V1_1::Capabilities"; - } - return {.float32Performance = - lookup(capabilities.operandPerformance, V1_3::OperandType::TENSOR_FLOAT32), - .quantized8Performance = - lookup(capabilities.operandPerformance, V1_3::OperandType::TENSOR_QUANT8_ASYMM), - .relaxedFloat32toFloat16Performance = - capabilities.relaxedFloat32toFloat16PerformanceTensor}; -} - -V1_2::Capabilities convertToV1_2(const V1_0::Capabilities& capabilities) { - V1_2::Capabilities ret = { - .relaxedFloat32toFloat16PerformanceScalar = capabilities.float32Performance, - .relaxedFloat32toFloat16PerformanceTensor = capabilities.float32Performance, - .operandPerformance = - makeQuantized8PerformanceConsistentWithP(capabilities.quantized8Performance)}; - auto& opPerf = ret.operandPerformance; - opPerf.resize(opPerf.size() + 2); - opPerf[opPerf.size() - 2] = {V1_2::OperandType::TENSOR_FLOAT32, - capabilities.float32Performance}; - opPerf[opPerf.size() - 1] = {V1_2::OperandType::FLOAT32, capabilities.float32Performance}; - using OperandPerformance = V1_2::Capabilities::OperandPerformance; - std::sort(opPerf.begin(), opPerf.end(), - [](const OperandPerformance& a, const OperandPerformance& b) { - return a.type < b.type; - }); - return ret; -} - -V1_2::Capabilities convertToV1_2(const V1_1::Capabilities& capabilities) { - V1_2::Capabilities ret = {.relaxedFloat32toFloat16PerformanceScalar = - capabilities.relaxedFloat32toFloat16Performance, - .relaxedFloat32toFloat16PerformanceTensor = - 
capabilities.relaxedFloat32toFloat16Performance, - .operandPerformance = makeQuantized8PerformanceConsistentWithP( - capabilities.quantized8Performance)}; - auto& opPerf = ret.operandPerformance; - opPerf.resize(opPerf.size() + 2); - opPerf[opPerf.size() - 2] = {V1_2::OperandType::TENSOR_FLOAT32, - capabilities.float32Performance}; - opPerf[opPerf.size() - 1] = {V1_2::OperandType::FLOAT32, capabilities.float32Performance}; - using OperandPerformance = V1_2::Capabilities::OperandPerformance; - std::sort(opPerf.begin(), opPerf.end(), - [](const OperandPerformance& a, const OperandPerformance& b) { - return a.type < b.type; - }); - return ret; -} - -V1_2::Capabilities convertToV1_2(const V1_2::Capabilities& capabilities) { - return capabilities; -} - -V1_2::Capabilities convertToV1_2(const V1_3::Capabilities& capabilities) { - V1_2::Capabilities ret = { - .relaxedFloat32toFloat16PerformanceScalar = - capabilities.relaxedFloat32toFloat16PerformanceScalar, - .relaxedFloat32toFloat16PerformanceTensor = - capabilities.relaxedFloat32toFloat16PerformanceTensor, - }; - const auto& inputOpPerf = capabilities.operandPerformance; - hardware::hidl_vec<V1_3::Capabilities::OperandPerformance> opPerfSupported; - opPerfSupported.resize(inputOpPerf.size()); - auto last = - std::copy_if(inputOpPerf.begin(), inputOpPerf.end(), opPerfSupported.begin(), - [](V1_3::Capabilities::OperandPerformance opPerf) { - return validOperandType(static_cast<V1_2::OperandType>(opPerf.type)); - }); - opPerfSupported.resize(std::distance(opPerfSupported.begin(), last)); - - auto& convertedOpPerf = ret.operandPerformance; - convertedOpPerf.resize(opPerfSupported.size()); - std::transform(opPerfSupported.begin(), opPerfSupported.end(), convertedOpPerf.begin(), - [](V1_3::Capabilities::OperandPerformance opPerf) { - return V1_2::Capabilities::OperandPerformance{ - static_cast<V1_2::OperandType>(opPerf.type), opPerf.info}; - }); - return ret; -} - -V1_3::Capabilities convertToV1_3(const V1_0::Capabilities& 
capabilities) { - return convertToV1_3(convertToV1_2(capabilities)); -} - -V1_3::Capabilities convertToV1_3(const V1_1::Capabilities& capabilities) { - return convertToV1_3(convertToV1_2(capabilities)); -} - -V1_3::Capabilities convertToV1_3(const V1_2::Capabilities& capabilities) { - V1_3::Capabilities ret = { - .relaxedFloat32toFloat16PerformanceScalar = - capabilities.relaxedFloat32toFloat16PerformanceScalar, - .relaxedFloat32toFloat16PerformanceTensor = - capabilities.relaxedFloat32toFloat16PerformanceTensor, - .ifPerformance = kNoPerformanceInfo, - .whilePerformance = kNoPerformanceInfo, - }; - auto& opPerf = ret.operandPerformance; - opPerf.resize(capabilities.operandPerformance.size()); - std::transform(capabilities.operandPerformance.begin(), capabilities.operandPerformance.end(), - opPerf.begin(), [](V1_2::Capabilities::OperandPerformance opPerf) { - return V1_3::Capabilities::OperandPerformance{ - static_cast<V1_3::OperandType>(opPerf.type), opPerf.info}; - }); - return ret; -} - -V1_3::Capabilities convertToV1_3(const V1_3::Capabilities& capabilities) { - return capabilities; -} - -static V1_0::Operation uncheckedConvertToV1_0(const V1_1::Operation& operation) { - return {.type = uncheckedConvertToV1_0(operation.type), - .inputs = operation.inputs, - .outputs = operation.outputs}; -} - -static V1_1::Operation convertToV1_1(const V1_0::Operation& operation) { - return {.type = convertToV1_1(operation.type), - .inputs = operation.inputs, - .outputs = operation.outputs}; -} - -static hardware::hidl_vec<V1_0::Operation> uncheckedConvertToV1_0( - const hardware::hidl_vec<V1_1::Operation>& operations) { - hardware::hidl_vec<V1_0::Operation> result(operations.size()); - std::transform( - operations.begin(), operations.end(), result.begin(), - [](const V1_1::Operation& operation) { return uncheckedConvertToV1_0(operation); }); - return result; -} - -static hardware::hidl_vec<V1_1::Operation> convertToV1_1( - const hardware::hidl_vec<V1_0::Operation>& operations) 
{ - hardware::hidl_vec<V1_1::Operation> result(operations.size()); - std::transform(operations.begin(), operations.end(), result.begin(), - [](const V1_0::Operation& operation) { return convertToV1_1(operation); }); - return result; -} - -bool compliantWithV1_0(const V1_3::Operand& operand) { - return validOperandType(static_cast<V1_0::OperandType>(operand.type)) && - (nonExtensionOperandTypeIsScalar(static_cast<int>(operand.type)) || - operand.dimensions.size() != 0) && - compliantWithV1_0(operand.lifetime); -} - -bool compliantWithV1_2(const V1_3::Operand& operand) { - return validOperandType(static_cast<V1_2::OperandType>(operand.type)) && - compliantWithV1_0(operand.lifetime); -} - -bool compliantWithV1_3(const V1_3::Operand& /*operand*/) { - return true; -} - -bool compliantWithAidl(const V1_3::Operand& operand) { - if (static_cast<std::underlying_type_t<V1_3::OperandType>>(operand.type) > - std::numeric_limits<int32_t>::max()) { - return false; - } - if (operand.location.poolIndex > std::numeric_limits<int32_t>::max()) { - return false; - } - if (operand.extraParams.getDiscriminator() == - V1_2::Operand::ExtraParams::hidl_discriminator::channelQuant && - operand.extraParams.channelQuant().channelDim > std::numeric_limits<int32_t>::max()) { - return false; - } - for (auto dim : operand.dimensions) { - if (dim > std::numeric_limits<int32_t>::max()) { - return false; - } - } - return true; -} - -static bool compliantWith(HalVersion version, const V1_3::Model& model, - std::set<uint32_t>* noncompliantOperations) { - // A boolean vector indicating whether each pool is compliant with the target HAL version. - std::vector<bool> isPoolCompliant(model.pools.size(), false); - std::transform( - model.pools.begin(), model.pools.end(), isPoolCompliant.begin(), - [version](const hardware::hidl_memory& pool) { return validatePool(pool, version); }); - - // A boolean vector indicating whether each operand is compliant with the target HAL version. 
- std::vector<bool> isOperandCompliant(model.main.operands.size(), false); - std::transform(model.main.operands.begin(), model.main.operands.end(), - isOperandCompliant.begin(), - [&isPoolCompliant, version](const V1_3::Operand& op) { - bool is_operand_compliant = false; - switch (version) { - case HalVersion::UNKNOWN: - is_operand_compliant = false; - break; - case HalVersion::V1_0: - is_operand_compliant = compliantWithV1_0(op); - break; - case HalVersion::V1_1: - // There is no V1_1::Operand -- both V1_0::Model - // and V1_1::Model use V1_0::Operand. - is_operand_compliant = compliantWithV1_0(op); - break; - case HalVersion::V1_2: - is_operand_compliant = compliantWithV1_2(op); - break; - case HalVersion::V1_3: - is_operand_compliant = compliantWithV1_3(op); - break; - case HalVersion::AIDL_UNSTABLE: - is_operand_compliant = compliantWithAidl(op); - break; - } - return is_operand_compliant && - !(op.lifetime == V1_3::OperandLifeTime::CONSTANT_REFERENCE && - !isPoolCompliant[op.location.poolIndex]); - }); - - auto allOperandsCompliant = [&isOperandCompliant](const hardware::hidl_vec<uint32_t>& indices) { - return std::all_of( - indices.begin(), indices.end(), - [&isOperandCompliant](const uint32_t ind) { return isOperandCompliant[ind]; }); - }; - - auto localValidateOperation = [&model, version, - &allOperandsCompliant](const V1_3::Operation& op) { - if (!allOperandsCompliant(op.inputs) || !allOperandsCompliant(op.outputs)) return false; - int error = validateOperation(static_cast<int32_t>(op.type), op.inputs.size(), - op.inputs.size() > 0 ? op.inputs.data() : nullptr, - op.outputs.size(), - op.outputs.size() > 0 ? 
op.outputs.data() : nullptr, - uncheckedConvert(model.main.operands), version); - return error == ANEURALNETWORKS_NO_ERROR; - }; - - if (noncompliantOperations) { - CHECK(noncompliantOperations->empty()); - for (uint32_t idx = 0; idx < model.main.operations.size(); ++idx) { - if (!localValidateOperation(model.main.operations[idx])) { - noncompliantOperations->insert(idx); - } - } - return noncompliantOperations->empty(); - } else { - return std::all_of(model.main.operations.begin(), model.main.operations.end(), - localValidateOperation); - } -} - -bool compliantWithV1_0(const V1_0::Model& /*model*/) { - return true; -} - -bool compliantWithV1_0(const V1_1::Model& model) { - // In addition to new enumeration values being introduced in V1_1::Model, a - // new flag was introduced to indicate whether or not float32 data can be - // calculated using float16 units. This 'relaxComputationFloat32toFloat16' - // flag is not relevant in whether a V1_1::Model is compliant with a - // V1_0::Model because all 1.0 drivers require strict calculation by default - // in the P NN runtime. Even if fp16 calculations are allowed, they can - // still be computed by a strict fp32 driver. - auto operands = uncheckedConvert(convertToV1_3(model.operands)); - return std::all_of(model.operations.begin(), model.operations.end(), - [&operands](const V1_1::Operation& op) { - int error = validateOperation( - static_cast<int32_t>(op.type), op.inputs.size(), - op.inputs.size() > 0 ? op.inputs.data() : nullptr, - op.outputs.size(), - op.outputs.size() > 0 ? 
op.outputs.data() : nullptr, operands, - HalVersion::V1_0); - return error == ANEURALNETWORKS_NO_ERROR; - }); -} - -bool compliantWithV1_0(const V1_2::Model& model, std::set<uint32_t>* noncompliantOperations) { - return compliantWith(HalVersion::V1_0, convertToV1_3(model), noncompliantOperations); -} - -bool compliantWithV1_0(const V1_3::Model& model, std::set<uint32_t>* noncompliantOperations) { - return compliantWith(HalVersion::V1_0, model, noncompliantOperations); -} - -bool compliantWithV1_1(const V1_0::Model&) { - return true; -} - -bool compliantWithV1_1(const V1_1::Model&) { - return true; -} - -bool compliantWithV1_1(const V1_2::Model& model, std::set<uint32_t>* noncompliantOperations) { - return compliantWith(HalVersion::V1_1, convertToV1_3(model), noncompliantOperations); -} - -bool compliantWithV1_1(const V1_3::Model& model, std::set<uint32_t>* noncompliantOperations) { - return compliantWith(HalVersion::V1_1, model, noncompliantOperations); -} - -bool compliantWithV1_2(const V1_0::Model&) { - return true; -} - -bool compliantWithV1_2(const V1_1::Model&) { - return true; -} - -bool compliantWithV1_2(const V1_2::Model&, std::set<uint32_t>* /*noncompliantOperations*/) { - return true; -} - -bool compliantWithV1_2(const V1_3::Model& model, std::set<uint32_t>* noncompliantOperations) { - return compliantWith(HalVersion::V1_2, model, noncompliantOperations); -} - -static V1_0::Operation uncheckedConvertToV1_0(const V1_2::Operation& operation) { - return {.type = uncheckedConvertToV1_0(operation.type), - .inputs = operation.inputs, - .outputs = operation.outputs}; -} - -static V1_0::Operation uncheckedConvertToV1_0(const V1_3::Operation& operation) { - return {.type = uncheckedConvertToV1_0(operation.type), - .inputs = operation.inputs, - .outputs = operation.outputs}; -} - -static V1_1::Operation uncheckedConvertToV1_1(const V1_2::Operation& operation) { - return {.type = uncheckedConvertToV1_1(operation.type), - .inputs = operation.inputs, - .outputs = 
operation.outputs}; -} - -static V1_1::Operation uncheckedConvertToV1_1(const V1_3::Operation& operation) { - return {.type = uncheckedConvertToV1_1(operation.type), - .inputs = operation.inputs, - .outputs = operation.outputs}; -} - -static V1_2::Operation convertToV1_2(const V1_0::Operation& operation) { - return {.type = convertToV1_2(operation.type), - .inputs = operation.inputs, - .outputs = operation.outputs}; -} - -static V1_2::Operation convertToV1_2(const V1_1::Operation& operation) { - return {.type = convertToV1_2(operation.type), - .inputs = operation.inputs, - .outputs = operation.outputs}; -} - -static V1_2::Operation uncheckedConvertToV1_2(const V1_3::Operation& operation) { - return {.type = uncheckedConvertToV1_2(operation.type), - .inputs = operation.inputs, - .outputs = operation.outputs}; -} - -static V1_3::Operation convertToV1_3(const V1_0::Operation& operation) { - return {.type = convertToV1_3(operation.type), - .inputs = operation.inputs, - .outputs = operation.outputs}; -} - -static V1_3::Operation convertToV1_3(const V1_1::Operation& operation) { - return {.type = convertToV1_3(operation.type), - .inputs = operation.inputs, - .outputs = operation.outputs}; -} - -static V1_3::Operation convertToV1_3(const V1_2::Operation& operation) { - return {.type = convertToV1_3(operation.type), - .inputs = operation.inputs, - .outputs = operation.outputs}; -} - -static hardware::hidl_vec<V1_0::Operation> uncheckedConvertToV1_0( - const hardware::hidl_vec<V1_3::Operation>& operations) { - hardware::hidl_vec<V1_0::Operation> result(operations.size()); - std::transform( - operations.begin(), operations.end(), result.begin(), - [](const V1_3::Operation& operation) { return uncheckedConvertToV1_0(operation); }); - return result; -} - -static hardware::hidl_vec<V1_0::Operation> uncheckedConvertToV1_0( - const hardware::hidl_vec<V1_2::Operation>& operations) { - hardware::hidl_vec<V1_0::Operation> result(operations.size()); - std::transform( - 
operations.begin(), operations.end(), result.begin(), - [](const V1_2::Operation& operation) { return uncheckedConvertToV1_0(operation); }); - return result; -} - -static hardware::hidl_vec<V1_2::Operation> uncheckedConvertToV1_2( - const hardware::hidl_vec<V1_3::Operation>& operations) { - hardware::hidl_vec<V1_2::Operation> result(operations.size()); - std::transform( - operations.begin(), operations.end(), result.begin(), - [](const V1_3::Operation& operation) { return uncheckedConvertToV1_2(operation); }); - return result; -} - -static hardware::hidl_vec<V1_1::Operation> uncheckedConvertToV1_1( - const hardware::hidl_vec<V1_2::Operation>& operations) { - hardware::hidl_vec<V1_1::Operation> result(operations.size()); - std::transform( - operations.begin(), operations.end(), result.begin(), - [](const V1_2::Operation& operation) { return uncheckedConvertToV1_1(operation); }); - return result; -} - -static hardware::hidl_vec<V1_1::Operation> uncheckedConvertToV1_1( - const hardware::hidl_vec<V1_3::Operation>& operations) { - hardware::hidl_vec<V1_1::Operation> result(operations.size()); - std::transform( - operations.begin(), operations.end(), result.begin(), - [](const V1_3::Operation& operation) { return uncheckedConvertToV1_1(operation); }); - return result; -} - -static hardware::hidl_vec<V1_2::Operation> convertToV1_2( - const hardware::hidl_vec<V1_0::Operation>& operations) { - hardware::hidl_vec<V1_2::Operation> result(operations.size()); - std::transform(operations.begin(), operations.end(), result.begin(), - [](const V1_0::Operation& operation) { return convertToV1_2(operation); }); - return result; -} - -static hardware::hidl_vec<V1_2::Operation> convertToV1_2( - const hardware::hidl_vec<V1_1::Operation>& operations) { - hardware::hidl_vec<V1_2::Operation> result(operations.size()); - std::transform(operations.begin(), operations.end(), result.begin(), - [](const V1_1::Operation& operation) { return convertToV1_2(operation); }); - return result; -} - 
-static hardware::hidl_vec<V1_3::Operation> convertToV1_3( - const hardware::hidl_vec<V1_0::Operation>& operations) { - hardware::hidl_vec<V1_3::Operation> result(operations.size()); - std::transform(operations.begin(), operations.end(), result.begin(), - [](const V1_0::Operation& operation) { return convertToV1_3(operation); }); - return result; -} - -static hardware::hidl_vec<V1_3::Operation> convertToV1_3( - const hardware::hidl_vec<V1_1::Operation>& operations) { - hardware::hidl_vec<V1_3::Operation> result(operations.size()); - std::transform(operations.begin(), operations.end(), result.begin(), - [](const V1_1::Operation& operation) { return convertToV1_3(operation); }); - return result; -} - -static hardware::hidl_vec<V1_3::Operation> convertToV1_3( - const hardware::hidl_vec<V1_2::Operation>& operations) { - hardware::hidl_vec<V1_3::Operation> result(operations.size()); - std::transform(operations.begin(), operations.end(), result.begin(), - [](const V1_2::Operation& operation) { return convertToV1_3(operation); }); - return result; -} - -static bool compliantWithV1_0(const V1_2::OperandType& operandType) { - return validOperandType(static_cast<V1_0::OperandType>(operandType)); -} - -static bool compliantWithV1_0(const V1_3::OperandType& operandType) { - return validOperandType(static_cast<V1_0::OperandType>(operandType)); -} - -static bool compliantWithV1_2(const V1_3::OperandType& operandType) { - return validOperandType(static_cast<V1_2::OperandType>(operandType)); -} - -V1_0::OperandType convertToV1_0(const V1_2::OperandType& operandType) { - if (!compliantWithV1_0(operandType)) { - LOG(ERROR) << "Upcasting non-compliant operand type " << toString(operandType) - << " from V1_2::OperandType to V1_0::OperandType"; - } - return static_cast<V1_0::OperandType>(operandType); -} - -V1_2::OperandType convertToV1_2(const V1_0::OperandType& operandType) { - return static_cast<V1_2::OperandType>(operandType); -} - -V1_2::OperandType convertToV1_2(const 
V1_3::OperandType& operandType) { - if (!compliantWithV1_2(operandType)) { - LOG(ERROR) << "Upcasting non-compliant operand type " << toString(operandType) - << " from V1_3::OperandType to V1_2::OperandType"; - } - return static_cast<V1_2::OperandType>(operandType); -} - -V1_0::OperandType convertToV1_0(const V1_3::OperandType& operandType) { - if (!compliantWithV1_0(operandType)) { - LOG(ERROR) << "Upcasting non-compliant operand type " << toString(operandType) - << " from V1_3::Operand to V1_0::Operand"; - } - return static_cast<V1_0::OperandType>(operandType); -} - -bool compliantWithV1_0(V1_0::OperandLifeTime /*lifetime*/) { - return true; -} - -bool compliantWithV1_0(V1_3::OperandLifeTime lifetime) { - return lifetime != V1_3::OperandLifeTime::SUBGRAPH; -} - -bool compliantWithV1_3(V1_0::OperandLifeTime /*lifetime*/) { - return true; -} - -bool compliantWithV1_3(V1_3::OperandLifeTime /*lifetime*/) { - return true; -} - -V1_0::OperandLifeTime convertToV1_0(V1_0::OperandLifeTime lifetime) { - return lifetime; -} - -V1_0::OperandLifeTime convertToV1_0(V1_3::OperandLifeTime lifetime) { - if (!compliantWithV1_0(lifetime)) { - LOG(ERROR) << "Upcasting non-compliant lifetime " << toString(lifetime) - << " from V1_3 to V1_0"; - } - return static_cast<V1_0::OperandLifeTime>(lifetime); -} - -V1_3::OperandLifeTime convertToV1_3(V1_0::OperandLifeTime lifetime) { - return static_cast<V1_3::OperandLifeTime>(lifetime); -} - -V1_3::OperandLifeTime convertToV1_3(V1_3::OperandLifeTime lifetime) { - return lifetime; -} - -V1_0::Operand convertToV1_0(const V1_2::Operand& operand) { - return {.type = convertToV1_0(operand.type), - .dimensions = operand.dimensions, - .numberOfConsumers = operand.numberOfConsumers, - .scale = operand.scale, - .zeroPoint = operand.zeroPoint, - .lifetime = convertToV1_0(operand.lifetime), - .location = operand.location}; -} - -V1_0::Operand convertToV1_0(const V1_3::Operand& operand) { - return {.type = convertToV1_0(operand.type), - .dimensions = 
operand.dimensions, - .numberOfConsumers = operand.numberOfConsumers, - .scale = operand.scale, - .zeroPoint = operand.zeroPoint, - .lifetime = convertToV1_0(operand.lifetime), - .location = operand.location}; -} - -V1_2::Operand convertToV1_2(const V1_0::Operand& operand) { - return {.type = convertToV1_2(operand.type), - .dimensions = operand.dimensions, - .numberOfConsumers = operand.numberOfConsumers, - .scale = operand.scale, - .zeroPoint = operand.zeroPoint, - .lifetime = operand.lifetime, - .location = operand.location}; -} - -V1_2::Operand convertToV1_2(const V1_3::Operand& operand) { - return {.type = convertToV1_2(operand.type), - .dimensions = operand.dimensions, - .numberOfConsumers = operand.numberOfConsumers, - .scale = operand.scale, - .zeroPoint = operand.zeroPoint, - .lifetime = static_cast<V1_0::OperandLifeTime>(operand.lifetime), - .location = operand.location, - .extraParams = operand.extraParams}; -} - -V1_3::Operand convertToV1_3(const V1_0::Operand& operand) { - return {.type = static_cast<V1_3::OperandType>(operand.type), - .dimensions = operand.dimensions, - .numberOfConsumers = operand.numberOfConsumers, - .scale = operand.scale, - .zeroPoint = operand.zeroPoint, - .lifetime = convertToV1_3(operand.lifetime), - .location = operand.location}; -} - -V1_3::Operand convertToV1_3(const V1_2::Operand& operand) { - return {.type = static_cast<V1_3::OperandType>(operand.type), - .dimensions = operand.dimensions, - .numberOfConsumers = operand.numberOfConsumers, - .scale = operand.scale, - .zeroPoint = operand.zeroPoint, - .lifetime = convertToV1_3(operand.lifetime), - .location = operand.location, - .extraParams = operand.extraParams}; -} - -V1_3::Operand convertToV1_3(const V1_3::Operand& operand) { - return operand; -} - -hardware::hidl_vec<V1_0::Operand> convertToV1_0(const hardware::hidl_vec<V1_0::Operand>& operands) { - return operands; -} - -hardware::hidl_vec<V1_0::Operand> convertToV1_0(const hardware::hidl_vec<V1_2::Operand>& operands) { 
- hardware::hidl_vec<V1_0::Operand> result(operands.size()); - std::transform(operands.begin(), operands.end(), result.begin(), - [](const V1_2::Operand& operand) { return convertToV1_0(operand); }); - return result; -} - -hardware::hidl_vec<V1_0::Operand> convertToV1_0(const hardware::hidl_vec<V1_3::Operand>& operands) { - hardware::hidl_vec<V1_0::Operand> result(operands.size()); - std::transform(operands.begin(), operands.end(), result.begin(), - [](const V1_3::Operand& operand) { return convertToV1_0(operand); }); - return result; -} - -hardware::hidl_vec<V1_2::Operand> convertToV1_2(const hardware::hidl_vec<V1_0::Operand>& operands) { - hardware::hidl_vec<V1_2::Operand> result(operands.size()); - std::transform(operands.begin(), operands.end(), result.begin(), - [](const V1_0::Operand& operand) { return convertToV1_2(operand); }); - return result; -} - -hardware::hidl_vec<V1_2::Operand> convertToV1_2(const hardware::hidl_vec<V1_2::Operand>& operands) { - return operands; -} - -hardware::hidl_vec<V1_2::Operand> convertToV1_2(const hardware::hidl_vec<V1_3::Operand>& operands) { - hardware::hidl_vec<V1_2::Operand> result(operands.size()); - std::transform(operands.begin(), operands.end(), result.begin(), - [](const V1_3::Operand& operand) { return convertToV1_2(operand); }); - return result; -} - -hardware::hidl_vec<V1_3::Operand> convertToV1_3(const hardware::hidl_vec<V1_0::Operand>& operands) { - hardware::hidl_vec<V1_3::Operand> result(operands.size()); - std::transform(operands.begin(), operands.end(), result.begin(), - [](const V1_0::Operand& operand) { return convertToV1_3(operand); }); - return result; -} - -hardware::hidl_vec<V1_3::Operand> convertToV1_3(const hardware::hidl_vec<V1_2::Operand>& operands) { - hardware::hidl_vec<V1_3::Operand> result(operands.size()); - std::transform(operands.begin(), operands.end(), result.begin(), - [](const V1_2::Operand& operand) { return convertToV1_3(operand); }); - return result; -} - 
-hardware::hidl_vec<V1_3::Operand> convertToV1_3(const hardware::hidl_vec<V1_3::Operand>& operands) { - return operands; -} - -V1_0::Model convertToV1_0(const V1_0::Model& model) { - return model; -} - -V1_0::Model convertToV1_0(const V1_1::Model& model) { - if (!compliantWithV1_0(model)) { - LOG(ERROR) << "Upcasting non-compliant model " << SHOW_IF_DEBUG(toString(model)) - << " from V1_1::Model to V1_0::Model"; - } - return {.operands = model.operands, - .operations = uncheckedConvertToV1_0(model.operations), - .inputIndexes = model.inputIndexes, - .outputIndexes = model.outputIndexes, - .operandValues = model.operandValues, - .pools = model.pools}; -} - -V1_0::Model convertToV1_0(const V1_2::Model& model) { - if (!compliantWithV1_0(model)) { - LOG(ERROR) << "Upcasting non-compliant model " << SHOW_IF_DEBUG(toString(model)) - << " from V1_2::Model to V1_0::Model"; - } - return {.operands = convertToV1_0(model.operands), - .operations = uncheckedConvertToV1_0(model.operations), - .inputIndexes = model.inputIndexes, - .outputIndexes = model.outputIndexes, - .operandValues = model.operandValues, - .pools = model.pools}; -} - -V1_0::Model convertToV1_0(const V1_3::Model& model) { - if (!compliantWithV1_0(model)) { - LOG(ERROR) << "Upcasting non-compliant model " << SHOW_IF_DEBUG(toString(model)) - << " from V1_3::Model to V1_0::Model"; - } - return {.operands = convertToV1_0(model.main.operands), - .operations = uncheckedConvertToV1_0(model.main.operations), - .inputIndexes = model.main.inputIndexes, - .outputIndexes = model.main.outputIndexes, - .operandValues = model.operandValues, - .pools = model.pools}; -} - -V1_1::Model convertToV1_1(const V1_0::Model& model) { - return {.operands = model.operands, - .operations = convertToV1_1(model.operations), - .inputIndexes = model.inputIndexes, - .outputIndexes = model.outputIndexes, - .operandValues = model.operandValues, - .pools = model.pools, - .relaxComputationFloat32toFloat16 = false}; -} - -V1_1::Model 
convertToV1_1(const V1_1::Model& model) { - return model; -} - -V1_1::Model convertToV1_1(const V1_2::Model& model) { - if (!compliantWithV1_1(model)) { - LOG(ERROR) << "Upcasting non-compliant model " << SHOW_IF_DEBUG(toString(model)) - << " from V1_2::Model to V1_1::Model"; - } - return {.operands = convertToV1_0(model.operands), // Operands in 1.1 and 1.0 are identical. - .operations = uncheckedConvertToV1_1(model.operations), - .inputIndexes = model.inputIndexes, - .outputIndexes = model.outputIndexes, - .operandValues = model.operandValues, - .pools = model.pools, - .relaxComputationFloat32toFloat16 = model.relaxComputationFloat32toFloat16}; -} - -V1_1::Model convertToV1_1(const V1_3::Model& model) { - if (!compliantWithV1_1(model)) { - LOG(ERROR) << "Upcasting non-compliant model " << SHOW_IF_DEBUG(toString(model)) - << " from V1_3::Model to V1_1::Model"; - } - return {// Operands in 1.1 and 1.0 are identical. - .operands = convertToV1_0(model.main.operands), - .operations = uncheckedConvertToV1_1(model.main.operations), - .inputIndexes = model.main.inputIndexes, - .outputIndexes = model.main.outputIndexes, - .operandValues = model.operandValues, - .pools = model.pools, - .relaxComputationFloat32toFloat16 = model.relaxComputationFloat32toFloat16}; -} - -V1_2::Model convertToV1_2(const V1_0::Model& model) { - return {.operands = convertToV1_2(model.operands), - .operations = convertToV1_2(model.operations), - .inputIndexes = model.inputIndexes, - .outputIndexes = model.outputIndexes, - .operandValues = model.operandValues, - .pools = model.pools, - .relaxComputationFloat32toFloat16 = false}; -} - -V1_2::Model convertToV1_2(const V1_1::Model& model) { - return {.operands = convertToV1_2(model.operands), - .operations = convertToV1_2(model.operations), - .inputIndexes = model.inputIndexes, - .outputIndexes = model.outputIndexes, - .operandValues = model.operandValues, - .pools = model.pools, - .relaxComputationFloat32toFloat16 = 
model.relaxComputationFloat32toFloat16}; -} - -V1_2::Model convertToV1_2(const V1_2::Model& model) { - return model; -} - -V1_2::Model convertToV1_2(const V1_3::Model& model) { - if (!compliantWithV1_2(model)) { - LOG(ERROR) << "Upcasting non-compliant model " << SHOW_IF_DEBUG(toString(model)) - << " from V1_3::Model to V1_2::Model"; - } - return {.operands = convertToV1_2(model.main.operands), - .operations = uncheckedConvertToV1_2(model.main.operations), - .inputIndexes = model.main.inputIndexes, - .outputIndexes = model.main.outputIndexes, - .operandValues = model.operandValues, - .pools = model.pools, - .relaxComputationFloat32toFloat16 = model.relaxComputationFloat32toFloat16, - .extensionNameToPrefix = model.extensionNameToPrefix}; -} - -V1_3::Model convertToV1_3(const V1_0::Model& model) { - return {.main = {.operands = convertToV1_3(model.operands), - .operations = convertToV1_3(model.operations), - .inputIndexes = model.inputIndexes, - .outputIndexes = model.outputIndexes}, - .operandValues = model.operandValues, - .pools = model.pools, - .relaxComputationFloat32toFloat16 = false}; -} - -V1_3::Model convertToV1_3(const V1_1::Model& model) { - return {.main = {.operands = convertToV1_3(model.operands), - .operations = convertToV1_3(model.operations), - .inputIndexes = model.inputIndexes, - .outputIndexes = model.outputIndexes}, - .operandValues = model.operandValues, - .pools = model.pools, - .relaxComputationFloat32toFloat16 = model.relaxComputationFloat32toFloat16}; -} - -V1_3::Model convertToV1_3(const V1_2::Model& model) { - return {.main = {.operands = convertToV1_3(model.operands), - .operations = convertToV1_3(model.operations), - .inputIndexes = model.inputIndexes, - .outputIndexes = model.outputIndexes}, - .operandValues = model.operandValues, - .pools = model.pools, - .relaxComputationFloat32toFloat16 = model.relaxComputationFloat32toFloat16, - .extensionNameToPrefix = model.extensionNameToPrefix}; -} - -V1_3::Model convertToV1_3(const 
V1_3::Model& model) { - return model; -} - -bool compliantWithV1_0(const V1_0::Request& /*request*/) { - return true; -} - -bool compliantWithV1_0(const V1_3::Request& request) { - return std::all_of(request.pools.begin(), request.pools.end(), [](const auto& pool) { - if (pool.getDiscriminator() != V1_3::Request::MemoryPool::hidl_discriminator::hidlMemory) { - return false; - } - const auto& name = pool.hidlMemory().name(); - return name == "ashmem" || name == "mmap_fd"; - }); -} - -bool compliantWithV1_2(const V1_3::Request& request) { - return std::all_of(request.pools.begin(), request.pools.end(), [](const auto& pool) { - if (pool.getDiscriminator() != V1_3::Request::MemoryPool::hidl_discriminator::hidlMemory) { - return false; - } - const auto& name = pool.hidlMemory().name(); - return name == "ashmem" || name == "mmap_fd" || name == "hardware_buffer_blob" || - name == "hardware_buffer"; - }); -} - -static hardware::hidl_memory convertToV1_0(const V1_3::Request::MemoryPool& pool) { - switch (pool.getDiscriminator()) { - case V1_3::Request::MemoryPool::hidl_discriminator::hidlMemory: - return pool.hidlMemory(); - case V1_3::Request::MemoryPool::hidl_discriminator::token: - return hardware::hidl_memory{}; - } -} - -static V1_3::Request::MemoryPool convertToV1_3(const hardware::hidl_memory& pool) { - V1_3::Request::MemoryPool ret; - ret.hidlMemory(pool); - return ret; -} - -V1_0::Request convertToV1_0(const V1_0::Request& request) { - return request; -} - -static V1_0::Request uncheckedConvertToV1_0(const V1_3::Request& request) { - hardware::hidl_vec<hardware::hidl_memory> pools(request.pools.size()); - std::transform(request.pools.begin(), request.pools.end(), pools.begin(), - [](const auto& pool) { return convertToV1_0(pool); }); - return {.inputs = request.inputs, .outputs = request.outputs, .pools = std::move(pools)}; -} - -V1_0::Request convertToV1_0(const V1_3::Request& request) { - if (!compliantWithV1_0(request)) { - LOG(ERROR) << "Upcasting non-compliant 
request " << SHOW_IF_DEBUG(toString(request)) - << " from V1_3::Request to V1_0::Request of version 1.0"; - } - return uncheckedConvertToV1_0(request); -} - -V1_0::Request convertToV1_2(const V1_3::Request& request) { - if (!compliantWithV1_2(request)) { - LOG(ERROR) << "Upcasting non-compliant request " << SHOW_IF_DEBUG(toString(request)) - << " from V1_3::Request to V1_0::Request of version 1.2"; - } - return uncheckedConvertToV1_0(request); -} - -V1_3::Request convertToV1_3(const V1_0::Request& request) { - hardware::hidl_vec<V1_3::Request::MemoryPool> pools(request.pools.size()); - std::transform(request.pools.begin(), request.pools.end(), pools.begin(), - [](const auto& pool) { return convertToV1_3(pool); }); - return {.inputs = request.inputs, .outputs = request.outputs, .pools = std::move(pools)}; -} - -V1_3::Request convertToV1_3(const V1_3::Request& request) { - return request; -} - -ErrorStatus uncheckedConvert(V1_0::ErrorStatus status) { - return handleError(convert(status)); -} - -ErrorStatus uncheckedConvert(V1_3::ErrorStatus status) { - return handleError(convert(status)); -} - -OperandType uncheckedConvert(V1_3::OperandType operandType) { - return handleError(unvalidatedConvert(operandType)); -} - -OperationType uncheckedConvert(V1_3::OperationType operandType) { - return handleError(unvalidatedConvert(operandType)); -} - -Operand::LifeTime uncheckedConvert(V1_3::OperandLifeTime lifetime) { - return handleError(unvalidatedConvert(lifetime)); -} - -MeasureTiming uncheckedConvert(V1_2::MeasureTiming measure) { - return handleError(convert(measure)); -} - -DataLocation uncheckedConvert(const V1_0::DataLocation& location) { - return handleError(unvalidatedConvert(location)); -} - -Operand uncheckedConvert(const V1_3::Operand& operand) { - return handleError(unvalidatedConvert(operand)); -} - -Operand::ExtraParams uncheckedConvert(const V1_2::Operand::ExtraParams& params) { - return handleError(unvalidatedConvert(params)); -} - 
-Operand::SymmPerChannelQuantParams uncheckedConvert(const V1_2::SymmPerChannelQuantParams& params) { - return handleError(unvalidatedConvert(params)); -} - -Operand::ExtensionParams uncheckedConvert(const hardware::hidl_vec<uint8_t>& params) { - return params; -} - -Operation uncheckedConvert(const V1_3::Operation& operation) { - return handleError(unvalidatedConvert(operation)); -} - -template <typename CanonicalType, typename HalType> -static std::vector<CanonicalType> convertVec(const hardware::hidl_vec<HalType>& items) { - std::vector<CanonicalType> result; - result.reserve(items.size()); - std::transform(items.begin(), items.end(), std::back_inserter(result), - [](const HalType& item) { return uncheckedConvert(item); }); - return result; -} - -Model uncheckedConvert(const V1_3::Model& model) { - return handleError(convert(model)); -} - -Model::Subgraph uncheckedConvert(const V1_3::Subgraph& subgraph) { - return handleError(unvalidatedConvert(subgraph)); -} - -Model::ExtensionNameAndPrefix uncheckedConvert(const V1_2::Model::ExtensionNameAndPrefix& x) { - return handleError(unvalidatedConvert(x)); -} - -Request uncheckedConvert(const V1_3::Request& request) { - return handleError(convert(request)); -} - -Request::Argument uncheckedConvert(const V1_0::RequestArgument& requestArgument) { - return handleError(unvalidatedConvert(requestArgument)); -} - -Request::MemoryPool uncheckedConvert(const V1_3::Request::MemoryPool& memoryPool) { - return handleError(unvalidatedConvert(memoryPool)); -} - -OutputShape uncheckedConvert(const V1_2::OutputShape& outputShape) { - return handleError(unvalidatedConvert(outputShape)); -} - -std::vector<OutputShape> uncheckedConvert( - const hardware::hidl_vec<V1_2::OutputShape>& outputShapes) { - return convertVec<OutputShape>(outputShapes); -} - -Capabilities uncheckedConvert(const V1_3::Capabilities& capabilities) { - return handleError(convert(capabilities)); -} - -Capabilities::OperandPerformance uncheckedConvert( - const 
V1_3::Capabilities::OperandPerformance& operandPerformance) { - return handleError(unvalidatedConvert(operandPerformance)); -} - -Capabilities::PerformanceInfo uncheckedConvert(const V1_0::PerformanceInfo& performanceInfo) { - return handleError(unvalidatedConvert(performanceInfo)); -} - -Extension uncheckedConvert(const V1_2::Extension& extension) { - return handleError(unvalidatedConvert(extension)); -} - -std::vector<Extension> uncheckedConvert(const hardware::hidl_vec<V1_2::Extension>& extensions) { - return convertVec<Extension>(extensions); -} - -Extension::OperandTypeInformation uncheckedConvert( - const V1_2::Extension::OperandTypeInformation& info) { - return handleError(unvalidatedConvert(info)); -} - -OptionalDuration uncheckedConvert(const V1_3::OptionalTimeoutDuration& timeoutDuration) { - return handleError(convert(timeoutDuration)); -} - -Timing uncheckedConvert(const V1_2::Timing& timing) { - return handleError(convert(timing)); -} - -V1_0::ErrorStatus convertToV1_0(ErrorStatus status) { - return static_cast<V1_0::ErrorStatus>(static_cast<int>(status)); -} - -V1_3::ErrorStatus convertToV1_3(ErrorStatus status) { - return handleError(V1_3::utils::convert(status)); -} - -V1_3::OperandType convertToV1_3(OperandType operandType) { - return handleError(V1_3::utils::unvalidatedConvert(operandType)); -} - -V1_3::OperationType convertToV1_3(OperationType operandType) { - return handleError(V1_3::utils::unvalidatedConvert(operandType)); -} - -V1_3::OperandLifeTime convertToV1_3(Operand::LifeTime lifetime) { - return handleError(V1_3::utils::unvalidatedConvert(lifetime)); -} - -V1_1::ExecutionPreference convertToV1_1(ExecutionPreference preference) { - return handleError(V1_1::utils::convert(preference)); -} - -V1_3::Priority convertToV1_3(Priority priority) { - return handleError(V1_3::utils::convert(priority)); -} - -V1_2::MeasureTiming convertToV1_2(MeasureTiming measure) { - return handleError(V1_2::utils::convert(measure)); -} - -V1_0::DataLocation 
convertToV1_0(const DataLocation& location) { - return handleError(V1_0::utils::unvalidatedConvert(location)); -} - -V1_3::Operand convertToV1_3(const Operand& operand) { - return handleError(V1_3::utils::unvalidatedConvert(operand)); -} - -V1_2::Operand::ExtraParams convertToV1_2(const Operand::ExtraParams& params) { - return handleError(V1_2::utils::unvalidatedConvert(params)); -} - -V1_2::SymmPerChannelQuantParams convertToV1_2(const Operand::SymmPerChannelQuantParams& params) { - return handleError(V1_2::utils::unvalidatedConvert(params)); -} - -hardware::hidl_vec<uint8_t> uncheckedConvert(const Operand::ExtensionParams& params) { - return params; -} - -V1_3::Operation convertToV1_3(const Operation& operation) { - return handleError(V1_3::utils::unvalidatedConvert(operation)); -} - -template <typename HalType, typename CanonicalType> -static hardware::hidl_vec<HalType> convertVecToV1_0(const std::vector<CanonicalType>& items) { - hardware::hidl_vec<HalType> result(items.size()); - std::transform(items.begin(), items.end(), result.begin(), - [](const CanonicalType& item) { return convertToV1_0(item); }); - return result; -} - -template <typename HalType, typename CanonicalType> -static hardware::hidl_vec<HalType> convertVecToV1_2(const std::vector<CanonicalType>& items) { - hardware::hidl_vec<HalType> result(items.size()); - std::transform(items.begin(), items.end(), result.begin(), - [](const CanonicalType& item) { return convertToV1_2(item); }); - return result; -} - -template <typename HalType, typename CanonicalType> -static hardware::hidl_vec<HalType> convertVecToV1_3(const std::vector<CanonicalType>& items) { - hardware::hidl_vec<HalType> result(items.size()); - std::transform(items.begin(), items.end(), result.begin(), - [](const CanonicalType& item) { return convertToV1_3(item); }); - return result; -} - -V1_2::OutputShape convertToV1_2(const OutputShape& outputShape) { - return handleError(V1_2::utils::unvalidatedConvert(outputShape)); -} - 
-hardware::hidl_vec<V1_2::OutputShape> convertToV1_2(const std::vector<OutputShape>& outputShapes) { - return convertVecToV1_2<V1_2::OutputShape>(outputShapes); -} - -V1_3::Model convertToV1_3(const Model& model) { - return handleError(V1_3::utils::convert(model)); -} - -V1_3::Subgraph convertToV1_3(const Model::Subgraph& subgraph) { - return handleError(V1_3::utils::unvalidatedConvert(subgraph)); -} - -V1_2::Model::ExtensionNameAndPrefix convertToV1_2(const Model::ExtensionNameAndPrefix& x) { - return handleError(V1_2::utils::unvalidatedConvert(x)); -} - -V1_3::Request convertToV1_3(const Request& request) { - return handleError(V1_3::utils::convert(request)); -} - -V1_0::RequestArgument convertToV1_0(const Request::Argument& requestArgument) { - return handleError(V1_0::utils::unvalidatedConvert(requestArgument)); -} - -V1_3::Request::MemoryPool convertToV1_3(const Request::MemoryPool& memoryPool) { - return handleError(V1_3::utils::unvalidatedConvert(memoryPool)); -} - -std::vector<Request::MemoryPool> uncheckedConvert( - const hardware::hidl_vec<V1_3::Request::MemoryPool>& memoryPools) { - return convertVec<Request::MemoryPool>(memoryPools); -} - -V1_3::OptionalTimePoint convertToV1_3(const OptionalTimePoint& timePoint) { - return handleError(V1_3::utils::convert(timePoint)); -} - -V1_3::OptionalTimeoutDuration convertToV1_3(const OptionalDuration& timeoutDuration) { - return handleError(V1_3::utils::convert(timeoutDuration)); -} - -V1_2::Timing convertToV1_2(const Timing& timing) { - return handleError(V1_2::utils::convert(timing)); -} - -V1_3::BufferRole convertToV1_3(const BufferRole& bufferRole) { - return handleError(V1_3::utils::unvalidatedConvert(bufferRole)); -} - -hardware::hidl_vec<V1_3::BufferRole> convertToV1_3(const std::vector<BufferRole>& bufferRoles) { - return convertVecToV1_3<V1_3::BufferRole>(bufferRoles); -} - -hardware::hidl_vec<uint8_t> convertToV1_0(const Model::OperandValues& operandValues) { - return 
handleError(V1_0::utils::unvalidatedConvert(operandValues)); -} - -hardware::hidl_memory convertToV1_0(const SharedMemory& memory) { - return handleError(V1_0::utils::unvalidatedConvert(memory)); -} - -SharedMemory uncheckedConvert(const hardware::hidl_memory& memory) { - return handleError(convert(memory)); -} - -hardware::hidl_vec<hardware::hidl_memory> convertToV1_0(const std::vector<SharedMemory>& memories) { - return convertVecToV1_0<hardware::hidl_memory>(memories); -} - -std::vector<SharedMemory> uncheckedConvert( - const hardware::hidl_vec<hardware::hidl_memory>& memories) { - return convertVec<SharedMemory>(memories); -} - -std::vector<Model::Subgraph> uncheckedConvert(const hardware::hidl_vec<V1_3::Subgraph>& subgraphs) { - return convertVec<Model::Subgraph>(subgraphs); -} - -std::vector<Operand> uncheckedConvert(const hardware::hidl_vec<V1_3::Operand>& operands) { - return convertVec<Operand>(operands); -} - -} // namespace nn -} // namespace android
diff --git a/common/LegacyUtils.cpp b/common/LegacyUtils.cpp deleted file mode 100644 index 79dd59d..0000000 --- a/common/LegacyUtils.cpp +++ /dev/null
@@ -1,1867 +0,0 @@ -/* - * Copyright (C) 2017 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#define LOG_TAG "Utils" - -#include "LegacyUtils.h" - -#include <android-base/logging.h> -#include <android-base/properties.h> -#include <android-base/strings.h> -#include <errno.h> -#include <nnapi/TypeUtils.h> -#include <poll.h> - -#include <algorithm> -#include <functional> -#include <limits> -#include <numeric> -#include <string> -#include <tuple> -#include <unordered_map> -#include <utility> -#include <vector> - -#include "ControlFlow.h" -#include "NeuralNetworks.h" -#include "NeuralNetworksOEM.h" -#include "OperationResolver.h" - -namespace android { -namespace nn { - -const char kVLogPropKey[] = "debug.nn.vlog"; -int vLogMask = ~0; - -// Split the space separated list of tags from verbose log setting and build the -// logging mask from it. note that '1' and 'all' are special cases to enable all -// verbose logging. -// -// NN API verbose logging setting comes from system property debug.nn.vlog. -// Example: -// setprop debug.nn.vlog 1 : enable all logging tags. -// setprop debug.nn.vlog "model compilation" : only enable logging for MODEL and -// COMPILATION tags. 
-void initVLogMask() { - vLogMask = 0; - const std::string vLogSetting = android::base::GetProperty(kVLogPropKey, ""); - if (vLogSetting.empty()) { - return; - } - - std::unordered_map<std::string, int> vLogFlags = {{"1", -1}, - {"all", -1}, - {"model", MODEL}, - {"compilation", COMPILATION}, - {"execution", EXECUTION}, - {"cpuexe", CPUEXE}, - {"manager", MANAGER}, - {"driver", DRIVER}, - {"memory", MEMORY}}; - - std::vector<std::string> elements = android::base::Split(vLogSetting, " ,:"); - for (const auto& elem : elements) { - const auto& flag = vLogFlags.find(elem); - if (flag == vLogFlags.end()) { - LOG(ERROR) << "Unknown trace flag: " << elem; - continue; - } - - if (flag->second == -1) { - // -1 is used for the special values "1" and "all" that enable all - // tracing. - vLogMask = ~0; - return; - } else { - vLogMask |= 1 << flag->second; - } - } -} - -Duration makeTimeoutDuration(uint64_t nanoseconds) { - constexpr auto kMaxCount = Duration::max().count(); - using CommonType = std::common_type_t<Duration::rep, uint64_t>; - const auto count = std::min<CommonType>(kMaxCount, nanoseconds); - return Duration{static_cast<Duration::rep>(count)}; -} - -OptionalDuration makeTimeoutDuration(int64_t nanoseconds) { - CHECK_GE(nanoseconds, -1); - if (nanoseconds == -1) { - return OptionalDuration{}; - } - return makeTimeoutDuration(static_cast<uint64_t>(nanoseconds)); -} - -TimePoint makeDeadline(Duration duration) { - constexpr auto kMaxTime = TimePoint::max(); - const auto currentTime = Clock::now(); - - // If there would be an overflow, use the max value. 
- if (duration > kMaxTime - currentTime) { - return kMaxTime; - } - return currentTime + duration; -} - -bool hasDeadlinePassed(const OptionalTimePoint& deadline) { - if (!deadline.has_value()) { - return false; - } - return Clock::now() >= *deadline; -} - -static bool isExtensionOperandType(int32_t type) { - return (static_cast<uint32_t>(type) >> kExtensionTypeBits) != 0; -} - -static bool isExtensionOperationType(ANeuralNetworksOperationType type) { - return (static_cast<uint32_t>(type) >> kExtensionTypeBits) != 0; -} - -bool isExtensionOperandType(OperandType type) { - return isExtensionOperandType(static_cast<int32_t>(type)); -} - -bool isExtensionOperationType(OperationType type) { - return isExtensionOperationType(static_cast<int32_t>(type)); -} - -namespace { - -template <typename EntryType, uint32_t entryCount, uint32_t entryCountOEM> -EntryType tableLookup(const EntryType (&table)[entryCount], - const EntryType (&tableOEM)[entryCountOEM], uint32_t code) { - if (code < entryCount) { - return table[code]; - } else if (code >= kOEMCodeBase && (code - kOEMCodeBase) < entryCountOEM) { - return tableOEM[code - kOEMCodeBase]; - } else { - nnAssert(!"tableLookup: bad code"); - return EntryType(); - } -} - -static Version convert(HalVersion halVersion) { - switch (halVersion) { - case HalVersion::UNKNOWN: - break; - case HalVersion::V1_0: - return Version::ANDROID_OC_MR1; - case HalVersion::V1_1: - return Version::ANDROID_P; - case HalVersion::V1_2: - return Version::ANDROID_Q; - case HalVersion::V1_3: - return Version::ANDROID_R; - case HalVersion::AIDL_UNSTABLE: - return Version::ANDROID_S; - } - LOG(FATAL) << "Cannot convert " << halVersion; - return {}; -} - -class OperationValidationContext : public IOperationValidationContext { - DISALLOW_IMPLICIT_CONSTRUCTORS(OperationValidationContext); - - public: - OperationValidationContext(const char* operationName, uint32_t inputCount, - const uint32_t* inputIndexes, uint32_t outputCount, - const uint32_t* 
outputIndexes, const Operand* operands) - : operationName(operationName), - inputCount(inputCount), - inputIndexes(inputIndexes), - outputCount(outputCount), - outputIndexes(outputIndexes), - operands(operands) {} - - const char* getOperationName() const override; - - uint32_t getNumInputs() const override; - OperandType getInputType(uint32_t index) const override; - Shape getInputShape(uint32_t index) const override; - const Operand::ExtraParams& getInputExtraParams(uint32_t index) const override; - - uint32_t getNumOutputs() const override; - OperandType getOutputType(uint32_t index) const override; - Shape getOutputShape(uint32_t index) const override; - - private: - const Operand* getInputOperand(uint32_t index) const; - const Operand* getOutputOperand(uint32_t index) const; - - const char* operationName; - uint32_t inputCount; - const uint32_t* inputIndexes; - uint32_t outputCount; - const uint32_t* outputIndexes; - const Operand* operands; -}; - -const char* OperationValidationContext::getOperationName() const { - return operationName; -} - -const Operand* OperationValidationContext::getInputOperand(uint32_t index) const { - CHECK(index < static_cast<uint32_t>(inputCount)); - return &operands[inputIndexes[index]]; -} - -const Operand* OperationValidationContext::getOutputOperand(uint32_t index) const { - CHECK(index < static_cast<uint32_t>(outputCount)); - return &operands[outputIndexes[index]]; -} - -uint32_t OperationValidationContext::getNumInputs() const { - return inputCount; -} - -uint32_t OperationValidationContext::getNumOutputs() const { - return outputCount; -} - -OperandType OperationValidationContext::getInputType(uint32_t index) const { - return getInputOperand(index)->type; -} - -Shape OperationValidationContext::getInputShape(uint32_t index) const { - const Operand* operand = getInputOperand(index); - return {operand->type, operand->dimensions, operand->scale, operand->zeroPoint, - operand->extraParams}; -} - -const Operand::ExtraParams& 
OperationValidationContext::getInputExtraParams(uint32_t index) const { - return getInputOperand(index)->extraParams; -} - -OperandType OperationValidationContext::getOutputType(uint32_t index) const { - return getOutputOperand(index)->type; -} - -Shape OperationValidationContext::getOutputShape(uint32_t index) const { - const Operand* operand = getOutputOperand(index); - return {operand->type, operand->dimensions, operand->scale, operand->zeroPoint, - operand->extraParams}; -} - -}; // anonymous namespace - -#define COUNT(X) (sizeof(X) / sizeof(X[0])) - -const uint32_t kSizeOfDataType[]{ - 4, // ANEURALNETWORKS_FLOAT32 - 4, // ANEURALNETWORKS_INT32 - 4, // ANEURALNETWORKS_UINT32 - 4, // ANEURALNETWORKS_TENSOR_FLOAT32 - 4, // ANEURALNETWORKS_TENSOR_INT32 - 1, // ANEURALNETWORKS_TENSOR_QUANT8_ASYMM - 1, // ANEURALNETWORKS_BOOL - 2, // ANEURALNETWORKS_TENSOR_QUANT16_SYMM - 2, // ANEURALNETWORKS_TENSOR_FLOAT16 - 1, // ANEURALNETWORKS_TENSOR_BOOL8 - 2, // ANEURALNETWORKS_FLOAT16 - 1, // ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL - 2, // ANEURALNETWORKS_TENSOR_QUANT16_ASYMM - 1, // ANEURALNETWORKS_TENSOR_QUANT8_SYMM - 1, // ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED - 0, // ANEURALNETWORKS_MODEL -}; - -static_assert(COUNT(kSizeOfDataType) == kNumberOfDataTypes, "kSizeOfDataType is incorrect"); - -const bool kScalarDataType[]{ - true, // ANEURALNETWORKS_FLOAT32 - true, // ANEURALNETWORKS_INT32 - true, // ANEURALNETWORKS_UINT32 - false, // ANEURALNETWORKS_TENSOR_FLOAT32 - false, // ANEURALNETWORKS_TENSOR_INT32 - false, // ANEURALNETWORKS_TENSOR_QUANT8_ASYMM - true, // ANEURALNETWORKS_BOOL - false, // ANEURALNETWORKS_TENSOR_QUANT16_SYMM - false, // ANEURALNETWORKS_TENSOR_FLOAT16 - false, // ANEURALNETWORKS_TENSOR_BOOL8 - true, // ANEURALNETWORKS_FLOAT16 - false, // ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL - false, // ANEURALNETWORKS_TENSOR_QUANT16_ASYMM - false, // ANEURALNETWORKS_TENSOR_QUANT8_SYMM - false, // ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED - true, 
// ANEURALNETWORKS_MODEL -}; - -static_assert(COUNT(kScalarDataType) == kNumberOfDataTypes, "kScalarDataType is incorrect"); - -const uint32_t kSizeOfDataTypeOEM[]{ - 0, // ANEURALNETWORKS_OEM - 1, // ANEURALNETWORKS_TENSOR_OEM_BYTE -}; - -static_assert(COUNT(kSizeOfDataTypeOEM) == kNumberOfDataTypesOEM, - "kSizeOfDataTypeOEM is incorrect"); - -const bool kScalarDataTypeOEM[]{ - true, // ANEURALNETWORKS_OEM - false, // ANEURALNETWORKS_TENSOR_OEM_BYTE -}; - -static_assert(COUNT(kScalarDataTypeOEM) == kNumberOfDataTypesOEM, - "kScalarDataTypeOEM is incorrect"); - -bool nonExtensionOperandTypeIsScalar(int type) { - CHECK(!isExtensionOperandType(type)) << "Extension operand types are not supported"; - return tableLookup(kScalarDataType, kScalarDataTypeOEM, type); -} - -uint32_t nonExtensionOperandSizeOfData(OperandType type, const std::vector<uint32_t>& dimensions) { - const size_t size = getNonExtensionSize(type, dimensions).value(); - CHECK_LE(size, std::numeric_limits<uint32_t>::max()); - return size; -} - -// Returns a pair of {false, size} on success, {true, 0} if size overflows uint32_t. 
-static std::pair<bool, uint32_t> sizeOfTensorDataHelper(uint32_t sizeOfElement, - const std::vector<uint32_t>& dimensions) { - if (dimensions.empty()) { - return {false, 0}; - } - uint64_t size = static_cast<uint64_t>(sizeOfElement); - constexpr uint64_t kMaxSize = static_cast<uint64_t>(std::numeric_limits<uint32_t>::max()); - for (uint32_t d : dimensions) { - size *= d; - if (size > kMaxSize) return {true, 0}; - } - return {false, static_cast<uint32_t>(size)}; -} - -uint32_t sizeOfTensorData(uint32_t sizeOfElement, const std::vector<uint32_t>& dimensions) { - const auto [overflow, size] = sizeOfTensorDataHelper(sizeOfElement, dimensions); - CHECK(!overflow); - return size; -} - -bool nonExtensionOperandSizeOfDataOverflowsUInt32(OperandType type, - const std::vector<uint32_t>& dimensions) { - CHECK(!isExtension(type)) << "Size of extension operand data is unknown"; - int n = static_cast<int>(type); - uint32_t sizeOfElement = tableLookup(kSizeOfDataType, kSizeOfDataTypeOEM, n); - return tableLookup(kScalarDataType, kScalarDataTypeOEM, n) - ? 
false - : sizeOfTensorDataOverflowsUInt32(sizeOfElement, dimensions); -} - -bool sizeOfTensorDataOverflowsUInt32(uint32_t sizeOfElement, - const std::vector<uint32_t>& dimensions) { - return sizeOfTensorDataHelper(sizeOfElement, dimensions).first; -} - -bool tensorHasUnspecifiedDimensions(int type, const uint32_t* dim, uint32_t dimCount) { - if (!isExtensionOperandType(type)) { - CHECK(!nonExtensionOperandTypeIsScalar(type)) - << "A scalar type can never have unspecified dimensions"; - } - return dimCount == 0 || std::find(dim, dim + dimCount, 0) != (dim + dimCount); -} - -bool tensorHasUnspecifiedDimensions(OperandType type, const std::vector<uint32_t>& dimensions) { - return tensorHasUnspecifiedDimensions(static_cast<int>(type), dimensions.data(), - dimensions.size()); -} - -bool tensorHasUnspecifiedDimensions(const ANeuralNetworksOperandType* type) { - return tensorHasUnspecifiedDimensions(type->type, type->dimensions, type->dimensionCount); -} - -bool tensorHasUnspecifiedDimensions(const Operand& operand) { - return tensorHasUnspecifiedDimensions(operand.type, operand.dimensions); -} - -uint32_t alignBytesNeeded(uint32_t index, size_t length) { - uint32_t alignment = getAlignmentForLength(length); - uint32_t pattern = alignment - 1; - uint32_t extra = (~(index - 1)) & pattern; - return extra; -} - -void logModelToInfo(const Model& model) { - LOG(INFO) << model; -} - -static bool validateScalarDimensions(const ANeuralNetworksOperandType& type, const char* tag) { - NN_RET_CHECK_EQ(type.dimensionCount, 0u) << tag << " invalid dimensions for scalar type"; - NN_RET_CHECK(type.dimensions == nullptr) << tag << " invalid dimensions for scalar type"; - return true; -} - -static bool validateQuant8AsymmParams(const ANeuralNetworksOperandType& type, const char* tag) { - NN_RET_CHECK(0 <= type.zeroPoint && type.zeroPoint <= 255) - << tag << " invalid zeroPoint: " << type.zeroPoint; - NN_RET_CHECK_GT(type.scale, 0.f) << tag << " invalid scale"; - return true; -} - -static 
bool validateQuant8AsymmSignedParams(const ANeuralNetworksOperandType& type, - const char* tag) { - NN_RET_CHECK(-128 <= type.zeroPoint && type.zeroPoint <= 127) - << tag << " invalid zeroPoint: " << type.zeroPoint; - NN_RET_CHECK_GT(type.scale, 0.f) << tag << " invalid scale"; - return true; -} - -static bool validateQuant8SymmParams(const ANeuralNetworksOperandType& type, const char* tag) { - NN_RET_CHECK_EQ(type.zeroPoint, 0) << tag << " invalid zeroPoint: " << type.zeroPoint; - NN_RET_CHECK_GT(type.scale, 0.f) << tag << " invalid scale"; - return true; -} - -static bool validateQuant16AsymmParams(const ANeuralNetworksOperandType& type, const char* tag) { - NN_RET_CHECK(0 <= type.zeroPoint && type.zeroPoint <= 65535) - << tag << " invalid zeroPoint: " << type.zeroPoint; - NN_RET_CHECK_GT(type.scale, 0.f) << tag << " invalid scale"; - return true; -} - -static bool validateQuantSymmParams(const ANeuralNetworksOperandType& type, const char* tag) { - NN_RET_CHECK_EQ(type.zeroPoint, 0) << tag << " zeroPoint is not zero"; - NN_RET_CHECK_GT(type.scale, 0.f) << tag << " invalid scale"; - return true; -} - -static bool validateNoQuantParams(const ANeuralNetworksOperandType& type, const char* tag) { - NN_RET_CHECK_EQ(type.zeroPoint, 0) << tag << " zeroPoint is not zero"; - NN_RET_CHECK_EQ(type.scale, 0.f) << tag << " scale is not zero"; - return true; -} - -static bool validateTensorDimensions( - const ANeuralNetworksOperandType& type, - const Extension::OperandTypeInformation* const extensionOperandTypeInfo, const char* tag, - bool allowPartial) { - if (!allowPartial) { - NN_RET_CHECK_GT(type.dimensionCount, 0u) << tag << " invalid operand dimensions"; - } - uint64_t size = - isExtensionOperandType(type.type) - ? 
extensionOperandTypeInfo->byteSize - : tableLookup(kSizeOfDataType, kSizeOfDataTypeOEM, static_cast<int>(type.type)); - constexpr uint64_t kMaxSize = std::numeric_limits<uint32_t>::max(); - for (uint32_t i = 0; i < type.dimensionCount; i++) { - if (!allowPartial) { - NN_RET_CHECK_NE(type.dimensions[i], 0u) << tag << " invalid operand dimensions"; - } - if (type.dimensions[i] != 0) { - size *= type.dimensions[i]; - NN_RET_CHECK_LE(size, kMaxSize) << tag << " operand byte size exceeds " << kMaxSize; - } - } - return true; -} - -static bool validateOperandTypeHelper( - const ANeuralNetworksOperandType& type, - const Extension::OperandTypeInformation* const extensionOperandTypeInfo, const char* tag, - bool allowPartial) { - NN_RET_CHECK_EQ(type.dimensionCount == 0, type.dimensions == nullptr); - if (isExtensionOperandType(type.type)) { - NN_RET_CHECK(extensionOperandTypeInfo != nullptr); - if (extensionOperandTypeInfo->isTensor) { - NN_RET_CHECK( - validateTensorDimensions(type, extensionOperandTypeInfo, tag, allowPartial)); - } else { - NN_RET_CHECK(validateScalarDimensions(type, tag)); - } - return validateNoQuantParams(type, tag); - } - - NN_RET_CHECK(extensionOperandTypeInfo == nullptr); - NN_RET_CHECK(validCode(kNumberOfDataTypes, kNumberOfDataTypesOEM, type.type)) - << tag << " invalid OperandType: " << type.type; - - bool isScalar = tableLookup(kScalarDataType, kScalarDataTypeOEM, type.type); - if (isScalar) { - NN_RET_CHECK(validateScalarDimensions(type, tag)); - if (type.type != ANEURALNETWORKS_OEM_SCALAR) { // Historically, we have allowed OEM types - // to use quantization parameters. 
- NN_RET_CHECK(validateNoQuantParams(type, tag)); - } - } else { - NN_RET_CHECK(validateTensorDimensions(type, extensionOperandTypeInfo, tag, allowPartial)); - if (type.type == ANEURALNETWORKS_TENSOR_QUANT8_ASYMM) { - NN_RET_CHECK(validateQuant8AsymmParams(type, tag)); - } else if (type.type == ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED) { - NN_RET_CHECK(validateQuant8AsymmSignedParams(type, tag)); - } else if (type.type == ANEURALNETWORKS_TENSOR_QUANT8_SYMM) { - NN_RET_CHECK(validateQuant8SymmParams(type, tag)); - } else if (type.type == ANEURALNETWORKS_TENSOR_QUANT16_ASYMM) { - NN_RET_CHECK(validateQuant16AsymmParams(type, tag)); - } else if (type.type == ANEURALNETWORKS_TENSOR_QUANT16_SYMM) { - NN_RET_CHECK(validateQuantSymmParams(type, tag)); - } else if (type.type == ANEURALNETWORKS_TENSOR_INT32) { - // TODO(b/119869082): TENSOR_INT32 should not use quantization parameters. - } else if (type.type == ANEURALNETWORKS_TENSOR_OEM_BYTE) { - // Historically, we have allowed OEM types to use quantization parameters. - } else { - NN_RET_CHECK(validateNoQuantParams(type, tag)); - } - } - - return true; -} - -int validateOperandType(const ANeuralNetworksOperandType& type, - const Extension::OperandTypeInformation* const extensionOperandTypeInfo, - const char* tag, bool allowPartial) { - return validateOperandTypeHelper(type, extensionOperandTypeInfo, tag, allowPartial) - ? 
ANEURALNETWORKS_NO_ERROR - : ANEURALNETWORKS_BAD_DATA; -} - -int validateOperandList(uint32_t count, const uint32_t* list, uint32_t operandCount, - const char* tag) { - for (uint32_t i = 0; i < count; i++) { - if (list[i] >= operandCount) { - LOG(ERROR) << tag << " invalid operand index at " << i << " = " << list[i] - << ", operandCount " << operandCount; - return ANEURALNETWORKS_BAD_DATA; - } - } - return ANEURALNETWORKS_NO_ERROR; -} - -int validateOperationOperandTypes(const std::vector<Operand>& operands, uint32_t inOperandCount, - const uint32_t* inOperandIndexes, - const std::vector<OperandType>& inExpectedTypes, - uint32_t outOperandCount, const uint32_t* outOperandIndexes, - const std::vector<OperandType>& outExpectedInTypes) { - if (inOperandCount != static_cast<uint32_t>(inExpectedTypes.size()) || - outOperandCount != static_cast<uint32_t>(outExpectedInTypes.size())) { - LOG(ERROR) << "Wrong operand count: expected " << inExpectedTypes.size() << " inputs and " - << outExpectedInTypes.size() << " outputs," - << "got " << inOperandCount << " inputs and " << outOperandCount << " outputs"; - return ANEURALNETWORKS_BAD_DATA; - } - for (uint32_t i = 0; i < inOperandCount; i++) { - if (operands[inOperandIndexes[i]].type != inExpectedTypes[i]) { - LOG(ERROR) << "Invalid input tensor type " << operands[inOperandIndexes[i]].type - << " for input " << i << ", expected " << inExpectedTypes[i]; - return ANEURALNETWORKS_BAD_DATA; - } - } - for (uint32_t i = 0; i < outOperandCount; i++) { - if (operands[outOperandIndexes[i]].type != outExpectedInTypes[i]) { - LOG(ERROR) << "Invalid output tensor type " << operands[outOperandIndexes[i]].type - << " for input " << i << ", expected " << outExpectedInTypes[i]; - return ANEURALNETWORKS_BAD_DATA; - } - } - - return ANEURALNETWORKS_NO_ERROR; -} - -static int validateHalVersion(ANeuralNetworksOperationType opType, HalVersion halVersion, - HalVersion minSupportedHalVersion) { - if (halVersion < minSupportedHalVersion) { - 
LOG(ERROR) << "The given inputs and outputs for operation " << opType - << " are only supported in " << minSupportedHalVersion - << " and later (validating using " << halVersion << ")"; - return ANEURALNETWORKS_BAD_DATA; - } - return ANEURALNETWORKS_NO_ERROR; -} - -// Checks if two operands have the same types, ranks (if specified), dimensions -// (if specified), scales, zeroPoints, and extraParams. -static bool compatible(const Operand& a, const Operand& b) { - NN_RET_CHECK(a.type == b.type) << a.type << " != " << b.type; - if (a.dimensions.size() != 0 && b.dimensions.size() != 0) { - NN_RET_CHECK_EQ(a.dimensions.size(), b.dimensions.size()) << "Incompatible dimensions"; - for (uint32_t i = 0, n = a.dimensions.size(); i < n; ++i) { - if (a.dimensions[i] != 0 && b.dimensions[i] != 0) { - NN_RET_CHECK_EQ(a.dimensions[i], b.dimensions[i]) << "Incompatible dimensions"; - } - } - } - NN_RET_CHECK_EQ(a.scale, b.scale); - NN_RET_CHECK_EQ(a.zeroPoint, b.zeroPoint); - NN_RET_CHECK(a.extraParams == b.extraParams) << a.extraParams << " != " << b.extraParams; - return true; -} - -static bool validateConditionOperand(const Operand& operand) { - NN_RET_CHECK(operand.type == OperandType::TENSOR_BOOL8) - << "Unexpected condition operand type: " << operand.type; - NN_RET_CHECK_EQ(operand.dimensions.size(), 1u) << "Condition operand must be a singleton"; - NN_RET_CHECK_EQ(operand.dimensions[0], 1u) << "Condition operand must be a singleton"; - return true; -} - -static void checkSubgraphValidationHelper(const SubgraphValidationHelper& helper) { - CHECK(helper.isValidSubgraphReference != nullptr); - CHECK(helper.getSubgraphInputCount != nullptr); - CHECK(helper.getSubgraphOutputCount != nullptr); - CHECK(helper.getSubgraphInputOperand != nullptr); - CHECK(helper.getSubgraphOutputOperand != nullptr); -} - -static bool validateIfOperation(uint32_t inputCount, const uint32_t* inputs, uint32_t outputCount, - const uint32_t* outputs, const std::vector<Operand>& operands, - const 
SubgraphValidationHelper& helper) { - namespace op = operation_if; - checkSubgraphValidationHelper(helper); - NN_RET_CHECK_GE(inputCount, 3u) << "ANEURALNETWORKS_IF must have at least 3 inputs"; - NN_RET_CHECK_GE(outputCount, 1u) << "ANEURALNETWORKS_IF must have at least 1 output"; - auto validateBranchOperand = [&](const Operand& branchModelOperand) -> bool { - NN_RET_CHECK(helper.isValidSubgraphReference(branchModelOperand)) - << "Operand is not a valid subgraph reference"; - const uint32_t branchModelInputCount = helper.getSubgraphInputCount(branchModelOperand); - const uint32_t branchModelOutputCount = helper.getSubgraphOutputCount(branchModelOperand); - NN_RET_CHECK_EQ(inputCount, op::kFirstInput + branchModelInputCount); - NN_RET_CHECK_EQ(outputCount, branchModelOutputCount); - for (uint32_t i = 0; i < branchModelInputCount; ++i) { - const Operand& innerOperand = *helper.getSubgraphInputOperand(branchModelOperand, i); - const Operand& outerOperand = operands[inputs[op::kFirstInput + i]]; - NN_RET_CHECK(compatible(innerOperand, outerOperand)); - } - for (uint32_t i = 0; i < branchModelOutputCount; ++i) { - const Operand& innerOperand = *helper.getSubgraphOutputOperand(branchModelOperand, i); - const Operand& outerOperand = operands[outputs[i]]; - NN_RET_CHECK(compatible(innerOperand, outerOperand)); - } - return true; - }; - NN_RET_CHECK(validateConditionOperand(operands[inputs[op::kCondBoolOperand]])) - << "Validation failed for IF condition operand"; - NN_RET_CHECK(validateBranchOperand(operands[inputs[op::kThenModelOperand]])) - << "Validation failed for IF then model"; - NN_RET_CHECK(validateBranchOperand(operands[inputs[op::kElseModelOperand]])) - << "Validation failed for IF else model"; - return true; -} - -static bool validateControlFlowOperandUnknownSize(const SubgraphValidationHelper& helper, - const Operand& operand) { - if (!helper.allowControlFlowOperationWithOperandOfUnknownSize && !isExtension(operand.type)) { - 
NN_RET_CHECK_NE(nonExtensionOperandSizeOfData(operand.type, operand.dimensions), 0u); - } - return true; -} - -static bool validateWhileOperation(uint32_t inputCount, const uint32_t* inputs, - uint32_t outputCount, const uint32_t* outputs, - const std::vector<Operand>& operands, - const SubgraphValidationHelper& helper) { - // Let the loop have - // - m >= 1 input-output operands, - // - k >= 0 state-only operands, and - // - n >= 0 input-only operands. - // Then - // - the WHILE loop operation has (2 + m + k + n) inputs and m outputs. - // - the condition model has (m + k + n) inputs and 1 output. - // - the body model has (m + k + n) inputs and (m + k) outputs. - namespace op = operation_while; - checkSubgraphValidationHelper(helper); - NN_RET_CHECK_GE(inputCount, 3u) << "ANEURALNETWORKS_WHILE must have at least 3 inputs"; - NN_RET_CHECK_GE(outputCount, 1u) << "ANEURALNETWORKS_WHILE must have at least 1 output"; - auto validateCondOperand = [&](const Operand& condModelOperand) -> bool { - NN_RET_CHECK(helper.isValidSubgraphReference(condModelOperand)) - << "Operand is not a valid subgraph reference"; - const uint32_t condModelInputCount = helper.getSubgraphInputCount(condModelOperand); - const uint32_t condModelOutputCount = helper.getSubgraphOutputCount(condModelOperand); - NN_RET_CHECK_EQ(inputCount, op::kFirstInput + condModelInputCount); - NN_RET_CHECK_EQ(condModelOutputCount, 1u); - for (uint32_t i = 0; i < condModelInputCount; ++i) { - const Operand& innerOperand = *helper.getSubgraphInputOperand(condModelOperand, i); - const Operand& outerOperand = operands[inputs[op::kFirstInput + i]]; - NN_RET_CHECK(compatible(innerOperand, outerOperand)); - NN_RET_CHECK(validateControlFlowOperandUnknownSize(helper, innerOperand)); - NN_RET_CHECK(validateControlFlowOperandUnknownSize(helper, outerOperand)); - } - NN_RET_CHECK( - validateConditionOperand(*helper.getSubgraphOutputOperand(condModelOperand, 0))); - return true; - }; - auto validateBodyOperand = [&](const 
Operand& bodyModelOperand) -> bool { - NN_RET_CHECK(helper.isValidSubgraphReference(bodyModelOperand)) - << "Operand is not a valid subgraph reference"; - const uint32_t bodyModelInputCount = helper.getSubgraphInputCount(bodyModelOperand); - const uint32_t bodyModelOutputCount = helper.getSubgraphOutputCount(bodyModelOperand); - NN_RET_CHECK_EQ(inputCount, op::kFirstInput + bodyModelInputCount); - NN_RET_CHECK_GE(bodyModelOutputCount, outputCount); - NN_RET_CHECK_GE(bodyModelInputCount, bodyModelOutputCount); - const uint32_t inputOutputCount = outputCount; - const uint32_t stateOnlyCount = bodyModelOutputCount - inputOutputCount; - const uint32_t inputOnlyCount = bodyModelInputCount - bodyModelOutputCount; - for (uint32_t i = 0, n = inputOutputCount + stateOnlyCount + inputOnlyCount; i < n; ++i) { - const Operand& innerOperand = *helper.getSubgraphInputOperand(bodyModelOperand, i); - const Operand& outerOperand = operands[inputs[op::kFirstInput + i]]; - NN_RET_CHECK(compatible(innerOperand, outerOperand)); - NN_RET_CHECK(validateControlFlowOperandUnknownSize(helper, innerOperand)); - NN_RET_CHECK(validateControlFlowOperandUnknownSize(helper, outerOperand)); - } - for (uint32_t i = 0; i < inputOutputCount; ++i) { - const Operand& innerOperand = *helper.getSubgraphOutputOperand(bodyModelOperand, i); - const Operand& outerOperand = operands[outputs[i]]; - NN_RET_CHECK(compatible(innerOperand, outerOperand)); - NN_RET_CHECK(validateControlFlowOperandUnknownSize(helper, outerOperand)); - } - for (uint32_t i = 0, n = inputOutputCount + stateOnlyCount; i < n; ++i) { - const Operand& inputOperand = *helper.getSubgraphInputOperand(bodyModelOperand, i); - const Operand& outputOperand = *helper.getSubgraphOutputOperand(bodyModelOperand, i); - NN_RET_CHECK(compatible(inputOperand, outputOperand)); - NN_RET_CHECK(validateControlFlowOperandUnknownSize(helper, outputOperand)); - } - return true; - }; - NN_RET_CHECK(validateCondOperand(operands[inputs[op::kCondModelOperand]])) - 
<< "Validation failed for WHILE condition model"; - NN_RET_CHECK(validateBodyOperand(operands[inputs[op::kBodyModelOperand]])) - << "Validation failed for WHILE body model"; - return true; -} - -int validateOperation(ANeuralNetworksOperationType opType, uint32_t inputCount, - const uint32_t* inputIndexes, uint32_t outputCount, - const uint32_t* outputIndexes, const std::vector<Operand>& operands, - HalVersion halVersion, const SubgraphValidationHelper& helper) { - NN_RETURN_IF_ERROR(validateOperandList(inputCount, inputIndexes, - static_cast<uint32_t>(operands.size()), - "ANeuralNetworksModel_addOperation inputs")); - NN_RETURN_IF_ERROR(validateOperandList(outputCount, outputIndexes, - static_cast<uint32_t>(operands.size()), - "ANeuralNetworksModel_addOperation outputs")); - - if (isExtensionOperationType(opType)) { - if (halVersion < HalVersion::V1_2) { - LOG(ERROR) - << "Extension operations are supported since HAL version 1.2, validating using " - << halVersion; - return ANEURALNETWORKS_BAD_DATA; - } - // There is no other validation we can do for an extension operation. 
- return ANEURALNETWORKS_NO_ERROR; - } - - auto logInvalidInOutNumber = [opType, inputCount, outputCount](int expIn, int expOut) { - LOG(ERROR) << "Invalid number of input operands (" << inputCount << ", expected " << expIn - << ") or output operands (" << outputCount << ", expected " << expOut - << ") for operation " << opType; - }; - - switch (opType) { - case ANEURALNETWORKS_OEM_OPERATION: { - return ANEURALNETWORKS_NO_ERROR; - } - case ANEURALNETWORKS_RESHAPE: { - if (inputCount != 2 || outputCount != 1) { - logInvalidInOutNumber(2, 1); - return ANEURALNETWORKS_BAD_DATA; - } - auto inputType = operands[inputIndexes[0]].type; - std::vector<OperandType> inExpectedTypes; - std::vector<OperandType> outExpectedTypes; - if (inputType == OperandType::TENSOR_FLOAT32) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0)); - inExpectedTypes = {OperandType::TENSOR_FLOAT32, OperandType::TENSOR_INT32}; - outExpectedTypes = {OperandType::TENSOR_FLOAT32}; - } else if (inputType == OperandType::TENSOR_FLOAT16) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); - inExpectedTypes = {OperandType::TENSOR_FLOAT16, OperandType::TENSOR_INT32}; - outExpectedTypes = {OperandType::TENSOR_FLOAT16}; - } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0)); - inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM, OperandType::TENSOR_INT32}; - outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM}; - } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3)); - inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM_SIGNED, - OperandType::TENSOR_INT32}; - outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM_SIGNED}; - } else { - LOG(ERROR) << "Unsupported input tensor type for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - const auto inputRank = 
operands[inputIndexes[0]].dimensions.size(); - if (inputRank > 4) { - LOG(ERROR) << "Unsupported input tensor rank for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - return validateOperationOperandTypes(operands, inputCount, inputIndexes, - inExpectedTypes, outputCount, outputIndexes, - outExpectedTypes); - } - case ANEURALNETWORKS_DEPTH_TO_SPACE: { - if ((inputCount != 3 && inputCount != 2) || outputCount != 1) { - LOG(ERROR) << "Invalid number of input operands (" << inputCount - << ", expected 3 or 2) or output operands (" << outputCount - << ", expected 1) for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - auto inputType = operands[inputIndexes[0]].type; - std::vector<OperandType> inExpectedTypes; - std::vector<OperandType> outExpectedTypes; - if (inputType == OperandType::TENSOR_FLOAT32) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0)); - inExpectedTypes = {OperandType::TENSOR_FLOAT32, OperandType::INT32}; - outExpectedTypes = {OperandType::TENSOR_FLOAT32}; - } else if (inputType == OperandType::TENSOR_FLOAT16) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); - inExpectedTypes = {OperandType::TENSOR_FLOAT16, OperandType::INT32}; - outExpectedTypes = {OperandType::TENSOR_FLOAT16}; - } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0)); - inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM, OperandType::INT32}; - outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM}; - } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3)); - inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM_SIGNED, OperandType::INT32}; - outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM_SIGNED}; - } else { - LOG(ERROR) << "Unsupported input tensor type for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - 
if (inputCount == 3) { - inExpectedTypes.push_back(OperandType::BOOL); - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); - } else { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0)); - } - return validateOperationOperandTypes(operands, inputCount, inputIndexes, - inExpectedTypes, outputCount, outputIndexes, - outExpectedTypes); - } - case ANEURALNETWORKS_SPACE_TO_DEPTH: { - if ((inputCount != 3 && inputCount != 2) || outputCount != 1) { - LOG(ERROR) << "Invalid number of input operands (" << inputCount - << ", expected 3 or 2) or output operands (" << outputCount - << ", expected 1) for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - auto inputType = operands[inputIndexes[0]].type; - std::vector<OperandType> inExpectedTypes; - std::vector<OperandType> outExpectedTypes; - if (inputType == OperandType::TENSOR_FLOAT32) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0)); - inExpectedTypes = {OperandType::TENSOR_FLOAT32, OperandType::INT32}; - outExpectedTypes = {OperandType::TENSOR_FLOAT32}; - } else if (inputType == OperandType::TENSOR_FLOAT16) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); - inExpectedTypes = {OperandType::TENSOR_FLOAT16, OperandType::INT32}; - outExpectedTypes = {OperandType::TENSOR_FLOAT16}; - } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0)); - inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM, OperandType::INT32}; - outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM}; - } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3)); - inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM_SIGNED, OperandType::INT32}; - outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM_SIGNED}; - } else { - LOG(ERROR) << "Unsupported input tensor type 
for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - if (inputCount == 3) { - inExpectedTypes.push_back(OperandType::BOOL); - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); - } else { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0)); - } - return validateOperationOperandTypes(operands, inputCount, inputIndexes, - inExpectedTypes, outputCount, outputIndexes, - outExpectedTypes); - } - case ANEURALNETWORKS_EMBEDDING_LOOKUP: { - if (inputCount != 2 || outputCount != 1) { - logInvalidInOutNumber(2, 1); - return ANEURALNETWORKS_BAD_DATA; - } - auto inputType = operands[inputIndexes[1]].type; - if (inputType != OperandType::TENSOR_FLOAT16 && - inputType != OperandType::TENSOR_FLOAT32 && - inputType != OperandType::TENSOR_INT32 && - inputType != OperandType::TENSOR_QUANT8_ASYMM && - inputType != OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - LOG(ERROR) << "Unsupported input tensor type for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - std::vector<OperandType> inExpectedTypes = {OperandType::TENSOR_INT32, inputType}; - std::vector<OperandType> outExpectedTypes = {inputType}; - if (inputType == OperandType::TENSOR_FLOAT16 || - inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3)); - } else if (inputType == OperandType::TENSOR_INT32 || - inputType == OperandType::TENSOR_QUANT8_ASYMM) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); - } else { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0)); - } - return validateOperationOperandTypes(operands, inputCount, inputIndexes, - inExpectedTypes, outputCount, outputIndexes, - outExpectedTypes); - } - case ANEURALNETWORKS_HASHTABLE_LOOKUP: { - if (inputCount != 3 || outputCount != 2) { - logInvalidInOutNumber(3, 2); - return ANEURALNETWORKS_BAD_DATA; - } - auto inputType = 
operands[inputIndexes[2]].type; - if (inputType != OperandType::TENSOR_FLOAT32 && - inputType != OperandType::TENSOR_INT32 && - inputType != OperandType::TENSOR_QUANT8_ASYMM) { - LOG(ERROR) << "Unsupported input tensor type for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - std::vector<OperandType> inExpectedTypes = {OperandType::TENSOR_INT32, - OperandType::TENSOR_INT32, inputType}; - std::vector<OperandType> outExpectedTypes = {inputType, - OperandType::TENSOR_QUANT8_ASYMM}; - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0)); - return validateOperationOperandTypes(operands, inputCount, inputIndexes, - inExpectedTypes, outputCount, outputIndexes, - outExpectedTypes); - } - case ANEURALNETWORKS_LSH_PROJECTION: { - if (inputCount != 4 || outputCount != 1) { - logInvalidInOutNumber(4, 1); - return ANEURALNETWORKS_BAD_DATA; - } - auto inputType = operands[inputIndexes[1]].type; - if (inputType != OperandType::TENSOR_FLOAT16 && - inputType != OperandType::TENSOR_FLOAT32 && - inputType != OperandType::TENSOR_INT32 && - inputType != OperandType::TENSOR_QUANT8_ASYMM) { - LOG(ERROR) << "Unsupported input tensor type for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - auto hashType = operands[inputIndexes[0]].type; - std::vector<OperandType> inExpectedTypes; - if (hashType == OperandType::TENSOR_FLOAT16) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); - inExpectedTypes = { - OperandType::TENSOR_FLOAT16, - inputType, - OperandType::TENSOR_FLOAT16, - OperandType::INT32, - }; - } else if (hashType == OperandType::TENSOR_FLOAT32) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0)); - inExpectedTypes = { - OperandType::TENSOR_FLOAT32, - inputType, - OperandType::TENSOR_FLOAT32, - OperandType::INT32, - }; - } else { - LOG(ERROR) << "Unsupported hash tensor type for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - std::vector<OperandType> 
outExpectedTypes = {OperandType::TENSOR_INT32}; - return validateOperationOperandTypes(operands, inputCount, inputIndexes, - inExpectedTypes, outputCount, outputIndexes, - outExpectedTypes); - } - case ANEURALNETWORKS_BIDIRECTIONAL_SEQUENCE_LSTM: { - const uint32_t kNumOutputs = 2; - const uint32_t kNumOutputsMerged = 1; - const uint32_t kNumOutputsWithState = 6; - const uint32_t kNumOutputsMergedWithState = 5; - if (inputCount != 61 || - (outputCount != kNumOutputs && outputCount != kNumOutputsMerged && - outputCount != kNumOutputsWithState && - outputCount != kNumOutputsMergedWithState)) { - LOG(ERROR) << "Invalid number of input operands (" << inputCount - << ", expected 61) or output operands (" << outputCount - << ", expected 1, 2, 5 or 6) for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - - std::vector<OperandType> inExpectedTypes; - auto inputType = operands[inputIndexes[0]].type; - if (inputType != OperandType::TENSOR_FLOAT32 && - inputType != OperandType::TENSOR_FLOAT16) { - LOG(ERROR) << "Unsupported input tensor type for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - - inExpectedTypes = {}; - for (int i = 0; i < 48; ++i) { - inExpectedTypes.push_back(inputType); - } - inExpectedTypes.push_back(OperandType::INT32); - inExpectedTypes.push_back(inputType == OperandType::TENSOR_FLOAT32 - ? OperandType::FLOAT32 - : OperandType::FLOAT16); - inExpectedTypes.push_back(inputType == OperandType::TENSOR_FLOAT32 - ? 
OperandType::FLOAT32 - : OperandType::FLOAT16); - inExpectedTypes.push_back(OperandType::BOOL); - inExpectedTypes.push_back(OperandType::BOOL); - for (int i = 0; i < 8; ++i) { - inExpectedTypes.push_back(inputType); - } - - HalVersion minSupportedHalVersion = HalVersion::V1_2; - if (outputCount == kNumOutputsWithState || outputCount == kNumOutputsMergedWithState) { - minSupportedHalVersion = HalVersion::V1_3; - } - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, minSupportedHalVersion)); - std::vector<OperandType> outExpectedTypes(outputCount, inputType); - auto status = validateOperationOperandTypes(operands, inputCount, inputIndexes, - inExpectedTypes, outputCount, outputIndexes, - outExpectedTypes); - return status; - } - case ANEURALNETWORKS_LSTM: { - if ((inputCount != 23 && inputCount != 27) || outputCount != 4) { - LOG(ERROR) << "Invalid number of input operands (" << inputCount - << ", expected 23 or 27) or output operands (" << outputCount - << ", expected 4) for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - std::vector<OperandType> inExpectedTypes; - std::vector<OperandType> outExpectedTypes; - auto inputType = operands[inputIndexes[0]].type; - if (inputType != OperandType::TENSOR_FLOAT32 && - inputType != OperandType::TENSOR_FLOAT16) { - LOG(ERROR) << "Unsupported input tensor type for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - - inExpectedTypes = {inputType, inputType, inputType, inputType, inputType, - inputType, inputType, inputType, inputType, inputType, - inputType, inputType, inputType, inputType, inputType, - inputType, inputType, inputType, inputType, inputType, - OperandType::INT32}; - if (inputType == OperandType::TENSOR_FLOAT32) { - inExpectedTypes.push_back(OperandType::FLOAT32); - inExpectedTypes.push_back(OperandType::FLOAT32); - } else { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); - inExpectedTypes.push_back(OperandType::FLOAT16); - 
inExpectedTypes.push_back(OperandType::FLOAT16); - } - - outExpectedTypes = {inputType, inputType, inputType, inputType}; - if (inputCount == 23) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0)); - } else { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); - for (int i = 0; i < 4; ++i) { - inExpectedTypes.push_back(inputType); - } - } - return validateOperationOperandTypes(operands, inputCount, inputIndexes, - inExpectedTypes, outputCount, outputIndexes, - outExpectedTypes); - } - case ANEURALNETWORKS_QUANTIZED_16BIT_LSTM: { - if (inputCount != 15 || outputCount != 2) { - logInvalidInOutNumber(15, 2); - return ANEURALNETWORKS_BAD_DATA; - } - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); - std::vector<OperandType> inExpectedTypes = { - OperandType::TENSOR_QUANT8_ASYMM, OperandType::TENSOR_QUANT8_ASYMM, - OperandType::TENSOR_QUANT8_ASYMM, OperandType::TENSOR_QUANT8_ASYMM, - OperandType::TENSOR_QUANT8_ASYMM, OperandType::TENSOR_QUANT8_ASYMM, - OperandType::TENSOR_QUANT8_ASYMM, OperandType::TENSOR_QUANT8_ASYMM, - OperandType::TENSOR_QUANT8_ASYMM, OperandType::TENSOR_INT32, - OperandType::TENSOR_INT32, OperandType::TENSOR_INT32, - OperandType::TENSOR_INT32, OperandType::TENSOR_QUANT16_SYMM, - OperandType::TENSOR_QUANT8_ASYMM}; - std::vector<OperandType> outExpectedTypes = {OperandType::TENSOR_QUANT16_SYMM, - OperandType::TENSOR_QUANT8_ASYMM}; - return validateOperationOperandTypes(operands, inputCount, inputIndexes, - inExpectedTypes, outputCount, outputIndexes, - outExpectedTypes); - } - case ANEURALNETWORKS_RANDOM_MULTINOMIAL: { - if (inputCount != 3 || outputCount != 1) { - logInvalidInOutNumber(3, 1); - return ANEURALNETWORKS_BAD_DATA; - } - OperandType inputType = operands[inputIndexes[0]].type; - std::vector<OperandType> inExpectedTypes; - if (inputType == OperandType::TENSOR_FLOAT32 || - inputType == OperandType::TENSOR_FLOAT16) { - 
NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); - inExpectedTypes = { - inputType, - OperandType::INT32, - OperandType::TENSOR_INT32, - }; - } else { - LOG(ERROR) << "Unsupported input tensor type for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - std::vector<OperandType> outExpectedTypes = {OperandType::TENSOR_INT32}; - return validateOperationOperandTypes(operands, inputCount, inputIndexes, - inExpectedTypes, outputCount, outputIndexes, - outExpectedTypes); - } - case ANEURALNETWORKS_RNN: { - if (inputCount != 6 || outputCount != 2) { - logInvalidInOutNumber(6, 2); - return ANEURALNETWORKS_BAD_DATA; - } - OperandType inputType = operands[inputIndexes[0]].type; - std::vector<OperandType> inExpectedTypes; - std::vector<OperandType> outExpectedTypes; - if (inputType == OperandType::TENSOR_FLOAT32) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0)); - inExpectedTypes = { - OperandType::TENSOR_FLOAT32, OperandType::TENSOR_FLOAT32, - OperandType::TENSOR_FLOAT32, OperandType::TENSOR_FLOAT32, - OperandType::TENSOR_FLOAT32, OperandType::INT32, - }; - outExpectedTypes = { - OperandType::TENSOR_FLOAT32, - OperandType::TENSOR_FLOAT32, - }; - } else if (inputType == OperandType::TENSOR_FLOAT16) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); - inExpectedTypes = { - OperandType::TENSOR_FLOAT16, OperandType::TENSOR_FLOAT16, - OperandType::TENSOR_FLOAT16, OperandType::TENSOR_FLOAT16, - OperandType::TENSOR_FLOAT16, OperandType::INT32, - }; - outExpectedTypes = { - OperandType::TENSOR_FLOAT16, - OperandType::TENSOR_FLOAT16, - }; - } else { - LOG(ERROR) << "Unsupported input tensor type for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - return validateOperationOperandTypes(operands, inputCount, inputIndexes, - inExpectedTypes, outputCount, outputIndexes, - outExpectedTypes); - } - case ANEURALNETWORKS_SVDF: { - if (inputCount != 7 || outputCount != 2) { - 
logInvalidInOutNumber(7, 2); - return ANEURALNETWORKS_BAD_DATA; - } - OperandType inputType = operands[inputIndexes[0]].type; - if (inputType == OperandType::TENSOR_FLOAT32) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0)); - - } else if (inputType == OperandType::TENSOR_FLOAT16) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); - } else { - LOG(ERROR) << "Unsupported input tensor type for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - std::vector<OperandType> inExpectedTypes = { - inputType, inputType, inputType, inputType, - inputType, OperandType::INT32, OperandType::INT32, - }; - std::vector<OperandType> outExpectedTypes = {inputType, inputType}; - return validateOperationOperandTypes(operands, inputCount, inputIndexes, - inExpectedTypes, outputCount, outputIndexes, - outExpectedTypes); - } - case ANEURALNETWORKS_BATCH_TO_SPACE_ND: { - if ((inputCount != 3 && inputCount != 2) || outputCount != 1) { - LOG(ERROR) << "Invalid number of input operands (" << inputCount - << ", expected 3 or 2) or output operands (" << outputCount - << ", expected 1) for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - auto inputType = operands[inputIndexes[0]].type; - std::vector<OperandType> inExpectedTypes; - std::vector<OperandType> outExpectedTypes; - if (inputType == OperandType::TENSOR_FLOAT32) { - inExpectedTypes = { - OperandType::TENSOR_FLOAT32, - OperandType::TENSOR_INT32, - }; - outExpectedTypes = {OperandType::TENSOR_FLOAT32}; - } else if (inputType == OperandType::TENSOR_FLOAT16) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); - inExpectedTypes = { - OperandType::TENSOR_FLOAT16, - OperandType::TENSOR_INT32, - }; - outExpectedTypes = {OperandType::TENSOR_FLOAT16}; - } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) { - inExpectedTypes = { - OperandType::TENSOR_QUANT8_ASYMM, - OperandType::TENSOR_INT32, - }; - outExpectedTypes = 
{OperandType::TENSOR_QUANT8_ASYMM}; - } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3)); - inExpectedTypes = { - OperandType::TENSOR_QUANT8_ASYMM_SIGNED, - OperandType::TENSOR_INT32, - }; - outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM_SIGNED}; - } else { - LOG(ERROR) << "Unsupported input tensor type for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - if (inputCount == 3) { - inExpectedTypes.push_back(OperandType::BOOL); - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); - } else { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_1)); - } - return validateOperationOperandTypes(operands, inputCount, inputIndexes, - inExpectedTypes, outputCount, outputIndexes, - outExpectedTypes); - } - case ANEURALNETWORKS_SPACE_TO_BATCH_ND: { - if ((inputCount != 4 && inputCount != 3) || outputCount != 1) { - LOG(ERROR) << "Invalid number of input operands (" << inputCount - << ", expected 4 or 3) or output operands (" << outputCount - << ", expected 1) for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - auto inputType = operands[inputIndexes[0]].type; - std::vector<OperandType> inExpectedTypes; - std::vector<OperandType> outExpectedTypes; - if (inputType == OperandType::TENSOR_FLOAT32) { - inExpectedTypes = { - OperandType::TENSOR_FLOAT32, - OperandType::TENSOR_INT32, - OperandType::TENSOR_INT32, - }; - outExpectedTypes = {OperandType::TENSOR_FLOAT32}; - } else if (inputType == OperandType::TENSOR_FLOAT16) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); - inExpectedTypes = { - OperandType::TENSOR_FLOAT16, - OperandType::TENSOR_INT32, - OperandType::TENSOR_INT32, - }; - outExpectedTypes = {OperandType::TENSOR_FLOAT16}; - } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) { - if (operands[inputIndexes[0]].zeroPoint != 0) { - 
NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); - } - inExpectedTypes = { - OperandType::TENSOR_QUANT8_ASYMM, - OperandType::TENSOR_INT32, - OperandType::TENSOR_INT32, - }; - outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM}; - } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3)); - inExpectedTypes = { - OperandType::TENSOR_QUANT8_ASYMM_SIGNED, - OperandType::TENSOR_INT32, - OperandType::TENSOR_INT32, - }; - outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM_SIGNED}; - } else { - LOG(ERROR) << "Unsupported input tensor type for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - if (inputCount == 4) { - inExpectedTypes.push_back(OperandType::BOOL); - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); - } else { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_1)); - } - return validateOperationOperandTypes(operands, inputCount, inputIndexes, - inExpectedTypes, outputCount, outputIndexes, - outExpectedTypes); - } - case ANEURALNETWORKS_PAD: { - if (inputCount != 2 || outputCount != 1) { - logInvalidInOutNumber(2, 1); - return ANEURALNETWORKS_BAD_DATA; - } - auto inputType = operands[inputIndexes[0]].type; - std::vector<OperandType> inExpectedTypes; - std::vector<OperandType> outExpectedTypes; - if (inputType == OperandType::TENSOR_FLOAT32) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_1)); - inExpectedTypes = { - OperandType::TENSOR_FLOAT32, - OperandType::TENSOR_INT32, - }; - outExpectedTypes = {OperandType::TENSOR_FLOAT32}; - } else if (inputType == OperandType::TENSOR_FLOAT16) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); - inExpectedTypes = { - OperandType::TENSOR_FLOAT16, - OperandType::TENSOR_INT32, - }; - outExpectedTypes = {OperandType::TENSOR_FLOAT16}; - } else if (inputType == 
OperandType::TENSOR_QUANT8_ASYMM || - inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3)); - } else { - if (operands[inputIndexes[0]].zeroPoint == 0) { - NN_RETURN_IF_ERROR( - validateHalVersion(opType, halVersion, HalVersion::V1_1)); - } else { - NN_RETURN_IF_ERROR( - validateHalVersion(opType, halVersion, HalVersion::V1_2)); - } - } - inExpectedTypes = { - inputType, - OperandType::TENSOR_INT32, - }; - outExpectedTypes = {inputType}; - } else { - LOG(ERROR) << "Unsupported input tensor type for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - const auto inputRank = operands[inputIndexes[0]].dimensions.size(); - if (inputRank > 4) { - LOG(ERROR) << "Unsupported input tensor rank for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - return validateOperationOperandTypes(operands, inputCount, inputIndexes, - inExpectedTypes, outputCount, outputIndexes, - outExpectedTypes); - } - case ANEURALNETWORKS_PAD_V2: { - if (inputCount != 3 || outputCount != 1) { - logInvalidInOutNumber(3, 1); - return ANEURALNETWORKS_BAD_DATA; - } - auto inputType = operands[inputIndexes[0]].type; - std::vector<OperandType> inExpectedTypes; - std::vector<OperandType> outExpectedTypes; - if (inputType == OperandType::TENSOR_FLOAT32) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); - inExpectedTypes = { - OperandType::TENSOR_FLOAT32, - OperandType::TENSOR_INT32, - OperandType::FLOAT32, - }; - outExpectedTypes = {OperandType::TENSOR_FLOAT32}; - } else if (inputType == OperandType::TENSOR_FLOAT16) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); - inExpectedTypes = { - OperandType::TENSOR_FLOAT16, - OperandType::TENSOR_INT32, - OperandType::FLOAT16, - }; - outExpectedTypes = {OperandType::TENSOR_FLOAT16}; - } else if (inputType == 
OperandType::TENSOR_QUANT8_ASYMM || - inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3)); - } else { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); - } - inExpectedTypes = { - inputType, - OperandType::TENSOR_INT32, - OperandType::INT32, - }; // TODO(b/116699425): Make it UINT8. - outExpectedTypes = {inputType}; - } else { - LOG(ERROR) << "Unsupported input tensor type for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - const auto inputRank = operands[inputIndexes[0]].dimensions.size(); - if (inputRank > 4) { - LOG(ERROR) << "Unsupported input tensor rank for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - return validateOperationOperandTypes(operands, inputCount, inputIndexes, - inExpectedTypes, outputCount, outputIndexes, - outExpectedTypes); - } - case ANEURALNETWORKS_CAST: { - if (inputCount != 1 || outputCount != 1) { - logInvalidInOutNumber(1, 1); - return ANEURALNETWORKS_BAD_DATA; - } - auto inputOperand = operands[inputIndexes[0]]; - auto outputOperand = operands[outputIndexes[0]]; - auto inputType = inputOperand.type; - auto outputType = outputOperand.type; - std::vector<OperandType> inExpectedTypes; - std::vector<OperandType> outExpectedTypes; - if ((inputType == OperandType::TENSOR_FLOAT16 || - inputType == OperandType::TENSOR_FLOAT32 || - inputType == OperandType::TENSOR_INT32 || - inputType == OperandType::TENSOR_QUANT8_ASYMM) && - (outputType == OperandType::TENSOR_FLOAT16 || - outputType == OperandType::TENSOR_FLOAT32 || - outputType == OperandType::TENSOR_INT32 || - outputType == OperandType::TENSOR_QUANT8_ASYMM)) { - inExpectedTypes = {inputType}; - outExpectedTypes = {outputType}; - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); - } else if (inputType == OperandType::TENSOR_BOOL8 || - inputType == 
OperandType::TENSOR_QUANT16_ASYMM || - inputType == OperandType::TENSOR_QUANT16_SYMM || - inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED || - inputType == OperandType::TENSOR_QUANT8_SYMM) { - inExpectedTypes = {inputType}; - outExpectedTypes = {inputType}; // Only identity CAST is supported. - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3)); - } else { - LOG(ERROR) << "Unsupported data type for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - // Validate that output shape is equal to input shape if dimensions - // are already known. - auto getNumberOfElements = [](const std::vector<uint32_t>& dims) { - if (dims.size() == 0) { - return 0; - } - return std::accumulate(dims.begin(), dims.end(), 1, std::multiplies<>()); - }; - if (inputOperand.dimensions.size() != 0 && outputOperand.dimensions.size() != 0 && - getNumberOfElements(outputOperand.dimensions) != 0 && - inputOperand.dimensions != outputOperand.dimensions) { - return ANEURALNETWORKS_BAD_DATA; - } - return validateOperationOperandTypes(operands, inputCount, inputIndexes, - inExpectedTypes, outputCount, outputIndexes, - outExpectedTypes); - } - case ANEURALNETWORKS_MEAN: { - if (inputCount != 3 || outputCount != 1) { - logInvalidInOutNumber(3, 1); - return ANEURALNETWORKS_BAD_DATA; - } - const auto inputRank = operands[inputIndexes[0]].dimensions.size(); - if (inputRank > 4) { - LOG(ERROR) << "Unsupported input tensor rank for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - auto inputType = operands[inputIndexes[0]].type; - if (inputType == OperandType::TENSOR_FLOAT32) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_1)); - } else if (inputType == OperandType::TENSOR_FLOAT16) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); - } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_1)); - } else if (inputType 
== OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3)); - } else { - LOG(ERROR) << "Unsupported input tensor type for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - std::vector<OperandType> inExpectedTypes = {inputType, OperandType::TENSOR_INT32, - OperandType::INT32}; - std::vector<OperandType> outExpectedTypes = {inputType}; - return validateOperationOperandTypes(operands, inputCount, inputIndexes, - inExpectedTypes, outputCount, outputIndexes, - outExpectedTypes); - } - case ANEURALNETWORKS_ARGMAX: - case ANEURALNETWORKS_ARGMIN: { - if (inputCount != 2 || outputCount != 1) { - logInvalidInOutNumber(2, 1); - return ANEURALNETWORKS_BAD_DATA; - } - auto inputType = operands[inputIndexes[0]].type; - std::vector<OperandType> inExpectedTypes; - std::vector<OperandType> outExpectedTypes; - if (inputType == OperandType::TENSOR_FLOAT16 || - inputType == OperandType::TENSOR_FLOAT32 || - inputType == OperandType::TENSOR_INT32 || - inputType == OperandType::TENSOR_QUANT8_ASYMM || - inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - inExpectedTypes = {inputType, OperandType::INT32}; - outExpectedTypes = {OperandType::TENSOR_INT32}; - } else { - LOG(ERROR) << "Unsupported input tensor type for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); - return validateOperationOperandTypes(operands, inputCount, inputIndexes, - inExpectedTypes, outputCount, outputIndexes, - outExpectedTypes); - } - case ANEURALNETWORKS_EXPAND_DIMS: { - if (inputCount != 2 || outputCount != 1) { - logInvalidInOutNumber(2, 1); - return ANEURALNETWORKS_BAD_DATA; - } - auto inputType = operands[inputIndexes[0]].type; - std::vector<OperandType> inExpectedTypes; - std::vector<OperandType> outExpectedTypes; - if (inputType == OperandType::TENSOR_FLOAT16 || - inputType == OperandType::TENSOR_FLOAT32 || - inputType == 
OperandType::TENSOR_INT32 || - inputType == OperandType::TENSOR_QUANT8_ASYMM || - inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - inExpectedTypes = {inputType, OperandType::INT32}; - outExpectedTypes = {inputType}; - } else { - LOG(ERROR) << "Unsupported input tensor type for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3)); - } else { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); - } - return validateOperationOperandTypes(operands, inputCount, inputIndexes, - inExpectedTypes, outputCount, outputIndexes, - outExpectedTypes); - } - case ANEURALNETWORKS_SPLIT: { - if (inputCount != 3) { - LOG(ERROR) << "Invalid number of input operands (" << inputCount << ", expected 3)" - << opType; - return ANEURALNETWORKS_BAD_DATA; - } - auto inputType = operands[inputIndexes[0]].type; - if (inputType != OperandType::TENSOR_FLOAT16 && - inputType != OperandType::TENSOR_FLOAT32 && - inputType != OperandType::TENSOR_INT32 && - inputType != OperandType::TENSOR_QUANT8_ASYMM && - inputType != OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - LOG(ERROR) << "Unsupported input tensor type for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3)); - } else { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); - } - std::vector<OperandType> inExpectedTypes = {inputType, OperandType::INT32, - OperandType::INT32}; - std::vector<OperandType> outExpectedTypes(outputCount, inputType); - return validateOperationOperandTypes(operands, inputCount, inputIndexes, - inExpectedTypes, outputCount, outputIndexes, - outExpectedTypes); - } - case ANEURALNETWORKS_MAXIMUM: - case ANEURALNETWORKS_MINIMUM: { - if (inputCount != 2 || outputCount != 
1) { - logInvalidInOutNumber(2, 1); - return ANEURALNETWORKS_BAD_DATA; - } - std::vector<OperandType> inExpectedTypes; - std::vector<OperandType> outExpectedTypes; - OperandType inputType = operands[inputIndexes[0]].type; - if (inputType == OperandType::TENSOR_FLOAT16 || - inputType == OperandType::TENSOR_FLOAT32 || - inputType == OperandType::TENSOR_INT32 || - inputType == OperandType::TENSOR_QUANT8_ASYMM || - inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - inExpectedTypes = {inputType, inputType}; - outExpectedTypes = {inputType}; - } else { - LOG(ERROR) << "Unsupported input tensor type for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3)); - } else { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); - } - return validateOperationOperandTypes(operands, inputCount, inputIndexes, - inExpectedTypes, outputCount, outputIndexes, - outExpectedTypes); - } - case ANEURALNETWORKS_GROUPED_CONV_2D: { - if ((inputCount != 12 && inputCount != 9) || outputCount != 1) { - LOG(ERROR) << "Invalid number of input operands (" << inputCount - << ", expected 12 or 9) or output operands (" << outputCount - << ", expected 1) for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - auto inputType = operands[inputIndexes[0]].type; - auto filterType = operands[inputIndexes[1]].type; - std::vector<OperandType> inExpectedTypes; - std::vector<OperandType> outExpectedTypes; - if (inputType == OperandType::TENSOR_FLOAT32) { - inExpectedTypes = {OperandType::TENSOR_FLOAT32, OperandType::TENSOR_FLOAT32, - OperandType::TENSOR_FLOAT32, OperandType::INT32, - OperandType::INT32, OperandType::INT32, - OperandType::INT32, OperandType::INT32}; - outExpectedTypes = {OperandType::TENSOR_FLOAT32}; - } else if (inputType == OperandType::TENSOR_FLOAT16) { - inExpectedTypes = {OperandType::TENSOR_FLOAT16, 
OperandType::TENSOR_FLOAT16, - OperandType::TENSOR_FLOAT16, OperandType::INT32, - OperandType::INT32, OperandType::INT32, - OperandType::INT32, OperandType::INT32}; - outExpectedTypes = {OperandType::TENSOR_FLOAT16}; - } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM || - inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - if (filterType != inputType && - filterType != OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL) { - LOG(ERROR) << "Unsupported filter tensor type for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - - if (filterType == OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL && - std::get<Operand::SymmPerChannelQuantParams>( - operands[inputIndexes[1]].extraParams) - .channelDim != 0) { - LOG(ERROR) << "Unsupported filter tensor channel dimension for operation " - << opType; - return ANEURALNETWORKS_BAD_DATA; - } - - inExpectedTypes = { - inputType, filterType, OperandType::TENSOR_INT32, - OperandType::INT32, OperandType::INT32, OperandType::INT32, - OperandType::INT32, OperandType::INT32}; - outExpectedTypes = {inputType}; - } else { - LOG(ERROR) << "Unsupported input tensor type for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - - if (inputCount == 12) { - std::vector<OperandType> explicitScalarTypes(3, OperandType::INT32); - inExpectedTypes.insert(inExpectedTypes.end(), explicitScalarTypes.begin(), - explicitScalarTypes.end()); - } - inExpectedTypes.push_back(OperandType::BOOL); - if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3)); - } else { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); - } - return validateOperationOperandTypes(operands, inputCount, inputIndexes, - inExpectedTypes, outputCount, outputIndexes, - outExpectedTypes); - } - case ANEURALNETWORKS_TILE: { - if (inputCount != 2 || outputCount != 1) { - logInvalidInOutNumber(2, 1); - return ANEURALNETWORKS_BAD_DATA; - } - auto inputType 
= operands[inputIndexes[0]].type; - std::vector<OperandType> inExpectedTypes; - std::vector<OperandType> outExpectedTypes; - if (inputType == OperandType::TENSOR_FLOAT16 || - inputType == OperandType::TENSOR_FLOAT32 || - inputType == OperandType::TENSOR_INT32 || - inputType == OperandType::TENSOR_QUANT8_ASYMM || - inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - inExpectedTypes = {inputType, OperandType::TENSOR_INT32}; - outExpectedTypes = {inputType}; - } else { - LOG(ERROR) << "Unsupported input tensor type for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3)); - } else { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); - } - return validateOperationOperandTypes(operands, inputCount, inputIndexes, - inExpectedTypes, outputCount, outputIndexes, - outExpectedTypes); - } - case ANEURALNETWORKS_POW: { - if (inputCount != 2 || outputCount != 1) { - logInvalidInOutNumber(2, 1); - return ANEURALNETWORKS_BAD_DATA; - } - auto inputType = operands[inputIndexes[0]].type; - std::vector<OperandType> inExpectedTypes; - std::vector<OperandType> outExpectedTypes; - if (inputType == OperandType::TENSOR_FLOAT16 || - inputType == OperandType::TENSOR_FLOAT32) { - inExpectedTypes = {inputType, inputType}; - outExpectedTypes = {inputType}; - } else { - LOG(ERROR) << "Unsupported input tensor type for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3)); - } else { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); - } - return validateOperationOperandTypes(operands, inputCount, inputIndexes, - inExpectedTypes, outputCount, outputIndexes, - outExpectedTypes); - } - case ANEURALNETWORKS_IF: { - 
NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3)); - return validateIfOperation(inputCount, inputIndexes, outputCount, outputIndexes, - operands, helper) - ? ANEURALNETWORKS_NO_ERROR - : ANEURALNETWORKS_BAD_DATA; - } - case ANEURALNETWORKS_WHILE: { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3)); - return validateWhileOperation(inputCount, inputIndexes, outputCount, outputIndexes, - operands, helper) - ? ANEURALNETWORKS_NO_ERROR - : ANEURALNETWORKS_BAD_DATA; - } - default: { - const OperationRegistration* operationRegistration = - BuiltinOperationResolver::get()->findOperation( - static_cast<OperationType>(opType)); - if (operationRegistration == nullptr) { - if (0 <= opType && opType < kNumberOfOperationTypes) { - LOG(ERROR) << opType << " not registered"; - } else { - LOG(ERROR) << "Operation type " << opType << " out of the range [0, " - << kNumberOfOperationTypes << ")"; - } - return ANEURALNETWORKS_UNEXPECTED_NULL; - } - if (operationRegistration->validate == nullptr) { - LOG(ERROR) << "Incomplete operation registration: " << opType; - return ANEURALNETWORKS_UNEXPECTED_NULL; - } - OperationValidationContext context(operationRegistration->name, inputCount, - inputIndexes, outputCount, outputIndexes, - operands.data()); - const auto maybeVersion = operationRegistration->validate(&context); - if (!maybeVersion.has_value()) { - LOG(ERROR) << "Validation failed for operation " << opType << ": " - << maybeVersion.error(); - return ANEURALNETWORKS_BAD_DATA; - } - if (!validateVersion(&context, convert(halVersion), maybeVersion.value())) { - LOG(ERROR) << "Validation failed for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - return ANEURALNETWORKS_NO_ERROR; - } - } -} - -ErrorStatus convertResultCodeToErrorStatus(int resultCode) { - switch (resultCode) { - case ANEURALNETWORKS_NO_ERROR: - return ErrorStatus::NONE; - - case ANEURALNETWORKS_BAD_DATA: - case ANEURALNETWORKS_UNEXPECTED_NULL: - 
return ErrorStatus::INVALID_ARGUMENT; - - case ANEURALNETWORKS_OUTPUT_INSUFFICIENT_SIZE: - return ErrorStatus::OUTPUT_INSUFFICIENT_SIZE; - - case ANEURALNETWORKS_UNAVAILABLE_DEVICE: - return ErrorStatus::DEVICE_UNAVAILABLE; - - case ANEURALNETWORKS_BAD_STATE: - case ANEURALNETWORKS_INCOMPLETE: - case ANEURALNETWORKS_OP_FAILED: - case ANEURALNETWORKS_OUT_OF_MEMORY: - case ANEURALNETWORKS_UNMAPPABLE: - case ANEURALNETWORKS_DEAD_OBJECT: - return ErrorStatus::GENERAL_FAILURE; - - case ANEURALNETWORKS_MISSED_DEADLINE_TRANSIENT: - return ErrorStatus::MISSED_DEADLINE_TRANSIENT; - case ANEURALNETWORKS_MISSED_DEADLINE_PERSISTENT: - return ErrorStatus::MISSED_DEADLINE_PERSISTENT; - case ANEURALNETWORKS_RESOURCE_EXHAUSTED_TRANSIENT: - return ErrorStatus::RESOURCE_EXHAUSTED_TRANSIENT; - case ANEURALNETWORKS_RESOURCE_EXHAUSTED_PERSISTENT: - return ErrorStatus::RESOURCE_EXHAUSTED_PERSISTENT; - } - LOG(ERROR) << "Unknown result code " << resultCode << " mapped to ErrorStatus::GENERAL_FAILURE"; - return ErrorStatus::GENERAL_FAILURE; -} - -int convertErrorStatusToResultCode(ErrorStatus status) { - switch (status) { - case ErrorStatus::NONE: - return ANEURALNETWORKS_NO_ERROR; - case ErrorStatus::DEVICE_UNAVAILABLE: - return ANEURALNETWORKS_UNAVAILABLE_DEVICE; - case ErrorStatus::GENERAL_FAILURE: - return ANEURALNETWORKS_OP_FAILED; - case ErrorStatus::OUTPUT_INSUFFICIENT_SIZE: - return ANEURALNETWORKS_OUTPUT_INSUFFICIENT_SIZE; - case ErrorStatus::INVALID_ARGUMENT: - return ANEURALNETWORKS_BAD_DATA; - case ErrorStatus::MISSED_DEADLINE_TRANSIENT: - return ANEURALNETWORKS_MISSED_DEADLINE_TRANSIENT; - case ErrorStatus::MISSED_DEADLINE_PERSISTENT: - return ANEURALNETWORKS_MISSED_DEADLINE_PERSISTENT; - case ErrorStatus::RESOURCE_EXHAUSTED_TRANSIENT: - return ANEURALNETWORKS_RESOURCE_EXHAUSTED_TRANSIENT; - case ErrorStatus::RESOURCE_EXHAUSTED_PERSISTENT: - return ANEURALNETWORKS_RESOURCE_EXHAUSTED_PERSISTENT; - case ErrorStatus::DEAD_OBJECT: - return ANEURALNETWORKS_DEAD_OBJECT; - } - 
LOG(ERROR) << "Unknown ErrorStatus " << status << " mapped to ANEURALNETWORKS_OP_FAILED"; - return ANEURALNETWORKS_OP_FAILED; -} - -std::tuple<int, std::vector<OutputShape>, Timing> getExecutionResult( - ErrorStatus status, std::vector<OutputShape> outputShapes, Timing timing) { - constexpr Timing kNoTiming = {}; - const int n = convertErrorStatusToResultCode(status); - if (status != ErrorStatus::NONE && status != ErrorStatus::OUTPUT_INSUFFICIENT_SIZE && - !outputShapes.empty()) { - LOG(ERROR) << "The driver returned OutputShapes when it shouldn't."; - outputShapes.clear(); - } - if (status != ErrorStatus::NONE && timing != kNoTiming) { - LOG(ERROR) << "The driver returned Timing when it shouldn't."; - timing = kNoTiming; - } - return {n, std::move(outputShapes), timing}; -} - -FenceState syncWait(int fd, int timeout) { - // This implementation is directly based on the ::sync_wait() implementation. - - struct pollfd fds; - int ret; - - if (fd < 0) { - errno = EINVAL; - return FenceState::UNKNOWN; - } - - fds.fd = fd; - fds.events = POLLIN; - - do { - ret = poll(&fds, 1, timeout); - if (ret > 0) { - if (fds.revents & POLLNVAL) { - errno = EINVAL; - return FenceState::UNKNOWN; - } - if (fds.revents & POLLERR) { - errno = EINVAL; - return FenceState::ERROR; - } - return FenceState::SIGNALED; - } else if (ret == 0) { - errno = ETIME; - return FenceState::ACTIVE; - } - } while (ret == -1 && (errno == EINTR || errno == EAGAIN)); - - return FenceState::UNKNOWN; -} - -#ifdef NN_DEBUGGABLE -uint32_t getProp(const char* str, uint32_t defaultValue) { - const std::string propStr = android::base::GetProperty(str, ""); - if (propStr.size() > 0) { - return std::stoi(propStr); - } else { - return defaultValue; - } -} -#endif // NN_DEBUGGABLE - -} // namespace nn -} // namespace android
diff --git a/common/LogTagTest.cpp b/common/LogTagTest.cpp index ad837a9..459afbe 100644 --- a/common/LogTagTest.cpp +++ b/common/LogTagTest.cpp
@@ -19,8 +19,7 @@ #include <android-base/logging.h> #include <gmock/gmock.h> #include <gtest/gtest.h> - -#include "LegacyUtils.h" +#include "Utils.h" namespace log_tag_test {
diff --git a/common/LogTagTestExtra.cpp b/common/LogTagTestExtra.cpp index c9b8cb7..9e9a9a2 100644 --- a/common/LogTagTestExtra.cpp +++ b/common/LogTagTestExtra.cpp
@@ -16,7 +16,7 @@ #define LOG_TAG "SecondFileTag" -#include "LegacyUtils.h" +#include "Utils.h" namespace log_tag_test {
diff --git a/common/MetaModel.cpp b/common/MetaModel.cpp index 3d0e840..30d88a1 100644 --- a/common/MetaModel.cpp +++ b/common/MetaModel.cpp
@@ -20,47 +20,138 @@ #include <algorithm> #include <map> -#include <numeric> #include <set> #include <sstream> #include <type_traits> #include <utility> -#include <vector> #include "GraphDump.h" -#include "LegacyUtils.h" -#include "nnapi/TypeUtils.h" -#include "nnapi/Types.h" -#include "nnapi/Validation.h" +#include "HalInterfaces.h" +#include "Utils.h" namespace android::nn { +using namespace hal; + namespace { +// Add an element to the end of the vector and return a pair consisting of the +// index of the new element and a pointer to the new element. +template <class T> +std::pair<uint32_t, T*> extend(hidl_vec<T>* vec) { + size_t nextIndex = vec->size(); + vec->resize(nextIndex + 1); + return {nextIndex, &(*vec)[nextIndex]}; +} + // Add an element to the end of the vector, set it to the specified value, and // return a pair consisting of the index of the new element and a pointer to the // new element. template <class T> -std::pair<uint32_t, T*> extend(std::vector<T>* vec, const T& val) { - vec->push_back(val); - return {vec->size() - 1, &vec->back()}; +std::pair<uint32_t, T*> extend(hidl_vec<T>* vec, const T& val) { + auto extended = extend(vec); + *extended.second = val; + return extended; } -// Add an element to the end of the vector and return a pair consisting of the -// index of the new element and a pointer to the new element. -template <class T> -std::pair<uint32_t, T*> extend(std::vector<T>* vec) { - return extend(vec, {}); +template <typename T> +bool operator<(const hidl_vec<T>& a, const hidl_vec<T>& b) { + return std::lexicographical_compare(a.begin(), a.end(), b.begin(), b.end()); } -bool invalid(const Model& model, Version version, bool strictSlicing) { +// Compile-time mapping from a particular Model type to a name for that type. 
+template <class T_Model> +struct ModelVersion; +template <> +struct ModelVersion<hal::V1_0::Model> { + static constexpr char name[] = "V1_0"; +}; +template <> +struct ModelVersion<hal::V1_1::Model> { + static constexpr char name[] = "V1_1"; +}; +template <> +struct ModelVersion<hal::V1_2::Model> { + static constexpr char name[] = "V1_2"; +}; +template <> +struct ModelVersion<hal::V1_3::Model> { + static constexpr char name[] = "V1_3"; +}; + +// Dispatcher mechanism for calling an appropriate uncheckedConvertToV1_* +// given the desired return type. +template <typename T_ReturnType> +T_ReturnType uncheckedConvertTo(OperationType type); +template <> +hal::V1_0::OperationType uncheckedConvertTo<hal::V1_0::OperationType>(OperationType type) { + return uncheckedConvertToV1_0(type); +} +template <> +hal::V1_1::OperationType uncheckedConvertTo<hal::V1_1::OperationType>(OperationType type) { + return uncheckedConvertToV1_1(type); +} +template <> +hal::V1_2::OperationType uncheckedConvertTo<hal::V1_2::OperationType>(OperationType type) { + return uncheckedConvertToV1_2(type); +} + +// Dispatcher mechanism for calling an appropriate convertToV1_* given the +// desired return type. Note that there is no V1_1::Operand type. +template <typename T_ReturnType> +T_ReturnType convertTo(Operand operand); +template <> +hal::V1_0::Operand convertTo<hal::V1_0::Operand>(Operand operand) { + return convertToV1_0(operand); +} +template <> +hal::V1_2::Operand convertTo<hal::V1_2::Operand>(Operand operand) { + return convertToV1_2(operand); +} + +// Dispatcher mechanism for calling an appropriate convertToV1_* given the +// desired return type. Note that there are no V1_[12]::OperandLifeTime types. 
+template <typename T_ReturnType> +T_ReturnType convertTo(OperandLifeTime lifetime); +template <> +hal::V1_0::OperandLifeTime convertTo<hal::V1_0::OperandLifeTime>(OperandLifeTime lifetime) { + return convertToV1_0(lifetime); +} +template <> +hal::V1_3::OperandLifeTime convertTo<hal::V1_3::OperandLifeTime>(OperandLifeTime lifetime) { + return lifetime; +} + +// Dispatcher mechanism for calling an appropriate compliantWithV1_* given the +// desired target model type. +template <typename T_SlicedModel> +void getNoncompliantOperations(const hal::V1_3::Model& model, + std::set<uint32_t>* noncompliantOperations); +template <> +void getNoncompliantOperations<hal::V1_0::Model>(const hal::V1_3::Model& model, + std::set<uint32_t>* noncompliantOperations) { + compliantWithV1_0(model, noncompliantOperations); +} +template <> +void getNoncompliantOperations<hal::V1_1::Model>(const hal::V1_3::Model& model, + std::set<uint32_t>* noncompliantOperations) { + compliantWithV1_1(model, noncompliantOperations); +} +template <> +void getNoncompliantOperations<hal::V1_2::Model>(const hal::V1_3::Model& model, + std::set<uint32_t>* noncompliantOperations) { + compliantWithV1_2(model, noncompliantOperations); +} + +template <class T_SlicedModel> +bool invalid(const T_SlicedModel& model, bool strictSlicing) { // A model must have at least one operation. However, it's possible that a // slice has no operations (because no operations from the original model // are compliant with the sliced model type). In this case, the sliced // model would be invalid. - const bool looksEmpty = (model.main.operations.size() == 0); + const bool looksEmpty = (model.operations.size() == 0); if (strictSlicing) { - CHECK_EQ(looksEmpty, (model.main.operands.size() == 0)); + CHECK_EQ(looksEmpty, (model.operands.size() == 0)); } if (looksEmpty) return true; @@ -69,19 +160,14 @@ // are data dependent). A slice might contain only dead operations, and // hence have no model outputs. 
In this case, the sliced model would be // invalid. - if (model.main.outputIndexes.size() == 0) return true; + if (model.outputIndexes.size() == 0) return true; - // We shouldn't have to check whether the model is valid. However, it could - // be invalid if there is an error in the slicing algorithm. - auto maybeVersion = validate(model); - if (!maybeVersion.has_value()) { - LOG(WARNING) << "Sliced model fails validate(): " << maybeVersion.error(); - CHECK(!strictSlicing); - return true; - } - if (maybeVersion.value() > version) { - LOG(WARNING) << "Sliced model fails validate(): insufficient version (" - << maybeVersion.value() << " vs " << version << ")"; + // We shouldn't have to check whether the model is valid. + // However, it could be invalid if: + // - there is an error in the slicing algorithm; or + // - there is an error in compliantWith (see http://b/131845106) + if (!validateModel(model)) { + LOG(WARNING) << "Sliced model fails validateModel()"; CHECK(!strictSlicing); return true; } @@ -91,96 +177,90 @@ } // anonymous namespace -MetaModel::MetaModel(Model model, bool strictSlicing) - : mModel(std::move(model)), - mModelMinimumSupportedVersion(validate(mModel).value()), - mStrictSlicing(strictSlicing) {} - -MetaModel::ReturnedSlice MetaModel::getSlice(Version version) const { - // All slices of versions of at least mModelMinimumSupportedVersion are identical, so do not - // create more than one such slice. 
- version = std::min(version, mModelMinimumSupportedVersion); - - auto& slice = mCachedSlices[version]; - if (slice.mState == SliceState::UNINITIALIZED) { - slice = makeSlice(version); +template <class T_SlicedModel> +MetaModel::ReturnedSlice<T_SlicedModel> MetaModel::getSlice(Slice<T_SlicedModel>* slice) const { + CHECK(slice != nullptr); + if (slice->mState == SliceState::UNINITIALIZED) { + *slice = makeSlice<T_SlicedModel>(); } - if (slice.mState == SliceState::INVALID) { + if (slice->mState == SliceState::INVALID) { return {}; } - return MetaModel::ReturnedSlice(std::make_pair( - slice.mModel, Mapper([&slice](uint32_t slicedOperationIndex) { - return slice.mSlicedOperationIndexToOrigIndex.at(slicedOperationIndex); + return MetaModel::ReturnedSlice<T_SlicedModel>(std::make_pair( + slice->mHidlModel, Mapper([slice](uint32_t slicedOperationIndex) { + return slice->mSlicedOperationIndexToOrigIndex.at(slicedOperationIndex); }))); } +template MetaModel::ReturnedSlice<hal::V1_0::Model> MetaModel::getSlice( + Slice<hal::V1_0::Model>* slice) const; +template MetaModel::ReturnedSlice<hal::V1_1::Model> MetaModel::getSlice( + Slice<hal::V1_1::Model>* slice) const; +template MetaModel::ReturnedSlice<hal::V1_2::Model> MetaModel::getSlice( + Slice<hal::V1_2::Model>* slice) const; +// When adding HAL version 1.4, make sure to handle control flow and referenced +// subgraphs here properly. A V1_3 sliced model should contain an IF/WHILE and +// its referenced subgraphs only if there are no V1_4+ operations in those +// subgraphs. +// template MetaModel::ReturnedSlice<hal::V1_3::Model> MetaModel::getSlice( +// Slice<hal::V1_3::Model>* slice) const; // Utility class for makeSlice(). // // For each output operand of a noncompliant operation that is the input // operand of at least one compliant operation, we will ensure that there is // a sliced model input whose "type" is that of the output operand. 
This is -// a map from operand "type" (in the original model) to model input operand -// index (in the sliced model). We only use the subset of the fields that are -// relevant (OperandType, dimensions, scale, zeroPoint, extraParams), but -// exclude irrelevant fields from the map key (lifetime, location). +// a map from operand "type" (in the original model) to model input +// operand index (in the sliced model). Unfortunately, there is no +// representation of operand "type" defined in the HAL that we can use +// naively here -- we want (OperandType, dimensions, scale, zeroPoint, +// extraParams), but these fields exist in Operand along with other fields +// that need to be excluded from the map key (numberOfConsumers, lifetime, +// location). There are several choices: +// - Don't have a map -- each output identified above gets its own sliced +// model input (no sharing of sliced model inputs). +// - Create an operand "type" representation solely for use as a map key. +// - Write a tailored comparison function that ignores the excluded fields. +// We choose to write a tailored comparison function. If Treble were to +// generate a comparison function for us (http://b/130567619) then it might +// be better to instead reset the excluded fields to canonical values -- +// then we could use the Treble provided comparison function, and the +// solution would be robust (in a correctness sense, not a sharing sense) if +// more fields are added and we neglect to canonicalize them. // // We also use this map for model input operands of the original model that // become input operands of the sliced model. This means that an original // model input operand might be commoned with other original model input // operands and/or with original model temporary operands. 
+template <typename T_SlicedOperand> class MetaModel::OrigOperandToSlicedInputOperandIndex { public: - // `slicedOperands` and `slicedInputIndexes` will be modified as part of - // OrigOperandToSlicedInputOperandIndex::getIndex. `slicedVersion`, `operandValuesSize`, and - // `poolSizes` are used as a check to ensure that the sliced operand is valid and compliant with - // the sliced version. `operandValuesSize` is the size of the operand values in the sliced model - // (which is the same as the original model). `poolSizes` is the size of the memories in the - // sliced model (which is the same as the original model). - OrigOperandToSlicedInputOperandIndex(std::vector<Operand>* slicedOperands, - std::vector<uint32_t>* slicedInputIndexes, - Version slicedVersion, size_t operandValuesSize, - std::vector<size_t> poolSizes) - : mSlicedOperands(*slicedOperands), - mSlicedInputIndexes(*slicedInputIndexes), - kSlicedVersion(slicedVersion), - kOperandValuesSize(operandValuesSize), - kPoolSizes(std::move(poolSizes)) {} + OrigOperandToSlicedInputOperandIndex(hidl_vec<T_SlicedOperand>* slicedOperands, + hidl_vec<uint32_t>* slicedInputIndexes) + : mSlicedOperands(*slicedOperands), mSlicedInputIndexes(*slicedInputIndexes) {} // Given an operand from the original model, return the index of the // corresponding model input operand from the sliced model. Creates a // new operand in the sliced model if necessary. 
uint32_t getIndex(Operand operand) { - CHECK(operand.lifetime == Operand::LifeTime::SUBGRAPH_INPUT || - operand.lifetime == Operand::LifeTime::SUBGRAPH_OUTPUT || - operand.lifetime == Operand::LifeTime::TEMPORARY_VARIABLE); - // Lookup auto it = mMap.find(operand); if (it != mMap.end()) { VLOG(COMPILATION) << "OrigOperandToSlicedInputOperandIndex::getIndex looked for " - << operand << " and found " << it->second << ": " << it->first; + << toString(operand) << " and found " << it->second << ": " + << toString(it->first); return it->second; } // Create - operand.lifetime = Operand::LifeTime::SUBGRAPH_INPUT; + operand.numberOfConsumers = 0; + operand.lifetime = convertTo<decltype(operand.lifetime)>(OperandLifeTime::SUBGRAPH_INPUT); operand.location = {}; - - // Note that the sliced model does not contain any referenced subgraphs, so both `subgraphs` - // and `subgraphVersionCache` are empty. - const std::vector<Model::Subgraph> subgraphs; - auto subgraphVersionCache = createSubgraphVersionCache(subgraphs.size()); - const auto minimumSupportedOperandVersion = - validateOperandAndAnythingItDependsOn(operand, kOperandValuesSize, kPoolSizes, - subgraphs, subgraphVersionCache.get()) - .value(); - CHECK_LE(minimumSupportedOperandVersion, kSlicedVersion); - - uint32_t slicedOperandIndex = extend(&mSlicedOperands, operand).first; + uint32_t slicedOperandIndex = + extend(&mSlicedOperands, convertTo<T_SlicedOperand>(operand)).first; mMap[operand] = slicedOperandIndex; extend(&mSlicedInputIndexes, slicedOperandIndex); VLOG(COMPILATION) << "OrigOperandToSlicedInputOperandIndex::getIndex created " - << slicedOperandIndex << ": " << operand; + << slicedOperandIndex << ": " << toString(operand); return slicedOperandIndex; } @@ -204,52 +284,55 @@ } private: - static bool compare(const Operand::SymmPerChannelQuantParams& a, - const Operand::SymmPerChannelQuantParams& b) { + static bool compare(const SymmPerChannelQuantParams& a, + const SymmPerChannelQuantParams& b) { if (a.scales != 
b.scales) { return a.scales < b.scales; } return a.channelDim < b.channelDim; } - static bool compare(const Operand::ExtraParams& a, const Operand::ExtraParams& b) { - if (a.index() != b.index()) { - return a.index() < b.index(); + + static bool compare(const OperandExtraParams& a, const OperandExtraParams& b) { + if (a.getDiscriminator() != b.getDiscriminator()) { + return a.getDiscriminator() < b.getDiscriminator(); } - if (std::holds_alternative<Operand::SymmPerChannelQuantParams>(a)) { - return compare(std::get<Operand::SymmPerChannelQuantParams>(a), - std::get<Operand::SymmPerChannelQuantParams>(b)); + + switch (a.getDiscriminator()) { + case OperandExtraParams::hidl_discriminator::channelQuant: + return compare(a.channelQuant(), b.channelQuant()); + + case OperandExtraParams::hidl_discriminator::extension: + return a.extension() < b.extension(); + + case OperandExtraParams::hidl_discriminator::none: + return false; + + default: + CHECK(false) << "Unexpected"; + return false; } - if (std::holds_alternative<Operand::ExtensionParams>(a)) { - return std::get<Operand::ExtensionParams>(a) < - std::get<Operand::ExtensionParams>(b); - } - if (std::holds_alternative<Operand::NoParams>(a)) { - return false; - } - CHECK(false) << "Unexpected"; - return false; } }; std::map<Operand, uint32_t, Compare> mMap; - std::vector<Operand>& mSlicedOperands; - std::vector<uint32_t>& mSlicedInputIndexes; - const Version kSlicedVersion; - const size_t kOperandValuesSize; - const std::vector<size_t> kPoolSizes; + hidl_vec<T_SlicedOperand>& mSlicedOperands; + hidl_vec<uint32_t>& mSlicedInputIndexes; }; +template <class T_SlicedModel> void MetaModel::processOperations( - Slice* slice, std::map<uint32_t, uint32_t>* origOperandIndexToSlicedIndex, - OrigOperandToSlicedInputOperandIndex* origOperandToSlicedInputOperandIndex, + Slice<T_SlicedModel>* slice, std::map<uint32_t, uint32_t>* origOperandIndexToSlicedIndex, + OrigOperandToSlicedInputOperandIndex<typename 
Slice<T_SlicedModel>::Operand>* + origOperandToSlicedInputOperandIndex, const std::set<uint32_t>& noncompliantOperations, const std::set<uint32_t>& inputOperandIndexesOfCompliantOperations) const { - const auto& origOperands = mModel.main.operands; - const auto& origOperations = mModel.main.operations; - auto& slicedOperands = slice->mModel.main.operands; - auto& slicedOperations = slice->mModel.main.operations; + using SlicedOperand = typename Slice<T_SlicedModel>::Operand; + using SlicedOperation = typename Slice<T_SlicedModel>::Operation; + using SlicedOperationType = typename Slice<T_SlicedModel>::OperationType; - std::vector<uint32_t> origOperandNumberOfConsumers = - countNumberOfConsumers(origOperands.size(), origOperations).value(); + const auto& origOperands = mHidlModel.main.operands; + const auto& origOperations = mHidlModel.main.operations; + auto& slicedOperands = slice->mHidlModel.operands; + auto& slicedOperations = slice->mHidlModel.operations; for (uint32_t origOperationIndex = 0; origOperationIndex < origOperations.size(); ++origOperationIndex) { @@ -265,14 +348,15 @@ (*origOperandIndexToSlicedIndex)[output] = slicedIndex; VLOG(COMPILATION) << "origOperandIndexToSlicedIndex noncompliant output processing created " - << output << " -> " << slicedIndex << ": " << slicedOperands[slicedIndex]; + << output << " -> " << slicedIndex << ": " + << toString(slicedOperands[slicedIndex]); } } else { slice->mSlicedOperationIndexToOrigIndex.push_back(origOperationIndex); - Operation& slicedOperation = *extend(&slicedOperations).second; + SlicedOperation& slicedOperation = *extend(&slicedOperations).second; CHECK_EQ(slice->mSlicedOperationIndexToOrigIndex.size(), slicedOperations.size()); - slicedOperation.type = origOperation.type; + slicedOperation.type = uncheckedConvertTo<SlicedOperationType>(origOperation.type); // Model is topologically sorted, so all operation inputs must be // present in origOperandIndexToSlicedIndex, and no operation @@ -280,6 +364,7 @@ 
// Operation inputs // - Fill in slicedOperation.inputs + // - Update number of consumers for each input operand slicedOperation.inputs.resize(origOperation.inputs.size()); std::transform( origOperation.inputs.begin(), origOperation.inputs.end(), @@ -287,10 +372,11 @@ [&origOperandIndexToSlicedIndex, &slicedOperands](uint32_t origOperandIndex) { uint32_t slicedOperandIndex = origOperandIndexToSlicedIndex->at(origOperandIndex); + slicedOperands[slicedOperandIndex].numberOfConsumers++; VLOG(COMPILATION) << "origOperandIndexToSlicedIndex compliant input " "processing created " << origOperandIndex << " -> " << slicedOperandIndex - << ": " << slicedOperands[slicedOperandIndex]; + << ": " << toString(slicedOperands[slicedOperandIndex]); return slicedOperandIndex; }); @@ -307,15 +393,17 @@ uint32_t slicedOperandIndex = firstOutputSlicedOperandIndex + outputNum; auto& slicedOperand = slicedOperands[slicedOperandIndex]; const auto& origOperand = origOperands[origOperandIndex]; - slicedOperand = origOperand; + slicedOperand = convertTo<SlicedOperand>(origOperand); + slicedOperand.numberOfConsumers = 0; CHECK_EQ(origOperandIndexToSlicedIndex->count(origOperandIndex), size_t(0)); (*origOperandIndexToSlicedIndex)[origOperandIndex] = slicedOperandIndex; slicedOperation.outputs[outputNum] = slicedOperandIndex; - const auto subgraphOutputLifetime = Operand::LifeTime::SUBGRAPH_OUTPUT; + const auto subgraphOutputLifetime = convertTo<decltype(slicedOperand.lifetime)>( + OperandLifeTime::SUBGRAPH_OUTPUT); if (!inputOperandIndexesOfCompliantOperations.count(origOperandIndex) && - origOperandNumberOfConsumers[origOperandIndex] != 0) { + origOperand.numberOfConsumers) { // Was consumed only by noncompliant operations; convert to // an output of the sliced model. 
slicedOperand.lifetime = subgraphOutputLifetime; @@ -323,92 +411,40 @@ VLOG(COMPILATION) << "origOperandIndexToSlicedIndex compliant output created " << origOperandIndex << " -> " << slicedOperandIndex << ": " - << slicedOperand; + << toString(slicedOperand); if (slicedOperand.lifetime == subgraphOutputLifetime) { - extend(&slice->mModel.main.outputIndexes, slicedOperandIndex); + extend(&slice->mHidlModel.outputIndexes, slicedOperandIndex); } } } } } -std::set<uint32_t> MetaModel::getNoncompliantOperations(Version version) const { - const auto [operandValuesSize, poolSizes] = getMemorySizes(mModel); +template <class T_SlicedModel> +MetaModel::Slice<T_SlicedModel> MetaModel::makeSlice() const { + using SlicedOperand = typename Slice<T_SlicedModel>::Operand; - auto subgraphVersionCache = createSubgraphVersionCache(mModel.referenced.size()); - std::set<uint32_t> noncompliantOperations; - for (uint32_t i = 0; i < mModel.main.operations.size(); ++i) { - const auto& operation = mModel.main.operations[i]; - const auto minSupportedVersion = - validateOperationAndAnythingItDependsOn( - operation, mModel.main.operands, operandValuesSize, poolSizes, - mModel.referenced, subgraphVersionCache.get()) - .value(); - if (minSupportedVersion > version) { - noncompliantOperations.insert(i); - } - } - return noncompliantOperations; -} + Slice<T_SlicedModel> slice; -MetaModel::Slice MetaModel::makeSlice(Version version) const { - Slice slice; - - // Quickly return if the model is already compliant with `version` - if (version >= mModelMinimumSupportedVersion) { - slice.mModel = mModel; - slice.mSlicedOperationIndexToOrigIndex = - std::vector<uint32_t>(mModel.main.operations.size()); - std::iota(slice.mSlicedOperationIndexToOrigIndex.begin(), - slice.mSlicedOperationIndexToOrigIndex.end(), 0u); - slice.mState = SliceState::NORMAL; - return slice; - } - - const auto& origOperands = mModel.main.operands; - const auto& origOperations = mModel.main.operations; - auto& slicedOperands = 
slice.mModel.main.operands; + const auto& origOperands = mHidlModel.main.operands; + const auto& origOperations = mHidlModel.main.operations; + auto& slicedOperands = slice.mHidlModel.operands; // Indexes of elements of noncompliant origOperations - std::set<uint32_t> noncompliantOperations = getNoncompliantOperations(version); - - // Check if any compliant operations require a subgraph. - bool someCompliantOperationHasASubgraphOperand = false; - if (!mModel.referenced.empty()) { - for (size_t i = 0; i < mModel.main.operations.size(); ++i) { - const auto& operation = mModel.main.operations[i]; - if (noncompliantOperations.count(i) > 0) { - continue; - } - const auto isSubgraph = [&origOperands](uint32_t opndIdx) { - return origOperands[opndIdx].lifetime == Operand::LifeTime::SUBGRAPH; - }; - if (std::any_of(operation.inputs.begin(), operation.inputs.end(), isSubgraph)) { - someCompliantOperationHasASubgraphOperand = true; - break; - } - } - } - - // TODO(b/175418767): Currently, MetaModel is not equipped to slice referenced subgraphs. If the - // original model is not compliant with the specified version and contains referenced subgraphs - // needed by the slice, return an invalidated slice. - if (someCompliantOperationHasASubgraphOperand) { - slice.mState = SliceState::INVALID; - return slice; - } + std::set<uint32_t> noncompliantOperations; + getNoncompliantOperations<T_SlicedModel>(mHidlModel, &noncompliantOperations); // Map from an operand index in origOperands to the corresponding operand index in // slicedOperands std::map<uint32_t, uint32_t> origOperandIndexToSlicedIndex; // Collect the operand indexes of every operand that is an input to a - // compliant operation. If the operand is a CONSTANT_*, POINTER, or a - // NO_VALUE, copy it to the sliced model and update - // origOperandIndexToSlicedIndex accordingly. 
Otherwise, we'll deal with - // the operand in the subsequent "Main loop", where we process operation - // outputs (intermediates and model outputs). + // compliant operation. If the operand is a CONSTANT_* or a NO_VALUE, copy + // it to the sliced model and update origOperandIndexToSlicedIndex + // accordingly. Otherwise, we'll deal with the operand in the subsequent + // "Main loop", where we process operation outputs (intermediates and model + // outputs). std::set<uint32_t> inputOperandIndexesOfCompliantOperations; for (uint32_t origOperationIndex = 0; origOperationIndex < origOperations.size(); ++origOperationIndex) { @@ -419,16 +455,17 @@ if (inputOperandIndexesOfCompliantOperations.insert(input).second) { const Operand& origOperand = origOperands[input]; switch (origOperand.lifetime) { - case Operand::LifeTime::CONSTANT_COPY: - case Operand::LifeTime::CONSTANT_REFERENCE: - case Operand::LifeTime::POINTER: - case Operand::LifeTime::NO_VALUE: { + case OperandLifeTime::CONSTANT_COPY: + case OperandLifeTime::CONSTANT_REFERENCE: + case OperandLifeTime::NO_VALUE: { const uint32_t slicedOperandIndex = - extend(&slicedOperands, origOperand).first; + extend(&slicedOperands, convertTo<SlicedOperand>(origOperand)) + .first; + slicedOperands[slicedOperandIndex].numberOfConsumers = 0; origOperandIndexToSlicedIndex[input] = slicedOperandIndex; VLOG(COMPILATION) << "origOperandIndexToSlicedIndex initialization created " << input << " -> " << slicedOperandIndex << ": " - << slicedOperands[slicedOperandIndex]; + << toString(slicedOperands[slicedOperandIndex]); break; } default: @@ -438,24 +475,21 @@ } } - const auto [operandValuesSize, poolSizes] = getMemorySizes(mModel); - OrigOperandToSlicedInputOperandIndex origOperandToSlicedInputOperandIndex( - &slicedOperands, &slice.mModel.main.inputIndexes, version, operandValuesSize, - poolSizes); + &slicedOperands, &slice.mHidlModel.inputIndexes); // An input of the original model is an input of the sliced model if and // only if 
it is consumed by at least one compliant operation. Note that in // the sliced model we share all model inputs of the same "type"; and that // we may later add model inputs to the sliced model. - for (uint32_t origInputIndex : mModel.main.inputIndexes) { + for (uint32_t origInputIndex : mHidlModel.main.inputIndexes) { if (inputOperandIndexesOfCompliantOperations.count(origInputIndex)) { const uint32_t slicedIndex = origOperandToSlicedInputOperandIndex.getIndex(origOperands[origInputIndex]); origOperandIndexToSlicedIndex[origInputIndex] = slicedIndex; VLOG(COMPILATION) << "origOperandIndexToSlicedIndex inputIndexes processing created " << origInputIndex << " -> " << slicedIndex << ": " - << slicedOperands[slicedIndex]; + << toString(slicedOperands[slicedIndex]); } } @@ -467,27 +501,30 @@ // opt to regenerate them based on the operands present in the sliced model: // This would be more complex and probably take more computation time, but // it would reduce the size of the sliced model, and hence the time spent - // copying it around and potentially passing it across process boundaries. - slice.mModel.operandValues = mModel.operandValues; - slice.mModel.pools = mModel.pools; + // copying it around and passing it across the HAL interface. + slice.mHidlModel.operandValues = mHidlModel.operandValues; + slice.mHidlModel.pools = mHidlModel.pools; if (VLOG_IS_ON(COMPILATION)) { { std::ostringstream fromName; - fromName << "Slice: From canonical"; - graphDump(fromName.str().c_str(), mModel); + fromName << "Slice: From " << ModelVersion<decltype(mHidlModel)>::name; + graphDump(fromName.str().c_str(), mHidlModel); } { std::ostringstream toName; - toName << "Slice: To " << version; - graphDump(toName.str().c_str(), slice.mModel); + toName << "Slice: To " << ModelVersion<decltype(slice.mHidlModel)>::name; + graphDump(toName.str().c_str(), convertToV1_3(slice.mHidlModel)); } } - slice.mState = invalid(slice.mModel, version, mStrictSlicing) ? 
SliceState::INVALID - : SliceState::NORMAL; + slice.mState = + invalid(slice.mHidlModel, mStrictSlicing) ? SliceState::INVALID : SliceState::NORMAL; return slice; } +template MetaModel::Slice<V1_0::Model> MetaModel::makeSlice() const; +template MetaModel::Slice<V1_1::Model> MetaModel::makeSlice() const; + } // namespace android::nn
diff --git a/common/OperationResolver.cpp b/common/OperationResolver.cpp index e6792b2..fce3af4 100644 --- a/common/OperationResolver.cpp +++ b/common/OperationResolver.cpp
@@ -23,6 +23,8 @@ namespace android { namespace nn { +using namespace hal; + // TODO(b/119608412): Find a way to not reference every operation here. const OperationRegistration* register_ABS(); const OperationRegistration* register_ADD();
diff --git a/common/OperationsUtils.cpp b/common/OperationsUtils.cpp index dfd2f3b..e8dd3e2 100644 --- a/common/OperationsUtils.cpp +++ b/common/OperationsUtils.cpp
@@ -24,14 +24,16 @@ #include <sstream> #include <vector> -#include "LegacyUtils.h" #include "Operations.h" +#include "Utils.h" namespace android { namespace nn { namespace { +using namespace hal; + bool validateOperandTypes(const std::vector<OperandType>& expectedTypes, const char* tag, uint32_t operandCount, std::function<OperandType(uint32_t)> getOperandType) { @@ -39,8 +41,8 @@ for (uint32_t i = 0; i < operandCount; ++i) { OperandType type = getOperandType(i); NN_RET_CHECK(type == expectedTypes[i]) - << "Invalid " << tag << " tensor type " << type << " for " << tag << " " << i - << ", expected " << expectedTypes[i]; + << "Invalid " << tag << " tensor type " << toString(type) << " for " << tag << " " + << i << ", expected " << toString(expectedTypes[i]); } return true; } @@ -86,26 +88,26 @@ [context](uint32_t index) { return context->getOutputType(index); }); } -bool validateVersion(const IOperationValidationContext* context, Version contextVersion, - Version minSupportedVersion) { - if (contextVersion < minSupportedVersion) { +bool validateHalVersion(const IOperationValidationContext* context, + HalVersion minSupportedHalVersion) { + if (context->getHalVersion() < minSupportedHalVersion) { std::ostringstream message; message << "Operation " << context->getOperationName() << " with inputs {"; for (uint32_t i = 0, n = context->getNumInputs(); i < n; ++i) { if (i != 0) { message << ", "; } - message << context->getInputType(i); + message << toString(context->getInputType(i)); } message << "} and outputs {"; for (uint32_t i = 0, n = context->getNumOutputs(); i < n; ++i) { if (i != 0) { message << ", "; } - message << context->getOutputType(i); + message << toString(context->getOutputType(i)); } - message << "} is only supported since " << minSupportedVersion << " (validating using " - << contextVersion << ")"; + message << "} is only supported since " << toString(minSupportedHalVersion) + << " (validating using " << toString(context->getHalVersion()) << ")"; 
NN_RET_CHECK_FAIL() << message.str(); } return true; @@ -354,7 +356,7 @@ if (dim1 != dim2 && dim1 != 1 && dim2 != 1) { LOG(ERROR) << "Dimensions mismatch for broadcast:\n" << "First tensor: dimension " << numberOfDims1 - i << " of size " << dim1 - << "\nSecond tensor: dimension " << numberOfDims2 - i << " of size " << dim2; + << "\nSecond tensor: dimension " << numberOfDims2 - i << "of size " << dim2; return false; } out->dimensions[maxDims - i] = (dim1 == 1) ? dim2 : dim1; @@ -461,7 +463,9 @@ NN_OPS_CHECK(getNumberOfDimensions(valueShape) >= 2); NN_OPS_CHECK(getNumberOfDimensions(lookupShape) == 1); + const uint32_t rows = getSizeOfDimension(valueShape, 0); const uint32_t columns = getSizeOfDimension(valueShape, 1); + const uint32_t lookups = getSizeOfDimension(lookupShape, 0); outputShape->type = valueShape.type; @@ -482,6 +486,8 @@ NN_OPS_CHECK(getNumberOfDimensions(valueShape) >= 1); const uint32_t lookups = getSizeOfDimension(lookupShape, 0); + const uint32_t keys = getSizeOfDimension(keyShape, 0); + const uint32_t rows = getSizeOfDimension(valueShape, 0); outputShape->type = valueShape.type; outputShape->dimensions = {lookups}; for (uint32_t i = 1; i < getNumberOfDimensions(valueShape); i++) {
diff --git a/common/QuantUtils.h b/common/QuantUtils.h index f6bfd16..3da27e9 100644 --- a/common/QuantUtils.h +++ b/common/QuantUtils.h
@@ -10,8 +10,8 @@ #include <limits> #include <memory> -#include "LegacyUtils.h" #include "OperationsUtils.h" +#include "Utils.h" namespace android { namespace nn {
diff --git a/common/SharedMemory.cpp b/common/SharedMemory.cpp deleted file mode 100644 index 8b80430..0000000 --- a/common/SharedMemory.cpp +++ /dev/null
@@ -1,88 +0,0 @@ -/* - * Copyright (C) 2020 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "SharedMemory.h" - -#include <android-base/logging.h> - -#include <limits> -#include <optional> -#include <utility> -#include <variant> -#include <vector> - -#include "Result.h" -#include "TypeUtils.h" -#include "Types.h" - -namespace android::nn { - -MutableMemoryBuilder::MutableMemoryBuilder(uint32_t poolIndex) : mPoolIndex(poolIndex) {} - -DataLocation MutableMemoryBuilder::append(size_t length, size_t alignment, size_t padding) { - CHECK_GT(length, 0u); - mSize = roundUp(mSize, alignment); - const size_t offset = mSize; - const size_t paddedLength = roundUp(length, padding); - CHECK_LE(offset, std::numeric_limits<uint32_t>::max()); - CHECK_LE(paddedLength, std::numeric_limits<uint32_t>::max()); - mSize += paddedLength; - return {.poolIndex = mPoolIndex, - .offset = static_cast<uint32_t>(offset), - .length = static_cast<uint32_t>(length), - .padding = static_cast<uint32_t>(paddedLength - length)}; -} - -bool MutableMemoryBuilder::empty() const { - return mSize == 0; -} - -GeneralResult<SharedMemory> MutableMemoryBuilder::finish() { - return createSharedMemory(mSize); -} - -ConstantMemoryBuilder::ConstantMemoryBuilder(uint32_t poolIndex) : mBuilder(poolIndex) {} - -DataLocation ConstantMemoryBuilder::append(const void* data, size_t length) { - const auto location = mBuilder.append(length); - CHECK_EQ(location.length, 
length); - mSlices.push_back({.data = data, .length = length, .offset = location.offset}); - return location; -} - -bool ConstantMemoryBuilder::empty() const { - return mBuilder.empty(); -} - -GeneralResult<SharedMemory> ConstantMemoryBuilder::finish() { - // Allocate the memory. - auto memory = NN_TRY(mBuilder.finish()); - - // Map the memory. - const auto [pointer, size, context] = NN_TRY(map(memory);); - - // Get mutable pointer. - uint8_t* mutablePointer = static_cast<uint8_t*>(std::get<void*>(pointer)); - - // Copy data to the memory pool. - std::for_each(mSlices.begin(), mSlices.end(), [mutablePointer](const auto& slice) { - std::memcpy(mutablePointer + slice.offset, slice.data, slice.length); - }); - - return memory; -} - -} // namespace android::nn
diff --git a/common/SharedMemoryAndroid.cpp b/common/SharedMemoryAndroid.cpp deleted file mode 100644 index 4730de3..0000000 --- a/common/SharedMemoryAndroid.cpp +++ /dev/null
@@ -1,322 +0,0 @@ -/* - * Copyright (C) 2020 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#define LOG_TAG "SharedMemoryAndroid" - -#include <android-base/logging.h> -#include <android-base/mapped_file.h> -#include <android-base/scopeguard.h> -#include <android/hardware_buffer.h> - -#include <algorithm> -#include <any> -#include <iterator> -#include <limits> -#include <memory> -#include <string> -#include <utility> -#include <variant> -#include <vector> - -#include "Result.h" -#include "SharedMemory.h" -#include "TypeUtils.h" -#include "Types.h" - -#ifndef NN_COMPATIBILITY_LIBRARY_BUILD -#include <android/hidl/allocator/1.0/IAllocator.h> -#include <hidl/HidlSupport.h> -#include <hidlmemory/mapping.h> -#include <sys/mman.h> -#else -#include "DynamicCLDeps.h" -#endif // NN_COMPATIBILITY_LIBRARY_BUILD - -namespace android::nn { -namespace { - -GeneralResult<SharedMemory> createSharedMemoryFromUniqueFd(size_t size, int prot, - base::unique_fd fd, size_t offset) { - auto handle = Memory::Fd{ - .size = size, - .prot = prot, - .fd = std::move(fd), - .offset = offset, - }; - return std::make_shared<const Memory>(Memory{.handle = std::move(handle)}); -} - -#ifndef NN_COMPATIBILITY_LIBRARY_BUILD - -using ::android::hardware::hidl_memory; -using ::android::hidl::allocator::V1_0::IAllocator; - -const char* const kAllocatorService = "ashmem"; - -GeneralResult<hardware::hidl_handle> hidlHandleFromUniqueFd(base::unique_fd fd) { - 
native_handle_t* nativeHandle = native_handle_create(1, 0); - if (nativeHandle == nullptr) { - return NN_ERROR() << "Failed to create native_handle"; - } - nativeHandle->data[0] = fd.release(); - - hardware::hidl_handle hidlHandle; - hidlHandle.setTo(nativeHandle, /*shouldOwn=*/true); - return hidlHandle; -} - -GeneralResult<SharedMemory> allocateSharedMemory(size_t size) { - static const auto allocator = IAllocator::getService(kAllocatorService); - CHECK_GT(size, 0u); - - hidl_memory maybeMemory; - auto fn = [&maybeMemory](bool success, const hidl_memory& memory) { - if (success) { - maybeMemory = memory; - } - }; - allocator->allocate(size, fn); - - if (!maybeMemory.valid()) { - return NN_ERROR(ErrorStatus::GENERAL_FAILURE) - << "IAllocator::allocate returned an invalid (empty) memory object"; - } - if (maybeMemory.handle()->numFds != 1) { - return NN_ERROR() << "IAllocator::allocate returned an invalid memory object with " - << maybeMemory.handle()->numFds << " numFds, but expected 1"; - } - if (maybeMemory.handle()->numInts != 0) { - return NN_ERROR() << "IAllocator::allocate returned an invalid memory object with " - << maybeMemory.handle()->numInts << " numInts, but expected 0"; - } - - CHECK_LE(maybeMemory.size(), std::numeric_limits<size_t>::max()); - const int fd = maybeMemory.handle()->data[0]; - - auto handle = Memory::Ashmem{ - .fd = NN_TRY(dupFd(fd)), - .size = static_cast<size_t>(maybeMemory.size()), - }; - return std::make_shared<const Memory>(Memory{.handle = std::move(handle)}); -} - -GeneralResult<Mapping> map(const Memory::Ashmem& memory) { - auto handle = NN_TRY(hidlHandleFromUniqueFd(NN_TRY(dupFd(memory.fd)))); - const auto hidlMemory = hidl_memory("ashmem", std::move(handle), memory.size); - - const auto mapping = mapMemory(hidlMemory); - if (mapping == nullptr) { - return NN_ERROR(ErrorStatus::GENERAL_FAILURE) << "Failed to map memory"; - } - - auto* const pointer = mapping->getPointer().withDefault(nullptr); - if (pointer == nullptr) { - 
return NN_ERROR(ErrorStatus::GENERAL_FAILURE) << "Failed to get the mapped pointer"; - } - - const auto fullSize = mapping->getSize().withDefault(0); - if (fullSize == 0 || fullSize > std::numeric_limits<size_t>::max()) { - return NN_ERROR(ErrorStatus::GENERAL_FAILURE) << "Failed to get the mapped size"; - } - - const size_t size = static_cast<size_t>(fullSize); - - return Mapping{ - .pointer = pointer, - .size = size, - .context = mapping, - }; -} - -#else - -GeneralResult<SharedMemory> allocateSharedMemory(size_t size) { - CHECK_GT(size, 0u); - - const CompatibilityLayerMemory& memory = loadCompatibilityLayerMemory(); - auto fd = base::unique_fd(memory.create(nullptr, size)); - if (!fd.ok()) { - return NN_ERROR() << "ASharedMemory_create failed"; - } - - const size_t readSize = memory.getSize(fd.get()); - CHECK_GE(readSize, size); - - constexpr int prot = PROT_READ | PROT_WRITE; - constexpr size_t offset = 0; - return createSharedMemoryFromUniqueFd(size, prot, std::move(fd), offset); -} - -GeneralResult<Mapping> map(const Memory::Ashmem& /*memory*/) { - return NN_ERROR(ErrorStatus::INVALID_ARGUMENT) << "Cannot map ashmem memory"; -} - -#endif // NN_COMPATIBILITY_LIBRARY_BUILD - -size_t getSize(const Memory::Ashmem& memory) { - return memory.size; -} - -size_t getSize(const Memory::Fd& memory) { - return memory.size; -} - -size_t getSize(const Memory::HardwareBuffer& memory) { - AHardwareBuffer_Desc desc; - AHardwareBuffer_describe(memory.handle.get(), &desc); - return desc.format == AHARDWAREBUFFER_FORMAT_BLOB ? 
desc.width : 0; -} - -size_t getSize(const Memory::Unknown& memory) { - return memory.size; -} - -struct MmapFdMappingContext { - int prot; - std::any context; -}; - -GeneralResult<Mapping> map(const Memory::Fd& memory) { - std::shared_ptr<base::MappedFile> mapping = - base::MappedFile::FromFd(memory.fd, memory.offset, memory.size, memory.prot); - if (mapping == nullptr) { - return NN_ERROR() << "Can't mmap the file descriptor."; - } - char* data = mapping->data(); - - const bool writable = (memory.prot & PROT_WRITE) != 0; - std::variant<const void*, void*> pointer; - if (writable) { - pointer = static_cast<void*>(data); - } else { - pointer = static_cast<const void*>(data); - } - - auto context = MmapFdMappingContext{.prot = memory.prot, .context = std::move(mapping)}; - return Mapping{.pointer = pointer, .size = memory.size, .context = std::move(context)}; -} - -GeneralResult<Mapping> map(const Memory::HardwareBuffer& memory) { - AHardwareBuffer_Desc desc; - AHardwareBuffer_describe(memory.handle.get(), &desc); - - if (desc.format != AHARDWAREBUFFER_FORMAT_BLOB) { - return NN_ERROR() << "Unable to map non-blob AHardwareBuffer memory"; - } - const uint32_t size = desc.width; - - const uint64_t kCpuUsageMask = - AHARDWAREBUFFER_USAGE_CPU_READ_MASK | AHARDWAREBUFFER_USAGE_CPU_WRITE_MASK; - void* data = nullptr; - const auto status = AHardwareBuffer_lock(memory.handle.get(), desc.usage & kCpuUsageMask, -1, - nullptr, &data); - if (status != /*NO_ERROR*/ 0) { - return NN_ERROR() << "Can't lock the AHardwareBuffer. Error: " << status; - } - - // Create shared scoped object to munmap. 
- auto scoped = base::make_scope_guard( - [ahwb = memory.handle.get()] { AHardwareBuffer_unlock(ahwb, nullptr); }); - auto sharedScoped = std::make_shared<decltype(scoped)>(std::move(scoped)); - - return Mapping{.pointer = data, .size = size, .context = std::move(sharedScoped)}; -} - -GeneralResult<Mapping> map(const Memory::Unknown& /*memory*/) { - return NN_ERROR(ErrorStatus::INVALID_ARGUMENT) << "Cannot map Unknown memory"; -} - -void freeHardwareBuffer(AHardwareBuffer* buffer) { - if (buffer) { - AHardwareBuffer_release(buffer); - } -} - -void freeNoop(AHardwareBuffer* /*buffer*/) {} - -} // namespace - -GeneralResult<base::unique_fd> dupFd(int fd) { - if (fd < 0) { - return NN_ERROR(ErrorStatus::GENERAL_FAILURE) << "dupFd was passed an invalid fd"; - } - auto uniqueFd = base::unique_fd(dup(fd)); - if (!uniqueFd.ok()) { - // TODO(b/120417090): is ANEURALNETWORKS_UNEXPECTED_NULL the correct error to return here? - return NN_ERROR(ErrorStatus::GENERAL_FAILURE) << "Failed to dup the fd"; - } - return uniqueFd; -} - -GeneralResult<SharedMemory> createSharedMemory(size_t size) { - return allocateSharedMemory(size); -} - -GeneralResult<SharedMemory> createSharedMemoryFromFd(size_t size, int prot, int fd, size_t offset) { - return createSharedMemoryFromUniqueFd(size, prot, NN_TRY(dupFd(fd)), offset); -} - -GeneralResult<SharedMemory> createSharedMemoryFromAHWB(AHardwareBuffer* ahwb, bool takeOwnership) { - CHECK(ahwb != nullptr); - const Memory::HardwareBuffer::Deleter deleter = (takeOwnership ? 
freeHardwareBuffer : freeNoop); - Memory::HardwareBuffer handle = {.handle = Memory::HardwareBuffer::Handle(ahwb, deleter)}; - return std::make_shared<const Memory>(Memory{.handle = std::move(handle)}); -} - -size_t getSize(const SharedMemory& memory) { - CHECK(memory != nullptr); - return std::visit([](const auto& x) { return getSize(x); }, memory->handle); -} - -bool isAhwbBlob(const Memory::HardwareBuffer& memory) { - AHardwareBuffer* ahwb = memory.handle.get(); - AHardwareBuffer_Desc desc; - AHardwareBuffer_describe(ahwb, &desc); - return desc.format == AHARDWAREBUFFER_FORMAT_BLOB; -} - -bool isAhwbBlob(const SharedMemory& memory) { - CHECK(memory != nullptr); - if (!std::holds_alternative<Memory::HardwareBuffer>(memory->handle)) { - return false; - } - return isAhwbBlob(std::get<Memory::HardwareBuffer>(memory->handle)); -} - -GeneralResult<Mapping> map(const SharedMemory& memory) { - if (memory == nullptr) { - return NN_ERROR() << "Unable to map nullptr SharedMemory object"; - } - return std::visit([](const auto& x) { return map(x); }, memory->handle); -} - -bool flush(const Mapping& mapping) { - if (const auto* mmapFdMapping = std::any_cast<MmapFdMappingContext>(&mapping.context)) { - if (!std::holds_alternative<void*>(mapping.pointer)) { - return true; - } - void* data = std::get<void*>(mapping.pointer); - const int prot = mmapFdMapping->prot; - if (prot & PROT_WRITE) { - const size_t size = mapping.size; - return msync(data, size, MS_SYNC) == 0; - } - } - // No-op for other types of memory. - return true; -} - -} // namespace android::nn
diff --git a/common/TokenHasher.cpp b/common/TokenHasher.cpp index 2dd870b..76df02a 100644 --- a/common/TokenHasher.cpp +++ b/common/TokenHasher.cpp
@@ -18,11 +18,11 @@ #include "TokenHasher.h" +#include "NeuralNetworks.h" + #include <android-base/logging.h> #include <openssl/sha.h> -#include "NeuralNetworks.h" - namespace android { namespace nn {
diff --git a/common/TypeUtils.cpp b/common/TypeUtils.cpp deleted file mode 100644 index bfb1fb9..0000000 --- a/common/TypeUtils.cpp +++ /dev/null
@@ -1,1011 +0,0 @@ -/* - * Copyright (C) 2020 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "TypeUtils.h" - -#include <android-base/logging.h> - -#include <algorithm> -#include <chrono> -#include <limits> -#include <memory> -#include <ostream> -#include <type_traits> -#include <utility> -#include <vector> - -#include "OperandTypes.h" -#include "OperationTypes.h" -#include "Result.h" -#include "SharedMemory.h" -#include "Types.h" - -namespace android::nn { -namespace { - -template <typename Type> -constexpr std::underlying_type_t<Type> underlyingType(Type object) { - return static_cast<std::underlying_type_t<Type>>(object); -} - -template <typename Type> -std::ostream& operator<<(std::ostream& os, const std::vector<Type>& vec) { - constexpr size_t kMaxVectorPrint = 20; - os << "["; - size_t count = 0; - for (const auto& element : vec) { - if (count > 0) { - os << ", "; - } - os << element; - count++; - if (count >= kMaxVectorPrint) { - return os << "...]"; - } - } - return os << "]"; -} - -} // namespace - -bool isExtension(OperandType type) { - return getExtensionPrefix(underlyingType(type)) != 0; -} - -bool isExtension(OperationType type) { - return getExtensionPrefix(underlyingType(type)) != 0; -} - -bool isNonExtensionScalar(OperandType operandType) { - CHECK(!isExtension(operandType)); - switch (operandType) { - case OperandType::FLOAT32: - case OperandType::INT32: - case OperandType::UINT32: - case 
OperandType::BOOL: - case OperandType::FLOAT16: - case OperandType::SUBGRAPH: - case OperandType::OEM: - return true; - case OperandType::TENSOR_FLOAT32: - case OperandType::TENSOR_INT32: - case OperandType::TENSOR_QUANT8_ASYMM: - case OperandType::TENSOR_QUANT16_SYMM: - case OperandType::TENSOR_FLOAT16: - case OperandType::TENSOR_BOOL8: - case OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL: - case OperandType::TENSOR_QUANT16_ASYMM: - case OperandType::TENSOR_QUANT8_SYMM: - case OperandType::TENSOR_QUANT8_ASYMM_SIGNED: - case OperandType::TENSOR_OEM_BYTE: - return false; - } - return false; -} - -size_t getNonExtensionSize(OperandType operandType) { - CHECK(!isExtension(operandType)); - switch (operandType) { - case OperandType::SUBGRAPH: - case OperandType::OEM: - return 0; - case OperandType::TENSOR_QUANT8_ASYMM: - case OperandType::BOOL: - case OperandType::TENSOR_BOOL8: - case OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL: - case OperandType::TENSOR_QUANT8_SYMM: - case OperandType::TENSOR_QUANT8_ASYMM_SIGNED: - case OperandType::TENSOR_OEM_BYTE: - return 1; - case OperandType::TENSOR_QUANT16_SYMM: - case OperandType::TENSOR_FLOAT16: - case OperandType::FLOAT16: - case OperandType::TENSOR_QUANT16_ASYMM: - return 2; - case OperandType::FLOAT32: - case OperandType::INT32: - case OperandType::UINT32: - case OperandType::TENSOR_FLOAT32: - case OperandType::TENSOR_INT32: - return 4; - } - return 0; -} - -std::optional<size_t> getNonExtensionSize(OperandType operandType, const Dimensions& dimensions) { - CHECK(!isExtension(operandType)) << "Size of extension operand data is unknown"; - size_t size = getNonExtensionSize(operandType); - if (isNonExtensionScalar(operandType)) { - return size; - } else if (dimensions.empty()) { - return 0; - } - for (Dimension dimension : dimensions) { - if (dimension != 0 && size > std::numeric_limits<size_t>::max() / dimension) { - return std::nullopt; - } - size *= dimension; - } - return size; -} - -std::optional<size_t> 
getNonExtensionSize(const Operand& operand) { - return getNonExtensionSize(operand.type, operand.dimensions); -} - -size_t getOffsetFromInts(int lower, int higher) { - const int32_t lowBits = static_cast<int32_t>(lower); - const int32_t highBits = static_cast<int32_t>(higher); - const uint32_t lowOffsetBits = *reinterpret_cast<const uint32_t*>(&lowBits); - const uint32_t highOffsetBits = *reinterpret_cast<const uint32_t*>(&highBits); - const uint64_t offset = lowOffsetBits | (static_cast<uint64_t>(highOffsetBits) << 32); - return offset; -} - -std::pair<int32_t, int32_t> getIntsFromOffset(size_t offset) { - const uint64_t bits = static_cast<uint64_t>(offset); - const uint32_t lowBits = static_cast<uint32_t>(bits & 0xffffffff); - const uint32_t highBits = static_cast<uint32_t>(bits >> 32); - const int32_t lowOffsetBits = *reinterpret_cast<const int32_t*>(&lowBits); - const int32_t highOffsetBits = *reinterpret_cast<const int32_t*>(&highBits); - return std::make_pair(lowOffsetBits, highOffsetBits); -} - -Result<std::vector<uint32_t>> countNumberOfConsumers(size_t numberOfOperands, - const std::vector<nn::Operation>& operations) { - std::vector<uint32_t> numberOfConsumers(numberOfOperands, 0); - for (const auto& operation : operations) { - for (uint32_t operandIndex : operation.inputs) { - if (operandIndex >= numberOfConsumers.size()) { - return NN_ERROR() - << "countNumberOfConsumers: tried to access out-of-bounds operand (" - << operandIndex << " vs " << numberOfConsumers.size() << ")"; - } - numberOfConsumers[operandIndex]++; - } - } - return numberOfConsumers; -} - -Result<Dimensions> combineDimensions(const Dimensions& lhs, const Dimensions& rhs) { - if (rhs.empty()) return lhs; - if (lhs.empty()) return rhs; - if (lhs.size() != rhs.size()) { - std::ostringstream os; - os << "Incompatible ranks: " << lhs << " and " << rhs; - return NN_ERROR() << os.str(); - } - Dimensions combined = lhs; - for (size_t i = 0; i < lhs.size(); i++) { - if (lhs[i] == 0) { - 
combined[i] = rhs[i]; - } else if (rhs[i] != 0 && lhs[i] != rhs[i]) { - std::ostringstream os; - os << "Incompatible dimensions: " << lhs << " and " << rhs; - return NN_ERROR() << os.str(); - } - } - return combined; -} - -std::pair<size_t, std::vector<size_t>> getMemorySizes(const Model& model) { - const size_t operandValuesSize = model.operandValues.size(); - - std::vector<size_t> poolSizes; - poolSizes.reserve(model.pools.size()); - std::transform(model.pools.begin(), model.pools.end(), std::back_inserter(poolSizes), - [](const SharedMemory& memory) { return getSize(memory); }); - - return std::make_pair(operandValuesSize, std::move(poolSizes)); -} - -size_t roundUp(size_t size, size_t multiple) { - CHECK(multiple != 0); - CHECK((multiple & (multiple - 1)) == 0) << multiple << " is not a power of two"; - return (size + (multiple - 1)) & ~(multiple - 1); -} - -size_t getAlignmentForLength(size_t length) { - if (length < 2) { - return 1; // No alignment necessary - } else if (length < 4) { - return 2; // Align on 2-byte boundary - } else { - return 4; // Align on 4-byte boundary - } -} - -std::ostream& operator<<(std::ostream& os, const DeviceStatus& deviceStatus) { - switch (deviceStatus) { - case DeviceStatus::AVAILABLE: - return os << "AVAILABLE"; - case DeviceStatus::BUSY: - return os << "BUSY"; - case DeviceStatus::OFFLINE: - return os << "OFFLINE"; - case DeviceStatus::UNKNOWN: - return os << "UNKNOWN"; - } - return os << "DeviceStatus{" << underlyingType(deviceStatus) << "}"; -} - -std::ostream& operator<<(std::ostream& os, const ExecutionPreference& executionPreference) { - switch (executionPreference) { - case ExecutionPreference::LOW_POWER: - return os << "LOW_POWER"; - case ExecutionPreference::FAST_SINGLE_ANSWER: - return os << "FAST_SINGLE_ANSWER"; - case ExecutionPreference::SUSTAINED_SPEED: - return os << "SUSTAINED_SPEED"; - } - return os << "ExecutionPreference{" << underlyingType(executionPreference) << "}"; -} - -std::ostream& 
operator<<(std::ostream& os, const DeviceType& deviceType) { - switch (deviceType) { - case DeviceType::UNKNOWN: - return os << "UNKNOWN"; - case DeviceType::OTHER: - return os << "OTHER"; - case DeviceType::CPU: - return os << "CPU"; - case DeviceType::GPU: - return os << "GPU"; - case DeviceType::ACCELERATOR: - return os << "ACCELERATOR"; - } - return os << "DeviceType{" << underlyingType(deviceType) << "}"; -} - -std::ostream& operator<<(std::ostream& os, const MeasureTiming& measureTiming) { - switch (measureTiming) { - case MeasureTiming::NO: - return os << "NO"; - case MeasureTiming::YES: - return os << "YES"; - } - return os << "MeasureTiming{" << underlyingType(measureTiming) << "}"; -} - -std::ostream& operator<<(std::ostream& os, const OperandType& operandType) { - switch (operandType) { - case OperandType::FLOAT32: - return os << "FLOAT32"; - case OperandType::INT32: - return os << "INT32"; - case OperandType::UINT32: - return os << "UINT32"; - case OperandType::TENSOR_FLOAT32: - return os << "TENSOR_FLOAT32"; - case OperandType::TENSOR_INT32: - return os << "TENSOR_INT32"; - case OperandType::TENSOR_QUANT8_ASYMM: - return os << "TENSOR_QUANT8_ASYMM"; - case OperandType::BOOL: - return os << "BOOL"; - case OperandType::TENSOR_QUANT16_SYMM: - return os << "TENSOR_QUANT16_SYMM"; - case OperandType::TENSOR_FLOAT16: - return os << "TENSOR_FLOAT16"; - case OperandType::TENSOR_BOOL8: - return os << "TENSOR_BOOL8"; - case OperandType::FLOAT16: - return os << "FLOAT16"; - case OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL: - return os << "TENSOR_QUANT8_SYMM_PER_CHANNEL"; - case OperandType::TENSOR_QUANT16_ASYMM: - return os << "TENSOR_QUANT16_ASYMM"; - case OperandType::TENSOR_QUANT8_SYMM: - return os << "TENSOR_QUANT8_SYMM"; - case OperandType::TENSOR_QUANT8_ASYMM_SIGNED: - return os << "TENSOR_QUANT8_ASYMM_SIGNED"; - case OperandType::SUBGRAPH: - return os << "SUBGRAPH"; - case OperandType::OEM: - return os << "OEM"; - case OperandType::TENSOR_OEM_BYTE: - return 
os << "TENSOR_OEM_BYTE"; - } - if (isExtension(operandType)) { - return os << "Extension OperandType " << underlyingType(operandType); - } - return os << "OperandType{" << underlyingType(operandType) << "}"; -} - -std::ostream& operator<<(std::ostream& os, const Operand::LifeTime& lifetime) { - switch (lifetime) { - case Operand::LifeTime::TEMPORARY_VARIABLE: - return os << "TEMPORARY_VARIABLE"; - case Operand::LifeTime::SUBGRAPH_INPUT: - return os << "SUBGRAPH_INPUT"; - case Operand::LifeTime::SUBGRAPH_OUTPUT: - return os << "SUBGRAPH_OUTPUT"; - case Operand::LifeTime::CONSTANT_COPY: - return os << "CONSTANT_COPY"; - case Operand::LifeTime::CONSTANT_REFERENCE: - return os << "CONSTANT_REFERENCE"; - case Operand::LifeTime::NO_VALUE: - return os << "NO_VALUE"; - case Operand::LifeTime::SUBGRAPH: - return os << "SUBGRAPH"; - case Operand::LifeTime::POINTER: - return os << "POINTER"; - } - return os << "Operand::LifeTime{" << underlyingType(lifetime) << "}"; -} - -std::ostream& operator<<(std::ostream& os, const OperationType& operationType) { - switch (operationType) { - case OperationType::ADD: - return os << "ADD"; - case OperationType::AVERAGE_POOL_2D: - return os << "AVERAGE_POOL_2D"; - case OperationType::CONCATENATION: - return os << "CONCATENATION"; - case OperationType::CONV_2D: - return os << "CONV_2D"; - case OperationType::DEPTHWISE_CONV_2D: - return os << "DEPTHWISE_CONV_2D"; - case OperationType::DEPTH_TO_SPACE: - return os << "DEPTH_TO_SPACE"; - case OperationType::DEQUANTIZE: - return os << "DEQUANTIZE"; - case OperationType::EMBEDDING_LOOKUP: - return os << "EMBEDDING_LOOKUP"; - case OperationType::FLOOR: - return os << "FLOOR"; - case OperationType::FULLY_CONNECTED: - return os << "FULLY_CONNECTED"; - case OperationType::HASHTABLE_LOOKUP: - return os << "HASHTABLE_LOOKUP"; - case OperationType::L2_NORMALIZATION: - return os << "L2_NORMALIZATION"; - case OperationType::L2_POOL_2D: - return os << "L2_POOL_2D"; - case 
OperationType::LOCAL_RESPONSE_NORMALIZATION: - return os << "LOCAL_RESPONSE_NORMALIZATION"; - case OperationType::LOGISTIC: - return os << "LOGISTIC"; - case OperationType::LSH_PROJECTION: - return os << "LSH_PROJECTION"; - case OperationType::LSTM: - return os << "LSTM"; - case OperationType::MAX_POOL_2D: - return os << "MAX_POOL_2D"; - case OperationType::MUL: - return os << "MUL"; - case OperationType::RELU: - return os << "RELU"; - case OperationType::RELU1: - return os << "RELU1"; - case OperationType::RELU6: - return os << "RELU6"; - case OperationType::RESHAPE: - return os << "RESHAPE"; - case OperationType::RESIZE_BILINEAR: - return os << "RESIZE_BILINEAR"; - case OperationType::RNN: - return os << "RNN"; - case OperationType::SOFTMAX: - return os << "SOFTMAX"; - case OperationType::SPACE_TO_DEPTH: - return os << "SPACE_TO_DEPTH"; - case OperationType::SVDF: - return os << "SVDF"; - case OperationType::TANH: - return os << "TANH"; - case OperationType::BATCH_TO_SPACE_ND: - return os << "BATCH_TO_SPACE_ND"; - case OperationType::DIV: - return os << "DIV"; - case OperationType::MEAN: - return os << "MEAN"; - case OperationType::PAD: - return os << "PAD"; - case OperationType::SPACE_TO_BATCH_ND: - return os << "SPACE_TO_BATCH_ND"; - case OperationType::SQUEEZE: - return os << "SQUEEZE"; - case OperationType::STRIDED_SLICE: - return os << "STRIDED_SLICE"; - case OperationType::SUB: - return os << "SUB"; - case OperationType::TRANSPOSE: - return os << "TRANSPOSE"; - case OperationType::ABS: - return os << "ABS"; - case OperationType::ARGMAX: - return os << "ARGMAX"; - case OperationType::ARGMIN: - return os << "ARGMIN"; - case OperationType::AXIS_ALIGNED_BBOX_TRANSFORM: - return os << "AXIS_ALIGNED_BBOX_TRANSFORM"; - case OperationType::BIDIRECTIONAL_SEQUENCE_LSTM: - return os << "BIDIRECTIONAL_SEQUENCE_LSTM"; - case OperationType::BIDIRECTIONAL_SEQUENCE_RNN: - return os << "BIDIRECTIONAL_SEQUENCE_RNN"; - case OperationType::BOX_WITH_NMS_LIMIT: - return os << 
"BOX_WITH_NMS_LIMIT"; - case OperationType::CAST: - return os << "CAST"; - case OperationType::CHANNEL_SHUFFLE: - return os << "CHANNEL_SHUFFLE"; - case OperationType::DETECTION_POSTPROCESSING: - return os << "DETECTION_POSTPROCESSING"; - case OperationType::EQUAL: - return os << "EQUAL"; - case OperationType::EXP: - return os << "EXP"; - case OperationType::EXPAND_DIMS: - return os << "EXPAND_DIMS"; - case OperationType::GATHER: - return os << "GATHER"; - case OperationType::GENERATE_PROPOSALS: - return os << "GENERATE_PROPOSALS"; - case OperationType::GREATER: - return os << "GREATER"; - case OperationType::GREATER_EQUAL: - return os << "GREATER_EQUAL"; - case OperationType::GROUPED_CONV_2D: - return os << "GROUPED_CONV_2D"; - case OperationType::HEATMAP_MAX_KEYPOINT: - return os << "HEATMAP_MAX_KEYPOINT"; - case OperationType::INSTANCE_NORMALIZATION: - return os << "INSTANCE_NORMALIZATION"; - case OperationType::LESS: - return os << "LESS"; - case OperationType::LESS_EQUAL: - return os << "LESS_EQUAL"; - case OperationType::LOG: - return os << "LOG"; - case OperationType::LOGICAL_AND: - return os << "LOGICAL_AND"; - case OperationType::LOGICAL_NOT: - return os << "LOGICAL_NOT"; - case OperationType::LOGICAL_OR: - return os << "LOGICAL_OR"; - case OperationType::LOG_SOFTMAX: - return os << "LOG_SOFTMAX"; - case OperationType::MAXIMUM: - return os << "MAXIMUM"; - case OperationType::MINIMUM: - return os << "MINIMUM"; - case OperationType::NEG: - return os << "NEG"; - case OperationType::NOT_EQUAL: - return os << "NOT_EQUAL"; - case OperationType::PAD_V2: - return os << "PAD_V2"; - case OperationType::POW: - return os << "POW"; - case OperationType::PRELU: - return os << "PRELU"; - case OperationType::QUANTIZE: - return os << "QUANTIZE"; - case OperationType::QUANTIZED_16BIT_LSTM: - return os << "QUANTIZED_16BIT_LSTM"; - case OperationType::RANDOM_MULTINOMIAL: - return os << "RANDOM_MULTINOMIAL"; - case OperationType::REDUCE_ALL: - return os << "REDUCE_ALL"; - case 
OperationType::REDUCE_ANY: - return os << "REDUCE_ANY"; - case OperationType::REDUCE_MAX: - return os << "REDUCE_MAX"; - case OperationType::REDUCE_MIN: - return os << "REDUCE_MIN"; - case OperationType::REDUCE_PROD: - return os << "REDUCE_PROD"; - case OperationType::REDUCE_SUM: - return os << "REDUCE_SUM"; - case OperationType::ROI_ALIGN: - return os << "ROI_ALIGN"; - case OperationType::ROI_POOLING: - return os << "ROI_POOLING"; - case OperationType::RSQRT: - return os << "RSQRT"; - case OperationType::SELECT: - return os << "SELECT"; - case OperationType::SIN: - return os << "SIN"; - case OperationType::SLICE: - return os << "SLICE"; - case OperationType::SPLIT: - return os << "SPLIT"; - case OperationType::SQRT: - return os << "SQRT"; - case OperationType::TILE: - return os << "TILE"; - case OperationType::TOPK_V2: - return os << "TOPK_V2"; - case OperationType::TRANSPOSE_CONV_2D: - return os << "TRANSPOSE_CONV_2D"; - case OperationType::UNIDIRECTIONAL_SEQUENCE_LSTM: - return os << "UNIDIRECTIONAL_SEQUENCE_LSTM"; - case OperationType::UNIDIRECTIONAL_SEQUENCE_RNN: - return os << "UNIDIRECTIONAL_SEQUENCE_RNN"; - case OperationType::RESIZE_NEAREST_NEIGHBOR: - return os << "RESIZE_NEAREST_NEIGHBOR"; - case OperationType::QUANTIZED_LSTM: - return os << "QUANTIZED_LSTM"; - case OperationType::IF: - return os << "IF"; - case OperationType::WHILE: - return os << "WHILE"; - case OperationType::ELU: - return os << "ELU"; - case OperationType::HARD_SWISH: - return os << "HARD_SWISH"; - case OperationType::FILL: - return os << "FILL"; - case OperationType::RANK: - return os << "RANK"; - case OperationType::OEM_OPERATION: - return os << "OEM_OPERATION"; - } - if (isExtension(operationType)) { - return os << "Extension OperationType " << underlyingType(operationType); - } - return os << "OperationType{" << underlyingType(operationType) << "}"; -} - -std::ostream& operator<<(std::ostream& os, const Request::Argument::LifeTime& lifetime) { - switch (lifetime) { - case 
Request::Argument::LifeTime::POOL: - return os << "POOL"; - case Request::Argument::LifeTime::NO_VALUE: - return os << "NO_VALUE"; - case Request::Argument::LifeTime::POINTER: - return os << "POINTER"; - } - return os << "Request::Argument::LifeTime{" << underlyingType(lifetime) << "}"; -} - -std::ostream& operator<<(std::ostream& os, const Priority& priority) { - switch (priority) { - case Priority::LOW: - return os << "LOW"; - case Priority::MEDIUM: - return os << "MEDIUM"; - case Priority::HIGH: - return os << "HIGH"; - } - return os << "Priority{" << underlyingType(priority) << "}"; -} - -std::ostream& operator<<(std::ostream& os, const ErrorStatus& errorStatus) { - switch (errorStatus) { - case ErrorStatus::NONE: - return os << "NONE"; - case ErrorStatus::DEVICE_UNAVAILABLE: - return os << "DEVICE_UNAVAILABLE"; - case ErrorStatus::GENERAL_FAILURE: - return os << "GENERAL_FAILURE"; - case ErrorStatus::OUTPUT_INSUFFICIENT_SIZE: - return os << "OUTPUT_INSUFFICIENT_SIZE"; - case ErrorStatus::INVALID_ARGUMENT: - return os << "INVALID_ARGUMENT"; - case ErrorStatus::MISSED_DEADLINE_TRANSIENT: - return os << "MISSED_DEADLINE_TRANSIENT"; - case ErrorStatus::MISSED_DEADLINE_PERSISTENT: - return os << "MISSED_DEADLINE_PERSISTENT"; - case ErrorStatus::RESOURCE_EXHAUSTED_TRANSIENT: - return os << "RESOURCE_EXHAUSTED_TRANSIENT"; - case ErrorStatus::RESOURCE_EXHAUSTED_PERSISTENT: - return os << "RESOURCE_EXHAUSTED_PERSISTENT"; - case ErrorStatus::DEAD_OBJECT: - return os << "DEAD_OBJECT"; - } - return os << "ErrorStatus{" << underlyingType(errorStatus) << "}"; -} - -std::ostream& operator<<(std::ostream& os, const FusedActivationFunc& activation) { - switch (activation) { - case FusedActivationFunc::NONE: - return os << "NONE"; - case FusedActivationFunc::RELU: - return os << "RELU"; - case FusedActivationFunc::RELU1: - return os << "RELU1"; - case FusedActivationFunc::RELU6: - return os << "RELU6"; - } - return os << "FusedActivationFunc{" << underlyingType(activation) << 
"}"; -} - -std::ostream& operator<<(std::ostream& os, const OutputShape& outputShape) { - return os << "OutputShape{.dimensions=" << outputShape.dimensions - << ", .isSufficient=" << (outputShape.isSufficient ? "true" : "false") << "}"; -} - -std::ostream& operator<<(std::ostream& os, const Timing& timing) { - return os << "Timing{.timeOnDevice=" << timing.timeOnDevice - << ", .timeInDriver=" << timing.timeInDriver << "}"; -} - -std::ostream& operator<<(std::ostream& os, const Capabilities::PerformanceInfo& performanceInfo) { - return os << "Capabilities::PerformanceInfo{.execTime=" << performanceInfo.execTime - << ", .powerUsage=" << performanceInfo.powerUsage << "}"; -} - -std::ostream& operator<<(std::ostream& os, - const Capabilities::OperandPerformance& operandPerformance) { - return os << "Capabilities::OperandPerformance{.type=" << operandPerformance.type - << ", .info=" << operandPerformance.info << "}"; -} - -std::ostream& operator<<(std::ostream& os, - const Capabilities::OperandPerformanceTable& operandPerformances) { - return os << operandPerformances.asVector(); -} - -std::ostream& operator<<(std::ostream& os, const Capabilities& capabilities) { - return os << "Capabilities{.relaxedFloat32toFloat16PerformanceScalar=" - << capabilities.relaxedFloat32toFloat16PerformanceScalar - << ", .relaxedFloat32toFloat16PerformanceTensor=" - << capabilities.relaxedFloat32toFloat16PerformanceTensor - << ", .operandPerformance=" << capabilities.operandPerformance - << ", .ifPerformance=" << capabilities.ifPerformance - << ", .whilePerformance=" << capabilities.whilePerformance << "}"; -} - -std::ostream& operator<<(std::ostream& os, - const Extension::OperandTypeInformation& operandTypeInformation) { - return os << "Extension::OperandTypeInformation{.type=" << operandTypeInformation.type - << ", .isTensor=" << (operandTypeInformation.isTensor ? 
"true" : "false") - << ", .byteSize=" << operandTypeInformation.byteSize << "}"; -} - -std::ostream& operator<<(std::ostream& os, const Extension& extension) { - return os << "Extension{.name=" << extension.name - << ", .operandTypes=" << extension.operandTypes << "}"; -} - -std::ostream& operator<<(std::ostream& os, const DataLocation& location) { - const auto printPointer = [&os](const std::variant<const void*, void*>& pointer) { - os << (std::holds_alternative<const void*>(pointer) ? "<constant " : "<mutable "); - os << std::visit( - [](const auto* ptr) { - return ptr == nullptr ? "null pointer>" : "non-null pointer>"; - }, - pointer); - }; - os << "DataLocation{.pointer="; - printPointer(location.pointer); - return os << ", .poolIndex=" << location.poolIndex << ", .offset=" << location.offset - << ", .length=" << location.length << ", .padding=" << location.padding << "}"; -} - -std::ostream& operator<<(std::ostream& os, - const Operand::SymmPerChannelQuantParams& symmPerChannelQuantParams) { - return os << "Operand::SymmPerChannelQuantParams{.scales=" << symmPerChannelQuantParams.scales - << ", .channelDim=" << symmPerChannelQuantParams.channelDim << "}"; -} - -std::ostream& operator<<(std::ostream& os, const Operand::ExtraParams& extraParams) { - os << "Operand::ExtraParams{"; - if (std::holds_alternative<Operand::NoParams>(extraParams)) { - os << "<no params>"; - } else if (std::holds_alternative<Operand::SymmPerChannelQuantParams>(extraParams)) { - os << std::get<Operand::SymmPerChannelQuantParams>(extraParams); - } else if (std::holds_alternative<Operand::ExtensionParams>(extraParams)) { - os << std::get<Operand::ExtensionParams>(extraParams); - } - return os << "}"; -} - -std::ostream& operator<<(std::ostream& os, const Operand& operand) { - return os << "Operand{.type=" << operand.type << ", .dimensions=" << operand.dimensions - << ", .scale=" << operand.scale << ", .zeroPoint=" << operand.zeroPoint - << ", lifetime=" << operand.lifetime << ", 
.location=" << operand.location - << ", .extraParams=" << operand.extraParams << "}"; -} - -std::ostream& operator<<(std::ostream& os, const Operation& operation) { - return os << "Operation{.type=" << operation.type << ", .inputs=" << operation.inputs - << ", .outputs=" << operation.outputs << "}"; -} - -static std::ostream& operator<<(std::ostream& os, const Handle& handle) { - return os << "<handle with " << handle.fds.size() << " fds and " << handle.ints.size() - << " ints>"; -} - -std::ostream& operator<<(std::ostream& os, const SharedHandle& handle) { - if (handle == nullptr) { - return os << "<empty handle>"; - } - return os << *handle; -} - -static std::ostream& operator<<(std::ostream& os, const Memory::Ashmem& memory) { - return os << "Ashmem{.fd=" << (memory.fd.ok() ? "<valid fd>" : "<invalid fd>") - << ", .size=" << memory.size << "}"; -} - -static std::ostream& operator<<(std::ostream& os, const Memory::Fd& memory) { - return os << "Fd{.size=" << memory.size << ", .prot=" << memory.prot - << ", .fd=" << (memory.fd.ok() ? "<valid fd>" : "<invalid fd>") - << ", .offset=" << memory.offset << "}"; -} - -static std::ostream& operator<<(std::ostream& os, const Memory::HardwareBuffer& memory) { - if (memory.handle.get() == nullptr) { - return os << "<empty HardwareBuffer::Handle>"; - } - return os << (isAhwbBlob(memory) ? 
"<AHardwareBuffer blob>" : "<non-blob AHardwareBuffer>"); -} - -static std::ostream& operator<<(std::ostream& os, const Memory::Unknown& memory) { - return os << "Unknown{.handle=" << memory.handle << ", .size=" << memory.size - << ", .name=" << memory.name << "}"; -} - -std::ostream& operator<<(std::ostream& os, const Memory& memory) { - os << "Memory{.handle="; - std::visit([&os](const auto& x) { os << x; }, memory.handle); - return os << "}"; -} - -std::ostream& operator<<(std::ostream& os, const SharedMemory& memory) { - if (memory == nullptr) { - return os << "<empty memory>"; - } - return os << *memory; -} - -std::ostream& operator<<(std::ostream& os, const MemoryPreference& memoryPreference) { - return os << "MemoryPreference{.alignment=" << memoryPreference.alignment - << ", .padding=" << memoryPreference.padding << "}"; -} - -std::ostream& operator<<(std::ostream& os, const Model::Subgraph& subgraph) { - std::vector<Operand> operands; - std::vector<Operation> operations; - std::vector<uint32_t> inputIndexes; - std::vector<uint32_t> outputIndexes; - return os << "Model::Subgraph{.operands=" << subgraph.operands - << ", .operations=" << subgraph.operations - << ", .inputIndexes=" << subgraph.inputIndexes - << ", .outputIndexes=" << subgraph.outputIndexes << "}"; -} - -std::ostream& operator<<(std::ostream& os, const Model::OperandValues& operandValues) { - return os << "Model::OperandValues{<" << operandValues.size() << "bytes>}"; -} - -std::ostream& operator<<(std::ostream& os, - const Model::ExtensionNameAndPrefix& extensionNameAndPrefix) { - return os << "Model::ExtensionNameAndPrefix{.name=" << extensionNameAndPrefix.name - << ", .prefix=" << extensionNameAndPrefix.prefix << "}"; -} - -std::ostream& operator<<(std::ostream& os, const Model& model) { - return os << "Model{.main=" << model.main << ", .referenced=" << model.referenced - << ", .operandValues=" << model.operandValues << ", .pools=" << model.pools - << ", .relaxComputationFloat32toFloat16=" - 
<< (model.relaxComputationFloat32toFloat16 ? "true" : "false") - << ", extensionNameToPrefix=" << model.extensionNameToPrefix << "}"; -} - -std::ostream& operator<<(std::ostream& os, const BufferDesc& bufferDesc) { - return os << "BufferDesc{.dimensions=" << bufferDesc.dimensions << "}"; -} - -std::ostream& operator<<(std::ostream& os, const BufferRole& bufferRole) { - return os << "BufferRole{.modelIndex=" << bufferRole.modelIndex - << ", .ioIndex=" << bufferRole.ioIndex << ", .probability=" << bufferRole.probability - << "}"; -} - -std::ostream& operator<<(std::ostream& os, const Request::Argument& requestArgument) { - return os << "Request::Argument{.lifetime=" << requestArgument.lifetime - << ", .location=" << requestArgument.location - << ", .dimensions=" << requestArgument.dimensions << "}"; -} - -std::ostream& operator<<(std::ostream& os, const Request::MemoryPool& memoryPool) { - os << "Request::MemoryPool{"; - if (std::holds_alternative<SharedMemory>(memoryPool)) { - os << std::get<SharedMemory>(memoryPool); - } else if (std::holds_alternative<Request::MemoryDomainToken>(memoryPool)) { - const auto& token = std::get<Request::MemoryDomainToken>(memoryPool); - if (token == Request::MemoryDomainToken{}) { - os << "<invalid MemoryDomainToken>"; - } else { - os << "MemoryDomainToken=" << underlyingType(token); - } - } else if (std::holds_alternative<SharedBuffer>(memoryPool)) { - const auto& buffer = std::get<SharedBuffer>(memoryPool); - os << (buffer != nullptr ? 
"<non-null IBuffer>" : "<null IBuffer>"); - } - return os << "}"; -} - -std::ostream& operator<<(std::ostream& os, const Request& request) { - return os << "Request{.inputs=" << request.inputs << ", .outputs=" << request.outputs - << ", .pools=" << request.pools << "}"; -} - -std::ostream& operator<<(std::ostream& os, const SyncFence::FenceState& fenceState) { - switch (fenceState) { - case SyncFence::FenceState::ACTIVE: - return os << "ACTIVE"; - case SyncFence::FenceState::SIGNALED: - return os << "SIGNALED"; - case SyncFence::FenceState::ERROR: - return os << "ERROR"; - case SyncFence::FenceState::UNKNOWN: - return os << "UNKNOWN"; - } - return os << "SyncFence::FenceState{" << underlyingType(fenceState) << "}"; -} - -std::ostream& operator<<(std::ostream& os, const TimePoint& timePoint) { - return os << timePoint.time_since_epoch() << " since epoch"; -} - -std::ostream& operator<<(std::ostream& os, const OptionalTimePoint& optionalTimePoint) { - if (!optionalTimePoint.has_value()) { - return os << "<no time point>"; - } - return os << optionalTimePoint.value(); -} - -std::ostream& operator<<(std::ostream& os, const Duration& timeoutDuration) { - return os << timeoutDuration.count() << "ns"; -} - -std::ostream& operator<<(std::ostream& os, const OptionalDuration& optionalTimeoutDuration) { - if (!optionalTimeoutDuration.has_value()) { - return os << "<no duration>"; - } - return os << optionalTimeoutDuration.value(); -} - -std::ostream& operator<<(std::ostream& os, const Version& version) { - switch (version) { - case Version::ANDROID_OC_MR1: - return os << "ANDROID_OC_MR1"; - case Version::ANDROID_P: - return os << "ANDROID_P"; - case Version::ANDROID_Q: - return os << "ANDROID_Q"; - case Version::ANDROID_R: - return os << "ANDROID_R"; - case Version::ANDROID_S: - return os << "ANDROID_S"; - case Version::CURRENT_RUNTIME: - return os << "CURRENT_RUNTIME"; - } - return os << "Version{" << underlyingType(version) << "}"; -} - -std::ostream& 
operator<<(std::ostream& os, const HalVersion& halVersion) { - switch (halVersion) { - case HalVersion::UNKNOWN: - return os << "UNKNOWN HAL version"; - case HalVersion::V1_0: - return os << "HAL version 1.0"; - case HalVersion::V1_1: - return os << "HAL version 1.1"; - case HalVersion::V1_2: - return os << "HAL version 1.2"; - case HalVersion::V1_3: - return os << "HAL version 1.3"; - case HalVersion::AIDL_UNSTABLE: - return os << "HAL uses unstable AIDL"; - } - return os << "HalVersion{" << underlyingType(halVersion) << "}"; -} - -bool operator==(const Timing& a, const Timing& b) { - return a.timeOnDevice == b.timeOnDevice && a.timeInDriver == b.timeInDriver; -} -bool operator!=(const Timing& a, const Timing& b) { - return !(a == b); -} - -bool operator==(const Capabilities::PerformanceInfo& a, const Capabilities::PerformanceInfo& b) { - return a.execTime == b.execTime && a.powerUsage == b.powerUsage; -} -bool operator!=(const Capabilities::PerformanceInfo& a, const Capabilities::PerformanceInfo& b) { - return !(a == b); -} - -bool operator==(const Capabilities::OperandPerformance& a, - const Capabilities::OperandPerformance& b) { - return a.type == b.type && a.info == b.info; -} -bool operator!=(const Capabilities::OperandPerformance& a, - const Capabilities::OperandPerformance& b) { - return !(a == b); -} - -bool operator==(const Capabilities& a, const Capabilities& b) { - return a.relaxedFloat32toFloat16PerformanceScalar == - b.relaxedFloat32toFloat16PerformanceScalar && - a.relaxedFloat32toFloat16PerformanceTensor == - b.relaxedFloat32toFloat16PerformanceTensor && - a.operandPerformance.asVector() == b.operandPerformance.asVector() && - a.ifPerformance == b.ifPerformance && a.whilePerformance == b.whilePerformance; -} -bool operator!=(const Capabilities& a, const Capabilities& b) { - return !(a == b); -} - -bool operator==(const Extension::OperandTypeInformation& a, - const Extension::OperandTypeInformation& b) { - return a.type == b.type && a.isTensor == 
b.isTensor && a.byteSize == b.byteSize; -} -bool operator!=(const Extension::OperandTypeInformation& a, - const Extension::OperandTypeInformation& b) { - return !(a == b); -} - -bool operator==(const Extension& a, const Extension& b) { - return a.name == b.name && a.operandTypes == b.operandTypes; -} -bool operator!=(const Extension& a, const Extension& b) { - return !(a == b); -} - -bool operator==(const MemoryPreference& a, const MemoryPreference& b) { - return a.alignment == b.alignment && a.padding == b.padding; -} -bool operator!=(const MemoryPreference& a, const MemoryPreference& b) { - return !(a == b); -} - -bool operator==(const Operand::SymmPerChannelQuantParams& a, - const Operand::SymmPerChannelQuantParams& b) { - return a.scales == b.scales && a.channelDim == b.channelDim; -} -bool operator!=(const Operand::SymmPerChannelQuantParams& a, - const Operand::SymmPerChannelQuantParams& b) { - return !(a == b); -} - -static bool operator==(const DataLocation& a, const DataLocation& b) { - constexpr auto toTuple = [](const DataLocation& location) { - return std::tie(location.pointer, location.poolIndex, location.offset, location.length, - location.padding); - }; - return toTuple(a) == toTuple(b); -} - -bool operator==(const Operand& a, const Operand& b) { - constexpr auto toTuple = [](const Operand& operand) { - return std::tie(operand.type, operand.dimensions, operand.scale, operand.zeroPoint, - operand.lifetime, operand.location, operand.extraParams); - }; - return toTuple(a) == toTuple(b); -} -bool operator!=(const Operand& a, const Operand& b) { - return !(a == b); -} - -bool operator==(const Operation& a, const Operation& b) { - constexpr auto toTuple = [](const Operation& operation) { - return std::tie(operation.type, operation.inputs, operation.outputs); - }; - return toTuple(a) == toTuple(b); -} -bool operator!=(const Operation& a, const Operation& b) { - return !(a == b); -} - -} // namespace android::nn
diff --git a/common/Types.cpp b/common/Types.cpp deleted file mode 100644 index 6aa37e0..0000000 --- a/common/Types.cpp +++ /dev/null
@@ -1,188 +0,0 @@ -/* - * Copyright (C) 2020 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "Types.h" - -#include <android-base/logging.h> -#include <errno.h> -#include <poll.h> - -#include <algorithm> -#include <cstddef> -#include <iterator> -#include <limits> -#include <memory> -#include <optional> -#include <utility> -#include <vector> - -#include "OperandTypes.h" -#include "OperationTypes.h" -#include "Result.h" -#include "TypeUtils.h" - -namespace android::nn { - -// Ensure that std::vector<uint8_t>::data() will always have sufficient alignment to hold all NNAPI -// primitive types. "4" is chosen because that is the maximum alignment returned by -// `getAlignmentForLength`. However, this value will have to be changed if `getAlignmentForLength` -// returns a larger alignment. 
-static_assert(__STDCPP_DEFAULT_NEW_ALIGNMENT__ >= 4, "`New` alignment is not sufficient"); - -Model::OperandValues::OperandValues() { - constexpr size_t kNumberBytes = 4 * 1024; - mData.reserve(kNumberBytes); -} - -Model::OperandValues::OperandValues(const uint8_t* data, size_t length) - : mData(data, data + length) {} - -DataLocation Model::OperandValues::append(const uint8_t* data, size_t length) { - CHECK_GT(length, 0u); - CHECK_LE(length, std::numeric_limits<uint32_t>::max()); - const size_t alignment = getAlignmentForLength(length); - const size_t offset = roundUp(size(), alignment); - CHECK_LE(offset, std::numeric_limits<uint32_t>::max()); - mData.resize(offset + length); - CHECK_LE(size(), std::numeric_limits<uint32_t>::max()); - std::memcpy(mData.data() + offset, data, length); - return {.offset = static_cast<uint32_t>(offset), .length = static_cast<uint32_t>(length)}; -} - -const uint8_t* Model::OperandValues::data() const { - return mData.data(); -} - -size_t Model::OperandValues::size() const { - return mData.size(); -} - -Capabilities::OperandPerformanceTable::OperandPerformanceTable( - std::vector<OperandPerformance> operandPerformances) - : mSorted(std::move(operandPerformances)) {} - -Result<Capabilities::OperandPerformanceTable> Capabilities::OperandPerformanceTable::create( - std::vector<OperandPerformance> operandPerformances) { - const auto notUnique = [](const auto& lhs, const auto& rhs) { return !(lhs.type < rhs.type); }; - const bool isUnique = std::adjacent_find(operandPerformances.begin(), operandPerformances.end(), - notUnique) == operandPerformances.end(); - if (!isUnique) { - return NN_ERROR() << "Failed to create OperandPerformanceTable: Input must be sorted by " - "key (in ascending order), and there must be no duplicate keys"; - } - - return Capabilities::OperandPerformanceTable(std::move(operandPerformances)); -} - -Capabilities::PerformanceInfo Capabilities::OperandPerformanceTable::lookup( - OperandType operandType) const { - // 
Search for operand type in the sorted collection. - constexpr auto cmp = [](const auto& performance, auto type) { return performance.type < type; }; - const auto it = std::lower_bound(mSorted.begin(), mSorted.end(), operandType, cmp); - - // If the operand type is found, return its corresponding info. - if (it != mSorted.end() && it->type == operandType) { - return it->info; - } - - // If no performance info is defined, use the default value (float's max). - return Capabilities::PerformanceInfo{}; -} - -const std::vector<Capabilities::OperandPerformance>& -Capabilities::OperandPerformanceTable::asVector() const { - return mSorted; -} - -SyncFence SyncFence::createAsSignaled() { - return SyncFence(nullptr); -} - -SyncFence SyncFence::create(base::unique_fd fd) { - std::vector<base::unique_fd> fds; - fds.push_back(std::move(fd)); - return SyncFence(std::make_shared<const Handle>(Handle{ - .fds = std::move(fds), - .ints = {}, - })); -} - -Result<SyncFence> SyncFence::create(SharedHandle syncFence) { - const bool isValid = - (syncFence != nullptr && syncFence->fds.size() == 1 && syncFence->ints.empty()); - if (!isValid) { - return NN_ERROR() << "Invalid sync fence handle passed to SyncFence::create"; - } - return SyncFence(std::move(syncFence)); -} - -SyncFence::SyncFence(SharedHandle syncFence) : mSyncFence(std::move(syncFence)) {} - -SyncFence::FenceState SyncFence::syncWait(OptionalTimeout optionalTimeout) const { - if (mSyncFence == nullptr) { - return FenceState::SIGNALED; - } - - const int fd = mSyncFence->fds.front().get(); - const int timeout = optionalTimeout.value_or(Timeout{-1}).count(); - - // This implementation is directly based on the ::sync_wait() implementation. 
- - struct pollfd fds; - int ret; - - if (fd < 0) { - errno = EINVAL; - return FenceState::UNKNOWN; - } - - fds.fd = fd; - fds.events = POLLIN; - - do { - ret = poll(&fds, 1, timeout); - if (ret > 0) { - if (fds.revents & POLLNVAL) { - errno = EINVAL; - return FenceState::UNKNOWN; - } - if (fds.revents & POLLERR) { - errno = EINVAL; - return FenceState::ERROR; - } - return FenceState::SIGNALED; - } else if (ret == 0) { - errno = ETIME; - return FenceState::ACTIVE; - } - } while (ret == -1 && (errno == EINTR || errno == EAGAIN)); - - return FenceState::UNKNOWN; -} - -SharedHandle SyncFence::getSharedHandle() const { - return mSyncFence; -} - -bool SyncFence::hasFd() const { - return mSyncFence != nullptr; -} - -int SyncFence::getFd() const { - return mSyncFence == nullptr ? -1 : mSyncFence->fds.front().get(); -} - -} // namespace android::nn
diff --git a/common/Utils.cpp b/common/Utils.cpp new file mode 100644 index 0000000..1694c9c --- /dev/null +++ b/common/Utils.cpp
@@ -0,0 +1,3261 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#define LOG_TAG "Utils" + +#include "Utils.h" + +#include <android-base/logging.h> +#include <android-base/properties.h> +#include <android-base/strings.h> +#include <errno.h> +#include <poll.h> +#include <sys/system_properties.h> + +#include <algorithm> +#include <functional> +#include <iostream> +#include <limits> +#include <numeric> +#include <set> +#include <string> +#include <tuple> +#include <unordered_map> +#include <utility> +#include <vector> + +#include "ControlFlow.h" +#include "NeuralNetworks.h" +#include "NeuralNetworksOEM.h" +#include "OperationResolver.h" +#include "ValidateHal.h" + +namespace android { +namespace nn { + +using namespace hal; + +constexpr PerformanceInfo kNoPerformanceInfo = {.execTime = FLT_MAX, .powerUsage = FLT_MAX}; + +const char kVLogPropKey[] = "debug.nn.vlog"; +int vLogMask = ~0; + +// Split the space separated list of tags from verbose log setting and build the +// logging mask from it. note that '1' and 'all' are special cases to enable all +// verbose logging. +// +// NN API verbose logging setting comes from system property debug.nn.vlog. +// Example: +// setprop debug.nn.vlog 1 : enable all logging tags. +// setprop debug.nn.vlog "model compilation" : only enable logging for MODEL and +// COMPILATION tags. 
+void initVLogMask() { + vLogMask = 0; + const std::string vLogSetting = android::base::GetProperty(kVLogPropKey, ""); + if (vLogSetting.empty()) { + return; + } + + std::unordered_map<std::string, int> vLogFlags = {{"1", -1}, + {"all", -1}, + {"model", MODEL}, + {"compilation", COMPILATION}, + {"execution", EXECUTION}, + {"cpuexe", CPUEXE}, + {"manager", MANAGER}, + {"driver", DRIVER}, + {"memory", MEMORY}}; + + std::vector<std::string> elements = android::base::Split(vLogSetting, " ,:"); + for (const auto& elem : elements) { + const auto& flag = vLogFlags.find(elem); + if (flag == vLogFlags.end()) { + LOG(ERROR) << "Unknown trace flag: " << elem; + continue; + } + + if (flag->second == -1) { + // -1 is used for the special values "1" and "all" that enable all + // tracing. + vLogMask = ~0; + return; + } else { + vLogMask |= 1 << flag->second; + } + } +} + +Deadline makeDeadline(uint64_t duration) { + const auto maxTime = Deadline::max(); + const auto currentTime = std::chrono::steady_clock::now(); + + // Create Deadline. If there would be an overflow, use the max value. + const uint64_t remainingNanoseconds = + std::chrono::duration_cast<std::chrono::nanoseconds>(maxTime - currentTime).count(); + if (duration > remainingNanoseconds) { + return maxTime; + } + return currentTime + std::chrono::nanoseconds{duration}; +} + +std::optional<Deadline> makeDeadline(std::optional<uint64_t> duration) { + return duration.has_value() ? 
makeDeadline(*duration) : std::optional<Deadline>{}; +} + +static uint64_t getMaxNanosecondsSinceEpoch() { + const auto maxTime = + std::chrono::time_point<std::chrono::steady_clock, std::chrono::nanoseconds>::max(); + return maxTime.time_since_epoch().count(); +} + +std::optional<Deadline> makeDeadline(const OptionalTimePoint& timePoint) { + using Discriminator = hal::OptionalTimePoint::hidl_discriminator; + if (timePoint.getDiscriminator() == Discriminator::none) { + return std::nullopt; + } + const uint64_t nanosecondsSinceEpoch = timePoint.nanosecondsSinceEpoch(); + const uint64_t maxNanosecondsSinceEpoch = getMaxNanosecondsSinceEpoch(); + + // Clamp time point to max. + if (nanosecondsSinceEpoch >= maxNanosecondsSinceEpoch) { + return Deadline::max(); + } + + // Return provided time point. + return Deadline{std::chrono::nanoseconds{nanosecondsSinceEpoch}}; +} + +bool hasDeadlinePassed(const std::optional<Deadline>& deadline) { + if (!deadline.has_value()) { + return false; + } + return std::chrono::steady_clock::now() >= *deadline; +} + +static OptionalTimePoint makeTimePoint(const Deadline& deadline) { + const auto timeSinceEpoch = deadline.time_since_epoch(); + const uint64_t nanosecondsSinceEpoch = + std::chrono::duration_cast<std::chrono::nanoseconds>(timeSinceEpoch).count(); + OptionalTimePoint ret; + ret.nanosecondsSinceEpoch(nanosecondsSinceEpoch); + return ret; +} + +OptionalTimePoint makeTimePoint(const std::optional<Deadline>& deadline) { + return deadline.has_value() ? 
makeTimePoint(*deadline) : OptionalTimePoint{}; +} + +static bool isExtensionOperandType(int32_t type) { + return static_cast<uint32_t>(type) > static_cast<uint32_t>(OperandTypeRange::BASE_MAX); +} + +static bool isExtensionOperationType(ANeuralNetworksOperationType type) { + return static_cast<uint32_t>(type) > static_cast<uint32_t>(OperationTypeRange::BASE_MAX); +} + +bool isExtensionOperandType(OperandType type) { + return isExtensionOperandType(static_cast<int32_t>(type)); +} + +bool isExtensionOperationType(OperationType type) { + return isExtensionOperationType(static_cast<int32_t>(type)); +} + +namespace { + +template <typename EntryType, uint32_t entryCount, uint32_t entryCountOEM> +EntryType tableLookup(const EntryType (&table)[entryCount], + const EntryType (&tableOEM)[entryCountOEM], uint32_t code) { + if (code < entryCount) { + return table[code]; + } else if (code >= kOEMCodeBase && (code - kOEMCodeBase) < entryCountOEM) { + return tableOEM[code - kOEMCodeBase]; + } else { + nnAssert(!"tableLookup: bad code"); + return EntryType(); + } +} + +class OperationValidationContext : public IOperationValidationContext { + DISALLOW_IMPLICIT_CONSTRUCTORS(OperationValidationContext); + + public: + OperationValidationContext(const char* operationName, uint32_t inputCount, + const uint32_t* inputIndexes, uint32_t outputCount, + const uint32_t* outputIndexes, const Operand* operands, + HalVersion halVersion) + : operationName(operationName), + inputCount(inputCount), + inputIndexes(inputIndexes), + outputCount(outputCount), + outputIndexes(outputIndexes), + operands(operands), + halVersion(halVersion) {} + + const char* getOperationName() const override; + HalVersion getHalVersion() const override; + + uint32_t getNumInputs() const override; + OperandType getInputType(uint32_t index) const override; + Shape getInputShape(uint32_t index) const override; + const OperandExtraParams getInputExtraParams(uint32_t index) const override; + + uint32_t getNumOutputs() const 
override; + OperandType getOutputType(uint32_t index) const override; + Shape getOutputShape(uint32_t index) const override; + + private: + const Operand* getInputOperand(uint32_t index) const; + const Operand* getOutputOperand(uint32_t index) const; + + const char* operationName; + uint32_t inputCount; + const uint32_t* inputIndexes; + uint32_t outputCount; + const uint32_t* outputIndexes; + const Operand* operands; + HalVersion halVersion; +}; + +const char* OperationValidationContext::getOperationName() const { + return operationName; +} + +HalVersion OperationValidationContext::getHalVersion() const { + return halVersion; +} + +const Operand* OperationValidationContext::getInputOperand(uint32_t index) const { + CHECK(index < static_cast<uint32_t>(inputCount)); + return &operands[inputIndexes[index]]; +} + +const Operand* OperationValidationContext::getOutputOperand(uint32_t index) const { + CHECK(index < static_cast<uint32_t>(outputCount)); + return &operands[outputIndexes[index]]; +} + +uint32_t OperationValidationContext::getNumInputs() const { + return inputCount; +} + +uint32_t OperationValidationContext::getNumOutputs() const { + return outputCount; +} + +OperandType OperationValidationContext::getInputType(uint32_t index) const { + return getInputOperand(index)->type; +} + +Shape OperationValidationContext::getInputShape(uint32_t index) const { + const Operand* operand = getInputOperand(index); + return {operand->type, operand->dimensions, operand->scale, operand->zeroPoint, + operand->extraParams}; +} + +const OperandExtraParams OperationValidationContext::getInputExtraParams(uint32_t index) const { + return getInputOperand(index)->extraParams; +} + +OperandType OperationValidationContext::getOutputType(uint32_t index) const { + return getOutputOperand(index)->type; +} + +Shape OperationValidationContext::getOutputShape(uint32_t index) const { + const Operand* operand = getOutputOperand(index); + return {operand->type, operand->dimensions, 
operand->scale, operand->zeroPoint, + operand->extraParams}; +} + +}; // anonymous namespace + +#define COUNT(X) (sizeof(X) / sizeof(X[0])) + +std::string getOperandTypeName(OperandType type) { + return toString(type); +} + +static std::string getOperationName(uint32_t code) { + return getOperationName(static_cast<OperationType>(code)); +} + +std::string getOperationName(OperationType type) { + return toString(type); +} + +const uint32_t kSizeOfDataType[]{ + 4, // ANEURALNETWORKS_FLOAT32 + 4, // ANEURALNETWORKS_INT32 + 4, // ANEURALNETWORKS_UINT32 + 4, // ANEURALNETWORKS_TENSOR_FLOAT32 + 4, // ANEURALNETWORKS_TENSOR_INT32 + 1, // ANEURALNETWORKS_TENSOR_QUANT8_ASYMM + 1, // ANEURALNETWORKS_BOOL + 2, // ANEURALNETWORKS_TENSOR_QUANT16_SYMM + 2, // ANEURALNETWORKS_TENSOR_FLOAT16 + 1, // ANEURALNETWORKS_TENSOR_BOOL8 + 2, // ANEURALNETWORKS_FLOAT16 + 1, // ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL + 2, // ANEURALNETWORKS_TENSOR_QUANT16_ASYMM + 1, // ANEURALNETWORKS_TENSOR_QUANT8_SYMM + 1, // ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED + 0, // ANEURALNETWORKS_MODEL +}; + +static_assert(COUNT(kSizeOfDataType) == kNumberOfDataTypes, "kSizeOfDataType is incorrect"); + +const bool kScalarDataType[]{ + true, // ANEURALNETWORKS_FLOAT32 + true, // ANEURALNETWORKS_INT32 + true, // ANEURALNETWORKS_UINT32 + false, // ANEURALNETWORKS_TENSOR_FLOAT32 + false, // ANEURALNETWORKS_TENSOR_INT32 + false, // ANEURALNETWORKS_TENSOR_QUANT8_ASYMM + true, // ANEURALNETWORKS_BOOL + false, // ANEURALNETWORKS_TENSOR_QUANT16_SYMM + false, // ANEURALNETWORKS_TENSOR_FLOAT16 + false, // ANEURALNETWORKS_TENSOR_BOOL8 + true, // ANEURALNETWORKS_FLOAT16 + false, // ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL + false, // ANEURALNETWORKS_TENSOR_QUANT16_ASYMM + false, // ANEURALNETWORKS_TENSOR_QUANT8_SYMM + false, // ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED + true, // ANEURALNETWORKS_MODEL +}; + +static_assert(COUNT(kScalarDataType) == kNumberOfDataTypes, "kScalarDataType is incorrect"); + +const 
uint32_t kSizeOfDataTypeOEM[]{ + 0, // ANEURALNETWORKS_OEM + 1, // ANEURALNETWORKS_TENSOR_OEM_BYTE +}; + +static_assert(COUNT(kSizeOfDataTypeOEM) == kNumberOfDataTypesOEM, + "kSizeOfDataTypeOEM is incorrect"); + +const bool kScalarDataTypeOEM[]{ + true, // ANEURALNETWORKS_OEM + false, // ANEURALNETWORKS_TENSOR_OEM_BYTE +}; + +static_assert(COUNT(kScalarDataTypeOEM) == kNumberOfDataTypesOEM, + "kScalarDataTypeOEM is incorrect"); + +bool nonExtensionOperandTypeIsScalar(int type) { + CHECK(!isExtensionOperandType(type)) << "Extension operand types are not supported"; + return tableLookup(kScalarDataType, kScalarDataTypeOEM, type); +} + +uint32_t nonExtensionOperandSizeOfData(OperandType type, const std::vector<uint32_t>& dimensions) { + CHECK(!isExtensionOperandType(type)) << "Size of extension operand data is unknown"; + int n = static_cast<int>(type); + uint32_t sizeOfElement = tableLookup(kSizeOfDataType, kSizeOfDataTypeOEM, n); + return tableLookup(kScalarDataType, kScalarDataTypeOEM, n) + ? sizeOfElement + : sizeOfTensorData(sizeOfElement, dimensions); +} + +// Returns a pair of {false, size} on success, {true, 0} if size overflows uint32_t. 
+static std::pair<bool, uint32_t> sizeOfTensorDataHelper(uint32_t sizeOfElement, + const std::vector<uint32_t>& dimensions) { + if (dimensions.empty()) { + return {false, 0}; + } + uint64_t size = static_cast<uint64_t>(sizeOfElement); + constexpr uint64_t kMaxSize = static_cast<uint64_t>(std::numeric_limits<uint32_t>::max()); + for (uint32_t d : dimensions) { + size *= d; + if (size > kMaxSize) return {true, 0}; + } + return {false, static_cast<uint32_t>(size)}; +} + +uint32_t sizeOfTensorData(uint32_t sizeOfElement, const std::vector<uint32_t>& dimensions) { + const auto [overflow, size] = sizeOfTensorDataHelper(sizeOfElement, dimensions); + CHECK(!overflow); + return size; +} + +bool nonExtensionOperandSizeOfDataOverflowsUInt32(hal::OperandType type, + const std::vector<uint32_t>& dimensions) { + CHECK(!isExtensionOperandType(type)) << "Size of extension operand data is unknown"; + int n = static_cast<int>(type); + uint32_t sizeOfElement = tableLookup(kSizeOfDataType, kSizeOfDataTypeOEM, n); + return tableLookup(kScalarDataType, kScalarDataTypeOEM, n) + ? 
false + : sizeOfTensorDataOverflowsUInt32(sizeOfElement, dimensions); +} + +bool sizeOfTensorDataOverflowsUInt32(uint32_t sizeOfElement, + const std::vector<uint32_t>& dimensions) { + return sizeOfTensorDataHelper(sizeOfElement, dimensions).first; +} + +bool tensorHasUnspecifiedDimensions(int type, const uint32_t* dim, uint32_t dimCount) { + if (!isExtensionOperandType(type)) { + CHECK(!nonExtensionOperandTypeIsScalar(type)) + << "A scalar type can never have unspecified dimensions"; + } + return dimCount == 0 || std::find(dim, dim + dimCount, 0) != (dim + dimCount); +} + +bool tensorHasUnspecifiedDimensions(OperandType type, const std::vector<uint32_t>& dimensions) { + return tensorHasUnspecifiedDimensions(static_cast<int>(type), dimensions.data(), + dimensions.size()); +} + +bool tensorHasUnspecifiedDimensions(const ANeuralNetworksOperandType* type) { + return tensorHasUnspecifiedDimensions(type->type, type->dimensions, type->dimensionCount); +} + +bool tensorHasUnspecifiedDimensions(const Operand& operand) { + return tensorHasUnspecifiedDimensions(static_cast<int>(operand.type), operand.dimensions.data(), + operand.dimensions.size()); +} + +uint32_t alignBytesNeeded(uint32_t index, size_t length) { + uint32_t pattern; + if (length < 2) { + pattern = 0; // No alignment necessary + } else if (length < 4) { + pattern = 1; // Align on 2-byte boundary + } else { + pattern = 3; // Align on 4-byte boundary + } + uint32_t extra = (~(index - 1)) & pattern; + return extra; +} + +void logModelToInfo(const V1_0::Model& model) { + LOG(INFO) << "V1_0::Model start"; + LOG(INFO) << "operands" << toString(model.operands); + LOG(INFO) << "operations" << toString(model.operations); + LOG(INFO) << "inputIndexes" << toString(model.inputIndexes); + LOG(INFO) << "outputIndexes" << toString(model.outputIndexes); + LOG(INFO) << "operandValues size" << model.operandValues.size(); + LOG(INFO) << "pools" << SHOW_IF_DEBUG(toString(model.pools)); +} + +void logModelToInfo(const V1_1::Model& 
model) { + LOG(INFO) << "V1_1::Model start"; + LOG(INFO) << "operands" << toString(model.operands); + LOG(INFO) << "operations" << toString(model.operations); + LOG(INFO) << "inputIndexes" << toString(model.inputIndexes); + LOG(INFO) << "outputIndexes" << toString(model.outputIndexes); + LOG(INFO) << "operandValues size " << model.operandValues.size(); + LOG(INFO) << "pools" << SHOW_IF_DEBUG(toString(model.pools)); +} + +void logModelToInfo(const V1_2::Model& model) { + LOG(INFO) << "V1_2::Model start"; + LOG(INFO) << "operands" << toString(model.operands); + LOG(INFO) << "operations" << toString(model.operations); + LOG(INFO) << "inputIndexes" << toString(model.inputIndexes); + LOG(INFO) << "outputIndexes" << toString(model.outputIndexes); + LOG(INFO) << "operandValues size" << model.operandValues.size(); + LOG(INFO) << "pools" << SHOW_IF_DEBUG(toString(model.pools)); + LOG(INFO) << "relaxComputationFloat32toFloat16" << model.relaxComputationFloat32toFloat16; + LOG(INFO) << "extensionNameToPrefix" << toString(model.extensionNameToPrefix); +} + +static void logSubgraphToInfo(std::string label, const V1_3::Subgraph& subgraph) { + LOG(INFO) << label << ".operands" << toString(subgraph.operands); + LOG(INFO) << label << ".operations" << toString(subgraph.operations); + LOG(INFO) << label << ".inputIndexes" << toString(subgraph.inputIndexes); + LOG(INFO) << label << ".outputIndexes" << toString(subgraph.outputIndexes); +} + +void logModelToInfo(const V1_3::Model& model) { + LOG(INFO) << "V1_3::Model start"; + logSubgraphToInfo("main", model.main); + for (uint32_t i = 0, n = model.referenced.size(); i < n; ++i) { + logSubgraphToInfo("referenced[" + std::to_string(i) + "]", model.referenced[i]); + } + LOG(INFO) << "operandValues size " << model.operandValues.size(); + LOG(INFO) << "pools" << SHOW_IF_DEBUG(toString(model.pools)); + LOG(INFO) << "relaxComputationFloat32toFloat16 " << model.relaxComputationFloat32toFloat16; + LOG(INFO) << "extensionNameToPrefix" << 
toString(model.extensionNameToPrefix); +} + +bool validateOperandSymmPerChannelQuantParams( + const Operand& halOperand, const ANeuralNetworksSymmPerChannelQuantParams& channelQuant, + const char* tag) { + if (halOperand.type != OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL) { + return false; + } + + NN_RET_CHECK_LT(channelQuant.channelDim, halOperand.dimensions.size()) << tag; + NN_RET_CHECK(channelQuant.scales != nullptr) << tag; + NN_RET_CHECK_EQ(channelQuant.scaleCount, halOperand.dimensions[channelQuant.channelDim]) << tag; + NN_RET_CHECK_NE(halOperand.dimensions[channelQuant.channelDim], 0u) + << tag << " channel dimension " << channelQuant.channelDim << " is underspecified"; + for (uint32_t i = 0; i < halOperand.dimensions[channelQuant.channelDim]; i++) { + NN_RET_CHECK_GT(channelQuant.scales[i], 0.0f) << tag << " invalid scaleArray[" << i << "]"; + } + return true; +} + +static bool validateScalarDimensions(const ANeuralNetworksOperandType& type, const char* tag) { + NN_RET_CHECK_EQ(type.dimensionCount, 0u) << tag << " invalid dimensions for scalar type"; + NN_RET_CHECK(type.dimensions == nullptr) << tag << " invalid dimensions for scalar type"; + return true; +} + +static bool validateQuant8AsymmParams(const ANeuralNetworksOperandType& type, const char* tag) { + NN_RET_CHECK(0 <= type.zeroPoint && type.zeroPoint <= 255) + << tag << " invalid zeroPoint: " << type.zeroPoint; + NN_RET_CHECK_GT(type.scale, 0.f) << tag << " invalid scale"; + return true; +} + +static bool validateQuant8AsymmSignedParams(const ANeuralNetworksOperandType& type, + const char* tag) { + NN_RET_CHECK(-128 <= type.zeroPoint && type.zeroPoint <= 127) + << tag << " invalid zeroPoint: " << type.zeroPoint; + NN_RET_CHECK_GT(type.scale, 0.f) << tag << " invalid scale"; + return true; +} + +static bool validateQuant8SymmParams(const ANeuralNetworksOperandType& type, const char* tag) { + NN_RET_CHECK_EQ(type.zeroPoint, 0) << tag << " invalid zeroPoint: " << type.zeroPoint; + 
NN_RET_CHECK_GT(type.scale, 0.f) << tag << " invalid scale"; + return true; +} + +static bool validateQuant16AsymmParams(const ANeuralNetworksOperandType& type, const char* tag) { + NN_RET_CHECK(0 <= type.zeroPoint && type.zeroPoint <= 65535) + << tag << " invalid zeroPoint: " << type.zeroPoint; + NN_RET_CHECK_GT(type.scale, 0.f) << tag << " invalid scale"; + return true; +} + +static bool validateQuantSymmParams(const ANeuralNetworksOperandType& type, const char* tag) { + NN_RET_CHECK_EQ(type.zeroPoint, 0) << tag << " zeroPoint is not zero"; + NN_RET_CHECK_GT(type.scale, 0.f) << tag << " invalid scale"; + return true; +} + +static bool validateNoQuantParams(const ANeuralNetworksOperandType& type, const char* tag) { + NN_RET_CHECK_EQ(type.zeroPoint, 0) << tag << " zeroPoint is not zero"; + NN_RET_CHECK_EQ(type.scale, 0.f) << tag << " scale is not zero"; + return true; +} + +static bool validateTensorDimensions( + const ANeuralNetworksOperandType& type, + const Extension::OperandTypeInformation* const extensionOperandTypeInfo, const char* tag, + bool allowPartial) { + if (!allowPartial) { + NN_RET_CHECK_GT(type.dimensionCount, 0u) << tag << " invalid operand dimensions"; + } + uint64_t size = + isExtensionOperandType(type.type) + ? 
extensionOperandTypeInfo->byteSize + : tableLookup(kSizeOfDataType, kSizeOfDataTypeOEM, static_cast<int>(type.type)); + constexpr uint64_t kMaxSize = std::numeric_limits<uint32_t>::max(); + for (uint32_t i = 0; i < type.dimensionCount; i++) { + if (!allowPartial) { + NN_RET_CHECK_NE(type.dimensions[i], 0u) << tag << " invalid operand dimensions"; + } + if (type.dimensions[i] != 0) { + size *= type.dimensions[i]; + NN_RET_CHECK_LE(size, kMaxSize) << tag << " operand byte size exceeds " << kMaxSize; + } + } + return true; +} + +static bool validateOperandTypeHelper( + const ANeuralNetworksOperandType& type, + const Extension::OperandTypeInformation* const extensionOperandTypeInfo, const char* tag, + bool allowPartial) { + NN_RET_CHECK_EQ(type.dimensionCount == 0, type.dimensions == nullptr); + if (isExtensionOperandType(type.type)) { + NN_RET_CHECK(extensionOperandTypeInfo != nullptr); + if (extensionOperandTypeInfo->isTensor) { + NN_RET_CHECK( + validateTensorDimensions(type, extensionOperandTypeInfo, tag, allowPartial)); + } else { + NN_RET_CHECK(validateScalarDimensions(type, tag)); + } + return validateNoQuantParams(type, tag); + } + + NN_RET_CHECK(extensionOperandTypeInfo == nullptr); + NN_RET_CHECK(validCode(kNumberOfDataTypes, kNumberOfDataTypesOEM, type.type)) + << tag << " invalid OperandType: " << type.type; + + bool isScalar = tableLookup(kScalarDataType, kScalarDataTypeOEM, type.type); + if (isScalar) { + NN_RET_CHECK(validateScalarDimensions(type, tag)); + if (type.type != ANEURALNETWORKS_OEM_SCALAR) { // Historically, we have allowed OEM types + // to use quantization parameters. 
+ NN_RET_CHECK(validateNoQuantParams(type, tag)); + } + } else { + NN_RET_CHECK(validateTensorDimensions(type, extensionOperandTypeInfo, tag, allowPartial)); + if (type.type == ANEURALNETWORKS_TENSOR_QUANT8_ASYMM) { + NN_RET_CHECK(validateQuant8AsymmParams(type, tag)); + } else if (type.type == ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED) { + NN_RET_CHECK(validateQuant8AsymmSignedParams(type, tag)); + } else if (type.type == ANEURALNETWORKS_TENSOR_QUANT8_SYMM) { + NN_RET_CHECK(validateQuant8SymmParams(type, tag)); + } else if (type.type == ANEURALNETWORKS_TENSOR_QUANT16_ASYMM) { + NN_RET_CHECK(validateQuant16AsymmParams(type, tag)); + } else if (type.type == ANEURALNETWORKS_TENSOR_QUANT16_SYMM) { + NN_RET_CHECK(validateQuantSymmParams(type, tag)); + } else if (type.type == ANEURALNETWORKS_TENSOR_INT32) { + // TODO(b/119869082): TENSOR_INT32 should not use quantization parameters. + } else if (type.type == ANEURALNETWORKS_TENSOR_OEM_BYTE) { + // Historically, we have allowed OEM types to use quantization parameters. + } else { + NN_RET_CHECK(validateNoQuantParams(type, tag)); + } + } + + return true; +} + +int validateOperandType(const ANeuralNetworksOperandType& type, + const Extension::OperandTypeInformation* const extensionOperandTypeInfo, + const char* tag, bool allowPartial) { + return validateOperandTypeHelper(type, extensionOperandTypeInfo, tag, allowPartial) + ? 
ANEURALNETWORKS_NO_ERROR + : ANEURALNETWORKS_BAD_DATA; +} + +int validateOperandList(uint32_t count, const uint32_t* list, uint32_t operandCount, + const char* tag) { + for (uint32_t i = 0; i < count; i++) { + if (list[i] >= operandCount) { + LOG(ERROR) << tag << " invalid operand index at " << i << " = " << list[i] + << ", operandCount " << operandCount; + return ANEURALNETWORKS_BAD_DATA; + } + } + return ANEURALNETWORKS_NO_ERROR; +} + +int validateOperationOperandTypes(const std::vector<Operand>& operands, uint32_t inOperandCount, + const uint32_t* inOperandIndexes, + const std::vector<OperandType>& inExpectedTypes, + uint32_t outOperandCount, const uint32_t* outOperandIndexes, + const std::vector<OperandType>& outExpectedInTypes) { + if (inOperandCount != static_cast<uint32_t>(inExpectedTypes.size()) || + outOperandCount != static_cast<uint32_t>(outExpectedInTypes.size())) { + LOG(ERROR) << "Wrong operand count: expected " << inExpectedTypes.size() << " inputs and " + << outExpectedInTypes.size() << " outputs," + << "got " << inOperandCount << " inputs and " << outOperandCount << " outputs"; + return ANEURALNETWORKS_BAD_DATA; + } + for (uint32_t i = 0; i < inOperandCount; i++) { + if (operands[inOperandIndexes[i]].type != inExpectedTypes[i]) { + LOG(ERROR) << "Invalid input tensor type " + << toString(operands[inOperandIndexes[i]].type) << " for input " << i + << ", expected " << toString(inExpectedTypes[i]); + return ANEURALNETWORKS_BAD_DATA; + } + } + for (uint32_t i = 0; i < outOperandCount; i++) { + if (operands[outOperandIndexes[i]].type != outExpectedInTypes[i]) { + LOG(ERROR) << "Invalid output tensor type " + << toString(operands[outOperandIndexes[i]].type) << " for input " << i + << ", expected " << toString(outExpectedInTypes[i]); + return ANEURALNETWORKS_BAD_DATA; + } + } + + return ANEURALNETWORKS_NO_ERROR; +} + +static int validateHalVersion(ANeuralNetworksOperationType opType, HalVersion halVersion, + HalVersion minSupportedHalVersion) { + if 
(halVersion < minSupportedHalVersion) { + LOG(ERROR) << "The given inputs and outputs for operation " << getOperationName(opType) + << " are only supported in " << toString(minSupportedHalVersion) + << " and later (validating using " << toString(halVersion) << ")"; + return ANEURALNETWORKS_BAD_DATA; + } + return ANEURALNETWORKS_NO_ERROR; +} + +// Checks if two operands have the same types, ranks (if specified), dimensions +// (if specified), scales, zeroPoints, and extraParams. +static bool compatible(const Operand& a, const Operand& b) { + NN_RET_CHECK(a.type == b.type) << toString(a.type) << " != " << toString(b.type); + if (a.dimensions.size() != 0 && b.dimensions.size() != 0) { + NN_RET_CHECK_EQ(a.dimensions.size(), b.dimensions.size()) << "Incompatible dimensions"; + for (uint32_t i = 0, n = a.dimensions.size(); i < n; ++i) { + if (a.dimensions[i] != 0 && b.dimensions[i] != 0) { + NN_RET_CHECK_EQ(a.dimensions[i], b.dimensions[i]) << "Incompatible dimensions"; + } + } + } + NN_RET_CHECK_EQ(a.scale, b.scale); + NN_RET_CHECK_EQ(a.zeroPoint, b.zeroPoint); + NN_RET_CHECK(a.extraParams == b.extraParams) + << toString(a.extraParams) << " != " << toString(b.extraParams); + return true; +} + +static bool validateConditionOperand(const Operand& operand) { + NN_RET_CHECK(operand.type == OperandType::TENSOR_BOOL8) + << "Unexpected condition operand type: " << toString(operand.type); + NN_RET_CHECK_EQ(operand.dimensions.size(), 1u) << "Condition operand must be a singleton"; + NN_RET_CHECK_EQ(operand.dimensions[0], 1u) << "Condition operand must be a singleton"; + return true; +} + +static void checkSubgraphValidationHelper(const SubgraphValidationHelper& helper) { + CHECK(helper.isValidSubgraphReference != nullptr); + CHECK(helper.getSubgraphInputCount != nullptr); + CHECK(helper.getSubgraphOutputCount != nullptr); + CHECK(helper.getSubgraphInputOperand != nullptr); + CHECK(helper.getSubgraphOutputOperand != nullptr); +} + +static bool validateIfOperation(uint32_t 
inputCount, const uint32_t* inputs, uint32_t outputCount, + const uint32_t* outputs, const std::vector<Operand>& operands, + const SubgraphValidationHelper& helper) { + namespace op = operation_if; + checkSubgraphValidationHelper(helper); + NN_RET_CHECK_GE(inputCount, 3u) << "ANEURALNETWORKS_IF must have at least 3 inputs"; + NN_RET_CHECK_GE(outputCount, 1u) << "ANEURALNETWORKS_IF must have at least 1 output"; + auto validateBranchOperand = [&](const Operand& branchModelOperand) -> bool { + NN_RET_CHECK(helper.isValidSubgraphReference(branchModelOperand)) + << "Operand is not a valid subgraph reference"; + const uint32_t branchModelInputCount = helper.getSubgraphInputCount(branchModelOperand); + const uint32_t branchModelOutputCount = helper.getSubgraphOutputCount(branchModelOperand); + NN_RET_CHECK_EQ(inputCount, op::kFirstInput + branchModelInputCount); + NN_RET_CHECK_EQ(outputCount, branchModelOutputCount); + for (uint32_t i = 0; i < branchModelInputCount; ++i) { + const Operand& innerOperand = *helper.getSubgraphInputOperand(branchModelOperand, i); + const Operand& outerOperand = operands[inputs[op::kFirstInput + i]]; + NN_RET_CHECK(compatible(innerOperand, outerOperand)); + } + for (uint32_t i = 0; i < branchModelOutputCount; ++i) { + const Operand& innerOperand = *helper.getSubgraphOutputOperand(branchModelOperand, i); + const Operand& outerOperand = operands[outputs[i]]; + NN_RET_CHECK(compatible(innerOperand, outerOperand)); + } + return true; + }; + NN_RET_CHECK(validateConditionOperand(operands[inputs[op::kCondBoolOperand]])) + << "Validation failed for IF condition operand"; + NN_RET_CHECK(validateBranchOperand(operands[inputs[op::kThenModelOperand]])) + << "Validation failed for IF then model"; + NN_RET_CHECK(validateBranchOperand(operands[inputs[op::kElseModelOperand]])) + << "Validation failed for IF else model"; + return true; +} + +static bool validateControlFlowOperandUnknownSize(const SubgraphValidationHelper& helper, + const Operand& operand) { + 
if (!helper.allowControlFlowOperationWithOperandOfUnknownSize && + !isExtensionOperandType(operand.type)) { + NN_RET_CHECK_NE(nonExtensionOperandSizeOfData(operand.type, operand.dimensions), 0u); + } + return true; +} + +static bool validateWhileOperation(uint32_t inputCount, const uint32_t* inputs, + uint32_t outputCount, const uint32_t* outputs, + const std::vector<Operand>& operands, + const SubgraphValidationHelper& helper) { + // Let the loop have + // - m >= 1 input-output operands, + // - k >= 0 state-only operands, and + // - n >= 0 input-only operands. + // Then + // - the WHILE loop operation has (2 + m + k + n) inputs and m outputs. + // - the condition model has (m + k + n) inputs and 1 output. + // - the body model has (m + k + n) inputs and (m + k) outputs. + namespace op = operation_while; + checkSubgraphValidationHelper(helper); + NN_RET_CHECK_GE(inputCount, 3u) << "ANEURALNETWORKS_WHILE must have at least 3 inputs"; + NN_RET_CHECK_GE(outputCount, 1u) << "ANEURALNETWORKS_WHILE must have at least 1 output"; + auto validateCondOperand = [&](const Operand& condModelOperand) -> bool { + NN_RET_CHECK(helper.isValidSubgraphReference(condModelOperand)) + << "Operand is not a valid subgraph reference"; + const uint32_t condModelInputCount = helper.getSubgraphInputCount(condModelOperand); + const uint32_t condModelOutputCount = helper.getSubgraphOutputCount(condModelOperand); + NN_RET_CHECK_EQ(inputCount, op::kFirstInput + condModelInputCount); + NN_RET_CHECK_EQ(condModelOutputCount, 1u); + for (uint32_t i = 0; i < condModelInputCount; ++i) { + const Operand& innerOperand = *helper.getSubgraphInputOperand(condModelOperand, i); + const Operand& outerOperand = operands[inputs[op::kFirstInput + i]]; + NN_RET_CHECK(compatible(innerOperand, outerOperand)); + NN_RET_CHECK(validateControlFlowOperandUnknownSize(helper, innerOperand)); + NN_RET_CHECK(validateControlFlowOperandUnknownSize(helper, outerOperand)); + } + NN_RET_CHECK( + 
validateConditionOperand(*helper.getSubgraphOutputOperand(condModelOperand, 0))); + return true; + }; + auto validateBodyOperand = [&](const Operand& bodyModelOperand) -> bool { + NN_RET_CHECK(helper.isValidSubgraphReference(bodyModelOperand)) + << "Operand is not a valid subgraph reference"; + const uint32_t bodyModelInputCount = helper.getSubgraphInputCount(bodyModelOperand); + const uint32_t bodyModelOutputCount = helper.getSubgraphOutputCount(bodyModelOperand); + NN_RET_CHECK_EQ(inputCount, op::kFirstInput + bodyModelInputCount); + NN_RET_CHECK_GE(bodyModelOutputCount, outputCount); + NN_RET_CHECK_GE(bodyModelInputCount, bodyModelOutputCount); + const uint32_t inputOutputCount = outputCount; + const uint32_t stateOnlyCount = bodyModelOutputCount - inputOutputCount; + const uint32_t inputOnlyCount = bodyModelInputCount - bodyModelOutputCount; + for (uint32_t i = 0, n = inputOutputCount + stateOnlyCount + inputOnlyCount; i < n; ++i) { + const Operand& innerOperand = *helper.getSubgraphInputOperand(bodyModelOperand, i); + const Operand& outerOperand = operands[inputs[op::kFirstInput + i]]; + NN_RET_CHECK(compatible(innerOperand, outerOperand)); + NN_RET_CHECK(validateControlFlowOperandUnknownSize(helper, innerOperand)); + NN_RET_CHECK(validateControlFlowOperandUnknownSize(helper, outerOperand)); + } + for (uint32_t i = 0; i < inputOutputCount; ++i) { + const Operand& innerOperand = *helper.getSubgraphOutputOperand(bodyModelOperand, i); + const Operand& outerOperand = operands[outputs[i]]; + NN_RET_CHECK(compatible(innerOperand, outerOperand)); + NN_RET_CHECK(validateControlFlowOperandUnknownSize(helper, outerOperand)); + } + for (uint32_t i = 0, n = inputOutputCount + stateOnlyCount; i < n; ++i) { + const Operand& inputOperand = *helper.getSubgraphInputOperand(bodyModelOperand, i); + const Operand& outputOperand = *helper.getSubgraphOutputOperand(bodyModelOperand, i); + NN_RET_CHECK(compatible(inputOperand, outputOperand)); + 
NN_RET_CHECK(validateControlFlowOperandUnknownSize(helper, outputOperand)); + } + return true; + }; + NN_RET_CHECK(validateCondOperand(operands[inputs[op::kCondModelOperand]])) + << "Validation failed for WHILE condition model"; + NN_RET_CHECK(validateBodyOperand(operands[inputs[op::kBodyModelOperand]])) + << "Validation failed for WHILE body model"; + return true; +} + +static inline int validateOperation(ANeuralNetworksOperationType opType, uint32_t inputCount, + const uint32_t* inputIndexes, uint32_t outputCount, + const uint32_t* outputIndexes, + const std::vector<hal::Operand>& operands, + HalVersion halVersion) { + if (opType == ANEURALNETWORKS_IF || opType == ANEURALNETWORKS_WHILE) { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3)); + LOG(ERROR) << "This validateOperation() overload does not support control flow"; + return ANEURALNETWORKS_BAD_DATA; + } + return validateOperation(opType, inputCount, inputIndexes, outputCount, outputIndexes, operands, + halVersion, {}); +} + +int validateOperation(ANeuralNetworksOperationType opType, uint32_t inputCount, + const uint32_t* inputIndexes, uint32_t outputCount, + const uint32_t* outputIndexes, const std::vector<Operand>& operands, + HalVersion halVersion, const SubgraphValidationHelper& helper) { + NN_RETURN_IF_ERROR(validateOperandList(inputCount, inputIndexes, + static_cast<uint32_t>(operands.size()), + "ANeuralNetworksModel_addOperation inputs")); + NN_RETURN_IF_ERROR(validateOperandList(outputCount, outputIndexes, + static_cast<uint32_t>(operands.size()), + "ANeuralNetworksModel_addOperation outputs")); + + if (isExtensionOperationType(opType)) { + if (halVersion < HalVersion::V1_2) { + LOG(ERROR) + << "Extension operations are supported since HAL version 1.2, validating using " + << toString(halVersion); + return ANEURALNETWORKS_BAD_DATA; + } + // There is no other validation we can do for an extension operation. 
+ return ANEURALNETWORKS_NO_ERROR; + } + + auto logInvalidInOutNumber = [opType, inputCount, outputCount](int expIn, int expOut) { + LOG(ERROR) << "Invalid number of input operands (" << inputCount << ", expected " << expIn + << ") or output operands (" << outputCount << ", expected " << expOut + << ") for operation " << getOperationName(opType); + }; + + switch (opType) { + case ANEURALNETWORKS_OEM_OPERATION: { + return ANEURALNETWORKS_NO_ERROR; + } + case ANEURALNETWORKS_RESHAPE: { + if (inputCount != 2 || outputCount != 1) { + logInvalidInOutNumber(2, 1); + return ANEURALNETWORKS_BAD_DATA; + } + auto inputType = operands[inputIndexes[0]].type; + std::vector<OperandType> inExpectedTypes; + std::vector<OperandType> outExpectedTypes; + if (inputType == OperandType::TENSOR_FLOAT32) { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0)); + inExpectedTypes = {OperandType::TENSOR_FLOAT32, OperandType::TENSOR_INT32}; + outExpectedTypes = {OperandType::TENSOR_FLOAT32}; + } else if (inputType == OperandType::TENSOR_FLOAT16) { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); + inExpectedTypes = {OperandType::TENSOR_FLOAT16, OperandType::TENSOR_INT32}; + outExpectedTypes = {OperandType::TENSOR_FLOAT16}; + } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0)); + inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM, OperandType::TENSOR_INT32}; + outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM}; + } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3)); + inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM_SIGNED, + OperandType::TENSOR_INT32}; + outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM_SIGNED}; + } else { + LOG(ERROR) << "Unsupported input tensor type for operation " + << getOperationName(opType); + return ANEURALNETWORKS_BAD_DATA; + 
} + const auto inputRank = operands[inputIndexes[0]].dimensions.size(); + if (inputRank > 4) { + LOG(ERROR) << "Unsupported input tensor rank for operation " + << getOperationName(opType); + return ANEURALNETWORKS_BAD_DATA; + } + return validateOperationOperandTypes(operands, inputCount, inputIndexes, + inExpectedTypes, outputCount, outputIndexes, + outExpectedTypes); + } + case ANEURALNETWORKS_DEPTH_TO_SPACE: { + if ((inputCount != 3 && inputCount != 2) || outputCount != 1) { + LOG(ERROR) << "Invalid number of input operands (" << inputCount + << ", expected 3 or 2) or output operands (" << outputCount + << ", expected 1) for operation " << getOperationName(opType); + return ANEURALNETWORKS_BAD_DATA; + } + auto inputType = operands[inputIndexes[0]].type; + std::vector<OperandType> inExpectedTypes; + std::vector<OperandType> outExpectedTypes; + if (inputType == OperandType::TENSOR_FLOAT32) { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0)); + inExpectedTypes = {OperandType::TENSOR_FLOAT32, OperandType::INT32}; + outExpectedTypes = {OperandType::TENSOR_FLOAT32}; + } else if (inputType == OperandType::TENSOR_FLOAT16) { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); + inExpectedTypes = {OperandType::TENSOR_FLOAT16, OperandType::INT32}; + outExpectedTypes = {OperandType::TENSOR_FLOAT16}; + } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0)); + inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM, OperandType::INT32}; + outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM}; + } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3)); + inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM_SIGNED, OperandType::INT32}; + outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM_SIGNED}; + } else { + LOG(ERROR) << "Unsupported input tensor type 
for operation " + << getOperationName(opType); + return ANEURALNETWORKS_BAD_DATA; + } + if (inputCount == 3) { + inExpectedTypes.push_back(OperandType::BOOL); + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); + } else { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0)); + } + return validateOperationOperandTypes(operands, inputCount, inputIndexes, + inExpectedTypes, outputCount, outputIndexes, + outExpectedTypes); + } + case ANEURALNETWORKS_SPACE_TO_DEPTH: { + if ((inputCount != 3 && inputCount != 2) || outputCount != 1) { + LOG(ERROR) << "Invalid number of input operands (" << inputCount + << ", expected 3 or 2) or output operands (" << outputCount + << ", expected 1) for operation " << getOperationName(opType); + return ANEURALNETWORKS_BAD_DATA; + } + auto inputType = operands[inputIndexes[0]].type; + std::vector<OperandType> inExpectedTypes; + std::vector<OperandType> outExpectedTypes; + if (inputType == OperandType::TENSOR_FLOAT32) { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0)); + inExpectedTypes = {OperandType::TENSOR_FLOAT32, OperandType::INT32}; + outExpectedTypes = {OperandType::TENSOR_FLOAT32}; + } else if (inputType == OperandType::TENSOR_FLOAT16) { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); + inExpectedTypes = {OperandType::TENSOR_FLOAT16, OperandType::INT32}; + outExpectedTypes = {OperandType::TENSOR_FLOAT16}; + } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0)); + inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM, OperandType::INT32}; + outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM}; + } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3)); + inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM_SIGNED, OperandType::INT32}; + outExpectedTypes 
= {OperandType::TENSOR_QUANT8_ASYMM_SIGNED}; + } else { + LOG(ERROR) << "Unsupported input tensor type for operation " + << getOperationName(opType); + return ANEURALNETWORKS_BAD_DATA; + } + if (inputCount == 3) { + inExpectedTypes.push_back(OperandType::BOOL); + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); + } else { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0)); + } + return validateOperationOperandTypes(operands, inputCount, inputIndexes, + inExpectedTypes, outputCount, outputIndexes, + outExpectedTypes); + } + case ANEURALNETWORKS_EMBEDDING_LOOKUP: { + if (inputCount != 2 || outputCount != 1) { + logInvalidInOutNumber(2, 1); + return ANEURALNETWORKS_BAD_DATA; + } + auto inputType = operands[inputIndexes[1]].type; + if (inputType != OperandType::TENSOR_FLOAT16 && + inputType != OperandType::TENSOR_FLOAT32 && + inputType != OperandType::TENSOR_INT32 && + inputType != OperandType::TENSOR_QUANT8_ASYMM && + inputType != OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { + LOG(ERROR) << "Unsupported input tensor type for operation " + << getOperationName(opType); + return ANEURALNETWORKS_BAD_DATA; + } + std::vector<OperandType> inExpectedTypes = {OperandType::TENSOR_INT32, inputType}; + std::vector<OperandType> outExpectedTypes = {inputType}; + if (inputType == OperandType::TENSOR_FLOAT16 || + inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3)); + } else if (inputType == OperandType::TENSOR_INT32 || + inputType == OperandType::TENSOR_QUANT8_ASYMM) { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); + } else { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0)); + } + return validateOperationOperandTypes(operands, inputCount, inputIndexes, + inExpectedTypes, outputCount, outputIndexes, + outExpectedTypes); + } + case ANEURALNETWORKS_HASHTABLE_LOOKUP: { + if (inputCount != 3 
|| outputCount != 2) { + logInvalidInOutNumber(3, 2); + return ANEURALNETWORKS_BAD_DATA; + } + auto inputType = operands[inputIndexes[2]].type; + if (inputType != OperandType::TENSOR_FLOAT32 && + inputType != OperandType::TENSOR_INT32 && + inputType != OperandType::TENSOR_QUANT8_ASYMM) { + LOG(ERROR) << "Unsupported input tensor type for operation " + << getOperationName(opType); + return ANEURALNETWORKS_BAD_DATA; + } + std::vector<OperandType> inExpectedTypes = {OperandType::TENSOR_INT32, + OperandType::TENSOR_INT32, inputType}; + std::vector<OperandType> outExpectedTypes = {inputType, + OperandType::TENSOR_QUANT8_ASYMM}; + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0)); + return validateOperationOperandTypes(operands, inputCount, inputIndexes, + inExpectedTypes, outputCount, outputIndexes, + outExpectedTypes); + } + case ANEURALNETWORKS_LSH_PROJECTION: { + if (inputCount != 4 || outputCount != 1) { + logInvalidInOutNumber(4, 1); + return ANEURALNETWORKS_BAD_DATA; + } + auto inputType = operands[inputIndexes[1]].type; + if (inputType != OperandType::TENSOR_FLOAT16 && + inputType != OperandType::TENSOR_FLOAT32 && + inputType != OperandType::TENSOR_INT32 && + inputType != OperandType::TENSOR_QUANT8_ASYMM) { + LOG(ERROR) << "Unsupported input tensor type for operation " + << getOperationName(opType); + return ANEURALNETWORKS_BAD_DATA; + } + auto hashType = operands[inputIndexes[0]].type; + std::vector<OperandType> inExpectedTypes; + if (hashType == OperandType::TENSOR_FLOAT16) { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); + inExpectedTypes = { + OperandType::TENSOR_FLOAT16, + inputType, + OperandType::TENSOR_FLOAT16, + OperandType::INT32, + }; + } else if (hashType == OperandType::TENSOR_FLOAT32) { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0)); + inExpectedTypes = { + OperandType::TENSOR_FLOAT32, + inputType, + OperandType::TENSOR_FLOAT32, + OperandType::INT32, + }; + } 
else { + LOG(ERROR) << "Unsupported hash tensor type for operation " + << getOperationName(opType); + return ANEURALNETWORKS_BAD_DATA; + } + std::vector<OperandType> outExpectedTypes = {OperandType::TENSOR_INT32}; + return validateOperationOperandTypes(operands, inputCount, inputIndexes, + inExpectedTypes, outputCount, outputIndexes, + outExpectedTypes); + } + case ANEURALNETWORKS_BIDIRECTIONAL_SEQUENCE_LSTM: { + const uint32_t kNumOutputs = 2; + const uint32_t kNumOutputsMerged = 1; + const uint32_t kNumOutputsWithState = 6; + const uint32_t kNumOutputsMergedWithState = 5; + if (inputCount != 61 || + (outputCount != kNumOutputs && outputCount != kNumOutputsMerged && + outputCount != kNumOutputsWithState && + outputCount != kNumOutputsMergedWithState)) { + LOG(ERROR) << "Invalid number of input operands (" << inputCount + << ", expected 61) or output operands (" << outputCount + << ", expected 1, 2, 5 or 6) for operation " << getOperationName(opType); + return ANEURALNETWORKS_BAD_DATA; + } + + std::vector<OperandType> inExpectedTypes; + auto inputType = operands[inputIndexes[0]].type; + if (inputType != OperandType::TENSOR_FLOAT32 && + inputType != OperandType::TENSOR_FLOAT16) { + LOG(ERROR) << "Unsupported input tensor type for operation " + << getOperationName(opType); + return ANEURALNETWORKS_BAD_DATA; + } + + inExpectedTypes = {}; + for (int i = 0; i < 48; ++i) { + inExpectedTypes.push_back(inputType); + } + inExpectedTypes.push_back(OperandType::INT32); + inExpectedTypes.push_back(inputType == OperandType::TENSOR_FLOAT32 + ? OperandType::FLOAT32 + : OperandType::FLOAT16); + inExpectedTypes.push_back(inputType == OperandType::TENSOR_FLOAT32 + ? 
OperandType::FLOAT32 + : OperandType::FLOAT16); + inExpectedTypes.push_back(OperandType::BOOL); + inExpectedTypes.push_back(OperandType::BOOL); + for (int i = 0; i < 8; ++i) { + inExpectedTypes.push_back(inputType); + } + + HalVersion minSupportedHalVersion = HalVersion::V1_2; + if (outputCount == kNumOutputsWithState || outputCount == kNumOutputsMergedWithState) { + minSupportedHalVersion = HalVersion::V1_3; + } + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, minSupportedHalVersion)); + std::vector<OperandType> outExpectedTypes(outputCount, inputType); + auto status = validateOperationOperandTypes(operands, inputCount, inputIndexes, + inExpectedTypes, outputCount, outputIndexes, + outExpectedTypes); + return status; + } + case ANEURALNETWORKS_LSTM: { + if ((inputCount != 23 && inputCount != 27) || outputCount != 4) { + LOG(ERROR) << "Invalid number of input operands (" << inputCount + << ", expected 23 or 27) or output operands (" << outputCount + << ", expected 4) for operation " << getOperationName(opType); + return ANEURALNETWORKS_BAD_DATA; + } + std::vector<OperandType> inExpectedTypes; + std::vector<OperandType> outExpectedTypes; + auto inputType = operands[inputIndexes[0]].type; + if (inputType != OperandType::TENSOR_FLOAT32 && + inputType != OperandType::TENSOR_FLOAT16) { + LOG(ERROR) << "Unsupported input tensor type for operation " + << getOperationName(opType); + return ANEURALNETWORKS_BAD_DATA; + } + + inExpectedTypes = {inputType, inputType, inputType, inputType, inputType, + inputType, inputType, inputType, inputType, inputType, + inputType, inputType, inputType, inputType, inputType, + inputType, inputType, inputType, inputType, inputType, + OperandType::INT32}; + if (inputType == OperandType::TENSOR_FLOAT32) { + inExpectedTypes.push_back(OperandType::FLOAT32); + inExpectedTypes.push_back(OperandType::FLOAT32); + } else { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); + 
inExpectedTypes.push_back(OperandType::FLOAT16); + inExpectedTypes.push_back(OperandType::FLOAT16); + } + + outExpectedTypes = {inputType, inputType, inputType, inputType}; + if (inputCount == 23) { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0)); + } else { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); + for (int i = 0; i < 4; ++i) { + inExpectedTypes.push_back(inputType); + } + } + return validateOperationOperandTypes(operands, inputCount, inputIndexes, + inExpectedTypes, outputCount, outputIndexes, + outExpectedTypes); + } + case ANEURALNETWORKS_QUANTIZED_16BIT_LSTM: { + if (inputCount != 15 || outputCount != 2) { + logInvalidInOutNumber(15, 2); + return ANEURALNETWORKS_BAD_DATA; + } + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); + std::vector<OperandType> inExpectedTypes = { + OperandType::TENSOR_QUANT8_ASYMM, OperandType::TENSOR_QUANT8_ASYMM, + OperandType::TENSOR_QUANT8_ASYMM, OperandType::TENSOR_QUANT8_ASYMM, + OperandType::TENSOR_QUANT8_ASYMM, OperandType::TENSOR_QUANT8_ASYMM, + OperandType::TENSOR_QUANT8_ASYMM, OperandType::TENSOR_QUANT8_ASYMM, + OperandType::TENSOR_QUANT8_ASYMM, OperandType::TENSOR_INT32, + OperandType::TENSOR_INT32, OperandType::TENSOR_INT32, + OperandType::TENSOR_INT32, OperandType::TENSOR_QUANT16_SYMM, + OperandType::TENSOR_QUANT8_ASYMM}; + std::vector<OperandType> outExpectedTypes = {OperandType::TENSOR_QUANT16_SYMM, + OperandType::TENSOR_QUANT8_ASYMM}; + return validateOperationOperandTypes(operands, inputCount, inputIndexes, + inExpectedTypes, outputCount, outputIndexes, + outExpectedTypes); + } + case ANEURALNETWORKS_RANDOM_MULTINOMIAL: { + if (inputCount != 3 || outputCount != 1) { + logInvalidInOutNumber(3, 1); + return ANEURALNETWORKS_BAD_DATA; + } + OperandType inputType = operands[inputIndexes[0]].type; + std::vector<OperandType> inExpectedTypes; + if (inputType == OperandType::TENSOR_FLOAT32 || + inputType == 
OperandType::TENSOR_FLOAT16) { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); + inExpectedTypes = { + inputType, + OperandType::INT32, + OperandType::TENSOR_INT32, + }; + } else { + LOG(ERROR) << "Unsupported input tensor type for operation " + << getOperationName(opType); + return ANEURALNETWORKS_BAD_DATA; + } + std::vector<OperandType> outExpectedTypes = {OperandType::TENSOR_INT32}; + return validateOperationOperandTypes(operands, inputCount, inputIndexes, + inExpectedTypes, outputCount, outputIndexes, + outExpectedTypes); + } + case ANEURALNETWORKS_RNN: { + if (inputCount != 6 || outputCount != 2) { + logInvalidInOutNumber(6, 2); + return ANEURALNETWORKS_BAD_DATA; + } + OperandType inputType = operands[inputIndexes[0]].type; + std::vector<OperandType> inExpectedTypes; + std::vector<OperandType> outExpectedTypes; + if (inputType == OperandType::TENSOR_FLOAT32) { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0)); + inExpectedTypes = { + OperandType::TENSOR_FLOAT32, OperandType::TENSOR_FLOAT32, + OperandType::TENSOR_FLOAT32, OperandType::TENSOR_FLOAT32, + OperandType::TENSOR_FLOAT32, OperandType::INT32, + }; + outExpectedTypes = { + OperandType::TENSOR_FLOAT32, + OperandType::TENSOR_FLOAT32, + }; + } else if (inputType == OperandType::TENSOR_FLOAT16) { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); + inExpectedTypes = { + OperandType::TENSOR_FLOAT16, OperandType::TENSOR_FLOAT16, + OperandType::TENSOR_FLOAT16, OperandType::TENSOR_FLOAT16, + OperandType::TENSOR_FLOAT16, OperandType::INT32, + }; + outExpectedTypes = { + OperandType::TENSOR_FLOAT16, + OperandType::TENSOR_FLOAT16, + }; + } else { + LOG(ERROR) << "Unsupported input tensor type for operation " + << getOperationName(opType); + return ANEURALNETWORKS_BAD_DATA; + } + return validateOperationOperandTypes(operands, inputCount, inputIndexes, + inExpectedTypes, outputCount, outputIndexes, + outExpectedTypes); + } + 
case ANEURALNETWORKS_SVDF: { + if (inputCount != 7 || outputCount != 2) { + logInvalidInOutNumber(7, 2); + return ANEURALNETWORKS_BAD_DATA; + } + OperandType inputType = operands[inputIndexes[0]].type; + if (inputType == OperandType::TENSOR_FLOAT32) { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0)); + + } else if (inputType == OperandType::TENSOR_FLOAT16) { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); + } else { + LOG(ERROR) << "Unsupported input tensor type for operation " + << getOperationName(opType); + return ANEURALNETWORKS_BAD_DATA; + } + std::vector<OperandType> inExpectedTypes = { + inputType, inputType, inputType, inputType, + inputType, OperandType::INT32, OperandType::INT32, + }; + std::vector<OperandType> outExpectedTypes = {inputType, inputType}; + return validateOperationOperandTypes(operands, inputCount, inputIndexes, + inExpectedTypes, outputCount, outputIndexes, + outExpectedTypes); + } + case ANEURALNETWORKS_BATCH_TO_SPACE_ND: { + if ((inputCount != 3 && inputCount != 2) || outputCount != 1) { + LOG(ERROR) << "Invalid number of input operands (" << inputCount + << ", expected 3 or 2) or output operands (" << outputCount + << ", expected 1) for operation " << getOperationName(opType); + return ANEURALNETWORKS_BAD_DATA; + } + auto inputType = operands[inputIndexes[0]].type; + std::vector<OperandType> inExpectedTypes; + std::vector<OperandType> outExpectedTypes; + if (inputType == OperandType::TENSOR_FLOAT32) { + inExpectedTypes = { + OperandType::TENSOR_FLOAT32, + OperandType::TENSOR_INT32, + }; + outExpectedTypes = {OperandType::TENSOR_FLOAT32}; + } else if (inputType == OperandType::TENSOR_FLOAT16) { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); + inExpectedTypes = { + OperandType::TENSOR_FLOAT16, + OperandType::TENSOR_INT32, + }; + outExpectedTypes = {OperandType::TENSOR_FLOAT16}; + } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) { + 
inExpectedTypes = { + OperandType::TENSOR_QUANT8_ASYMM, + OperandType::TENSOR_INT32, + }; + outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM}; + } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3)); + inExpectedTypes = { + OperandType::TENSOR_QUANT8_ASYMM_SIGNED, + OperandType::TENSOR_INT32, + }; + outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM_SIGNED}; + } else { + LOG(ERROR) << "Unsupported input tensor type for operation " + << getOperationName(opType); + return ANEURALNETWORKS_BAD_DATA; + } + if (inputCount == 3) { + inExpectedTypes.push_back(OperandType::BOOL); + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); + } else { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_1)); + } + return validateOperationOperandTypes(operands, inputCount, inputIndexes, + inExpectedTypes, outputCount, outputIndexes, + outExpectedTypes); + } + case ANEURALNETWORKS_SPACE_TO_BATCH_ND: { + if ((inputCount != 4 && inputCount != 3) || outputCount != 1) { + LOG(ERROR) << "Invalid number of input operands (" << inputCount + << ", expected 4 or 3) or output operands (" << outputCount + << ", expected 1) for operation " << getOperationName(opType); + return ANEURALNETWORKS_BAD_DATA; + } + auto inputType = operands[inputIndexes[0]].type; + std::vector<OperandType> inExpectedTypes; + std::vector<OperandType> outExpectedTypes; + if (inputType == OperandType::TENSOR_FLOAT32) { + inExpectedTypes = { + OperandType::TENSOR_FLOAT32, + OperandType::TENSOR_INT32, + OperandType::TENSOR_INT32, + }; + outExpectedTypes = {OperandType::TENSOR_FLOAT32}; + } else if (inputType == OperandType::TENSOR_FLOAT16) { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); + inExpectedTypes = { + OperandType::TENSOR_FLOAT16, + OperandType::TENSOR_INT32, + OperandType::TENSOR_INT32, + }; + outExpectedTypes = 
{OperandType::TENSOR_FLOAT16}; + } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) { + if (operands[inputIndexes[0]].zeroPoint != 0) { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); + } + inExpectedTypes = { + OperandType::TENSOR_QUANT8_ASYMM, + OperandType::TENSOR_INT32, + OperandType::TENSOR_INT32, + }; + outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM}; + } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3)); + inExpectedTypes = { + OperandType::TENSOR_QUANT8_ASYMM_SIGNED, + OperandType::TENSOR_INT32, + OperandType::TENSOR_INT32, + }; + outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM_SIGNED}; + } else { + LOG(ERROR) << "Unsupported input tensor type for operation " + << getOperationName(opType); + return ANEURALNETWORKS_BAD_DATA; + } + if (inputCount == 4) { + inExpectedTypes.push_back(OperandType::BOOL); + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); + } else { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_1)); + } + return validateOperationOperandTypes(operands, inputCount, inputIndexes, + inExpectedTypes, outputCount, outputIndexes, + outExpectedTypes); + } + case ANEURALNETWORKS_PAD: { + if (inputCount != 2 || outputCount != 1) { + logInvalidInOutNumber(2, 1); + return ANEURALNETWORKS_BAD_DATA; + } + auto inputType = operands[inputIndexes[0]].type; + std::vector<OperandType> inExpectedTypes; + std::vector<OperandType> outExpectedTypes; + if (inputType == OperandType::TENSOR_FLOAT32) { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_1)); + inExpectedTypes = { + OperandType::TENSOR_FLOAT32, + OperandType::TENSOR_INT32, + }; + outExpectedTypes = {OperandType::TENSOR_FLOAT32}; + } else if (inputType == OperandType::TENSOR_FLOAT16) { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); + inExpectedTypes = { + 
OperandType::TENSOR_FLOAT16, + OperandType::TENSOR_INT32, + }; + outExpectedTypes = {OperandType::TENSOR_FLOAT16}; + } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM || + inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { + if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3)); + } else { + if (operands[inputIndexes[0]].zeroPoint == 0) { + NN_RETURN_IF_ERROR( + validateHalVersion(opType, halVersion, HalVersion::V1_1)); + } else { + NN_RETURN_IF_ERROR( + validateHalVersion(opType, halVersion, HalVersion::V1_2)); + } + } + inExpectedTypes = { + inputType, + OperandType::TENSOR_INT32, + }; + outExpectedTypes = {inputType}; + } else { + LOG(ERROR) << "Unsupported input tensor type for operation " + << getOperationName(opType); + return ANEURALNETWORKS_BAD_DATA; + } + const auto inputRank = operands[inputIndexes[0]].dimensions.size(); + if (inputRank > 4) { + LOG(ERROR) << "Unsupported input tensor rank for operation " + << getOperationName(opType); + return ANEURALNETWORKS_BAD_DATA; + } + return validateOperationOperandTypes(operands, inputCount, inputIndexes, + inExpectedTypes, outputCount, outputIndexes, + outExpectedTypes); + } + case ANEURALNETWORKS_PAD_V2: { + if (inputCount != 3 || outputCount != 1) { + logInvalidInOutNumber(3, 1); + return ANEURALNETWORKS_BAD_DATA; + } + auto inputType = operands[inputIndexes[0]].type; + std::vector<OperandType> inExpectedTypes; + std::vector<OperandType> outExpectedTypes; + if (inputType == OperandType::TENSOR_FLOAT32) { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); + inExpectedTypes = { + OperandType::TENSOR_FLOAT32, + OperandType::TENSOR_INT32, + OperandType::FLOAT32, + }; + outExpectedTypes = {OperandType::TENSOR_FLOAT32}; + } else if (inputType == OperandType::TENSOR_FLOAT16) { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); + inExpectedTypes = { + 
OperandType::TENSOR_FLOAT16, + OperandType::TENSOR_INT32, + OperandType::FLOAT16, + }; + outExpectedTypes = {OperandType::TENSOR_FLOAT16}; + } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM || + inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { + if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3)); + } else { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); + } + inExpectedTypes = { + inputType, + OperandType::TENSOR_INT32, + OperandType::INT32, + }; // TODO(b/116699425): Make it UINT8. + outExpectedTypes = {inputType}; + } else { + LOG(ERROR) << "Unsupported input tensor type for operation " + << getOperationName(opType); + return ANEURALNETWORKS_BAD_DATA; + } + const auto inputRank = operands[inputIndexes[0]].dimensions.size(); + if (inputRank > 4) { + LOG(ERROR) << "Unsupported input tensor rank for operation " + << getOperationName(opType); + return ANEURALNETWORKS_BAD_DATA; + } + return validateOperationOperandTypes(operands, inputCount, inputIndexes, + inExpectedTypes, outputCount, outputIndexes, + outExpectedTypes); + } + case ANEURALNETWORKS_CAST: { + if (inputCount != 1 || outputCount != 1) { + logInvalidInOutNumber(1, 1); + return ANEURALNETWORKS_BAD_DATA; + } + auto inputOperand = operands[inputIndexes[0]]; + auto outputOperand = operands[outputIndexes[0]]; + auto inputType = inputOperand.type; + auto outputType = outputOperand.type; + std::vector<OperandType> inExpectedTypes; + std::vector<OperandType> outExpectedTypes; + if ((inputType == OperandType::TENSOR_FLOAT16 || + inputType == OperandType::TENSOR_FLOAT32 || + inputType == OperandType::TENSOR_INT32 || + inputType == OperandType::TENSOR_QUANT8_ASYMM) && + (outputType == OperandType::TENSOR_FLOAT16 || + outputType == OperandType::TENSOR_FLOAT32 || + outputType == OperandType::TENSOR_INT32 || + outputType == OperandType::TENSOR_QUANT8_ASYMM)) { + inExpectedTypes = 
{inputType}; + outExpectedTypes = {outputType}; + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); + } else if (inputType == OperandType::TENSOR_BOOL8 || + inputType == OperandType::TENSOR_QUANT16_ASYMM || + inputType == OperandType::TENSOR_QUANT16_SYMM || + inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED || + inputType == OperandType::TENSOR_QUANT8_SYMM) { + inExpectedTypes = {inputType}; + outExpectedTypes = {inputType}; // Only identity CAST is supported. + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3)); + } else { + LOG(ERROR) << "Unsupported data type for operation " << getOperationName(opType); + return ANEURALNETWORKS_BAD_DATA; + } + // Validate that output shape is equal to input shape if dimensions + // are already known. + auto getNumberOfElements = [](const hardware::hidl_vec<uint32_t>& dims) { + if (dims.size() == 0) { + return 0; + } + return std::accumulate(dims.begin(), dims.end(), 1, std::multiplies<>()); + }; + if (inputOperand.dimensions.size() != 0 && outputOperand.dimensions.size() != 0 && + getNumberOfElements(outputOperand.dimensions) != 0 && + inputOperand.dimensions != outputOperand.dimensions) { + return ANEURALNETWORKS_BAD_DATA; + } + return validateOperationOperandTypes(operands, inputCount, inputIndexes, + inExpectedTypes, outputCount, outputIndexes, + outExpectedTypes); + } + case ANEURALNETWORKS_MEAN: { + if (inputCount != 3 || outputCount != 1) { + logInvalidInOutNumber(3, 1); + return ANEURALNETWORKS_BAD_DATA; + } + const auto inputRank = operands[inputIndexes[0]].dimensions.size(); + if (inputRank > 4) { + LOG(ERROR) << "Unsupported input tensor rank for operation " + << getOperationName(opType); + return ANEURALNETWORKS_BAD_DATA; + } + auto inputType = operands[inputIndexes[0]].type; + if (inputType == OperandType::TENSOR_FLOAT32) { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_1)); + } else if (inputType == OperandType::TENSOR_FLOAT16) { + 
NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); + } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_1)); + } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3)); + } else { + LOG(ERROR) << "Unsupported input tensor type for operation " + << getOperationName(opType); + return ANEURALNETWORKS_BAD_DATA; + } + std::vector<OperandType> inExpectedTypes = {inputType, OperandType::TENSOR_INT32, + OperandType::INT32}; + std::vector<OperandType> outExpectedTypes = {inputType}; + return validateOperationOperandTypes(operands, inputCount, inputIndexes, + inExpectedTypes, outputCount, outputIndexes, + outExpectedTypes); + } + case ANEURALNETWORKS_ARGMAX: + case ANEURALNETWORKS_ARGMIN: { + if (inputCount != 2 || outputCount != 1) { + logInvalidInOutNumber(2, 1); + return ANEURALNETWORKS_BAD_DATA; + } + auto inputType = operands[inputIndexes[0]].type; + std::vector<OperandType> inExpectedTypes; + std::vector<OperandType> outExpectedTypes; + if (inputType == OperandType::TENSOR_FLOAT16 || + inputType == OperandType::TENSOR_FLOAT32 || + inputType == OperandType::TENSOR_INT32 || + inputType == OperandType::TENSOR_QUANT8_ASYMM || + inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { + inExpectedTypes = {inputType, OperandType::INT32}; + outExpectedTypes = {OperandType::TENSOR_INT32}; + } else { + LOG(ERROR) << "Unsupported input tensor type for operation " + << getOperationName(opType); + return ANEURALNETWORKS_BAD_DATA; + } + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); + return validateOperationOperandTypes(operands, inputCount, inputIndexes, + inExpectedTypes, outputCount, outputIndexes, + outExpectedTypes); + } + case ANEURALNETWORKS_EXPAND_DIMS: { + if (inputCount != 2 || outputCount != 1) { + logInvalidInOutNumber(2, 1); + return 
ANEURALNETWORKS_BAD_DATA; + } + auto inputType = operands[inputIndexes[0]].type; + std::vector<OperandType> inExpectedTypes; + std::vector<OperandType> outExpectedTypes; + if (inputType == OperandType::TENSOR_FLOAT16 || + inputType == OperandType::TENSOR_FLOAT32 || + inputType == OperandType::TENSOR_INT32 || + inputType == OperandType::TENSOR_QUANT8_ASYMM || + inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { + inExpectedTypes = {inputType, OperandType::INT32}; + outExpectedTypes = {inputType}; + } else { + LOG(ERROR) << "Unsupported input tensor type for operation " + << getOperationName(opType); + return ANEURALNETWORKS_BAD_DATA; + } + if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3)); + } else { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); + } + return validateOperationOperandTypes(operands, inputCount, inputIndexes, + inExpectedTypes, outputCount, outputIndexes, + outExpectedTypes); + } + case ANEURALNETWORKS_SPLIT: { + if (inputCount != 3) { + LOG(ERROR) << "Invalid number of input operands (" << inputCount << ", expected 3)" + << getOperationName(opType); + return ANEURALNETWORKS_BAD_DATA; + } + auto inputType = operands[inputIndexes[0]].type; + if (inputType != OperandType::TENSOR_FLOAT16 && + inputType != OperandType::TENSOR_FLOAT32 && + inputType != OperandType::TENSOR_INT32 && + inputType != OperandType::TENSOR_QUANT8_ASYMM && + inputType != OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { + LOG(ERROR) << "Unsupported input tensor type for operation " + << getOperationName(opType); + return ANEURALNETWORKS_BAD_DATA; + } + if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3)); + } else { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); + } + std::vector<OperandType> inExpectedTypes = {inputType, OperandType::INT32, + 
OperandType::INT32}; + std::vector<OperandType> outExpectedTypes(outputCount, inputType); + return validateOperationOperandTypes(operands, inputCount, inputIndexes, + inExpectedTypes, outputCount, outputIndexes, + outExpectedTypes); + } + case ANEURALNETWORKS_MAXIMUM: + case ANEURALNETWORKS_MINIMUM: { + if (inputCount != 2 || outputCount != 1) { + logInvalidInOutNumber(2, 1); + return ANEURALNETWORKS_BAD_DATA; + } + std::vector<OperandType> inExpectedTypes; + std::vector<OperandType> outExpectedTypes; + OperandType inputType = operands[inputIndexes[0]].type; + if (inputType == OperandType::TENSOR_FLOAT16 || + inputType == OperandType::TENSOR_FLOAT32 || + inputType == OperandType::TENSOR_INT32 || + inputType == OperandType::TENSOR_QUANT8_ASYMM || + inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { + inExpectedTypes = {inputType, inputType}; + outExpectedTypes = {inputType}; + } else { + LOG(ERROR) << "Unsupported input tensor type for operation " + << getOperationName(opType); + return ANEURALNETWORKS_BAD_DATA; + } + if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3)); + } else { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); + } + return validateOperationOperandTypes(operands, inputCount, inputIndexes, + inExpectedTypes, outputCount, outputIndexes, + outExpectedTypes); + } + case ANEURALNETWORKS_GROUPED_CONV_2D: { + if ((inputCount != 12 && inputCount != 9) || outputCount != 1) { + LOG(ERROR) << "Invalid number of input operands (" << inputCount + << ", expected 12 or 9) or output operands (" << outputCount + << ", expected 1) for operation " << getOperationName(opType); + return ANEURALNETWORKS_BAD_DATA; + } + auto inputType = operands[inputIndexes[0]].type; + auto filterType = operands[inputIndexes[1]].type; + std::vector<OperandType> inExpectedTypes; + std::vector<OperandType> outExpectedTypes; + if (inputType == 
OperandType::TENSOR_FLOAT32) { + inExpectedTypes = {OperandType::TENSOR_FLOAT32, OperandType::TENSOR_FLOAT32, + OperandType::TENSOR_FLOAT32, OperandType::INT32, + OperandType::INT32, OperandType::INT32, + OperandType::INT32, OperandType::INT32}; + outExpectedTypes = {OperandType::TENSOR_FLOAT32}; + } else if (inputType == OperandType::TENSOR_FLOAT16) { + inExpectedTypes = {OperandType::TENSOR_FLOAT16, OperandType::TENSOR_FLOAT16, + OperandType::TENSOR_FLOAT16, OperandType::INT32, + OperandType::INT32, OperandType::INT32, + OperandType::INT32, OperandType::INT32}; + outExpectedTypes = {OperandType::TENSOR_FLOAT16}; + } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM || + inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { + if (filterType != inputType && + filterType != OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL) { + LOG(ERROR) << "Unsupported filter tensor type for operation " + << getOperationName(opType); + return ANEURALNETWORKS_BAD_DATA; + } + + if (filterType == OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL && + operands[inputIndexes[1]].extraParams.channelQuant().channelDim != 0) { + LOG(ERROR) << "Unsupported filter tensor channel dimension for operation " + << getOperationName(opType); + return ANEURALNETWORKS_BAD_DATA; + } + + inExpectedTypes = { + inputType, filterType, OperandType::TENSOR_INT32, + OperandType::INT32, OperandType::INT32, OperandType::INT32, + OperandType::INT32, OperandType::INT32}; + outExpectedTypes = {inputType}; + } else { + LOG(ERROR) << "Unsupported input tensor type for operation " + << getOperationName(opType); + return ANEURALNETWORKS_BAD_DATA; + } + + if (inputCount == 12) { + std::vector<OperandType> explicitScalarTypes(3, OperandType::INT32); + inExpectedTypes.insert(inExpectedTypes.end(), explicitScalarTypes.begin(), + explicitScalarTypes.end()); + } + inExpectedTypes.push_back(OperandType::BOOL); + if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { + NN_RETURN_IF_ERROR(validateHalVersion(opType, 
halVersion, HalVersion::V1_3)); + } else { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); + } + return validateOperationOperandTypes(operands, inputCount, inputIndexes, + inExpectedTypes, outputCount, outputIndexes, + outExpectedTypes); + } + case ANEURALNETWORKS_TILE: { + if (inputCount != 2 || outputCount != 1) { + logInvalidInOutNumber(2, 1); + return ANEURALNETWORKS_BAD_DATA; + } + auto inputType = operands[inputIndexes[0]].type; + std::vector<OperandType> inExpectedTypes; + std::vector<OperandType> outExpectedTypes; + if (inputType == OperandType::TENSOR_FLOAT16 || + inputType == OperandType::TENSOR_FLOAT32 || + inputType == OperandType::TENSOR_INT32 || + inputType == OperandType::TENSOR_QUANT8_ASYMM || + inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { + inExpectedTypes = {inputType, OperandType::TENSOR_INT32}; + outExpectedTypes = {inputType}; + } else { + LOG(ERROR) << "Unsupported input tensor type for operation " + << getOperationName(opType); + return ANEURALNETWORKS_BAD_DATA; + } + if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3)); + } else { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); + } + return validateOperationOperandTypes(operands, inputCount, inputIndexes, + inExpectedTypes, outputCount, outputIndexes, + outExpectedTypes); + } + case ANEURALNETWORKS_POW: { + if (inputCount != 2 || outputCount != 1) { + logInvalidInOutNumber(2, 1); + return ANEURALNETWORKS_BAD_DATA; + } + auto inputType = operands[inputIndexes[0]].type; + std::vector<OperandType> inExpectedTypes; + std::vector<OperandType> outExpectedTypes; + if (inputType == OperandType::TENSOR_FLOAT16 || + inputType == OperandType::TENSOR_FLOAT32) { + inExpectedTypes = {inputType, inputType}; + outExpectedTypes = {inputType}; + } else { + LOG(ERROR) << "Unsupported input tensor type for operation " + << getOperationName(opType); + 
return ANEURALNETWORKS_BAD_DATA; + } + if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3)); + } else { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); + } + return validateOperationOperandTypes(operands, inputCount, inputIndexes, + inExpectedTypes, outputCount, outputIndexes, + outExpectedTypes); + } + case ANEURALNETWORKS_IF: { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3)); + return validateIfOperation(inputCount, inputIndexes, outputCount, outputIndexes, + operands, helper) + ? ANEURALNETWORKS_NO_ERROR + : ANEURALNETWORKS_BAD_DATA; + } + case ANEURALNETWORKS_WHILE: { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3)); + return validateWhileOperation(inputCount, inputIndexes, outputCount, outputIndexes, + operands, helper) + ? ANEURALNETWORKS_NO_ERROR + : ANEURALNETWORKS_BAD_DATA; + } + default: { + const OperationRegistration* operationRegistration = + BuiltinOperationResolver::get()->findOperation( + static_cast<OperationType>(opType)); + if (operationRegistration == nullptr) { + if (0 <= opType && opType < kNumberOfOperationTypes) { + LOG(ERROR) << getOperationName(opType) << " not registered"; + } else { + LOG(ERROR) << "Operation type " << opType << " out of the range [0, " + << kNumberOfOperationTypes << ")"; + } + return ANEURALNETWORKS_UNEXPECTED_NULL; + } + if (operationRegistration->validate == nullptr) { + LOG(ERROR) << "Incomplete operation registration: " << getOperationName(opType); + return ANEURALNETWORKS_UNEXPECTED_NULL; + } + OperationValidationContext context(operationRegistration->name, inputCount, + inputIndexes, outputCount, outputIndexes, + operands.data(), halVersion); + if (!operationRegistration->validate(&context)) { + LOG(ERROR) << "Validation failed for operation " << getOperationName(opType); + return ANEURALNETWORKS_BAD_DATA; + } + return 
ANEURALNETWORKS_NO_ERROR; + } + } +} + +ErrorStatus convertResultCodeToErrorStatus(int resultCode) { + switch (resultCode) { + case ANEURALNETWORKS_NO_ERROR: + return ErrorStatus::NONE; + + case ANEURALNETWORKS_BAD_DATA: + case ANEURALNETWORKS_UNEXPECTED_NULL: + return ErrorStatus::INVALID_ARGUMENT; + + case ANEURALNETWORKS_OUTPUT_INSUFFICIENT_SIZE: + return ErrorStatus::OUTPUT_INSUFFICIENT_SIZE; + + case ANEURALNETWORKS_UNAVAILABLE_DEVICE: + return ErrorStatus::DEVICE_UNAVAILABLE; + + case ANEURALNETWORKS_BAD_STATE: + case ANEURALNETWORKS_INCOMPLETE: + case ANEURALNETWORKS_OP_FAILED: + case ANEURALNETWORKS_OUT_OF_MEMORY: + case ANEURALNETWORKS_UNMAPPABLE: + case ANEURALNETWORKS_DEAD_OBJECT: + return ErrorStatus::GENERAL_FAILURE; + + case ANEURALNETWORKS_MISSED_DEADLINE_TRANSIENT: + return ErrorStatus::MISSED_DEADLINE_TRANSIENT; + case ANEURALNETWORKS_MISSED_DEADLINE_PERSISTENT: + return ErrorStatus::MISSED_DEADLINE_PERSISTENT; + case ANEURALNETWORKS_RESOURCE_EXHAUSTED_TRANSIENT: + return ErrorStatus::RESOURCE_EXHAUSTED_TRANSIENT; + case ANEURALNETWORKS_RESOURCE_EXHAUSTED_PERSISTENT: + return ErrorStatus::RESOURCE_EXHAUSTED_PERSISTENT; + } + LOG(ERROR) << "Unknown result code " << resultCode << " mapped to ErrorStatus::GENERAL_FAILURE"; + return ErrorStatus::GENERAL_FAILURE; +} + +int convertErrorStatusToResultCode(ErrorStatus status) { + switch (status) { + case ErrorStatus::NONE: + return ANEURALNETWORKS_NO_ERROR; + case ErrorStatus::DEVICE_UNAVAILABLE: + return ANEURALNETWORKS_UNAVAILABLE_DEVICE; + case ErrorStatus::GENERAL_FAILURE: + return ANEURALNETWORKS_OP_FAILED; + case ErrorStatus::OUTPUT_INSUFFICIENT_SIZE: + return ANEURALNETWORKS_OUTPUT_INSUFFICIENT_SIZE; + case ErrorStatus::INVALID_ARGUMENT: + return ANEURALNETWORKS_BAD_DATA; + case ErrorStatus::MISSED_DEADLINE_TRANSIENT: + return ANEURALNETWORKS_MISSED_DEADLINE_TRANSIENT; + case ErrorStatus::MISSED_DEADLINE_PERSISTENT: + return ANEURALNETWORKS_MISSED_DEADLINE_PERSISTENT; + case 
ErrorStatus::RESOURCE_EXHAUSTED_TRANSIENT: + return ANEURALNETWORKS_RESOURCE_EXHAUSTED_TRANSIENT; + case ErrorStatus::RESOURCE_EXHAUSTED_PERSISTENT: + return ANEURALNETWORKS_RESOURCE_EXHAUSTED_PERSISTENT; + } + LOG(ERROR) << "Unknown ErrorStatus " << toString(status) + << " mapped to ANEURALNETWORKS_OP_FAILED"; + return ANEURALNETWORKS_OP_FAILED; +} + +std::tuple<int, std::vector<OutputShape>, Timing> getExecutionResult( + ErrorStatus status, std::vector<OutputShape> outputShapes, Timing timing) { + constexpr Timing kNoTiming = {std::numeric_limits<uint64_t>::max(), + std::numeric_limits<uint64_t>::max()}; + const int n = convertErrorStatusToResultCode(status); + if (status != ErrorStatus::NONE && status != ErrorStatus::OUTPUT_INSUFFICIENT_SIZE && + !outputShapes.empty()) { + LOG(ERROR) << "The driver returned OutputShapes when it shouldn't."; + outputShapes.clear(); + } + if (status != ErrorStatus::NONE && timing != kNoTiming) { + LOG(ERROR) << "The driver returned Timing when it shouldn't."; + timing = kNoTiming; + } + return {n, std::move(outputShapes), timing}; +} + +std::optional<std::vector<uint32_t>> combineDimensions(const std::vector<uint32_t>& lhs, + const std::vector<uint32_t>& rhs) { + if (rhs.empty()) return lhs; + if (lhs.empty()) return rhs; + if (lhs.size() != rhs.size()) { + LOG(ERROR) << "Incompatible ranks: " << toString(lhs) << " and " << toString(rhs); + return std::nullopt; + } + std::vector<uint32_t> combined = lhs; + for (uint32_t i = 0; i < lhs.size(); i++) { + if (lhs[i] == 0) { + combined[i] = rhs[i]; + } else if (rhs[i] != 0 && lhs[i] != rhs[i]) { + LOG(ERROR) << "Incompatible dimensions: " << toString(lhs) << " and " << toString(rhs); + return std::nullopt; + } + } + return combined; +} + +// Capabilities::operandPerformance utilities. +// The field Capabilities::operandPerformance is a vector sorted by the field +// Capabilities::OperandPerformance::type. 
+ +template <HalVersion version> +hidl_vec<VersionedOperandPerformance<version>> nonExtensionOperandPerformance( + PerformanceInfo perf) { + using OpPerf = VersionedOperandPerformance<version>; + + // Note: range presents enumerators in declaration order, not in numerical order. + static constexpr hidl_enum_range<VersionedOperandType<version>> kOperandTypeRange; + + std::vector<OpPerf> ret; + ret.reserve(kOperandTypeRange.end() - kOperandTypeRange.begin()); + for (VersionedOperandType<version> type : kOperandTypeRange) { + if (static_cast<OperandType>(type) != OperandType::SUBGRAPH) { + ret.push_back(OpPerf{type, perf}); + } + } + std::sort(ret.begin(), ret.end(), + [](const OpPerf& a, const OpPerf& b) { return a.type < b.type; }); + + return ret; +} + +template hal::hidl_vec<V1_2::Capabilities::OperandPerformance> +nonExtensionOperandPerformance<HalVersion::V1_2>(PerformanceInfo perf); +template hal::hidl_vec<V1_3::Capabilities::OperandPerformance> +nonExtensionOperandPerformance<HalVersion::V1_3>(PerformanceInfo perf); + +template <HalVersion version> +void update(hal::hidl_vec<VersionedOperandPerformance<version>>* operandPerformance, + VersionedOperandType<version> type, hal::PerformanceInfo perf) { + CHECK(operandPerformance != nullptr); + const auto it = + std::lower_bound(operandPerformance->begin(), operandPerformance->end(), type, + [](const VersionedOperandPerformance<version>& perf, + VersionedOperandType<version> type) { return perf.type < type; }); + CHECK(it != operandPerformance->end()) + << toString(type) << " not in " << toString(*operandPerformance); + it->info = perf; +} + +void update(hidl_vec<V1_2::Capabilities::OperandPerformance>* operandPerformance, + V1_2::OperandType type, PerformanceInfo perf) { + update<HalVersion::V1_2>(operandPerformance, type, perf); +} +void update(hidl_vec<V1_3::Capabilities::OperandPerformance>* operandPerformance, + V1_3::OperandType type, PerformanceInfo perf) { + update<HalVersion::V1_3>(operandPerformance, 
type, perf); +} + +template <HalVersion version> +PerformanceInfo lookup(const hidl_vec<VersionedOperandPerformance<version>>& operandPerformance, + VersionedOperandType<version> type) { + const auto it = std::lower_bound(operandPerformance.begin(), operandPerformance.end(), type, + [](const VersionedOperandPerformance<version>& perf, + VersionedOperandType<version> type) { + return static_cast<OperandType>(perf.type) < + static_cast<OperandType>(type); + }); + if (it == operandPerformance.end()) { + LOG(WARNING) << "No PerformanceInfo for " << toString(type); + return kNoPerformanceInfo; + } else { + return it->info; + } +} + +PerformanceInfo lookup(const hidl_vec<V1_2::Capabilities::OperandPerformance>& operandPerformance, + V1_2::OperandType type) { + return lookup<HalVersion::V1_2>(operandPerformance, type); +} +PerformanceInfo lookup(const hidl_vec<V1_3::Capabilities::OperandPerformance>& operandPerformance, + V1_3::OperandType type) { + CHECK(type != V1_3::OperandType::SUBGRAPH) + << "Use Capabilities::ifPerformance or Capabilities::whilePerformance"; + return lookup<HalVersion::V1_3>(operandPerformance, type); +} + +// Versioning + +// In Android P, most data types are treated as having the same performance as TENSOR_QUANT8_ASYMM. +// This array must be in sorted order. 
+static const OperandType kQuantized8PerformanceConsistentWithP[] = { + OperandType::INT32, OperandType::UINT32, OperandType::TENSOR_INT32, OperandType::OEM, + OperandType::TENSOR_OEM_BYTE}; + +static bool isQuantized8PerformanceConsistentWithP(const V1_2::Capabilities& capabilities) { + const PerformanceInfo quantized8Performance = + lookup(capabilities.operandPerformance, V1_2::OperandType::TENSOR_QUANT8_ASYMM); + return std::all_of(std::begin(kQuantized8PerformanceConsistentWithP), + std::end(kQuantized8PerformanceConsistentWithP), + [quantized8Performance, &capabilities](OperandType type) { + return quantized8Performance == + lookup(capabilities.operandPerformance, + static_cast<V1_2::OperandType>(type)); + }); +} + +static bool isQuantized8PerformanceConsistentWithP(const V1_3::Capabilities& capabilities) { + const PerformanceInfo quantized8Performance = + lookup(capabilities.operandPerformance, OperandType::TENSOR_QUANT8_ASYMM); + return std::all_of(std::begin(kQuantized8PerformanceConsistentWithP), + std::end(kQuantized8PerformanceConsistentWithP), + [quantized8Performance, &capabilities](OperandType type) { + return quantized8Performance == + lookup(capabilities.operandPerformance, type); + }); +} + +static hidl_vec<V1_2::Capabilities::OperandPerformance> makeQuantized8PerformanceConsistentWithP( + PerformanceInfo quantized8Performance) { + hidl_vec<V1_2::Capabilities::OperandPerformance> ret( + std::size(kQuantized8PerformanceConsistentWithP)); + std::transform( + std::begin(kQuantized8PerformanceConsistentWithP), + std::end(kQuantized8PerformanceConsistentWithP), ret.begin(), + [quantized8Performance](OperandType type) -> V1_2::Capabilities::OperandPerformance { + return {static_cast<V1_2::OperandType>(type), quantized8Performance}; + }); + return ret; +} + +bool compliantWithV1_0(const V1_0::Capabilities&) { + return true; +} + +bool compliantWithV1_0(const V1_1::Capabilities& capabilities) { + return capabilities.relaxedFloat32toFloat16Performance == 
capabilities.float32Performance; +} + +bool compliantWithV1_0(const V1_2::Capabilities& capabilities) { + const PerformanceInfo perfTensorFloat32 = + lookup(capabilities.operandPerformance, V1_2::OperandType::TENSOR_FLOAT32); + const PerformanceInfo perfFloat32 = + lookup(capabilities.operandPerformance, V1_2::OperandType::FLOAT32); + if (perfTensorFloat32 != perfFloat32 || + perfTensorFloat32 != capabilities.relaxedFloat32toFloat16PerformanceTensor || + perfFloat32 != capabilities.relaxedFloat32toFloat16PerformanceScalar) { + return false; + } + + return isQuantized8PerformanceConsistentWithP(capabilities); +} + +bool compliantWithV1_0(const V1_3::Capabilities& capabilities) { + const PerformanceInfo perfTensorFloat32 = + lookup(capabilities.operandPerformance, OperandType::TENSOR_FLOAT32); + const PerformanceInfo perfFloat32 = + lookup(capabilities.operandPerformance, OperandType::FLOAT32); + if (perfTensorFloat32 != perfFloat32 || + perfTensorFloat32 != capabilities.relaxedFloat32toFloat16PerformanceTensor || + perfFloat32 != capabilities.relaxedFloat32toFloat16PerformanceScalar) { + return false; + } + + return isQuantized8PerformanceConsistentWithP(capabilities); +} + +bool compliantWithV1_1(const V1_0::Capabilities&) { + return true; +} + +bool compliantWithV1_1(const V1_1::Capabilities&) { + return true; +} + +bool compliantWithV1_1(const V1_2::Capabilities& capabilities) { + if ((capabilities.relaxedFloat32toFloat16PerformanceTensor != + capabilities.relaxedFloat32toFloat16PerformanceScalar) || + (lookup(capabilities.operandPerformance, V1_2::OperandType::TENSOR_FLOAT32) != + lookup(capabilities.operandPerformance, V1_2::OperandType::FLOAT32))) { + return false; + } + + return isQuantized8PerformanceConsistentWithP(capabilities); +} + +bool compliantWithV1_1(const V1_3::Capabilities& capabilities) { + if ((capabilities.relaxedFloat32toFloat16PerformanceTensor != + capabilities.relaxedFloat32toFloat16PerformanceScalar) || + 
(lookup(capabilities.operandPerformance, OperandType::TENSOR_FLOAT32) != + lookup(capabilities.operandPerformance, OperandType::FLOAT32))) { + return false; + } + + return isQuantized8PerformanceConsistentWithP(capabilities); +} + +bool compliantWithV1_2(const V1_0::Capabilities&) { + return true; +} + +bool compliantWithV1_2(const V1_1::Capabilities&) { + return true; +} + +bool compliantWithV1_2(const V1_2::Capabilities&) { + return true; +} + +bool compliantWithV1_2(const V1_3::Capabilities&) { + return true; +} + +bool compliantWithV1_3(const V1_0::Capabilities&) { + return true; +} + +bool compliantWithV1_3(const V1_1::Capabilities&) { + return true; +} + +bool compliantWithV1_3(const V1_2::Capabilities&) { + return true; +} + +bool compliantWithV1_3(const V1_3::Capabilities&) { + return true; +} + +V1_0::ErrorStatus convertToV1_0(V1_0::ErrorStatus status) { + return status; +} + +V1_0::ErrorStatus convertToV1_0(V1_3::ErrorStatus status) { + switch (status) { + case V1_3::ErrorStatus::NONE: + return V1_0::ErrorStatus::NONE; + case V1_3::ErrorStatus::DEVICE_UNAVAILABLE: + return V1_0::ErrorStatus::DEVICE_UNAVAILABLE; + case V1_3::ErrorStatus::GENERAL_FAILURE: + return V1_0::ErrorStatus::GENERAL_FAILURE; + case V1_3::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE: + return V1_0::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE; + case V1_3::ErrorStatus::INVALID_ARGUMENT: + return V1_0::ErrorStatus::INVALID_ARGUMENT; + case V1_3::ErrorStatus::MISSED_DEADLINE_TRANSIENT: + return V1_0::ErrorStatus::GENERAL_FAILURE; + case V1_3::ErrorStatus::MISSED_DEADLINE_PERSISTENT: + return V1_0::ErrorStatus::GENERAL_FAILURE; + case V1_3::ErrorStatus::RESOURCE_EXHAUSTED_TRANSIENT: + return V1_0::ErrorStatus::GENERAL_FAILURE; + case V1_3::ErrorStatus::RESOURCE_EXHAUSTED_PERSISTENT: + return V1_0::ErrorStatus::GENERAL_FAILURE; + } + LOG(ERROR) << "Unknown ErrorStatus: " << toString(status) << " mapped to GENERAL_FAILURE"; + return V1_0::ErrorStatus::GENERAL_FAILURE; +} + +V1_3::ErrorStatus 
convertToV1_3(V1_0::ErrorStatus status) { + return static_cast<V1_3::ErrorStatus>(status); +} + +V1_3::ErrorStatus convertToV1_3(V1_3::ErrorStatus status) { + return status; +} + +static V1_0::OperationType uncheckedConvertToV1_0(V1_1::OperationType type) { + return static_cast<V1_0::OperationType>(type); +} + +static V1_0::OperationType uncheckedConvertToV1_0(V1_2::OperationType type) { + return static_cast<V1_0::OperationType>(type); +} + +V1_0::OperationType uncheckedConvertToV1_0(V1_3::OperationType type) { + return static_cast<V1_0::OperationType>(type); +} + +static V1_1::OperationType convertToV1_1(V1_0::OperationType type) { + return static_cast<V1_1::OperationType>(type); +} + +static V1_1::OperationType uncheckedConvertToV1_1(V1_2::OperationType type) { + return static_cast<V1_1::OperationType>(type); +} + +V1_1::OperationType uncheckedConvertToV1_1(V1_3::OperationType type) { + return static_cast<V1_1::OperationType>(type); +} + +static V1_2::OperationType convertToV1_2(V1_0::OperationType type) { + return static_cast<V1_2::OperationType>(type); +} + +static V1_2::OperationType convertToV1_2(V1_1::OperationType type) { + return static_cast<V1_2::OperationType>(type); +} + +V1_2::OperationType uncheckedConvertToV1_2(V1_3::OperationType type) { + return static_cast<V1_2::OperationType>(type); +} + +static V1_3::OperationType convertToV1_3(V1_0::OperationType type) { + return static_cast<V1_3::OperationType>(type); +} + +static V1_3::OperationType convertToV1_3(V1_1::OperationType type) { + return static_cast<V1_3::OperationType>(type); +} + +static V1_3::OperationType convertToV1_3(V1_2::OperationType type) { + return static_cast<V1_3::OperationType>(type); +} + +V1_0::Capabilities convertToV1_0(const V1_0::Capabilities& capabilities) { + return capabilities; +} + +V1_0::Capabilities convertToV1_0(const V1_1::Capabilities& capabilities) { + if (!compliantWithV1_0(capabilities)) { + LOG(ERROR) << "Upcasting non-compliant capabilities " << 
toString(capabilities) + << " from V1_1::Capabilities to V1_0::Capabilities"; + } + return {.float32Performance = capabilities.float32Performance, + .quantized8Performance = capabilities.quantized8Performance}; +} + +V1_0::Capabilities convertToV1_0(const V1_2::Capabilities& capabilities) { + if (!compliantWithV1_0(capabilities)) { + LOG(ERROR) << "Upcasting non-compliant capabilities " << toString(capabilities) + << " from V1_2::Capabilities to V1_0::Capabilities"; + } + return {.float32Performance = + lookup(capabilities.operandPerformance, V1_2::OperandType::TENSOR_FLOAT32), + .quantized8Performance = lookup(capabilities.operandPerformance, + V1_2::OperandType::TENSOR_QUANT8_ASYMM)}; +} + +V1_0::Capabilities convertToV1_0(const V1_3::Capabilities& capabilities) { + if (!compliantWithV1_0(capabilities)) { + LOG(ERROR) << "Upcasting non-compliant capabilities " << toString(capabilities) + << " from V1_3::Capabilities to V1_0::Capabilities"; + } + return {.float32Performance = + lookup(capabilities.operandPerformance, OperandType::TENSOR_FLOAT32), + .quantized8Performance = + lookup(capabilities.operandPerformance, OperandType::TENSOR_QUANT8_ASYMM)}; +} + +V1_1::Capabilities convertToV1_1(const V1_0::Capabilities& capabilities) { + return {.float32Performance = capabilities.float32Performance, + .quantized8Performance = capabilities.quantized8Performance, + .relaxedFloat32toFloat16Performance = capabilities.float32Performance}; +} + +V1_1::Capabilities convertToV1_1(const V1_1::Capabilities& capabilities) { + return capabilities; +} + +V1_1::Capabilities convertToV1_1(const V1_2::Capabilities& capabilities) { + if (!compliantWithV1_1(capabilities)) { + LOG(ERROR) << "Upcasting non-compliant capabilities " << toString(capabilities) + << " from V1_2::Capabilities to V1_1::Capabilities"; + } + return {.float32Performance = + lookup(capabilities.operandPerformance, V1_2::OperandType::TENSOR_FLOAT32), + .quantized8Performance = + lookup(capabilities.operandPerformance, 
V1_2::OperandType::TENSOR_QUANT8_ASYMM), + .relaxedFloat32toFloat16Performance = + capabilities.relaxedFloat32toFloat16PerformanceTensor}; +} + +V1_1::Capabilities convertToV1_1(const V1_3::Capabilities& capabilities) { + if (!compliantWithV1_1(capabilities)) { + LOG(ERROR) << "Upcasting non-compliant capabilities " << toString(capabilities) + << " from V1_3::Capabilities to V1_1::Capabilities"; + } + return {.float32Performance = + lookup(capabilities.operandPerformance, OperandType::TENSOR_FLOAT32), + .quantized8Performance = + lookup(capabilities.operandPerformance, OperandType::TENSOR_QUANT8_ASYMM), + .relaxedFloat32toFloat16Performance = + capabilities.relaxedFloat32toFloat16PerformanceTensor}; +} + +V1_2::Capabilities convertToV1_2(const V1_0::Capabilities& capabilities) { + V1_2::Capabilities ret = { + .relaxedFloat32toFloat16PerformanceScalar = capabilities.float32Performance, + .relaxedFloat32toFloat16PerformanceTensor = capabilities.float32Performance, + .operandPerformance = + makeQuantized8PerformanceConsistentWithP(capabilities.quantized8Performance)}; + auto& opPerf = ret.operandPerformance; + opPerf.resize(opPerf.size() + 2); + opPerf[opPerf.size() - 2] = {V1_2::OperandType::TENSOR_FLOAT32, + capabilities.float32Performance}; + opPerf[opPerf.size() - 1] = {V1_2::OperandType::FLOAT32, capabilities.float32Performance}; + using OperandPerformance = V1_2::Capabilities::OperandPerformance; + std::sort(opPerf.begin(), opPerf.end(), + [](const OperandPerformance& a, const OperandPerformance& b) { + return a.type < b.type; + }); + return ret; +} + +V1_2::Capabilities convertToV1_2(const V1_1::Capabilities& capabilities) { + V1_2::Capabilities ret = {.relaxedFloat32toFloat16PerformanceScalar = + capabilities.relaxedFloat32toFloat16Performance, + .relaxedFloat32toFloat16PerformanceTensor = + capabilities.relaxedFloat32toFloat16Performance, + .operandPerformance = makeQuantized8PerformanceConsistentWithP( + capabilities.quantized8Performance)}; + auto& opPerf = 
ret.operandPerformance; + opPerf.resize(opPerf.size() + 2); + opPerf[opPerf.size() - 2] = {V1_2::OperandType::TENSOR_FLOAT32, + capabilities.float32Performance}; + opPerf[opPerf.size() - 1] = {V1_2::OperandType::FLOAT32, capabilities.float32Performance}; + using OperandPerformance = V1_2::Capabilities::OperandPerformance; + std::sort(opPerf.begin(), opPerf.end(), + [](const OperandPerformance& a, const OperandPerformance& b) { + return a.type < b.type; + }); + return ret; +} + +V1_2::Capabilities convertToV1_2(const V1_2::Capabilities& capabilities) { + return capabilities; +} + +V1_2::Capabilities convertToV1_2(const V1_3::Capabilities& capabilities) { + V1_2::Capabilities ret = { + .relaxedFloat32toFloat16PerformanceScalar = + capabilities.relaxedFloat32toFloat16PerformanceScalar, + .relaxedFloat32toFloat16PerformanceTensor = + capabilities.relaxedFloat32toFloat16PerformanceTensor, + }; + const auto& inputOpPerf = capabilities.operandPerformance; + hidl_vec<V1_3::Capabilities::OperandPerformance> opPerfSupported; + opPerfSupported.resize(inputOpPerf.size()); + auto last = + std::copy_if(inputOpPerf.begin(), inputOpPerf.end(), opPerfSupported.begin(), + [](V1_3::Capabilities::OperandPerformance opPerf) { + return validOperandType(static_cast<V1_2::OperandType>(opPerf.type)); + }); + opPerfSupported.resize(std::distance(opPerfSupported.begin(), last)); + + auto& convertedOpPerf = ret.operandPerformance; + convertedOpPerf.resize(opPerfSupported.size()); + std::transform(opPerfSupported.begin(), opPerfSupported.end(), convertedOpPerf.begin(), + [](V1_3::Capabilities::OperandPerformance opPerf) { + return V1_2::Capabilities::OperandPerformance{ + static_cast<V1_2::OperandType>(opPerf.type), opPerf.info}; + }); + return ret; +} + +V1_3::Capabilities convertToV1_3(const V1_0::Capabilities& capabilities) { + return convertToV1_3(convertToV1_2(capabilities)); +} + +V1_3::Capabilities convertToV1_3(const V1_1::Capabilities& capabilities) { + return 
convertToV1_3(convertToV1_2(capabilities)); +} + +V1_3::Capabilities convertToV1_3(const V1_2::Capabilities& capabilities) { + V1_3::Capabilities ret = { + .relaxedFloat32toFloat16PerformanceScalar = + capabilities.relaxedFloat32toFloat16PerformanceScalar, + .relaxedFloat32toFloat16PerformanceTensor = + capabilities.relaxedFloat32toFloat16PerformanceTensor, + .ifPerformance = kNoPerformanceInfo, + .whilePerformance = kNoPerformanceInfo, + }; + auto& opPerf = ret.operandPerformance; + opPerf.resize(capabilities.operandPerformance.size()); + std::transform(capabilities.operandPerformance.begin(), capabilities.operandPerformance.end(), + opPerf.begin(), [](V1_2::Capabilities::OperandPerformance opPerf) { + return V1_3::Capabilities::OperandPerformance{ + static_cast<V1_3::OperandType>(opPerf.type), opPerf.info}; + }); + return ret; +} + +V1_3::Capabilities convertToV1_3(const V1_3::Capabilities& capabilities) { + return capabilities; +} + +static V1_0::Operation uncheckedConvertToV1_0(const V1_1::Operation& operation) { + return {.type = uncheckedConvertToV1_0(operation.type), + .inputs = operation.inputs, + .outputs = operation.outputs}; +} + +static V1_1::Operation convertToV1_1(const V1_0::Operation& operation) { + return {.type = convertToV1_1(operation.type), + .inputs = operation.inputs, + .outputs = operation.outputs}; +} + +static hidl_vec<V1_0::Operation> uncheckedConvertToV1_0( + const hidl_vec<V1_1::Operation>& operations) { + hidl_vec<V1_0::Operation> result(operations.size()); + std::transform( + operations.begin(), operations.end(), result.begin(), + [](const V1_1::Operation& operation) { return uncheckedConvertToV1_0(operation); }); + return result; +} + +static hidl_vec<V1_1::Operation> convertToV1_1(const hidl_vec<V1_0::Operation>& operations) { + hidl_vec<V1_1::Operation> result(operations.size()); + std::transform(operations.begin(), operations.end(), result.begin(), + [](const V1_0::Operation& operation) { return convertToV1_1(operation); }); + 
return result; +} + +bool compliantWithV1_0(const V1_3::Operand& operand) { + return validOperandType(static_cast<V1_0::OperandType>(operand.type)) && + (nonExtensionOperandTypeIsScalar(static_cast<int>(operand.type)) || + operand.dimensions.size() != 0) && + compliantWithV1_0(operand.lifetime); +} + +bool compliantWithV1_2(const V1_3::Operand& operand) { + return validOperandType(static_cast<V1_2::OperandType>(operand.type)) && + compliantWithV1_0(operand.lifetime); +} + +bool compliantWithV1_3(const V1_3::Operand& operand) { + return true; +} + +static bool compliantWith(HalVersion version, const V1_3::Model& model, + std::set<uint32_t>* noncompliantOperations) { + // A boolean vector indicating whether each pool is compliant with the target HAL version. + std::vector<bool> isPoolCompliant(model.pools.size(), false); + std::transform(model.pools.begin(), model.pools.end(), isPoolCompliant.begin(), + [version](const hidl_memory& pool) { return validatePool(pool, version); }); + + // A boolean vector indicating whether each operand is compliant with the target HAL version. + std::vector<bool> isOperandCompliant(model.main.operands.size(), false); + std::transform(model.main.operands.begin(), model.main.operands.end(), + isOperandCompliant.begin(), [&isPoolCompliant, version](const Operand& op) { + bool is_operand_compliant = false; + switch (version) { + case HalVersion::UNKNOWN: + is_operand_compliant = false; + break; + case HalVersion::V1_0: + is_operand_compliant = compliantWithV1_0(op); + break; + case HalVersion::V1_1: + // There is no V1_1::Operand -- both V1_0::Model + // and V1_1::Model use V1_0::Operand. 
+ is_operand_compliant = compliantWithV1_0(op); + break; + case HalVersion::V1_2: + is_operand_compliant = compliantWithV1_2(op); + break; + case HalVersion::V1_3: + is_operand_compliant = compliantWithV1_3(op); + break; + } + return is_operand_compliant && + !(op.lifetime == OperandLifeTime::CONSTANT_REFERENCE && + !isPoolCompliant[op.location.poolIndex]); + }); + + auto allOperandsCompliant = [&isOperandCompliant](const hidl_vec<uint32_t>& indices) { + return std::all_of( + indices.begin(), indices.end(), + [&isOperandCompliant](const uint32_t ind) { return isOperandCompliant[ind]; }); + }; + + auto localValidateOperation = [&model, version, &allOperandsCompliant](const Operation& op) { + if (!allOperandsCompliant(op.inputs) || !allOperandsCompliant(op.outputs)) return false; + int error = validateOperation( + static_cast<int32_t>(op.type), op.inputs.size(), + op.inputs.size() > 0 ? op.inputs.data() : nullptr, op.outputs.size(), + op.outputs.size() > 0 ? op.outputs.data() : nullptr, model.main.operands, version); + return error == ANEURALNETWORKS_NO_ERROR; + }; + + if (noncompliantOperations) { + CHECK(noncompliantOperations->empty()); + for (uint32_t idx = 0; idx < model.main.operations.size(); ++idx) { + if (!localValidateOperation(model.main.operations[idx])) { + noncompliantOperations->insert(idx); + } + } + return noncompliantOperations->empty(); + } else { + return std::all_of(model.main.operations.begin(), model.main.operations.end(), + localValidateOperation); + } +} + +bool compliantWithV1_0(const V1_0::Model& model) { + return true; +} + +bool compliantWithV1_0(const V1_1::Model& model) { + // In addition to new enumeration values being introduced in V1_1::Model, a + // new flag was introduced to indicate whether or not float32 data can be + // calculated using float16 units. 
This 'relaxComputationFloat32toFloat16' + // flag is not relevant in whether a V1_1::Model is compliant with a + // V1_0::Model because all 1.0 drivers require strict calculation by default + // in the P NN runtime. Even if fp16 calculations are allowed, they can + // still be computed by a strict fp32 driver. + return std::all_of( + model.operations.begin(), model.operations.end(), [&model](const V1_1::Operation& op) { + int error = validateOperation(static_cast<int32_t>(op.type), op.inputs.size(), + op.inputs.size() > 0 ? op.inputs.data() : nullptr, + op.outputs.size(), + op.outputs.size() > 0 ? op.outputs.data() : nullptr, + convertToV1_3(model.operands), HalVersion::V1_0); + return error == ANEURALNETWORKS_NO_ERROR; + }); +} + +bool compliantWithV1_0(const V1_2::Model& model, std::set<uint32_t>* noncompliantOperations) { + return compliantWith(HalVersion::V1_0, convertToV1_3(model), noncompliantOperations); +} + +bool compliantWithV1_0(const V1_3::Model& model, std::set<uint32_t>* noncompliantOperations) { + return compliantWith(HalVersion::V1_0, model, noncompliantOperations); +} + +bool compliantWithV1_1(const V1_0::Model&) { + return true; +} + +bool compliantWithV1_1(const V1_1::Model&) { + return true; +} + +bool compliantWithV1_1(const V1_2::Model& model, std::set<uint32_t>* noncompliantOperations) { + return compliantWith(HalVersion::V1_1, convertToV1_3(model), noncompliantOperations); +} + +bool compliantWithV1_1(const V1_3::Model& model, std::set<uint32_t>* noncompliantOperations) { + return compliantWith(HalVersion::V1_1, model, noncompliantOperations); +} + +bool compliantWithV1_2(const V1_0::Model&) { + return true; +} + +bool compliantWithV1_2(const V1_1::Model&) { + return true; +} + +bool compliantWithV1_2(const V1_2::Model&, std::set<uint32_t>* noncompliantOperations) { + return true; +} + +bool compliantWithV1_2(const V1_3::Model& model, std::set<uint32_t>* noncompliantOperations) { + return compliantWith(HalVersion::V1_2, model, 
noncompliantOperations); +} + +static V1_0::Operation uncheckedConvertToV1_0(const V1_2::Operation& operation) { + return {.type = uncheckedConvertToV1_0(operation.type), + .inputs = operation.inputs, + .outputs = operation.outputs}; +} + +static V1_0::Operation uncheckedConvertToV1_0(const V1_3::Operation& operation) { + return {.type = uncheckedConvertToV1_0(operation.type), + .inputs = operation.inputs, + .outputs = operation.outputs}; +} + +static V1_1::Operation uncheckedConvertToV1_1(const V1_2::Operation& operation) { + return {.type = uncheckedConvertToV1_1(operation.type), + .inputs = operation.inputs, + .outputs = operation.outputs}; +} + +static V1_1::Operation uncheckedConvertToV1_1(const V1_3::Operation& operation) { + return {.type = uncheckedConvertToV1_1(operation.type), + .inputs = operation.inputs, + .outputs = operation.outputs}; +} + +static V1_2::Operation convertToV1_2(const V1_0::Operation& operation) { + return {.type = convertToV1_2(operation.type), + .inputs = operation.inputs, + .outputs = operation.outputs}; +} + +static V1_2::Operation convertToV1_2(const V1_1::Operation& operation) { + return {.type = convertToV1_2(operation.type), + .inputs = operation.inputs, + .outputs = operation.outputs}; +} + +static V1_2::Operation uncheckedConvertToV1_2(const V1_3::Operation& operation) { + return {.type = uncheckedConvertToV1_2(operation.type), + .inputs = operation.inputs, + .outputs = operation.outputs}; +} + +static V1_3::Operation convertToV1_3(const V1_0::Operation& operation) { + return {.type = convertToV1_3(operation.type), + .inputs = operation.inputs, + .outputs = operation.outputs}; +} + +static V1_3::Operation convertToV1_3(const V1_1::Operation& operation) { + return {.type = convertToV1_3(operation.type), + .inputs = operation.inputs, + .outputs = operation.outputs}; +} + +static V1_3::Operation convertToV1_3(const V1_2::Operation& operation) { + return {.type = convertToV1_3(operation.type), + .inputs = operation.inputs, + 
.outputs = operation.outputs}; +} + +static hidl_vec<V1_0::Operation> uncheckedConvertToV1_0( + const hidl_vec<V1_3::Operation>& operations) { + hidl_vec<V1_0::Operation> result(operations.size()); + std::transform( + operations.begin(), operations.end(), result.begin(), + [](const V1_3::Operation& operation) { return uncheckedConvertToV1_0(operation); }); + return result; +} + +static hidl_vec<V1_0::Operation> uncheckedConvertToV1_0( + const hidl_vec<V1_2::Operation>& operations) { + hidl_vec<V1_0::Operation> result(operations.size()); + std::transform( + operations.begin(), operations.end(), result.begin(), + [](const V1_2::Operation& operation) { return uncheckedConvertToV1_0(operation); }); + return result; +} + +static hidl_vec<V1_2::Operation> uncheckedConvertToV1_2( + const hidl_vec<V1_3::Operation>& operations) { + hidl_vec<V1_2::Operation> result(operations.size()); + std::transform( + operations.begin(), operations.end(), result.begin(), + [](const V1_3::Operation& operation) { return uncheckedConvertToV1_2(operation); }); + return result; +} + +static hidl_vec<V1_1::Operation> uncheckedConvertToV1_1( + const hidl_vec<V1_2::Operation>& operations) { + hidl_vec<V1_1::Operation> result(operations.size()); + std::transform( + operations.begin(), operations.end(), result.begin(), + [](const V1_2::Operation& operation) { return uncheckedConvertToV1_1(operation); }); + return result; +} + +static hidl_vec<V1_1::Operation> uncheckedConvertToV1_1( + const hidl_vec<V1_3::Operation>& operations) { + hidl_vec<V1_1::Operation> result(operations.size()); + std::transform( + operations.begin(), operations.end(), result.begin(), + [](const V1_3::Operation& operation) { return uncheckedConvertToV1_1(operation); }); + return result; +} + +static hidl_vec<V1_2::Operation> convertToV1_2(const hidl_vec<V1_0::Operation>& operations) { + hidl_vec<V1_2::Operation> result(operations.size()); + std::transform(operations.begin(), operations.end(), result.begin(), + [](const 
V1_0::Operation& operation) { return convertToV1_2(operation); }); + return result; +} + +static hidl_vec<V1_2::Operation> convertToV1_2(const hidl_vec<V1_1::Operation>& operations) { + hidl_vec<V1_2::Operation> result(operations.size()); + std::transform(operations.begin(), operations.end(), result.begin(), + [](const V1_1::Operation& operation) { return convertToV1_2(operation); }); + return result; +} + +static hidl_vec<V1_3::Operation> convertToV1_3(const hidl_vec<V1_0::Operation>& operations) { + hidl_vec<V1_3::Operation> result(operations.size()); + std::transform(operations.begin(), operations.end(), result.begin(), + [](const V1_0::Operation& operation) { return convertToV1_3(operation); }); + return result; +} + +static hidl_vec<V1_3::Operation> convertToV1_3(const hidl_vec<V1_1::Operation>& operations) { + hidl_vec<V1_3::Operation> result(operations.size()); + std::transform(operations.begin(), operations.end(), result.begin(), + [](const V1_1::Operation& operation) { return convertToV1_3(operation); }); + return result; +} + +static hidl_vec<V1_3::Operation> convertToV1_3(const hidl_vec<V1_2::Operation>& operations) { + hidl_vec<V1_3::Operation> result(operations.size()); + std::transform(operations.begin(), operations.end(), result.begin(), + [](const V1_2::Operation& operation) { return convertToV1_3(operation); }); + return result; +} + +static bool compliantWithV1_0(const V1_2::OperandType& operandType) { + return validOperandType(static_cast<V1_0::OperandType>(operandType)); +} + +static bool compliantWithV1_0(const V1_3::OperandType& operandType) { + return validOperandType(static_cast<V1_0::OperandType>(operandType)); +} + +static bool compliantWithV1_2(const V1_3::OperandType& operandType) { + return validOperandType(static_cast<V1_2::OperandType>(operandType)); +} + +V1_0::OperandType convertToV1_0(const V1_2::OperandType& operandType) { + if (!compliantWithV1_0(operandType)) { + LOG(ERROR) << "Upcasting non-compliant operand type " << 
toString(operandType) + << " from V1_2::OperandType to V1_0::OperandType"; + } + return static_cast<V1_0::OperandType>(operandType); +} + +V1_2::OperandType convertToV1_2(const V1_0::OperandType& operandType) { + return static_cast<V1_2::OperandType>(operandType); +} + +V1_2::OperandType convertToV1_2(const V1_3::OperandType& operandType) { + if (!compliantWithV1_2(operandType)) { + LOG(ERROR) << "Upcasting non-compliant operand type " << toString(operandType) + << " from V1_3::OperandType to V1_2::OperandType"; + } + return static_cast<V1_2::OperandType>(operandType); +} + +V1_0::OperandType convertToV1_0(const V1_3::OperandType& operandType) { + if (!compliantWithV1_0(operandType)) { + LOG(ERROR) << "Upcasting non-compliant operand type " << toString(operandType) + << " from V1_3::Operand to V1_0::Operand"; + } + return static_cast<V1_0::OperandType>(operandType); +} + +bool compliantWithV1_0(hal::V1_0::OperandLifeTime lifetime) { + return true; +} + +bool compliantWithV1_0(hal::V1_3::OperandLifeTime lifetime) { + return lifetime != V1_3::OperandLifeTime::SUBGRAPH; +} + +bool compliantWithV1_3(hal::V1_0::OperandLifeTime lifetime) { + return true; +} + +bool compliantWithV1_3(hal::V1_3::OperandLifeTime lifetime) { + return true; +} + +V1_0::OperandLifeTime convertToV1_0(V1_0::OperandLifeTime lifetime) { + return lifetime; +} + +V1_0::OperandLifeTime convertToV1_0(V1_3::OperandLifeTime lifetime) { + if (!compliantWithV1_0(lifetime)) { + LOG(ERROR) << "Upcasting non-compliant lifetime " << toString(lifetime) + << " from V1_3 to V1_0"; + } + return static_cast<V1_0::OperandLifeTime>(lifetime); +} + +V1_3::OperandLifeTime convertToV1_3(V1_0::OperandLifeTime lifetime) { + return static_cast<V1_3::OperandLifeTime>(lifetime); +} + +V1_3::OperandLifeTime convertToV1_3(V1_3::OperandLifeTime lifetime) { + return lifetime; +} + +V1_0::Operand convertToV1_0(const V1_2::Operand& operand) { + return {.type = convertToV1_0(operand.type), + .dimensions = operand.dimensions, + 
.numberOfConsumers = operand.numberOfConsumers, + .scale = operand.scale, + .zeroPoint = operand.zeroPoint, + .lifetime = convertToV1_0(operand.lifetime), + .location = operand.location}; +} + +V1_0::Operand convertToV1_0(const V1_3::Operand& operand) { + return {.type = convertToV1_0(operand.type), + .dimensions = operand.dimensions, + .numberOfConsumers = operand.numberOfConsumers, + .scale = operand.scale, + .zeroPoint = operand.zeroPoint, + .lifetime = convertToV1_0(operand.lifetime), + .location = operand.location}; +} + +V1_2::Operand convertToV1_2(const V1_0::Operand& operand) { + return {.type = convertToV1_2(operand.type), + .dimensions = operand.dimensions, + .numberOfConsumers = operand.numberOfConsumers, + .scale = operand.scale, + .zeroPoint = operand.zeroPoint, + .lifetime = operand.lifetime, + .location = operand.location}; +} + +V1_2::Operand convertToV1_2(const V1_3::Operand& operand) { + return {.type = convertToV1_2(operand.type), + .dimensions = operand.dimensions, + .numberOfConsumers = operand.numberOfConsumers, + .scale = operand.scale, + .zeroPoint = operand.zeroPoint, + .lifetime = static_cast<V1_0::OperandLifeTime>(operand.lifetime), + .location = operand.location, + .extraParams = operand.extraParams}; +} + +V1_3::Operand convertToV1_3(const V1_0::Operand& operand) { + return {.type = static_cast<V1_3::OperandType>(operand.type), + .dimensions = operand.dimensions, + .numberOfConsumers = operand.numberOfConsumers, + .scale = operand.scale, + .zeroPoint = operand.zeroPoint, + .lifetime = convertToV1_3(operand.lifetime), + .location = operand.location}; +} + +V1_3::Operand convertToV1_3(const V1_2::Operand& operand) { + return {.type = static_cast<V1_3::OperandType>(operand.type), + .dimensions = operand.dimensions, + .numberOfConsumers = operand.numberOfConsumers, + .scale = operand.scale, + .zeroPoint = operand.zeroPoint, + .lifetime = convertToV1_3(operand.lifetime), + .location = operand.location, + .extraParams = operand.extraParams}; 
+} + +V1_3::Operand convertToV1_3(const V1_3::Operand& operand) { + return operand; +} + +hidl_vec<V1_0::Operand> convertToV1_0(const hidl_vec<V1_0::Operand>& operands) { + return operands; +} + +hidl_vec<V1_0::Operand> convertToV1_0(const hidl_vec<V1_2::Operand>& operands) { + hidl_vec<V1_0::Operand> result(operands.size()); + std::transform(operands.begin(), operands.end(), result.begin(), + [](const V1_2::Operand& operand) { return convertToV1_0(operand); }); + return result; +} + +hidl_vec<V1_0::Operand> convertToV1_0(const hidl_vec<V1_3::Operand>& operands) { + hidl_vec<V1_0::Operand> result(operands.size()); + std::transform(operands.begin(), operands.end(), result.begin(), + [](const V1_3::Operand& operand) { return convertToV1_0(operand); }); + return result; +} + +hidl_vec<V1_2::Operand> convertToV1_2(const hidl_vec<V1_0::Operand>& operands) { + hidl_vec<V1_2::Operand> result(operands.size()); + std::transform(operands.begin(), operands.end(), result.begin(), + [](const V1_0::Operand& operand) { return convertToV1_2(operand); }); + return result; +} + +hidl_vec<V1_2::Operand> convertToV1_2(const hidl_vec<V1_2::Operand>& operands) { + return operands; +} + +hidl_vec<V1_2::Operand> convertToV1_2(const hidl_vec<V1_3::Operand>& operands) { + hidl_vec<V1_2::Operand> result(operands.size()); + std::transform(operands.begin(), operands.end(), result.begin(), + [](const V1_3::Operand& operand) { return convertToV1_2(operand); }); + return result; +} + +hidl_vec<V1_3::Operand> convertToV1_3(const hidl_vec<V1_0::Operand>& operands) { + hidl_vec<V1_3::Operand> result(operands.size()); + std::transform(operands.begin(), operands.end(), result.begin(), + [](const V1_0::Operand& operand) { return convertToV1_3(operand); }); + return result; +} + +hidl_vec<V1_3::Operand> convertToV1_3(const hidl_vec<V1_2::Operand>& operands) { + hidl_vec<V1_3::Operand> result(operands.size()); + std::transform(operands.begin(), operands.end(), result.begin(), + [](const V1_2::Operand& 
operand) { return convertToV1_3(operand); }); + return result; +} + +hidl_vec<V1_3::Operand> convertToV1_3(const hidl_vec<V1_3::Operand>& operands) { + return operands; +} + +V1_0::Model convertToV1_0(const V1_0::Model& model) { + return model; +} + +V1_0::Model convertToV1_0(const V1_1::Model& model) { + if (!compliantWithV1_0(model)) { + LOG(ERROR) << "Upcasting non-compliant model " << SHOW_IF_DEBUG(toString(model)) + << " from V1_1::Model to V1_0::Model"; + } + return {.operands = model.operands, + .operations = uncheckedConvertToV1_0(model.operations), + .inputIndexes = model.inputIndexes, + .outputIndexes = model.outputIndexes, + .operandValues = model.operandValues, + .pools = model.pools}; +} + +V1_0::Model convertToV1_0(const V1_2::Model& model) { + if (!compliantWithV1_0(model)) { + LOG(ERROR) << "Upcasting non-compliant model " << SHOW_IF_DEBUG(toString(model)) + << " from V1_2::Model to V1_0::Model"; + } + return {.operands = convertToV1_0(model.operands), + .operations = uncheckedConvertToV1_0(model.operations), + .inputIndexes = model.inputIndexes, + .outputIndexes = model.outputIndexes, + .operandValues = model.operandValues, + .pools = model.pools}; +} + +V1_0::Model convertToV1_0(const V1_3::Model& model) { + if (!compliantWithV1_0(model)) { + LOG(ERROR) << "Upcasting non-compliant model " << SHOW_IF_DEBUG(toString(model)) + << " from V1_3::Model to V1_0::Model"; + } + return {.operands = convertToV1_0(model.main.operands), + .operations = uncheckedConvertToV1_0(model.main.operations), + .inputIndexes = model.main.inputIndexes, + .outputIndexes = model.main.outputIndexes, + .operandValues = model.operandValues, + .pools = model.pools}; +} + +V1_1::Model convertToV1_1(const V1_0::Model& model) { + return {.operands = model.operands, + .operations = convertToV1_1(model.operations), + .inputIndexes = model.inputIndexes, + .outputIndexes = model.outputIndexes, + .operandValues = model.operandValues, + .pools = model.pools, + 
.relaxComputationFloat32toFloat16 = false}; +} + +V1_1::Model convertToV1_1(const V1_1::Model& model) { + return model; +} + +V1_1::Model convertToV1_1(const V1_2::Model& model) { + if (!compliantWithV1_1(model)) { + LOG(ERROR) << "Upcasting non-compliant model " << SHOW_IF_DEBUG(toString(model)) + << " from V1_2::Model to V1_1::Model"; + } + return {.operands = convertToV1_0(model.operands), // Operands in 1.1 and 1.0 are identical. + .operations = uncheckedConvertToV1_1(model.operations), + .inputIndexes = model.inputIndexes, + .outputIndexes = model.outputIndexes, + .operandValues = model.operandValues, + .pools = model.pools, + .relaxComputationFloat32toFloat16 = model.relaxComputationFloat32toFloat16}; +} + +V1_1::Model convertToV1_1(const V1_3::Model& model) { + if (!compliantWithV1_1(model)) { + LOG(ERROR) << "Upcasting non-compliant model " << SHOW_IF_DEBUG(toString(model)) + << " from V1_3::Model to V1_1::Model"; + } + return {// Operands in 1.1 and 1.0 are identical. + .operands = convertToV1_0(model.main.operands), + .operations = uncheckedConvertToV1_1(model.main.operations), + .inputIndexes = model.main.inputIndexes, + .outputIndexes = model.main.outputIndexes, + .operandValues = model.operandValues, + .pools = model.pools, + .relaxComputationFloat32toFloat16 = model.relaxComputationFloat32toFloat16}; +} + +V1_2::Model convertToV1_2(const V1_0::Model& model) { + return {.operands = convertToV1_2(model.operands), + .operations = convertToV1_2(model.operations), + .inputIndexes = model.inputIndexes, + .outputIndexes = model.outputIndexes, + .operandValues = model.operandValues, + .pools = model.pools, + .relaxComputationFloat32toFloat16 = false}; +} + +V1_2::Model convertToV1_2(const V1_1::Model& model) { + return {.operands = convertToV1_2(model.operands), + .operations = convertToV1_2(model.operations), + .inputIndexes = model.inputIndexes, + .outputIndexes = model.outputIndexes, + .operandValues = model.operandValues, + .pools = model.pools, + 
.relaxComputationFloat32toFloat16 = model.relaxComputationFloat32toFloat16}; +} + +V1_2::Model convertToV1_2(const V1_2::Model& model) { + return model; +} + +V1_2::Model convertToV1_2(const V1_3::Model& model) { + if (!compliantWithV1_2(model)) { + LOG(ERROR) << "Upcasting non-compliant model " << SHOW_IF_DEBUG(toString(model)) + << " from V1_3::Model to V1_2::Model"; + } + return {.operands = convertToV1_2(model.main.operands), + .operations = uncheckedConvertToV1_2(model.main.operations), + .inputIndexes = model.main.inputIndexes, + .outputIndexes = model.main.outputIndexes, + .operandValues = model.operandValues, + .pools = model.pools, + .relaxComputationFloat32toFloat16 = model.relaxComputationFloat32toFloat16, + .extensionNameToPrefix = model.extensionNameToPrefix}; +} + +V1_3::Model convertToV1_3(const V1_0::Model& model) { + return {.main = {.operands = convertToV1_3(model.operands), + .operations = convertToV1_3(model.operations), + .inputIndexes = model.inputIndexes, + .outputIndexes = model.outputIndexes}, + .operandValues = model.operandValues, + .pools = model.pools, + .relaxComputationFloat32toFloat16 = false}; +} + +V1_3::Model convertToV1_3(const V1_1::Model& model) { + return {.main = {.operands = convertToV1_3(model.operands), + .operations = convertToV1_3(model.operations), + .inputIndexes = model.inputIndexes, + .outputIndexes = model.outputIndexes}, + .operandValues = model.operandValues, + .pools = model.pools, + .relaxComputationFloat32toFloat16 = model.relaxComputationFloat32toFloat16}; +} + +V1_3::Model convertToV1_3(const V1_2::Model& model) { + return {.main = {.operands = convertToV1_3(model.operands), + .operations = convertToV1_3(model.operations), + .inputIndexes = model.inputIndexes, + .outputIndexes = model.outputIndexes}, + .operandValues = model.operandValues, + .pools = model.pools, + .relaxComputationFloat32toFloat16 = model.relaxComputationFloat32toFloat16, + .extensionNameToPrefix = model.extensionNameToPrefix}; +} + 
+V1_3::Model convertToV1_3(const V1_3::Model& model) { + return model; +} + +bool compliantWithV1_0(const V1_0::Request& request) { + return true; +} + +bool compliantWithV1_0(const V1_3::Request& request) { + return std::all_of(request.pools.begin(), request.pools.end(), [](const auto& pool) { + if (pool.getDiscriminator() != V1_3::Request::MemoryPool::hidl_discriminator::hidlMemory) { + return false; + } + const auto& name = pool.hidlMemory().name(); + return name == "ashmem" || name == "mmap_fd"; + }); +} + +bool compliantWithV1_2(const V1_3::Request& request) { + return std::all_of(request.pools.begin(), request.pools.end(), [](const auto& pool) { + if (pool.getDiscriminator() != V1_3::Request::MemoryPool::hidl_discriminator::hidlMemory) { + return false; + } + const auto& name = pool.hidlMemory().name(); + return name == "ashmem" || name == "mmap_fd" || name == "hardware_buffer_blob" || + name == "hardware_buffer"; + }); +} + +static hidl_memory convertToV1_0(const V1_3::Request::MemoryPool& pool) { + switch (pool.getDiscriminator()) { + case V1_3::Request::MemoryPool::hidl_discriminator::hidlMemory: + return pool.hidlMemory(); + case V1_3::Request::MemoryPool::hidl_discriminator::token: + return hidl_memory{}; + } +} + +static V1_3::Request::MemoryPool convertToV1_3(const hidl_memory& pool) { + V1_3::Request::MemoryPool ret; + ret.hidlMemory(pool); + return ret; +} + +V1_0::Request convertToV1_0(const V1_0::Request& request) { + return request; +} + +static V1_0::Request uncheckedConvertToV1_0(const V1_3::Request& request) { + hidl_vec<hidl_memory> pools(request.pools.size()); + std::transform(request.pools.begin(), request.pools.end(), pools.begin(), + [](const auto& pool) { return convertToV1_0(pool); }); + return {.inputs = request.inputs, .outputs = request.outputs, .pools = std::move(pools)}; +} + +V1_0::Request convertToV1_0(const V1_3::Request& request) { + if (!compliantWithV1_0(request)) { + LOG(ERROR) << "Upcasting non-compliant request " << 
SHOW_IF_DEBUG(toString(request)) + << " from V1_3::Request to V1_0::Request of version 1.0"; + } + return uncheckedConvertToV1_0(request); +} + +V1_0::Request convertToV1_2(const V1_3::Request& request) { + if (!compliantWithV1_2(request)) { + LOG(ERROR) << "Upcasting non-compliant request " << SHOW_IF_DEBUG(toString(request)) + << " from V1_3::Request to V1_0::Request of version 1.2"; + } + return uncheckedConvertToV1_0(request); +} + +V1_3::Request convertToV1_3(const V1_0::Request& request) { + hidl_vec<V1_3::Request::MemoryPool> pools(request.pools.size()); + std::transform(request.pools.begin(), request.pools.end(), pools.begin(), + [](const auto& pool) { return convertToV1_3(pool); }); + return {.inputs = request.inputs, .outputs = request.outputs, .pools = std::move(pools)}; +} + +V1_3::Request convertToV1_3(const V1_3::Request& request) { + return request; +} + +FenceState syncWait(int fd, int timeout) { + // This implementation is directly based on the ::sync_wait() implementation. + + struct pollfd fds; + int ret; + + if (fd < 0) { + errno = EINVAL; + return FenceState::UNKNOWN; + } + + fds.fd = fd; + fds.events = POLLIN; + + do { + ret = poll(&fds, 1, timeout); + if (ret > 0) { + if (fds.revents & POLLNVAL) { + errno = EINVAL; + return FenceState::UNKNOWN; + } + if (fds.revents & POLLERR) { + errno = EINVAL; + return FenceState::ERROR; + } + return FenceState::SIGNALED; + } else if (ret == 0) { + errno = ETIME; + return FenceState::ACTIVE; + } + } while (ret == -1 && (errno == EINTR || errno == EAGAIN)); + + return FenceState::UNKNOWN; +} + +#ifdef NN_DEBUGGABLE +uint32_t getProp(const char* str, uint32_t defaultValue) { + const std::string propStr = android::base::GetProperty(str, ""); + if (propStr.size() > 0) { + return std::stoi(propStr); + } else { + return defaultValue; + } +} +#endif // NN_DEBUGGABLE + +} // namespace nn +} // namespace android
diff --git a/common/UtilsTest.cpp b/common/UtilsTest.cpp index 9365f70..4d5a32d 100644 --- a/common/UtilsTest.cpp +++ b/common/UtilsTest.cpp
@@ -13,22 +13,14 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - -#include <gmock/gmock.h> +#include <gmock/gmock-matchers.h> #include <gtest/gtest.h> #include <limits> -#include <utility> #include <vector> -#include "HalInterfaces.h" -#include "MemoryUtils.h" #include "OperationsUtils.cpp" #include "QuantUtils.h" -#include "Utils.h" -#include "ValidateHal.h" -#include "nnapi/TypeUtils.h" -#include "nnapi/Types.h" namespace android { namespace nn { @@ -67,8 +59,9 @@ } static int32_t getExtensionType(uint16_t extensionPrefix, uint16_t typeWithinExtension) { - int32_t type = (extensionPrefix << kExtensionTypeBits) | typeWithinExtension; - EXPECT_TRUE(isExtensionOperandType(static_cast<V1_3::OperandType>(type))); + constexpr uint8_t kLowBitsType = static_cast<uint8_t>(ExtensionTypeEncoding::LOW_BITS_TYPE); + int32_t type = (extensionPrefix << kLowBitsType) | typeWithinExtension; + EXPECT_TRUE(isExtensionOperandType(static_cast<OperandType>(type))); return type; } @@ -131,92 +124,6 @@ ANEURALNETWORKS_BAD_DATA); } -TEST(ValidateRequestTest, UnknownOutputRank) { - V1_3::Request::MemoryPool pool; - pool.hidlMemory(allocateSharedMemory(2 * sizeof(float))); - ASSERT_TRUE(pool.hidlMemory().valid()); - const V1_3::Model model = { - .main = - { - .operands = {{ - .type = V1_3::OperandType::TENSOR_FLOAT32, - .dimensions = {1}, - .numberOfConsumers = 1, - .lifetime = V1_3::OperandLifeTime::SUBGRAPH_INPUT, - }, - { - .type = V1_3::OperandType::TENSOR_FLOAT32, - .dimensions = {}, // unknown output rank - .numberOfConsumers = 0, - .lifetime = V1_3::OperandLifeTime::SUBGRAPH_OUTPUT, - }}, - .operations = {{ - .type = V1_3::OperationType::ABS, - .inputs = {0}, - .outputs = {1}, - }}, - .inputIndexes = {0}, - .outputIndexes = {1}, - }, - }; - const V1_3::Request request = { - .inputs = {{ - .location = {.poolIndex = 0, .offset = 0, .length = sizeof(float)}, - .dimensions = {}, - }}, - .outputs = {{ - .location = 
{.poolIndex = 0, .offset = sizeof(float), .length = sizeof(float)}, - .dimensions = {}, - }}, - .pools = {std::move(pool)}, - }; - EXPECT_FALSE(validateRequest(request, model, /*allowUnspecifiedOutput=*/false)); -} - -TEST(ValidateRequestTest, ScalarOutput) { - V1_3::Request::MemoryPool pool; - pool.hidlMemory(allocateSharedMemory(sizeof(float) + sizeof(int32_t))); - ASSERT_TRUE(pool.hidlMemory().valid()); - const V1_3::Model model = { - .main = - { - .operands = {{ - .type = V1_3::OperandType::TENSOR_FLOAT32, - .dimensions = {1}, - .numberOfConsumers = 1, - .lifetime = V1_3::OperandLifeTime::SUBGRAPH_INPUT, - }, - { - .type = V1_3::OperandType::INT32, - .dimensions = {}, - .numberOfConsumers = 0, - .lifetime = V1_3::OperandLifeTime::SUBGRAPH_OUTPUT, - }}, - .operations = {{ - .type = V1_3::OperationType::RANK, - .inputs = {0}, - .outputs = {1}, - }}, - .inputIndexes = {0}, - .outputIndexes = {1}, - }, - }; - const V1_3::Request request = { - .inputs = {{ - .location = {.poolIndex = 0, .offset = 0, .length = sizeof(float)}, - .dimensions = {}, - }}, - .outputs = {{ - .location = {.poolIndex = 0, - .offset = sizeof(float), - .length = sizeof(int32_t)}, - .dimensions = {}, - }}, - .pools = {std::move(pool)}, - }; - EXPECT_TRUE(validateRequest(request, model, /*allowUnspecifiedOutput=*/false)); -} - class CombineDimensionsTest : public ::testing::Test { protected: void testCompatible(const std::vector<uint32_t>& lhs, const std::vector<uint32_t>& rhs,
diff --git a/common/ValidateHal.cpp b/common/ValidateHal.cpp index b88c5da..e5b9b12 100644 --- a/common/ValidateHal.cpp +++ b/common/ValidateHal.cpp
@@ -29,11 +29,12 @@ #include "OperationsUtils.h" #include "Tracing.h" #include "Utils.h" -#include "nnapi/TypeUtils.h" namespace android { namespace nn { +using namespace hal; + template <class T_Model> struct ModelToHalVersion; template <> @@ -55,27 +56,27 @@ class MemoryAccessVerifier { public: - MemoryAccessVerifier(const hardware::hidl_vec<hardware::hidl_memory>& pools) + MemoryAccessVerifier(const hidl_vec<hidl_memory>& pools) : mPoolCount(pools.size()), mPoolSizes(mPoolCount) { for (size_t i = 0; i < mPoolCount; i++) { mPoolSizes[i] = pools[i].size(); } } - MemoryAccessVerifier(const hardware::hidl_vec<V1_3::Request::MemoryPool>& pools) + MemoryAccessVerifier(const hidl_vec<V1_3::Request::MemoryPool>& pools) : mPoolCount(pools.size()), mPoolSizes(mPoolCount) { for (size_t i = 0; i < mPoolCount; i++) { switch (pools[i].getDiscriminator()) { - case V1_3::Request::MemoryPool::hidl_discriminator::hidlMemory: + case Request::MemoryPool::hidl_discriminator::hidlMemory: mPoolSizes[i] = pools[i].hidlMemory().size(); break; - case V1_3::Request::MemoryPool::hidl_discriminator::token: + case Request::MemoryPool::hidl_discriminator::token: // Set size to 0 to enforce length == 0 && offset == 0. 
mPoolSizes[i] = 0; break; } } } - bool validate(const V1_0::DataLocation& location) const { + bool validate(const DataLocation& location) const { if (location.poolIndex >= mPoolCount) { LOG(ERROR) << "Invalid poolIndex " << location.poolIndex << "/" << mPoolCount; return false; @@ -98,29 +99,29 @@ static bool validateOperandExtraParams(const V1_3::Operand& operand, uint32_t index) { switch (operand.type) { - case V1_3::OperandType::FLOAT32: - case V1_3::OperandType::INT32: - case V1_3::OperandType::UINT32: - case V1_3::OperandType::BOOL: - case V1_3::OperandType::SUBGRAPH: - case V1_3::OperandType::TENSOR_FLOAT32: - case V1_3::OperandType::TENSOR_FLOAT16: - case V1_3::OperandType::TENSOR_INT32: - case V1_3::OperandType::TENSOR_QUANT8_ASYMM: - case V1_3::OperandType::TENSOR_QUANT8_ASYMM_SIGNED: - case V1_3::OperandType::TENSOR_QUANT8_SYMM: - case V1_3::OperandType::TENSOR_QUANT16_ASYMM: - case V1_3::OperandType::TENSOR_QUANT16_SYMM: - case V1_3::OperandType::TENSOR_BOOL8: { + case OperandType::FLOAT32: + case OperandType::INT32: + case OperandType::UINT32: + case OperandType::BOOL: + case OperandType::SUBGRAPH: + case OperandType::TENSOR_FLOAT32: + case OperandType::TENSOR_FLOAT16: + case OperandType::TENSOR_INT32: + case OperandType::TENSOR_QUANT8_ASYMM: + case OperandType::TENSOR_QUANT8_ASYMM_SIGNED: + case OperandType::TENSOR_QUANT8_SYMM: + case OperandType::TENSOR_QUANT16_ASYMM: + case OperandType::TENSOR_QUANT16_SYMM: + case OperandType::TENSOR_BOOL8: { NN_RET_CHECK(operand.extraParams.getDiscriminator() == - V1_2::Operand::ExtraParams::hidl_discriminator::none) + OperandExtraParams::hidl_discriminator::none) << "Operand " << index << ": Operand of type " << getOperandTypeName(operand.type) << " has incorrect extraParams: " << toString(operand.extraParams); } break; - case V1_3::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL: { + case OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL: { NN_RET_CHECK(operand.extraParams.getDiscriminator() == - 
V1_2::Operand::ExtraParams::hidl_discriminator::channelQuant) + OperandExtraParams::hidl_discriminator::channelQuant) << "Operand " << index << ": Operand of type " << getOperandTypeName(operand.type) << " without a Channel Quantization params"; auto& channelQuant = operand.extraParams.channelQuant(); @@ -150,9 +151,9 @@ default: { if (isExtensionOperandType(operand.type)) { NN_RET_CHECK(operand.extraParams.getDiscriminator() == - V1_2::Operand::ExtraParams::hidl_discriminator::extension || + OperandExtraParams::hidl_discriminator::extension || operand.extraParams.getDiscriminator() == - V1_2::Operand::ExtraParams::hidl_discriminator::none) + OperandExtraParams::hidl_discriminator::none) << "Operand " << index << ": Extension operand of type " << getOperandTypeName(operand.type) << " has incorrect extraParams: " << toString(operand.extraParams); @@ -164,11 +165,10 @@ } template <typename VersionedOperand> -static bool validateOperands(const hardware::hidl_vec<VersionedOperand>& operands, - const hardware::hidl_vec<uint8_t>& operandValues, - const hardware::hidl_vec<hardware::hidl_memory>& pools, - const hardware::hidl_vec<V1_3::Subgraph>& subgraphs, - bool allowUnspecifiedRank) { +static bool validateOperands(const hidl_vec<VersionedOperand>& operands, + const hidl_vec<uint8_t>& operandValues, + const hidl_vec<hidl_memory>& pools, + const hidl_vec<Subgraph>& subgraphs, bool allowUnspecifiedRank) { uint32_t index = 0; MemoryAccessVerifier poolVerifier(pools); for (auto& versionedOperand : operands) { @@ -182,13 +182,13 @@ V1_3::Operand operand = convertToV1_3(versionedOperand); // Validate type and dimensions. 
switch (operand.type) { - case V1_3::OperandType::FLOAT16: - case V1_3::OperandType::FLOAT32: - case V1_3::OperandType::INT32: - case V1_3::OperandType::UINT32: - case V1_3::OperandType::BOOL: - case V1_3::OperandType::SUBGRAPH: - case V1_3::OperandType::OEM: { + case OperandType::FLOAT16: + case OperandType::FLOAT32: + case OperandType::INT32: + case OperandType::UINT32: + case OperandType::BOOL: + case OperandType::SUBGRAPH: + case OperandType::OEM: { size_t count = operand.dimensions.size(); if (count != 0) { LOG(ERROR) << "Operand " << index << ": Scalar data has dimensions of rank " @@ -197,20 +197,19 @@ } break; } - case V1_3::OperandType::TENSOR_FLOAT16: - case V1_3::OperandType::TENSOR_FLOAT32: - case V1_3::OperandType::TENSOR_INT32: - case V1_3::OperandType::TENSOR_QUANT8_ASYMM: - case V1_3::OperandType::TENSOR_QUANT8_ASYMM_SIGNED: - case V1_3::OperandType::TENSOR_QUANT8_SYMM: - case V1_3::OperandType::TENSOR_QUANT16_ASYMM: - case V1_3::OperandType::TENSOR_QUANT16_SYMM: - case V1_3::OperandType::TENSOR_BOOL8: - case V1_3::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL: - case V1_3::OperandType::TENSOR_OEM_BYTE: { - if ((!allowUnspecifiedRank || - operand.lifetime == V1_3::OperandLifeTime::CONSTANT_COPY || - operand.lifetime == V1_3::OperandLifeTime::CONSTANT_REFERENCE) && + case OperandType::TENSOR_FLOAT16: + case OperandType::TENSOR_FLOAT32: + case OperandType::TENSOR_INT32: + case OperandType::TENSOR_QUANT8_ASYMM: + case OperandType::TENSOR_QUANT8_ASYMM_SIGNED: + case OperandType::TENSOR_QUANT8_SYMM: + case OperandType::TENSOR_QUANT16_ASYMM: + case OperandType::TENSOR_QUANT16_SYMM: + case OperandType::TENSOR_BOOL8: + case OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL: + case OperandType::TENSOR_OEM_BYTE: { + if ((!allowUnspecifiedRank || operand.lifetime == OperandLifeTime::CONSTANT_COPY || + operand.lifetime == OperandLifeTime::CONSTANT_REFERENCE) && operand.dimensions.size() == 0) { LOG(ERROR) << "Operand " << index << ": Tensor has dimensions of rank 0"; 
return false; @@ -228,16 +227,16 @@ // Validate the scale. switch (operand.type) { - case V1_3::OperandType::FLOAT16: - case V1_3::OperandType::FLOAT32: - case V1_3::OperandType::INT32: - case V1_3::OperandType::UINT32: - case V1_3::OperandType::BOOL: - case V1_3::OperandType::SUBGRAPH: - case V1_3::OperandType::TENSOR_FLOAT16: - case V1_3::OperandType::TENSOR_FLOAT32: - case V1_3::OperandType::TENSOR_BOOL8: - case V1_3::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL: + case OperandType::FLOAT16: + case OperandType::FLOAT32: + case OperandType::INT32: + case OperandType::UINT32: + case OperandType::BOOL: + case OperandType::SUBGRAPH: + case OperandType::TENSOR_FLOAT16: + case OperandType::TENSOR_FLOAT32: + case OperandType::TENSOR_BOOL8: + case OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL: if (operand.scale != 0.f) { LOG(ERROR) << "Operand " << index << ": Operand of type " << getOperandTypeName(operand.type) << " with a non-zero scale (" @@ -245,7 +244,7 @@ return false; } break; - case V1_3::OperandType::TENSOR_INT32: + case OperandType::TENSOR_INT32: // TENSOR_INT32 may be used with or without scale, depending on the operation. if (operand.scale < 0.f) { LOG(ERROR) << "Operand " << index << ": Operand of type " @@ -253,11 +252,11 @@ return false; } break; - case V1_3::OperandType::TENSOR_QUANT8_ASYMM: - case V1_3::OperandType::TENSOR_QUANT8_ASYMM_SIGNED: - case V1_3::OperandType::TENSOR_QUANT8_SYMM: - case V1_3::OperandType::TENSOR_QUANT16_ASYMM: - case V1_3::OperandType::TENSOR_QUANT16_SYMM: + case OperandType::TENSOR_QUANT8_ASYMM: + case OperandType::TENSOR_QUANT8_ASYMM_SIGNED: + case OperandType::TENSOR_QUANT8_SYMM: + case OperandType::TENSOR_QUANT16_ASYMM: + case OperandType::TENSOR_QUANT16_SYMM: if (operand.scale <= 0.f) { LOG(ERROR) << "Operand " << index << ": Operand of type " << getOperandTypeName(operand.type) << " with a non-positive scale"; @@ -278,18 +277,18 @@ // Validate the zeroPoint. 
switch (operand.type) { - case V1_3::OperandType::FLOAT16: - case V1_3::OperandType::FLOAT32: - case V1_3::OperandType::INT32: - case V1_3::OperandType::UINT32: - case V1_3::OperandType::BOOL: - case V1_3::OperandType::SUBGRAPH: - case V1_3::OperandType::TENSOR_FLOAT16: - case V1_3::OperandType::TENSOR_FLOAT32: - case V1_3::OperandType::TENSOR_INT32: - case V1_3::OperandType::TENSOR_BOOL8: - case V1_3::OperandType::TENSOR_QUANT8_SYMM: - case V1_3::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL: + case OperandType::FLOAT16: + case OperandType::FLOAT32: + case OperandType::INT32: + case OperandType::UINT32: + case OperandType::BOOL: + case OperandType::SUBGRAPH: + case OperandType::TENSOR_FLOAT16: + case OperandType::TENSOR_FLOAT32: + case OperandType::TENSOR_INT32: + case OperandType::TENSOR_BOOL8: + case OperandType::TENSOR_QUANT8_SYMM: + case OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL: if (operand.zeroPoint != 0) { LOG(ERROR) << "Operand " << index << ": Operand of type " << getOperandTypeName(operand.type) << " with a non-zero zeroPoint " @@ -297,7 +296,7 @@ return false; } break; - case V1_3::OperandType::TENSOR_QUANT8_ASYMM: + case OperandType::TENSOR_QUANT8_ASYMM: if (operand.zeroPoint < 0 || operand.zeroPoint > 255) { LOG(ERROR) << "Operand " << index << ": Operand of type " << getOperandTypeName(operand.type) << " with an invalid zeroPoint " @@ -305,7 +304,7 @@ return false; } break; - case V1_3::OperandType::TENSOR_QUANT8_ASYMM_SIGNED: + case OperandType::TENSOR_QUANT8_ASYMM_SIGNED: if (operand.zeroPoint < -128 || operand.zeroPoint > 127) { LOG(ERROR) << "Operand " << index << ": Operand of type " << getOperandTypeName(operand.type) << " with an invalid zeroPoint " @@ -313,7 +312,7 @@ return false; } break; - case V1_3::OperandType::TENSOR_QUANT16_ASYMM: + case OperandType::TENSOR_QUANT16_ASYMM: if (operand.zeroPoint < 0 || operand.zeroPoint > 65535) { LOG(ERROR) << "Operand " << index << ": Operand of type " << getOperandTypeName(operand.type) << " with an 
invalid zeroPoint " @@ -321,7 +320,7 @@ return false; } break; - case V1_3::OperandType::TENSOR_QUANT16_SYMM: + case OperandType::TENSOR_QUANT16_SYMM: if (operand.zeroPoint != 0) { LOG(ERROR) << "Operand " << index << ": Operand of type " << getOperandTypeName(operand.type) << " with a non-zero zeroPoint " @@ -343,9 +342,9 @@ NN_RET_CHECK(validateOperandExtraParams(operand, index)); // Validate the lifetime and the location. - const V1_0::DataLocation& location = operand.location; + const DataLocation& location = operand.location; switch (operand.lifetime) { - case V1_3::OperandLifeTime::CONSTANT_COPY: + case OperandLifeTime::CONSTANT_COPY: if (location.poolIndex != 0) { LOG(ERROR) << "Operand " << index << ": CONSTANT_COPY with a non-zero poolIndex " @@ -361,15 +360,15 @@ return false; } break; - case V1_3::OperandLifeTime::CONSTANT_REFERENCE: + case OperandLifeTime::CONSTANT_REFERENCE: if (!poolVerifier.validate(location)) { return false; } break; - case V1_3::OperandLifeTime::TEMPORARY_VARIABLE: - case V1_3::OperandLifeTime::SUBGRAPH_INPUT: - case V1_3::OperandLifeTime::SUBGRAPH_OUTPUT: - case V1_3::OperandLifeTime::NO_VALUE: + case OperandLifeTime::TEMPORARY_VARIABLE: + case OperandLifeTime::SUBGRAPH_INPUT: + case OperandLifeTime::SUBGRAPH_OUTPUT: + case OperandLifeTime::NO_VALUE: if (location.poolIndex != 0 || location.offset != 0 || location.length != 0) { LOG(ERROR) << "Operand " << index << ": Unexpected poolIndex " << location.poolIndex << ", offset " << location.offset @@ -378,14 +377,14 @@ return false; } break; - case V1_3::OperandLifeTime::SUBGRAPH: { + case OperandLifeTime::SUBGRAPH: { if (location.poolIndex != 0) { LOG(ERROR) << "Operand " << index << ": SUBGRAPH with a non-zero poolIndex " << location.poolIndex; return false; } if (location.offset >= subgraphs.size()) { - LOG(ERROR) << "Model::Subgraph index out of range: " << location.offset + LOG(ERROR) << "Subgraph index out of range: " << location.offset << " >= " << subgraphs.size(); return 
false; } @@ -402,8 +401,8 @@ } // Make sure SUBGRAPH operand type and lifetime always go together. - if ((operand.type == V1_3::OperandType::SUBGRAPH) != - (operand.lifetime == V1_3::OperandLifeTime::SUBGRAPH)) { + if ((operand.type == OperandType::SUBGRAPH) != + (operand.lifetime == OperandLifeTime::SUBGRAPH)) { LOG(ERROR) << "Operand " << index << ": Operand of type " << toString(operand.type) << " cannot have lifetime " << toString(operand.lifetime); return false; @@ -411,10 +410,10 @@ // For constants, validate that the length is as expected. The other lifetimes // expect the length to be 0. Don't validate for OEM types. - if (operand.lifetime == V1_3::OperandLifeTime::CONSTANT_REFERENCE || - operand.lifetime == V1_3::OperandLifeTime::CONSTANT_COPY) { - if (!isExtensionOperandType(operand.type) && operand.type != V1_3::OperandType::OEM && - operand.type != V1_3::OperandType::TENSOR_OEM_BYTE) { + if (operand.lifetime == OperandLifeTime::CONSTANT_REFERENCE || + operand.lifetime == OperandLifeTime::CONSTANT_COPY) { + if (!isExtensionOperandType(operand.type) && operand.type != OperandType::OEM && + operand.type != OperandType::TENSOR_OEM_BYTE) { uint32_t expectedLength = nonExtensionOperandSizeOfData(operand); if (location.length != expectedLength) { LOG(ERROR) << "Operand " << index << ": For operand " << toString(operand) @@ -447,22 +446,19 @@ } template <typename VersionedOperation> -static bool validateOperations(const hardware::hidl_vec<VersionedOperation>& operations, - const hardware::hidl_vec<V1_3::Operand>& operands, - const hardware::hidl_vec<V1_3::Subgraph>& subgraphs, - ValidationMode mode) { - auto canonicalSubgraphs = uncheckedConvert(subgraphs); - auto isValidSubgraphReference = [&canonicalSubgraphs](const Operand& modelOperand) -> bool { +static bool validateOperations(const hidl_vec<VersionedOperation>& operations, + const hidl_vec<Operand>& operands, + const hidl_vec<Subgraph>& subgraphs, ValidationMode mode) { + auto isValidSubgraphReference = 
[&subgraphs](const Operand& modelOperand) -> bool { NN_RET_CHECK(modelOperand.type == OperandType::SUBGRAPH) - << "Unexpected operand type: " << modelOperand.type; - NN_RET_CHECK_LT(modelOperand.location.offset, canonicalSubgraphs.size()) + << "Unexpected operand type: " << toString(modelOperand.type); + NN_RET_CHECK_LT(modelOperand.location.offset, subgraphs.size()) << "Invalid subgraph reference"; return true; }; - auto getSubgraph = - [&canonicalSubgraphs](const Operand& modelOperand) -> const Model::Subgraph* { - CHECK_LT(modelOperand.location.offset, canonicalSubgraphs.size()); - return &canonicalSubgraphs[modelOperand.location.offset]; + auto getSubgraph = [&subgraphs](const Operand& modelOperand) -> const Subgraph* { + CHECK_LT(modelOperand.location.offset, subgraphs.size()); + return &subgraphs[modelOperand.location.offset]; }; auto getInputCount = [&getSubgraph](const Operand& modelOperand) -> uint32_t { return getSubgraph(modelOperand)->inputIndexes.size(); @@ -472,33 +468,33 @@ }; auto getInputOperand = [&getSubgraph](const Operand& modelOperand, uint32_t index) -> const Operand* { - const Model::Subgraph& subgraph = *getSubgraph(modelOperand); + const Subgraph& subgraph = *getSubgraph(modelOperand); CHECK_LT(subgraph.inputIndexes[index], subgraph.operands.size()); return &subgraph.operands[subgraph.inputIndexes[index]]; }; auto getOutputOperand = [&getSubgraph](const Operand& modelOperand, uint32_t index) -> const Operand* { - const Model::Subgraph& subgraph = *getSubgraph(modelOperand); + const Subgraph& subgraph = *getSubgraph(modelOperand); CHECK_LT(subgraph.outputIndexes[index], subgraph.operands.size()); return &subgraph.operands[subgraph.outputIndexes[index]]; }; + const size_t operandCount = operands.size(); for (auto& op : operations) { // TODO Validate the shapes and any known values. This is currently // done in CpuExecutor but should be done here for all drivers. 
- int error = validateOperation(static_cast<int32_t>(op.type), op.inputs.size(), - op.inputs.size() > 0 ? op.inputs.data() : nullptr, - op.outputs.size(), - op.outputs.size() > 0 ? op.outputs.data() : nullptr, - uncheckedConvert(operands), getHalVersion(op), - {.isValidSubgraphReference = isValidSubgraphReference, - .getSubgraphInputCount = getInputCount, - .getSubgraphOutputCount = getOutputCount, - .getSubgraphInputOperand = getInputOperand, - .getSubgraphOutputOperand = getOutputOperand, - // 1.3 HAL does not support CF operations with operands of - // unknown size. See http://b/132458982#comment63. - .allowControlFlowOperationWithOperandOfUnknownSize = - mode == ValidationMode::RUNTIME}); + int error = validateOperation( + static_cast<int32_t>(op.type), op.inputs.size(), + op.inputs.size() > 0 ? op.inputs.data() : nullptr, op.outputs.size(), + op.outputs.size() > 0 ? op.outputs.data() : nullptr, operands, getHalVersion(op), + {.isValidSubgraphReference = isValidSubgraphReference, + .getSubgraphInputCount = getInputCount, + .getSubgraphOutputCount = getOutputCount, + .getSubgraphInputOperand = getInputOperand, + .getSubgraphOutputOperand = getOutputOperand, + // 1.3 HAL does not support CF operations with operands of + // unknown size. See http://b/132458982#comment63. + .allowControlFlowOperationWithOperandOfUnknownSize = + mode == ValidationMode::RUNTIME}); if (error != ANEURALNETWORKS_NO_ERROR) { LOG(ERROR) << "Invalid operation " << toString(op.type); return false; @@ -508,9 +504,9 @@ // but it is retained here in order to emit more informative // error messages. 
for (uint32_t i : op.outputs) { - const V1_3::Operand& operand = operands[i]; - if (operand.lifetime != V1_3::OperandLifeTime::TEMPORARY_VARIABLE && - operand.lifetime != V1_3::OperandLifeTime::SUBGRAPH_OUTPUT) { + const Operand& operand = operands[i]; + if (operand.lifetime != OperandLifeTime::TEMPORARY_VARIABLE && + operand.lifetime != OperandLifeTime::SUBGRAPH_OUTPUT) { LOG(ERROR) << "Writing to operand " << i << " with incompatible lifetime " << toString(operand.lifetime); return false; @@ -520,7 +516,7 @@ return true; } -bool validatePool(const hardware::hidl_memory& pool, HalVersion ver) { +bool validatePool(const hidl_memory& pool, HalVersion ver) { const auto& name = pool.name(); if (name != "ashmem" && name != "mmap_fd" && ((ver < HalVersion::V1_2) || @@ -537,9 +533,9 @@ bool validatePool(const V1_3::Request::MemoryPool& pool, HalVersion ver) { switch (pool.getDiscriminator()) { - case V1_3::Request::MemoryPool::hidl_discriminator::hidlMemory: + case Request::MemoryPool::hidl_discriminator::hidlMemory: return validatePool(pool.hidlMemory(), ver); - case V1_3::Request::MemoryPool::hidl_discriminator::token: + case Request::MemoryPool::hidl_discriminator::token: return pool.token() > 0; } LOG(FATAL) << "unknown MemoryPool discriminator"; @@ -547,21 +543,20 @@ } template <class T_MemoryPool> -static bool validatePools(const hardware::hidl_vec<T_MemoryPool>& pools, HalVersion ver) { +static bool validatePools(const hidl_vec<T_MemoryPool>& pools, HalVersion ver) { return std::all_of(pools.begin(), pools.end(), [ver](const auto& pool) { return validatePool(pool, ver); }); } -static bool validateModelInputOutputs(const hardware::hidl_vec<uint32_t> indexes, - const hardware::hidl_vec<V1_3::Operand>& operands, - V1_3::OperandLifeTime lifetime) { +static bool validateModelInputOutputs(const hidl_vec<uint32_t> indexes, + const hidl_vec<Operand>& operands, OperandLifeTime lifetime) { const size_t operandCount = operands.size(); for (uint32_t i : indexes) { if (i >= 
operandCount) { LOG(ERROR) << "Model input or output index out of range: " << i << "/" << operandCount; return false; } - const V1_3::Operand& operand = operands[i]; + const Operand& operand = operands[i]; if (operand.lifetime != lifetime) { LOG(ERROR) << "Model input or output operand " << i << " has lifetime of " << toString(operand.lifetime) << " instead of the expected " @@ -602,12 +597,12 @@ // mark known operands for (size_t i = 0; i < model.operands.size(); ++i) { const auto& operand = model.operands[i]; - const V1_3::OperandLifeTime lifetime = convertToV1_3(operand.lifetime); - operandValueKnown[i] = lifetime == V1_3::OperandLifeTime::SUBGRAPH_INPUT || - lifetime == V1_3::OperandLifeTime::CONSTANT_COPY || - lifetime == V1_3::OperandLifeTime::CONSTANT_REFERENCE || - lifetime == V1_3::OperandLifeTime::NO_VALUE || - lifetime == V1_3::OperandLifeTime::SUBGRAPH; + const OperandLifeTime lifetime = convertToV1_3(operand.lifetime); + operandValueKnown[i] = lifetime == OperandLifeTime::SUBGRAPH_INPUT || + lifetime == OperandLifeTime::CONSTANT_COPY || + lifetime == OperandLifeTime::CONSTANT_REFERENCE || + lifetime == OperandLifeTime::NO_VALUE || + lifetime == OperandLifeTime::SUBGRAPH; } // Validate that operations are sorted into execution order. @@ -678,8 +673,8 @@ LOG(ERROR) << "Model contains a circular subgraph reference"; return false; } - for (const V1_3::Operand& operand : subgraph.operands) { - if (operand.lifetime == V1_3::OperandLifeTime::SUBGRAPH) { + for (const Operand& operand : subgraph.operands) { + if (operand.lifetime == OperandLifeTime::SUBGRAPH) { uint32_t refSubgraphIndex = operand.location.offset; if (!checkNoReferenceCycles(model, model.referenced[refSubgraphIndex], path)) { return false; @@ -705,14 +700,14 @@ } // We only need versioned operands for their validation. For all the other // validations we can use operands upcasted to the latest version. 
- const hardware::hidl_vec<V1_3::Operand> latestVersionOperands = convertToV1_3(model.operands); + const hidl_vec<Operand> latestVersionOperands = convertToV1_3(model.operands); return (validateOperands(model.operands, model.operandValues, model.pools, /*subgraphs=*/{}, /*allowUnspecifiedRank=*/version >= HalVersion::V1_2) && validateOperations(model.operations, latestVersionOperands, /*subgraphs=*/{}, mode) && validateModelInputOutputs(model.inputIndexes, latestVersionOperands, - V1_3::OperandLifeTime::SUBGRAPH_INPUT) && + OperandLifeTime::SUBGRAPH_INPUT) && validateModelInputOutputs(model.outputIndexes, latestVersionOperands, - V1_3::OperandLifeTime::SUBGRAPH_OUTPUT) && + OperandLifeTime::SUBGRAPH_OUTPUT) && validatePools(model.pools, version) && validateGraph(model)); } @@ -727,15 +722,15 @@ LOG(ERROR) << "Invalid empty model."; return false; } - auto validateSubgraph = [&model, mode](const V1_3::Subgraph& subgraph) -> bool { + auto validateSubgraph = [&model, mode](const Subgraph& subgraph) -> bool { return (validateOperands(subgraph.operands, model.operandValues, model.pools, model.referenced, /*allowUnspecifiedRank=*/true) && validateOperations(subgraph.operations, subgraph.operands, model.referenced, mode) && validateModelInputOutputs(subgraph.inputIndexes, subgraph.operands, - V1_3::OperandLifeTime::SUBGRAPH_INPUT) && + OperandLifeTime::SUBGRAPH_INPUT) && validateModelInputOutputs(subgraph.outputIndexes, subgraph.operands, - V1_3::OperandLifeTime::SUBGRAPH_OUTPUT) && + OperandLifeTime::SUBGRAPH_OUTPUT) && validateGraph(subgraph)); }; return (validateSubgraph(model.main) && @@ -746,11 +741,11 @@ // Validates the arguments of a request. type is either "input" or "output" and is used // for printing error messages. The operandIndexes is the appropriate array of input // or output operand indexes that was passed to the ANeuralNetworksModel_identifyInputsAndOutputs. 
-static bool validateRequestArguments( - const hardware::hidl_vec<V1_0::RequestArgument>& requestArguments, - const hardware::hidl_vec<uint32_t>& operandIndexes, - const hardware::hidl_vec<V1_3::Operand>& operands, const MemoryAccessVerifier& poolVerifier, - bool allowUnspecified, const char* type) { +static bool validateRequestArguments(const hidl_vec<RequestArgument>& requestArguments, + const hidl_vec<uint32_t>& operandIndexes, + const hidl_vec<Operand>& operands, + const MemoryAccessVerifier& poolVerifier, + bool allowUnspecified, const char* type) { // The request should specify as many arguments as were described in the model. const size_t requestArgumentCount = requestArguments.size(); if (requestArgumentCount != operandIndexes.size()) { @@ -760,13 +755,13 @@ } for (size_t requestArgumentIndex = 0; requestArgumentIndex < requestArgumentCount; requestArgumentIndex++) { - const V1_0::RequestArgument& requestArgument = requestArguments[requestArgumentIndex]; - const V1_0::DataLocation& location = requestArgument.location; + const RequestArgument& requestArgument = requestArguments[requestArgumentIndex]; + const DataLocation& location = requestArgument.location; // Get the operand index for this argument. We extract it from the list // that was provided in the call to ANeuralNetworksModel_identifyInputsAndOutputs. // We assume in this function that the model has been validated already. const uint32_t operandIndex = operandIndexes[requestArgumentIndex]; - const V1_3::Operand& operand = operands[operandIndex]; + const Operand& operand = operands[operandIndex]; if (requestArgument.hasNoValue) { if (location.poolIndex != 0 || location.offset != 0 || location.length != 0 || requestArgument.dimensions.size() != 0) { @@ -784,20 +779,11 @@ uint32_t requestRank = requestArgument.dimensions.size(); if (requestRank == 0) { if (!allowUnspecified) { - // NOTE: validateRequestArguments cannot validate unknown tensor rank with - // extension operand type. 
- if (!isExtensionOperandType(operand.type) && - !nonExtensionOperandTypeIsScalar(static_cast<int>(operand.type))) { - NN_RET_CHECK_GT(modelRank, 0) - << "Model " << type << " " << requestArgumentIndex - << " has unknown rank but the request does not specify the rank."; - } // Validate that all the dimensions are specified in the model. for (size_t i = 0; i < modelRank; i++) { if (operand.dimensions[i] == 0) { - LOG(ERROR) - << "Model has dimension " << i - << " set to 0 but the request does not specify the dimension."; + LOG(ERROR) << "Model has dimension " << i + << " set to 0 but the request does specify the dimension."; return false; } } @@ -867,16 +853,16 @@ } bool validateMemoryDesc(const V1_3::BufferDesc& desc, - const hardware::hidl_vec<sp<V1_3::IPreparedModel>>& preparedModels, - const hardware::hidl_vec<V1_3::BufferRole>& inputRoles, - const hardware::hidl_vec<V1_3::BufferRole>& outputRoles, + const hidl_vec<sp<V1_3::IPreparedModel>>& preparedModels, + const hidl_vec<V1_3::BufferRole>& inputRoles, + const hidl_vec<V1_3::BufferRole>& outputRoles, std::function<const V1_3::Model*(const sp<V1_3::IPreparedModel>&)> getModel, - std::set<HalPreparedModelRole>* preparedModelRoles, + std::set<PreparedModelRole>* preparedModelRoles, V1_3::Operand* combinedOperand) { NN_RET_CHECK(preparedModels.size() != 0); NN_RET_CHECK(inputRoles.size() != 0 || outputRoles.size() != 0); - std::set<HalPreparedModelRole> roles; + std::set<PreparedModelRole> roles; std::vector<V1_3::Operand> operands; operands.reserve(inputRoles.size() + outputRoles.size()); for (const auto& role : inputRoles) { @@ -945,15 +931,14 @@ return true; } -bool validateExecutionPreference(V1_1::ExecutionPreference preference) { - return preference == V1_1::ExecutionPreference::LOW_POWER || - preference == V1_1::ExecutionPreference::FAST_SINGLE_ANSWER || - preference == V1_1::ExecutionPreference::SUSTAINED_SPEED; +bool validateExecutionPreference(ExecutionPreference preference) { + return preference == 
ExecutionPreference::LOW_POWER || + preference == ExecutionPreference::FAST_SINGLE_ANSWER || + preference == ExecutionPreference::SUSTAINED_SPEED; } -bool validatePriority(V1_3::Priority priority) { - return priority == V1_3::Priority::LOW || priority == V1_3::Priority::MEDIUM || - priority == V1_3::Priority::HIGH; +bool validatePriority(Priority priority) { + return priority == Priority::LOW || priority == Priority::MEDIUM || priority == Priority::HIGH; } bool validOperandType(V1_0::OperandType operandType) {
diff --git a/common/Validation.cpp b/common/Validation.cpp deleted file mode 100644 index 91d894c..0000000 --- a/common/Validation.cpp +++ /dev/null
@@ -1,2899 +0,0 @@ -/* - * Copyright (C) 2020 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "Validation.h" - -#include <android-base/logging.h> -#include <android-base/mapped_file.h> - -#include <algorithm> -#include <cctype> -#include <functional> -#include <limits> -#include <memory> -#include <numeric> -#include <set> -#include <sstream> -#include <string> -#include <string_view> -#include <tuple> -#include <utility> -#include <variant> -#include <vector> - -#include "ControlFlow.h" -#include "OperandTypes.h" -#include "OperationResolver.h" -#include "OperationTypes.h" -#include "Result.h" -#include "SharedMemory.h" -#include "TypeUtils.h" -#include "Types.h" - -// The NN_VALIDATE family of macros defined below is similar to the CHECK family defined in -// system/libbase/include/android-base/logging.h -// -// The difference is that NN_VALIDATE macros use LOG(ERROR) instead of LOG(FATAL) -// and return false instead of aborting. - -// Logs an error and returns false or INVALID. Append context using << after. For example: -// -// NN_VALIDATE_FAIL() << "Something went wrong"; -// -// The containing function must return a bool or Version. -#define NN_VALIDATE_FAIL() \ - return NN_ERROR() << "NN_VALIDATE failed (" << __FILE__ << ":" << __LINE__ << "): " - -// Logs an error and returns false or Version::INVALID if condition is false. Extra logging can be -// appended using << after. 
For example: -// -// NN_VALIDATE(false) << "Something went wrong"; -// -// The containing function must return a bool. -#define NN_VALIDATE(condition) \ - while (UNLIKELY(!(condition))) NN_VALIDATE_FAIL() << #condition << " " - -// Helper for NN_VALIDATE_xx(x, y) macros. -#define NN_VALIDATE_OP(LHS, RHS, OP) \ - for (auto _values = ::android::base::MakeEagerEvaluator(LHS, RHS); \ - UNLIKELY(!(_values.lhs.v OP _values.rhs.v)); \ - /* empty */) \ - NN_VALIDATE_FAIL() \ - << #LHS << " " << #OP << " " << #RHS << " (" << #LHS << " = " \ - << ::android::base::LogNullGuard<decltype(_values.lhs.v)>::Guard(_values.lhs.v) \ - << ", " << #RHS << " = " \ - << ::android::base::LogNullGuard<decltype(_values.rhs.v)>::Guard(_values.rhs.v) \ - << ") " - -// Logs an error and returns false or Version::INVALID if a condition between x and y does not hold. -// Extra logging can be appended using << after. For example: -// -// NN_VALIDATE_EQ(a, b) << "Something went wrong"; -// -// The values must implement the appropriate comparison operator as well as -// `operator<<(std::ostream&, ...)`. -// The containing function must return a bool or Version. 
-#define NN_VALIDATE_EQ(x, y) NN_VALIDATE_OP(x, y, ==) -#define NN_VALIDATE_NE(x, y) NN_VALIDATE_OP(x, y, !=) -#define NN_VALIDATE_LE(x, y) NN_VALIDATE_OP(x, y, <=) -#define NN_VALIDATE_LT(x, y) NN_VALIDATE_OP(x, y, <) -#define NN_VALIDATE_GE(x, y) NN_VALIDATE_OP(x, y, >=) -#define NN_VALIDATE_GT(x, y) NN_VALIDATE_OP(x, y, >) - -namespace android::nn { -namespace { - -constexpr auto kNullptrVariant = std::variant<const void*, void*>{}; -constexpr auto kInvalidMemoryDomainToken = Request::MemoryDomainToken{}; - -template <typename Type, typename ValidationFunction> -Result<Version> validateVector(const std::vector<Type>& objects, - const ValidationFunction& validationFunction) { - auto version = Version::ANDROID_OC_MR1; - for (const auto& object : objects) { - version = combineVersions(version, NN_TRY(validationFunction(object))); - } - return version; -} - -bool isValidExtensionName(const std::string& name) { - constexpr auto validSymbol = [](char symbol) { - return std::islower(symbol) || std::isdigit(symbol) || symbol == '.' || symbol == '_'; - }; - const bool hasOnlyValidSymbols = std::all_of(name.begin(), name.end(), validSymbol); - const bool hasAtLeastOnePeriod = std::find(name.begin(), name.end(), '.') != name.end(); - return hasOnlyValidSymbols && hasAtLeastOnePeriod; -} - -Result<Version> validateDeviceStatus(const DeviceStatus& deviceStatus) { - switch (deviceStatus) { - case DeviceStatus::AVAILABLE: - case DeviceStatus::BUSY: - case DeviceStatus::OFFLINE: - case DeviceStatus::UNKNOWN: - return Version::ANDROID_OC_MR1; - } - NN_VALIDATE_FAIL() << "Invalid DeviceStatus " << deviceStatus; -} - -Result<Version> validateExecutionPreference(const ExecutionPreference& executionPreference) { - switch (executionPreference) { - case ExecutionPreference::FAST_SINGLE_ANSWER: - // ExecutionPreference::FAST_SINGLE_ANSWER is the default value, so it is implicitly - // valid for all versions. 
- return Version::ANDROID_OC_MR1; - case ExecutionPreference::LOW_POWER: - case ExecutionPreference::SUSTAINED_SPEED: - return Version::ANDROID_P; - } - NN_VALIDATE_FAIL() << "Invalid ExecutionPreference " << executionPreference; -} - -Result<Version> validateDeviceType(const DeviceType& deviceType) { - switch (deviceType) { - case DeviceType::UNKNOWN: - // DeviceType was introduced in the 1.2 NN HAL. DeviceType::UNKNOWN is returned when - // querying versions that are prior to the 1.2 NN HAL. DeviceType::UNKNOWN is not a - // valid code to return for a driver that implement at least a 1.2 NN HAL. If we need a - // range of versions, make ANDROID_Q (NN HAL 1.2) the exclusive upper bound for - // DeviceType::UNKNOWN. - return Version::ANDROID_OC_MR1; - case DeviceType::OTHER: - case DeviceType::CPU: - case DeviceType::GPU: - case DeviceType::ACCELERATOR: - return Version::ANDROID_Q; - } - NN_VALIDATE_FAIL() << "Invalid DeviceType " << deviceType; -} - -Result<Version> validateMeasureTiming(const MeasureTiming& measureTiming) { - switch (measureTiming) { - case MeasureTiming::NO: - // MeasureTiming::NO is the default value, so it is implicitly valid for all versions. 
- return Version::ANDROID_OC_MR1; - case MeasureTiming::YES: - return Version::ANDROID_Q; - } - NN_VALIDATE_FAIL() << "Invalid MeasureTiming " << measureTiming; -} - -Result<Version> validateOperandType(const OperandType& operandType) { - switch (operandType) { - case OperandType::FLOAT32: - case OperandType::INT32: - case OperandType::UINT32: - case OperandType::TENSOR_FLOAT32: - case OperandType::TENSOR_INT32: - case OperandType::TENSOR_QUANT8_ASYMM: - case OperandType::OEM: - case OperandType::TENSOR_OEM_BYTE: - return Version::ANDROID_OC_MR1; - case OperandType::BOOL: - case OperandType::TENSOR_QUANT16_SYMM: - case OperandType::TENSOR_FLOAT16: - case OperandType::TENSOR_BOOL8: - case OperandType::FLOAT16: - case OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL: - case OperandType::TENSOR_QUANT16_ASYMM: - case OperandType::TENSOR_QUANT8_SYMM: - return Version::ANDROID_Q; - case OperandType::TENSOR_QUANT8_ASYMM_SIGNED: - case OperandType::SUBGRAPH: - return Version::ANDROID_R; - } - if (isExtension(operandType)) { - return Version::ANDROID_Q; - } - NN_VALIDATE_FAIL() << "Invalid OperandType " << operandType; -} - -Result<Version> validateOperandLifeTime(const Operand& operand) { - // Make sure SUBGRAPH operand type and lifetime always go together. 
- NN_VALIDATE_EQ((operand.type == OperandType::SUBGRAPH), - (operand.lifetime == Operand::LifeTime::SUBGRAPH)) - << "Operand of type " << operand.type << " cannot have lifetime " << operand.lifetime; - - switch (operand.lifetime) { - case Operand::LifeTime::TEMPORARY_VARIABLE: - case Operand::LifeTime::SUBGRAPH_INPUT: - case Operand::LifeTime::SUBGRAPH_OUTPUT: - case Operand::LifeTime::CONSTANT_COPY: - case Operand::LifeTime::CONSTANT_REFERENCE: - case Operand::LifeTime::NO_VALUE: - case Operand::LifeTime::POINTER: - return Version::ANDROID_OC_MR1; - case Operand::LifeTime::SUBGRAPH: - return Version::ANDROID_R; - } - NN_VALIDATE_FAIL() << "Invalid Operand::LifeTime " << operand.lifetime; -} - -Result<Version> validatePriority(const Priority& priority) { - switch (priority) { - case Priority::MEDIUM: - // Priority::MEDIUM is the default value, so it is implicitly valid for all versions. - return Version::ANDROID_OC_MR1; - case Priority::LOW: - case Priority::HIGH: - return Version::ANDROID_R; - } - NN_VALIDATE_FAIL() << "Invalid Priority " << priority; -} - -Result<Version> validateErrorStatus(const ErrorStatus& errorStatus) { - // Note that MISSED_DEADLINE_*, RESOURCE_EXHAUSTED_*, and DEAD_OBJECT were introduced ih - // ANDROID_R, but these can be cast to ANDROID_OC_MR1 as GENERAL_FAILURE. 
- switch (errorStatus) { - case ErrorStatus::NONE: - case ErrorStatus::DEVICE_UNAVAILABLE: - case ErrorStatus::GENERAL_FAILURE: - case ErrorStatus::OUTPUT_INSUFFICIENT_SIZE: - case ErrorStatus::INVALID_ARGUMENT: - case ErrorStatus::MISSED_DEADLINE_TRANSIENT: - case ErrorStatus::MISSED_DEADLINE_PERSISTENT: - case ErrorStatus::RESOURCE_EXHAUSTED_TRANSIENT: - case ErrorStatus::RESOURCE_EXHAUSTED_PERSISTENT: - case ErrorStatus::DEAD_OBJECT: - return Version::ANDROID_OC_MR1; - } - NN_VALIDATE_FAIL() << "Invalid ErrorStatus " << errorStatus; -} - -Result<Version> validateFusedActivationFunc(const FusedActivationFunc& activation) { - switch (activation) { - case FusedActivationFunc::NONE: - case FusedActivationFunc::RELU: - case FusedActivationFunc::RELU1: - case FusedActivationFunc::RELU6: - return Version::ANDROID_OC_MR1; - } - NN_VALIDATE_FAIL() << "Invalid FusedActivationFunc " << activation; -} - -Result<Version> validateOutputShape(const OutputShape& /*outputShape*/) { - return Version::ANDROID_Q; -} - -Result<Version> validateTiming(const Timing& timing) { - constexpr auto kNoTiming = Timing{}; - if (timing == kNoTiming) { - // kNoTiming is the default value, so it is implicitly valid for all versions. - return Version::ANDROID_OC_MR1; - } - if (timing.timeInDriver.has_value() && timing.timeOnDevice.has_value()) { - // `lazyMessage` is a lazy function to produce the timing validation error message. - // Currently, the code is not able to inline the message in NN_VALIDATE due to a - // argument-dependent lookup issue with nn::detail::ErrorBuilder interacting with std types - // such as std::chrono::duration, so this function uses an indirection through - // std::ostringstream. 
- const auto lazyMessage = [&timing]() -> std::string { - std::ostringstream oss; - oss << "Timing::timeOnDevice (" << timing.timeOnDevice.value() - << ") must not exceed Timing::timeInDriver (" << timing.timeInDriver.value() << ")"; - return oss.str(); - }; - NN_VALIDATE(timing.timeOnDevice.value() <= timing.timeInDriver.value()) << lazyMessage(); - } - return Version::ANDROID_Q; -} - -Result<Version> validateCapabilitiesPerformanceInfo( - const Capabilities::PerformanceInfo& performanceInfo) { - NN_VALIDATE_GT(performanceInfo.execTime, 0.0f); - NN_VALIDATE_GT(performanceInfo.powerUsage, 0.0f); - return Version::ANDROID_OC_MR1; -} - -Result<Version> validateCapabilitiesOperandPerformance( - const Capabilities::OperandPerformance& operandPerformance) { - auto version = NN_TRY(validateOperandType(operandPerformance.type)); - return combineVersions(version, - NN_TRY(validateCapabilitiesPerformanceInfo(operandPerformance.info))); -} - -Result<Version> validateCapabilitiesOperandPerformanceTable( - const Capabilities::OperandPerformanceTable& operandPerformances) { - // OperandPerformanceTable's order was validated when it was created, and it is castable to any - // version. If an OperandType does not exist in the lower version being converted to, that - // OperandPerformance will be dropped. 
- NN_TRY(validateVector(operandPerformances.asVector(), validateCapabilitiesOperandPerformance)); - return Version::ANDROID_OC_MR1; -} - -Result<Version> validateCapabilities(const Capabilities& capabilities) { - auto version = - NN_TRY(validateCapabilitiesOperandPerformanceTable(capabilities.operandPerformance)); - - version = combineVersions(version, - NN_TRY(validateCapabilitiesPerformanceInfo( - capabilities.relaxedFloat32toFloat16PerformanceScalar))); - version = combineVersions(version, - NN_TRY(validateCapabilitiesPerformanceInfo( - capabilities.relaxedFloat32toFloat16PerformanceTensor))); - version = combineVersions( - version, NN_TRY(validateCapabilitiesPerformanceInfo(capabilities.ifPerformance))); - version = combineVersions( - version, NN_TRY(validateCapabilitiesPerformanceInfo(capabilities.whilePerformance))); - - return version; -} - -Result<Version> validateExtensionOperandTypeInformation( - const Extension::OperandTypeInformation& operandTypeInformation) { - NN_VALIDATE_GT(operandTypeInformation.byteSize, 0u); - return Version::ANDROID_Q; -} - -Result<Version> validateExtension(const Extension& extension) { - NN_VALIDATE(isValidExtensionName(extension.name)); - - // Verify all OperandTypeInformations have unique types. 
- std::vector<uint16_t> types; - types.reserve(extension.operandTypes.size()); - std::transform(extension.operandTypes.begin(), extension.operandTypes.end(), - std::back_inserter(types), - [](const Extension::OperandTypeInformation& operandTypeInformation) { - return operandTypeInformation.type; - }); - std::sort(types.begin(), types.end()); - const auto iter = std::adjacent_find(types.begin(), types.end()); - NN_VALIDATE(iter == types.end()) << "Extension has duplicate type " << *iter; - - return combineVersions(Version::ANDROID_Q, - NN_TRY(validateVector(extension.operandTypes, - validateExtensionOperandTypeInformation))); -} - -Result<Version> validateExtensions(const std::vector<Extension>& extensions) { - const auto version = NN_TRY(validateVector(extensions, validateExtension)); - - // Verify all extensions have unique names. - std::vector<std::reference_wrapper<const std::string>> names; - names.reserve(extensions.size()); - std::transform(extensions.begin(), extensions.end(), std::back_inserter(names), - [](const Extension& extension) { return std::cref(extension.name); }); - std::sort(names.begin(), names.end(), std::less<std::string>{}); - const auto nameIter = - std::adjacent_find(names.begin(), names.end(), std::equal_to<std::string>{}); - NN_VALIDATE(nameIter == names.end()) - << "Two or more extensions have the duplicate name " << nameIter->get(); - - return version; -} - -// Forward declaration of subgraph validation function. 
-Result<Version> validateModelSubgraph(const Model::Subgraph& subgraph, - std::optional<size_t> referencedIndex, - size_t operandValuesSize, - const std::vector<size_t>& poolSizes, - const std::vector<Model::Subgraph>& referenced, - std::vector<std::optional<Version>>* subgraphVersionCache); - -Result<Version> validateOperandDataLocation( - const Operand& operand, size_t operandValuesSize, const std::vector<size_t>& poolSizes, - const std::vector<Model::Subgraph>& subgraphs, - std::vector<std::optional<Version>>* subgraphVersionCache) { - const DataLocation& location = operand.location; - NN_VALIDATE_EQ(location.padding, 0u) - << "DataLocation with a non-zero padding used in Model: " << location.padding; - switch (operand.lifetime) { - case Operand::LifeTime::CONSTANT_COPY: - NN_VALIDATE(location.pointer == kNullptrVariant) - << "CONSTANT_COPY with a non-null pointer"; - NN_VALIDATE_EQ(location.poolIndex, 0u) - << "CONSTANT_COPY with a non-zero poolIndex " << location.poolIndex; - // Do the addition using uint64_t to avoid potential wrap-around problems. - NN_VALIDATE_LE(static_cast<uint64_t>(location.offset) + location.length, - operandValuesSize) - << "OperandValue location out of range. Starts at " << location.offset - << ", length " << location.length << ", max " << operandValuesSize; - return Version::ANDROID_OC_MR1; - case Operand::LifeTime::CONSTANT_REFERENCE: - NN_VALIDATE_LT(location.poolIndex, poolSizes.size()); - // Do the addition using uint64_t to avoid potential wrap-around problems. - NN_VALIDATE_LE(static_cast<uint64_t>(location.offset) + location.length, - poolSizes[location.poolIndex]) - << "OperandValue location out of range. 
Starts at " << location.offset - << ", length " << location.length << ", max " << poolSizes[location.poolIndex]; - return Version::ANDROID_OC_MR1; - case Operand::LifeTime::TEMPORARY_VARIABLE: - case Operand::LifeTime::SUBGRAPH_INPUT: - case Operand::LifeTime::SUBGRAPH_OUTPUT: - case Operand::LifeTime::NO_VALUE: - NN_VALIDATE(location.pointer == kNullptrVariant) - << "Unexpected pointer value for operand of lifetime " << operand.lifetime; - NN_VALIDATE_EQ(location.poolIndex, 0u) - << "Unexpected poolIndex " << location.poolIndex << " for operand of lifetime " - << operand.lifetime; - NN_VALIDATE_EQ(location.offset, 0u) << "Unexpected offset " << location.offset - << " for operand of lifetime " << operand.lifetime; - NN_VALIDATE_EQ(location.length, 0u) << "Unexpected length " << location.length - << " for operand of lifetime " << operand.lifetime; - return Version::ANDROID_OC_MR1; - case Operand::LifeTime::SUBGRAPH: { - NN_VALIDATE(location.pointer == kNullptrVariant) << "SUBGRAPH with a non-null pointer"; - NN_VALIDATE_EQ(location.poolIndex, 0u) - << "SUBGRAPH with a non-zero poolIndex " << location.poolIndex; - NN_VALIDATE_LT(location.offset, subgraphs.size()) - << "Subgraph index out of range: " << location.offset - << " >= " << subgraphs.size(); - NN_VALIDATE_EQ(location.length, 0u) - << "SUBGRAPH with a non-zero length " << location.length; - const auto version = NN_TRY(validateModelSubgraph( - subgraphs[location.offset], location.offset, operandValuesSize, poolSizes, - subgraphs, subgraphVersionCache)); - return combineVersions(version, Version::ANDROID_R); - } - case Operand::LifeTime::POINTER: { - const bool nonNull = - std::visit([](auto* ptr) { return ptr != nullptr; }, location.pointer); - NN_VALIDATE(nonNull) << "POINTER with a null pointer"; - NN_VALIDATE_EQ(location.poolIndex, 0u) - << "POINTER with a non-zero poolIndex " << location.poolIndex; - NN_VALIDATE_EQ(location.offset, 0u) - << "POINTER with a non-zero offset " << location.offset; - return 
Version::ANDROID_OC_MR1; - } - } - NN_VALIDATE_FAIL() << "Invalid Operand::LifeTime " << operand.lifetime; -} - -Result<Version> validateOperandDimensions(const Operand& operand) { - switch (operand.type) { - case OperandType::FLOAT32: - case OperandType::INT32: - case OperandType::UINT32: - case OperandType::BOOL: - case OperandType::FLOAT16: - case OperandType::SUBGRAPH: - case OperandType::OEM: - NN_VALIDATE(operand.dimensions.empty()) - << "Scalar data has dimensions of rank " << operand.dimensions.size(); - return Version::ANDROID_OC_MR1; - case OperandType::TENSOR_FLOAT32: - case OperandType::TENSOR_INT32: - case OperandType::TENSOR_QUANT8_ASYMM: - case OperandType::TENSOR_QUANT16_SYMM: - case OperandType::TENSOR_FLOAT16: - case OperandType::TENSOR_BOOL8: - case OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL: - case OperandType::TENSOR_QUANT16_ASYMM: - case OperandType::TENSOR_QUANT8_SYMM: - case OperandType::TENSOR_QUANT8_ASYMM_SIGNED: - case OperandType::TENSOR_OEM_BYTE: { - if (operand.lifetime == Operand::LifeTime::CONSTANT_COPY || - operand.lifetime == Operand::LifeTime::CONSTANT_REFERENCE || - operand.lifetime == Operand::LifeTime::POINTER) { - NN_VALIDATE(!operand.dimensions.empty()) - << "Tensor has lifetime of " << operand.lifetime - << " but dimensions of rank 0"; - const auto size = getNonExtensionSize(operand); - NN_VALIDATE(size.has_value()) << "Tensor dimensions overflow"; - NN_VALIDATE_NE(size.value(), 0u) << "Tensor has at least one unknown dimension"; - } - // TODO(b/165152547): aren't NO_VALUE arguments allowed to be .empty() even before - // Android Q? - if (operand.dimensions.empty()) { - // Unspecified rank was added in Android Q. - return Version::ANDROID_Q; - } - return Version::ANDROID_OC_MR1; - } - } - if (isExtension(operand.type)) { - // Extension types were added in Android Q. 
- return Version::ANDROID_Q; - } - NN_VALIDATE_FAIL() << "Invalid OperandType " << operand.type; -} - -Result<Version> validateOperandScale(const Operand& operand) { - switch (operand.type) { - case OperandType::FLOAT32: - case OperandType::INT32: - case OperandType::UINT32: - case OperandType::TENSOR_FLOAT32: - case OperandType::BOOL: - case OperandType::TENSOR_FLOAT16: - case OperandType::TENSOR_BOOL8: - case OperandType::FLOAT16: - case OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL: - case OperandType::SUBGRAPH: - NN_VALIDATE_EQ(operand.scale, 0.0f) - << "Operand of type " << operand.type << " with a non-zero scale (" - << operand.scale << ")"; - return Version::ANDROID_OC_MR1; - case OperandType::TENSOR_INT32: - // TENSOR_INT32 may be used with or without scale, depending on the operation. - // TODO(b/119869082) We should have a separate type for TENSOR_INT32 with a scale. - NN_VALIDATE_GE(operand.scale, 0.0f) - << "Operand of type " << operand.type << " with a negative scale"; - return Version::ANDROID_OC_MR1; - case OperandType::TENSOR_QUANT8_ASYMM: - case OperandType::TENSOR_QUANT16_SYMM: - case OperandType::TENSOR_QUANT16_ASYMM: - case OperandType::TENSOR_QUANT8_SYMM: - case OperandType::TENSOR_QUANT8_ASYMM_SIGNED: - NN_VALIDATE_GT(operand.scale, 0.0f) - << "Operand of type " << operand.type << " with a non-positive scale"; - return Version::ANDROID_OC_MR1; - case OperandType::OEM: - case OperandType::TENSOR_OEM_BYTE: - // No validation for OEM types. 
- return Version::ANDROID_OC_MR1; - } - if (isExtension(operand.type)) { - NN_VALIDATE_EQ(operand.scale, 0.0f) << "Operand of type " << operand.type - << " with a non-zero scale (" << operand.scale << ")"; - return Version::ANDROID_Q; - } - NN_VALIDATE_FAIL() << "Invalid OperandType " << operand.type; -} - -Result<Version> validateOperandZeroPoint(const Operand& operand) { - switch (operand.type) { - case OperandType::FLOAT32: - case OperandType::INT32: - case OperandType::UINT32: - case OperandType::TENSOR_FLOAT32: - case OperandType::TENSOR_INT32: - case OperandType::BOOL: - case OperandType::TENSOR_FLOAT16: - case OperandType::TENSOR_BOOL8: - case OperandType::FLOAT16: - case OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL: - case OperandType::TENSOR_QUANT8_SYMM: - case OperandType::SUBGRAPH: - NN_VALIDATE_EQ(operand.zeroPoint, 0) - << "Operand of type " << operand.type << " with a non-zero zeroPoint " - << operand.zeroPoint; - return Version::ANDROID_OC_MR1; - case OperandType::TENSOR_QUANT8_ASYMM: - NN_VALIDATE(operand.zeroPoint >= 0 && operand.zeroPoint <= 255) - << "Operand of type " << operand.type << " with an invalid zeroPoint " - << operand.zeroPoint << ", must be in range [0, 255]"; - return Version::ANDROID_OC_MR1; - case OperandType::TENSOR_QUANT8_ASYMM_SIGNED: - NN_VALIDATE(operand.zeroPoint >= -128 && operand.zeroPoint <= 127) - << "Operand of type " << operand.type << " with an invalid zeroPoint " - << operand.zeroPoint << ", must be in range [-128, 127]"; - return Version::ANDROID_OC_MR1; - case OperandType::TENSOR_QUANT16_ASYMM: - NN_VALIDATE(operand.zeroPoint >= 0 && operand.zeroPoint <= 65535) - << "Operand of type " << operand.type << " with an invalid zeroPoint " - << operand.zeroPoint << ", must be in range [0, 65535]"; - return Version::ANDROID_OC_MR1; - case OperandType::TENSOR_QUANT16_SYMM: - NN_VALIDATE_EQ(operand.zeroPoint, 0) - << "Operand of type " << operand.type << " with a non-zero zeroPoint " - << operand.zeroPoint; - return 
Version::ANDROID_OC_MR1; - case OperandType::OEM: - case OperandType::TENSOR_OEM_BYTE: - // No validation for OEM types. - return Version::ANDROID_OC_MR1; - } - if (isExtension(operand.type)) { - NN_VALIDATE_EQ(operand.zeroPoint, 0) << "Operand of type " << operand.type - << " with a non-zero zeroPoint " << operand.zeroPoint; - return Version::ANDROID_Q; - } - NN_VALIDATE_FAIL() << "Invalid OperandType " << operand.type; -} - -Result<Version> validateOperandExtraParams(const Operand& operand) { - switch (operand.type) { - case OperandType::FLOAT32: - case OperandType::INT32: - case OperandType::UINT32: - case OperandType::TENSOR_FLOAT32: - case OperandType::TENSOR_INT32: - case OperandType::TENSOR_QUANT8_ASYMM: - case OperandType::BOOL: - case OperandType::TENSOR_QUANT16_SYMM: - case OperandType::TENSOR_FLOAT16: - case OperandType::TENSOR_BOOL8: - case OperandType::FLOAT16: - case OperandType::TENSOR_QUANT16_ASYMM: - case OperandType::TENSOR_QUANT8_SYMM: - case OperandType::TENSOR_QUANT8_ASYMM_SIGNED: - case OperandType::SUBGRAPH: - NN_VALIDATE(std::holds_alternative<Operand::NoParams>(operand.extraParams)) - << "Operand of type " << operand.type - << " has extraParams when there must be none"; - return Version::ANDROID_OC_MR1; - case OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL: { - NN_VALIDATE( - std::holds_alternative<Operand::SymmPerChannelQuantParams>(operand.extraParams)) - << "Operand of type " << operand.type - << " without a Channel Quantization params"; - const auto& channelQuant = - std::get<Operand::SymmPerChannelQuantParams>(operand.extraParams); - - const size_t count = operand.dimensions.size(); - NN_VALIDATE_LT(channelQuant.channelDim, count) - << "Operand of type " << operand.type - << " with an invalid channelQuant.channelDim " << channelQuant.channelDim - << ", must be valid dimension index in range [0, " << count << ")"; - const uint32_t expected = operand.dimensions[channelQuant.channelDim]; - NN_VALIDATE_EQ(channelQuant.scales.size(), expected) 
- << "Operand of type " << operand.type << " with a wrong-sized scales, expected " - << expected << " was " << channelQuant.scales.size(); - NN_VALIDATE_NE(expected, 0u) - << "Operand of type " << operand.type << " channel dimension " - << channelQuant.channelDim << " is underspecified (can't be 0)"; - for (uint32_t i = 0; i < expected; ++i) { - NN_VALIDATE_GT(channelQuant.scales[i], 0.0f) - << "Operand of type " << operand.type - << " with a non-positive value in scales[" << i - << "]=" << channelQuant.scales[i]; - } - return Version::ANDROID_Q; - } - case OperandType::OEM: - case OperandType::TENSOR_OEM_BYTE: - // No validation for OEM types. - return Version::ANDROID_OC_MR1; - } - if (isExtension(operand.type)) { - NN_VALIDATE(std::holds_alternative<Operand::NoParams>(operand.extraParams) || - std::holds_alternative<Operand::ExtensionParams>(operand.extraParams)) - << "Extension operand of type " << operand.type - << " must not have SymmPerChannelQuant extraParams"; - return Version::ANDROID_OC_MR1; - } - NN_VALIDATE_FAIL() << "Invalid OperandType " << operand.type; -} - -Result<Version> validateOperand(const Operand& operand, size_t operandValuesSize, - const std::vector<size_t>& poolSizes, - const std::vector<Model::Subgraph>& subgraphs, - std::vector<std::optional<Version>>* subgraphVersionCache) { - auto version = NN_TRY(validateOperandType(operand.type)); - version = combineVersions(version, NN_TRY(validateOperandLifeTime(operand))); - version = combineVersions(version, NN_TRY(validateOperandDimensions(operand))); - version = combineVersions(version, NN_TRY(validateOperandScale(operand))); - version = combineVersions(version, NN_TRY(validateOperandZeroPoint(operand))); - version = combineVersions(version, NN_TRY(validateOperandExtraParams(operand))); - version = combineVersions( - version, NN_TRY(validateOperandDataLocation(operand, operandValuesSize, poolSizes, - subgraphs, subgraphVersionCache))); - - // For constants, validate that the length is as 
expected. The other lifetimes - // expect the length to be 0. Don't validate for OEM types. - if (operand.lifetime == Operand::LifeTime::CONSTANT_REFERENCE || - operand.lifetime == Operand::LifeTime::CONSTANT_COPY || - operand.lifetime == Operand::LifeTime::POINTER) { - if (!isExtension(operand.type) && operand.type != OperandType::OEM && - operand.type != OperandType::TENSOR_OEM_BYTE) { - const auto expectedLength = getNonExtensionSize(operand).value(); - NN_VALIDATE_EQ(operand.location.length, expectedLength) - << "For operand " << operand.type << " expected a size of " << expectedLength - << " but got " << operand.location.length; - } - } - - return version; -} - -Result<std::vector<Version>> validateOperands( - const std::vector<Operand>& operands, size_t operandValuesSize, - const std::vector<size_t>& poolSizes, const std::vector<Model::Subgraph>& subgraphs, - std::vector<std::optional<Version>>* subgraphVersionCache) { - std::vector<Version> versions; - versions.reserve(operands.size()); - for (size_t i = 0; i < operands.size(); ++i) { - auto result = validateOperand(operands[i], operandValuesSize, poolSizes, subgraphs, - subgraphVersionCache); - if (!result.has_value()) { - return error() << std::move(result).error() << " for operand " << i; - } - versions.push_back(result.value()); - } - return versions; -} - -// Forward declaration. 
-Result<Version> validateOperationIncludingOperandVersions( - const Operation& operation, const std::vector<Operand>& operands, - const std::vector<Version>& operandVersions, const std::vector<Model::Subgraph>& subgraphs); - -Result<Version> validateOperations(const std::vector<Operation>& operations, - const std::vector<Operand>& operands, - const std::vector<Version>& operandVersions, - const std::vector<Model::Subgraph>& subgraphs) { - auto version = Version::ANDROID_OC_MR1; - for (size_t i = 0; i < operations.size(); ++i) { - auto result = validateOperationIncludingOperandVersions(operations[i], operands, - operandVersions, subgraphs); - if (!result.has_value()) { - return error() << std::move(result).error() << " for operation " << i; - } - version = combineVersions(version, result.value()); - } - return version; -} - -Result<Version> validateHandle(const Handle& handle) { - NN_VALIDATE(std::all_of(handle.fds.begin(), handle.fds.end(), - [](const base::unique_fd& fd) { return fd.ok(); })); - return Version::ANDROID_OC_MR1; -} - -Result<Version> validateSharedHandle(const SharedHandle& handle) { - NN_VALIDATE(handle != nullptr); - return validateHandle(*handle); -} - -Result<Version> validateMemory(const Memory::Ashmem& memory) { - NN_VALIDATE(memory.fd.ok()); - NN_VALIDATE_NE(memory.size, 0u); - return Version::ANDROID_OC_MR1; -} - -Result<Version> validateMemory(const Memory::Fd& memory) { - NN_VALIDATE(memory.fd.ok()); - NN_VALIDATE_NE(memory.size, 0u); - - // `prot` is allowed to be either PROT_NONE (which has a value of 0) or the bitwise OR of either - // PROT_READ or PROT_WRITE. If any other bits are set, the `prot` field is invalid. 
- constexpr int kAllowedBits = PROT_READ | PROT_WRITE; - NN_VALIDATE_EQ(memory.prot & ~kAllowedBits, 0); - - return Version::ANDROID_OC_MR1; -} - -Result<Version> validateMemory(const Memory::HardwareBuffer& memory) { - NN_VALIDATE(memory.handle.get() != nullptr); - return Version::ANDROID_Q; -} - -Result<Version> validateMemory(const Memory::Unknown& memory) { - NN_TRY(validateHandle(memory.handle)); - return Version::ANDROID_Q; -} - -Result<Version> validateSharedMemory(const SharedMemory& memory) { - NN_VALIDATE(memory != nullptr); - return std::visit([](const auto& x) { return validateMemory(x); }, memory->handle); -} - -Result<void> validateModelSubgraphInputOutputs(const std::vector<uint32_t>& indexes, - const std::vector<Operand>& operands, - Operand::LifeTime lifetime) { - const size_t operandCount = operands.size(); - for (uint32_t i : indexes) { - NN_VALIDATE_LT(i, operandCount) - << "Model " << lifetime << " input or output index out of range: " << i << "/" - << operandCount; - const Operand& operand = operands[i]; - NN_VALIDATE_EQ(operand.lifetime, lifetime) - << "Model " << lifetime << " operand " << i << " has lifetime of " - << operand.lifetime << " instead of the expected " << lifetime; - } - - std::vector<uint32_t> sortedIndexes = indexes; - std::sort(sortedIndexes.begin(), sortedIndexes.end()); - const auto iter = std::adjacent_find(sortedIndexes.begin(), sortedIndexes.end()); - NN_VALIDATE(iter == sortedIndexes.end()) - << "Model input or output occurs multiple times: " << *iter; - - for (size_t i = 0; i < operands.size(); ++i) { - if (operands[i].lifetime == lifetime) { - const auto containsIndex = [&sortedIndexes](size_t index) { - return binary_search(sortedIndexes.begin(), sortedIndexes.end(), index); - }; - NN_VALIDATE(containsIndex(i)) - << "Operand " << i << " marked as " << lifetime - << " but is not included in Model input or output indexes"; - } - } - - return {}; -} - -Result<void> validateExecutionOrder(const Model::Subgraph& 
// Verifies that the subgraph's operations are listed in a valid execution order: no operand is
// read before it is written, no operand is written twice, and every operand is eventually written.
Result<void> validateExecutionOrder(const Model::Subgraph& subgraph) {
    // Either the operand has a known value before model execution begins, or we've seen a writer
    // for this operand while walking operands in execution order. Initialize to known operands.
    std::vector<bool> operandValueKnown;
    operandValueKnown.reserve(subgraph.operands.size());
    std::transform(subgraph.operands.begin(), subgraph.operands.end(),
                   std::back_inserter(operandValueKnown), [](const Operand& operand) {
                       return operand.lifetime != Operand::LifeTime::TEMPORARY_VARIABLE &&
                              operand.lifetime != Operand::LifeTime::SUBGRAPH_OUTPUT;
                   });

    // Validate that operations are sorted into execution order.
    //
    // If there is a cycle in the graph, the operations will not
    // appear to be sorted into execution order: Some operation will
    // have an input for which operandValueKnown[] is false.
    for (size_t i = 0; i < subgraph.operations.size(); ++i) {
        const auto& operation = subgraph.operations[i];

        for (size_t j = 0; j < operation.inputs.size(); ++j) {
            const uint32_t k = operation.inputs[j];
            NN_VALIDATE(operandValueKnown[k]) << "Operation " << i << " input " << j
                                              << " (operand " << k
                                              << ") is read before it is written";
        }

        for (size_t j = 0; j < operation.outputs.size(); ++j) {
            const uint32_t k = operation.outputs[j];
            // Assuming validateOperations() has not returned an error, we know that this output
            // is TEMPORARY_VARIABLE or MODEL_OUTPUT, and so the only way operandValueKnown[k]
            // can be true is if we've already seen a writer for this operand.
            NN_VALIDATE(!operandValueKnown[k]) << "Operation " << i << " output " << j
                                               << " (operand " << k
                                               << ") has already been written";
            operandValueKnown[k] = true;
        }
    }

    // Verify all operands are written.
    for (size_t i = 0; i < subgraph.operands.size(); ++i) {
        NN_VALIDATE(operandValueKnown[i]) << "Operand " << i << " is never written";
    }

    // TODO(b/77871786): verify that every operation has at least one output operand that is read?

    return {};
}
// Validate a subgraph, ensuring all subgraphs it depends on are also validated.
//
// `referencedIndex` is empty if the subgraph being validated is the main subgraph, otherwise it is
// the index of the referenced subgraph being validated.
//
// referenced[i] and (*subgraphVersionCache)[i] correspond to the same subgraph, and therefore
// `referenced` and `subgraphVersionCache` must have the same length.
Result<Version> validateModelSubgraph(const Model::Subgraph& subgraph,
                                      std::optional<size_t> referencedIndex,
                                      size_t operandValuesSize,
                                      const std::vector<size_t>& poolSizes,
                                      const std::vector<Model::Subgraph>& referenced,
                                      std::vector<std::optional<Version>>* subgraphVersionCache) {
    CHECK(subgraphVersionCache != nullptr);
    CHECK_EQ(referenced.size(), subgraphVersionCache->size());

    // Quickly return if the current subgraph has already been checked for its version.
    if (referencedIndex.has_value()) {
        if (auto version = subgraphVersionCache->at(*referencedIndex)) {
            return *version;
        }
    }

    NN_VALIDATE(!subgraph.operands.empty());
    NN_VALIDATE(!subgraph.operations.empty());
    // TODO(b/173780642): Clarify whether subgraphs with no inputs or outputs are valid.
    // NN_VALIDATE(!subgraph.inputIndexes.empty());
    // NN_VALIDATE(!subgraph.outputIndexes.empty());

    const auto operandVersions = NN_TRY(validateOperands(
            subgraph.operands, operandValuesSize, poolSizes, referenced, subgraphVersionCache));
    const auto operationsVersion = NN_TRY(validateOperations(subgraph.operations, subgraph.operands,
                                                             operandVersions, referenced));

    // Accumulate the versions from all operands and operations.
    const auto version = std::accumulate(operandVersions.begin(), operandVersions.end(),
                                         operationsVersion, combineVersions);

    NN_TRY(validateModelSubgraphInputOutputs(subgraph.inputIndexes, subgraph.operands,
                                             Operand::LifeTime::SUBGRAPH_INPUT));
    NN_TRY(validateModelSubgraphInputOutputs(subgraph.outputIndexes, subgraph.operands,
                                             Operand::LifeTime::SUBGRAPH_OUTPUT));

    NN_TRY(validateExecutionOrder(subgraph));

    // Mark the current subgraph as having already been validated so the caller can quickly return
    // if this subgraph is checked again.
    if (referencedIndex.has_value()) {
        subgraphVersionCache->at(*referencedIndex) = version;
    }
    return version;
}

// Validates a model's extension name/prefix pairs: every name must be well-formed, and both the
// names and the numeric prefixes must be unique. Any extension makes the model require
// Version::ANDROID_Q.
Result<Version> validateModelExtensionNamesAndPrefixes(
        const std::vector<Model::ExtensionNameAndPrefix>& extensionNamesAndPrefixes) {
    for (const auto& extensionNameAndPrefix : extensionNamesAndPrefixes) {
        NN_VALIDATE(isValidExtensionName(extensionNameAndPrefix.name));
    }

    // Sort (references to) the names so duplicates become adjacent.
    std::vector<std::reference_wrapper<const std::string>> names;
    names.reserve(extensionNamesAndPrefixes.size());
    std::transform(extensionNamesAndPrefixes.begin(), extensionNamesAndPrefixes.end(),
                   std::back_inserter(names),
                   [](const Model::ExtensionNameAndPrefix& extensionNameAndPrefix) {
                       return std::cref(extensionNameAndPrefix.name);
                   });
    std::sort(names.begin(), names.end(), std::less<std::string>{});
    const auto nameIter =
            std::adjacent_find(names.begin(), names.end(), std::equal_to<std::string>{});
    NN_VALIDATE(nameIter == names.end())
            << "ExtensionNamesAndPrefixes has duplicate name " << nameIter->get();

    // Same duplicate check for the prefixes.
    std::vector<uint16_t> types;
    types.reserve(extensionNamesAndPrefixes.size());
    std::transform(extensionNamesAndPrefixes.begin(), extensionNamesAndPrefixes.end(),
                   std::back_inserter(types),
                   [](const Model::ExtensionNameAndPrefix& extensionNameAndPrefix) {
                       return extensionNameAndPrefix.prefix;
                   });
    std::sort(types.begin(), types.end());
    const auto typeIter = std::adjacent_find(types.begin(), types.end());
    NN_VALIDATE(typeIter == types.end())
            << "ExtensionNamesAndPrefixes has duplicate type " << *typeIter;

    const bool hasExtensions = !extensionNamesAndPrefixes.empty();
    return hasExtensions ? Version::ANDROID_Q : Version::ANDROID_OC_MR1;
}
// Makes sure the model does not contain subgraph reference cycles.
//
// This function verifies that referencedSubgraphs[subgraphIndex] and any subgraphs it references
// do not contain any reference cycles. `path` is used to keep track of which referenced subgraphs
// have already been visited in the current recursive reference path. `verified` is a cache to
// keep track of which referenced subgraphs have already been verified not to form reference
// cycles.
//
// referencedSubgraphs[i], (*path)[i], and (*verified)[i] all correspond to the same subgraph, and
// therefore `referencedSubgraphs`, `path`, and `verified` must all have the same length.
Result<void> checkNoReferenceCycles(const std::vector<Model::Subgraph>& referencedSubgraphs,
                                    uint32_t subgraphIndex, std::vector<bool>* path,
                                    std::vector<bool>* verified) {
    CHECK(path != nullptr);
    CHECK(verified != nullptr);
    CHECK_EQ(referencedSubgraphs.size(), path->size());
    CHECK_EQ(referencedSubgraphs.size(), verified->size());
    const auto& subgraph = referencedSubgraphs.at(subgraphIndex);

    // Quickly return if the current subgraph has already been verified to have no reference
    // cycles.
    if ((*verified)[subgraphIndex]) {
        return {};
    }

    // Add the current subgraph to the path (making sure that it is not already part of the path),
    // and verify that all subgraphs this subgraph references do not contain cycles. The current
    // subgraph is removed from the path only after all subgraphs this subgraph references have
    // been checked.
    NN_VALIDATE((*path)[subgraphIndex] == false) << "Model contains a circular subgraph reference";
    (*path)[subgraphIndex] = true;
    for (const Operand& operand : subgraph.operands) {
        if (operand.lifetime == Operand::LifeTime::SUBGRAPH) {
            // A SUBGRAPH operand stores the index of the referenced subgraph in location.offset.
            const uint32_t refSubgraphIndex = operand.location.offset;
            NN_TRY(checkNoReferenceCycles(referencedSubgraphs, refSubgraphIndex, path, verified));
        }
    }
    (*path)[subgraphIndex] = false;

    // Mark the current subgraph as having already been verified so the caller can quickly return
    // if this subgraph is checked again.
    (*verified)[subgraphIndex] = true;
    return {};
}

// Convenience overload: checks every referenced subgraph for reference cycles.
Result<void> checkNoReferenceCycles(const std::vector<Model::Subgraph>& referencedSubgraphs) {
    const size_t count = referencedSubgraphs.size();
    std::vector<bool> path(count);
    std::vector<bool> verified(count);
    for (size_t i = 0; i < count; ++i) {
        NN_TRY(checkNoReferenceCycles(referencedSubgraphs, i, &path, &verified));
    }
    return {};
}

// Validates a whole model: its pools, extensions, referenced subgraphs (checked for cycles first),
// and finally the main subgraph. Returns the minimum version supporting the entire model.
Result<Version> validateModel(const Model& model) {
    auto version = NN_TRY(validateVector(model.pools, validateSharedMemory));
    version = combineVersions(
            version, NN_TRY(validateModelExtensionNamesAndPrefixes(model.extensionNameToPrefix)));

    // Ignore relaxComputationFloat32toFloat16 version because in the worst case it makes the
    // execution stricter.

    // Referenced models were introduced in Android R.
    const bool hasReferencedModels = !model.referenced.empty();
    const auto referenceModelVersion =
            hasReferencedModels ? Version::ANDROID_R : Version::ANDROID_OC_MR1;
    version = combineVersions(version, referenceModelVersion);

    // Ensure that there are no cycles formed by the subgraphs.
    NN_TRY(checkNoReferenceCycles(model.referenced));

    // Get memory sizes.
    const auto [operandValuesSize, poolSizes] = getMemorySizes(model);

    // Validate referenced subgraphs.
    auto subgraphVersionCache = std::vector<std::optional<Version>>(model.referenced.size());
    for (size_t referencedIndex = 0; referencedIndex < model.referenced.size(); ++referencedIndex) {
        const auto& subgraph = model.referenced[referencedIndex];
        const auto subgraphVersion =
                NN_TRY(validateModelSubgraph(subgraph, referencedIndex, operandValuesSize,
                                             poolSizes, model.referenced, &subgraphVersionCache));
        version = combineVersions(version, subgraphVersion);
    }

    // Validate main subgraph.
    const auto subgraphVersion =
            NN_TRY(validateModelSubgraph(model.main, std::nullopt, operandValuesSize, poolSizes,
                                         model.referenced, &subgraphVersionCache));
    version = combineVersions(version, subgraphVersion);

    return version;
}
// A non-trivial buffer descriptor validates to Version::ANDROID_R.
Result<Version> validateBufferDesc(const BufferDesc& bufferDesc) {
    // An empty BufferDesc is the default value, so it is implicitly valid for all versions.
    return bufferDesc.dimensions.empty() ? Version::ANDROID_OC_MR1 : Version::ANDROID_R;
}

// A buffer role's probability must lie in (0.0, 1.0].
Result<Version> validateBufferRole(const BufferRole& bufferRole) {
    NN_VALIDATE_GT(bufferRole.probability, 0.0f);
    NN_VALIDATE_LE(bufferRole.probability, 1.0f);
    return Version::ANDROID_R;
}
// Validates a single request argument's data location against its lifetime. `memorySizes` holds
// the size of each request memory pool (0 for pools of unknown size).
Result<Version> validateRequestArgument(const Request::Argument& requestArgument,
                                        const std::vector<size_t>& memorySizes, bool isOutput) {
    const auto lifetime = requestArgument.lifetime;
    const auto& location = requestArgument.location;
    const auto& dimensions = requestArgument.dimensions;

    switch (lifetime) {
        case Request::Argument::LifeTime::POOL: {
            NN_VALIDATE(location.pointer == kNullptrVariant);
            NN_VALIDATE_LT(location.poolIndex, memorySizes.size());
            // Do the addition using uint64_t to avoid potential wrap-around problems.
            const auto lastPosition =
                    static_cast<uint64_t>(location.offset) + location.length + location.padding;
            const auto memorySize = memorySizes[location.poolIndex];
            NN_VALIDATE_LE(lastPosition, memorySize);
            if (memorySize > 0) {
                // Must specify a positive length if the memory pool has a known size.
                NN_VALIDATE_GT(location.length, 0u);
            }
            return Version::ANDROID_OC_MR1;
        }
        case Request::Argument::LifeTime::NO_VALUE:
            // An omitted argument must have a completely empty location and no dimensions.
            NN_VALIDATE(location.pointer == kNullptrVariant);
            NN_VALIDATE_EQ(location.poolIndex, 0u);
            NN_VALIDATE_EQ(location.offset, 0u);
            NN_VALIDATE_EQ(location.length, 0u);
            NN_VALIDATE_EQ(location.padding, 0u);
            NN_VALIDATE(dimensions.empty());
            return Version::ANDROID_OC_MR1;
        case Request::Argument::LifeTime::POINTER: {
            const bool isNullptr =
                    std::visit([](auto ptr) { return ptr == nullptr; }, location.pointer);
            NN_VALIDATE(!isNullptr);
            NN_VALIDATE_EQ(location.poolIndex, 0u);
            NN_VALIDATE_EQ(location.offset, 0u);
            NN_VALIDATE_NE(location.length, 0u);
            if (isOutput) {
                // Outputs must be writable, i.e. a non-const void* pointer.
                NN_VALIDATE(std::holds_alternative<void*>(location.pointer));
            }
            return Version::ANDROID_OC_MR1;
        }
    }
    NN_VALIDATE_FAIL() << "Invalid Request::Argument::LifeTime " << lifetime;
}

// Validates one request memory pool variant: a memory domain token, a driver-allocated buffer, or
// a shared memory object.
Result<Version> validateRequestMemoryPool(const Request::MemoryPool& memoryPool) {
    if (std::holds_alternative<Request::MemoryDomainToken>(memoryPool)) {
        NN_VALIDATE(std::get<Request::MemoryDomainToken>(memoryPool) != kInvalidMemoryDomainToken);
        return Version::ANDROID_R;
    }
    if (std::holds_alternative<SharedBuffer>(memoryPool)) {
        NN_VALIDATE(std::get<SharedBuffer>(memoryPool) != nullptr);
        return Version::ANDROID_R;
    }
    return validateSharedMemory(std::get<SharedMemory>(memoryPool));
}
// Validates a request's memory pools and every input/output argument, annotating errors with the
// failing argument's index.
Result<Version> validateRequest(const Request& request) {
    auto version = NN_TRY(validateVector(request.pools, validateRequestMemoryPool));

    // Get memory sizes. For IBuffer or MemoryDomainToken types, set size to 0.
    std::vector<size_t> memorySizes;
    memorySizes.reserve(request.pools.size());
    std::transform(request.pools.begin(), request.pools.end(), std::back_inserter(memorySizes),
                   [](const Request::MemoryPool& memoryPool) {
                       const auto* memory = std::get_if<SharedMemory>(&memoryPool);
                       return memory != nullptr ? getSize(*memory) : 0;
                   });

    for (size_t i = 0; i < request.inputs.size(); ++i) {
        const auto& input = request.inputs[i];
        auto result = validateRequestArgument(input, memorySizes, /*isOutput=*/false);
        if (!result.has_value()) {
            return error() << std::move(result).error() << " for input RequestArgument " << i;
        }
        version = combineVersions(version, result.value());
    }
    for (size_t i = 0; i < request.outputs.size(); ++i) {
        const auto& output = request.outputs[i];
        auto result = validateRequestArgument(output, memorySizes, /*isOutput=*/true);
        if (!result.has_value()) {
            return error() << std::move(result).error() << " for output RequestArgument " << i;
        }
        version = combineVersions(version, result.value());
    }

    return version;
}

// A present time point must be non-negative; presence requires Version::ANDROID_R.
Result<Version> validateOptionalTimePoint(const OptionalTimePoint& optionalTimePoint) {
    if (optionalTimePoint.has_value()) {
        NN_VALIDATE_GE(optionalTimePoint->time_since_epoch().count(), 0);
    }
    // An omitted time point is the default value, so it is implicitly valid for all versions.
    return !optionalTimePoint.has_value() ? Version::ANDROID_OC_MR1 : Version::ANDROID_R;
}

// A present duration must be non-negative; presence requires Version::ANDROID_R.
Result<Version> validateOptionalTimeoutDuration(const OptionalDuration& optionalTimeoutDuration) {
    if (optionalTimeoutDuration.has_value()) {
        NN_VALIDATE_GE(optionalTimeoutDuration->count(), 0);
    }
    // An omitted duration is the default value, so it is implicitly valid for all versions.
    return !optionalTimeoutDuration.has_value() ? Version::ANDROID_OC_MR1 : Version::ANDROID_R;
}
Result<Version> validateCacheToken(const CacheToken& cacheToken) {
    // A CacheToken of 0 is the default value, so it is implicitly valid for all versions.
    constexpr auto kDefaultCacheToken = CacheToken{};
    return cacheToken == kDefaultCacheToken ? Version::ANDROID_OC_MR1 : Version::ANDROID_Q;
}

Result<Version> validateSyncFence(const SyncFence& syncFence) {
    // The absence of a sync fence is implicitly valid for all versions.
    if (!syncFence.hasFd()) {
        return Version::ANDROID_OC_MR1;
    }
    NN_VALIDATE_GE(syncFence.getFd(), 0);
    return Version::ANDROID_R;
}

// Cross-checks the request's arguments against the model's corresponding operands: argument
// counts, ranks, dimensions, and data lengths. `operandIndexes` maps each argument to its model
// operand. Unspecified output dimensions are accepted only when `isOutput` and
// `allowUnspecifiedOutput` hold, and raise the required version to Version::ANDROID_Q.
Result<Version> validateRequestArgumentsForModel(
        const std::vector<Request::Argument>& requestArguments,
        const std::vector<uint32_t>& operandIndexes, const std::vector<Operand>& operands,
        bool isOutput, bool allowUnspecifiedOutput) {
    auto version = Version::ANDROID_OC_MR1;
    // The request should specify as many arguments as were described in the model.
    const std::string_view type = isOutput ? "output" : "input";
    const size_t requestArgumentCount = requestArguments.size();
    NN_VALIDATE_EQ(requestArgumentCount, operandIndexes.size())
            << "Request specifies " << requestArgumentCount << " " << type
            << "s but the model has " << operandIndexes.size();
    for (size_t requestArgumentIndex = 0; requestArgumentIndex < requestArgumentCount;
         requestArgumentIndex++) {
        const Request::Argument& requestArgument = requestArguments[requestArgumentIndex];
        // Get the operand index for this argument. We extract it from the list
        // that was provided in the call to ANeuralNetworksModel_identifyInputsAndOutputs.
        // We assume in this function that the model has been validated already.
        const uint32_t operandIndex = operandIndexes[requestArgumentIndex];
        const Operand& operand = operands[operandIndex];
        if (requestArgument.lifetime != Request::Argument::LifeTime::NO_VALUE) {
            const bool isExtensionType = isExtension(operand.type);
            // If the argument specified a dimension, validate it.
            uint32_t modelRank = operand.dimensions.size();
            uint32_t requestRank = requestArgument.dimensions.size();
            if (requestRank == 0) {
                // NOTE: validateRequestArguments cannot validate unknown tensor rank with
                // extension operand type.
                if (!isExtensionType && !isNonExtensionScalar(operand.type)) {
                    if (modelRank <= 0) {
                        NN_VALIDATE(isOutput)
                                << "Model has unknown input rank but the request does not "
                                   "specify the rank.";
                        NN_VALIDATE(allowUnspecifiedOutput)
                                << "Model has unknown output rank and request does not specify it.";
                        // Unspecified output dimensions introduced in Android Q.
                        version = combineVersions(version, Version::ANDROID_Q);
                    }
                }
                // Validate that all the dimensions are specified in the model.
                for (size_t i = 0; i < modelRank; i++) {
                    if (operand.dimensions[i] == 0) {
                        NN_VALIDATE(isOutput && allowUnspecifiedOutput)
                                << "Model has dimension " << i
                                << " set to 0 but the request does not specify the dimension.";
                        // Unspecified output dimensions introduced in Android Q.
                        version = combineVersions(version, Version::ANDROID_Q);
                    }
                }
            } else {
                NN_VALIDATE(modelRank == 0 || requestRank == modelRank)
                        << "Request " << type << " " << requestArgumentIndex
                        << " has number of dimensions (" << requestRank
                        << ") different than the model's (" << modelRank << ")";
                for (size_t i = 0; i < requestRank; i++) {
                    NN_VALIDATE(modelRank == 0 || operand.dimensions[i] == 0 ||
                                requestArgument.dimensions[i] == operand.dimensions[i])
                            << "Request " << type << " " << requestArgumentIndex
                            << " has dimension " << i << " of " << requestArgument.dimensions[i]
                            << " different than the model's " << operand.dimensions[i];
                    if (requestArgument.dimensions[i] == 0) {
                        NN_VALIDATE(isOutput && allowUnspecifiedOutput)
                                << "Request " << type << " " << requestArgumentIndex
                                << " has dimension " << i << " of zero";
                        // Unspecified output dimensions introduced in Android Q.
                        version = combineVersions(version, Version::ANDROID_Q);
                    }
                }
            }
            // NOTE: validateRequestArguments cannot validate DataLocation::length
            // with extension operand type.
            if (!isExtensionType && requestArgument.location.length != 0) {
                const auto dimensions =
                        NN_TRY(combineDimensions(operand.dimensions, requestArgument.dimensions));
                const size_t expectedLength = getNonExtensionSize(operand.type, dimensions).value();
                if (expectedLength != 0) {
                    NN_VALIDATE_EQ(requestArgument.location.length, expectedLength)
                            << "Request " << type << " " << requestArgumentIndex
                            << " expected a size of " << expectedLength << " but got "
                            << requestArgument.location.length;
                }
            }
        }
    }
    return version;
}

// Validates the request on its own, the model on its own, and then the request's inputs and
// outputs against the model's main subgraph.
Result<Version> validateRequestForModelImpl(const Request& request, const Model& model,
                                            bool allowUnspecifiedOutput) {
    auto version = NN_TRY(validateRequest(request));
    version = combineVersions(version, NN_TRY(validateModel(model)));
    version = combineVersions(version,
                              NN_TRY(validateRequestArgumentsForModel(
                                      request.inputs, model.main.inputIndexes, model.main.operands,
                                      /*isOutput=*/false, /*allowUnspecifiedOutput=*/true)));
    version = combineVersions(
            version, NN_TRY(validateRequestArgumentsForModel(
                             request.outputs, model.main.outputIndexes, model.main.operands,
                             /*isOutput=*/true, allowUnspecifiedOutput)));
    return version;
}
// Validates a memory descriptor against all the roles it will serve across the given prepared
// models. All role operands must agree on type, scale, zero point, and (for non-extension types)
// extra parameters; their dimensions are merged with the descriptor's. On success, optionally
// outputs the collected (preparedModel, ioType, index) roles and the combined operand.
Result<Version> validateMemoryDescImpl(
        const BufferDesc& desc, const std::vector<SharedPreparedModel>& preparedModels,
        const std::vector<BufferRole>& inputRoles, const std::vector<BufferRole>& outputRoles,
        const std::function<const Model*(const SharedPreparedModel&)>& getModel,
        std::set<PreparedModelRole>* preparedModelRoles, Operand* combinedOperand) {
    NN_VALIDATE(!preparedModels.empty());
    NN_VALIDATE(!inputRoles.empty() || !outputRoles.empty());

    std::set<PreparedModelRole> roles;
    std::vector<nn::Operand> operands;
    operands.reserve(inputRoles.size() + outputRoles.size());
    for (const auto& role : inputRoles) {
        NN_VALIDATE_LT(role.modelIndex, preparedModels.size());
        const auto& preparedModel = preparedModels[role.modelIndex];
        NN_VALIDATE(preparedModel != nullptr);
        const auto* model = getModel(preparedModel);
        NN_VALIDATE(model != nullptr);
        const auto& inputIndexes = model->main.inputIndexes;
        NN_VALIDATE_LT(role.ioIndex, inputIndexes.size());
        NN_VALIDATE_GT(role.probability, 0.0f);
        NN_VALIDATE_LE(role.probability, 1.0f);
        // emplace fails (success == false) on a duplicate role.
        const auto [it, success] = roles.emplace(preparedModel.get(), IOType::INPUT, role.ioIndex);
        NN_VALIDATE(success);
        operands.push_back(model->main.operands[inputIndexes[role.ioIndex]]);
    }
    for (const auto& role : outputRoles) {
        NN_VALIDATE_LT(role.modelIndex, preparedModels.size());
        const auto& preparedModel = preparedModels[role.modelIndex];
        NN_VALIDATE(preparedModel != nullptr);
        const auto* model = getModel(preparedModel);
        NN_VALIDATE(model != nullptr);
        const auto& outputIndexes = model->main.outputIndexes;
        NN_VALIDATE_LT(role.ioIndex, outputIndexes.size());
        NN_VALIDATE_GT(role.probability, 0.0f);
        NN_VALIDATE_LE(role.probability, 1.0f);
        const auto [it, success] = roles.emplace(preparedModel.get(), IOType::OUTPUT, role.ioIndex);
        NN_VALIDATE(success);
        operands.push_back(model->main.operands[outputIndexes[role.ioIndex]]);
    }

    CHECK(!operands.empty());
    const auto opType = operands.front().type;

    Dimensions dimensions = desc.dimensions;
    for (const auto& operand : operands) {
        NN_VALIDATE_EQ(operand.type, opType) << operand.type << " vs " << operands.front().type;
        NN_VALIDATE_EQ(operand.scale, operands.front().scale);
        NN_VALIDATE_EQ(operand.zeroPoint, operands.front().zeroPoint);
        // NOTE: validateMemoryDesc cannot validate extra parameters for extension operand type.
        if (!isExtension(opType)) {
            NN_VALIDATE_EQ(operand.extraParams, operands.front().extraParams)
                    << operand.extraParams << " vs " << operands.front().extraParams;
        }
        dimensions = NN_TRY(combineDimensions(dimensions, operand.dimensions));
    }

    // NOTE: validateMemoryDesc cannot validate scalar dimensions with extension operand type.
    if (!isExtension(opType)) {
        NN_VALIDATE(!isNonExtensionScalar(opType) || dimensions.empty())
                << "invalid dimensions with scalar operand type.";
    }

    if (preparedModelRoles != nullptr) {
        *preparedModelRoles = std::move(roles);
    }
    if (combinedOperand != nullptr) {
        *combinedOperand = operands.front();
        combinedOperand->dimensions = dimensions;
    }
    return Version::ANDROID_R;
}

// Adapts an operation's (operands, inputIndexes, outputIndexes) to the IOperationValidationContext
// interface used by per-operation validators. Holds references only; the referenced vectors must
// outlive the context.
class OperationValidationContext : public IOperationValidationContext {
    DISALLOW_IMPLICIT_CONSTRUCTORS(OperationValidationContext);

   public:
    OperationValidationContext(const char* operationName, const std::vector<uint32_t>& inputIndexes,
                               const std::vector<uint32_t>& outputIndexes,
                               const std::vector<Operand>& operands)
        : operationName(operationName),
          inputIndexes(inputIndexes),
          outputIndexes(outputIndexes),
          operands(operands) {}

    const char* getOperationName() const override;

    uint32_t getNumInputs() const override;
    OperandType getInputType(uint32_t index) const override;
    Shape getInputShape(uint32_t index) const override;
    const Operand::ExtraParams& getInputExtraParams(uint32_t index) const override;

    uint32_t getNumOutputs() const override;
    OperandType getOutputType(uint32_t index) const override;
    Shape getOutputShape(uint32_t index) const override;

   private:
    const Operand* getInputOperand(uint32_t index) const;
    const Operand* getOutputOperand(uint32_t index) const;

    const char* operationName;
    const std::vector<uint32_t>& inputIndexes;
    const std::vector<uint32_t>& outputIndexes;
    const std::vector<Operand>& operands;
};

const char* OperationValidationContext::getOperationName() const {
    return operationName;
}

const Operand* OperationValidationContext::getInputOperand(uint32_t index) const {
    return &operands.at(inputIndexes.at(index));
}

const Operand* OperationValidationContext::getOutputOperand(uint32_t index) const {
    return &operands.at(outputIndexes.at(index));
}
uint32_t OperationValidationContext::getNumInputs() const {
    auto count = inputIndexes.size();
    // Guard the narrowing conversion from size_t to uint32_t.
    CHECK_LE(count, std::numeric_limits<uint32_t>::max());
    return static_cast<uint32_t>(count);
}

uint32_t OperationValidationContext::getNumOutputs() const {
    auto count = outputIndexes.size();
    // Guard the narrowing conversion from size_t to uint32_t.
    CHECK_LE(count, std::numeric_limits<uint32_t>::max());
    return static_cast<uint32_t>(count);
}

OperandType OperationValidationContext::getInputType(uint32_t index) const {
    return getInputOperand(index)->type;
}

Shape OperationValidationContext::getInputShape(uint32_t index) const {
    const Operand* operand = getInputOperand(index);
    return {operand->type, operand->dimensions, operand->scale, operand->zeroPoint,
            operand->extraParams};
}

const Operand::ExtraParams& OperationValidationContext::getInputExtraParams(uint32_t index) const {
    return getInputOperand(index)->extraParams;
}

OperandType OperationValidationContext::getOutputType(uint32_t index) const {
    return getOutputOperand(index)->type;
}

Shape OperationValidationContext::getOutputShape(uint32_t index) const {
    const Operand* operand = getOutputOperand(index);
    return {operand->type, operand->dimensions, operand->scale, operand->zeroPoint,
            operand->extraParams};
}

// TODO(b/169345292): reduce the duplicate validation here

// Validates per-channel symmetric quantization parameters: the channel dimension must be in range
// and fully specified, and there must be one positive scale per channel.
Result<void> validateOperandSymmPerChannelQuantParamsImpl(
        const Operand& operand, const Operand::SymmPerChannelQuantParams& channelQuant,
        const char* tag) {
    if (operand.type != OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL) {
        NN_VALIDATE_FAIL();
    }

    NN_VALIDATE_LT(channelQuant.channelDim, operand.dimensions.size()) << tag;
    NN_VALIDATE(!channelQuant.scales.empty()) << tag;
    NN_VALIDATE_EQ(channelQuant.scales.size(), operand.dimensions[channelQuant.channelDim]) << tag;
    NN_VALIDATE_NE(operand.dimensions[channelQuant.channelDim], 0u)
            << tag << " channel dimension " << channelQuant.channelDim << " is underspecified";
    for (uint32_t i = 0; i < operand.dimensions[channelQuant.channelDim]; i++) {
        NN_VALIDATE_GT(channelQuant.scales[i], 0.0f) << tag << " invalid scaleArray[" << i << "]";
    }
    return {};
}

Result<void> validateScalarDimensions(const Operand& type, const char* tag) {
    NN_VALIDATE(type.dimensions.empty()) << tag << " invalid dimensions for scalar type";
    return {};
}

// Asymmetric quant8: zeroPoint in [0, 255], scale > 0.
Result<void> validateQuant8AsymmParams(const Operand& type, const char* tag) {
    NN_VALIDATE(0 <= type.zeroPoint && type.zeroPoint <= 255)
            << tag << " invalid zeroPoint: " << type.zeroPoint;
    NN_VALIDATE_GT(type.scale, 0.0f) << tag << " invalid scale";
    return {};
}

// Signed asymmetric quant8: zeroPoint in [-128, 127], scale > 0.
Result<void> validateQuant8AsymmSignedParams(const Operand& type, const char* tag) {
    NN_VALIDATE(-128 <= type.zeroPoint && type.zeroPoint <= 127)
            << tag << " invalid zeroPoint: " << type.zeroPoint;
    NN_VALIDATE_GT(type.scale, 0.0f) << tag << " invalid scale";
    return {};
}

// Symmetric quant8: zeroPoint must be 0, scale > 0.
Result<void> validateQuant8SymmParams(const Operand& type, const char* tag) {
    NN_VALIDATE_EQ(type.zeroPoint, 0) << tag << " invalid zeroPoint: " << type.zeroPoint;
    NN_VALIDATE_GT(type.scale, 0.0f) << tag << " invalid scale";
    return {};
}

// Asymmetric quant16: zeroPoint in [0, 65535], scale > 0.
Result<void> validateQuant16AsymmParams(const Operand& type, const char* tag) {
    NN_VALIDATE(0 <= type.zeroPoint && type.zeroPoint <= 65535)
            << tag << " invalid zeroPoint: " << type.zeroPoint;
    NN_VALIDATE_GT(type.scale, 0.0f) << tag << " invalid scale";
    return {};
}

// Symmetric quantization: zeroPoint must be 0, scale > 0.
Result<void> validateQuantSymmParams(const Operand& type, const char* tag) {
    NN_VALIDATE_EQ(type.zeroPoint, 0) << tag << " zeroPoint is not zero";
    NN_VALIDATE_GT(type.scale, 0.0f) << tag << " invalid scale";
    return {};
}

// Non-quantized types must carry no quantization parameters at all.
Result<void> validateNoQuantParams(const Operand& type, const char* tag) {
    NN_VALIDATE_EQ(type.zeroPoint, 0) << tag << " zeroPoint is not zero";
    NN_VALIDATE_EQ(type.scale, 0.0f) << tag << " scale is not zero";
    return {};
}
// Validates a tensor operand's dimensions. Unless `allowPartial`, the rank must be non-zero and
// every dimension specified. Also checks that the total byte size fits in uint32_t.
// `extensionOperandTypeInfo` supplies the element byte size for extension types and may be null
// for non-extension types.
Result<void> validateTensorDimensions(
        const Operand& type, const Extension::OperandTypeInformation* extensionOperandTypeInfo,
        const char* tag, bool allowPartial) {
    if (!allowPartial) {
        NN_VALIDATE(!type.dimensions.empty()) << tag << " invalid operand dimensions";
    }
    uint64_t size = isExtension(type.type) ? extensionOperandTypeInfo->byteSize
                                           : getNonExtensionSize(type.type);
    constexpr uint64_t kMaxSize = std::numeric_limits<uint32_t>::max();
    for (size_t i = 0; i < type.dimensions.size(); i++) {
        if (!allowPartial) {
            NN_VALIDATE_NE(type.dimensions[i], 0u) << tag << " invalid operand dimensions";
        }
        if (type.dimensions[i] != 0) {
            // The running product is re-checked after each multiply, so it cannot overflow
            // uint64_t before the validation fires.
            size *= type.dimensions[i];
            NN_VALIDATE_LE(size, kMaxSize) << tag << " operand byte size exceeds " << kMaxSize;
        }
    }
    return {};
}

// Validates an operand's type, dimensions, and quantization parameters. For extension types,
// `extensionOperandTypeInfo` must be provided; for non-extension types it must be null.
Result<void> validateOperandTypeImpl(
        const Operand& type,
        const Extension::OperandTypeInformation* const extensionOperandTypeInfo, const char* tag,
        bool allowPartial) {
    if (isExtension(type.type)) {
        NN_VALIDATE(extensionOperandTypeInfo != nullptr);
        if (extensionOperandTypeInfo->isTensor) {
            NN_TRY(validateTensorDimensions(type, extensionOperandTypeInfo, tag, allowPartial));
        } else {
            NN_TRY(validateScalarDimensions(type, tag));
        }
        return validateNoQuantParams(type, tag);
    }

    NN_VALIDATE(extensionOperandTypeInfo == nullptr);
    NN_TRY(validateOperandType(type.type));

    if (isNonExtensionScalar(type.type)) {
        NN_TRY(validateScalarDimensions(type, tag));
        if (type.type != OperandType::OEM) {  // Historically, we have allowed OEM types
                                              // to use quantization parameters.
            NN_TRY(validateNoQuantParams(type, tag));
        }
    } else {
        NN_TRY(validateTensorDimensions(type, extensionOperandTypeInfo, tag, allowPartial));
        if (type.type == OperandType::TENSOR_QUANT8_ASYMM) {
            NN_TRY(validateQuant8AsymmParams(type, tag));
        } else if (type.type == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
            NN_TRY(validateQuant8AsymmSignedParams(type, tag));
        } else if (type.type == OperandType::TENSOR_QUANT8_SYMM) {
            NN_TRY(validateQuant8SymmParams(type, tag));
        } else if (type.type == OperandType::TENSOR_QUANT16_ASYMM) {
            NN_TRY(validateQuant16AsymmParams(type, tag));
        } else if (type.type == OperandType::TENSOR_QUANT16_SYMM) {
            NN_TRY(validateQuantSymmParams(type, tag));
        } else if (type.type == OperandType::TENSOR_INT32 ||
                   type.type == OperandType::TENSOR_OEM_BYTE) {
            // TODO(b/119869082): TENSOR_INT32 should not use quantization parameters.
            // Historically, we have allowed OEM types to use quantization parameters.
        } else {
            NN_TRY(validateNoQuantParams(type, tag));
        }
    }

    return {};
}

// Checks that every index in `list` refers to an existing operand.
Result<void> validateOperandListImpl(const std::vector<uint32_t>& list, size_t operandCount,
                                     const char* tag) {
    for (size_t i = 0; i < list.size(); i++) {
        NN_VALIDATE_LT(list[i], operandCount) << tag << " invalid operand index at " << i << " = "
                                              << list[i] << ", operandCount " << operandCount;
    }
    return {};
}
<< " outputs"; - for (size_t i = 0; i < inputIndexes.size(); i++) { - NN_VALIDATE_EQ(operands[inputIndexes[i]].type, inExpectedTypes[i]) - << "Invalid input tensor type " << operands[inputIndexes[i]].type << " for input " - << i << ", expected " << inExpectedTypes[i]; - } - for (size_t i = 0; i < outputIndexes.size(); i++) { - NN_VALIDATE_EQ(operands[outputIndexes[i]].type, outExpectedInTypes[i]) - << "Invalid output tensor type " << operands[outputIndexes[i]].type << " for input " - << i << ", expected " << outExpectedInTypes[i]; - } - - return {}; -} - -Result<void> validateSubgraphReference(const std::vector<Model::Subgraph>& subgraphs, - const Operand& modelOperand) { - NN_VALIDATE_EQ(modelOperand.type, OperandType::SUBGRAPH) - << "Unexpected operand type: " << modelOperand.type; - NN_VALIDATE_LT(modelOperand.location.offset, subgraphs.size()) << "Invalid subgraph reference"; - return {}; -} -const Model::Subgraph& getSubgraph(const std::vector<Model::Subgraph>& subgraphs, - const Operand& modelOperand) { - return subgraphs.at(modelOperand.location.offset); -} -uint32_t getInputCount(const std::vector<Model::Subgraph>& subgraphs, const Operand& modelOperand) { - return getSubgraph(subgraphs, modelOperand).inputIndexes.size(); -} -uint32_t getOutputCount(const std::vector<Model::Subgraph>& subgraphs, - const Operand& modelOperand) { - return getSubgraph(subgraphs, modelOperand).outputIndexes.size(); -} -const Operand& getInputOperand(const std::vector<Model::Subgraph>& subgraphs, - const Operand& modelOperand, uint32_t index) { - const Model::Subgraph& subgraph = getSubgraph(subgraphs, modelOperand); - return subgraph.operands.at(subgraph.inputIndexes.at(index)); -} -const Operand& getOutputOperand(const std::vector<Model::Subgraph>& subgraphs, - const Operand& modelOperand, uint32_t index) { - const Model::Subgraph& subgraph = getSubgraph(subgraphs, modelOperand); - return subgraph.operands.at(subgraph.outputIndexes.at(index)); -} - -// Checks if two operands 
have the same types, ranks (if specified), dimensions -// (if specified), scales, zeroPoints, and extraParams. -Result<void> compatible(const Operand& a, const Operand& b) { - NN_VALIDATE_EQ(a.type, b.type) << a.type << " != " << b.type; - if (!a.dimensions.empty() && !b.dimensions.empty()) { - NN_VALIDATE_EQ(a.dimensions.size(), b.dimensions.size()) << "Incompatible dimensions"; - for (uint32_t i = 0, n = a.dimensions.size(); i < n; ++i) { - if (a.dimensions[i] != 0 && b.dimensions[i] != 0) { - NN_VALIDATE_EQ(a.dimensions[i], b.dimensions[i]) << "Incompatible dimensions"; - } - } - } - NN_VALIDATE_EQ(a.scale, b.scale); - NN_VALIDATE_EQ(a.zeroPoint, b.zeroPoint); - NN_VALIDATE_EQ(a.extraParams, b.extraParams) << a.extraParams << " != " << b.extraParams; - return {}; -} - -Result<void> validateConditionOperand(const Operand& operand) { - NN_VALIDATE_EQ(operand.type, OperandType::TENSOR_BOOL8) - << "Unexpected condition operand type: " << operand.type; - NN_VALIDATE_EQ(operand.dimensions.size(), 1u) << "Condition operand must be a singleton"; - NN_VALIDATE_EQ(operand.dimensions[0], 1u) << "Condition operand must be a singleton"; - return {}; -} - -Result<Version> validateIfOperation(const std::vector<uint32_t>& inputs, - const std::vector<uint32_t>& outputs, - const std::vector<Operand>& operands, - const std::vector<Model::Subgraph>& subgraphs) { - namespace op = operation_if; - NN_VALIDATE_GE(inputs.size(), 3u) << "IF must have at least 3 inputs"; - NN_VALIDATE_GE(outputs.size(), 1u) << "IF must have at least 1 output"; - auto validateBranchOperand = [&](const Operand& branchModelOperand) -> Result<void> { - auto result = validateSubgraphReference(subgraphs, branchModelOperand); - if (!result.has_value()) { - return error() << std::move(result).error() - << " -- Operand is not a valid subgraph reference"; - } - const uint32_t branchModelInputCount = getInputCount(subgraphs, branchModelOperand); - const uint32_t branchModelOutputCount = getOutputCount(subgraphs, 
branchModelOperand); - NN_VALIDATE_EQ(inputs.size(), op::kFirstInput + branchModelInputCount); - NN_VALIDATE_EQ(outputs.size(), branchModelOutputCount); - for (uint32_t i = 0; i < branchModelInputCount; ++i) { - const Operand& innerOperand = getInputOperand(subgraphs, branchModelOperand, i); - const Operand& outerOperand = operands[inputs[op::kFirstInput + i]]; - NN_TRY(compatible(innerOperand, outerOperand)); - } - for (uint32_t i = 0; i < branchModelOutputCount; ++i) { - const Operand& innerOperand = getOutputOperand(subgraphs, branchModelOperand, i); - const Operand& outerOperand = operands[outputs[i]]; - NN_TRY(compatible(innerOperand, outerOperand)); - } - return {}; - }; - auto result = validateConditionOperand(operands[inputs[op::kCondBoolOperand]]); - if (!result.has_value()) { - return error() << std::move(result).error() << " for IF condition operand"; - } - result = validateBranchOperand(operands[inputs[op::kThenModelOperand]]); - if (!result.has_value()) { - return error() << std::move(result).error() << " for IF then model"; - } - result = validateBranchOperand(operands[inputs[op::kElseModelOperand]]); - if (!result.has_value()) { - return error() << std::move(result).error() << " for IF else model"; - } - return Version::ANDROID_R; -} - -Result<Version> validateControlFlowOperandUnknownSize(const Operand& operand) { - if (!isExtension(operand.type) && getNonExtensionSize(operand).value() == 0) { - // 1.3 HAL (corresponding to Version::ANDROID_R) does not support CF operations with - // operands of unknown size. See http://b/132458982#comment63. - return Version::CURRENT_RUNTIME; - } - return Version::ANDROID_R; -} - -Result<Version> validateWhileOperation(const std::vector<uint32_t>& inputs, - const std::vector<uint32_t>& outputs, - const std::vector<Operand>& operands, - const std::vector<Model::Subgraph>& subgraphs) { - // Let the loop have - // - m >= 1 input-output operands, - // - k >= 0 state-only operands, and - // - n >= 0 input-only operands. 
- // Then - // - the WHILE loop operation has (2 + m + k + n) inputs and m outputs. - // - the condition model has (m + k + n) inputs and 1 output. - // - the body model has (m + k + n) inputs and (m + k) outputs. - namespace op = operation_while; - NN_VALIDATE_GE(inputs.size(), 3u) << "WHILE must have at least 3 inputs"; - NN_VALIDATE_GE(outputs.size(), 1u) << "WHILE must have at least 1 output"; - auto validateCondOperand = [&](const Operand& condModelOperand) -> Result<Version> { - Version version = Version::ANDROID_R; - auto result = validateSubgraphReference(subgraphs, condModelOperand); - if (!result.has_value()) { - return error() << std::move(result).error() - << " -- Operand is not a valid subgraph reference"; - } - const uint32_t condModelInputCount = getInputCount(subgraphs, condModelOperand); - const uint32_t condModelOutputCount = getOutputCount(subgraphs, condModelOperand); - NN_VALIDATE_EQ(inputs.size(), op::kFirstInput + condModelInputCount); - NN_VALIDATE_EQ(condModelOutputCount, 1u); - for (uint32_t i = 0; i < condModelInputCount; ++i) { - const Operand& innerOperand = getInputOperand(subgraphs, condModelOperand, i); - const Operand& outerOperand = operands[inputs[op::kFirstInput + i]]; - NN_TRY(compatible(innerOperand, outerOperand)); - version = combineVersions(version, - NN_TRY(validateControlFlowOperandUnknownSize(innerOperand))); - version = combineVersions(version, - NN_TRY(validateControlFlowOperandUnknownSize(outerOperand))); - } - NN_TRY(validateConditionOperand(getOutputOperand(subgraphs, condModelOperand, 0))); - return version; - }; - auto validateBodyOperand = [&](const Operand& bodyModelOperand) -> Result<Version> { - Version version = Version::ANDROID_R; - auto result = validateSubgraphReference(subgraphs, bodyModelOperand); - if (!result.has_value()) { - return error() << std::move(result).error() - << " -- Operand is not a valid subgraph reference"; - } - const uint32_t bodyModelInputCount = getInputCount(subgraphs, 
bodyModelOperand); - const uint32_t bodyModelOutputCount = getOutputCount(subgraphs, bodyModelOperand); - NN_VALIDATE_EQ(inputs.size(), op::kFirstInput + bodyModelInputCount); - NN_VALIDATE_GE(bodyModelOutputCount, outputs.size()); - NN_VALIDATE_GE(bodyModelInputCount, bodyModelOutputCount); - const uint32_t inputOutputCount = outputs.size(); - const uint32_t stateOnlyCount = bodyModelOutputCount - inputOutputCount; - const uint32_t inputOnlyCount = bodyModelInputCount - bodyModelOutputCount; - for (uint32_t i = 0, n = inputOutputCount + stateOnlyCount + inputOnlyCount; i < n; ++i) { - const Operand& innerOperand = getInputOperand(subgraphs, bodyModelOperand, i); - const Operand& outerOperand = operands[inputs[op::kFirstInput + i]]; - NN_TRY(compatible(innerOperand, outerOperand)); - version = combineVersions(version, - NN_TRY(validateControlFlowOperandUnknownSize(innerOperand))); - version = combineVersions(version, - NN_TRY(validateControlFlowOperandUnknownSize(outerOperand))); - } - for (uint32_t i = 0; i < inputOutputCount; ++i) { - const Operand& innerOperand = getOutputOperand(subgraphs, bodyModelOperand, i); - const Operand& outerOperand = operands[outputs[i]]; - NN_TRY(compatible(innerOperand, outerOperand)); - version = combineVersions(version, - NN_TRY(validateControlFlowOperandUnknownSize(outerOperand))); - } - for (uint32_t i = 0, n = inputOutputCount + stateOnlyCount; i < n; ++i) { - const Operand& inputOperand = getInputOperand(subgraphs, bodyModelOperand, i); - const Operand& outputOperand = getOutputOperand(subgraphs, bodyModelOperand, i); - NN_TRY(compatible(inputOperand, outputOperand)); - version = combineVersions(version, - NN_TRY(validateControlFlowOperandUnknownSize(outputOperand))); - } - return version; - }; - auto result = validateCondOperand(operands[inputs[op::kCondModelOperand]]); - if (!result.has_value()) { - return error() << std::move(result).error() << " for WHILE condition model"; - } - auto version = result.value(); - result = 
validateBodyOperand(operands[inputs[op::kBodyModelOperand]]); - if (!result.has_value()) { - return error() << std::move(result).error() << " for WHILE body model"; - } - version = combineVersions(version, result.value()); - return version; -} - -Result<Version> validateOperationButNotOperandsImpl(const Operation& operation, - const std::vector<Operand>& operands, - const std::vector<Model::Subgraph>& subgraphs) { - const auto opType = operation.type; - const auto& inputIndexes = operation.inputs; - const auto& outputIndexes = operation.outputs; - - NN_TRY(validateOperandListImpl(inputIndexes, operands.size(), - "ANeuralNetworksModel_addOperation inputs")); - NN_TRY(validateOperandListImpl(outputIndexes, operands.size(), - "ANeuralNetworksModel_addOperation outputs")); - - if (isExtension(opType)) { - // There is no other validation we can do for an extension operation. - return Version::ANDROID_Q; - } - - auto invalidInOutNumberMessage = [opType, &inputIndexes, &outputIndexes](int expIn, - int expOut) { - std::ostringstream os; - os << "Invalid number of input operands (" << inputIndexes.size() << ", expected " << expIn - << ") or output operands (" << outputIndexes.size() << ", expected " << expOut - << ") for operation " << opType; - return os.str(); - }; - - switch (opType) { - case OperationType::OEM_OPERATION: { - return Version::ANDROID_OC_MR1; - } - case OperationType::RESHAPE: { - NN_VALIDATE(inputIndexes.size() == 2 && outputIndexes.size() == 1) - << invalidInOutNumberMessage(2, 1); - auto inputType = operands[inputIndexes[0]].type; - Version version; - std::vector<OperandType> inExpectedTypes; - std::vector<OperandType> outExpectedTypes; - if (inputType == OperandType::TENSOR_FLOAT32) { - version = Version::ANDROID_OC_MR1; - inExpectedTypes = {OperandType::TENSOR_FLOAT32, OperandType::TENSOR_INT32}; - outExpectedTypes = {OperandType::TENSOR_FLOAT32}; - } else if (inputType == OperandType::TENSOR_FLOAT16) { - version = Version::ANDROID_Q; - 
inExpectedTypes = {OperandType::TENSOR_FLOAT16, OperandType::TENSOR_INT32}; - outExpectedTypes = {OperandType::TENSOR_FLOAT16}; - } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) { - version = Version::ANDROID_OC_MR1; - inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM, OperandType::TENSOR_INT32}; - outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM}; - } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - version = Version::ANDROID_R; - inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM_SIGNED, - OperandType::TENSOR_INT32}; - outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM_SIGNED}; - } else { - NN_VALIDATE_FAIL() << "Unsupported input tensor type for operation " << opType; - } - const auto inputRank = operands[inputIndexes[0]].dimensions.size(); - NN_VALIDATE_LE(inputRank, 4u) - << "Unsupported input tensor rank for operation " << opType; - NN_TRY(validateOperationOperandTypes(operands, inputIndexes, inExpectedTypes, - outputIndexes, outExpectedTypes)); - return version; - } - case OperationType::DEPTH_TO_SPACE: { - NN_VALIDATE((inputIndexes.size() == 3 || inputIndexes.size() == 2) && - outputIndexes.size() == 1) - << "Invalid number of input operands (" << inputIndexes.size() - << ", expected 3 or 2) or output operands (" << outputIndexes.size() - << ", expected 1) for operation " << opType; - auto inputType = operands[inputIndexes[0]].type; - Version version; - std::vector<OperandType> inExpectedTypes; - std::vector<OperandType> outExpectedTypes; - if (inputType == OperandType::TENSOR_FLOAT32) { - version = Version::ANDROID_OC_MR1; - inExpectedTypes = {OperandType::TENSOR_FLOAT32, OperandType::INT32}; - outExpectedTypes = {OperandType::TENSOR_FLOAT32}; - } else if (inputType == OperandType::TENSOR_FLOAT16) { - version = Version::ANDROID_Q; - inExpectedTypes = {OperandType::TENSOR_FLOAT16, OperandType::INT32}; - outExpectedTypes = {OperandType::TENSOR_FLOAT16}; - } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) { - 
version = Version::ANDROID_OC_MR1; - inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM, OperandType::INT32}; - outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM}; - } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - version = Version::ANDROID_R; - inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM_SIGNED, OperandType::INT32}; - outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM_SIGNED}; - } else { - NN_VALIDATE_FAIL() << "Unsupported input tensor type for operation " << opType; - } - if (inputIndexes.size() == 3) { - inExpectedTypes.push_back(OperandType::BOOL); - version = combineVersions(version, Version::ANDROID_Q); - } else { - version = combineVersions(version, Version::ANDROID_OC_MR1); - } - NN_TRY(validateOperationOperandTypes(operands, inputIndexes, inExpectedTypes, - outputIndexes, outExpectedTypes)); - return version; - } - case OperationType::SPACE_TO_DEPTH: { - NN_VALIDATE((inputIndexes.size() == 3 || inputIndexes.size() == 2) && - outputIndexes.size() == 1) - << "Invalid number of input operands (" << inputIndexes.size() - << ", expected 3 or 2) or output operands (" << outputIndexes.size() - << ", expected 1) for operation " << opType; - auto inputType = operands[inputIndexes[0]].type; - Version version; - std::vector<OperandType> inExpectedTypes; - std::vector<OperandType> outExpectedTypes; - if (inputType == OperandType::TENSOR_FLOAT32) { - version = Version::ANDROID_OC_MR1; - inExpectedTypes = {OperandType::TENSOR_FLOAT32, OperandType::INT32}; - outExpectedTypes = {OperandType::TENSOR_FLOAT32}; - } else if (inputType == OperandType::TENSOR_FLOAT16) { - version = Version::ANDROID_Q; - inExpectedTypes = {OperandType::TENSOR_FLOAT16, OperandType::INT32}; - outExpectedTypes = {OperandType::TENSOR_FLOAT16}; - } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) { - version = Version::ANDROID_OC_MR1; - inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM, OperandType::INT32}; - outExpectedTypes = 
{OperandType::TENSOR_QUANT8_ASYMM}; - } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - version = Version::ANDROID_R; - inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM_SIGNED, OperandType::INT32}; - outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM_SIGNED}; - } else { - NN_VALIDATE_FAIL() << "Unsupported input tensor type for operation " << opType; - } - if (inputIndexes.size() == 3) { - inExpectedTypes.push_back(OperandType::BOOL); - version = combineVersions(version, Version::ANDROID_Q); - } else { - version = combineVersions(version, Version::ANDROID_OC_MR1); - } - NN_TRY(validateOperationOperandTypes(operands, inputIndexes, inExpectedTypes, - outputIndexes, outExpectedTypes)); - return version; - } - case OperationType::EMBEDDING_LOOKUP: { - NN_VALIDATE(inputIndexes.size() == 2 && outputIndexes.size() == 1) - << invalidInOutNumberMessage(2, 1); - auto inputType = operands[inputIndexes[1]].type; - NN_VALIDATE(inputType == OperandType::TENSOR_FLOAT16 || - inputType == OperandType::TENSOR_FLOAT32 || - inputType == OperandType::TENSOR_INT32 || - inputType == OperandType::TENSOR_QUANT8_ASYMM || - inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) - << "Unsupported input tensor type for operation " << opType; - Version version; - std::vector<OperandType> inExpectedTypes = {OperandType::TENSOR_INT32, inputType}; - std::vector<OperandType> outExpectedTypes = {inputType}; - if (inputType == OperandType::TENSOR_FLOAT16 || - inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - version = Version::ANDROID_R; - } else if (inputType == OperandType::TENSOR_INT32 || - inputType == OperandType::TENSOR_QUANT8_ASYMM) { - version = Version::ANDROID_Q; - } else { - version = Version::ANDROID_OC_MR1; - } - NN_TRY(validateOperationOperandTypes(operands, inputIndexes, inExpectedTypes, - outputIndexes, outExpectedTypes)); - return version; - } - case OperationType::HASHTABLE_LOOKUP: { - NN_VALIDATE(inputIndexes.size() == 3 && outputIndexes.size() == 2) 
- << invalidInOutNumberMessage(3, 2); - auto inputType = operands[inputIndexes[2]].type; - NN_VALIDATE(inputType == OperandType::TENSOR_FLOAT32 || - inputType == OperandType::TENSOR_INT32 || - inputType == OperandType::TENSOR_QUANT8_ASYMM) - << "Unsupported input tensor type for operation " << opType; - std::vector<OperandType> inExpectedTypes = {OperandType::TENSOR_INT32, - OperandType::TENSOR_INT32, inputType}; - std::vector<OperandType> outExpectedTypes = {inputType, - OperandType::TENSOR_QUANT8_ASYMM}; - NN_TRY(validateOperationOperandTypes(operands, inputIndexes, inExpectedTypes, - outputIndexes, outExpectedTypes)); - return Version::ANDROID_OC_MR1; - } - case OperationType::LSH_PROJECTION: { - NN_VALIDATE(inputIndexes.size() == 4 && outputIndexes.size() == 1) - << invalidInOutNumberMessage(4, 1); - auto inputType = operands[inputIndexes[1]].type; - NN_VALIDATE(inputType == OperandType::TENSOR_FLOAT16 || - inputType == OperandType::TENSOR_FLOAT32 || - inputType == OperandType::TENSOR_INT32 || - inputType == OperandType::TENSOR_QUANT8_ASYMM) - << "Unsupported input tensor type for operation " << opType; - auto hashType = operands[inputIndexes[0]].type; - Version version; - std::vector<OperandType> inExpectedTypes; - if (hashType == OperandType::TENSOR_FLOAT16) { - version = Version::ANDROID_Q; - inExpectedTypes = { - OperandType::TENSOR_FLOAT16, - inputType, - OperandType::TENSOR_FLOAT16, - OperandType::INT32, - }; - } else if (hashType == OperandType::TENSOR_FLOAT32) { - version = Version::ANDROID_OC_MR1; - inExpectedTypes = { - OperandType::TENSOR_FLOAT32, - inputType, - OperandType::TENSOR_FLOAT32, - OperandType::INT32, - }; - } else { - NN_VALIDATE_FAIL() << "Unsupported hash tensor type for operation " << opType; - } - std::vector<OperandType> outExpectedTypes = {OperandType::TENSOR_INT32}; - NN_TRY(validateOperationOperandTypes(operands, inputIndexes, inExpectedTypes, - outputIndexes, outExpectedTypes)); - return version; - } - case 
OperationType::BIDIRECTIONAL_SEQUENCE_LSTM: { - const uint32_t kNumOutputs = 2; - const uint32_t kNumOutputsMerged = 1; - const uint32_t kNumOutputsWithState = 6; - const uint32_t kNumOutputsMergedWithState = 5; - NN_VALIDATE(inputIndexes.size() == 61 && - (outputIndexes.size() == kNumOutputs || - outputIndexes.size() == kNumOutputsMerged || - outputIndexes.size() == kNumOutputsWithState || - outputIndexes.size() == kNumOutputsMergedWithState)) - << "Invalid number of input operands (" << inputIndexes.size() - << ", expected 61) or output operands (" << outputIndexes.size() - << ", expected 1, 2, 5 or 6) for operation " << opType; - - std::vector<OperandType> inExpectedTypes; - auto inputType = operands[inputIndexes[0]].type; - NN_VALIDATE(inputType == OperandType::TENSOR_FLOAT32 || - inputType == OperandType::TENSOR_FLOAT16) - << "Unsupported input tensor type for operation " << opType; - - inExpectedTypes = {}; - for (int i = 0; i < 48; ++i) { - inExpectedTypes.push_back(inputType); - } - inExpectedTypes.push_back(OperandType::INT32); - inExpectedTypes.push_back(inputType == OperandType::TENSOR_FLOAT32 - ? OperandType::FLOAT32 - : OperandType::FLOAT16); - inExpectedTypes.push_back(inputType == OperandType::TENSOR_FLOAT32 - ? 
OperandType::FLOAT32 - : OperandType::FLOAT16); - inExpectedTypes.push_back(OperandType::BOOL); - inExpectedTypes.push_back(OperandType::BOOL); - for (int i = 0; i < 8; ++i) { - inExpectedTypes.push_back(inputType); - } - - Version version = Version::ANDROID_Q; - if (outputIndexes.size() == kNumOutputsWithState || - outputIndexes.size() == kNumOutputsMergedWithState) { - version = Version::ANDROID_R; - } - std::vector<OperandType> outExpectedTypes(outputIndexes.size(), inputType); - NN_TRY(validateOperationOperandTypes(operands, inputIndexes, inExpectedTypes, - outputIndexes, outExpectedTypes)); - return version; - } - case OperationType::LSTM: { - NN_VALIDATE((inputIndexes.size() == 23 || inputIndexes.size() == 27) && - outputIndexes.size() == 4) - << "Invalid number of input operands (" << inputIndexes.size() - << ", expected 23 or 27) or output operands (" << outputIndexes.size() - << ", expected 4) for operation " << opType; - std::vector<OperandType> inExpectedTypes; - std::vector<OperandType> outExpectedTypes; - auto inputType = operands[inputIndexes[0]].type; - NN_VALIDATE(inputType == OperandType::TENSOR_FLOAT32 || - inputType == OperandType::TENSOR_FLOAT16) - << "Unsupported input tensor type for operation " << opType; - - Version version = Version::ANDROID_OC_MR1; - inExpectedTypes = {inputType, inputType, inputType, inputType, inputType, - inputType, inputType, inputType, inputType, inputType, - inputType, inputType, inputType, inputType, inputType, - inputType, inputType, inputType, inputType, inputType, - OperandType::INT32}; - if (inputType == OperandType::TENSOR_FLOAT32) { - inExpectedTypes.push_back(OperandType::FLOAT32); - inExpectedTypes.push_back(OperandType::FLOAT32); - } else { - version = Version::ANDROID_Q; - inExpectedTypes.push_back(OperandType::FLOAT16); - inExpectedTypes.push_back(OperandType::FLOAT16); - } - - outExpectedTypes = {inputType, inputType, inputType, inputType}; - if (inputIndexes.size() == 23) { - version = 
combineVersions(version, Version::ANDROID_OC_MR1); - } else { - version = combineVersions(version, Version::ANDROID_Q); - for (int i = 0; i < 4; ++i) { - inExpectedTypes.push_back(inputType); - } - } - NN_TRY(validateOperationOperandTypes(operands, inputIndexes, inExpectedTypes, - outputIndexes, outExpectedTypes)); - return version; - } - case OperationType::QUANTIZED_16BIT_LSTM: { - NN_VALIDATE(inputIndexes.size() == 15 && outputIndexes.size() == 2) - << invalidInOutNumberMessage(15, 2); - std::vector<OperandType> inExpectedTypes = { - OperandType::TENSOR_QUANT8_ASYMM, OperandType::TENSOR_QUANT8_ASYMM, - OperandType::TENSOR_QUANT8_ASYMM, OperandType::TENSOR_QUANT8_ASYMM, - OperandType::TENSOR_QUANT8_ASYMM, OperandType::TENSOR_QUANT8_ASYMM, - OperandType::TENSOR_QUANT8_ASYMM, OperandType::TENSOR_QUANT8_ASYMM, - OperandType::TENSOR_QUANT8_ASYMM, OperandType::TENSOR_INT32, - OperandType::TENSOR_INT32, OperandType::TENSOR_INT32, - OperandType::TENSOR_INT32, OperandType::TENSOR_QUANT16_SYMM, - OperandType::TENSOR_QUANT8_ASYMM}; - std::vector<OperandType> outExpectedTypes = {OperandType::TENSOR_QUANT16_SYMM, - OperandType::TENSOR_QUANT8_ASYMM}; - NN_TRY(validateOperationOperandTypes(operands, inputIndexes, inExpectedTypes, - outputIndexes, outExpectedTypes)); - return Version::ANDROID_Q; - } - case OperationType::RANDOM_MULTINOMIAL: { - NN_VALIDATE(inputIndexes.size() == 3 && outputIndexes.size() == 1) - << invalidInOutNumberMessage(3, 1); - OperandType inputType = operands[inputIndexes[0]].type; - std::vector<OperandType> inExpectedTypes; - if (inputType == OperandType::TENSOR_FLOAT32 || - inputType == OperandType::TENSOR_FLOAT16) { - inExpectedTypes = {inputType, OperandType::INT32, OperandType::TENSOR_INT32}; - } else { - NN_VALIDATE_FAIL() << "Unsupported input tensor type for operation " << opType; - } - std::vector<OperandType> outExpectedTypes = {OperandType::TENSOR_INT32}; - NN_TRY(validateOperationOperandTypes(operands, inputIndexes, inExpectedTypes, - 
outputIndexes, outExpectedTypes)); - return Version::ANDROID_Q; - } - case OperationType::RNN: { - NN_VALIDATE(inputIndexes.size() == 6 && outputIndexes.size() == 2) - << invalidInOutNumberMessage(6, 2); - OperandType inputType = operands[inputIndexes[0]].type; - Version version; - std::vector<OperandType> inExpectedTypes; - std::vector<OperandType> outExpectedTypes; - if (inputType == OperandType::TENSOR_FLOAT32) { - version = Version::ANDROID_OC_MR1; - inExpectedTypes = { - OperandType::TENSOR_FLOAT32, OperandType::TENSOR_FLOAT32, - OperandType::TENSOR_FLOAT32, OperandType::TENSOR_FLOAT32, - OperandType::TENSOR_FLOAT32, OperandType::INT32, - }; - outExpectedTypes = { - OperandType::TENSOR_FLOAT32, - OperandType::TENSOR_FLOAT32, - }; - } else if (inputType == OperandType::TENSOR_FLOAT16) { - version = Version::ANDROID_Q; - inExpectedTypes = { - OperandType::TENSOR_FLOAT16, OperandType::TENSOR_FLOAT16, - OperandType::TENSOR_FLOAT16, OperandType::TENSOR_FLOAT16, - OperandType::TENSOR_FLOAT16, OperandType::INT32, - }; - outExpectedTypes = { - OperandType::TENSOR_FLOAT16, - OperandType::TENSOR_FLOAT16, - }; - } else { - NN_VALIDATE_FAIL() << "Unsupported input tensor type for operation " << opType; - } - NN_TRY(validateOperationOperandTypes(operands, inputIndexes, inExpectedTypes, - outputIndexes, outExpectedTypes)); - return version; - } - case OperationType::SVDF: { - NN_VALIDATE(inputIndexes.size() == 7 && outputIndexes.size() == 2) - << invalidInOutNumberMessage(7, 2); - Version version; - OperandType inputType = operands[inputIndexes[0]].type; - if (inputType == OperandType::TENSOR_FLOAT32) { - version = Version::ANDROID_OC_MR1; - } else if (inputType == OperandType::TENSOR_FLOAT16) { - version = Version::ANDROID_Q; - } else { - NN_VALIDATE_FAIL() << "Unsupported input tensor type for operation " << opType; - } - std::vector<OperandType> inExpectedTypes = { - inputType, inputType, inputType, inputType, - inputType, OperandType::INT32, OperandType::INT32, - }; - 
std::vector<OperandType> outExpectedTypes = {inputType, inputType}; - NN_TRY(validateOperationOperandTypes(operands, inputIndexes, inExpectedTypes, - outputIndexes, outExpectedTypes)); - return version; - } - case OperationType::BATCH_TO_SPACE_ND: { - NN_VALIDATE((inputIndexes.size() == 3 || inputIndexes.size() == 2) && - outputIndexes.size() == 1) - << "Invalid number of input operands (" << inputIndexes.size() - << ", expected 3 or 2) or output operands (" << outputIndexes.size() - << ", expected 1) for operation " << opType; - auto inputType = operands[inputIndexes[0]].type; - Version version = Version::ANDROID_OC_MR1; - std::vector<OperandType> inExpectedTypes; - std::vector<OperandType> outExpectedTypes; - if (inputType == OperandType::TENSOR_FLOAT32) { - inExpectedTypes = { - OperandType::TENSOR_FLOAT32, - OperandType::TENSOR_INT32, - }; - outExpectedTypes = {OperandType::TENSOR_FLOAT32}; - } else if (inputType == OperandType::TENSOR_FLOAT16) { - version = Version::ANDROID_Q; - inExpectedTypes = { - OperandType::TENSOR_FLOAT16, - OperandType::TENSOR_INT32, - }; - outExpectedTypes = {OperandType::TENSOR_FLOAT16}; - } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) { - inExpectedTypes = { - OperandType::TENSOR_QUANT8_ASYMM, - OperandType::TENSOR_INT32, - }; - outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM}; - } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - version = Version::ANDROID_R; - inExpectedTypes = { - OperandType::TENSOR_QUANT8_ASYMM_SIGNED, - OperandType::TENSOR_INT32, - }; - outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM_SIGNED}; - } else { - NN_VALIDATE_FAIL() << "Unsupported input tensor type for operation " << opType; - } - if (inputIndexes.size() == 3) { - inExpectedTypes.push_back(OperandType::BOOL); - version = combineVersions(version, Version::ANDROID_Q); - } else { - version = combineVersions(version, Version::ANDROID_P); - } - NN_TRY(validateOperationOperandTypes(operands, inputIndexes, 
inExpectedTypes, - outputIndexes, outExpectedTypes)); - return version; - } - case OperationType::SPACE_TO_BATCH_ND: { - NN_VALIDATE((inputIndexes.size() == 4 || inputIndexes.size() == 3) && - outputIndexes.size() == 1) - << "Invalid number of input operands (" << inputIndexes.size() - << ", expected 4 or 3) or output operands (" << outputIndexes.size() - << ", expected 1) for operation " << opType; - auto inputType = operands[inputIndexes[0]].type; - Version version = Version::ANDROID_OC_MR1; - std::vector<OperandType> inExpectedTypes; - std::vector<OperandType> outExpectedTypes; - if (inputType == OperandType::TENSOR_FLOAT32) { - inExpectedTypes = { - OperandType::TENSOR_FLOAT32, - OperandType::TENSOR_INT32, - OperandType::TENSOR_INT32, - }; - outExpectedTypes = {OperandType::TENSOR_FLOAT32}; - } else if (inputType == OperandType::TENSOR_FLOAT16) { - version = Version::ANDROID_Q; - inExpectedTypes = { - OperandType::TENSOR_FLOAT16, - OperandType::TENSOR_INT32, - OperandType::TENSOR_INT32, - }; - outExpectedTypes = {OperandType::TENSOR_FLOAT16}; - } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) { - if (operands[inputIndexes[0]].zeroPoint != 0) { - version = Version::ANDROID_Q; - } - inExpectedTypes = { - OperandType::TENSOR_QUANT8_ASYMM, - OperandType::TENSOR_INT32, - OperandType::TENSOR_INT32, - }; - outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM}; - } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - version = Version::ANDROID_R; - inExpectedTypes = { - OperandType::TENSOR_QUANT8_ASYMM_SIGNED, - OperandType::TENSOR_INT32, - OperandType::TENSOR_INT32, - }; - outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM_SIGNED}; - } else { - NN_VALIDATE_FAIL() << "Unsupported input tensor type for operation " << opType; - } - if (inputIndexes.size() == 4) { - inExpectedTypes.push_back(OperandType::BOOL); - version = combineVersions(version, Version::ANDROID_Q); - } else { - version = combineVersions(version, Version::ANDROID_P); - } - 
NN_TRY(validateOperationOperandTypes(operands, inputIndexes, inExpectedTypes, - outputIndexes, outExpectedTypes)); - return version; - } - case OperationType::PAD: { - NN_VALIDATE(inputIndexes.size() == 2 && outputIndexes.size() == 1) - << invalidInOutNumberMessage(2, 1); - auto inputType = operands[inputIndexes[0]].type; - Version version; - std::vector<OperandType> inExpectedTypes; - std::vector<OperandType> outExpectedTypes; - if (inputType == OperandType::TENSOR_FLOAT32) { - version = Version::ANDROID_P; - inExpectedTypes = { - OperandType::TENSOR_FLOAT32, - OperandType::TENSOR_INT32, - }; - outExpectedTypes = {OperandType::TENSOR_FLOAT32}; - } else if (inputType == OperandType::TENSOR_FLOAT16) { - version = Version::ANDROID_Q; - inExpectedTypes = { - OperandType::TENSOR_FLOAT16, - OperandType::TENSOR_INT32, - }; - outExpectedTypes = {OperandType::TENSOR_FLOAT16}; - } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM || - inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - version = Version::ANDROID_R; - } else { - if (operands[inputIndexes[0]].zeroPoint == 0) { - version = Version::ANDROID_P; - } else { - version = Version::ANDROID_Q; - } - } - inExpectedTypes = { - inputType, - OperandType::TENSOR_INT32, - }; - outExpectedTypes = {inputType}; - } else { - NN_VALIDATE_FAIL() << "Unsupported input tensor type for operation " << opType; - } - const auto inputRank = operands[inputIndexes[0]].dimensions.size(); - NN_VALIDATE_LE(inputRank, 4u) - << "Unsupported input tensor rank for operation " << opType; - NN_TRY(validateOperationOperandTypes(operands, inputIndexes, inExpectedTypes, - outputIndexes, outExpectedTypes)); - return version; - } - case OperationType::PAD_V2: { - NN_VALIDATE(inputIndexes.size() == 3 && outputIndexes.size() == 1) - << invalidInOutNumberMessage(3, 1); - auto inputType = operands[inputIndexes[0]].type; - Version version; - std::vector<OperandType> inExpectedTypes; - 
std::vector<OperandType> outExpectedTypes; - if (inputType == OperandType::TENSOR_FLOAT32) { - version = Version::ANDROID_Q; - inExpectedTypes = { - OperandType::TENSOR_FLOAT32, - OperandType::TENSOR_INT32, - OperandType::FLOAT32, - }; - outExpectedTypes = {OperandType::TENSOR_FLOAT32}; - } else if (inputType == OperandType::TENSOR_FLOAT16) { - version = Version::ANDROID_Q; - inExpectedTypes = { - OperandType::TENSOR_FLOAT16, - OperandType::TENSOR_INT32, - OperandType::FLOAT16, - }; - outExpectedTypes = {OperandType::TENSOR_FLOAT16}; - } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM || - inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - version = Version::ANDROID_R; - } else { - version = Version::ANDROID_Q; - } - inExpectedTypes = { - inputType, - OperandType::TENSOR_INT32, - OperandType::INT32, - }; // TODO(b/116699425): Make it UINT8. - outExpectedTypes = {inputType}; - } else { - NN_VALIDATE_FAIL() << "Unsupported input tensor type for operation " << opType; - } - const auto inputRank = operands[inputIndexes[0]].dimensions.size(); - NN_VALIDATE_LE(inputRank, 4u) - << "Unsupported input tensor rank for operation " << opType; - NN_TRY(validateOperationOperandTypes(operands, inputIndexes, inExpectedTypes, - outputIndexes, outExpectedTypes)); - return version; - } - case OperationType::CAST: { - NN_VALIDATE(inputIndexes.size() == 1 && outputIndexes.size() == 1) - << invalidInOutNumberMessage(1, 1); - auto inputOperand = operands[inputIndexes[0]]; - auto outputOperand = operands[outputIndexes[0]]; - auto inputType = inputOperand.type; - auto outputType = outputOperand.type; - Version version; - std::vector<OperandType> inExpectedTypes; - std::vector<OperandType> outExpectedTypes; - if ((inputType == OperandType::TENSOR_FLOAT16 || - inputType == OperandType::TENSOR_FLOAT32 || - inputType == OperandType::TENSOR_INT32 || - inputType == OperandType::TENSOR_QUANT8_ASYMM) && - (outputType == 
OperandType::TENSOR_FLOAT16 || - outputType == OperandType::TENSOR_FLOAT32 || - outputType == OperandType::TENSOR_INT32 || - outputType == OperandType::TENSOR_QUANT8_ASYMM)) { - version = Version::ANDROID_Q; - inExpectedTypes = {inputType}; - outExpectedTypes = {outputType}; - } else if (inputType == OperandType::TENSOR_BOOL8 || - inputType == OperandType::TENSOR_QUANT16_ASYMM || - inputType == OperandType::TENSOR_QUANT16_SYMM || - inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED || - inputType == OperandType::TENSOR_QUANT8_SYMM) { - version = Version::ANDROID_R; - inExpectedTypes = {inputType}; - outExpectedTypes = {inputType}; // Only identity CAST is supported. - } else { - NN_VALIDATE_FAIL() << "Unsupported data type for operation " << opType; - } - // Validate that output shape is equal to input shape if dimensions - // are already known. - auto getNumberOfElements = [](const std::vector<uint32_t>& dims) { - if (dims.empty()) { - return 0; - } - return std::accumulate(dims.begin(), dims.end(), 1, std::multiplies<>()); - }; - NN_VALIDATE(inputOperand.dimensions.empty() || outputOperand.dimensions.empty() || - getNumberOfElements(outputOperand.dimensions) == 0 || - inputOperand.dimensions == outputOperand.dimensions); - NN_TRY(validateOperationOperandTypes(operands, inputIndexes, inExpectedTypes, - outputIndexes, outExpectedTypes)); - return version; - } - case OperationType::MEAN: { - NN_VALIDATE(inputIndexes.size() == 3 && outputIndexes.size() == 1) - << invalidInOutNumberMessage(3, 1); - const auto inputRank = operands[inputIndexes[0]].dimensions.size(); - NN_VALIDATE_LE(inputRank, 4u) - << "Unsupported input tensor rank for operation " << opType; - auto inputType = operands[inputIndexes[0]].type; - Version version; - if (inputType == OperandType::TENSOR_FLOAT32 || - inputType == OperandType::TENSOR_QUANT8_ASYMM) { - version = Version::ANDROID_P; - } else if (inputType == OperandType::TENSOR_FLOAT16) { - version = Version::ANDROID_Q; - } else if 
(inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - version = Version::ANDROID_R; - } else { - NN_VALIDATE_FAIL() << "Unsupported input tensor type for operation " << opType; - } - std::vector<OperandType> inExpectedTypes = {inputType, OperandType::TENSOR_INT32, - OperandType::INT32}; - std::vector<OperandType> outExpectedTypes = {inputType}; - NN_TRY(validateOperationOperandTypes(operands, inputIndexes, inExpectedTypes, - outputIndexes, outExpectedTypes)); - return version; - } - case OperationType::ARGMAX: - case OperationType::ARGMIN: { - NN_VALIDATE(inputIndexes.size() == 2 && outputIndexes.size() == 1) - << invalidInOutNumberMessage(2, 1); - auto inputType = operands[inputIndexes[0]].type; - std::vector<OperandType> inExpectedTypes; - std::vector<OperandType> outExpectedTypes; - if (inputType == OperandType::TENSOR_FLOAT16 || - inputType == OperandType::TENSOR_FLOAT32 || - inputType == OperandType::TENSOR_INT32 || - inputType == OperandType::TENSOR_QUANT8_ASYMM || - inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - inExpectedTypes = {inputType, OperandType::INT32}; - outExpectedTypes = {OperandType::TENSOR_INT32}; - } else { - NN_VALIDATE_FAIL() << "Unsupported input tensor type for operation " << opType; - } - NN_TRY(validateOperationOperandTypes(operands, inputIndexes, inExpectedTypes, - outputIndexes, outExpectedTypes)); - return Version::ANDROID_Q; - } - case OperationType::EXPAND_DIMS: { - NN_VALIDATE(inputIndexes.size() == 2 && outputIndexes.size() == 1) - << invalidInOutNumberMessage(2, 1); - auto inputType = operands[inputIndexes[0]].type; - std::vector<OperandType> inExpectedTypes; - std::vector<OperandType> outExpectedTypes; - if (inputType == OperandType::TENSOR_FLOAT16 || - inputType == OperandType::TENSOR_FLOAT32 || - inputType == OperandType::TENSOR_INT32 || - inputType == OperandType::TENSOR_QUANT8_ASYMM || - inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - inExpectedTypes = {inputType, OperandType::INT32}; - 
outExpectedTypes = {inputType}; - } else { - NN_VALIDATE_FAIL() << "Unsupported input tensor type for operation " << opType; - } - Version version; - if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - version = Version::ANDROID_R; - } else { - version = Version::ANDROID_Q; - } - NN_TRY(validateOperationOperandTypes(operands, inputIndexes, inExpectedTypes, - outputIndexes, outExpectedTypes)); - return version; - } - case OperationType::SPLIT: { - NN_VALIDATE_EQ(inputIndexes.size(), 3u) - << "Invalid number of input operands (" << inputIndexes.size() - << ", expected 3)" << opType; - auto inputType = operands[inputIndexes[0]].type; - NN_VALIDATE(inputType == OperandType::TENSOR_FLOAT16 || - inputType == OperandType::TENSOR_FLOAT32 || - inputType == OperandType::TENSOR_INT32 || - inputType == OperandType::TENSOR_QUANT8_ASYMM || - inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) - << "Unsupported input tensor type for operation " << opType; - Version version; - if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - version = Version::ANDROID_R; - } else { - version = Version::ANDROID_Q; - } - std::vector<OperandType> inExpectedTypes = {inputType, OperandType::INT32, - OperandType::INT32}; - std::vector<OperandType> outExpectedTypes(outputIndexes.size(), inputType); - NN_TRY(validateOperationOperandTypes(operands, inputIndexes, inExpectedTypes, - outputIndexes, outExpectedTypes)); - return version; - } - case OperationType::MAXIMUM: - case OperationType::MINIMUM: { - NN_VALIDATE(inputIndexes.size() == 2 && outputIndexes.size() == 1) - << invalidInOutNumberMessage(2, 1); - std::vector<OperandType> inExpectedTypes; - std::vector<OperandType> outExpectedTypes; - OperandType inputType = operands[inputIndexes[0]].type; - if (inputType == OperandType::TENSOR_FLOAT16 || - inputType == OperandType::TENSOR_FLOAT32 || - inputType == OperandType::TENSOR_INT32 || - inputType == OperandType::TENSOR_QUANT8_ASYMM || - inputType == 
OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - inExpectedTypes = {inputType, inputType}; - outExpectedTypes = {inputType}; - } else { - NN_VALIDATE_FAIL() << "Unsupported input tensor type for operation " << opType; - } - Version version; - if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - version = Version::ANDROID_R; - } else { - version = Version::ANDROID_Q; - } - NN_TRY(validateOperationOperandTypes(operands, inputIndexes, inExpectedTypes, - outputIndexes, outExpectedTypes)); - return version; - } - case OperationType::GROUPED_CONV_2D: { - NN_VALIDATE((inputIndexes.size() == 12 || inputIndexes.size() == 9) && - outputIndexes.size() == 1) - << "Invalid number of input operands (" << inputIndexes.size() - << ", expected 12 or 9) or output operands (" << outputIndexes.size() - << ", expected 1) for operation " << opType; - auto inputType = operands[inputIndexes[0]].type; - auto filterType = operands[inputIndexes[1]].type; - std::vector<OperandType> inExpectedTypes; - std::vector<OperandType> outExpectedTypes; - if (inputType == OperandType::TENSOR_FLOAT32) { - inExpectedTypes = {OperandType::TENSOR_FLOAT32, OperandType::TENSOR_FLOAT32, - OperandType::TENSOR_FLOAT32, OperandType::INT32, - OperandType::INT32, OperandType::INT32, - OperandType::INT32, OperandType::INT32}; - outExpectedTypes = {OperandType::TENSOR_FLOAT32}; - } else if (inputType == OperandType::TENSOR_FLOAT16) { - inExpectedTypes = {OperandType::TENSOR_FLOAT16, OperandType::TENSOR_FLOAT16, - OperandType::TENSOR_FLOAT16, OperandType::INT32, - OperandType::INT32, OperandType::INT32, - OperandType::INT32, OperandType::INT32}; - outExpectedTypes = {OperandType::TENSOR_FLOAT16}; - } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM || - inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - NN_VALIDATE(filterType == inputType || - filterType == OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL) - << "Unsupported filter tensor type for operation " << opType; - - NN_VALIDATE(filterType != 
OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL || - std::get<Operand::SymmPerChannelQuantParams>( - operands[inputIndexes[1]].extraParams) - .channelDim == 0) - << "Unsupported filter tensor channel dimension for operation " << opType; - - inExpectedTypes = { - inputType, filterType, OperandType::TENSOR_INT32, - OperandType::INT32, OperandType::INT32, OperandType::INT32, - OperandType::INT32, OperandType::INT32}; - outExpectedTypes = {inputType}; - } else { - NN_VALIDATE_FAIL() << "Unsupported input tensor type for operation " << opType; - } - - if (inputIndexes.size() == 12) { - std::vector<OperandType> explicitScalarTypes(3, OperandType::INT32); - inExpectedTypes.insert(inExpectedTypes.end(), explicitScalarTypes.begin(), - explicitScalarTypes.end()); - } - inExpectedTypes.push_back(OperandType::BOOL); - Version version; - if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - version = Version::ANDROID_R; - } else { - version = Version::ANDROID_Q; - } - NN_TRY(validateOperationOperandTypes(operands, inputIndexes, inExpectedTypes, - outputIndexes, outExpectedTypes)); - return version; - } - case OperationType::TILE: { - NN_VALIDATE(inputIndexes.size() == 2 && outputIndexes.size() == 1) - << invalidInOutNumberMessage(2, 1); - auto inputType = operands[inputIndexes[0]].type; - std::vector<OperandType> inExpectedTypes; - std::vector<OperandType> outExpectedTypes; - if (inputType == OperandType::TENSOR_FLOAT16 || - inputType == OperandType::TENSOR_FLOAT32 || - inputType == OperandType::TENSOR_INT32 || - inputType == OperandType::TENSOR_QUANT8_ASYMM || - inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - inExpectedTypes = {inputType, OperandType::TENSOR_INT32}; - outExpectedTypes = {inputType}; - } else { - NN_VALIDATE_FAIL() << "Unsupported input tensor type for operation " << opType; - } - Version version; - if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - version = Version::ANDROID_R; - } else { - version = Version::ANDROID_Q; - } - 
NN_TRY(validateOperationOperandTypes(operands, inputIndexes, inExpectedTypes, - outputIndexes, outExpectedTypes)); - return version; - } - case OperationType::POW: { - NN_VALIDATE(inputIndexes.size() == 2 && outputIndexes.size() == 1) - << invalidInOutNumberMessage(2, 1); - auto inputType = operands[inputIndexes[0]].type; - std::vector<OperandType> inExpectedTypes; - std::vector<OperandType> outExpectedTypes; - if (inputType == OperandType::TENSOR_FLOAT16 || - inputType == OperandType::TENSOR_FLOAT32) { - inExpectedTypes = {inputType, inputType}; - outExpectedTypes = {inputType}; - } else { - NN_VALIDATE_FAIL() << "Unsupported input tensor type for operation " << opType; - } - Version version; - if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - version = Version::ANDROID_R; - } else { - version = Version::ANDROID_Q; - } - NN_TRY(validateOperationOperandTypes(operands, inputIndexes, inExpectedTypes, - outputIndexes, outExpectedTypes)); - return version; - } - case OperationType::IF: { - return validateIfOperation(inputIndexes, outputIndexes, operands, subgraphs); - } - case OperationType::WHILE: { - return validateWhileOperation(inputIndexes, outputIndexes, operands, subgraphs); - } - default: { - const OperationRegistration* operationRegistration = - BuiltinOperationResolver::get()->findOperation( - static_cast<OperationType>(opType)); - // TODO: return ErrorStatus::UNEXPECTED_NULL - NN_VALIDATE(operationRegistration != nullptr) << opType << " not registered"; - // TODO: return ErrorStatus::UNEXPECTED_NULL - NN_VALIDATE(operationRegistration->validate != nullptr) - << "Incomplete operation registration: " << opType; - - OperationValidationContext context(operationRegistration->name, inputIndexes, - outputIndexes, operands); - return operationRegistration->validate(&context); - } - } -} - -Result<Version> validateOperationIncludingOperandVersions( - const Operation& operation, const std::vector<Operand>& operands, - const std::vector<Version>& 
operandVersions, - const std::vector<Model::Subgraph>& subgraphs) { - auto version = NN_TRY(validateOperationButNotOperandsImpl(operation, operands, subgraphs)); - for (uint32_t index : operation.inputs) { - version = combineVersions(version, operandVersions[index]); - } - for (uint32_t index : operation.outputs) { - version = combineVersions(version, operandVersions[index]); - } - return version; -} - -} // anonymous namespace - -// Below this point are all the functions that are declared in Validation.h. The convention of this -// file is to keep the function bodies of the functions declared in Validation.h minimal, meaning -// that most functions below simply redirect to one of the functions defined above in the anonymous -// namespace. If there is a function name clash between one of the functions below and one of the -// functions above, the function in the anonymous namespace is appended with "Impl". - -Version combineVersions(Version lhs, Version rhs) { - return std::max<Version>(lhs, rhs); -} - -Result<Version> validate(const DeviceStatus& deviceStatus) { - return validateDeviceStatus(deviceStatus); -} - -Result<Version> validate(const ExecutionPreference& executionPreference) { - return validateExecutionPreference(executionPreference); -} - -Result<Version> validate(const DeviceType& deviceType) { - return validateDeviceType(deviceType); -} - -Result<Version> validate(const MeasureTiming& measureTiming) { - return validateMeasureTiming(measureTiming); -} - -Result<Version> validate(const OperandType& operandType) { - return validateOperandType(operandType); -} - -Result<Version> validate(const Priority& priority) { - return validatePriority(priority); -} - -Result<Version> validate(const ErrorStatus& errorStatus) { - return validateErrorStatus(errorStatus); -} - -Result<Version> validate(const FusedActivationFunc& activation) { - return validateFusedActivationFunc(activation); -} - -Result<Version> validate(const OutputShape& outputShape) { - return 
validateOutputShape(outputShape); -} - -Result<Version> validate(const Timing& timing) { - return validateTiming(timing); -} - -Result<Version> validate(const Capabilities& capabilities) { - return validateCapabilities(capabilities); -} - -Result<Version> validate(const Extension& extension) { - return validateExtension(extension); -} - -Result<Version> validate(const SharedHandle& handle) { - return validateSharedHandle(handle); -} - -Result<Version> validate(const SharedMemory& memory) { - return validateSharedMemory(memory); -} - -Result<Version> validate(const Model& model) { - return validateModel(model); -} - -Result<Version> validate(const BufferDesc& bufferDesc) { - return validateBufferDesc(bufferDesc); -} - -Result<Version> validate(const BufferRole& bufferRole) { - return validateBufferRole(bufferRole); -} - -Result<Version> validate(const Request& request) { - return validateRequest(request); -} - -Result<Version> validate(const OptionalTimePoint& optionalTimePoint) { - return validateOptionalTimePoint(optionalTimePoint); -} - -Result<Version> validate(const OptionalDuration& optionalTimeoutDuration) { - return validateOptionalTimeoutDuration(optionalTimeoutDuration); -} - -Result<Version> validate(const CacheToken& cacheToken) { - return validateCacheToken(cacheToken); -} - -Result<Version> validate(const SyncFence& syncFence) { - return validateSyncFence(syncFence); -} - -Result<Version> validate(const std::vector<OutputShape>& outputShapes) { - return validateVector(outputShapes, validateOutputShape); -} - -Result<Version> validate(const std::vector<Extension>& extensions) { - return validateExtensions(extensions); -} - -Result<Version> validate(const std::vector<SharedHandle>& handles) { - return validateVector(handles, validateSharedHandle); -} - -Result<Version> validate(const std::vector<BufferRole>& bufferRoles) { - return validateVector(bufferRoles, validateBufferRole); -} - -Result<Version> validate(const std::vector<SyncFence>& syncFences) { 
- return validateVector(syncFences, validateSyncFence); -} - -Result<Version> validateRequestForModel(const Request& request, const Model& model, - bool allowUnspecifiedOutput) { - return validateRequestForModelImpl(request, model, allowUnspecifiedOutput); -} - -Result<Version> validateMemoryDesc( - const BufferDesc& desc, const std::vector<SharedPreparedModel>& preparedModels, - const std::vector<BufferRole>& inputRoles, const std::vector<BufferRole>& outputRoles, - const std::function<const Model*(const SharedPreparedModel&)>& getModel, - std::set<PreparedModelRole>* preparedModelRoles, Operand* combinedOperand) { - return validateMemoryDescImpl(desc, preparedModels, inputRoles, outputRoles, getModel, - preparedModelRoles, combinedOperand); -} - -Result<void> validateOperandSymmPerChannelQuantParams( - const Operand& operand, const Operand::SymmPerChannelQuantParams& channelQuant, - const char* tag) { - return validateOperandSymmPerChannelQuantParamsImpl(operand, channelQuant, tag); -} - -Result<void> validateOperandType(const Operand& type, - const Extension::OperandTypeInformation* extensionOperandTypeInfo, - const char* tag, bool allowPartial) { - return validateOperandTypeImpl(type, extensionOperandTypeInfo, tag, allowPartial); -} - -Result<void> validateOperandList(const std::vector<uint32_t>& list, size_t operandCount, - const char* tag) { - return validateOperandListImpl(list, operandCount, tag); -} - -Result<void> validateOperationButNotOperands(const Operation& operation, - const std::vector<Operand>& operands, - const std::vector<Model::Subgraph>& subgraphs) { - NN_TRY(validateOperationButNotOperandsImpl(operation, operands, subgraphs)); - return {}; -} - -struct SubgraphVersionCache { - std::vector<std::optional<Version>> cache; -}; - -std::unique_ptr<SubgraphVersionCache, void (*)(SubgraphVersionCache*)> createSubgraphVersionCache( - size_t referencedSubgraphCount) { - auto subgraphVersionCache = std::make_unique<SubgraphVersionCache>(); - 
subgraphVersionCache->cache.resize(referencedSubgraphCount); - constexpr auto deleter = [](SubgraphVersionCache* ptr) { delete ptr; }; - return {subgraphVersionCache.release(), deleter}; -} - -Result<Version> validateOperationAndAnythingItDependsOn( - const Operation& operation, const std::vector<Operand>& operands, size_t operandValuesSize, - const std::vector<size_t>& poolSizes, const std::vector<Model::Subgraph>& subgraphs, - SubgraphVersionCache* subgraphVersionCache) { - CHECK(subgraphVersionCache != nullptr); - std::vector<Version> operandVersions(operands.size(), Version::ANDROID_OC_MR1); - for (uint32_t index : operation.inputs) { - NN_VALIDATE_LT(index, operands.size()); - const Operand& operand = operands[index]; - operandVersions[index] = NN_TRY(validateOperandAndAnythingItDependsOn( - operand, operandValuesSize, poolSizes, subgraphs, subgraphVersionCache)); - } - for (uint32_t index : operation.outputs) { - NN_VALIDATE_LT(index, operands.size()); - const Operand& operand = operands[index]; - operandVersions[index] = NN_TRY(validateOperandAndAnythingItDependsOn( - operand, operandValuesSize, poolSizes, subgraphs, subgraphVersionCache)); - } - return validateOperationIncludingOperandVersions(operation, operands, operandVersions, - subgraphs); -} - -Result<Version> validateOperandAndAnythingItDependsOn(const Operand& operand, - size_t operandValuesSize, - const std::vector<size_t>& poolSizes, - const std::vector<Model::Subgraph>& subgraphs, - SubgraphVersionCache* subgraphVersionCache) { - CHECK(subgraphVersionCache != nullptr); - return validateOperand(operand, operandValuesSize, poolSizes, subgraphs, - &subgraphVersionCache->cache); -} - -} // namespace android::nn
diff --git a/common/include/AidlBufferTracker.h b/common/include/AidlBufferTracker.h deleted file mode 100644 index e7afa5e..0000000 --- a/common/include/AidlBufferTracker.h +++ /dev/null
@@ -1,121 +0,0 @@ -/* - * Copyright (C) 2021 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef ANDROID_FRAMEWORKS_ML_NN_COMMON_AIDL_BUFFER_TRACKER_H -#define ANDROID_FRAMEWORKS_ML_NN_COMMON_AIDL_BUFFER_TRACKER_H - -#include <android-base/macros.h> -#include <android-base/thread_annotations.h> - -#include <map> -#include <memory> -#include <mutex> -#include <set> -#include <stack> -#include <utility> -#include <vector> - -#include "AidlHalInterfaces.h" -#include "AidlValidateHal.h" -#include "CpuExecutor.h" - -namespace android::nn { - -// This class manages a CPU buffer allocated on heap and provides validation methods. -class AidlManagedBuffer { - public: - static std::shared_ptr<AidlManagedBuffer> create(uint32_t size, - std::set<AidlHalPreparedModelRole> roles, - const Operand& operand); - - // Prefer AidlManagedBuffer::create. - AidlManagedBuffer(std::unique_ptr<uint8_t[]> buffer, uint32_t size, - std::set<AidlHalPreparedModelRole> roles, const Operand& operand); - - RunTimePoolInfo createRunTimePoolInfo() const { - return RunTimePoolInfo::createFromExistingBuffer(kBuffer.get(), kSize); - } - - // "poolIndex" is the index of this buffer in the request.pools. - ErrorStatus validateRequest(uint32_t poolIndex, const Request& request, - const aidl_hal::IPreparedModel* preparedModel) const; - - // "size" is the byte size of the Memory provided to the copyFrom or copyTo method. 
- ErrorStatus validateCopyFrom(const std::vector<uint32_t>& dimensions, uint32_t size) const; - ErrorStatus validateCopyTo(uint32_t size) const; - - bool updateDimensions(const std::vector<uint32_t>& dimensions); - void setInitialized(bool initialized); - - private: - mutable std::mutex mMutex; - const std::unique_ptr<uint8_t[]> kBuffer; - const uint32_t kSize; - const std::set<AidlHalPreparedModelRole> kRoles; - const OperandType kOperandType; - const std::vector<uint32_t> kInitialDimensions; - std::vector<uint32_t> mUpdatedDimensions GUARDED_BY(mMutex); - bool mInitialized GUARDED_BY(mMutex) = false; -}; - -// Keep track of all AidlManagedBuffers and assign each with a unique token. -class AidlBufferTracker : public std::enable_shared_from_this<AidlBufferTracker> { - DISALLOW_COPY_AND_ASSIGN(AidlBufferTracker); - - public: - // A RAII class to help manage the lifetime of the token. - // It is only supposed to be constructed in AidlBufferTracker::add. - class Token { - DISALLOW_COPY_AND_ASSIGN(Token); - - public: - Token(uint32_t token, std::shared_ptr<AidlBufferTracker> tracker) - : kToken(token), kBufferTracker(std::move(tracker)) {} - ~Token() { kBufferTracker->free(kToken); } - uint32_t get() const { return kToken; } - - private: - const uint32_t kToken; - const std::shared_ptr<AidlBufferTracker> kBufferTracker; - }; - - // The factory of AidlBufferTracker. This ensures that the AidlBufferTracker is always managed - // by a shared_ptr. - static std::shared_ptr<AidlBufferTracker> create() { - return std::make_shared<AidlBufferTracker>(); - } - - // Prefer AidlBufferTracker::create. 
- AidlBufferTracker() : mTokenToBuffers(1) {} - - std::unique_ptr<Token> add(std::shared_ptr<AidlManagedBuffer> buffer); - std::shared_ptr<AidlManagedBuffer> get(uint32_t token) const; - - private: - void free(uint32_t token); - - mutable std::mutex mMutex; - std::stack<uint32_t, std::vector<uint32_t>> mFreeTokens GUARDED_BY(mMutex); - - // Since the tokens are allocated in a non-sparse way, we use a vector to represent the mapping. - // The index of the vector is the token. When the token gets freed, the corresponding entry is - // set to nullptr. mTokenToBuffers[0] is always set to nullptr because 0 is an invalid token. - std::vector<std::shared_ptr<AidlManagedBuffer>> mTokenToBuffers GUARDED_BY(mMutex); -}; - -} // namespace android::nn - -#endif // ANDROID_FRAMEWORKS_ML_NN_COMMON_AIDL_BUFFER_TRACKER_H
diff --git a/common/include/AidlHalInterfaces.h b/common/include/AidlHalInterfaces.h deleted file mode 100644 index 28379bb..0000000 --- a/common/include/AidlHalInterfaces.h +++ /dev/null
@@ -1,72 +0,0 @@ -/* - * Copyright (C) 2021 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef ANDROID_FRAMEWORKS_ML_NN_COMMON_AIDL_HAL_INTERFACES_H -#define ANDROID_FRAMEWORKS_ML_NN_COMMON_AIDL_HAL_INTERFACES_H - -#include <aidl/android/hardware/neuralnetworks/BnBuffer.h> -#include <aidl/android/hardware/neuralnetworks/BnBurst.h> -#include <aidl/android/hardware/neuralnetworks/BnDevice.h> -#include <aidl/android/hardware/neuralnetworks/BnFencedExecutionCallback.h> -#include <aidl/android/hardware/neuralnetworks/BnPreparedModel.h> -#include <aidl/android/hardware/neuralnetworks/BnPreparedModelCallback.h> -#include <aidl/android/hardware/neuralnetworks/BufferDesc.h> -#include <aidl/android/hardware/neuralnetworks/BufferRole.h> -#include <aidl/android/hardware/neuralnetworks/Capabilities.h> -#include <aidl/android/hardware/neuralnetworks/DataLocation.h> -#include <aidl/android/hardware/neuralnetworks/DeviceBuffer.h> -#include <aidl/android/hardware/neuralnetworks/DeviceType.h> -#include <aidl/android/hardware/neuralnetworks/ErrorStatus.h> -#include <aidl/android/hardware/neuralnetworks/ExecutionPreference.h> -#include <aidl/android/hardware/neuralnetworks/Extension.h> -#include <aidl/android/hardware/neuralnetworks/ExtensionNameAndPrefix.h> -#include <aidl/android/hardware/neuralnetworks/ExtensionOperandTypeInformation.h> -#include <aidl/android/hardware/neuralnetworks/FusedActivationFunc.h> -#include 
<aidl/android/hardware/neuralnetworks/IBuffer.h> -#include <aidl/android/hardware/neuralnetworks/IDevice.h> -#include <aidl/android/hardware/neuralnetworks/IFencedExecutionCallback.h> -#include <aidl/android/hardware/neuralnetworks/IPreparedModel.h> -#include <aidl/android/hardware/neuralnetworks/IPreparedModelCallback.h> -#include <aidl/android/hardware/neuralnetworks/IPreparedModelParcel.h> -#include <aidl/android/hardware/neuralnetworks/Memory.h> -#include <aidl/android/hardware/neuralnetworks/Model.h> -#include <aidl/android/hardware/neuralnetworks/NumberOfCacheFiles.h> -#include <aidl/android/hardware/neuralnetworks/Operand.h> -#include <aidl/android/hardware/neuralnetworks/OperandExtraParams.h> -#include <aidl/android/hardware/neuralnetworks/OperandLifeTime.h> -#include <aidl/android/hardware/neuralnetworks/OperandPerformance.h> -#include <aidl/android/hardware/neuralnetworks/OperandType.h> -#include <aidl/android/hardware/neuralnetworks/Operation.h> -#include <aidl/android/hardware/neuralnetworks/OperationType.h> -#include <aidl/android/hardware/neuralnetworks/OutputShape.h> -#include <aidl/android/hardware/neuralnetworks/PerformanceInfo.h> -#include <aidl/android/hardware/neuralnetworks/Priority.h> -#include <aidl/android/hardware/neuralnetworks/Request.h> -#include <aidl/android/hardware/neuralnetworks/RequestArgument.h> -#include <aidl/android/hardware/neuralnetworks/RequestMemoryPool.h> -#include <aidl/android/hardware/neuralnetworks/Subgraph.h> -#include <aidl/android/hardware/neuralnetworks/SymmPerChannelQuantParams.h> -#include <aidl/android/hardware/neuralnetworks/Timing.h> - -namespace android::nn { - -namespace aidl_hal = ::aidl::android::hardware::neuralnetworks; - -inline constexpr aidl_hal::Priority kDefaultPriorityAidl = aidl_hal::Priority::MEDIUM; - -} // namespace android::nn - -#endif // ANDROID_FRAMEWORKS_ML_NN_COMMON_AIDL_HAL_INTERFACES_H
diff --git a/common/include/AidlHalUtils.h b/common/include/AidlHalUtils.h deleted file mode 100644 index 1053603..0000000 --- a/common/include/AidlHalUtils.h +++ /dev/null
@@ -1,51 +0,0 @@ -/* - * Copyright (C) 2021 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef ANDROID_FRAMEWORKS_ML_NN_COMMON_AIDL_HAL_UTILS_H -#define ANDROID_FRAMEWORKS_ML_NN_COMMON_AIDL_HAL_UTILS_H - -#include <vector> - -#include "AidlHalInterfaces.h" - -namespace android { -namespace nn { - -// Return a vector with one entry for each non-extension OperandType except -// SUBGRAPH, set to the specified PerformanceInfo value. The vector will be -// sorted by OperandType. -// -// Control flow (OperandType::SUBGRAPH) operation performance is specified -// separately using Capabilities::ifPerformance and -// Capabilities::whilePerformance. -std::vector<aidl_hal::OperandPerformance> nonExtensionOperandPerformance( - aidl_hal::PerformanceInfo perf); - -// Update the vector entry corresponding to the specified OperandType with the -// specified PerformanceInfo value. The vector must already have an entry for -// that OperandType, and must be sorted by OperandType. -void update(std::vector<aidl_hal::OperandPerformance>* operandPerformance, - aidl_hal::OperandType type, aidl_hal::PerformanceInfo perf); - -// Returns true if an operand type is an extension type. -bool isExtensionOperandType(aidl_hal::OperandType type); - -aidl_hal::ErrorStatus convertResultCodeToAidlErrorStatus(int resultCode); - -} // namespace nn -} // namespace android - -#endif // ANDROID_FRAMEWORKS_ML_NN_COMMON_AIDL_HAL_UTILS_H
diff --git a/common/include/AidlValidateHal.h b/common/include/AidlValidateHal.h deleted file mode 100644 index 0354631..0000000 --- a/common/include/AidlValidateHal.h +++ /dev/null
@@ -1,46 +0,0 @@ -/* - * Copyright (C) 2021 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef ANDROID_FRAMEWORKS_ML_NN_COMMON_AIDL_VALIDATE_HAL_H -#define ANDROID_FRAMEWORKS_ML_NN_COMMON_AIDL_VALIDATE_HAL_H - -#include <memory> -#include <set> -#include <tuple> -#include <vector> - -#include "AidlHalInterfaces.h" -#include "nnapi/TypeUtils.h" -#include "nnapi/Validation.h" - -namespace android { -namespace nn { - -using AidlHalPreparedModelRole = std::tuple<const aidl_hal::IPreparedModel*, IOType, uint32_t>; - -bool validateMemoryDesc( - const aidl_hal::BufferDesc& desc, - const std::vector<std::shared_ptr<aidl_hal::IPreparedModel>>& preparedModels, - const std::vector<aidl_hal::BufferRole>& inputRoles, - const std::vector<aidl_hal::BufferRole>& outputRoles, - std::function<const aidl_hal::Model*(const std::shared_ptr<aidl_hal::IPreparedModel>&)> - getModel, - std::set<AidlHalPreparedModelRole>* preparedModelRoles, aidl_hal::Operand* combinedOperand); - -} // namespace nn -} // namespace android - -#endif // ANDROID_FRAMEWORKS_ML_NN_COMMON_AIDL_VALIDATE_HAL_H
diff --git a/common/include/AndroidVersionUtil.h b/common/include/AndroidVersionUtil.h deleted file mode 100644 index dea8e08..0000000 --- a/common/include/AndroidVersionUtil.h +++ /dev/null
@@ -1,24 +0,0 @@ -/* - * Copyright (C) 2021 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef ANDROID_FRAMEWORKS_ML_NN_COMMON_ANDROID_VERSION_UTILS_H -#define ANDROID_FRAMEWORKS_ML_NN_COMMON_ANDROID_VERSION_UTILS_H - -#include <android/api-level.h> - -#define __NNAPI_AIDL_MIN_ANDROID_API__ __ANDROID_API_S__ - -#endif // ANDROID_FRAMEWORKS_ML_NN_COMMON_ANDROID_VERSION_UTILS_H
diff --git a/common/include/BufferTracker.h b/common/include/BufferTracker.h index 84e037e..feabda6 100644 --- a/common/include/BufferTracker.h +++ b/common/include/BufferTracker.h
@@ -18,7 +18,6 @@ #define ANDROID_FRAMEWORKS_ML_NN_COMMON_BUFFER_TRACKER_H #include <android-base/macros.h> -#include <android-base/thread_annotations.h> #include <map> #include <memory> @@ -29,9 +28,8 @@ #include <vector> #include "CpuExecutor.h" -#include "LegacyUtils.h" -#include "nnapi/Types.h" -#include "nnapi/Validation.h" +#include "HalInterfaces.h" +#include "Utils.h" namespace android::nn { @@ -39,25 +37,25 @@ class ManagedBuffer { public: static std::shared_ptr<ManagedBuffer> create(uint32_t size, std::set<PreparedModelRole> roles, - const Operand& operand); + const hal::Operand& operand); // Prefer ManagedBuffer::create. ManagedBuffer(std::unique_ptr<uint8_t[]> buffer, uint32_t size, - std::set<PreparedModelRole> roles, const Operand& operand); + std::set<PreparedModelRole> roles, const hal::Operand& operand); RunTimePoolInfo createRunTimePoolInfo() const { return RunTimePoolInfo::createFromExistingBuffer(kBuffer.get(), kSize); } // "poolIndex" is the index of this buffer in the request.pools. - ErrorStatus validateRequest(uint32_t poolIndex, const Request& request, - const IPreparedModel* preparedModel) const; + hal::ErrorStatus validateRequest(uint32_t poolIndex, const hal::Request& request, + const hal::IPreparedModel* preparedModel) const; - // "size" is the byte size of the Memory provided to the copyFrom or copyTo method. - ErrorStatus validateCopyFrom(const Dimensions& dimensions, uint32_t size) const; - ErrorStatus validateCopyTo(uint32_t size) const; + // "size" is the byte size of the hidl_memory provided to the copyFrom or copyTo method. 
+ hal::ErrorStatus validateCopyFrom(const std::vector<uint32_t>& dimensions, uint32_t size) const; + hal::ErrorStatus validateCopyTo(uint32_t size) const; - bool updateDimensions(const Dimensions& dimensions); + bool updateDimensions(const std::vector<uint32_t>& dimensions); void setInitialized(bool initialized); private: @@ -65,10 +63,10 @@ const std::unique_ptr<uint8_t[]> kBuffer; const uint32_t kSize; const std::set<PreparedModelRole> kRoles; - const OperandType kOperandType; - const Dimensions kInitialDimensions; - Dimensions mUpdatedDimensions GUARDED_BY(mMutex); - bool mInitialized GUARDED_BY(mMutex) = false; + const hal::OperandType kOperandType; + const std::vector<uint32_t> kInitialDimensions; + std::vector<uint32_t> mUpdatedDimensions; + bool mInitialized = false; }; // Keep track of all ManagedBuffers and assign each with a unique token. @@ -82,13 +80,13 @@ DISALLOW_COPY_AND_ASSIGN(Token); public: - Token(Request::MemoryDomainToken token, std::shared_ptr<BufferTracker> tracker) + Token(uint32_t token, std::shared_ptr<BufferTracker> tracker) : kToken(token), kBufferTracker(std::move(tracker)) {} ~Token() { kBufferTracker->free(kToken); } - Request::MemoryDomainToken get() const { return kToken; } + uint32_t get() const { return kToken; } private: - const Request::MemoryDomainToken kToken; + const uint32_t kToken; const std::shared_ptr<BufferTracker> kBufferTracker; }; @@ -97,22 +95,21 @@ static std::shared_ptr<BufferTracker> create() { return std::make_shared<BufferTracker>(); } // Prefer BufferTracker::create. 
- BufferTracker(); + BufferTracker() : mTokenToBuffers(1) {} std::unique_ptr<Token> add(std::shared_ptr<ManagedBuffer> buffer); - std::shared_ptr<ManagedBuffer> get(Request::MemoryDomainToken token) const; + std::shared_ptr<ManagedBuffer> get(uint32_t token) const; private: - void free(Request::MemoryDomainToken token); + void free(uint32_t token); mutable std::mutex mMutex; - std::stack<Request::MemoryDomainToken, std::vector<Request::MemoryDomainToken>> mFreeTokens - GUARDED_BY(mMutex); + std::stack<uint32_t, std::vector<uint32_t>> mFreeTokens; // Since the tokens are allocated in a non-sparse way, we use a vector to represent the mapping. // The index of the vector is the token. When the token gets freed, the corresponding entry is // set to nullptr. mTokenToBuffers[0] is always set to nullptr because 0 is an invalid token. - std::vector<std::shared_ptr<ManagedBuffer>> mTokenToBuffers GUARDED_BY(mMutex); + std::vector<std::shared_ptr<ManagedBuffer>> mTokenToBuffers; }; } // namespace android::nn
diff --git a/common/include/ControlFlow.h b/common/include/ControlFlow.h index 379cb11..9149903 100644 --- a/common/include/ControlFlow.h +++ b/common/include/ControlFlow.h
@@ -17,11 +17,6 @@ #ifndef ANDROID_FRAMEWORKS_ML_NN_COMMON_CONTROLFLOW_H #define ANDROID_FRAMEWORKS_ML_NN_COMMON_CONTROLFLOW_H -#include <chrono> -#include <cstdint> - -#include "nnapi/Types.h" - namespace android { namespace nn { namespace operation_if { @@ -40,10 +35,8 @@ constexpr uint32_t kFirstInput = 2; // See ANeuralNetworksExecution_setLoopTimeout. -constexpr uint64_t kTimeoutNsDefault = - std::chrono::duration_cast<std::chrono::nanoseconds>(kLoopTimeoutDefault).count(); -constexpr uint64_t kTimeoutNsMaximum = - std::chrono::duration_cast<std::chrono::nanoseconds>(kLoopTimeoutMaximum).count(); +constexpr uint64_t kTimeoutNsDefault = 2'000'000'000; +constexpr uint64_t kTimeoutNsMaximum = 15'000'000'000; } // namespace operation_while } // namespace nn
diff --git a/common/include/CpuExecutor.h b/common/include/CpuExecutor.h index aad25fc..edb2332 100644 --- a/common/include/CpuExecutor.h +++ b/common/include/CpuExecutor.h
@@ -18,7 +18,6 @@ #define ANDROID_FRAMEWORKS_ML_NN_COMMON_CPU_EXECUTOR_H #include <android-base/macros.h> -#include <nnapi/Types.h> #include <algorithm> #include <memory> @@ -26,9 +25,10 @@ #include <vector> #include "ControlFlow.h" -#include "LegacyUtils.h" +#include "HalInterfaces.h" #include "OperationResolver.h" #include "OperationsUtils.h" +#include "Utils.h" namespace android { namespace nn { @@ -37,7 +37,7 @@ // may change during execution. struct RunTimeOperandInfo { // TODO Storing the type here is redundant, as it won't change during execution. - OperandType type; + hal::OperandType type; // The type and dimensions of the operand. The dimensions can // change at runtime. We include the type because it's useful // to pass together with the dimension to the functions implementing @@ -64,14 +64,14 @@ // The length of the buffer. uint32_t length; // Whether this is a temporary variable, a model input, a constant, etc. - Operand::LifeTime lifetime; + hal::OperandLifeTime lifetime; // Keeps track of how many operations have yet to make use // of this temporary variable. When the count is decremented to 0, // we free the buffer. For non-temporary variables, this count is // always 0. uint32_t numberOfUsesLeft; - Operand::ExtraParams extraParams; + hal::OperandExtraParams extraParams; Shape shape() const { return { @@ -84,7 +84,7 @@ } bool isSufficient() const { - if (isExtension(type)) { + if (isExtensionOperandType(type)) { // We don't know sizes of extension types. 
return true; } @@ -98,19 +98,19 @@ // may reference the same region of memory by either: // (1) copying an existing RunTimePoolInfo object, or // (2) creating multiple RunTimePoolInfo objects from the same memory resource -// (e.g., "createFromMemory" or "createFromExistingBuffer") +// (e.g., "createFromHidlMemory" or "createFromExistingBuffer") // -// If the underlying region of memory is mapped by "createFromMemory", the +// If the underlying region of memory is mapped by "createFromHidlMemory", the // mapping will be sustained until it is no longer referenced by any // RunTimePoolInfo objects. class RunTimePoolInfo { public: - static std::optional<RunTimePoolInfo> createFromMemory(const SharedMemory& memory); + static std::optional<RunTimePoolInfo> createFromHidlMemory(const hal::hidl_memory& hidlMemory); static RunTimePoolInfo createFromExistingBuffer(uint8_t* buffer, uint32_t size = 0); uint8_t* getBuffer() const; bool flush() const; - const SharedMemory& getMemory() const; + const hal::hidl_memory& getHidlMemory() const; uint32_t getSize() const; private: @@ -120,11 +120,11 @@ std::shared_ptr<const RunTimePoolInfoImpl> mImpl; }; -bool setRunTimePoolInfosFromCanonicalMemories(std::vector<RunTimePoolInfo>* poolInfos, - const std::vector<SharedMemory>& pools); +bool setRunTimePoolInfosFromHidlMemories(std::vector<RunTimePoolInfo>* poolInfos, + const hal::hidl_vec<hal::hidl_memory>& pools); bool setRunTimePoolInfosFromMemoryPools(std::vector<RunTimePoolInfo>* poolInfos, - const std::vector<Request::MemoryPool>& pools); + const hal::hidl_vec<hal::Request::MemoryPool>& pools); // This class is used to execute a model on the CPU. class CpuExecutor { @@ -146,45 +146,45 @@ // specified in the constructor. // The model must outlive the executor. We prevent it from being modified // while this is executing. 
- int run(const Model& model, const Request& request, + int run(const hal::Model& model, const hal::Request& request, const std::vector<RunTimePoolInfo>& modelPoolInfos, const std::vector<RunTimePoolInfo>& requestPoolInfos); - const std::vector<OutputShape>& getOutputShapes() const { + const std::vector<hal::OutputShape>& getOutputShapes() const { CHECK(mFinished) << "getOutputShapes() called by an unfinished CpuExecutor."; return mOutputShapes; } - void setDeadline(const TimePoint& deadline) { mDeadline = deadline; } + void setDeadline(const Deadline& deadline) { mDeadline = deadline; } void setLoopTimeout(uint64_t duration) { mLoopTimeoutDuration = duration; } private: // Creates runtime info from what's in the model. - std::vector<RunTimeOperandInfo> initializeRunTimeInfo(const Model::Subgraph& subgraph); + std::vector<RunTimeOperandInfo> initializeRunTimeInfo(const hal::Subgraph& subgraph); // Adjusts the runtime info for the arguments passed to the model, // modifying the buffer location, and possibly the dimensions. void updateForArguments(const std::vector<uint32_t>& indexes, - const std::vector<Request::Argument>& arguments, + const hal::hidl_vec<hal::RequestArgument>& arguments, const std::vector<RunTimePoolInfo>& requestPoolInfos, RunTimeOperandInfo* operands); // Runs one subgraph. - int executeSubgraph(const Model::Subgraph& subgraph, RunTimeOperandInfo* operands); + int executeSubgraph(const hal::Subgraph& subgraph, RunTimeOperandInfo* operands); // Runs one operation of the graph. 
- int executeOperation(const Operation& operation, RunTimeOperandInfo* operands); - int executeIfOperation(const Operation& operation, RunTimeOperandInfo* operands); - int executeWhileOperation(const Operation& operation, RunTimeOperandInfo* operands); + int executeOperation(const hal::Operation& operation, RunTimeOperandInfo* operands); + int executeIfOperation(const hal::Operation& operation, RunTimeOperandInfo* operands); + int executeWhileOperation(const hal::Operation& operation, RunTimeOperandInfo* operands); void setOutputShapes(const std::vector<uint32_t>& outputIndexes, const std::vector<RunTimeOperandInfo>& operands); // Compile-time operand value information used by initializeRunTimeInfo. // The fields are only valid while run() is being executed. - const uint8_t* mModelOperandValues = nullptr; + const hal::hidl_vec<uint8_t>* mModelOperandValues = nullptr; const std::vector<RunTimePoolInfo>* mModelPoolInfos = nullptr; - const std::vector<Model::Subgraph>* mReferencedSubgraphs = nullptr; + const hal::hidl_vec<hal::Subgraph>* mReferencedSubgraphs = nullptr; // The output operand shapes returning to the runtime. - std::vector<OutputShape> mOutputShapes; + std::vector<hal::OutputShape> mOutputShapes; // Whether execution is finished and mOutputShapes is ready bool mFinished = false; @@ -192,7 +192,7 @@ // The deadline hint for the maximum amount of time the client expects the // execution will take. If this deadline is exceeded, the CpuExecutor will // abort the execution if there are remaining ops to execute. - OptionalTimePoint mDeadline; + std::optional<Deadline> mDeadline; // The maximum amount of time in nanoseconds that can be spent executing a // WHILE loop. 
@@ -259,16 +259,17 @@ } inline bool IsNullInput(const RunTimeOperandInfo* input) { - return input->lifetime == Operand::LifeTime::NO_VALUE; + return input->lifetime == hal::OperandLifeTime::NO_VALUE; } -inline int NumInputsWithValues(const Operation& operation, const RunTimeOperandInfo* operands) { +inline int NumInputsWithValues(const hal::Operation& operation, + const RunTimeOperandInfo* operands) { const std::vector<uint32_t>& inputs = operation.inputs; return std::count_if(inputs.begin(), inputs.end(), [&operands](uint32_t i) { return !IsNullInput(&operands[i]); }); } -inline int NumOutputs(const Operation& operation) { +inline int NumOutputs(const hal::Operation& operation) { return operation.outputs.size(); } @@ -280,12 +281,12 @@ return operand->shape().dimensions[i]; } -inline RunTimeOperandInfo* GetInput(const Operation& operation, RunTimeOperandInfo* operands, +inline RunTimeOperandInfo* GetInput(const hal::Operation& operation, RunTimeOperandInfo* operands, int index) { return &operands[operation.inputs[index]]; } -inline RunTimeOperandInfo* GetOutput(const Operation& operation, RunTimeOperandInfo* operands, +inline RunTimeOperandInfo* GetOutput(const hal::Operation& operation, RunTimeOperandInfo* operands, int index) { return &operands[operation.outputs[index]]; }
diff --git a/common/include/CpuOperationUtils.h b/common/include/CpuOperationUtils.h index ff58ff1..8799529 100644 --- a/common/include/CpuOperationUtils.h +++ b/common/include/CpuOperationUtils.h
@@ -17,7 +17,6 @@ #ifndef ANDROID_FRAMEWORKS_ML_NN_COMMON_CPU_OPERATION_UTILS_H #define ANDROID_FRAMEWORKS_ML_NN_COMMON_CPU_OPERATION_UTILS_H -#include <android-base/logging.h> #include <tensorflow/lite/kernels/internal/types.h> #include <algorithm> @@ -33,7 +32,7 @@ // The implementations in tflite/kernels/internal/ take a Dims<4> object // even if the original tensors were not 4D. inline tflite::Dims<4> convertShapeToDims(const Shape& shape) { - CHECK_LE(shape.dimensions.size(), 4u); + nnAssert(shape.dimensions.size() <= 4); tflite::Dims<4> dims; // The dimensions are reversed in Dims<4>.
diff --git a/common/include/DefaultExecution.h b/common/include/DefaultExecution.h deleted file mode 100644 index 97ba692..0000000 --- a/common/include/DefaultExecution.h +++ /dev/null
@@ -1,64 +0,0 @@ -/* - * Copyright (C) 2021 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef ANDROID_PACKAGES_MODULES_NEURALNETWORKS_COMMON_DEFAULT_EXECUTION_H -#define ANDROID_PACKAGES_MODULES_NEURALNETWORKS_COMMON_DEFAULT_EXECUTION_H - -#include <android-base/macros.h> -#include <nnapi/IExecution.h> -#include <nnapi/IPreparedModel.h> -#include <nnapi/Result.h> -#include <nnapi/Types.h> - -#include <memory> -#include <utility> -#include <vector> - -namespace android::nn { - -class DefaultExecution final : public IExecution { - public: - DefaultExecution(SharedPreparedModel preparedModel, Request request, MeasureTiming measure, - OptionalDuration loopTimeoutDuration) - : kPreparedModel(std::move(preparedModel)), - kRequest(std::move(request)), - kMeasure(measure), - kLoopTimeoutDuration(loopTimeoutDuration) { - CHECK(kPreparedModel != nullptr); - } - - ExecutionResult<std::pair<std::vector<OutputShape>, Timing>> compute( - const OptionalTimePoint& deadline) const override { - return kPreparedModel->execute(kRequest, kMeasure, deadline, kLoopTimeoutDuration); - } - - GeneralResult<std::pair<SyncFence, ExecuteFencedInfoCallback>> computeFenced( - const std::vector<SyncFence>& waitFor, const OptionalTimePoint& deadline, - const OptionalDuration& timeoutDurationAfterFence) const override { - return kPreparedModel->executeFenced(kRequest, waitFor, kMeasure, deadline, - kLoopTimeoutDuration, 
timeoutDurationAfterFence); - } - - private: - const SharedPreparedModel kPreparedModel; - const Request kRequest; - const MeasureTiming kMeasure; - const OptionalDuration kLoopTimeoutDuration; -}; - -} // namespace android::nn - -#endif // ANDROID_PACKAGES_MODULES_NEURALNETWORKS_COMMON_DEFAULT_EXECUTION_H
diff --git a/common/include/GraphDump.h b/common/include/GraphDump.h index 208b4ec..207afe5 100644 --- a/common/include/GraphDump.h +++ b/common/include/GraphDump.h
@@ -17,9 +17,9 @@ #ifndef ANDROID_FRAMEWORKS_ML_NN_COMMON_GRAPH_DUMP_H #define ANDROID_FRAMEWORKS_ML_NN_COMMON_GRAPH_DUMP_H -#include <iostream> +#include <android/hardware/neuralnetworks/1.3/types.h> -#include "nnapi/Types.h" +#include <iostream> namespace android { namespace nn { @@ -45,7 +45,8 @@ // A model input or output (operand) is shown in "reverse colors" -- // white text on a black background. // -void graphDump(const char* name, const Model& model, std::ostream* outStream = nullptr); +void graphDump(const char* name, const ::android::hardware::neuralnetworks::V1_3::Model& model, + std::ostream* outStream = nullptr); } // namespace nn } // namespace android
diff --git a/common/include/HalBufferTracker.h b/common/include/HalBufferTracker.h deleted file mode 100644 index 2bce3ff..0000000 --- a/common/include/HalBufferTracker.h +++ /dev/null
@@ -1,121 +0,0 @@ -/* - * Copyright (C) 2020 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef ANDROID_FRAMEWORKS_ML_NN_COMMON_HAL_BUFFER_TRACKER_H -#define ANDROID_FRAMEWORKS_ML_NN_COMMON_HAL_BUFFER_TRACKER_H - -#include <android-base/macros.h> - -#include <map> -#include <memory> -#include <mutex> -#include <set> -#include <stack> -#include <utility> -#include <vector> - -#include "CpuExecutor.h" -#include "HalInterfaces.h" -#include "Utils.h" -#include "ValidateHal.h" - -namespace android::nn { - -// This class manages a CPU buffer allocated on heap and provides validation methods. -class HalManagedBuffer { - public: - static std::shared_ptr<HalManagedBuffer> create(uint32_t size, - std::set<HalPreparedModelRole> roles, - const Operand& operand); - - // Prefer HalManagedBuffer::create. - HalManagedBuffer(std::unique_ptr<uint8_t[]> buffer, uint32_t size, - std::set<HalPreparedModelRole> roles, const Operand& operand); - - RunTimePoolInfo createRunTimePoolInfo() const { - return RunTimePoolInfo::createFromExistingBuffer(kBuffer.get(), kSize); - } - - // "poolIndex" is the index of this buffer in the request.pools. - ErrorStatus validateRequest(uint32_t poolIndex, const Request& request, - const V1_3::IPreparedModel* preparedModel) const; - - // "size" is the byte size of the Memory provided to the copyFrom or copyTo method. 
- ErrorStatus validateCopyFrom(const std::vector<uint32_t>& dimensions, uint32_t size) const; - ErrorStatus validateCopyTo(uint32_t size) const; - - bool updateDimensions(const std::vector<uint32_t>& dimensions); - void setInitialized(bool initialized); - - private: - mutable std::mutex mMutex; - const std::unique_ptr<uint8_t[]> kBuffer; - const uint32_t kSize; - const std::set<HalPreparedModelRole> kRoles; - const OperandType kOperandType; - const std::vector<uint32_t> kInitialDimensions; - std::vector<uint32_t> mUpdatedDimensions; - bool mInitialized = false; -}; - -// Keep track of all HalManagedBuffers and assign each with a unique token. -class HalBufferTracker : public std::enable_shared_from_this<HalBufferTracker> { - DISALLOW_COPY_AND_ASSIGN(HalBufferTracker); - - public: - // A RAII class to help manage the lifetime of the token. - // It is only supposed to be constructed in HalBufferTracker::add. - class Token { - DISALLOW_COPY_AND_ASSIGN(Token); - - public: - Token(uint32_t token, std::shared_ptr<HalBufferTracker> tracker) - : kToken(token), kHalBufferTracker(std::move(tracker)) {} - ~Token() { kHalBufferTracker->free(kToken); } - uint32_t get() const { return kToken; } - - private: - const uint32_t kToken; - const std::shared_ptr<HalBufferTracker> kHalBufferTracker; - }; - - // The factory of HalBufferTracker. This ensures that the HalBufferTracker is always managed by - // a shared_ptr. - static std::shared_ptr<HalBufferTracker> create() { - return std::make_shared<HalBufferTracker>(); - } - - // Prefer HalBufferTracker::create. - HalBufferTracker() : mTokenToBuffers(1) {} - - std::unique_ptr<Token> add(std::shared_ptr<HalManagedBuffer> buffer); - std::shared_ptr<HalManagedBuffer> get(uint32_t token) const; - - private: - void free(uint32_t token); - - mutable std::mutex mMutex; - std::stack<uint32_t, std::vector<uint32_t>> mFreeTokens; - - // Since the tokens are allocated in a non-sparse way, we use a vector to represent the mapping. 
- // The index of the vector is the token. When the token gets freed, the corresponding entry is - // set to nullptr. mTokenToBuffers[0] is always set to nullptr because 0 is an invalid token. - std::vector<std::shared_ptr<HalManagedBuffer>> mTokenToBuffers; -}; - -} // namespace android::nn - -#endif // ANDROID_FRAMEWORKS_ML_NN_COMMON_HAL_BUFFER_TRACKER_H
diff --git a/common/include/HalInterfaces.h b/common/include/HalInterfaces.h index 8eeb23d..4e3a380 100644 --- a/common/include/HalInterfaces.h +++ b/common/include/HalInterfaces.h
@@ -40,20 +40,74 @@ #include <functional> -namespace android::nn { +namespace android::nn::hal { -namespace V1_0 = ::android::hardware::neuralnetworks::V1_0; -namespace V1_1 = ::android::hardware::neuralnetworks::V1_1; -namespace V1_2 = ::android::hardware::neuralnetworks::V1_2; -namespace V1_3 = ::android::hardware::neuralnetworks::V1_3; +using android::sp; -using HalCacheToken = - hardware::hidl_array<uint8_t, - static_cast<uint32_t>(V1_2::Constant::BYTE_SIZE_OF_CACHE_TOKEN)>; -using HalDeviceFactory = std::function<sp<V1_0::IDevice>(bool blocking)>; +using hardware::hidl_death_recipient; +using hardware::hidl_enum_range; +using hardware::hidl_handle; +using hardware::hidl_memory; +using hardware::hidl_string; +using hardware::hidl_vec; +using hardware::Return; +using hardware::Void; -inline constexpr V1_3::Priority kDefaultPriority13 = V1_3::Priority::MEDIUM; +using hidl::memory::V1_0::IMemory; -} // namespace android::nn +namespace V1_0 = hardware::neuralnetworks::V1_0; +namespace V1_1 = hardware::neuralnetworks::V1_1; +namespace V1_2 = hardware::neuralnetworks::V1_2; +namespace V1_3 = hardware::neuralnetworks::V1_3; + +using V1_0::DataLocation; +using V1_0::DeviceStatus; +using V1_0::FusedActivationFunc; +using V1_0::PerformanceInfo; +using V1_0::RequestArgument; +using V1_1::ExecutionPreference; +using V1_2::Constant; +using V1_2::DeviceType; +using V1_2::Extension; +using V1_2::MeasureTiming; +using V1_2::OutputShape; +using V1_2::SymmPerChannelQuantParams; +using V1_2::Timing; +using V1_3::BufferDesc; +using V1_3::BufferRole; +using V1_3::Capabilities; +using V1_3::ErrorStatus; +using V1_3::IBuffer; +using V1_3::IDevice; +using V1_3::IExecutionCallback; +using V1_3::IFencedExecutionCallback; +using V1_3::IPreparedModel; +using V1_3::IPreparedModelCallback; +using V1_3::LoopTimeoutDurationNs; +using V1_3::Model; +using V1_3::Operand; +using V1_3::OperandLifeTime; +using V1_3::OperandType; +using V1_3::OperandTypeRange; +using V1_3::Operation; +using 
V1_3::OperationType; +using V1_3::OperationTypeRange; +using V1_3::OptionalTimeoutDuration; +using V1_3::OptionalTimePoint; +using V1_3::Priority; +using V1_3::Request; +using V1_3::Subgraph; +using ExtensionNameAndPrefix = V1_2::Model::ExtensionNameAndPrefix; +using ExtensionTypeEncoding = V1_2::Model::ExtensionTypeEncoding; +using OperandExtraParams = V1_2::Operand::ExtraParams; + +using CacheToken = + hardware::hidl_array<uint8_t, static_cast<uint32_t>(Constant::BYTE_SIZE_OF_CACHE_TOKEN)>; +using DeviceFactory = std::function<sp<V1_0::IDevice>(bool blocking)>; +using ModelFactory = std::function<Model()>; + +inline constexpr Priority kDefaultPriority = Priority::MEDIUM; + +} // namespace android::nn::hal #endif // ANDROID_FRAMEWORKS_ML_NN_COMMON_HAL_INTERFACES_H
diff --git a/common/include/LegacyHalUtils.h b/common/include/LegacyHalUtils.h deleted file mode 100644 index 7a8ce6c..0000000 --- a/common/include/LegacyHalUtils.h +++ /dev/null
@@ -1,406 +0,0 @@ -/* - * Copyright (C) 2017 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -// This file contains pre-canonical-types utility code and includes HAL -// utilities. LegacyUtils.h is the subset of these utilities that do not touch -// HAL. - -#ifndef ANDROID_FRAMEWORKS_ML_NN_COMMON_LEGACY_HAL_UTILS_H -#define ANDROID_FRAMEWORKS_ML_NN_COMMON_LEGACY_HAL_UTILS_H - -#include <android-base/logging.h> -#include <nnapi/TypeUtils.h> -#include <nnapi/Types.h> - -#include <set> -#include <string> -#include <tuple> -#include <utility> -#include <vector> - -#include "HalInterfaces.h" -#include "LegacyUtils.h" -#include "NeuralNetworks.h" -#include "ValidateHal.h" - -namespace android { -namespace nn { - -using LegacyClock = std::chrono::steady_clock; -using LegacyDuration = std::chrono::duration<uint64_t, std::nano>; -using LegacyOptionalDuration = std::optional<LegacyDuration>; -using LegacyTimePoint = std::chrono::time_point<std::chrono::steady_clock, LegacyDuration>; -using LegacyOptionalTimePoint = std::optional<LegacyTimePoint>; - -// Make an optional deadline from an V1_3::OptionalTimePoint. -LegacyOptionalTimePoint makeDeadline(const V1_3::OptionalTimePoint& timePoint); - -// Make an optional deadline from an V1_3::OptionalDuration. -LegacyOptionalTimePoint makeDeadline(const V1_3::OptionalTimeoutDuration& optionalDuration); - -// Returns true if the deadline has passed. 
Returns false if either the deadline -// has not been exceeded or if the deadline is not present. -bool hasDeadlinePassed(const LegacyOptionalTimePoint& deadline); - -// Ensure that every user of FalseyErrorStream is linked to the -// correct instance, using the correct LOG_TAG -namespace { - -template <HalVersion version> -struct VersionedType {}; - -template <> -struct VersionedType<HalVersion::V1_2> { - using OperandPerformance = V1_2::Capabilities::OperandPerformance; - using OperandType = V1_2::OperandType; -}; - -template <> -struct VersionedType<HalVersion::V1_3> { - using OperandPerformance = V1_3::Capabilities::OperandPerformance; - using OperandType = V1_3::OperandType; -}; - -template <HalVersion version> -using VersionedOperandPerformance = typename VersionedType<version>::OperandPerformance; -template <HalVersion version> -using VersionedOperandType = typename VersionedType<version>::OperandType; - -} // namespace - -// Return a vector with one entry for each non-extension OperandType except -// SUBGRAPH, set to the specified PerformanceInfo value. The vector will be -// sorted by OperandType. -// -// Control flow (OperandType::SUBGRAPH) operation performance is specified -// separately using Capabilities::ifPerformance and -// Capabilities::whilePerformance. -template <HalVersion version> -hardware::hidl_vec<VersionedOperandPerformance<version>> nonExtensionOperandPerformance( - V1_0::PerformanceInfo perf); - -// Update the vector entry corresponding to the specified OperandType with the -// specified PerformanceInfo value. The vector must already have an entry for -// that OperandType, and must be sorted by OperandType. 
-void update(hardware::hidl_vec<V1_2::Capabilities::OperandPerformance>* operandPerformance, - V1_2::OperandType type, V1_0::PerformanceInfo perf); -void update(hardware::hidl_vec<V1_3::Capabilities::OperandPerformance>* operandPerformance, - V1_3::OperandType type, V1_0::PerformanceInfo perf); - -// Look for a vector entry corresponding to the specified OperandType. If -// found, return the associated PerformanceInfo. If not, return a pessimistic -// PerformanceInfo (FLT_MAX). The vector must be sorted by OperandType. -V1_0::PerformanceInfo lookup( - const hardware::hidl_vec<V1_2::Capabilities::OperandPerformance>& operandPerformance, - V1_2::OperandType type); -V1_0::PerformanceInfo lookup( - const hardware::hidl_vec<V1_3::Capabilities::OperandPerformance>& operandPerformance, - V1_3::OperandType type); - -// Returns true if an operand type is an extension type. -bool isExtensionOperandType(V1_3::OperandType type); - -// Returns true if an operation type is an extension type. -bool isExtensionOperationType(V1_3::OperationType type); - -// Returns the amount of space needed to store a value of the specified -// dimensions and type. For a tensor with unspecified rank or at least one -// unspecified dimension, returns zero. -// -// Aborts if the specified type is an extension type. -// Aborts if the size would overflow the return type. -// -// See also TypeManager::getSizeOfData(OperandType, const std::vector<uint32_t>&). -uint32_t nonExtensionOperandSizeOfData(V1_3::OperandType type, - const std::vector<uint32_t>& dimensions); - -// Returns the amount of space needed to store a value of the dimensions and -// type of this operand. For a tensor with unspecified rank or at least one -// unspecified dimension, returns zero. -// -// Aborts if the specified type is an extension type. -// Aborts if the size would overflow the return type. -// -// See also TypeManager::getSizeOfData(const Operand&). 
-inline uint32_t nonExtensionOperandSizeOfData(const V1_3::Operand& operand) { - return nonExtensionOperandSizeOfData(operand.type, operand.dimensions); -} - -// Returns true if the amount of space needed to store a value of the specified -// dimensions and element size overflows the uint32_t type. -// -// Aborts if the specified type is an extension type. -// -// See also TypeManager::sizeOfDataOverflowsUInt32(OperandType, const std::vector<uint32_t>&). -bool nonExtensionOperandSizeOfDataOverflowsUInt32(V1_3::OperandType type, - const std::vector<uint32_t>& dimensions); - -// Returns the name of the operation type in ASCII. -std::string getOperationName(V1_3::OperationType opCode); - -// Returns the name of the operand type in ASCII. -std::string getOperandTypeName(V1_3::OperandType type); - -// Whether an operand of tensor type has unspecified dimensions. -// -// Undefined behavior if the operand type is a scalar type. -bool tensorHasUnspecifiedDimensions(V1_3::OperandType type, - const std::vector<uint32_t>& dimensions); -bool tensorHasUnspecifiedDimensions(const V1_3::Operand& operand); - -// Does a detailed LOG(INFO) of the model -void logModelToInfo(const V1_0::Model& model); -void logModelToInfo(const V1_1::Model& model); -void logModelToInfo(const V1_2::Model& model); -void logModelToInfo(const V1_3::Model& model); - -bool validateOperandSymmPerChannelQuantParams( - const V1_3::Operand& halOperand, - const ANeuralNetworksSymmPerChannelQuantParams& channelQuant, const char* tag); - -// Convert ANEURALNETWORKS_* result code to ErrorStatus. -// Not guaranteed to be a 1-to-1 mapping. -V1_3::ErrorStatus convertResultCodeToHalErrorStatus(int resultCode); - -// Convert ErrorStatus to ANEURALNETWORKS_* result code. -// Not guaranteed to be a 1-to-1 mapping. -int convertErrorStatusToResultCode(V1_3::ErrorStatus status); - -// Convert execution results to runtime format. 
Additionally checks that the -// returned results abide by the HAL specification, and logs an error if the -// result violates the specification. -std::tuple<int, std::vector<OutputShape>, Timing> getExecutionResult( - V1_3::ErrorStatus status, const hardware::hidl_vec<V1_2::OutputShape>& outputShapes, - const V1_2::Timing& timing); - -// Forward declaration for type defined in CpuExecutor.h. -class RunTimePoolInfo; - -bool setRunTimePoolInfosFromHidlMemories(std::vector<RunTimePoolInfo>* poolInfos, - const hardware::hidl_vec<hardware::hidl_memory>& pools); - -// Versioning - -bool compliantWithV1_0(const V1_0::Capabilities& capabilities); -bool compliantWithV1_0(const V1_1::Capabilities& capabilities); -bool compliantWithV1_0(const V1_2::Capabilities& capabilities); -bool compliantWithV1_0(const V1_3::Capabilities& capabilities); -bool compliantWithV1_1(const V1_0::Capabilities& capabilities); -bool compliantWithV1_1(const V1_1::Capabilities& capabilities); -bool compliantWithV1_1(const V1_2::Capabilities& capabilities); -bool compliantWithV1_1(const V1_3::Capabilities& capabilities); -bool compliantWithV1_2(const V1_0::Capabilities& capabilities); -bool compliantWithV1_2(const V1_1::Capabilities& capabilities); -bool compliantWithV1_2(const V1_2::Capabilities& capabilities); -bool compliantWithV1_2(const V1_3::Capabilities& capabilities); -bool compliantWithV1_3(const V1_0::Capabilities& capabilities); -bool compliantWithV1_3(const V1_1::Capabilities& capabilities); -bool compliantWithV1_3(const V1_2::Capabilities& capabilities); -bool compliantWithV1_3(const V1_3::Capabilities& capabilities); - -// If noncompliantOperations != nullptr, then -// precondition: noncompliantOperations->empty() -// postcondition: *noncompliantOperations consists of the indices of the noncompliant -// operations; if the compliance check fails for some reason -// other than a noncompliant operation, -// *noncompliantOperations consists of the indices of all operations -bool 
compliantWithV1_0(const V1_0::Model& model); -bool compliantWithV1_0(const V1_1::Model& model); -bool compliantWithV1_0(const V1_2::Model& model, - std::set<uint32_t>* noncompliantOperations = nullptr); -bool compliantWithV1_0(const V1_3::Model& model, - std::set<uint32_t>* noncompliantOperations = nullptr); -bool compliantWithV1_1(const V1_0::Model& model); -bool compliantWithV1_1(const V1_1::Model& model); -bool compliantWithV1_1(const V1_2::Model& model, - std::set<uint32_t>* noncompliantOperations = nullptr); -bool compliantWithV1_1(const V1_3::Model& model, - std::set<uint32_t>* noncompliantOperations = nullptr); -bool compliantWithV1_2(const V1_0::Model& model); -bool compliantWithV1_2(const V1_1::Model& model); -bool compliantWithV1_2(const V1_2::Model& model, - std::set<uint32_t>* noncompliantOperations = nullptr); -bool compliantWithV1_2(const V1_3::Model& model, - std::set<uint32_t>* noncompliantOperations = nullptr); - -V1_0::ErrorStatus convertToV1_0(V1_0::ErrorStatus status); -V1_0::ErrorStatus convertToV1_0(V1_3::ErrorStatus status); -V1_3::ErrorStatus convertToV1_3(V1_0::ErrorStatus status); -V1_3::ErrorStatus convertToV1_3(V1_3::ErrorStatus status); - -V1_0::Capabilities convertToV1_0(const V1_0::Capabilities& capabilities); -V1_0::Capabilities convertToV1_0(const V1_1::Capabilities& capabilities); -V1_0::Capabilities convertToV1_0(const V1_2::Capabilities& capabilities); -V1_0::Capabilities convertToV1_0(const V1_3::Capabilities& capabilities); -V1_1::Capabilities convertToV1_1(const V1_0::Capabilities& capabilities); -V1_1::Capabilities convertToV1_1(const V1_1::Capabilities& capabilities); -V1_1::Capabilities convertToV1_1(const V1_2::Capabilities& capabilities); -V1_1::Capabilities convertToV1_1(const V1_3::Capabilities& capabilities); -V1_2::Capabilities convertToV1_2(const V1_0::Capabilities& capabilities); -V1_2::Capabilities convertToV1_2(const V1_1::Capabilities& capabilities); -V1_2::Capabilities convertToV1_2(const V1_2::Capabilities& 
capabilities); -V1_2::Capabilities convertToV1_2(const V1_3::Capabilities& capabilities); -V1_3::Capabilities convertToV1_3(const V1_0::Capabilities& capabilities); -V1_3::Capabilities convertToV1_3(const V1_1::Capabilities& capabilities); -V1_3::Capabilities convertToV1_3(const V1_2::Capabilities& capabilities); -V1_3::Capabilities convertToV1_3(const V1_3::Capabilities& capabilities); - -V1_0::Model convertToV1_0(const V1_0::Model& model); -V1_0::Model convertToV1_0(const V1_1::Model& model); -V1_0::Model convertToV1_0(const V1_2::Model& model); -V1_0::Model convertToV1_0(const V1_3::Model& model); -V1_1::Model convertToV1_1(const V1_0::Model& model); -V1_1::Model convertToV1_1(const V1_1::Model& model); -V1_1::Model convertToV1_1(const V1_2::Model& model); -V1_1::Model convertToV1_1(const V1_3::Model& model); -V1_2::Model convertToV1_2(const V1_0::Model& model); -V1_2::Model convertToV1_2(const V1_1::Model& model); -V1_2::Model convertToV1_2(const V1_2::Model& model); -V1_2::Model convertToV1_2(const V1_3::Model& model); -V1_3::Model convertToV1_3(const V1_0::Model& model); -V1_3::Model convertToV1_3(const V1_1::Model& model); -V1_3::Model convertToV1_3(const V1_2::Model& model); -V1_3::Model convertToV1_3(const V1_3::Model& model); - -V1_0::OperationType uncheckedConvertToV1_0(V1_3::OperationType type); -V1_1::OperationType uncheckedConvertToV1_1(V1_3::OperationType type); -V1_2::OperationType uncheckedConvertToV1_2(V1_3::OperationType type); - -V1_0::Operand convertToV1_0(const V1_2::Operand& operand); -V1_0::Operand convertToV1_0(const V1_3::Operand& operand); -V1_2::Operand convertToV1_2(const V1_0::Operand& operand); -V1_2::Operand convertToV1_2(const V1_3::Operand& operand); -V1_3::Operand convertToV1_3(const V1_0::Operand& operand); -V1_3::Operand convertToV1_3(const V1_2::Operand& operand); -V1_3::Operand convertToV1_3(const V1_3::Operand& operand); - -hardware::hidl_vec<V1_0::Operand> convertToV1_0(const hardware::hidl_vec<V1_0::Operand>& operands); 
-hardware::hidl_vec<V1_0::Operand> convertToV1_0(const hardware::hidl_vec<V1_2::Operand>& operands); -hardware::hidl_vec<V1_0::Operand> convertToV1_0(const hardware::hidl_vec<V1_3::Operand>& operands); -hardware::hidl_vec<V1_2::Operand> convertToV1_2(const hardware::hidl_vec<V1_0::Operand>& operands); -hardware::hidl_vec<V1_2::Operand> convertToV1_2(const hardware::hidl_vec<V1_2::Operand>& operands); -hardware::hidl_vec<V1_2::Operand> convertToV1_2(const hardware::hidl_vec<V1_3::Operand>& operands); -hardware::hidl_vec<V1_3::Operand> convertToV1_3(const hardware::hidl_vec<V1_0::Operand>& operands); -hardware::hidl_vec<V1_3::Operand> convertToV1_3(const hardware::hidl_vec<V1_2::Operand>& operands); -hardware::hidl_vec<V1_3::Operand> convertToV1_3(const hardware::hidl_vec<V1_3::Operand>& operands); - -bool compliantWithV1_0(const V1_0::Request& request); -bool compliantWithV1_0(const V1_3::Request& request); -bool compliantWithV1_2(const V1_3::Request& request); - -V1_0::Request convertToV1_0(const V1_0::Request& request); -V1_0::Request convertToV1_0(const V1_3::Request& request); -V1_0::Request convertToV1_2(const V1_3::Request& request); -V1_3::Request convertToV1_3(const V1_0::Request& request); -V1_3::Request convertToV1_3(const V1_3::Request& request); - -bool compliantWithV1_0(V1_0::OperandLifeTime lifetime); -bool compliantWithV1_0(V1_3::OperandLifeTime lifetime); -bool compliantWithV1_3(V1_0::OperandLifeTime lifetime); -bool compliantWithV1_3(V1_3::OperandLifeTime lifetime); - -V1_0::OperandLifeTime convertToV1_0(V1_0::OperandLifeTime lifetime); -V1_0::OperandLifeTime convertToV1_0(V1_3::OperandLifeTime lifetime); -V1_3::OperandLifeTime convertToV1_3(V1_0::OperandLifeTime lifetime); -V1_3::OperandLifeTime convertToV1_3(V1_3::OperandLifeTime lifetime); - -constexpr V1_3::Priority convertToHalPriority(int32_t priority) { - switch (priority) { - case ANEURALNETWORKS_PRIORITY_LOW: - return V1_3::Priority::LOW; - case ANEURALNETWORKS_PRIORITY_MEDIUM: - return 
V1_3::Priority::MEDIUM; - case ANEURALNETWORKS_PRIORITY_HIGH: - return V1_3::Priority::HIGH; - } - LOG(FATAL) << "unrecognized priority: " << priority; - return {}; -} - -// DEPRECATED. Use checked conversions from nnapi/hal/1.X/Conversions.h. -Capabilities::OperandPerformance uncheckedConvert( - const V1_3::Capabilities::OperandPerformance& operandPerformance); -Capabilities::PerformanceInfo uncheckedConvert(const V1_0::PerformanceInfo& performanceInfo); -Capabilities uncheckedConvert(const V1_3::Capabilities& capabilities); -DataLocation uncheckedConvert(const V1_0::DataLocation& location); -ErrorStatus uncheckedConvert(V1_0::ErrorStatus status); -ErrorStatus uncheckedConvert(V1_3::ErrorStatus status); -Extension::OperandTypeInformation uncheckedConvert(const V1_2::Extension::OperandTypeInformation&); -Extension uncheckedConvert(const V1_2::Extension& extension); -hardware::hidl_vec<uint8_t> uncheckedConvert(const Operand::ExtensionParams& params); -MeasureTiming uncheckedConvert(V1_2::MeasureTiming measure); -SharedMemory uncheckedConvert(const hardware::hidl_memory& memory); -Model::ExtensionNameAndPrefix uncheckedConvert(const V1_2::Model::ExtensionNameAndPrefix&); -Model::Subgraph uncheckedConvert(const V1_3::Subgraph& subgraph); -Model uncheckedConvert(const V1_3::Model& model); -Operand::ExtensionParams uncheckedConvert(const hardware::hidl_vec<uint8_t>& params); -Operand::ExtraParams uncheckedConvert(const V1_2::Operand::ExtraParams& params); -Operand::LifeTime uncheckedConvert(V1_3::OperandLifeTime lifetime); -Operand::SymmPerChannelQuantParams uncheckedConvert(const V1_2::SymmPerChannelQuantParams& params); -OperandType uncheckedConvert(V1_3::OperandType operandType); -Operand uncheckedConvert(const V1_3::Operand& operand); -OperationType uncheckedConvert(V1_3::OperationType operationType); -Operation uncheckedConvert(const V1_3::Operation& operation); -OptionalDuration uncheckedConvert(const V1_3::OptionalTimeoutDuration& timeoutDuration); -OutputShape 
uncheckedConvert(const V1_2::OutputShape& outputShape); -Request::Argument uncheckedConvert(const V1_0::RequestArgument& requestArgument); -Request::MemoryPool uncheckedConvert(const V1_3::Request::MemoryPool& memoryPool); -Request uncheckedConvert(const V1_3::Request& request); -std::vector<Extension> uncheckedConvert(const hardware::hidl_vec<V1_2::Extension>& extensions); -std::vector<SharedMemory> uncheckedConvert( - const hardware::hidl_vec<hardware::hidl_memory>& memories); -std::vector<Model::Subgraph> uncheckedConvert(const hardware::hidl_vec<V1_3::Subgraph>& subgraphs); -std::vector<Operand> uncheckedConvert(const hardware::hidl_vec<V1_3::Operand>& operands); -std::vector<OutputShape> uncheckedConvert( - const hardware::hidl_vec<V1_2::OutputShape>& outputShapes); -std::vector<Request::MemoryPool> uncheckedConvert( - const hardware::hidl_vec<V1_3::Request::MemoryPool>& memoryPools); -Timing uncheckedConvert(const V1_2::Timing& timing); - -// DEPRECATED. Use conversions from nnapi/hal/1.X/Conversions.h. 
-hardware::hidl_memory convertToV1_0(const SharedMemory& memory); -hardware::hidl_vec<hardware::hidl_memory> convertToV1_0(const std::vector<SharedMemory>& memories); -hardware::hidl_vec<uint8_t> convertToV1_0(const Model::OperandValues& operandValues); -hardware::hidl_vec<V1_2::OutputShape> convertToV1_2(const std::vector<OutputShape>& outputShapes); -hardware::hidl_vec<V1_3::BufferRole> convertToV1_3(const std::vector<BufferRole>& bufferRoles); -V1_0::DataLocation convertToV1_0(const DataLocation& location); -V1_0::ErrorStatus convertToV1_0(ErrorStatus status); -V1_0::RequestArgument convertToV1_0(const Request::Argument& requestArgument); -V1_1::ExecutionPreference convertToV1_1(ExecutionPreference preference); -V1_2::MeasureTiming convertToV1_2(MeasureTiming measure); -V1_2::Model::ExtensionNameAndPrefix convertToV1_2(const Model::ExtensionNameAndPrefix&); -V1_2::Operand::ExtraParams convertToV1_2(const Operand::ExtraParams& params); -V1_2::OutputShape convertToV1_2(const OutputShape& outputShape); -V1_2::SymmPerChannelQuantParams convertToV1_2(const Operand::SymmPerChannelQuantParams& params); -V1_2::Timing convertToV1_2(const Timing& timing); -V1_3::BufferRole convertToV1_3(const BufferRole& bufferRole); -V1_3::ErrorStatus convertToV1_3(ErrorStatus status); -V1_3::Model convertToV1_3(const Model& model); -V1_3::Operand convertToV1_3(const Operand& operand); -V1_3::OperandLifeTime convertToV1_3(Operand::LifeTime lifetime); -V1_3::OperandType convertToV1_3(OperandType operandType); -V1_3::Operation convertToV1_3(const Operation& operation); -V1_3::OperationType convertToV1_3(OperationType operationType); -V1_3::OptionalTimeoutDuration convertToV1_3(const OptionalDuration& timeoutDuration); -V1_3::OptionalTimePoint convertToV1_3(const OptionalTimePoint& timePoint); -V1_3::Priority convertToV1_3(Priority priority); -V1_3::Request convertToV1_3(const Request& request); -V1_3::Request::MemoryPool convertToV1_3(const Request::MemoryPool& memoryPool); -V1_3::Subgraph 
convertToV1_3(const Model::Subgraph& model); - -} // namespace nn -} // namespace android - -#endif // ANDROID_FRAMEWORKS_ML_NN_COMMON_LEGACY_HAL_UTILS_H
diff --git a/common/include/LegacyUtils.h b/common/include/LegacyUtils.h deleted file mode 100644 index c99311d..0000000 --- a/common/include/LegacyUtils.h +++ /dev/null
@@ -1,327 +0,0 @@ -/* - * Copyright (C) 2017 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -// This file contains pre-canonical-types utility code and does not includes HAL -// utilities. LegacyHalUtils.h is a superset of these utilities that includes -// HAL utilities. - -#ifndef ANDROID_FRAMEWORKS_ML_NN_COMMON_LEGACY_UTILS_H -#define ANDROID_FRAMEWORKS_ML_NN_COMMON_LEGACY_UTILS_H - -#include <android-base/logging.h> -#include <nnapi/TypeUtils.h> -#include <nnapi/Types.h> - -#include <string> -#include <tuple> -#include <utility> -#include <vector> - -#include "NeuralNetworks.h" -#include "OperationResolver.h" -#include "nnapi/TypeUtils.h" -#include "nnapi/Types.h" - -namespace android { -namespace nn { - -// The number of data types (OperandCode) defined in NeuralNetworks.h. -const int kNumberOfDataTypes = 16; - -// The number of operation types (OperationCode) defined in NeuralNetworks.h. -const int kNumberOfOperationTypes = 102; -static_assert(kNumberOfOperationTypes == BuiltinOperationResolver::kNumberOfOperationTypes); - -// The number of execution preferences defined in NeuralNetworks.h. -const int kNumberOfPreferences = 3; - -// The number of data types (OperandCode) defined in NeuralNetworksOEM.h. -const int kNumberOfDataTypesOEM = 2; - -// The number of operation types (OperationCode) defined in NeuralNetworksOEM.h. 
-const int kNumberOfOperationTypesOEM = 1; - -// The lowest number assigned to any OEM Code in NeuralNetworksOEM.h. -const int kOEMCodeBase = 10000; - -/* IMPORTANT: if you change the following list, don't - * forget to update the corresponding 'tags' table in - * the initVlogMask() function implemented in Utils.cpp. - */ -enum VLogFlags { MODEL = 0, COMPILATION, EXECUTION, CPUEXE, MANAGER, DRIVER, MEMORY }; - -#define VLOG_IS_ON(TAG) ((vLogMask & (1 << (TAG))) != 0) - -#define VLOG(TAG) \ - if (LIKELY(!VLOG_IS_ON(TAG))) \ - ; \ - else \ - LOG(INFO) - -extern int vLogMask; -void initVLogMask(); - -#ifdef NN_DEBUGGABLE -#define SHOW_IF_DEBUG(msg) msg -#else -#define SHOW_IF_DEBUG(msg) "" -#endif - -// DEPRECATED(b/118737105). Use CHECK. -#define nnAssert(v) CHECK(v) - -#define NN_RETURN_IF_ERROR(expr) \ - do { \ - int _errorCode = (expr); \ - if (_errorCode != ANEURALNETWORKS_NO_ERROR) { \ - return _errorCode; \ - } \ - } while (0) - -// Make a Duration from a duration in nanoseconds. If the value exceeds the max duration, return the -// maximum expressible duration. -Duration makeTimeoutDuration(uint64_t nanoseconds); - -// Make a Duration from a duration in nanoseconds. If the value exceeds the max duration, return the -// maximum expressible duration. If nanoseconds == -1, the duration is omitted. Precondition: -// nanoseconds >= -1 -OptionalDuration makeTimeoutDuration(int64_t nanoseconds); - -// Make a deadline from a duration. If the sum of the current time and the -// duration exceeds the max time, return a time point holding the maximum -// expressible time. -TimePoint makeDeadline(Duration duration); - -inline TimePoint makeDeadline(uint64_t duration) { - return makeDeadline(makeTimeoutDuration(duration)); -} - -// Convenience function. If the duration is provided, this function creates a -// deadline using makeDeadline. If the duration is not provided, this function -// returns std::nullopt. 
-inline OptionalTimePoint makeDeadline(OptionalDuration duration) { - return duration.has_value() ? std::make_optional(makeDeadline(*duration)) : OptionalTimePoint{}; -} -inline OptionalTimePoint makeDeadline(std::optional<uint64_t> duration) { - return duration.has_value() ? std::make_optional(makeDeadline(*duration)) : OptionalTimePoint{}; -} -inline OptionalTimePoint makeDeadline(int64_t duration) { - return makeDeadline(makeTimeoutDuration(duration)); -} - -// Returns true if the deadline has passed. Returns false if either the deadline -// has not been exceeded or if the deadline is not present. -bool hasDeadlinePassed(const OptionalTimePoint& deadline); - -// Returns true if an operand type is an extension type. -bool isExtensionOperandType(OperandType type); - -// Returns true if an operation type is an extension type. -bool isExtensionOperationType(OperationType type); - -// Returns the amount of space needed to store a value of the specified -// dimensions and type. For a tensor with unspecified rank or at least one -// unspecified dimension, returns zero. -// -// Aborts if the specified type is an extension type. -// Aborts if the size would overflow the return type. -// -// See also TypeManager::getSizeOfData(OperandType, const std::vector<uint32_t>&). -uint32_t nonExtensionOperandSizeOfData(OperandType type, const std::vector<uint32_t>& dimensions); - -// Returns the amount of space needed to store a value of the dimensions and -// type of this operand. For a tensor with unspecified rank or at least one -// unspecified dimension, returns zero. -// -// Aborts if the specified type is an extension type. -// Aborts if the size would overflow the return type. -// -// See also TypeManager::getSizeOfData(const Operand&). 
-inline uint32_t nonExtensionOperandSizeOfData(const Operand& operand) { - return nonExtensionOperandSizeOfData(operand.type, operand.dimensions); -} - -// Returns the amount of space needed to store a value of the specified -// dimensions and element size. For a tensor with unspecified rank or at least -// one unspecified dimension, returns zero. -// -// Aborts if the size would overflow the return type. -// -// See also TypeManager::getSizeOfData(const Operand&). -uint32_t sizeOfTensorData(uint32_t sizeOfElement, const std::vector<uint32_t>& dimensions); - -// Returns true if the amount of space needed to store a value of the specified -// dimensions and element size overflows the uint32_t type. -// -// Aborts if the specified type is an extension type. -// -// See also TypeManager::sizeOfDataOverflowsUInt32(OperandType, const std::vector<uint32_t>&). -bool nonExtensionOperandSizeOfDataOverflowsUInt32(OperandType type, - const std::vector<uint32_t>& dimensions); - -// Returns true if the amount of space needed to store a value of the specified -// dimensions and element size overflows the uint32_t type. -// -// See also TypeManager::sizeOfDataOverflowsUInt32(OperandType, const std::vector<uint32_t>&). -bool sizeOfTensorDataOverflowsUInt32(uint32_t elementSize, const std::vector<uint32_t>& dimensions); - -// Returns true if a non-extension operand type is a scalar type. -// -// Aborts if the specified type is an extension type. -// -// See also TypeManager::isTensorType(OperandType). -bool nonExtensionOperandTypeIsScalar(int type); - -// Whether an operand of tensor type has unspecified dimensions. -// -// Undefined behavior if the operand type is a scalar type. 
-bool tensorHasUnspecifiedDimensions(int type, const uint32_t* dim, uint32_t dimCount); -bool tensorHasUnspecifiedDimensions(OperandType type, const std::vector<uint32_t>& dimensions); -bool tensorHasUnspecifiedDimensions(OperandType type, const Dimensions& dimensions); -bool tensorHasUnspecifiedDimensions(const Operand& operand); -bool tensorHasUnspecifiedDimensions(const ANeuralNetworksOperandType* type); - -// Returns the number of padding bytes needed to align data starting at `index` with `length` number -// of bytes such that `index` + returned number of padding bytes is aligned. Refer to -// `getAlignmentForLength` for more information on alignment (such as what the current alignments -// are for different data lengths). -uint32_t alignBytesNeeded(uint32_t index, size_t length); - -// Does a detailed LOG(INFO) of the model -void logModelToInfo(const Model& model); - -inline std::string toString(uint32_t obj) { - return std::to_string(obj); -} - -template <typename Type> -std::string toString(const std::vector<Type>& range) { - std::string os = "["; - for (size_t i = 0; i < range.size(); ++i) { - os += (i == 0 ? "" : ", ") + toString(range[i]); - } - return os += "]"; -} - -template <typename A, typename B> -std::string toString(const std::pair<A, B>& pair) { - std::ostringstream oss; - oss << "(" << pair.first << ", " << pair.second << ")"; - return oss.str(); -} - -inline bool validCode(uint32_t codeCount, uint32_t codeCountOEM, uint32_t code) { - return (code < codeCount) || (code >= kOEMCodeBase && (code - kOEMCodeBase) < codeCountOEM); -} - -// Validates an operand type. -// -// extensionOperandTypeInfo must be nullptr iff the type is not an extension type. -// -// If allowPartial is true, the dimensions may be underspecified. 
-int validateOperandType(const ANeuralNetworksOperandType& type, - const Extension::OperandTypeInformation* const extensionOperandTypeInfo, - const char* tag, bool allowPartial); -int validateOperandList(uint32_t count, const uint32_t* list, uint32_t operandCount, - const char* tag); - -// A set of functions to help validate models containing IF or WHILE operations. -struct SubgraphValidationHelper { - // Checks if a given operand is a SUBGRAPH operand with a valid offset. - std::function<bool(const Operand&)> isValidSubgraphReference; - // Gets the input count of a subgraph referenced by a given operand. - std::function<uint32_t(const Operand&)> getSubgraphInputCount; - // Gets the output count of a subgraph referenced by a given operand. - std::function<uint32_t(const Operand&)> getSubgraphOutputCount; - // Gets the specified input operand of a subgraph referenced by a given operand. - std::function<const Operand*(const Operand&, uint32_t)> getSubgraphInputOperand; - // Gets the specified output operand of a subgraph referenced by a given operand. - std::function<const Operand*(const Operand&, uint32_t)> getSubgraphOutputOperand; - // Whether control flow operations with inner or outer input or output - // operands of unknown size are allowed. - bool allowControlFlowOperationWithOperandOfUnknownSize; -}; - -// Returns ANEURALNETWORKS_NO_ERROR if the corresponding operation is defined and can handle the -// provided operand types in the given HAL version, otherwise returns ANEURALNETWORKS_BAD_DATA. -// The last argument is only used for validating IF and WHILE operations. 
-int validateOperation(ANeuralNetworksOperationType opType, uint32_t inputCount, - const uint32_t* inputIndexes, uint32_t outputCount, - const uint32_t* outputIndexes, const std::vector<Operand>& operands, - HalVersion halVersion, const SubgraphValidationHelper& helper); - -inline size_t getSizeFromInts(int lower, int higher) { - return (uint32_t)(lower) + ((uint64_t)(uint32_t)(higher) << 32); -} - -// Convert ANEURALNETWORKS_* result code to ErrorStatus. -// Not guaranteed to be a 1-to-1 mapping. -ErrorStatus convertResultCodeToErrorStatus(int resultCode); - -// Convert ErrorStatus to ANEURALNETWORKS_* result code. -// Not guaranteed to be a 1-to-1 mapping. -int convertErrorStatusToResultCode(ErrorStatus status); - -// Convert execution results to runtime format. Additionally checks that the -// returned results abide by the HAL specification, and logs an error if the -// result violates the specification. -std::tuple<int, std::vector<OutputShape>, Timing> getExecutionResult( - ErrorStatus status, std::vector<OutputShape> outputShapes, Timing timing); - -constexpr Priority convertToCanonicalPriority(int32_t priority) { - switch (priority) { - case ANEURALNETWORKS_PRIORITY_LOW: - return Priority::LOW; - case ANEURALNETWORKS_PRIORITY_MEDIUM: - return Priority::MEDIUM; - case ANEURALNETWORKS_PRIORITY_HIGH: - return Priority::HIGH; - } - LOG(FATAL) << "unrecognized priority: " << priority; - return {}; -} - -// The function syncWait() has the same semantics as the system function -// ::sync_wait(), except that the syncWait() return value is semantically -// richer. The timeout parameter is in msecs. 
-enum class FenceState { - ACTIVE, // fence has not been signaled - SIGNALED, // fence has been signaled - ERROR, // fence has been placed in the error state - UNKNOWN, // either bad argument passed to syncWait(), or internal error -}; -FenceState syncWait(int fd, int timeout); - -#ifdef NN_DEBUGGABLE -uint32_t getProp(const char* str, uint32_t defaultValue = 0); -#endif // NN_DEBUGGABLE - -struct ApiVersion { - Version canonical; - int64_t featureLevel; -}; - -constexpr auto kHalVersionV1_0ToApi = ApiVersion{.canonical = Version::ANDROID_OC_MR1, - .featureLevel = ANEURALNETWORKS_FEATURE_LEVEL_1}; -constexpr auto kHalVersionV1_1ToApi = ApiVersion{.canonical = Version::ANDROID_P, - .featureLevel = ANEURALNETWORKS_FEATURE_LEVEL_2}; -constexpr auto kHalVersionV1_2ToApi = ApiVersion{.canonical = Version::ANDROID_Q, - .featureLevel = ANEURALNETWORKS_FEATURE_LEVEL_3}; -constexpr auto kHalVersionV1_3ToApi = ApiVersion{.canonical = Version::ANDROID_R, - .featureLevel = ANEURALNETWORKS_FEATURE_LEVEL_4}; -} // namespace nn -} // namespace android - -#endif // ANDROID_FRAMEWORKS_ML_NN_COMMON_LEGACY_UTILS_H
diff --git a/common/include/MetaModel.h b/common/include/MetaModel.h index 78c3b19..c22ee1b 100644 --- a/common/include/MetaModel.h +++ b/common/include/MetaModel.h
@@ -17,8 +17,9 @@ #ifndef ANDROID_FRAMEWORKS_ML_NN_COMMON_META_MODEL_H #define ANDROID_FRAMEWORKS_ML_NN_COMMON_META_MODEL_H -#include <android-base/macros.h> +#include "HalInterfaces.h" +#include <android-base/macros.h> #include <functional> #include <map> #include <optional> @@ -26,31 +27,29 @@ #include <utility> #include <vector> -#include "nnapi/Types.h" - namespace android::nn { // The MetaModel class encapsulates a Model and provides machinery to create // from that original Model a "slice" of that Model consisting of: -// - the subset of operations that is compliant with a particular version; and +// - the subset of operations that is compliant with a particular HAL version; and // - a mechanism for mapping operations from the slice back to operations of the // original Model. -// The slice is intended to be passed to IDevice::getSupportedOperations(), +// The slice is intended to be passed to IDevice::getSupportedOperations*(), // with the mapping used to translate the results of that call from the slice's // operations to the original Model's operations. The slice has no other // purpose (for example, it is not guaranteed to have the same topology as a // subgraph of the original model). // -// When getSlice() is called, a slice is created and cached, if necessary; and -// then the cached slice is returned. +// When a getSlice*() method is called, a slice is created and cached, if +// necessary; and then the cached slice is returned. 
// -// The meaning of the return value of getSlice() is explained by the following -// example: +// The meaning of the return value of the getSlice*() methods is explained by +// the following example: // // const MetaModel& metaModel = ...; -// auto ret = metaModel.getSlice(Version::ANDROID_OC_MR1); +// auto ret = metaModel.getSliceV1_0(); // getSliceV1_1() is similar // if (ret.has_value()) { -// const Model model = ret->first; // the slice +// const V1_0::Model model = ret->first; // the slice // auto mapper = ret->second; // // mapper is a functor that takes an operation index in the // // slice and returns the corresponding operation index in the @@ -58,31 +57,33 @@ // // of the MetaModel. // } else { // // Could not obtain a slice. For example, perhaps none of the -// // original model's operations are compliant with -// // Version::ANDROID_OC_MR1. +// // original model's operations are compliant with V1_0. // } // class MetaModel { public: using Mapper = std::function<uint32_t(uint32_t)>; - using ReturnedSlice = std::optional<std::pair<Model, Mapper>>; + template <class T_Model> + using ReturnedSlice = std::optional<std::pair<T_Model, Mapper>>; - // Precondition: validate(model).has_value() - MetaModel(Model model, bool strictSlicing); + MetaModel(hal::Model model, bool strictSlicing) + : mHidlModel(std::move(model)), mStrictSlicing(strictSlicing) {} - const Model& getModel() const { return mModel; } + const hal::Model& getModel() const { return mHidlModel; } - ReturnedSlice getSlice(Version version) const; + ReturnedSlice<hal::V1_0::Model> getSliceV1_0() const { return getSlice(&mSliceV1_0); } + ReturnedSlice<hal::V1_1::Model> getSliceV1_1() const { return getSlice(&mSliceV1_1); } + ReturnedSlice<hal::V1_2::Model> getSliceV1_2() const { return getSlice(&mSliceV1_2); } // Disallowing copy constructor and assignment operator is for efficiency, // not for correctness. The default copy constructor and assignment // operator would work fine. 
However, they could be surprisingly expensive - // if the mCachedSlices member gets copied: Up to one Model instance and - // one std::vector instance per version could be copied. We could choose - // to accept this expense; or we could write custom copy and assign that do - // not copy the mCachedSlices member but instead set the destination - // mCachedSlices Slice::mState members to SliceState::UNINITIALIZED. + // if the mSlice* members get copied: Up to three Model instances and two + // std::vector instances could be copied. We could choose to accept this + // expense; or we could write custom copy and assign that do not copy the + // mSlice* members but instead set the destination mSlice* members to + // SliceState::UNINITIALIZED. // // There are no such issues with move constructor and move assignment. MetaModel(const MetaModel&) = delete; @@ -91,39 +92,50 @@ MetaModel& operator=(MetaModel&&) = default; private: - Model mModel; - Version mModelMinimumSupportedVersion; + hal::Model mHidlModel; - // mStrictSlicing controls validity checking. If the slicing algorithm + // mStrictSlicing controls sanity checking. If the slicing algorithm // produces an invalid model (because something has gone wrong with the - // algorithm or with a utility function it depends on), getSlice() can + // algorithm or with a utility function it depends on), getSlice*() can // return an std::optional<> for which has_value() returns false, signifying // that no slice is available. However, if mStrictSlicing is true, - // getSlice() cause a CHECK*() to fail. This can be used in debugging to + // getSlice*() cause a CHECK*() to fail. This can be used in debugging to // find situations where slicing has failed unexpectedly. 
bool mStrictSlicing; enum class SliceState { UNINITIALIZED, INVALID, NORMAL }; + template <class T_SlicedModel> struct Slice { SliceState mState = SliceState::UNINITIALIZED; - Model mModel; + T_SlicedModel mHidlModel; std::vector<uint32_t> mSlicedOperationIndexToOrigIndex; + + using Operand = typename decltype(mHidlModel.operands)::value_type; + using Operation = typename decltype(mHidlModel.operations)::value_type; + using OperationType = decltype(Operation::type); }; + mutable Slice<hal::V1_0::Model> mSliceV1_0; + mutable Slice<hal::V1_1::Model> mSliceV1_1; + mutable Slice<hal::V1_2::Model> mSliceV1_2; - mutable std::map<Version, Slice> mCachedSlices; + template <class T_SlicedModel> + ReturnedSlice<T_SlicedModel> getSlice(Slice<T_SlicedModel>* slice) const; - Slice makeSlice(Version version) const; - - std::set<uint32_t> getNoncompliantOperations(Version version) const; + template <class T_SlicedModel> + Slice<T_SlicedModel> makeSlice() const; // Utility class for makeSlice(). + template <typename T_SlicedOperand> class OrigOperandToSlicedInputOperandIndex; // Utility function for makeSlice(): Walks operations of original // model and populates sliced model accordingly. + template <class T_SlicedModel> void processOperations( - Slice* slice, std::map<uint32_t, uint32_t>* origOperandIndexToSlicedIndex, - OrigOperandToSlicedInputOperandIndex* origOperandToSlicedInputOperandIndex, + Slice<T_SlicedModel>* slice, + std::map<uint32_t, uint32_t>* origOperandIndexToSlicedIndex, + OrigOperandToSlicedInputOperandIndex<typename Slice<T_SlicedModel>::Operand>* + origOperandToSlicedInputOperandIndex, const std::set<uint32_t>& noncompliantOperations, const std::set<uint32_t>& inputOperandIndexesOfCompliantOperations) const; };
diff --git a/common/include/OperationResolver.h b/common/include/OperationResolver.h index 04719b9..ab70e4c 100644 --- a/common/include/OperationResolver.h +++ b/common/include/OperationResolver.h
@@ -17,8 +17,7 @@ #ifndef ANDROID_FRAMEWORKS_ML_NN_COMMON_OPERATION_RESOLVER_H #define ANDROID_FRAMEWORKS_ML_NN_COMMON_OPERATION_RESOLVER_H -#include <utility> - +#include "HalInterfaces.h" #include "OperationsUtils.h" namespace android { @@ -26,11 +25,11 @@ // Encapsulates an operation implementation. struct OperationRegistration { - OperationType type; + hal::OperationType type; const char* name; // Validates operand types, shapes, and any values known during graph creation. - std::function<Result<Version>(const IOperationValidationContext*)> validate; + std::function<bool(const IOperationValidationContext*)> validate; // prepare is called when the inputs this operation depends on have been // computed. Typically, prepare does any remaining validation and sets @@ -48,23 +47,22 @@ bool allowZeroSizedInput = false; } flags; - OperationRegistration( - OperationType type, const char* name, - std::function<Result<Version>(const IOperationValidationContext*)> validate, - std::function<bool(IOperationExecutionContext*)> prepare, - std::function<bool(IOperationExecutionContext*)> execute, Flag flags) + OperationRegistration(hal::OperationType type, const char* name, + std::function<bool(const IOperationValidationContext*)> validate, + std::function<bool(IOperationExecutionContext*)> prepare, + std::function<bool(IOperationExecutionContext*)> execute, Flag flags) : type(type), name(name), - validate(std::move(validate)), - prepare(std::move(prepare)), - execute(std::move(execute)), + validate(validate), + prepare(prepare), + execute(execute), flags(flags) {} }; // A registry of operation implementations. 
class IOperationResolver { public: - virtual const OperationRegistration* findOperation(OperationType operationType) const = 0; + virtual const OperationRegistration* findOperation(hal::OperationType operationType) const = 0; virtual ~IOperationResolver() {} }; @@ -88,10 +86,7 @@ return &instance; } - const OperationRegistration* findOperation(OperationType operationType) const override; - - // The number of operation types (OperationCode) defined in NeuralNetworks.h. - static constexpr int kNumberOfOperationTypes = 102; + const OperationRegistration* findOperation(hal::OperationType operationType) const override; private: BuiltinOperationResolver(); @@ -121,11 +116,11 @@ // .allowZeroSizedInput = true); // #ifdef NN_INCLUDE_CPU_IMPLEMENTATION -#define NN_REGISTER_OPERATION(identifier, operationName, validate, prepare, execute, ...) \ - const OperationRegistration* register_##identifier() { \ - static OperationRegistration registration(OperationType::identifier, operationName, \ - validate, prepare, execute, {__VA_ARGS__}); \ - return ®istration; \ +#define NN_REGISTER_OPERATION(identifier, operationName, validate, prepare, execute, ...) \ + const OperationRegistration* register_##identifier() { \ + static OperationRegistration registration(hal::OperationType::identifier, operationName, \ + validate, prepare, execute, {__VA_ARGS__}); \ + return ®istration; \ } #else // This version ignores CPU execution logic (prepare and execute). @@ -134,7 +129,7 @@ #define NN_REGISTER_OPERATION(identifier, operationName, validate, unused_prepare, unused_execute, \ ...) \ const OperationRegistration* register_##identifier() { \ - static OperationRegistration registration(OperationType::identifier, operationName, \ + static OperationRegistration registration(hal::OperationType::identifier, operationName, \ validate, nullptr, nullptr, {__VA_ARGS__}); \ return ®istration; \ }
diff --git a/common/include/Operations.h b/common/include/Operations.h index 89b55ad..e4df10f 100644 --- a/common/include/Operations.h +++ b/common/include/Operations.h
@@ -17,14 +17,6 @@ #ifndef ANDROID_FRAMEWORKS_ML_NN_COMMON_OPERATIONS_H #define ANDROID_FRAMEWORKS_ML_NN_COMMON_OPERATIONS_H -#include <stddef.h> - -#include <cstdint> -#include <vector> - -#include "ActivationFunctor.h" - -#ifndef NN_COMPATIBILITY_LIBRARY_BUILD #include "operations/BidirectionalSequenceLSTM.h" #include "operations/Cast.h" #include "operations/EmbeddingLookup.h" @@ -39,7 +31,11 @@ #include "operations/RNN.h" #include "operations/SVDF.h" #include "operations/Tile.h" -#endif // NN_COMPATIBILITY_LIBRARY_BUILD + +#include <stddef.h> + +#include <cstdint> +#include <vector> namespace android { namespace nn {
diff --git a/common/include/OperationsUtils.h b/common/include/OperationsUtils.h index 9123139..a8a07db 100644 --- a/common/include/OperationsUtils.h +++ b/common/include/OperationsUtils.h
@@ -21,8 +21,8 @@ #include <cstdint> #include <vector> -#include "nnapi/TypeUtils.h" -#include "nnapi/Types.h" +#include "HalInterfaces.h" +#include "Utils.h" namespace android { namespace nn { @@ -45,11 +45,11 @@ // Stores operand type information. "Shape" is a historical name. struct Shape { - OperandType type = OperandType::FLOAT32; + hal::OperandType type = hal::OperandType::FLOAT32; std::vector<uint32_t> dimensions; float scale = 0.0f; int32_t offset = 0; - Operand::ExtraParams extraParams; + hal::OperandExtraParams extraParams; }; // Provides information available during graph creation to validate an operation. @@ -59,13 +59,29 @@ virtual const char* getOperationName() const = 0; + // The HAL version of the environment in which the operation is to be + // executed. + // + // Operation validation logic needs to handle all HAL versions to support + // the following use cases (assume in these examples that the latest HAL + // version is V1_2): + // 1. Our runtime wants to distribute work to a driver implementing an older + // HAL version and calls, for example, + // compliantWithV1_0(const V1_2::Model&). + // 2. A driver implements an older HAL version and delegates model + // validation to, for example, validateModel(const V1_0::Model&). + // + // If getHalVersion() returns HalVersion::V1_0 and the operation + // is only supported since HalVersion::V1_1, validation will fail. 
+ virtual HalVersion getHalVersion() const = 0; + virtual uint32_t getNumInputs() const = 0; - virtual OperandType getInputType(uint32_t index) const = 0; + virtual hal::OperandType getInputType(uint32_t index) const = 0; virtual Shape getInputShape(uint32_t index) const = 0; - virtual const Operand::ExtraParams& getInputExtraParams(uint32_t index) const = 0; + virtual const hal::OperandExtraParams getInputExtraParams(uint32_t index) const = 0; virtual uint32_t getNumOutputs() const = 0; - virtual OperandType getOutputType(uint32_t index) const = 0; + virtual hal::OperandType getOutputType(uint32_t index) const = 0; virtual Shape getOutputShape(uint32_t index) const = 0; }; @@ -75,13 +91,13 @@ virtual ~IOperationExecutionContext() {} virtual uint32_t getNumInputs() const = 0; - virtual OperandType getInputType(uint32_t index) const = 0; + virtual hal::OperandType getInputType(uint32_t index) const = 0; virtual Shape getInputShape(uint32_t index) const = 0; virtual const void* getInputBuffer(uint32_t index) const = 0; - virtual const Operand::ExtraParams& getInputExtraParams(uint32_t index) const = 0; + virtual const hal::OperandExtraParams getInputExtraParams(uint32_t index) const = 0; virtual uint32_t getNumOutputs() const = 0; - virtual OperandType getOutputType(uint32_t index) const = 0; + virtual hal::OperandType getOutputType(uint32_t index) const = 0; virtual Shape getOutputShape(uint32_t index) const = 0; virtual void* getOutputBuffer(uint32_t index) = 0; @@ -109,16 +125,16 @@ // Verifies that the number and types of operation inputs are as expected. bool validateInputTypes(const IOperationValidationContext* context, - const std::vector<OperandType>& expectedTypes); + const std::vector<hal::OperandType>& expectedTypes); // Verifies that the number and types of operation outputs are as expected. 
bool validateOutputTypes(const IOperationValidationContext* context, - const std::vector<OperandType>& expectedTypes); + const std::vector<hal::OperandType>& expectedTypes); // Verifies that the HAL version specified in the context is greater or equal // than the minimal supported HAL version. -bool validateVersion(const IOperationValidationContext* context, Version contextVersion, - Version minSupportedVersion); +bool validateHalVersion(const IOperationValidationContext* context, + HalVersion minSupportedHalVersion); // Verifies that the two shapes are the same. bool SameShape(const Shape& in1, const Shape& in2);
diff --git a/common/include/TokenHasher.h b/common/include/TokenHasher.h index a849619..3245138 100644 --- a/common/include/TokenHasher.h +++ b/common/include/TokenHasher.h
@@ -17,12 +17,12 @@ #ifndef ANDROID_FRAMEWORKS_ML_NN_COMMON_TOKEN_HASHER_H #define ANDROID_FRAMEWORKS_ML_NN_COMMON_TOKEN_HASHER_H -#include <android-base/macros.h> -#include <openssl/sha.h> - #include <cstring> #include <vector> +#include <android-base/macros.h> +#include <openssl/sha.h> + namespace android { namespace nn {
diff --git a/common/include/Tracing.h b/common/include/Tracing.h index 1953fe1..e461b2b 100644 --- a/common/include/Tracing.h +++ b/common/include/Tracing.h
@@ -17,10 +17,8 @@ #ifndef ANDROID_FRAMEWORKS_ML_NN_COMMON_TRACING_H #define ANDROID_FRAMEWORKS_ML_NN_COMMON_TRACING_H -#ifndef NN_COMPATIBILITY_LIBRARY_BUILD #define ATRACE_TAG ATRACE_TAG_NNAPI #include <utils/Trace.h> -#endif // NN_COMPATIBILITY_LIBRARY_BUILD // Neural Networks API (NNAPI) systracing // @@ -151,8 +149,6 @@ #define NNTRACE_LAYER_OTHER "LO" #define NNTRACE_LAYER_UTILITY "LU" // Code used from multiple layers -#ifndef NN_COMPATIBILITY_LIBRARY_BUILD - // Implementation // // Almost same as ATRACE_NAME, but enforcing explicit distinction between @@ -167,13 +163,6 @@ android::ScopedTrace PASTE(___tracer, __LINE__)(ATRACE_TAG, name); \ (void)___tracer_1 // ensure switch is only used after a basic trace -#else - -#define NNTRACE_NAME_1(name) // empty -#define NNTRACE_NAME_SWITCH(name) // empty - -#endif // NN_COMPATIBILITY_LIBRARY_BUILD - // Disallow use of raw ATRACE macros #undef ATRACE_NAME #undef ATRACE_CALL
diff --git a/common/include/Utils.h b/common/include/Utils.h index 7ac1e59..4bbbf82 100644 --- a/common/include/Utils.h +++ b/common/include/Utils.h
@@ -17,7 +17,569 @@ #ifndef ANDROID_FRAMEWORKS_ML_NN_COMMON_UTILS_H #define ANDROID_FRAMEWORKS_ML_NN_COMMON_UTILS_H -#include "LegacyHalUtils.h" -#include "LegacyUtils.h" +#include <android-base/logging.h> + +#include <set> +#include <string> +#include <tuple> +#include <utility> +#include <vector> + +#include "HalInterfaces.h" +#include "NeuralNetworks.h" +#include "ValidateHal.h" + +namespace android { +namespace nn { + +// The number of data types (OperandCode) defined in NeuralNetworks.h. +const int kNumberOfDataTypes = 16; + +// The number of operation types (OperationCode) defined in NeuralNetworks.h. +const int kNumberOfOperationTypes = 102; + +// The number of execution preferences defined in NeuralNetworks.h. +const int kNumberOfPreferences = 3; + +// The number of data types (OperandCode) defined in NeuralNetworksOEM.h. +const int kNumberOfDataTypesOEM = 2; + +// The number of operation types (OperationCode) defined in NeuralNetworksOEM.h. +const int kNumberOfOperationTypesOEM = 1; + +// The lowest number assigned to any OEM Code in NeuralNetworksOEM.h. +const int kOEMCodeBase = 10000; + +/* IMPORTANT: if you change the following list, don't + * forget to update the corresponding 'tags' table in + * the initVlogMask() function implemented in Utils.cpp. + */ +enum VLogFlags { MODEL = 0, COMPILATION, EXECUTION, CPUEXE, MANAGER, DRIVER, MEMORY }; + +#define VLOG_IS_ON(TAG) ((vLogMask & (1 << (TAG))) != 0) + +#define VLOG(TAG) \ + if (LIKELY(!VLOG_IS_ON(TAG))) \ + ; \ + else \ + LOG(INFO) + +extern int vLogMask; +void initVLogMask(); + +#ifdef NN_DEBUGGABLE +#define SHOW_IF_DEBUG(msg) msg +#else +#define SHOW_IF_DEBUG(msg) "" +#endif + +// DEPRECATED(b/118737105). Use CHECK. 
+#define nnAssert(v) CHECK(v) + +#define NN_RETURN_IF_ERROR(expr) \ + do { \ + int _errorCode = (expr); \ + if (_errorCode != ANEURALNETWORKS_NO_ERROR) { \ + return _errorCode; \ + } \ + } while (0) + +// The NN_RET_CHECK family of macros defined below is similar to the CHECK family defined in +// system/core/base/include/android-base/logging.h +// +// The difference is that NN_RET_CHECK macros use LOG(ERROR) instead of LOG(FATAL) +// and return false instead of aborting. + +// Logs an error and returns false. Append context using << after. For example: +// +// NN_RET_CHECK_FAIL() << "Something went wrong"; +// +// The containing function must return a bool. +#define NN_RET_CHECK_FAIL() \ + return ::android::nn::FalseyErrorStream() \ + << "NN_RET_CHECK failed (" << __FILE__ << ":" << __LINE__ << "): " + +// Logs an error and returns false if condition is false. Extra logging can be appended using << +// after. For example: +// +// NN_RET_CHECK(false) << "Something went wrong"; +// +// The containing function must return a bool. +#define NN_RET_CHECK(condition) \ + while (UNLIKELY(!(condition))) NN_RET_CHECK_FAIL() << #condition << " " + +// Helper for NN_CHECK_xx(x, y) macros. +#define NN_RET_CHECK_OP(LHS, RHS, OP) \ + for (auto _values = ::android::base::MakeEagerEvaluator(LHS, RHS); \ + UNLIKELY(!(_values.lhs OP _values.rhs)); \ + /* empty */) \ + NN_RET_CHECK_FAIL() << #LHS << " " << #OP << " " << #RHS << " (" << #LHS << " = " \ + << _values.lhs << ", " << #RHS << " = " << _values.rhs << ") " + +// Logs an error and returns false if a condition between x and y does not hold. Extra logging can +// be appended using << after. For example: +// +// NN_RET_CHECK_EQ(a, b) << "Something went wrong"; +// +// The values must implement the appropriate comparison operator as well as +// `operator<<(std::ostream&, ...)`. +// The containing function must return a bool. 
+#define NN_RET_CHECK_EQ(x, y) NN_RET_CHECK_OP(x, y, ==) +#define NN_RET_CHECK_NE(x, y) NN_RET_CHECK_OP(x, y, !=) +#define NN_RET_CHECK_LE(x, y) NN_RET_CHECK_OP(x, y, <=) +#define NN_RET_CHECK_LT(x, y) NN_RET_CHECK_OP(x, y, <) +#define NN_RET_CHECK_GE(x, y) NN_RET_CHECK_OP(x, y, >=) +#define NN_RET_CHECK_GT(x, y) NN_RET_CHECK_OP(x, y, >) + +// Type to represent a deadline time point across processes. +using Deadline = std::chrono::steady_clock::time_point; + +// Make an Deadline from a duration. If the sum of the current time and the +// duration exceeds the max time, return a time point holding the maximum +// expressible time. +Deadline makeDeadline(uint64_t duration); + +// Convenience function. If the duration is provided, this function creates a +// Deadline using makeDeadline. If the duration is not provided, this function +// returns std::nullopt. +std::optional<Deadline> makeDeadline(std::optional<uint64_t> duration); + +// Make an optional Deadline from an OptionalTimePoint. If +// timePoint.nanosecondsSinceEpoch cannot be represented in Deadline, return a +// time point holding the maximum Deadline. If the OptionalTimePoint is none, +// this function returns std::nullopt. +std::optional<Deadline> makeDeadline(const hal::OptionalTimePoint& timePoint); + +// Returns true if the deadline has passed. Returns false if either the deadline +// has not been exceeded or if the deadline is not present. +bool hasDeadlinePassed(const std::optional<Deadline>& deadline); + +// Make an OptionalTimePoint from an optional Deadline. If the Deadline is not +// provided, this function returns none for OptionalTimePoint. +hal::OptionalTimePoint makeTimePoint(const std::optional<Deadline>& deadline); + +// Ensure that every user of FalseyErrorStream is linked to the +// correct instance, using the correct LOG_TAG +namespace { + +// A wrapper around LOG(ERROR) that can be implicitly converted to bool (always evaluates to false). 
+// Used to implement stream logging in NN_RET_CHECK. +class FalseyErrorStream { + DISALLOW_COPY_AND_ASSIGN(FalseyErrorStream); + + public: + FalseyErrorStream() {} + + template <typename T> + FalseyErrorStream& operator<<(const T& value) { + mBuffer << value; + return *this; + } + + ~FalseyErrorStream() { LOG(ERROR) << mBuffer.str(); } + + operator bool() const { return false; } + + private: + std::ostringstream mBuffer; +}; + +template <HalVersion version> +struct VersionedType {}; + +template <> +struct VersionedType<HalVersion::V1_2> { + using OperandPerformance = hal::V1_2::Capabilities::OperandPerformance; + using OperandType = hal::V1_2::OperandType; +}; + +template <> +struct VersionedType<HalVersion::V1_3> { + using OperandPerformance = hal::V1_3::Capabilities::OperandPerformance; + using OperandType = hal::V1_3::OperandType; +}; + +template <HalVersion version> +using VersionedOperandPerformance = typename VersionedType<version>::OperandPerformance; +template <HalVersion version> +using VersionedOperandType = typename VersionedType<version>::OperandType; + +} // namespace + +// Return a vector with one entry for each non-extension OperandType except +// SUBGRAPH, set to the specified PerformanceInfo value. The vector will be +// sorted by OperandType. +// +// Control flow (OperandType::SUBGRAPH) operation performance is specified +// separately using Capabilities::ifPerformance and +// Capabilities::whilePerformance. +template <HalVersion version> +hal::hidl_vec<VersionedOperandPerformance<version>> nonExtensionOperandPerformance( + hal::PerformanceInfo perf); + +// Update the vector entry corresponding to the specified OperandType with the +// specified PerformanceInfo value. The vector must already have an entry for +// that OperandType, and must be sorted by OperandType. 
+void update(hal::hidl_vec<hal::V1_2::Capabilities::OperandPerformance>* operandPerformance, + hal::V1_2::OperandType type, hal::PerformanceInfo perf); +void update(hal::hidl_vec<hal::V1_3::Capabilities::OperandPerformance>* operandPerformance, + hal::V1_3::OperandType type, hal::PerformanceInfo perf); + +// Look for a vector entry corresponding to the specified OperandType. If +// found, return the associated PerformanceInfo. If not, return a pessimistic +// PerformanceInfo (FLT_MAX). The vector must be sorted by OperandType. +hal::PerformanceInfo lookup( + const hal::hidl_vec<hal::V1_2::Capabilities::OperandPerformance>& operandPerformance, + hal::V1_2::OperandType type); +hal::PerformanceInfo lookup( + const hal::hidl_vec<hal::V1_3::Capabilities::OperandPerformance>& operandPerformance, + hal::V1_3::OperandType type); + +// Returns true if an operand type is an extension type. +bool isExtensionOperandType(hal::OperandType type); + +// Returns true if an operation type is an extension type. +bool isExtensionOperationType(hal::OperationType type); + +// Returns the amount of space needed to store a value of the specified +// dimensions and type. For a tensor with unspecified rank or at least one +// unspecified dimension, returns zero. +// +// Aborts if the specified type is an extension type. +// Aborts if the size would overflow the return type. +// +// See also TypeManager::getSizeOfData(OperandType, const std::vector<uint32_t>&). +uint32_t nonExtensionOperandSizeOfData(hal::OperandType type, + const std::vector<uint32_t>& dimensions); + +// Returns the amount of space needed to store a value of the dimensions and +// type of this operand. For a tensor with unspecified rank or at least one +// unspecified dimension, returns zero. +// +// Aborts if the specified type is an extension type. +// Aborts if the size would overflow the return type. +// +// See also TypeManager::getSizeOfData(const Operand&). 
+inline uint32_t nonExtensionOperandSizeOfData(const hal::Operand& operand) { + return nonExtensionOperandSizeOfData(operand.type, operand.dimensions); +} + +// Returns the amount of space needed to store a value of the specified +// dimensions and element size. For a tensor with unspecified rank or at least +// one unspecified dimension, returns zero. +// +// Aborts if the size would overflow the return type. +// +// See also TypeManager::getSizeOfData(const Operand&). +uint32_t sizeOfTensorData(uint32_t sizeOfElement, const std::vector<uint32_t>& dimensions); + +// Returns true if the amount of space needed to store a value of the specified +// dimensions and element size overflows the uint32_t type. +// +// Aborts if the specified type is an extension type. +// +// See also TypeManager::sizeOfDataOverflowsUInt32(OperandType, const std::vector<uint32_t>&). +bool nonExtensionOperandSizeOfDataOverflowsUInt32(hal::OperandType type, + const std::vector<uint32_t>& dimensions); + +// Returns true if the amount of space needed to store a value of the specified +// dimensions and element size overflows the uint32_t type. +// +// See also TypeManager::sizeOfDataOverflowsUInt32(OperandType, const std::vector<uint32_t>&). +bool sizeOfTensorDataOverflowsUInt32(uint32_t elementSize, const std::vector<uint32_t>& dimensions); + +// Returns true if a non-extension operand type is a scalar type. +// +// Aborts if the specified type is an extension type. +// +// See also TypeManager::isTensorType(OperandType). +bool nonExtensionOperandTypeIsScalar(int type); + +// Returns the name of the operation type in ASCII. +std::string getOperationName(hal::OperationType opCode); + +// Returns the name of the operand type in ASCII. +std::string getOperandTypeName(hal::OperandType type); + +// Whether an operand of tensor type has unspecified dimensions. +// +// Undefined behavior if the operand type is a scalar type. 
+bool tensorHasUnspecifiedDimensions(int type, const uint32_t* dim, uint32_t dimCount); +bool tensorHasUnspecifiedDimensions(hal::OperandType type, const std::vector<uint32_t>& dimensions); +bool tensorHasUnspecifiedDimensions(const hal::Operand& operand); +bool tensorHasUnspecifiedDimensions(const ANeuralNetworksOperandType* type); + +// Returns the number of padding bytes needed to align data of the +// specified length. It aligns object of length: +// 2, 3 on a 2 byte boundary, +// 4+ on a 4 byte boundary. +// We may want to have different alignments for tensors. +// TODO: This is arbitrary, more a proof of concept. We need +// to determine what this should be. +uint32_t alignBytesNeeded(uint32_t index, size_t length); + +// Does a detailed LOG(INFO) of the model +void logModelToInfo(const hal::V1_0::Model& model); +void logModelToInfo(const hal::V1_1::Model& model); +void logModelToInfo(const hal::V1_2::Model& model); +void logModelToInfo(const hal::V1_3::Model& model); + +inline std::string toString(uint32_t obj) { + return std::to_string(obj); +} + +template <typename Type> +std::string toString(const std::vector<Type>& range) { + std::string os = "["; + for (size_t i = 0; i < range.size(); ++i) { + os += (i == 0 ? 
"" : ", ") + toString(range[i]); + } + return os += "]"; +} + +template <typename A, typename B> +std::string toString(const std::pair<A, B>& pair) { + std::ostringstream oss; + oss << "(" << toString(pair.first) << ", " << toString(pair.second) << ")"; + return oss.str(); +} + +inline std::string toString(HalVersion halVersion) { + switch (halVersion) { + case HalVersion::UNKNOWN: + return "UNKNOWN HAL version"; + case HalVersion::V1_0: + return "HAL version 1.0"; + case HalVersion::V1_1: + return "HAL version 1.1"; + case HalVersion::V1_2: + return "HAL version 1.2"; + case HalVersion::V1_3: + return "HAL version 1.3"; + } +} + +inline bool validCode(uint32_t codeCount, uint32_t codeCountOEM, uint32_t code) { + return (code < codeCount) || (code >= kOEMCodeBase && (code - kOEMCodeBase) < codeCountOEM); +} + +bool validateOperandSymmPerChannelQuantParams( + const hal::Operand& halOperand, + const ANeuralNetworksSymmPerChannelQuantParams& channelQuant, const char* tag); + +// Validates an operand type. +// +// extensionOperandTypeInfo must be nullptr iff the type is not an extension type. +// +// If allowPartial is true, the dimensions may be underspecified. +int validateOperandType( + const ANeuralNetworksOperandType& type, + const hal::Extension::OperandTypeInformation* const extensionOperandTypeInfo, + const char* tag, bool allowPartial); +int validateOperandList(uint32_t count, const uint32_t* list, uint32_t operandCount, + const char* tag); + +// A set of functions to help validate models containing IF or WHILE operations. +struct SubgraphValidationHelper { + // Checks if a given operand is a SUBGRAPH operand with a valid offset. + std::function<bool(const hal::Operand&)> isValidSubgraphReference; + // Gets the input count of a subgraph referenced by a given operand. + std::function<uint32_t(const hal::Operand&)> getSubgraphInputCount; + // Gets the output count of a subgraph referenced by a given operand. 
+ std::function<uint32_t(const hal::Operand&)> getSubgraphOutputCount; + // Gets the specified input operand of a subgraph referenced by a given operand. + std::function<const hal::Operand*(const hal::Operand&, uint32_t)> getSubgraphInputOperand; + // Gets the specified output operand of a subgraph referenced by a given operand. + std::function<const hal::Operand*(const hal::Operand&, uint32_t)> getSubgraphOutputOperand; + // Whether control flow operations with inner or outer input or output + // operands of unknown size are allowed. + bool allowControlFlowOperationWithOperandOfUnknownSize; +}; + +// Returns ANEURALNETWORKS_NO_ERROR if the corresponding operation is defined and can handle the +// provided operand types in the given HAL version, otherwise returns ANEURALNETWORKS_BAD_DATA. +// The last argument is only used for validating IF and WHILE operations. +int validateOperation(ANeuralNetworksOperationType opType, uint32_t inputCount, + const uint32_t* inputIndexes, uint32_t outputCount, + const uint32_t* outputIndexes, const std::vector<hal::Operand>& operands, + HalVersion halVersion, const SubgraphValidationHelper& helper); + +inline size_t getSizeFromInts(int lower, int higher) { + return (uint32_t)(lower) + ((uint64_t)(uint32_t)(higher) << 32); +} + +// Convert ANEURALNETWORKS_* result code to ErrorStatus. +// Not guaranteed to be a 1-to-1 mapping. +hal::ErrorStatus convertResultCodeToErrorStatus(int resultCode); + +// Convert ErrorStatus to ANEURALNETWORKS_* result code. +// Not guaranteed to be a 1-to-1 mapping. +int convertErrorStatusToResultCode(hal::ErrorStatus status); + +// Convert execution results to runtime format. Additionally checks that the +// returned results abide by the HAL specification, and logs an error if the +// result violates the specification. 
+std::tuple<int, std::vector<hal::OutputShape>, hal::Timing> getExecutionResult( + hal::ErrorStatus status, std::vector<hal::OutputShape> outputShapes, hal::Timing timing); + +// Combine two tensor dimensions, both may have unspecified dimensions or rank. +std::optional<std::vector<uint32_t>> combineDimensions(const std::vector<uint32_t>& lhs, + const std::vector<uint32_t>& rhs); + +// Versioning + +bool compliantWithV1_0(const hal::V1_0::Capabilities& capabilities); +bool compliantWithV1_0(const hal::V1_1::Capabilities& capabilities); +bool compliantWithV1_0(const hal::V1_2::Capabilities& capabilities); +bool compliantWithV1_0(const hal::V1_3::Capabilities& capabilities); +bool compliantWithV1_1(const hal::V1_0::Capabilities& capabilities); +bool compliantWithV1_1(const hal::V1_1::Capabilities& capabilities); +bool compliantWithV1_1(const hal::V1_2::Capabilities& capabilities); +bool compliantWithV1_1(const hal::V1_3::Capabilities& capabilities); +bool compliantWithV1_2(const hal::V1_0::Capabilities& capabilities); +bool compliantWithV1_2(const hal::V1_1::Capabilities& capabilities); +bool compliantWithV1_2(const hal::V1_2::Capabilities& capabilities); +bool compliantWithV1_2(const hal::V1_3::Capabilities& capabilities); +bool compliantWithV1_3(const hal::V1_0::Capabilities& capabilities); +bool compliantWithV1_3(const hal::V1_1::Capabilities& capabilities); +bool compliantWithV1_3(const hal::V1_2::Capabilities& capabilities); +bool compliantWithV1_3(const hal::V1_3::Capabilities& capabilities); + +// If noncompliantOperations != nullptr, then +// precondition: noncompliantOperations->empty() +// postcondition: *noncompliantOperations consists of the indices of the noncompliant +// operations; if the compliance check fails for some reason +// other than a noncompliant operation, +// *noncompliantOperations consists of the indices of all operations +bool compliantWithV1_0(const hal::V1_0::Model& model); +bool compliantWithV1_0(const hal::V1_1::Model& model); +bool 
compliantWithV1_0(const hal::V1_2::Model& model, + std::set<uint32_t>* noncompliantOperations = nullptr); +bool compliantWithV1_0(const hal::V1_3::Model& model, + std::set<uint32_t>* noncompliantOperations = nullptr); +bool compliantWithV1_1(const hal::V1_0::Model& model); +bool compliantWithV1_1(const hal::V1_1::Model& model); +bool compliantWithV1_1(const hal::V1_2::Model& model, + std::set<uint32_t>* noncompliantOperations = nullptr); +bool compliantWithV1_1(const hal::V1_3::Model& model, + std::set<uint32_t>* noncompliantOperations = nullptr); +bool compliantWithV1_2(const hal::V1_0::Model& model); +bool compliantWithV1_2(const hal::V1_1::Model& model); +bool compliantWithV1_2(const hal::V1_2::Model& model, + std::set<uint32_t>* noncompliantOperations = nullptr); +bool compliantWithV1_2(const hal::V1_3::Model& model, + std::set<uint32_t>* noncompliantOperations = nullptr); + +hal::V1_0::ErrorStatus convertToV1_0(hal::V1_0::ErrorStatus status); +hal::V1_0::ErrorStatus convertToV1_0(hal::V1_3::ErrorStatus status); +hal::V1_3::ErrorStatus convertToV1_3(hal::V1_0::ErrorStatus status); +hal::V1_3::ErrorStatus convertToV1_3(hal::V1_3::ErrorStatus status); + +hal::V1_0::Capabilities convertToV1_0(const hal::V1_0::Capabilities& capabilities); +hal::V1_0::Capabilities convertToV1_0(const hal::V1_1::Capabilities& capabilities); +hal::V1_0::Capabilities convertToV1_0(const hal::V1_2::Capabilities& capabilities); +hal::V1_0::Capabilities convertToV1_0(const hal::V1_3::Capabilities& capabilities); +hal::V1_1::Capabilities convertToV1_1(const hal::V1_0::Capabilities& capabilities); +hal::V1_1::Capabilities convertToV1_1(const hal::V1_1::Capabilities& capabilities); +hal::V1_1::Capabilities convertToV1_1(const hal::V1_2::Capabilities& capabilities); +hal::V1_1::Capabilities convertToV1_1(const hal::V1_3::Capabilities& capabilities); +hal::V1_2::Capabilities convertToV1_2(const hal::V1_0::Capabilities& capabilities); +hal::V1_2::Capabilities convertToV1_2(const 
hal::V1_1::Capabilities& capabilities); +hal::V1_2::Capabilities convertToV1_2(const hal::V1_2::Capabilities& capabilities); +hal::V1_2::Capabilities convertToV1_2(const hal::V1_3::Capabilities& capabilities); +hal::V1_3::Capabilities convertToV1_3(const hal::V1_0::Capabilities& capabilities); +hal::V1_3::Capabilities convertToV1_3(const hal::V1_1::Capabilities& capabilities); +hal::V1_3::Capabilities convertToV1_3(const hal::V1_2::Capabilities& capabilities); +hal::V1_3::Capabilities convertToV1_3(const hal::V1_3::Capabilities& capabilities); + +hal::V1_0::Model convertToV1_0(const hal::V1_0::Model& model); +hal::V1_0::Model convertToV1_0(const hal::V1_1::Model& model); +hal::V1_0::Model convertToV1_0(const hal::V1_2::Model& model); +hal::V1_0::Model convertToV1_0(const hal::V1_3::Model& model); +hal::V1_1::Model convertToV1_1(const hal::V1_0::Model& model); +hal::V1_1::Model convertToV1_1(const hal::V1_1::Model& model); +hal::V1_1::Model convertToV1_1(const hal::V1_2::Model& model); +hal::V1_1::Model convertToV1_1(const hal::V1_3::Model& model); +hal::V1_2::Model convertToV1_2(const hal::V1_0::Model& model); +hal::V1_2::Model convertToV1_2(const hal::V1_1::Model& model); +hal::V1_2::Model convertToV1_2(const hal::V1_2::Model& model); +hal::V1_2::Model convertToV1_2(const hal::V1_3::Model& model); +hal::V1_3::Model convertToV1_3(const hal::V1_0::Model& model); +hal::V1_3::Model convertToV1_3(const hal::V1_1::Model& model); +hal::V1_3::Model convertToV1_3(const hal::V1_2::Model& model); +hal::V1_3::Model convertToV1_3(const hal::V1_3::Model& model); + +hal::V1_0::OperationType uncheckedConvertToV1_0(hal::V1_3::OperationType type); +hal::V1_1::OperationType uncheckedConvertToV1_1(hal::V1_3::OperationType type); +hal::V1_2::OperationType uncheckedConvertToV1_2(hal::V1_3::OperationType type); + +hal::V1_0::Operand convertToV1_0(const hal::V1_2::Operand& operand); +hal::V1_0::Operand convertToV1_0(const hal::V1_3::Operand& operand); +hal::V1_2::Operand 
convertToV1_2(const hal::V1_0::Operand& operand); +hal::V1_2::Operand convertToV1_2(const hal::V1_3::Operand& operand); +hal::V1_3::Operand convertToV1_3(const hal::V1_0::Operand& operand); +hal::V1_3::Operand convertToV1_3(const hal::V1_2::Operand& operand); +hal::V1_3::Operand convertToV1_3(const hal::V1_3::Operand& operand); + +hal::hidl_vec<hal::V1_0::Operand> convertToV1_0(const hal::hidl_vec<hal::V1_0::Operand>& operands); +hal::hidl_vec<hal::V1_0::Operand> convertToV1_0(const hal::hidl_vec<hal::V1_2::Operand>& operands); +hal::hidl_vec<hal::V1_0::Operand> convertToV1_0(const hal::hidl_vec<hal::V1_3::Operand>& operands); +hal::hidl_vec<hal::V1_2::Operand> convertToV1_2(const hal::hidl_vec<hal::V1_0::Operand>& operands); +hal::hidl_vec<hal::V1_2::Operand> convertToV1_2(const hal::hidl_vec<hal::V1_2::Operand>& operands); +hal::hidl_vec<hal::V1_2::Operand> convertToV1_2(const hal::hidl_vec<hal::V1_3::Operand>& operands); +hal::hidl_vec<hal::V1_3::Operand> convertToV1_3(const hal::hidl_vec<hal::V1_0::Operand>& operands); +hal::hidl_vec<hal::V1_3::Operand> convertToV1_3(const hal::hidl_vec<hal::V1_2::Operand>& operands); +hal::hidl_vec<hal::V1_3::Operand> convertToV1_3(const hal::hidl_vec<hal::V1_3::Operand>& operands); + +bool compliantWithV1_0(const hal::V1_0::Request& request); +bool compliantWithV1_0(const hal::V1_3::Request& request); +bool compliantWithV1_2(const hal::V1_3::Request& request); + +hal::V1_0::Request convertToV1_0(const hal::V1_0::Request& request); +hal::V1_0::Request convertToV1_0(const hal::V1_3::Request& request); +hal::V1_0::Request convertToV1_2(const hal::V1_3::Request& request); +hal::V1_3::Request convertToV1_3(const hal::V1_0::Request& request); +hal::V1_3::Request convertToV1_3(const hal::V1_3::Request& request); + +bool compliantWithV1_0(hal::V1_0::OperandLifeTime lifetime); +bool compliantWithV1_0(hal::V1_3::OperandLifeTime lifetime); +bool compliantWithV1_3(hal::V1_0::OperandLifeTime lifetime); +bool 
compliantWithV1_3(hal::V1_3::OperandLifeTime lifetime); + +hal::V1_0::OperandLifeTime convertToV1_0(hal::V1_0::OperandLifeTime lifetime); +hal::V1_0::OperandLifeTime convertToV1_0(hal::V1_3::OperandLifeTime lifetime); +hal::V1_3::OperandLifeTime convertToV1_3(hal::V1_0::OperandLifeTime lifetime); +hal::V1_3::OperandLifeTime convertToV1_3(hal::V1_3::OperandLifeTime lifetime); + +constexpr hal::Priority convertToHalPriority(int32_t priority) { + switch (priority) { + case ANEURALNETWORKS_PRIORITY_LOW: + return hal::Priority::LOW; + case ANEURALNETWORKS_PRIORITY_MEDIUM: + return hal::Priority::MEDIUM; + case ANEURALNETWORKS_PRIORITY_HIGH: + return hal::Priority::HIGH; + } + LOG(FATAL) << "unrecognized priority: " << priority; + return {}; +} + +// The function syncWait() has the same semantics as the system function +// ::sync_wait(), except that the syncWait() return value is semantically +// richer. The timeout parameter is in msecs. +enum class FenceState { + ACTIVE, // fence has not been signaled + SIGNALED, // fence has been signaled + ERROR, // fence has been placed in the error state + UNKNOWN, // either bad argument passed to syncWait(), or internal error +}; +FenceState syncWait(int fd, int timeout); + +#ifdef NN_DEBUGGABLE +uint32_t getProp(const char* str, uint32_t defaultValue = 0); +#endif // NN_DEBUGGABLE + +} // namespace nn +} // namespace android #endif // ANDROID_FRAMEWORKS_ML_NN_COMMON_UTILS_H
diff --git a/common/include/ValidateHal.h b/common/include/ValidateHal.h index 7d9a50f..32d7662 100644 --- a/common/include/ValidateHal.h +++ b/common/include/ValidateHal.h
@@ -17,20 +17,25 @@ #ifndef ANDROID_FRAMEWORKS_ML_NN_COMMON_VALIDATE_HAL_H #define ANDROID_FRAMEWORKS_ML_NN_COMMON_VALIDATE_HAL_H -#include <nnapi/TypeUtils.h> -#include <nnapi/Validation.h> - #include <set> #include <tuple> #include "HalInterfaces.h" -#include "nnapi/TypeUtils.h" -#include "nnapi/Validation.h" namespace android { namespace nn { -using HalPreparedModelRole = std::tuple<const V1_3::IPreparedModel*, IOType, uint32_t>; +enum class HalVersion : int32_t { + UNKNOWN, + V1_0, + V1_1, + V1_2, + V1_3, + LATEST = V1_3, +}; + +enum class IOType { INPUT, OUTPUT }; +using PreparedModelRole = std::tuple<const hal::IPreparedModel*, IOType, uint32_t>; // 1.3 HAL does not support control flow operations with operands of unknown size. // See http://b/132458982#comment63. @@ -57,35 +62,35 @@ bool allowUnspecifiedOutput = true); // Verifies that the execution preference is valid. -bool validateExecutionPreference(V1_1::ExecutionPreference preference); +bool validateExecutionPreference(hal::ExecutionPreference preference); // Verifies that the priority is valid. -bool validatePriority(V1_3::Priority priority); +bool validatePriority(hal::Priority priority); -bool validOperationType(V1_0::OperationType operation); -bool validOperationType(V1_1::OperationType operation); -bool validOperationType(V1_2::OperationType operation); +bool validOperationType(hal::V1_0::OperationType operation); +bool validOperationType(hal::V1_1::OperationType operation); +bool validOperationType(hal::V1_2::OperationType operation); -bool validOperandType(V1_0::OperandType operand); -bool validOperandType(V1_2::OperandType operand); -bool validOperandType(V1_3::OperandType operand); +bool validOperandType(hal::V1_0::OperandType operand); +bool validOperandType(hal::V1_2::OperandType operand); +bool validOperandType(hal::V1_3::OperandType operand); // Verifies that the memory pool is valid in the specified HAL version. 
-bool validatePool(const hardware::hidl_memory& pool, HalVersion ver = HalVersion::LATEST); -bool validatePool(const V1_3::Request::MemoryPool& pool, HalVersion ver = HalVersion::LATEST); +bool validatePool(const hal::hidl_memory& pool, HalVersion ver = HalVersion::LATEST); +bool validatePool(const hal::V1_3::Request::MemoryPool& pool, HalVersion ver = HalVersion::LATEST); // Verifies that the input arguments to IDevice::allocate are valid. // Optionally, this function can return a flattened prepared model roles and a combined operand. // Pass nullptr if either value is not needed. // IMPORTANT: This function cannot validate dimensions and extraParams with extension operand type. // Each driver should do their own validation of extension type dimensions and extraParams. -bool validateMemoryDesc(const V1_3::BufferDesc& desc, - const hardware::hidl_vec<sp<V1_3::IPreparedModel>>& preparedModels, - const hardware::hidl_vec<V1_3::BufferRole>& inputRoles, - const hardware::hidl_vec<V1_3::BufferRole>& outputRoles, - std::function<const V1_3::Model*(const sp<V1_3::IPreparedModel>&)> getModel, - std::set<HalPreparedModelRole>* preparedModelRoles, - V1_3::Operand* combinedOperand); +bool validateMemoryDesc( + const hal::V1_3::BufferDesc& desc, + const hal::hidl_vec<sp<hal::V1_3::IPreparedModel>>& preparedModels, + const hal::hidl_vec<hal::V1_3::BufferRole>& inputRoles, + const hal::hidl_vec<hal::V1_3::BufferRole>& outputRoles, + std::function<const hal::V1_3::Model*(const sp<hal::V1_3::IPreparedModel>&)> getModel, + std::set<PreparedModelRole>* preparedModelRoles, hal::V1_3::Operand* combinedOperand); } // namespace nn } // namespace android
diff --git a/common/include/nnapi/IBuffer.h b/common/include/nnapi/IBuffer.h deleted file mode 100644 index 2386160..0000000 --- a/common/include/nnapi/IBuffer.h +++ /dev/null
@@ -1,79 +0,0 @@ -/* - * Copyright (C) 2020 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef ANDROID_FRAMEWORKS_ML_NN_COMMON_NNAPI_IBUFFER_H -#define ANDROID_FRAMEWORKS_ML_NN_COMMON_NNAPI_IBUFFER_H - -#include "nnapi/Result.h" -#include "nnapi/Types.h" - -namespace android::nn { - -/** - * This interface represents a device memory buffer. - * - * This interface is thread-safe, and any class that implements this interface must be thread-safe. - */ -class IBuffer { - public: - /** - * Retrieves the token corresponding to this buffer. - * - * @return MemoryDomainToken corresponding to this buffer. - */ - virtual Request::MemoryDomainToken getToken() const = 0; - - /** - * Retrieves the content of this buffer to a shared memory region. - * - * The IBuffer object must have been initialized before the call to IBuffer::copyTo. - * For more information on the state of the IBuffer object, refer to IDevice::allocate. - * - * @param dst The destination shared memory region. - * @return Nothing on success, otherwise GeneralError. - */ - virtual GeneralResult<void> copyTo(const SharedMemory& dst) const = 0; - - /** - * Sets the content of this buffer from a shared memory region. - * - * @param src The source shared memory region. - * @param dimensions Updated dimensional information. If the dimensions of the IBuffer object - * are not fully specified, then the dimensions must be fully specified here. 
If the - * dimensions of the IBuffer object are fully specified, then the dimensions may be empty - * here. If dimensions.size() > 0, then all dimensions must be specified here, and any - * dimension that was specified in the IBuffer object must have the same value here. - * @return Nothing on success, otherwise GeneralError. - */ - virtual GeneralResult<void> copyFrom(const SharedMemory& src, - const Dimensions& dimensions) const = 0; - - // Public virtual destructor to allow objects to be stored (and destroyed) as smart pointers. - // E.g., std::unique_ptr<IBuffer>. - virtual ~IBuffer() = default; - - protected: - // Protect the non-destructor special member functions to prevent object slicing. - IBuffer() = default; - IBuffer(const IBuffer&) = default; - IBuffer(IBuffer&&) noexcept = default; - IBuffer& operator=(const IBuffer&) = default; - IBuffer& operator=(IBuffer&&) noexcept = default; -}; - -} // namespace android::nn - -#endif // ANDROID_FRAMEWORKS_ML_NN_COMMON_NNAPI_IBUFFER_H
diff --git a/common/include/nnapi/IBurst.h b/common/include/nnapi/IBurst.h deleted file mode 100644 index 6afc594..0000000 --- a/common/include/nnapi/IBurst.h +++ /dev/null
@@ -1,155 +0,0 @@ -/* - * Copyright (C) 2020 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef ANDROID_FRAMEWORKS_ML_NN_COMMON_NNAPI_IBURST_H -#define ANDROID_FRAMEWORKS_ML_NN_COMMON_NNAPI_IBURST_H - -#include <android-base/scopeguard.h> - -#include <functional> -#include <memory> -#include <optional> -#include <utility> -#include <vector> - -#include "nnapi/Types.h" - -namespace android::nn { - -/** - * IBurst represents a burst execution object. - * - * Burst executions are a sequence of executions of the same prepared model that occur in rapid - * succession, such as frames of a camera capture or successive audio samples. A burst object is - * used to control a set of burst executions, and to preserve resources between executions, enabling - * executions to have lower overhead. Burst objects enable some optimizations: - * (1) A burst object is created before a sequence of executions, and freed when the sequence has - * ended. Because of this, the lifetime of the burst object hints to a driver how long it should - * remain in a high performance state. - * (2) A burst object can preserve resources between executions. For example, a driver can map a - * memory object on the first execution and cache the mapping in the burst object for reuse in - * subsequent executions. 
Any cached resource can be released when the burst object is destroyed - * or when the NNAPI runtime notifies the burst object that the resource is no longer required. - * (3) A burst object may be used for at most one execution at a time. This enables any transient - * execution resources such as intermediate tensors to be allocated once when the burst object - * is created and freed when the burst object is destroyed. - * - * This interface is thread-safe, and any class that implements this interface must be thread-safe. - */ -class IBurst { - public: - using OptionalCacheHold = std::shared_ptr<const base::ScopeGuard<std::function<void()>>>; - - /** - * Cache a memory object in the burst. - * - * This can enable multiple executions that reuse the same memory to be more efficient. - * - * @param memory The memory object to be cached as long as CacheHandle is held. - * @return An optional cache handle that will release the corresponding cahced object once the - * cache handle is released, or nullptr. - */ - virtual OptionalCacheHold cacheMemory(const SharedMemory& memory) const = 0; - - /** - * Performs a synchronous execution on a prepared model. - * - * At most one execution may occur on a burst object at any given time. - * - * The execution is performed synchronously with respect to the caller. IBurst::execute must - * verify the inputs to the function are correct. If there is an error, IBurst::execute must - * immediately return {@link ErrorStatus::INVALID_ARGUMENT} as a ExecutionError. If the inputs - * to the function are valid and there is no error, IBurst::execute must perform the execution, - * and must not return until the execution is complete. - * - * The caller must not change the content of any data object referenced by request (described by - * the {@link DataLocation} of a {@link RequestArgument}) until IBurst::execute returns. - * IBurst::execute must not change the content of any of the data objects corresponding to - * request inputs. 
- * - * If the prepared model was prepared from a model wherein all tensor operands have fully - * specified dimensions, and the inputs to the function are valid, and at execution time every - * operation's input operands have legal values, then the execution should complete - * successfully. There must be no failure unless the device itself is in a bad state. - * - * @param request The input and output information on which the prepared model is to be - * executed. - * @param measure Specifies whether or not to measure duration of the execution. - * @param deadline Optional time point. If provided, execute is expected to complete by this - * time point. If it is not able to be completed by the deadline, the execution may be - * aborted. - * @param loopTimeoutDuration The maximum amount of time that should be spent executing a {@link - * OperationType::WHILE} operation. If a loop condition model does not output `false` within - * this duration, the execution must be aborted. If no loop timeout duration is provided, - * the maximum amount of time is {@link kControlFlowTimeoutDefault}. When provided, the - * duration must not exceed {@link kControlFlowTimeoutMaximum}. - * @return A pair consisting of: - * - A list of shape information of model output operands. The index into "outputShapes" - * corresponds to the index of the output operand in the Request outputs vector. - * outputShapes must be empty unless the execution is successful or the ExecutionResult is - * {@link ErrorStatus::OUTPUT_INSUFFICIENT_SIZE}. outputShapes may be empty if the - * execution is successful and all model output operands are fully-specified at execution - * time. outputShapes must have the same number of elements as the number of model output - * operands if the ExecutionResult is {@link ErrorStatus::OUTPUT_INSUFFICIENT_SIZE}, or if - * the execution is successful and the model has at least one output operand that is not - * fully-specified. - * - Duration of execution. 
Unless measure is YES and the execution is successful, all times - * must be reported as std::nullopt. A driver may choose to report any time as - * std::nullopt, indicating that measurement is not available. - */ - virtual ExecutionResult<std::pair<std::vector<OutputShape>, Timing>> execute( - const Request& request, MeasureTiming measure, const nn::OptionalTimePoint& deadline, - const nn::OptionalDuration& loopTimeoutDuration) const = 0; - - /** - * Create a reusable burst execution object. - * - * IBurst::createReusableExecution must verify the inputs to the function are correct. If there - * is an error, IBurst::createReusableExecution must immediately return {@link - * ErrorStatus::INVALID_ARGUMENT} as a GeneralError. If the inputs to the function are valid and - * there is no error, IBurst::createReusableExecution must construct a reusable execution. - * - * @param request The input and output information on which the prepared model is to be - * executed. - * @param measure Specifies whether or not to measure duration of the computation. - * @param loopTimeoutDuration The maximum amount of time that should be spent executing a {@link - * OperationType::WHILE} operation. If a loop condition model does not output `false` within - * this duration, the execution must be aborted. If no loop timeout duration is provided, - * the maximum amount of time is {@link kControlFlowTimeoutDefault}. When provided, the - * duration must not exceed {@link kControlFlowTimeoutMaximum}. - * @return execution An IExecution object representing a reusable burst execution that has been - * specialized for a fixed request, otherwise GeneralError. - */ - virtual GeneralResult<SharedExecution> createReusableExecution( - const Request& request, MeasureTiming measure, - const nn::OptionalDuration& loopTimeoutDuration) const = 0; - - // Public virtual destructor to allow objects to be stored (and destroyed) as smart pointers. - // E.g., std::unique_ptr<IBurst>. 
- virtual ~IBurst() = default; - - protected: - // Protect the non-destructor special member functions to prevent object slicing. - IBurst() = default; - IBurst(const IBurst&) = default; - IBurst(IBurst&&) noexcept = default; - IBurst& operator=(const IBurst&) = default; - IBurst& operator=(IBurst&&) noexcept = default; -}; - -} // namespace android::nn - -#endif // ANDROID_FRAMEWORKS_ML_NN_COMMON_NNAPI_IBURST_H
diff --git a/common/include/nnapi/IDevice.h b/common/include/nnapi/IDevice.h deleted file mode 100644 index 869255f..0000000 --- a/common/include/nnapi/IDevice.h +++ /dev/null
@@ -1,369 +0,0 @@ -/* - * Copyright (C) 2020 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef ANDROID_FRAMEWORKS_ML_NN_COMMON_NNAPI_IDEVICE_H -#define ANDROID_FRAMEWORKS_ML_NN_COMMON_NNAPI_IDEVICE_H - -#include <memory> -#include <string> -#include <utility> -#include <vector> - -#include "nnapi/Result.h" -#include "nnapi/Types.h" - -namespace android::nn { - -// Forward declarations -class IBuffer; -class IPreparedModel; - -/** - * This interface represents a device driver. - * - * This interface is thread-safe, and any class that implements this interface must be thread-safe. - */ -class IDevice { - public: - /** - * Returns the name of the driver. - * - * @return Name of the driver. - */ - virtual const std::string& getName() const = 0; - - /** - * Get the version string of the driver implementation. - * - * The version string must be a unique token among the set of version strings of drivers of a - * specific device. The token identifies the device driver's implementation. The token must not - * be confused with the feature level which is solely defined by the interface version. This API - * is opaque to the Android framework, but the Android framework may use the information for - * debugging or to pass on to NNAPI applications. 
- * - * Application developers sometimes have specific requirements to ensure good user experiences, - * and they need more information to make intelligent decisions when the Android framework - * cannot. For example, combined with the device name and other information, the token can help - * NNAPI applications filter devices based on their needs: - * - An application demands a certain level of performance, but a specific version of the driver - * cannot meet that requirement because of a performance regression. The application can - * disallow the driver based on the version provided. - * - An application has a minimum precision requirement, but certain versions of the driver - * cannot meet that requirement because of bugs or certain optimizations. The application can - * filter out versions of these drivers. - * - * @return version The version string of the device implementation. Must have nonzero length. - */ - virtual const std::string& getVersionString() const = 0; - - /** - * Returns the feature level of a driver. - * - * @return featureLevel The Version of the NNAPI specification this driver implements. - * See {@link ANeuralNetworks_getFeatureLevel} and {@link FeatureLevelCode} - * for NNAPI specification versioning information. - */ - virtual Version getFeatureLevel() const = 0; - - /** - * Returns the device type of a driver. - * - * The device type can be used to help application developers to distribute Machine Learning - * workloads and other workloads such as graphical rendering. E.g., for an app which renders AR - * scenes based on real time object detection results, the developer could choose an ACCELERATOR - * type device for ML workloads, and reserve GPU for graphical rendering. - * - * @return type The DeviceType of the device. Please note, this is not a bitfield of - * DeviceTypes. Each device must only be of a single DeviceType. 
- */ - virtual DeviceType getType() const = 0; - - /** - * Gets information about extensions supported by the driver implementation. - * - * Extensions of category ExtensionCategory::BASE must not appear in the list. - * - * All extension operations and operands must be fully supported for the extension to appear in - * the list of supported extensions. - * - * @return extensions A list of supported extensions. - */ - virtual const std::vector<Extension>& getSupportedExtensions() const = 0; - - /** - * Gets the capabilities of a driver. - * - * @return capabilities Capabilities of the driver. - */ - virtual const Capabilities& getCapabilities() const = 0; - - /** - * Gets the caching requirements of the driver implementation. - * - * There are two types of cache file descriptors provided to the driver: model cache and data - * cache. - * - * The data cache is for caching constant data, possibly including preprocessed and transformed - * tensor buffers. Any modification to the data cache should have no worse effect than - * generating bad output values at execution time. - * - * The model cache is for caching security-sensitive data such as compiled executable machine - * code in the device's native binary format. A modification to the model cache may affect the - * driver's execution behavior, and a malicious client could make use of this to execute beyond - * the granted permission. Thus, the driver must always check whether the model cache is - * corrupted before preparing the model from cache. - * - * IDevice::getNumberOfCacheFilesNeeded returns how many of each type of cache files the driver - * implementation needs to cache a single prepared model. Returning 0 for both types indicates - * compilation caching is not supported by this driver. The driver may still choose not to cache - * certain compiled models even if it reports that caching is supported. 
- * - * If the device reports that caching is not supported, the user may avoid calling - * IDevice::prepareModelFromCache or providing cache file descriptors to IDevice::prepareModel. - * - * @return A pair of: - * - numModelCache An unsigned integer indicating how many files for model cache the driver - * needs to cache a single prepared model. It must be less than or equal to - * ::android::nn::kMaxNumberOfCacheFiles. - * - numDataCache An unsigned integer indicating how many files for data cache the driver - * needs to cache a single prepared model. It must be less than or equal to - * ::android::nn::kMaxNumberOfCacheFiles. - */ - virtual std::pair<uint32_t, uint32_t> getNumberOfCacheFilesNeeded() const = 0; - - /** - * Blocks until the device is not in a bad state. - * - * @return Nothing on success, otherwise GeneralError. - */ - virtual GeneralResult<void> wait() const = 0; - - /** - * Gets the supported operations in a model. - * - * IDevice::getSupportedOperations indicates which operations of the top-level subgraph are - * fully supported by the vendor driver. If an operation may not be supported for any reason, - * IDevice::getSupportedOperations must return `false` for that operation. - * - * The {@link OperationType::IF} and {@link OperationType::WHILE} operations may only be fully - * supported if the vendor driver fully supports all operations in the referenced subgraphs. - * - * @param model A Model whose operations--and their corresponding operands--are to be verified - * by the driver. - * @return supportedOperations A list of supported operations, where `true` indicates the - * operation is supported and `false` indicates the operation is not supported. The index of - * "supported" corresponds with the index of the operation it is describing. - */ - virtual GeneralResult<std::vector<bool>> getSupportedOperations(const Model& model) const = 0; - - /** - * Creates a prepared model for execution. 
- * - * IDevice::prepareModel is used to make any necessary transformations or alternative - * representations to a model for execution, possibly including transformations on the constant - * data, optimization on the model's graph, or compilation into the device's native binary - * format. The model itself is not changed. - * - * Optionally, caching information may be provided for the driver to save the prepared model to - * cache files for faster model compilation time when the same model preparation is requested in - * the future. There are two types of cache file handles provided to the driver: model cache and - * data cache. For more information on the two types of cache handles, refer to - * IDevice::getNumberOfCacheFilesNeeded. - * - * The file descriptors must be opened with read and write permission. A file may have any size, - * and the corresponding file descriptor may have any offset. The driver must truncate a file to - * zero size before writing to that file. The file descriptors may be closed by the client once - * the preparation has finished. The driver must dup a file descriptor if it wants to get access - * to the cache file later. - * - * IDevice::prepareModel must verify its inputs related to preparing the model (as opposed to - * saving the prepared model to cache) are correct. If there is an error, IDevice::prepareModel - * must immediately return {@link ErrorStatus::INVALID_ARGUMENT} as a GeneralError. If the - * inputs to IDevice::prepareModel are valid and there is no error, IDevice::prepareModel must - * prepare the model. - * - * The model is prepared with a priority. This priority is relative to other prepared models - * owned by the same client. Higher priority executions may use more compute resources than - * lower priority executions, and may preempt or starve lower priority executions. - * - * IDevice::prepareModel can be called with an optional deadline. 
If the model is not able to be - * prepared before the provided deadline, the model preparation may be aborted, and either - * {@link ErrorStatus::MISSED_DEADLINE_TRANSIENT} or {@link - * ErrorStatus::MISSED_DEADLINE_PERSISTENT} may be returned as a GeneralError. - * - * Optionally, the driver may save the prepared model to cache during the preparation. Any error - * that occurs when saving to cache must not affect the status of preparing the model. Even if - * the input arguments related to the cache may be invalid, or the driver may fail to save to - * cache, IDevice::prepareModel must finish preparing the model. The driver may choose not to - * save to cache even if the caching information is provided and valid. - * - * The only information that may be unknown to the model at this stage is the shape of the - * tensors, which may only be known at execution time. As such, some driver services may return - * partially prepared models, where the prepared model may only be finished when it is paired - * with a set of inputs to the model. Note that the same prepared model object may be used with - * different shapes of inputs on different (possibly concurrent) executions. - * - * @param model The model to be prepared for execution. - * @param preference Indicates the intended execution behavior of a prepared model. - * @param priority Priority of the prepared model relative to other prepared models owned by an - * application. - * @param deadline Optional time point. If provided, prepareModel is expected to complete by - * this time point. If it is not able to be completed by the deadline, the execution may be - * aborted. - * @param modelCache A vector of handles with each entry holding exactly one cache file - * descriptor for the security-sensitive cache. The length of the vector must either be 0 - * indicating that caching information is not provided, or match numModelCache returned from - * IDevice::getNumberOfCacheFilesNeeded. 
The cache handles will be provided in the same - * order when retrieving the preparedModel from cache files with - * IDevice::prepareModelFromCache. - * @param dataCache A vector of handles with each entry holding exactly one cache file - * descriptor for the constants' cache. The length of the vector must either be 0 indicating - * that caching information is not provided, or match numDataCache returned from - * IDevice::getNumberOfCacheFilesNeeded. The cache handles will be provided in the same - * order when retrieving the preparedModel from cache files with - * IDevice::prepareModelFromCache. - * @param token An caching token of length ::android::nn::kByteSizeOfCacheToken identifying the - * prepared model. The same token will be provided when retrieving the prepared model from - * the cache files with IDevice::prepareModelFromCache. Tokens should be chosen to have a - * low rate of collision for a particular application. The driver cannot detect a collision; - * a collision will result in a failed execution or in a successful execution that produces - * incorrect output values. If both modelCache and dataCache are empty indicating that - * caching information is not provided, this token must be ignored. - * @return preparedModel An IPreparedModel object representing a model that has been prepared - * for execution, otherwise GeneralError. - */ - virtual GeneralResult<SharedPreparedModel> prepareModel( - const Model& model, ExecutionPreference preference, Priority priority, - OptionalTimePoint deadline, const std::vector<SharedHandle>& modelCache, - const std::vector<SharedHandle>& dataCache, const CacheToken& token) const = 0; - - /** - * Creates a prepared model from cache files for execution. - * - * IDevice::prepareModelFromCache is used to retrieve a prepared model directly from cache files - * to avoid slow model compilation time. There are two types of cache file handles provided to - * the driver: model cache and data cache. 
For more information on the two types of cache - * handles, refer to IDevice::getNumberOfCacheFilesNeeded. - * - * The file descriptors must be opened with read and write permission. A file may have any size, - * and the corresponding file descriptor may have any offset. The driver must truncate a file to - * zero size before writing to that file. The file descriptors may be closed by the client once - * the preparation has finished. The driver must dup a file descriptor if it wants to get access - * to the cache file later. - * - * IDevice::prepareModelFromCache must verify its inputs are correct, and that the - * security-sensitive cache has not been modified since it was last written by the driver. If - * there is an error, or if compilation caching is not supported, or if the security-sensitive - * cache has been modified, IDevice::prepareModelFromCache must immediately return {@link - * ErrorStatus::INVALID_ARGUMENT} as a GeneralError. If the inputs to - * IDevice::prepareModelFromCache are valid, the security-sensitive cache is not modified, and - * there is no error, IDevice::prepareModelFromCache must prepare the model - * - * IDevice::prepareModelFromCache can be called with an optional deadline. If the model is not - * able to prepared before the provided deadline, the model preparation may be aborted, and - * either {@link ErrorStatus::MISSED_DEADLINE_TRANSIENT} or {@link - * ErrorStatus::MISSED_DEADLINE_PERSISTENT} may be returned as a GeneralError. - * - * The only information that may be unknown to the model at this stage is the shape of the - * tensors, which may only be known at execution time. As such, some driver services may return - * partially prepared models, where the prepared model may only be finished when it is paired - * with a set of inputs to the model. Note that the same prepared model object may be used with - * different shapes of inputs on different (possibly concurrent) executions. - * - * @param deadline Optional time point. 
If provided, prepareModel is expected to complete by - * this time point. If it is not able to be completed by the deadline, the execution may be - * aborted. - * @param modelCache A vector of handles with each entry holding exactly one cache file - * descriptor for the security-sensitive cache. The length of the vector must match the - * numModelCache returned from IDevice::getNumberOfCacheFilesNeeded. The cache handles will - * be provided in the same order as with IDevice::prepareModel. - * @param dataCache A vector of handles with each entry holding exactly one cache file - * descriptor for the constants' cache. The length of the vector must match the numDataCache - * returned from IDevice::getNumberOfCacheFilesNeeded. The cache handles will be provided in - * the same order as with IDevice::prepareModel. - * @param token A caching token of length ::android::nn::kByteSizeOfCacheToken identifying the - * prepared model. It is the same token provided when saving the cache files with - * IDevice::prepareModel. Tokens should be chosen to have a low rate of collision for a - * particular application. The driver cannot detect a collision; a collision will result in - * a failed execution or in a successful execution that produces incorrect output values. - * @return preparedModel An IPreparedModel object representing a model that has been prepared - * for execution, otherwise GeneralError. - */ - virtual GeneralResult<SharedPreparedModel> prepareModelFromCache( - OptionalTimePoint deadline, const std::vector<SharedHandle>& modelCache, - const std::vector<SharedHandle>& dataCache, const CacheToken& token) const = 0; - - /** - * Allocates a driver-managed buffer with the properties specified by the descriptor as well as - * the input and output roles of prepared models. - * - * IDevice::allocate must verify its inputs are correct. 
If there is an error, or if a certain - * role or property is not supported by the driver, IDevice::allocate must return with {@link - * ErrorStatus::INVALID_ARGUMENT} as a GeneralError. If the allocation is successful, this - * method must return the produced IBuffer. A successful allocation must accommodate all of the - * specified roles and buffer properties. - * - * The buffer is allocated as an uninitialized state. An uninitialized buffer may only be used - * in ways that are specified by outputRoles. A buffer is initialized after it is used as an - * output in a successful execution, or after a successful invocation of IBuffer::copyFrom on - * the buffer. An initialized buffer may be used according to all roles specified in inputRoles - * and outputRoles. A buffer will return to the uninitialized state if it is used as an output - * in a failed execution, or after a failed invocation of IBuffer::copyFrom on the buffer. - * - * The driver may deduce the dimensions of the buffer according to the buffer descriptor as well - * as the input and output roles. The dimensions or rank of the buffer may be unknown at this - * stage. As such, some driver services may only create a placeholder and defer the actual - * allocation until execution time. Note that the same buffer may be used for different shapes - * of outputs on different executions. When the buffer is used as an input, the input shape must - * be the same as the output shape from the last execution using this buffer as an output. - * - * The driver must apply proper validatation upon every usage of the buffer, and fail the - * execution immediately if the usage is illegal. - * - * @param desc A buffer descriptor specifying the properties of the buffer to allocate. - * @param preparedModels A vector of IPreparedModel objects. Must only contain IPreparedModel - * objects from the same IDevice as this method invoked on. - * @param inputRoles A vector of roles with each specifying an input to a prepared model. 
- * @param outputRoles A vector of roles with each specifying an output to a prepared model. - * Each role specified in inputRoles and outputRoles must be unique. The corresponding model - * operands of the roles must have the same OperandType, scale, zero point, and ExtraParams. - * The dimensions of the operands and the dimensions specified in the buffer descriptor must - * be compatible with each other. Two dimensions are incompatible if there is at least one - * axis that is fully specified in both but has different values. - * @return The allocated IBuffer object. If the buffer was unable to be allocated due to an - * error, a GeneralError is returned instead. - */ - virtual GeneralResult<SharedBuffer> allocate( - const BufferDesc& desc, const std::vector<SharedPreparedModel>& preparedModels, - const std::vector<BufferRole>& inputRoles, - const std::vector<BufferRole>& outputRoles) const = 0; - - // Public virtual destructor to allow objects to be stored (and destroyed) as smart pointers. - // E.g., std::unique_ptr<IDevice>. - virtual ~IDevice() = default; - - protected: - // Protect the non-destructor special member functions to prevent object slicing. - IDevice() = default; - IDevice(const IDevice&) = default; - IDevice(IDevice&&) noexcept = default; - IDevice& operator=(const IDevice&) = default; - IDevice& operator=(IDevice&&) noexcept = default; -}; - -} // namespace android::nn - -#endif // ANDROID_FRAMEWORKS_ML_NN_COMMON_NNAPI_IDEVICE_H
diff --git a/common/include/nnapi/IExecution.h b/common/include/nnapi/IExecution.h deleted file mode 100644 index ab9e59c..0000000 --- a/common/include/nnapi/IExecution.h +++ /dev/null
@@ -1,167 +0,0 @@ -/* - * Copyright (C) 2021 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef ANDROID_PACKAGES_MODULES_NEURALNETWORKS_COMMON_NNAPI_IEXECUTION_H -#define ANDROID_PACKAGES_MODULES_NEURALNETWORKS_COMMON_NNAPI_IEXECUTION_H - -#include <utility> -#include <vector> - -#include "nnapi/Types.h" - -namespace android::nn { - -/** - * IExecution describes a reusable execution with request and several other execution configurations - * fixed. It is used to launch computations. - * - * IExecution manages a sequence of computations of the same prepared model with the same request - * and configurations. An IExecution object is used to control a set of computation, and to preserve - * resources between computations, enabling computations to have lower overhead. IExecution objects - * enable some optimizations: - * (1) An IExecution object can preserve resources between computations. For example, a driver can - * map a memory object when the IExecution object is created and cache the mapping for reuse in - * subsequent computations. Any cached resource can be released when the IExecution object is - * destroyed. - * (2) An IExecution object may be used for at most one computation at a time. This enables any - * transient computation resources such as intermediate tensors to be allocated once when the - * IExecution object is created and freed when the IExecution object is destroyed. 
- * (3) An IExecution object is created for a fixed request. This enables the implementation to apply - * request-specific optimizations. For example, an implementation can avoid request validation - * and conversions when the IExecution object is reused. An implementation may also choose to - * specialize the dynamic tensor shapes in a prepared model according to the request. - * - * This interface is thread-safe, and any class that implements this interface must be thread-safe. - */ -class IExecution { - public: - /** - * Performs a synchronous computation on a reusable execution. - * - * At most one computation may occur on a execution object at any given time, either by means of - * IExecution::compute or IExecution::computeFenced. - * - * The computation is performed synchronously with respect to the caller. - * IExecution::compute must verify the inputs to the function are correct. If there is an - * error, IExecution::compute must immediately return {@link ErrorStatus::INVALID_ARGUMENT} - * as a ExecutionError. If the inputs to the function are valid and there is no error, - * IExecution::compute must perform the computation, and must not return until the computation - * is complete. - * - * The caller must not change the content of any data object referenced by the request provided - * in IPreparedModel::createReusableExecution (described by the {@link DataLocation} of a {@link - * RequestArgument}) until IExecution::compute returns. IExecution::compute must not change the - * content of any of the data objects corresponding to request inputs. - * - * If the prepared model that the execution is created from was prepared from a model wherein - * all tensor operands have fully specified dimensions, and the inputs to the function are - * valid, and at computation time every operation's input operands have legal values, then the - * computation should complete successfully. There must be no failure unless the device itself - * is in a bad state. 
- * - * IExecution::compute may be called with an optional deadline. If the computation is not - * able to be completed before the provided deadline, the computation may be aborted, and either - * {@link ErrorStatus::MISSED_DEADLINE_TRANSIENT} or {@link - * ErrorStatus::MISSED_DEADLINE_PERSISTENT} may be returned as a ExecutionError. - * - * @param deadline Optional time point. If provided, compute is expected to complete by this - * time point. If it is not able to be completed by the deadline, the computation may be - * aborted. - * @return A pair consisting of: - * - A list of shape information of model output operands. The index into "outputShapes" - * corresponds to the index of the output operand in the Request outputs vector. - * outputShapes must be empty unless the computation is successful or the ExecutionResult - * is {@link ErrorStatus::OUTPUT_INSUFFICIENT_SIZE}. outputShapes may be empty if the - * computation is successful and all model output operands are fully-specified at - * computation time. outputShapes must have the same number of elements as the number of - * model output operands if the ExecutionResult is - * {@link ErrorStatus::OUTPUT_INSUFFICIENT_SIZE}, or if the computation is successful and - * the model has at least one output operand that is not fully-specified. - * - Duration of computation. Unless measure is YES and the computation is successful, all - * times must be reported as UINT64_MAX. A driver may choose to report any time as - * UINT64_MAX, indicating that measurement is not available. - */ - virtual ExecutionResult<std::pair<std::vector<OutputShape>, Timing>> compute( - const OptionalTimePoint& deadline) const = 0; - - /** - * Launch a fenced asynchronous computation on a reusabled execution. - * - * At most one computation may occur on a execution object at any given time, either by means of - * IExecution::compute or IExecution::computeFenced. - * - * The computation is performed asynchronously with respect to the caller. 
- * IExecution::computeFenced must verify its inputs are correct, and the usages of memory - * pools allocated by IDevice::allocate are valid. If there is an error, - * IExecution::computeFenced must immediately return {@link ErrorStatus::INVALID_ARGUMENT} - * as a GeneralError. If the inputs to the function are valid and there is no error, - * IExecution::computeFenced must dispatch an asynchronous task to perform the computation in - * the background, and immediately return with a sync fence that will be signaled once the - * computation is completed and a callback that can be used by the client to query the duration - * and runtime error status. If the task has finished before the call returns, an empty handle - * may be returned for syncFence. The computation must wait for all the sync fences (if any) in - * waitFor to be signaled before starting the actual computation. - * - * When the asynchronous task has finished its computation, it must immediately signal the - * syncFence returned from the IExecution::computeFenced call. After the syncFence is - * signaled, the task must not modify the content of any data object referenced by request - * (described by the {@link DataLocation} of a {@link Request::Argument}). - * - * IExecution::computeFenced may be called with an optional deadline and an optional - * duration. If the computation is not able to be completed before the provided deadline or - * within the timeout duration (measured from when all sync fences in waitFor are signaled), - * whichever comes earlier, the computation may be aborted, and either {@link - * ErrorStatus::MISSED_DEADLINE_TRANSIENT} or {@link ErrorStatus::MISSED_DEADLINE_PERSISTENT} - * may be returned as an GeneralError. The error due to an abort must be sent the same way as - * other errors, described above. 
- * - * If any of the sync fences in waitFor changes to error status after the - * IExecution::computeFenced call succeeds, or the computation is aborted because it cannot - * finish before the deadline has been reached or the duration has elapsed, the driver must - * immediately set the returned syncFence to error status. - * - * @param waitFor A vector of sync fence file descriptors. The computation must wait for all - * sync fence to be signaled before starting the task. - * @param deadline The time by which computation is expected to complete. If the computation - * cannot be finished by the deadline, the computation may be aborted. - * @param timeoutDurationAfterFence The timeout duration within which the computation is - * expected to complete after all sync fences in waitFor are signaled. - * @return A pair consisting of: - * - A syncFence that will be triggered when the task is completed. The syncFence will be - * set to error if critical error occurs when doing actual evaluation. - * - A callback can be used to query information like duration and detailed runtime error - * status when the task is completed. - */ - virtual GeneralResult<std::pair<SyncFence, ExecuteFencedInfoCallback>> computeFenced( - const std::vector<SyncFence>& waitFor, const OptionalTimePoint& deadline, - const OptionalDuration& timeoutDurationAfterFence) const = 0; - - // Public virtual destructor to allow objects to be stored (and destroyed) as smart pointers. - // E.g., std::unique_ptr<IExecution>. - virtual ~IExecution() = default; - - protected: - // Protect the non-destructor special member functions to prevent object slicing. - IExecution() = default; - IExecution(const IExecution&) = default; - IExecution(IExecution&&) noexcept = default; - IExecution& operator=(const IExecution&) = default; - IExecution& operator=(IExecution&&) noexcept = default; -}; - -} // namespace android::nn - -#endif // ANDROID_PACKAGES_MODULES_NEURALNETWORKS_COMMON_NNAPI_IEXECUTION_H
diff --git a/common/include/nnapi/IPreparedModel.h b/common/include/nnapi/IPreparedModel.h deleted file mode 100644 index 1995acb..0000000 --- a/common/include/nnapi/IPreparedModel.h +++ /dev/null
@@ -1,207 +0,0 @@ -/* - * Copyright (C) 2020 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef ANDROID_FRAMEWORKS_ML_NN_COMMON_NNAPI_IPREPARED_MODEL_H -#define ANDROID_FRAMEWORKS_ML_NN_COMMON_NNAPI_IPREPARED_MODEL_H - -#include <any> -#include <functional> -#include <memory> -#include <utility> -#include <vector> - -#include "nnapi/Types.h" - -namespace android::nn { - -/** - * IPreparedModel describes a model that has been prepared for execution and is used to launch - * executions. - * - * This interface is thread-safe, and any class that implements this interface must be thread-safe. - */ -class IPreparedModel { - public: - /** - * Performs a synchronous execution on a prepared model. - * - * The execution is performed synchronously with respect to the caller. IPreparedModel::execute - * must verify the inputs to the function are correct. If there is an error, - * IPreparedModel::execute must immediately return {@link ErrorStatus::INVALID_ARGUMENT} as a - * ExecutionError. If the inputs to the function are valid and there is no error, - * IPreparedModel::execute must perform the execution, and must not return until the execution - * is complete. - * - * The caller must not change the content of any data object referenced by request (described by - * the {@link DataLocation} of a {@link RequestArgument}) until IPreparedModel::execute returns. 
- * IPreparedModel::execute must not change the content of any of the data objects corresponding - * to request inputs. - * - * If the prepared model was prepared from a model wherein all tensor operands have fully - * specified dimensions, and the inputs to the function are valid, and at execution time every - * operation's input operands have legal values, then the execution should complete - * successfully. There must be no failure unless the device itself is in a bad state. - * - * IPreparedModel::execute may be called with an optional deadline. If the execution is not - * able to be completed before the provided deadline, the execution may be aborted, and either - * {@link ErrorStatus::MISSED_DEADLINE_TRANSIENT} or {@link - * ErrorStatus::MISSED_DEADLINE_PERSISTENT} may be returned as a ExecutionError. - * - * @param request The input and output information on which the prepared model is to be - * executed. - * @param measure Specifies whether or not to measure duration of the execution. - * @param deadline Optional time point. If provided, execute is expected to complete by this - * time point. If it is not able to be completed by the deadline, the execution may be - * aborted. - * @param loopTimeoutDuration The maximum amount of time that should be spent executing a {@link - * OperationType::WHILE} operation. If a loop condition model does not output `false` within - * this duration, the execution must be aborted. If no loop timeout duration is provided, - * the maximum amount of time is {@link kControlFlowTimeoutDefault}. When provided, the - * duration must not exceed {@link kControlFlowTimeoutMaximum}. - * @return A pair consisting of: - * - A list of shape information of model output operands. The index into "outputShapes" - * corresponds to the index of the output operand in the Request outputs vector. - * outputShapes must be empty unless the execution is successful or the ExecutionResult is - * {@link ErrorStatus::OUTPUT_INSUFFICIENT_SIZE}. 
outputShapes may be empty if the - * execution is successful and all model output operands are fully-specified at execution - * time. outputShapes must have the same number of elements as the number of model output - * operands if the ExecutionResult is {@link ErrorStatus::OUTPUT_INSUFFICIENT_SIZE}, or if - * the execution is successful and the model has at least one output operand that is not - * fully-specified. - * - Duration of execution. Unless measure is YES and the execution is successful, all times - * must be reported as UINT64_MAX. A driver may choose to report any time as UINT64_MAX, - * indicating that measurement is not available. - */ - virtual ExecutionResult<std::pair<std::vector<OutputShape>, Timing>> execute( - const Request& request, MeasureTiming measure, const OptionalTimePoint& deadline, - const OptionalDuration& loopTimeoutDuration) const = 0; - - /** - * Launch a fenced asynchronous execution on a prepared model. - * - * The execution is performed asynchronously with respect to the caller. - * IPreparedModel::executeFenced must verify its inputs are correct, and the usages of memory - * pools allocated by IDevice::allocate are valid. If there is an error, - * IPreparedModel::executeFenced must immediately return {@link ErrorStatus::INVALID_ARGUMENT} - * as a GeneralError. If the inputs to the function are valid and there is no error, - * IPreparedModel::executeFenced must dispatch an asynchronous task to perform the execution in - * the background, and immediately return with a sync fence that will be signaled once the - * execution is completed and a callback that can be used by the client to query the duration - * and runtime error status. If the task has finished before the call returns, an empty handle - * may be returned for syncFence. The execution must wait for all the sync fences (if any) in - * waitFor to be signaled before starting the actual execution. 
- * - * When the asynchronous task has finished its execution, it must immediately signal the - * syncFence returned from the IPreparedModel::executeFenced call. After the syncFence is - * signaled, the task must not modify the content of any data object referenced by request - * (described by the {@link DataLocation} of a {@link Request::Argument}). - * - * IPreparedModel::executeFenced may be called with an optional deadline and an optional - * duration. If the execution is not able to be completed before the provided deadline or within - * the timeout duration (measured from when all sync fences in waitFor are signaled), whichever - * comes earlier, the execution may be aborted, and either {@link - * ErrorStatus::MISSED_DEADLINE_TRANSIENT} or {@link ErrorStatus::MISSED_DEADLINE_PERSISTENT} - * may be returned as an GeneralError. The error due to an abort must be sent the same way as - * other errors, described above. - * - * If any of the sync fences in waitFor changes to error status after the - * IPreparedModel::executeFenced call succeeds, or the execution is aborted because it cannot - * finish before the deadline has been reached or the duration has elapsed, the driver must - * immediately set the returned syncFence to error status. - * - * @param request The input and output information on which the prepared model is to be - * executed. - * @param waitFor A vector of sync fence file descriptors. The execution must wait for all sync - * fence to be signaled before starting the task. - * @param measure Specifies whether or not to measure duration of the execution. - * @param deadline The time by which execution is expected to complete. If the execution cannot - * be finished by the deadline, the execution may be aborted. - * @param loopTimeoutDuration The maximum amount of time that should be spent executing a {@link - * OperationType::WHILE} operation. If a loop condition model does not output `false` within - * this duration, the execution must be aborted. 
If no loop timeout duration is provided, - * the maximum amount of time is {@link kControlFlowTimeoutDefault}. When provided, the - * duration must not exceed {@link kControlFlowTimeoutMaximum}. - * @param timeoutDurationAfterFence The timeout duration within which the execution is expected - * to complete after all sync fences in waitFor are signaled. - * @return A pair consisting of: - * - A syncFence that will be triggered when the task is completed. The syncFence will be - * set to error if critical error occurs when doing actual evaluation. - * - A callback can be used to query information like duration and detailed runtime error - * status when the task is completed. - */ - virtual GeneralResult<std::pair<SyncFence, ExecuteFencedInfoCallback>> executeFenced( - const Request& request, const std::vector<SyncFence>& waitFor, MeasureTiming measure, - const OptionalTimePoint& deadline, const OptionalDuration& loopTimeoutDuration, - const OptionalDuration& timeoutDurationAfterFence) const = 0; - - /** - * Create a reusable execution from a request and execution configurations. - * - * IPreparedModel::createReusableExecution must verify the inputs to the function are correct. - * If there is an error, IPreparedModel::createReusableExecution must immediately return - * {@link ErrorStatus::INVALID_ARGUMENT} as a GeneralError. If the inputs to the function are - * valid and there is no error, IPreparedModel::createReusableExecution must construct a - * reusable execution. - * - * @param request The input and output information on which the prepared model is to be - * executed. - * @param measure Specifies whether or not to measure duration of the computations performed - * with the returned execution. - * @param loopTimeoutDuration The maximum amount of time that should be spent executing a {@link - * OperationType::WHILE} operation. 
During a computation with the returned execution, if a - * loop condition model does not output `false` within this duration, the computation must - * be aborted. If no loop timeout duration is provided, the maximum amount of time is - * {@link LoopTimeoutDurationNs::DEFAULT}. When provided, the duration must not exceed - * {@link LoopTimeoutDurationNs::MAXIMUM}. - * @return execution An IExecution object representing a reusable execution that has been - * specialized for a fixed request, otherwise GeneralError. - */ - virtual GeneralResult<SharedExecution> createReusableExecution( - const Request& request, MeasureTiming measure, - const OptionalDuration& loopTimeoutDuration) const = 0; - - /** - * Creates a burst controller on a prepared model. - * - * @return ExecutionBurstController Execution burst controller object, otherwise GeneralError. - */ - virtual GeneralResult<SharedBurst> configureExecutionBurst() const = 0; - - /** - * Return the resource that the IPreparedModel wraps, or any empty std::any. - * - * This method is used for IDevice::allocate. - * - * @return std::any containing the underlying resource. - */ - virtual std::any getUnderlyingResource() const = 0; - - // Public virtual destructor to allow objects to be stored (and destroyed) as smart pointers. - // E.g., std::unique_ptr<IPreparedModel>. - virtual ~IPreparedModel() = default; - - protected: - // Protect the non-destructor special member functions to prevent object slicing. - IPreparedModel() = default; - IPreparedModel(const IPreparedModel&) = default; - IPreparedModel(IPreparedModel&&) noexcept = default; - IPreparedModel& operator=(const IPreparedModel&) = default; - IPreparedModel& operator=(IPreparedModel&&) noexcept = default; -}; - -} // namespace android::nn - -#endif // ANDROID_FRAMEWORKS_ML_NN_COMMON_NNAPI_IPREPARED_MODEL_H
diff --git a/common/include/nnapi/OperandTypes.h b/common/include/nnapi/OperandTypes.h deleted file mode 100644 index d8b2c2f..0000000 --- a/common/include/nnapi/OperandTypes.h +++ /dev/null
@@ -1,185 +0,0 @@ -/* - * Copyright (C) 2020 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef ANDROID_FRAMEWORKS_ML_NN_COMMON_NNAPI_OPERAND_TYPES_H -#define ANDROID_FRAMEWORKS_ML_NN_COMMON_NNAPI_OPERAND_TYPES_H - -namespace android::nn { - -/** - * Operand types. - * - * The type of an operand in a model. - * - * Types prefaced with TENSOR_* must be used for tensor data (i.e., tensors - * with at least one dimension). Types not prefaced by TENSOR_* represent - * scalar values and must have no dimensions. - * - * Although we define many types, most operators accept just a few - * types. Most used are {@link OperandType::TENSOR_FLOAT32}, - * {@link OperandType::TENSOR_QUANT8_ASYMM}, - * and {@link OperandType::INT32}. - */ -enum class OperandType { - /** A 32 bit floating point scalar value. */ - FLOAT32 = 0, - - /** A signed 32 bit integer scalar value. */ - INT32 = 1, - - /** An unsigned 32 bit integer scalar value. */ - UINT32 = 2, - - /** A tensor of 32 bit floating point values. */ - TENSOR_FLOAT32 = 3, - - /** A tensor of 32 bit integer values. */ - TENSOR_INT32 = 4, - - /** - * A tensor of 8 bit unsigned integers that represent real numbers. - * - * Attached to this tensor are two numbers that can be used to convert the - * 8 bit integer to the real value and vice versa. These two numbers are: - * - scale: a 32 bit floating point value greater than zero. 
- * - zeroPoint: a 32 bit integer, in range [0, 255]. - * - * The formula is: - * real_value = (integer_value - zeroPoint) * scale. - */ - TENSOR_QUANT8_ASYMM = 5, - - /** - * An 8 bit boolean scalar value. - * - * Values of this operand type are either true or false. A zero value - * represents false; any other value represents true. - */ - BOOL = 6, - - /** - * A tensor of 16 bit signed integers that represent real numbers. - * - * Attached to this tensor is a number representing real value scale that is - * used to convert the 16 bit number to a real value in the following way: - * realValue = integerValue * scale. - * - * scale is a 32 bit floating point with value greater than zero. - */ - TENSOR_QUANT16_SYMM = 7, - - /** - * A tensor of IEEE 754 16 bit floating point values. - */ - TENSOR_FLOAT16 = 8, - - /** - * A tensor of 8 bit boolean values. - * - * Values of this operand type are either true or false. A zero value - * represents false; any other value represents true. - */ - TENSOR_BOOL8 = 9, - - /** - * An IEEE 754 16 bit floating point scalar value. - */ - FLOAT16 = 10, - - /** - * A tensor of 8 bit signed integers that represent real numbers. - * - * This tensor is associated with additional fields that can - * be used to convert the 8 bit signed integer to the real value and vice versa. - * These fields are: - * - channelDim: a 32 bit unsigned integer indicating channel dimension. - * - scales: an array of positive 32 bit floating point values. - * The size of the scales array must be equal to dimensions[channelDim]. - * - *{@link SymmPerChannelQuantParams} must hold the parameters for an Operand of this type. - * The channel dimension of this tensor must not be unknown (dimensions[channelDim] != 0). - * - * The formula is: - * realValue[..., C, ...] = - * integerValue[..., C, ...] * scales[C] - * where C is an index in the Channel dimension. 
- */ - TENSOR_QUANT8_SYMM_PER_CHANNEL = 11, - - /** - * A tensor of 16 bit unsigned integers that represent real numbers. - * - * Attached to this tensor are two numbers that can be used to convert the - * 16 bit integer to the real value and vice versa. These two numbers are: - * - scale: a 32 bit floating point value greater than zero. - * - zeroPoint: a 32 bit integer, in range [0, 65535]. - * - * The formula is: - * real_value = (integer_value - zeroPoint) * scale. - */ - TENSOR_QUANT16_ASYMM = 12, - - /** - * A tensor of 8 bit signed integers that represent real numbers. - * - * Attached to this tensor is a number representing real value scale that is - * used to convert the 8 bit number to a real value in the following way: - * realValue = integerValue * scale. - * - * scale is a 32 bit floating point with value greater than zero. - */ - TENSOR_QUANT8_SYMM = 13, - - /** - * A tensor of 8 bit signed integers that represent real numbers. - * - * Attached to this tensor are two numbers that can be used to convert the - * 8 bit integer to the real value and vice versa. These two numbers are: - * - scale: a 32 bit floating point value greater than zero. - * - zeroPoint: a 32 bit integer, in range [-128, 127]. - * - * The formula is: - * real_value = (integer_value - zeroPoint) * scale. - */ - TENSOR_QUANT8_ASYMM_SIGNED = 14, - - /** - * A reference to a subgraph. - * - * Must have the lifetime {@link Operand::LifeTime::SUBGRAPH}. - */ - SUBGRAPH = 15, - - /** - * DEPRECATED. Since HAL version 1.2, extensions are the preferred - * alternative to OEM operation and data types. - * - * OEM specific scalar value. - */ - OEM = 10000, - - /** - * DEPRECATED. Since HAL version 1.2, extensions are the preferred - * alternative to OEM operation and data types. - * - * A tensor of OEM specific values. - */ - TENSOR_OEM_BYTE = 10001, -}; - -} // namespace android::nn - -#endif // ANDROID_FRAMEWORKS_ML_NN_COMMON_NNAPI_OPERAND_TYPES_H
diff --git a/common/include/nnapi/OperationTypes.h b/common/include/nnapi/OperationTypes.h deleted file mode 100644 index ab3f1c1..0000000 --- a/common/include/nnapi/OperationTypes.h +++ /dev/null
@@ -1,5237 +0,0 @@ -/* - * Copyright (C) 2020 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef ANDROID_FRAMEWORKS_ML_NN_COMMON_NNAPI_OPERATION_TYPES_H -#define ANDROID_FRAMEWORKS_ML_NN_COMMON_NNAPI_OPERATION_TYPES_H - -namespace android::nn { - -/** - * Operation types. - * - * The type of an operation in a model. - */ -enum class OperationType { - /** - * Adds two tensors, element-wise. - * - * Takes two input tensors of identical {@link OperandType} and compatible - * dimensions. The output is the sum of both input tensors, optionally - * modified by an activation function. - * - * Two dimensions are compatible when: - * 1. they are equal, or - * 2. one of them is 1 - * - * The size of the output is the maximum size along each dimension of the - * input operands. It starts with the trailing dimensions, and works its - * way forward. - * - * Example: - * - * input1.dimension = {4, 1, 2} - * input2.dimension = {5, 4, 3, 1} - * output.dimension = {5, 4, 3, 2} - * - * Since HAL version 1.2, generic zero-sized input tensor is supported. Zero - * dimension is only compatible with 0 or 1. The size of the output - * dimension is zero if either of corresponding input dimension is zero. 
- * - * Supported tensor {@link OperandType}: - * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2) - * * {@link OperandType::TENSOR_FLOAT32} - * * {@link OperandType::TENSOR_QUANT8_ASYMM} - * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3) - * * {@link OperandType::TENSOR_INT32} (since HAL version 1.3) - * - * Supported tensor rank: up to 4 - * - * Inputs: - * * 0: A tensor. - * * 1: A tensor of the same {@link OperandType}, and compatible dimensions - * as input0. - * For a {@link OperandType::TENSOR_QUANT8_ASYMM} and - * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor, - * the scales and zeroPoint can be different from input0 scale and zeroPoint. - * * 2: An {@link OperandType::INT32} scalar, and has to be one of the - * {@link FusedActivationFunc} values. Specifies the activation to - * invoke on the result. - * For a {@link OperandType::TENSOR_INT32} tensor, - * the {@link FusedActivationFunc} must be "NONE". - * - * Outputs: - * * 0: The sum, a tensor of the same {@link OperandType} as input0. - * For a {@link OperandType::TENSOR_QUANT8_ASYMM} and - * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor, - * the scale and zeroPoint can be different from inputs' scale and zeroPoint. - */ - ADD = 0, - - /** - * Performs a 2-D average pooling operation. - * - * The output dimensions are functions of the filter dimensions, stride, and - * padding. - * - * The values in the output tensor are computed as: - * - * output[b, i, j, channel] = - * sum_{di, dj}( - * input[b, strides[1] * i + di, strides[2] * j + dj, channel] - * ) / sum(1) - * - * Supported tensor {@link OperandType}: - * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2) - * * {@link OperandType::TENSOR_FLOAT32} - * * {@link OperandType::TENSOR_QUANT8_ASYMM} - * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3) - * - * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout. 
- * With the default data layout NHWC, the data is stored in the order of: - * [batch, height, width, channels]. Alternatively, the data layout could - * be NCHW, the data storage order of: [batch, channels, height, width]. - * NCHW is supported since HAL version 1.2. - * - * Both explicit padding and implicit padding are supported. - * - * Inputs (explicit padding): - * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying - * the input. - * Since HAL version 1.2, zero batches is supported for this tensor. - * * 1: An {@link OperandType::INT32} scalar, specifying the padding on - * the left, in the ‘width’ dimension. - * * 2: An {@link OperandType::INT32} scalar, specifying the padding on - * the right, in the ‘width’ dimension. - * * 3: An {@link OperandType::INT32} scalar, specifying the padding on - * the top, in the ‘height’ dimension. - * * 4: An {@link OperandType::INT32} scalar, specifying the padding on - * the bottom, in the ‘height’ dimension. - * * 5: An {@link OperandType::INT32} scalar, specifying the stride when - * walking through input in the ‘width’ dimension. - * * 6: An {@link OperandType::INT32} scalar, specifying the stride when - * walking through input in the ‘height’ dimension. - * * 7: An {@link OperandType::INT32} scalar, specifying the filter - * width. - * * 8: An {@link OperandType::INT32} scalar, specifying the filter - * height. - * * 9: An {@link OperandType::INT32} scalar, and has to be one of the - * {@link FusedActivationFunc} values. Specifies the activation to - * invoke on the result. - * * 10: An optional {@link OperandType::BOOL} scalar, default to false. - * Set to true to specify NCHW data layout for input0 and output0. - * Available since HAL version 1.2. - * - * Inputs (implicit padding): - * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying - * the input. - * Since HAL version 1.2, zero batches is supported for this tensor. 
- * * 1: An {@link OperandType::INT32} scalar, specifying the implicit - * padding scheme, has to be one of the - * following values: {0 (NONE), 1 (SAME), 2 (VALID)}. - * * 2: An {@link OperandType::INT32} scalar, specifying the stride when - * walking through input in the ‘width’ dimension. - * * 3: An {@link OperandType::INT32} scalar, specifying the stride when - * walking through input in the ‘height’ dimension. - * * 4: An {@link OperandType::INT32} scalar, specifying the filter - * width. - * * 5: An {@link OperandType::INT32} scalar, specifying the filter - * height. - * * 6: An {@link OperandType::INT32} scalar, and has to be one of the - * {@link FusedActivationFunc} values. Specifies the activation to - * invoke on the result. - * * 7: An optional {@link OperandType::BOOL} scalar, default to false. - * Set to true to specify NCHW data layout for input0 and output0. - * Available since HAL version 1.2. - * - * Outputs: - * * 0: The output 4-D tensor, of shape - * [batches, out_height, out_width, depth]. - * For a {@link OperandType::TENSOR_QUANT8_ASYMM} and - * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor, - * the scale and zeroPoint must be the same as input0. - */ - AVERAGE_POOL_2D = 1, - - /** - * Concatenates the input tensors along the given dimension. - * - * The input tensors must have identical {@link OperandType} and the same - * dimensions except the dimension along the concatenation axis. - * - * Supported tensor {@link OperandType}: - * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2) - * * {@link OperandType::TENSOR_FLOAT32} - * * {@link OperandType::TENSOR_QUANT8_ASYMM} - * (full support since HAL version 1.2, see the input section) - * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3) - * - * Supported tensor rank: up to 4 - * - * Inputs: - * * 0 ~ n-1: The list of n input tensors, of shape - * [D0, D1, ..., Daxis(i), ..., Dm]. 
- * Before HAL version 1.2, all input tensors of - * {@link OperandType::TENSOR_QUANT8_ASYMM} - * must have the same scale and zeroPoint as the output tensor. - * Input tensors of - * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} - * are allowed to have different scale and zeroPoint. - * Since HAL version 1.2, zero-sized tensors are supported. - * * n: An {@link OperandType::INT32} scalar, specifying the - * concatenation axis. - * - * Outputs: - * * 0: The output, a tensor of the same {@link OperandType} as the input - * tensors. The output shape is [D0, D1, ..., sum(Daxis(i)), ..., Dm]. - * Since HAL version 1.2, for a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor, - * the scale and zeroPoint values can be different from - * input tensors. Before HAL version 1.2 they have to be the same as for the - * input tensors. - * For a {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor, - * the scale and zeroPoint values can be different from input tensors. - */ - CONCATENATION = 2, - - /** - * Performs a 2-D convolution operation. - * - * The CONV_2D op sweeps a 2-D filter that can mix channels together over a - * batch of images, applying the filter to each window of each image of the - * appropriate size. - * - * The output dimensions are functions of the filter dimensions, stride, and - * padding. - * - * The values in the output tensor are computed as: - * - * output[b, i, j, channel] = - * sum_{di, dj, k} ( - * input[b, strides[1] * i + di, strides[2] * j + dj, k] * - * filter[channel, di, dj, k] - * ) + bias[channel] - * - * Supported tensor {@link OperandType} configurations: - * * 32 bit floating point: - * * * {@link OperandType::TENSOR_FLOAT32} for input, filter, output, and bias. - * - * * Quantized: - * * * {@link OperandType::TENSOR_QUANT8_ASYMM} for input, filter, and output. - * * * {@link OperandType::TENSOR_INT32} for bias (with scale set to - * * * input.scale * filter.scale). 
- * - * Available since HAL version 1.2: - * * 16 bit floating point: - * * * {@link OperandType::TENSOR_FLOAT16} for input, filter, output, and bias. - * - * * Quantized with symmetric per channel quantization for the filter: - * * * {@link OperandType::TENSOR_QUANT8_ASYMM} for input, and output. - * * * {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter. - * * * {@link OperandType::TENSOR_INT32} for bias (scale set to 0.0, - * * * each value scaling is separate and equal to input.scale * filter.scales[channel]). - * - * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout. - * With the default data layout NHWC, the data is stored in the order of: - * [batch, height, width, channels]. Alternatively, the data layout could - * be NCHW, the data storage order of: [batch, channels, height, width]. - * NCHW is supported since HAL version 1.2. - * - * Both explicit padding and implicit padding are supported. - * - * Inputs (explicit padding): - * * 0: A 4-D tensor, of shape [batches, height, width, depth_in], - * specifying the input. - * Since HAL version 1.2, zero batches is supported for this tensor. - * * 1: A 4-D tensor, of shape - * [depth_out, filter_height, filter_width, depth_in], specifying the - * filter. - * For tensor of type {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL} - * the channel dimension (SymmPerChannelQuantParams::channelDim) - * must be set to 0. - * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input - * tensor of type {@link OperandType::TENSOR_FLOAT32} - * or {@link OperandType::TENSOR_FLOAT16} the bias must be of the same type. - * For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM} - * and {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED}, - * the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint - * of 0 and bias_scale == input_scale * filter_scale. 
- * For filter tensor of {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL}, - * the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint of 0 - * and bias_scale of 0. The actual scale of each value 'i' is equal to - * bias_scale[i] = input_scale * filter_scale[i]. - * * 3: An {@link OperandType::INT32} scalar, specifying the padding on - * the left, in the ‘width’ dimension. - * * 4: An {@link OperandType::INT32} scalar, specifying the padding on - * the right, in the ‘width’ dimension. - * * 5: An {@link OperandType::INT32} scalar, specifying the padding on - * the top, in the ‘height’ dimension. - * * 6: An {@link OperandType::INT32} scalar, specifying the padding on - * the bottom, in the ‘height’ dimension. - * * 7: An {@link OperandType::INT32} scalar, specifying the stride when - * walking through input in the ‘width’ dimension. - * * 8: An {@link OperandType::INT32} scalar, specifying the stride when - * walking through input in the ‘height’ dimension. - * * 9: An {@link OperandType::INT32} scalar, and has to be one of the - * {@link FusedActivationFunc} values. Specifies the activation to - * invoke on the result. - * * 10: An optional {@link OperandType::BOOL} scalar, default to false. - * Set to true to specify NCHW data layout for input0 and output0. - * Available since HAL version 1.2. - * * 11: An optional {@link OperandType::INT32} scalar, specifying the dilation - * factor for width. Defaults to 1. If set to k > 1, there will be k-1 skipped - * cells between each filter element on width dimension. If this input is set, - * input 12 (dilation factor for height) must be specified as well. - * Available since HAL version 1.2. - * * 12: An optional {@link OperandType::INT32} scalar, specifying the dilation - * factor for height. Defaults to 1. If set to k > 1, there will be k-1 skipped - * cells between each filter element on height dimension. If this input is set, - * input 11 (dilation factor for width) must be specified as well. 
- * Available since HAL version 1.2. - * - * Inputs (implicit padding): - * * 0: A 4-D tensor, of shape [batches, height, width, depth_in], - * specifying the input. - * Since HAL version 1.2, zero batches is supported for this tensor. - * * 1: A 4-D tensor, of shape - * [depth_out, filter_height, filter_width, depth_in], specifying the - * filter. - * For tensor of type {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL} - * the channel dimension (SymmPerChannelQuantParams::channelDim) - * must be set to 0. - * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input - * tensor of type {@link OperandType::TENSOR_FLOAT32} - * or {@link OperandType::TENSOR_FLOAT16} the bias must be of the same - * type. - * For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM} - * and {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED}, - * the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint - * of 0 and bias_scale == input_scale * filter_scale. - * For filter tensor of {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL}, - * the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint of 0 - * and bias_scale of 0. The actual scale of each value 'i' is equal to - * bias_scale[i] = input_scale * filter_scale[i]. - * * 3: An {@link OperandType::INT32} scalar, specifying the implicit - * padding scheme, has to be one of the - * following values: {0 (NONE), 1 (SAME), 2 (VALID)}. - * * 4: An {@link OperandType::INT32} scalar, specifying the stride when - * walking through input in the ‘width’ dimension. - * * 5: An {@link OperandType::INT32} scalar, specifying the stride when - * walking through input in the ‘height’ dimension. - * * 6: An {@link OperandType::INT32} scalar, and has to be one of the - * {@link FusedActivationFunc} values. Specifies the activation to - * invoke on the result. - * * 7: An optional {@link OperandType::BOOL} scalar, default to false. - * Set to true to specify NCHW data layout for input0 and output0. 
- * Available since HAL version 1.2. - * * 8: An optional {@link OperandType::INT32} scalar, specifying the dilation - * factor for width. Defaults to 1. If set to k > 1, there will be k-1 skipped - * cells between each filter element on width dimension. If this input is set, - * input 9 (dilation factor for height) must be specified as well. - * Available since HAL version 1.2. - * * 9: An optional {@link OperandType::INT32} scalar, specifying the dilation - * factor for height. Defaults to 1. If set to k > 1, there will be k-1 skipped - * cells between each filter element on height dimension. If this input is set, - * input 8 (dilation factor for width) must be specified as well. - * Available since HAL version 1.2. - * - * Outputs: - * * 0: The output 4-D tensor, of shape - * [batches, out_height, out_width, depth_out]. - * Before HAL version 1.2, for output tensor of - * {@link OperandType::TENSOR_QUANT8_ASYMM}, the following condition must - * be satisfied: output_scale > input_scale * filter_scale - */ - CONV_2D = 3, - - /** - * Performs a depthwise 2-D convolution operation. - * - * Given an input tensor of shape [batches, height, width, depth_in] and a - * filter tensor of shape [1, filter_height, filter_width, depth_out] - * containing depth_out convolutional filters of depth 1, DEPTHWISE_CONV - * applies a different filter to each input channel (expanding from 1 - * channel to channel_multiplier channels for each), then concatenates the - * results together. - * - * The output has depth_out = depth_in * depth_multiplier channels. - * The output dimensions are functions of the filter dimensions, stride, and - * padding. 
- * - * The values in the output tensor are computed as: - * - * output[b, i, j, k * channel_multiplier + q] = - * sum_{di, dj} ( - * input[b, strides[1] * i + di, strides[2] * j + dj, k] * - * filter[1, di, dj, k * channel_multiplier + q] - * ) + bias[k * channel_multiplier + q] - * - * Supported tensor {@link OperandType} configurations: - * * 32 bit floating point: - * * * {@link OperandType::TENSOR_FLOAT32} for input, filter, output, and bias. - * - * * Quantized: - * * * {@link OperandType::TENSOR_QUANT8_ASYMM} for input, filter, and output. - * * * {@link OperandType::TENSOR_INT32} for bias (with scale set to - * * * input.scale * filter.scale). - * - * Available since HAL version 1.2: - * * 16 bit floating point: - * * * {@link OperandType::TENSOR_FLOAT16} for input, filter, output, and bias. - * - * * Quantized with symmetric per channel quantization for the filter: - * * * {@link OperandType::TENSOR_QUANT8_ASYMM} for input, and output. - * * * {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter. - * * * {@link OperandType::TENSOR_INT32} for bias (scale set to 0.0, - * * * each value scaling is separate and equal to input.scale * filter.scales[channel]). - * - * Available since HAL version 1.3: - * * Quantized signed (since HAL version 1.3): - * * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} for input, filter, and output. - * * * {@link OperandType::TENSOR_INT32} for bias (with scale set to - * * * input.scale * filter.scale). - * - * * Quantized signed with filter symmetric per channel quantization - * (since HAL version 1.3): - * * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} for input, and output. - * * * {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter. - * * * {@link OperandType::TENSOR_INT32} for bias (scale set to 0.0, - * * * each value scaling is separate and equal to input.scale * filter.scales[channel]). - * - * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout. 
- * With the default data layout NHWC, the data is stored in the order of: - * [batch, height, width, channels]. Alternatively, the data layout could - * be NCHW, the data storage order of: [batch, channels, height, width]. - * NCHW is supported since HAL version 1.2. - * - * Both explicit padding and implicit padding are supported. - * - * Inputs (explicit padding): - * * 0: A 4-D tensor, of shape [batches, height, width, depth_in], - * specifying the input. - * * 1: A 4-D tensor, of shape [1, filter_height, filter_width, depth_out], - * specifying the filter. - * For tensor of type {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL} - * the channel dimension (SymmPerChannelQuantParams::channelDim) - * must be set to 3. - * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input - * tensor of type {@link OperandType::TENSOR_FLOAT32} - * or {@link OperandType::TENSOR_FLOAT16} the bias must be of the same type. - * For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM} - * and {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED}, - * the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint - * of 0 and bias_scale == input_scale * filter_scale. - * For filter tensor of {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL}, - * the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint of 0 - * and bias_scale of 0. The actual scale of each value 'i' is equal to - * bias_scale[i] = input_scale * filter_scale[i]. - * * 3: An {@link OperandType::INT32} scalar, specifying the padding on - * the left, in the ‘width’ dimension. - * * 4: An {@link OperandType::INT32} scalar, specifying the padding on - * the right, in the ‘width’ dimension. - * * 5: An {@link OperandType::INT32} scalar, specifying the padding on - * the top, in the ‘height’ dimension. - * * 6: An {@link OperandType::INT32} scalar, specifying the padding on - * the bottom, in the ‘height’ dimension. 
- * * 7: An {@link OperandType::INT32} scalar, specifying the stride when - * walking through input in the ‘width’ dimension. - * * 8: An {@link OperandType::INT32} scalar, specifying the stride when - * walking through input in the ‘height’ dimension. - * * 9: An {@link OperandType::INT32} scalar, specifying the depthwise - * multiplier. - * * 10: An {@link OperandType::INT32} scalar, and has to be one of the - * {@link FusedActivationFunc} values. Specifies the activation to - * invoke on the result. - * * 11: An optional {@link OperandType::BOOL} scalar, default to false. - * Set to true to specify NCHW data layout for input0 and output0. - * Available since HAL version 1.2. - * * 12: An optional {@link OperandType::INT32} scalar, specifying the dilation - * factor for width. Defaults to 1. If set to k > 1, there will be k-1 skipped - * cells between each filter element on width dimension. If this input is set, - * input 13 (dilation factor for height) must be specified as well. - * Available since HAL version 1.2. - * * 13: An optional {@link OperandType::INT32} scalar, specifying the dilation - * factor for height. Defaults to 1. If set to k > 1, there will be k-1 skipped - * cells between each filter element on height dimension. If this input is set, - * input 12 (dilation factor for width) must be specified as well. - * Available since HAL version 1.2. - * - * Inputs (implicit padding): - * * 0: A 4-D tensor, of shape [batches, height, width, depth_in], - * specifying the input. - * * 1: A 4-D tensor, of shape [1, filter_height, filter_width, depth_out], - * specifying the filter. - * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input - * tensor of type {@link OperandType::TENSOR_FLOAT32} - * or {@link OperandType::TENSOR_FLOAT16} the bias must be of the same type. 
- * For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM} - * and {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED}, - * the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint - * of 0 and bias_scale == input_scale * filter_scale. - * For filter tensor of {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL}, - * the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint of 0 - * and bias_scale of 0. The actual scale of each value 'i' is equal to - * bias_scale[i] = input_scale * filter_scale[i]. - * * 3: An {@link OperandType::INT32} scalar, specifying the implicit - * padding scheme, has to be one of the - * following values: {0 (NONE), 1 (SAME), 2 (VALID)}. - * * 4: An {@link OperandType::INT32} scalar, specifying the stride when - * walking through input in the ‘width’ dimension. - * * 5: An {@link OperandType::INT32} scalar, specifying the stride when - * walking through input in the ‘height’ dimension. - * * 6: An {@link OperandType::INT32} scalar, specifying the depthwise - * multiplier. - * * 7: An {@link OperandType::INT32} scalar, and has to be one of the - * {@link FusedActivationFunc} values. Specifies the activation to - * invoke on the result. - * * 8: An optional {@link OperandType::BOOL} scalar, default to false. - * Set to true to specify NCHW data layout for input0 and output0. - * Available since HAL version 1.2. - * * 9: An optional {@link OperandType::INT32} scalar, specifying the dilation - * factor for width. Defaults to 1. If set to k > 1, there will be k-1 skipped - * cells between each filter element on width dimension. If this input is set, - * input 10 (dilation factor for height) must be specified as well. - * Available since HAL version 1.2. - * * 10: An optional {@link OperandType::INT32} scalar, specifying the dilation - * factor for height. Defaults to 1. If set to k > 1, there will be k-1 skipped - * cells between each filter element on height dimension. 
If this input is set, - * input 9 (dilation factor for width) must be specified as well. - * Available since HAL version 1.2. - * - * Outputs: - * * 0: The output 4-D tensor, of shape - * [batches, out_height, out_width, depth_out]. Before HAL version 1.2, for - * output tensor of {@link OperandType::TENSOR_QUANT8_ASYMM}, - * the following condition must be satisfied: - * output_scale > input_scale * filter_scale - */ - DEPTHWISE_CONV_2D = 4, - - /** - * Rearranges data from depth into blocks of spatial data. - * - * More specifically, this op outputs a copy of the input tensor where - * values from the depth dimension are moved in spatial blocks to the height - * and width dimensions. The value block_size indicates the input block size - * and how the data is moved. - * - * Chunks of data of size block_size * block_size from depth are rearranged - * into non-overlapping blocks of size block_size x block_size. - * - * The width of the output tensor is input_depth * block_size, whereas the - * height is input_height * block_size. The depth of the input tensor must - * be divisible by block_size * block_size - * - * Supported tensor {@link OperandType}: - * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2) - * * {@link OperandType::TENSOR_FLOAT32} - * * {@link OperandType::TENSOR_QUANT8_ASYMM} - * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3) - * - * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout. - * With the default data layout NHWC, the data is stored in the order of: - * [batch, height, width, channels]. Alternatively, the data layout could - * be NCHW, the data storage order of: [batch, channels, height, width]. - * NCHW is supported since HAL version 1.2. - * - * Inputs: - * * 0: A 4-D tensor, of shape [batches, height, width, depth_in], - * specifying the input. - * * 1: An {@link OperandType::INT32} scalar, specifying the block_size. 
- * block_size must be >=1 and block_size * block_size must be a divisor - * of the input depth. - * * 2: An optional {@link OperandType::BOOL} scalar, default to false. - * Set to true to specify NCHW data layout for input0 and output0. - * Available since HAL version 1.2. - * - * Outputs: - * * 0: The output 4-D tensor, of shape [batch, height*block_size, - * width*block_size, depth/(block_size*block_size)]. - * For a {@link OperandType::TENSOR_QUANT8_ASYMM} and - * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor, - * the scale and zeroPoint must be the same as input0. - */ - DEPTH_TO_SPACE = 5, - - /** - * Dequantizes the input tensor. - * - * The formula is: - * - * output = (input - zeroPoint) * scale. - * - * Supported input tensor {@link OperandType}: - * * {@link OperandType::TENSOR_QUANT8_ASYMM} - * * {@link OperandType::TENSOR_QUANT8_SYMM} (since HAL version 1.2) - * * {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL} (since HAL version 1.2) - * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3) - * - * Supported output tensor {@link OperandType}: - * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2) - * * {@link OperandType::TENSOR_FLOAT32}. - * - * Supported tensor rank: up to 4 - * - * Inputs: - * * 0: A tensor. - * Since HAL version 1.2, this tensor may be zero-sized. - * - * Outputs: - * * 0: A tensor with the same shape as input0. - */ - DEQUANTIZE = 6, - - /** - * Looks up sub-tensors in the input tensor. - * - * This operator takes for input a tensor of values (Values) and - * a one-dimensional tensor of selection indices (Lookups). - * The output tensor is the concatenation of sub-tensors of Values as - * selected by Lookups. - * - * Think of Values as being sliced along its first dimension: - * The entries in Lookups select which slices are concatenated together - * to create the output tensor. 
- * - * For example, if Values has shape of [40, 200, 300] and - * Lookups has shape of [3], all three values found in Lookups are - * expected to be between 0 and 39. The resulting tensor must - * have shape of [3, 200, 300]. - * - * If a value in Lookups is out of bounds, the operation must fail - * and an error must be reported. - * - * Supported value tensor {@link OperandType}: - * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.3) - * * {@link OperandType::TENSOR_FLOAT32} - * * {@link OperandType::TENSOR_INT32} (since HAL version 1.2) - * * {@link OperandType::TENSOR_QUANT8_ASYMM} (since HAL version 1.2) - * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3) - * - * Supported value tensor rank: from 2 - * - * Inputs: - * * 0: Lookups. A 1-D tensor of {@link OperandType::TENSOR_INT32}. - * The values are indices into the first dimension of Values. - * * 1: Values. An n-D tensor, where n >= 2, from which sub-tensors are - * extracted. - * - * Output: - * * 0: A n-D tensor with the same rank and shape as the Values - * tensor, except for the first dimension which has the same size - * as Lookups' only dimension. - * For a {@link OperandType::TENSOR_QUANT8_ASYMM} and - * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor, - * the scale and zeroPoint must be the same as input1. - */ - EMBEDDING_LOOKUP = 7, - - /** - * Computes element-wise floor() on the input tensor. - * - * Supported tensor {@link OperandType}: - * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2) - * * {@link OperandType::TENSOR_FLOAT32} - * - * Supported tensor rank: up to 4 - * - * Inputs: - * * 0: A tensor. - * - * Outputs: - * * 0: The output tensor, of the same {@link OperandType} and dimensions as - * the input tensor. - */ - FLOOR = 8, - - /** - * Denotes a fully (densely) connected layer, which connects all elements - * in the input tensor with each element in the output tensor. 
- * - * This layer implements the operation: - * - * outputs = activation(inputs * weights’ + bias) - * - * Supported tensor {@link OperandType}: - * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2) - * * {@link OperandType::TENSOR_FLOAT32} - * * {@link OperandType::TENSOR_QUANT8_ASYMM} - * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3) - * - * Supported tensor rank: up to 4. - * - * Inputs: - * * 0: A tensor of at least rank 2, specifying the input. If rank is - * greater than 2, then it gets flattened to a 2-D Tensor. The - * (flattened) 2-D Tensor is reshaped (if necessary) to - * [batch_size, input_size], where "input_size" corresponds to the - * number of inputs to the layer, matching the second dimension of - * weights, and "batch_size" is calculated by dividing the number of - * elements by "input_size". - * Since HAL version 1.2, zero batch_size is supported for this tensor. - * * 1: A 2-D tensor, specifying the weights, of shape - * [num_units, input_size], where "num_units" corresponds to the number - * of output nodes. - * * 2: A 1-D tensor, of shape [num_units], specifying the bias. For input - * tensor of {@link OperandType::TENSOR_FLOAT32}, the bias should - * also be of {@link OperandType::TENSOR_FLOAT32}. - * For input tensor of {@link OperandType::TENSOR_QUANT8_ASYMM} - * and {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED}, - * the bias should be of {@link OperandType::TENSOR_INT32}, - * with zeroPoint of 0 and bias_scale == input_scale * filter_scale. - * * 3: An {@link OperandType::INT32} scalar, and has to be one of the - * {@link FusedActivationFunc} values. Specifies the activation to - * invoke on the result. - * - * Outputs: - * * 0: The output tensor, of shape [batch_size, num_units]. Before HAL version 1.2, for - * output tensor of {@link OperandType::TENSOR_QUANT8_ASYMM}, the following - * condition must be satisfied: output_scale > input_scale * filter_scale. 
- */ - FULLY_CONNECTED = 9, - - /** - * Looks up sub-tensors in the input tensor using a key-value map. - * - * This operator takes for input a tensor of values (Values), - * a one-dimensional tensor of selection values (Lookups) and - * a one-dimensional tensor that maps these values to Values - * indexes. The output tensor is the concatenation of sub-tensors of - * Values as selected by Lookups via Keys. - * - * Think of Values as being sliced along its outer-most dimension. - * The output is a concatenation of selected slices, with one slice - * for each entry of Lookups. The slice selected is the one at the - * same index as the Maps entry that matches the value in Lookups. - * - * For a hit, the corresponding sub-tensor of Values is included - * in the Output tensor. For a miss, the corresponding sub-tensor in - * Output must have zero values. - * - * For example, if Values has shape of [40, 200, 300], - * Keys should have a shape of [40]. If Lookups tensor has shape - * of [3], three slices are being concatenated, so the resulting tensor - * must have the shape of [3, 200, 300]. If the first entry in Lookups - * has the value 123456, that value must be located in Keys tensor. - * If the sixth entry of Keys contains 123456, the sixth slice of Values - * must be selected. If no entry in Keys has 123456, a slice of zeroes - * must be concatenated. - * - * Supported value tensor {@link OperandType}: - * * {@link OperandType::TENSOR_FLOAT32} - * * {@link OperandType::TENSOR_INT32} - * * {@link OperandType::TENSOR_QUANT8_ASYMM} - * - * Supported value tensor rank: from 2 - * - * Inputs: - * * 0: Lookups. A 1-D {@link OperandType::TENSOR_INT32} tensor with - * shape [ k ]. - * * 1: Keys. A 1-D {@link OperandType::TENSOR_INT32} tensor with shape - * [ n ]; Keys and Values pair represent a map, i.e., the ith element - * in Keys (Keys[i]) is the key to select the ith sub-tensor in Values - * (Values[i]), where 0 <= i <= n-1. 
Keys tensor *MUST* be sorted in - * ascending order. - * * 2: Values. A tensor with shape of [ n, … ]; i.e., the first dimension - * must be n. - * - * Outputs: - * * 0: Output. A tensor with shape [ k …]. - * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor, - * the scale and zeroPoint must be the same as input2. - * * 1: Hits. A boolean tensor with shape [ k ] indicates whether the lookup - * hits (True) or not (False). - * Stored as {@link OperandType::TENSOR_QUANT8_ASYMM} with offset 0 - * and scale 1.0f. - * A non-zero byte represents True, a hit. A zero indicates otherwise. - */ - HASHTABLE_LOOKUP = 10, - - /** - * Applies L2 normalization along the axis dimension. - * - * The values in the output tensor are computed as: - * - * output[batch, row, col, channel] = - * input[batch, row, col, channel] / - * sqrt(sum_{c} pow(input[batch, row, col, c], 2)) - * - * By default the axis dimension is the last dimension of the input tensor. - * - * Supported tensor {@link OperandType}: - * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2) - * * {@link OperandType::TENSOR_FLOAT32} - * * {@link OperandType::TENSOR_QUANT8_ASYMM} (since HAL version 1.2) - * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3) - * - * Supported tensor rank: up to 4 - * Tensors with rank less than 4 are only supported since HAL version 1.2. - * - * Inputs: - * * 0: An n-D tensor, specifying the tensor to be normalized. - * * 1: An optional {@link OperandType::INT32} scalar, default to -1, - * specifying the dimension normalization would be performed on. - * Negative index is used to specify axis from the end (e.g. -1 for - * the last axis). Must be in the range [-n, n). - * Available since HAL version 1.2. - * - * Outputs: - * * 0: A tensor of the same {@link OperandType} and same shape as input0. - * For {@link OperandType::TENSOR_QUANT8_ASYMM}, - * the scale must be 1.f / 128 and the zeroPoint must be 128. 
- * For {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED}, - * the scale must be 1.f / 128 and the zeroPoint must be 0. - * - * NOTE: Before HAL version 1.3, if the elements along an axis are all zeros, - * the result is undefined. Since HAL version 1.3, if the elements along an axis - * are all zeros, the result is logical zero. - */ - L2_NORMALIZATION = 11, - - /** - * Performs an 2-D L2 pooling operation. - * - * The output dimensions are functions of the filter dimensions, stride, and - * padding. - * - * The values in the output tensor are computed as: - * - * output[b, i, j, c] = - * sqrt(sum_{di, dj} pow(input[b, strides[1] * i + di, strides[2] * j + dj, c], 2) / - * sum(1)) - * - * Supported tensor {@link OperandType}: - * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2) - * * {@link OperandType::TENSOR_FLOAT32} - * - * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout. - * With the default data layout NHWC, the data is stored in the order of: - * [batch, height, width, channels]. Alternatively, the data layout could - * be NCHW, the data storage order of: [batch, channels, height, width]. - * NCHW is supported since HAL version 1.2. - * - * Both explicit padding and implicit padding are supported. - * - * Inputs (explicit padding): - * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying - * the input. - * Since HAL version 1.2, zero batches is supported for this tensor. - * * 1: An {@link OperandType::INT32} scalar, specifying the padding on - * the left, in the ‘width’ dimension. - * * 2: An {@link OperandType::INT32} scalar, specifying the padding on - * the right, in the ‘width’ dimension. - * * 3: An {@link OperandType::INT32} scalar, specifying the padding on - * the top, in the ‘height’ dimension. - * * 4: An {@link OperandType::INT32} scalar, specifying the padding on - * the bottom, in the ‘height’ dimension. 
- * * 5: An {@link OperandType::INT32} scalar, specifying the stride when - * walking through input in the ‘width’ dimension. - * * 6: An {@link OperandType::INT32} scalar, specifying the stride when - * walking through input in the ‘height’ dimension. - * * 7: An {@link OperandType::INT32} scalar, specifying the filter - * width. - * * 8: An {@link OperandType::INT32} scalar, specifying the filter - * height. - * * 9: An {@link OperandType::INT32} scalar, and has to be one of the - * {@link FusedActivationFunc} values. Specifies the activation to - * invoke on the result. - * * 10: An optional {@link OperandType::BOOL} scalar, default to false. - * Set to true to specify NCHW data layout for input0 and output0. - * Available since HAL version 1.2. - * - * Inputs (implicit padding): - * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying - * the input. - * Since HAL version 1.2, zero batches is supported for this tensor. - * * 1: An {@link OperandType::INT32} scalar, specifying the implicit - * padding scheme, has to be one of the - * following values: {0 (NONE), 1 (SAME), 2 (VALID)}. - * * 2: An {@link OperandType::INT32} scalar, specifying the stride when - * walking through input in the ‘width’ dimension. - * * 3: An {@link OperandType::INT32} scalar, specifying the stride when - * walking through input in the ‘height’ dimension. - * * 4: An {@link OperandType::INT32} scalar, specifying the filter - * width. - * * 5: An {@link OperandType::INT32} scalar, specifying the filter - * height. - * * 6: An {@link OperandType::INT32} scalar, and has to be one of the - * {@link FusedActivationFunc} values. Specifies the activation to - * invoke on the result. - * * 7: An optional {@link OperandType::BOOL} scalar, default to false. - * Set to true to specify NCHW data layout for input0 and output0. - * Available since HAL version 1.2. - * - * Outputs: - * * 0: The output 4-D tensor, of shape - * [batches, out_height, out_width, depth]. 
- */ - L2_POOL_2D = 12, - - /** - * Applies Local Response Normalization along the depth dimension. - * - * The 4-D input tensor is treated as a 3-D array of 1-D vectors (along the - * last dimension), and each vector is normalized independently. Within a - * given vector, each component is divided by the weighted, squared sum of - * inputs within depth_radius. - * - * The output is calculated using this formula: - * - * sqr_sum[a, b, c, d] = sum( - * pow(input[a, b, c, d - depth_radius : d + depth_radius + 1], 2)) - * output = input / pow((bias + alpha * sqr_sum), beta) - * - * For input tensor with rank less than 4, independently normalizes each - * 1-D slice along specified dimension. - * - * Supported tensor {@link OperandType}: - * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2) - * * {@link OperandType::TENSOR_FLOAT32} - * - * Supported tensor rank: up to 4 - * Tensors with rank less than 4 are only supported since HAL version 1.2. - * - * Inputs: - * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying - * the input. - * * 1: An {@link OperandType::INT32} scalar, specifying the radius of - * the normalization window. - * * 2: A scalar, specifying the bias, must not be zero. - * For input tensor of {@link OperandType::TENSOR_FLOAT16}, the bias - * value must be of {@link OperandType::FLOAT16}. - * For input tensor of {@link OperandType::TENSOR_FLOAT32}, the bias - * value must be of {@link OperandType::FLOAT32}. - * * 3: A scalar, specifying the scale factor, alpha. - * For input tensor of {@link OperandType::TENSOR_FLOAT16}, the - * alpha value must be of {@link OperandType::FLOAT16}. - * For input tensor of {@link OperandType::TENSOR_FLOAT32}, the - * alpha value must be of {@link OperandType::FLOAT32}. - * * 4: A scalar, specifying the exponent, beta. - * For input tensor of {@link OperandType::TENSOR_FLOAT16}, the beta - * value must be of {@link OperandType::FLOAT16}. 
- * For input tensor of {@link OperandType::TENSOR_FLOAT32}, the beta - * value must be of {@link OperandType::FLOAT32}. - * * 5: An optional {@link OperandType::INT32} scalar, default to -1, - * specifying the dimension normalization would be performed on. - * Negative index is used to specify axis from the end (e.g. -1 for - * the last axis). Must be in the range [-n, n). - * Available since HAL version 1.2. - * - * Outputs: - * * 0: The output tensor of same shape as input0. - */ - LOCAL_RESPONSE_NORMALIZATION = 13, - - /** - * Computes sigmoid activation on the input tensor element-wise. - * - * The output is calculated using this formula: - * - * output = 1 / (1 + exp(-input)) - * - * Supported tensor {@link OperandType}: - * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2) - * * {@link OperandType::TENSOR_FLOAT32} - * * {@link OperandType::TENSOR_QUANT8_ASYMM} - * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3) - * - * Supported tensor rank: up to 4. - * - * Inputs: - * * 0: A tensor, specifying the input. - * Since HAL version 1.2, this tensor may be zero-sized. - * - * Outputs: - * * 0: The output tensor of same shape as input0. - * For {@link OperandType::TENSOR_QUANT8_ASYMM}, - * the scale must be 1.f / 256 and the zeroPoint must be 0. - * For {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED}, - * the scale must be 1.f / 256 and the zeroPoint must be -128. - */ - LOGISTIC = 14, - - /** - * Projects an input to a bit vector via locality senstive hashing. - * - * Supported input tensor {@link OperandType}: - * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2) - * * {@link OperandType::TENSOR_FLOAT32} - * * {@link OperandType::TENSOR_INT32} - * * {@link OperandType::TENSOR_QUANT8_ASYMM} - * - * Supported input tensor rank: from 1 - * - * Inputs: - * * 0: Hash functions. Dim.size == 2, DataType: Float. - * Tensor[0].Dim[0]: Number of hash functions. 
- * Tensor[0].Dim[1]: Number of projected output bits generated by each - * hash function. - * If the projection type is Sparse: - * Tensor[0].Dim[1] + ceil(log2(Tensor[0].Dim[0])) <= 32 - * - * * 1: Input. Dim.size >= 1, no restriction on DataType. - * * 2: Weight. Optional. Dim.size == 1, DataType: Float. - * If not set, each input element is considered to have the same weight - * of 1.0. - * Tensor[1].Dim[0] == Tensor[2].Dim[0] - * * 3: Type: - * Sparse: - * Value LSHProjectionType_SPARSE(=3) (since HAL version 1.2). - * Computed bit vector is considered to be sparse. - * Each output element is an int32 made up of multiple bits - * computed from hash functions. - * - * NOTE: To avoid collisions across hash functions, an offset value - * of k * (1 << Tensor[0].Dim[1]) will be added to each signature, - * where k is the index of the hash function. - * - * Value LSHProjectionType_SPARSE_DEPRECATED(=1). - * Legacy behavior that does not include the offset value. - * - * Dense: - * Value LSHProjectionType_DENSE(=2). - * Computed bit vector is considered to be dense. Each output - * element represents a bit and can take the value of either - * 0 or 1. - * - * Outputs: - * * 0: If the projection type is Sparse: - * Output.Dim == { Tensor[0].Dim[0] } - * A tensor of int32 that represents hash signatures. - * - * If the projection type is Dense: - * Output.Dim == { Tensor[0].Dim[0] * Tensor[0].Dim[1] } - * A flattened tensor that represents projected bit vectors. - * The offset value for sparse projections was added in HAL version 1.2. - */ - LSH_PROJECTION = 15, - - /** - * Performs a single time step in a Long Short-Term Memory (LSTM) layer - * - * The LSTM operation is described by the following equations. 
- * - * \f{eqnarray*}{ - * i_t =& \sigma(W_{xi}x_t+W_{hi}h_{t-1}+W_{ci}C_{t-1}+b_i) & \\ - * f_t =& \sigma(W_{xf}x_t+W_{hf}h_{t-1}+W_{cf}C_{t-1}+b_f) & \\ - * C_t =& clip(f_t \odot C_{t-1} + i_t \odot - * g(W_{xc}x_t+W_{hc}h_{t-1}+b_c),\ t_{cell}) & \\ - * o_t =& \sigma(W_{xo}x_t+W_{ho}h_{t-1}+W_{co}C_t+b_o) & \\ - * & & \\ - * & clip(W_{proj}(o_t \odot g(C_t))+b_{proj},\ t_{proj}) - * & if\ there\ is\ a\ projection; \\ - * h_t =& & \\ - * & o_t \odot g(C_t) & otherwise. \\ - * \f} - * Where: - * * \f$x_t\f$ is the input, - * * \f$i_t\f$ is the input gate, - * * \f$f_t\f$ is the forget gate, - * * \f$C_t\f$ is the cell state, - * * \f$o_t\f$ is the output, - * * \f$h_t\f$ is the output state, - * * \f$\sigma\f$ is the logistic sigmoid function, - * * \f$g\f$ is the cell input and cell output activation function, usually - * \f$tahn\f$, - * * \f$W_{xi}\f$ is the input-to-input weight matrix, - * * \f$W_{hi}\f$ is the recurrent to input weight matrix, - * * \f$W_{ci}\f$ is the cell-to-input weight matrix, - * * \f$b_i\f$ is the input gate bias, - * * \f$W_{xf}\f$ is the input-to-forget weight matrix, - * * \f$W_{hf}\f$ is the recurrent-to-forget weight matrix, - * * \f$W_{cf}\f$ is the cell-to-forget weight matrix, - * * \f$b_f\f$ is the forget gate bias, - * * \f$W_{xc}\f$ is the input-to-cell weight matrix, - * * \f$W_{hc}\f$ is the recurrent-to-cell weight matrix, - * * \f$b_c\f$ is the cell bias, - * * \f$W_{xo}\f$ is the input-to-output weight matrix, - * * \f$W_{ho}\f$ is the recurrent-to-output weight matrix, - * * \f$W_{co}\f$ is the cell-to-output weight matrix, - * * \f$b_o\f$ is the output gate bias, - * * \f$W_{proj}\f$ is the projection weight matrix, - * * \f$b_{proj}\f$ is the projection bias, - * * \f$t_{cell}\f$ is the threshold for clipping the cell state, and - * * \f$t_{proj}\f$ is the threshold for clipping the projected output. 
- * * \f$\odot\f$ is the - * <a href="https://en.wikipedia.org/wiki/Hadamard_product_(matrices)"> - * Hadamard product</a> that takes two matrices and produces another - * matrix, each element of which is the product of the corresponding - * elements of the input matrices. - * - * Since HAL version 1.2 LSTM supports layer normalization. - * In case layer normalization is used, the inputs to internal activation - * functions (sigmoid and \f$g\f$) are normalized, rescaled and recentered - * following an approach from section 3.1 from - * https://arxiv.org/pdf/1607.06450.pdf - * - * The operation has the following independently optional inputs: - * * The cell-to-input weights (\f$W_{ci}\f$), cell-to-forget weights - * (\f$W_{cf}\f$) and cell-to-output weights (\f$W_{co}\f$) either all - * have values or neither of them have values (i.e., all set to null). If - * they have values, the peephole optimization is used. - * * The input-to-input weights (\f$W_{xi}\f$), recurrent-to-input weights - * (\f$W_{hi}\f$) and input gate bias (\f$b_i\f$) either all have values, - * or none of them have values. If they have no values, coupling of input - * and forget gates (CIFG) is used, in which case the input gate - * (\f$i_t\f$) is calculated using the following equation instead. - * \f{eqnarray*}{ - * i_t = 1 - f_t - * \f} - * In case peephole optimization is used and CIFG is not used - * cell-to-input (\f$W_{ci}\f$) weights must be present. Otherwise, the - * cell-to-input weights must have no value. - * * The projection weights (\f$W_{proj}\f$) is required only for the - * recurrent projection layer, and should otherwise have no value. - * * The projection bias (\f$b_{proj}\f$) may (but not required to) have a - * value if the recurrent projection layer exists, and should otherwise - * have no value. - * * (HAL version 1.2 or later) The four layer normalization weights either all have - * values or none of them have values. 
Additionally, if CIFG is used, - * input layer normalization weights tensor is omitted and the other layer - * normalization weights either all have values or none of them have - * values. Layer normalization is used when the values of all the layer - * normalization weights are present. - * - * References: - * - * The default non-peephole non-CIFG implementation is based on: - * http://www.bioinf.jku.at/publications/older/2604.pdf - * S. Hochreiter and J. Schmidhuber. "Long Short-Term Memory". Neural - * Computation, 9(8):1735-1780, 1997. - * - * The peephole implementation and projection layer is based on: - * https://research.google.com/pubs/archive/43905.pdf - * Hasim Sak, Andrew Senior, and Francoise Beaufays. "Long short-term memory - * recurrent neural network architectures for large scale acoustic - * modeling." INTERSPEECH, 2014. - * (However, the concept of peephole optimization was introduced in work - * prior to this paper.) - * - * The coupling of input and forget gate (CIFG) is based on: - * http://arxiv.org/pdf/1503.04069.pdf - * Greff et al. "LSTM: A Search Space Odyssey" - * - * The layer normalization is based on: - * https://arxiv.org/pdf/1607.06450.pdf - * Jimmy Ba et al. "Layer Normalization" - * - * Supported tensor {@link OperandType}: - * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2) - * * {@link OperandType::TENSOR_FLOAT32} - * - * All input and output tensors must be of the same type. - * - * Inputs: - * * 0: The input (\f$x_t\f$). - * A 2-D tensor of shape [batch_size, input_size], where “batch_size” - * corresponds to the batching dimension, and “input_size” is the size - * of the input. - * * 1: The input-to-input weights (\f$W_{xi}\f$). Optional. - * A 2-D tensor of shape [num_units, input_size], where “num_units” - * corresponds to the number of cell units. - * * 2: The input-to-forget weights (\f$W_{xf}\f$). - * A 2-D tensor of shape [num_units, input_size]. - * * 3: The input-to-cell weights (\f$W_{xc}\f$). 
- * A 2-D tensor of shape [num_units, input_size]. - * * 4: The input-to-output weights (\f$W_{xo}\f$). - * A 2-D tensor of shape [num_units, input_size]. - * * 5: The recurrent-to-input weights (\f$W_{hi}\f$). Optional. - * A 2-D tensor of shape [num_units, output_size], where “output_size” - * corresponds to either the number of cell units (i.e., “num_units”), - * or the second dimension of the “projection_weights”, if defined. - * * 6: The recurrent-to-forget weights (\f$W_{hf}\f$). - * A 2-D tensor of shape [num_units, output_size]. - * * 7: The recurrent-to-cell weights (\f$W_{hc}\f$). - * A 2-D tensor of shape [num_units, output_size]. - * * 8: The recurrent-to-output weights (\f$W_{ho}\f$). - * A 2-D tensor of shape [num_units, output_size]. - * * 9: The cell-to-input weights (\f$W_{ci}\f$). Optional. - * A 1-D tensor of shape [num_units]. - * * 10:The cell-to-forget weights (\f$W_{cf}\f$). Optional. - * A 1-D tensor of shape [num_units]. - * * 11:The cell-to-output weights (\f$W_{co}\f$). Optional. - * A 1-D tensor of shape [num_units]. - * * 12:The input gate bias (\f$b_i\f$). Optional. - * A 1-D tensor of shape [num_units]. - * * 13:The forget gate bias (\f$b_f\f$). - * A 1-D tensor of shape [num_units]. - * * 14:The cell bias (\f$b_c\f$). - * A 1-D tensor of shape [num_units]. - * * 15:The output gate bias (\f$b_o\f$). - * A 1-D tensor of shape [num_units]. - * * 16:The projection weights (\f$W_{proj}\f$). Optional. - * A 2-D tensor of shape [output_size, num_units]. - * * 17:The projection bias (\f$b_{proj}\f$). Optional. - * A 1-D tensor of shape [output_size]. - * * 18:The output state (in) (\f$h_{t-1}\f$). - * A 2-D tensor of shape [batch_size, output_size]. - * * 19:The cell state (in) (\f$C_{t-1}\f$). - * A 2-D tensor of shape [batch_size, num_units]. - * * 20:The activation function (\f$g\f$). - * A value indicating the activation function: - * <ul> - * <li>0: None; - * <li>1: Relu; - * <li>3: Relu6; - * <li>4: Tanh; - * <li>6: Sigmoid. 
- * </ul> - * * 21:The clipping threshold (\f$t_{cell}\f$) for the cell state, such - * that values are bound within [-cell_clip, cell_clip]. If set to 0.0 - * then clipping is disabled. - * Until HAL version 1.2 this scalar must be of type {@link - * OperandType::FLOAT32}. Since HAL version 1.2, if all the input - * tensors have type {@link OperandType::TENSOR_FLOAT32}, this - * scalar must be of the type {@link OperandType::FLOAT32}, - * otherwise if all the input tensors have the type {@link - * OperandType::TENSOR_FLOAT16}, this scalar must be of type {@link - * OperandType::FLOAT16}. - * * 22:The clipping threshold (\f$t_{proj}\f$) for the output from the - * projection layer, such that values are bound within - * [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled. - * Until HAL version 1.2 this scalar must be of type {@link - * OperandType::FLOAT32}. Since HAL version 1.2, if all the input - * tensors have type {@link OperandType::TENSOR_FLOAT32}, this - * scalar must be of the type {@link OperandType::FLOAT32}, - * otherwise if all the input tensors have the type {@link - * OperandType::TENSOR_FLOAT16}, this scalar must be of type {@link - * OperandType::FLOAT16}. - * Since HAL version 1.2 there are additional inputs to this op: - * * 23:The input layer normalization weights. - * A 1-D tensor of shape [num_units]. Used to rescale normalized inputs - * to activation at input gate. - * * 24:The forget layer normalization weights. - * A 1-D tensor of shape [num_units]. Used to rescale normalized inputs - * to activation at forget gate. - * * 25:The cell layer normalization weights. - * A 1-D tensor of shape [num_units]. Used to rescale normalized inputs - * to activation at cell gate. - * * 26:The output layer normalization weights. - * A 1-D tensor of shape [num_units]. Used to rescale normalized inputs - * to activation at output gate. - * - * Outputs: - * * 0: The scratch buffer. 
- * A 2-D tensor of shape [batch_size, num_units * 3] with CIFG, or - * [batch_size, num_units * 4] without CIFG. - * * 1: The output state (out) (\f$h_t\f$). - * A 2-D tensor of shape [batch_size, output_size]. - * * 2: The cell state (out) (\f$C_t\f$). - * A 2-D tensor of shape [batch_size, num_units]. - * * 3: The output (\f$o_t\f$). - * A 2-D tensor of shape [batch_size, output_size]. This is effectively - * the same as the current “output state (out)” value. - */ - LSTM = 16, - - /** - * Performs an 2-D max pooling operation. - * - * The output dimensions are functions of the filter dimensions, stride, and - * padding. - * - * The values in the output tensor are computed as: - * - * output[b, i, j, channel] = - * max_{di, dj} ( - * input[b, strides[1] * i + di, strides[2] * j + dj, channel] - * ) - * - * Supported tensor {@link OperandType}: - * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2) - * * {@link OperandType::TENSOR_FLOAT32} - * * {@link OperandType::TENSOR_QUANT8_ASYMM} - * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3) - * - * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout. - * With the default data layout NHWC, the data is stored in the order of: - * [batch, height, width, channels]. Alternatively, the data layout could - * be NCHW, the data storage order of: [batch, channels, height, width]. - * NCHW is supported since HAL version 1.2. - * - * Both explicit padding and implicit padding are supported. - * - * Inputs (explicit padding): - * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying - * the input. - * Since HAL version 1.2, zero batches is supported for this tensor. - * * 1: An {@link OperandType::INT32} scalar, specifying the padding on - * the left, in the ‘width’ dimension. - * * 2: An {@link OperandType::INT32} scalar, specifying the padding on - * the right, in the ‘width’ dimension. 
- * * 3: An {@link OperandType::INT32} scalar, specifying the padding on - * the top, in the ‘height’ dimension. - * * 4: An {@link OperandType::INT32} scalar, specifying the padding on - * the bottom, in the ‘height’ dimension. - * * 5: An {@link OperandType::INT32} scalar, specifying the stride when - * walking through input in the ‘width’ dimension. - * * 6: An {@link OperandType::INT32} scalar, specifying the stride when - * walking through input in the ‘height’ dimension. - * * 7: An {@link OperandType::INT32} scalar, specifying the filter - * width. - * * 8: An {@link OperandType::INT32} scalar, specifying the filter - * height. - * * 9: An {@link OperandType::INT32} scalar, and has to be one of the - * {@link FusedActivationFunc} values. Specifies the activation to - * invoke on the result. - * * 10: An optional {@link OperandType::BOOL} scalar, default to false. - * Set to true to specify NCHW data layout for input0 and output0. - * Available since HAL version 1.2. - * - * Inputs (implicit padding): - * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying - * the input. - * Since HAL version 1.2, zero batches is supported for this tensor. - * * 1: An {@link OperandType::INT32} scalar, specifying the implicit - * padding scheme, has to be one of the - * following values: {0 (NONE), 1 (SAME), 2 (VALID)}. - * * 2: An {@link OperandType::INT32} scalar, specifying the stride when - * walking through input in the ‘width’ dimension. - * * 3: An {@link OperandType::INT32} scalar, specifying the stride when - * walking through input in the ‘height’ dimension. - * * 4: An {@link OperandType::INT32} scalar, specifying the filter - * width. - * * 5: An {@link OperandType::INT32} scalar, specifying the filter - * height. - * * 6: An {@link OperandType::INT32} scalar, and has to be one of the - * {@link FusedActivationFunc} values. Specifies the activation to - * invoke on the result. 
- * * 7: An optional {@link OperandType::BOOL} scalar, default to false. - * Set to true to specify NCHW data layout for input0 and output0. - * Available since HAL version 1.2. - * - * Outputs: - * * 0: The output 4-D tensor, of shape - * [batches, out_height, out_width, depth]. - * For a {@link OperandType::TENSOR_QUANT8_ASYMM} and - * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor, - * the scale and zeroPoint must be the same as input0. - */ - MAX_POOL_2D = 17, - - /** - * Multiplies two tensors, element-wise. - * - * Takes two input tensors of identical {@link OperandType} and compatible - * dimensions. The output is the product of both input tensors, optionally - * modified by an activation function. - * - * Two dimensions are compatible when: - * 1. they are equal, or - * 2. one of them is 1 - * - * The size of the resulting output is the maximum size along each dimension - * of the input operands. It starts with the trailing dimensions, and works - * its way forward. - * - * Since HAL version 1.2, generic zero-sized input tensor is supported. Zero - * dimension is only compatible with 0 or 1. The size of the output - * dimension is zero if either of corresponding input dimension is zero. - * - * Supported tensor {@link OperandType}: - * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2) - * * {@link OperandType::TENSOR_FLOAT32} - * * {@link OperandType::TENSOR_QUANT8_ASYMM} - * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3) - * * {@link OperandType::TENSOR_INT32} (since HAL version 1.3) - * - * Supported tensor rank: up to 4 - * - * Inputs: - * * 0: A tensor. - * * 1: A tensor of the same {@link OperandType}, and compatible dimensions - * as input0. - * * 2: An {@link OperandType::INT32} scalar, and has to be one of the - * {@link FusedActivationFunc} values. Specifies the activation to - * invoke on the result. 
- * For a {@link OperandType::TENSOR_INT32} tensor, - * the {@link FusedActivationFunc} must be "NONE". - * - * Outputs: - * * 0: The product, a tensor of the same {@link OperandType} as input0. - * For output tensor of {@link OperandType::TENSOR_QUANT8_ASYMM} - * and {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED}, - * the following condition must be satisfied: - * output_scale > input1_scale * input2_scale. - */ - MUL = 18, - - /** - * Computes rectified linear activation on the input tensor element-wise. - * - * The output is calculated using this formula: - * - * output = max(0, input) - * - * Supported tensor {@link OperandType}: - * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2) - * * {@link OperandType::TENSOR_FLOAT32} - * * {@link OperandType::TENSOR_QUANT8_ASYMM} - * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3) - * - * Supported tensor rank: up to 4. - * - * Inputs: - * * 0: A tensor, specifying the input. - * Since HAL version 1.2, this tensor may be zero-sized. - * - * Outputs: - * * 0: The output tensor of same shape as input0. - * For a {@link OperandType::TENSOR_QUANT8_ASYMM} and - * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor, - * the scale and zeroPoint must be the same as input0. - */ - RELU = 19, - - /** - * Computes rectified linear 1 activation on the input tensor element-wise. - * - * The output is calculated using this formula: - * - * output = min(1.f, max(-1.f, input)) - * - * Supported tensor {@link OperandType}: - * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2) - * * {@link OperandType::TENSOR_FLOAT32} - * * {@link OperandType::TENSOR_QUANT8_ASYMM} - * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3) - * - * Supported tensor rank: up to 4. - * - * Inputs: - * * 0: A tensor, specifying the input. - * Since HAL version 1.2, this tensor may be zero-sized. - * - * Outputs: - * * 0: The output tensor of the same shape as input0. 
- * For a {@link OperandType::TENSOR_QUANT8_ASYMM} and - * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor, - * the scale and zeroPoint must be the same as input0. - */ - RELU1 = 20, - - /** - * Computes rectified linear 6 activation on the input tensor element-wise. - * - * The output is calculated using this formula: - * - * output = min(6, max(0, input)) - * - * Supported tensor {@link OperandType}: - * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2) - * * {@link OperandType::TENSOR_FLOAT32} - * * {@link OperandType::TENSOR_QUANT8_ASYMM} - * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3) - * - * Supported tensor rank: up to 4. - * - * Inputs: - * * 0: A tensor, specifying the input. - * Since HAL version 1.2, this tensor may be zero-sized. - * - * Outputs: - * * 0: The output tensor of same shape as input0. - * For a {@link OperandType::TENSOR_QUANT8_ASYMM} and - * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor, - * the scale and zeroPoint must be the same as input0. - */ - RELU6 = 21, - - /** - * Reshapes a tensor. - * - * Given tensor, this operation returns a tensor that has the same values as - * tensor, but with a newly specified shape. - * - * Supported tensor {@link OperandType}: - * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2) - * * {@link OperandType::TENSOR_FLOAT32} - * * {@link OperandType::TENSOR_QUANT8_ASYMM} - * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3) - * - * Supported tensor rank: up to 4. - * - * Inputs: - * * 0: A tensor, specifying the tensor to be reshaped. - * * 1: A 1-D tensor of {@link OperandType::TENSOR_INT32}, defining the - * shape of the output tensor. The number of elements implied by shape - * must be the same as the number of elements in the input tensor. - * - * If one component of shape is the special value -1, the size of that - * dimension is computed so that the total size remains constant. 
In - * particular, a shape of [-1] flattens into 1-D. At most one component - * of shape can be -1. - * - * Outputs: - * * 0: The output tensor, of shape specified by the input shape. - * For a {@link OperandType::TENSOR_QUANT8_ASYMM} and - * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor, - * the scale and zeroPoint must be the same as input0. - */ - RESHAPE = 22, - - /** - * Resizes images to given size using the bilinear interpretation. - * - * Resized images must be distorted if their output aspect ratio is not the - * same as input aspect ratio. The corner pixels of output may not be the - * same as corner pixels of input. - * - * Supported tensor {@link OperandType}: - * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2) - * * {@link OperandType::TENSOR_FLOAT32} - * * {@link OperandType::TENSOR_QUANT8_ASYMM} (since HAL version 1.2) - * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3) - * - * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout. - * With the default data layout NHWC, the data is stored in the order of: - * [batch, height, width, channels]. Alternatively, the data layout could - * be NCHW, the data storage order of: [batch, channels, height, width]. - * NCHW is supported since HAL version 1.2. - * - * Both resizing by shape and resizing by scale are supported. - * - * Inputs (resizing by shape): - * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying - * the input. - * Since HAL version 1.2, zero batches is supported for this tensor. - * * 1: An {@link OperandType::INT32} scalar, specifying the output - * width of the output tensor. - * * 2: An {@link OperandType::INT32} scalar, specifying the output - * height of the output tensor. - * * 3: An optional {@link OperandType::BOOL} scalar, default to false. - * Set to true to specify NCHW data layout for input0 and output0. - * Available since HAL version 1.2. - * * 4: Align corners. 
An optional {@link OperandType::BOOL} - * scalar, default to false. If True, the centers of the 4 corner - * pixels of the input and output tensors are aligned, preserving the - * values at the corner pixels. - * Available since HAL version 1.3. - * * 5: Half pixel centers. An optional {@link OperandType::BOOL} - * scalar, default to false. If True, the pixel centers are assumed to - * be at (0.5, 0.5). This is the default behavior of image.resize in - * TF 2.0. If this parameter is True, then align_corners parameter - * must be False. - * Available since HAL version 1.3. - * - * Inputs (resizing by scale, since HAL version 1.2): - * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying - * the input. Zero batches is supported for this tensor. - * * 1: A scalar, specifying width_scale, the scaling factor of the width - * dimension from the input tensor to the output tensor. The output - * width is calculated as new_width = floor(width * width_scale). - * The scalar must be of {@link OperandType::FLOAT16} if input0 is - * of {@link OperandType::TENSOR_FLOAT16} and of - * {@link OperandType::FLOAT32} otherwise. - * * 2: A scalar, specifying height_scale, the scaling factor of the height - * dimension from the input tensor to the output tensor. The output - * height is calculated as new_height = floor(height * height_scale). - * The scalar must be of {@link OperandType::FLOAT16} if input0 is - * of {@link OperandType::TENSOR_FLOAT16} and of - * {@link OperandType::FLOAT32} otherwise. - * * 3: An optional {@link OperandType::BOOL} scalar, default to false. - * Set to true to specify NCHW data layout for input0 and output0. - * * 4: Align corners. An optional {@link OperandType::BOOL} - * scalar, default to false. If True, the centers of the 4 corner - * pixels of the input and output tensors are aligned, preserving the - * values at the corner pixels. - * Available since HAL version 1.3. - * * 5: Half pixel centers. 
An optional {@link OperandType::BOOL} - * scalar, default to false. If True, the pixel centers are assumed to - * be at (0.5, 0.5). This is the default behavior of image.resize in - * TF 2.0. If this parameter is True, then align_corners parameter - * must be False. - * Available since HAL version 1.3. - * - * Outputs: - * * 0: The output 4-D tensor, of shape - * [batches, new_height, new_width, depth]. - * For a {@link OperandType::TENSOR_QUANT8_ASYMM} and - * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor, - * the scale and zeroPoint must be the same as input0. - * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor, - * the scale and zeroPoint must be the same as input0. - */ - RESIZE_BILINEAR = 23, - - /** - * A basic recurrent neural network layer. - * - * This layer implements the operation: - * outputs = state = activation(inputs * input_weights + - * state * recurrent_weights + bias) - * - * Where: - * * “input_weights” is a weight matrix that multiplies the inputs; - * * “recurrent_weights” is a weight matrix that multiplies the current - * “state” which itself is the output from the previous time step - * computation; - * * “bias” is a bias vector (added to each output vector in the batch); - * * “activation” is the function passed as the “fused_activation_function” - * argument (if not “NONE”). - * - * Supported tensor {@link OperandType}: - * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2) - * * {@link OperandType::TENSOR_FLOAT32} - * - * The input tensors must all be the same type. - * - * Inputs: - * * 0: input. - * A 2-D tensor of shape [batch_size, input_size], where “batch_size” - * corresponds to the batching dimension, and “input_size” is the size - * of the input. - * * 1: weights. - * A 2-D tensor of shape [num_units, input_size], where “num_units” - * corresponds to the number of units. - * * 2: recurrent_weights. 
- * A 2-D tensor of shape [num_units, num_units], with columns - * corresponding to the weights from each unit. - * * 3: bias. - * A 1-D tensor of shape [num_units]. - * * 4: hidden state (in). - * A 2-D tensor of shape [batch_size, num_units]. - * * 5: fused_activation_function. - * An optional {@link FusedActivationFunc} value indicating the - * activation function. If “NONE” is specified then it results in a - * linear activation. - * - * Outputs: - * * 0: hidden state (out). - * A 2-D tensor of shape [batch_size, num_units]. - * - * * 1: output. - * A 2-D tensor of shape [batch_size, num_units]. This is effectively - * the same as the current state value. - */ - RNN = 24, - - /** - * Computes the softmax activation on the input tensor element-wise, per - * batch, by normalizing the input vector so the maximum coefficient is - * zero. - * - * The output is calculated using this formula: - * - * output[batch, i] = - * exp((input[batch, i] - max(input[batch, :])) * beta) / - * sum_{k}{exp((input[batch, k] - max(input[batch, :])) * beta)} - * - * For input tensor with rank other than 2, the activation will be applied - * independently on each 1-D slice along specified dimension. - * - * Supported tensor {@link OperandType}: - * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2) - * * {@link OperandType::TENSOR_FLOAT32} - * * {@link OperandType::TENSOR_QUANT8_ASYMM} - * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3) - * - * Supported tensor rank: up to 4. - * Tensors with rank other than 2 or 4 are only supported since HAL version 1.2. - * - * Inputs: - * * 0: A 2-D or 4-D tensor, specifying the tensor to be reshaped. - * Since HAL version 1.2, this tensor may be zero-sized. - * * 1: A scalar, specifying the positive scaling factor for the exponent, - * beta. 
If input0 is of {@link OperandType::TENSOR_FLOAT32}, - * {@link OperandType::TENSOR_QUANT8_ASYMM} or - * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED}, the scalar - * must be of {@link OperandType::FLOAT32}. - * If input0 is of {@link OperandType::TENSOR_FLOAT16}, then the - * scalar must be of {@link OperandType::FLOAT16}. - * * 2: An optional {@link OperandType::INT32} scalar, default to -1, - * specifying the dimension the activation would be performed on. - * Negative index is used to specify axis from the end (e.g. -1 for - * the last axis). Must be in the range [-n, n). - * Available since HAL version 1.2. - * - * Outputs: - * * 0: The output tensor of same shape as input0. - * For {@link OperandType::TENSOR_QUANT8_ASYMM}, - * the scale must be 1.f / 256 and the zeroPoint must be 0. - * For {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED}, - * the scale must be 1.f / 256 and the zeroPoint must be -128. - */ - SOFTMAX = 25, - - /** - * Rearranges blocks of spatial data, into depth. - * - * More specifically, this op outputs a copy of the input tensor where - * values from the height and width dimensions are moved to the depth - * dimension. The value block_size indicates the input block size and how - * the data is moved. - * - * Chunks of data of size block_size * block_size from depth are rearranged - * into non-overlapping blocks of size block_size x block_size. - * - * The depth of the output tensor is input_depth * block_size * block_size. - * The input tensor's height and width must be divisible by block_size. - * - * Supported tensor {@link OperandType}: - * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2) - * * {@link OperandType::TENSOR_FLOAT32} - * * {@link OperandType::TENSOR_QUANT8_ASYMM} - * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3) - * - * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout. 
- * With the default data layout NHWC, the data is stored in the order of: - * [batch, height, width, channels]. Alternatively, the data layout could - * be NCHW, the data storage order of: [batch, channels, height, width]. - * NCHW is supported since HAL version 1.2. - * - * Inputs: - * * 0: A 4-D tensor, of shape [batches, height, width, depth_in], - * specifying the input. - * * 1: An {@link OperandType::INT32} scalar, specifying the block_size. - * block_size must be >=1 and block_size must be a divisor of both the - * input height and width. - * * 2: An optional {@link OperandType::BOOL} scalar, default to false. - * Set to true to specify NCHW data layout for input0 and output0. - * Available since HAL version 1.2. - * - * Outputs: - * * 0: The output 4-D tensor, of shape [batches, height/block_size, - * width/block_size, depth_in*block_size*block_size]. - * For a {@link OperandType::TENSOR_QUANT8_ASYMM} and - * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor, - * the scale and zeroPoint must be the same as input0. - */ - SPACE_TO_DEPTH = 26, - - /** - * SVDF op is a kind of stateful layer derived from the notion that a - * densely connected layer that's processing a sequence of input frames can - * be approximated by using a singular value decomposition of each of its - * nodes. The implementation is based on: - * - * https://research.google.com/pubs/archive/43813.pdf - * - * P. Nakkiran, R. Alvarez, R. Prabhavalkar, C. Parada. - * “Compressing Deep Neural Networks using a Rank-Constrained Topology”. - * INTERSPEECH, 2015. - * - * It processes the incoming input using a 2-stage filtering mechanism: - * * stage 1 performs filtering on the "features" dimension, whose outputs - * get pushed into a memory of fixed-size memory_size. - * * stage 2 performs filtering on the "time" dimension of the memory_size - * memoized outputs of stage 1. 
- * - * Specifically, for rank 1, this layer implements the operation: - * - * memory = push(conv1d(inputs, weights_feature, feature_dim, - * "PADDING_VALID")); - * outputs = activation(memory * weights_time + bias); - * - * Where: - * * “weights_feature” is a weights matrix that processes the inputs (by - * convolving the input with every “feature filter”), and whose outputs - * get pushed, stacked in order, into the fixed-size “memory” (the oldest - * entry gets dropped); - * * “weights_time” is a weights matrix that processes the “memory” (by a - * batched matrix multiplication on the num_units); - * * “bias” is an optional bias vector (added to each output vector in the - * batch); and - * * “activation” is the function passed as the “fused_activation_function” - * argument (if not “NONE”). - * - * Each rank adds a dimension to the weights matrices by means of stacking - * the filters. - * - * Supported tensor {@link OperandType}: - * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2) - * * {@link OperandType::TENSOR_FLOAT32} - * - * All input tensors must be the same type. - * - * Inputs: - * * 0: input. - * A 2-D tensor of shape [batch_size, input_size], where “batch_size” - * corresponds to the batching dimension, and “input_size” is the size - * of the input. - * * 1: weights_feature. - * A 2-D tensor of shape [num_units, input_size], where “num_units” - * corresponds to the number of units. - * * 2: weights_time. - * A 2-D tensor of shape [num_units, memory_size], where “memory_size” - * corresponds to the fixed-size of the memory. - * * 3: bias. - * An optional 1-D tensor of shape [num_units]. - * * 4: state (in). - * A 2-D tensor of shape [batch_size, (memory_size - 1) * num_units * rank]. - * * 5: rank. - * The rank of the SVD approximation. - * * 6: fused_activation_function. - * An optional {@link FusedActivationFunc} value indicating the - * activation function. If “NONE” is specified then it results in a - * linear activation. 
- * - * Outputs: - * * 0: state (out). - * A 2-D tensor of the same {@link OperandType} as the inputs, with shape - * [batch_size, (memory_size - 1) * num_units * rank]. - * * 1: output. - * A 2-D tensor of the same {@link OperandType} as the inputs, with shape - * [batch_size, num_units]. - */ - SVDF = 27, - - /** - * Computes hyperbolic tangent of input tensor element-wise. - * - * The output is calculated using this formula: - * - * output = tanh(input) - * - * Supported tensor {@link OperandType}: - * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2) - * * {@link OperandType::TENSOR_FLOAT32} - * * {@link OperandType::TENSOR_QUANT8_ASYMM} (since HAL version 1.2) - * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3) - * - * Supported tensor rank: up to 4. - * - * Inputs: - * * 0: A tensor, specifying the input. - * Since HAL version 1.2, this tensor may be zero-sized. - * - * Outputs: - * * 0: The output tensor of same shape as input0. - * For {@link OperandType::TENSOR_QUANT8_ASYMM}, - * the scale must be 1.f / 128 and the zeroPoint must be 128. - * For {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED}, - * the scale must be 1.f / 128 and the zeroPoint must be 0. - */ - TANH = 28, - - /** - * BatchToSpace for N-dimensional tensors. - * - * This operation reshapes the batch dimension (dimension 0) into M + 1 - * dimensions of shape block_shape + [batch], interleaves these blocks back - * into the grid defined by the spatial dimensions [1, ..., M], to obtain a - * result with the same rank as the input. - * - * This is the reverse of SpaceToBatch. - * - * Supported tensor {@link OperandType}: - * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2) - * * {@link OperandType::TENSOR_FLOAT32} - * * {@link OperandType::TENSOR_QUANT8_ASYMM} - * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3) - * - * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout. 
- * With the default data layout NHWC, the data is stored in the order of: - * [batch, height, width, channels]. Alternatively, the data layout could - * be NCHW, the data storage order of: [batch, channels, height, width]. - * NCHW is supported since HAL version 1.2. - * - * Inputs: - * * 0: An n-D tensor, specifying the tensor to be reshaped - * * 1: A 1-D Tensor of {@link OperandType::TENSOR_INT32}, the block - * sizes for each spatial dimension of the input tensor. All values - * must be >= 1. - * * 2: An optional {@link OperandType::BOOL} scalar, default to false. - * Set to true to specify NCHW data layout for input0 and output0. - * Available since API level 29. - * - * Outputs: - * * 0: A tensor of the same {@link OperandType} as input0. - * For a {@link OperandType::TENSOR_QUANT8_ASYMM} and - * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor, - * the scale and zeroPoint must be the same as input0. - */ - BATCH_TO_SPACE_ND = 29, - - /** - * Element-wise division of two tensors. - * - * Takes two input tensors of identical {@link OperandType} and compatible - * dimensions. The output is the result of dividing the first input tensor - * by the second, optionally modified by an activation function. - * - * For inputs of {@link OperandType::TENSOR_INT32}, performs - * "floor division" ("//" in Python). For example, - * 5 // 2 = 2 - * -5 // 2 = -3 - * - * Two dimensions are compatible when: - * 1. they are equal, or - * 2. one of them is 1 - * - * The size of the output is the maximum size along each dimension of the - * input operands. It starts with the trailing dimensions, and works its way - * forward. - * - * Example: - * input1.dimension = {4, 1, 2} - * input2.dimension = {5, 4, 3, 1} - * output.dimension = {5, 4, 3, 2} - * - * Since HAL version 1.2, generic zero-sized input tensor is supported. Zero - * dimension is only compatible with 0 or 1. The size of the output - * dimension is zero if either of corresponding input dimension is zero. 
- * - * Supported tensor {@link OperandType}: - * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2) - * * {@link OperandType::TENSOR_FLOAT32} - * * {@link OperandType::TENSOR_INT32} (since HAL version 1.3) - * - * Supported tensor rank: up to 4 - * - * Inputs: - * * 0: An n-D tensor, specifying the first input. - * * 1: A tensor of the same {@link OperandType}, and compatible dimensions - * as input0. - * * 2: An {@link OperandType::INT32} scalar, and has to be one of the - * {@link FusedActivationFunc} values. Specifies the activation to - * invoke on the result. - * For a {@link OperandType::TENSOR_INT32} tensor, - * the {@link FusedActivationFunc} must be "NONE". - * - * Outputs: - * * 0: A tensor of the same {@link OperandType} as input0. - */ - DIV = 30, - - /** - * Computes the mean of elements across dimensions of a tensor. - * - * Reduces the input tensor along the given dimensions to reduce. Unless - * keep_dims is true, the rank of the tensor is reduced by 1 for each entry - * in axis. If keep_dims is true, the reduced dimensions are retained with - * length 1. - * - * Supported tensor {@link OperandType}: - * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2) - * * {@link OperandType::TENSOR_FLOAT32} - * * {@link OperandType::TENSOR_QUANT8_ASYMM} - * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3) - * - * Supported tensor rank: up to 4 - * - * Inputs: - * * 0: A tensor, specifying the input. - * * 1: A 1-D Tensor of {@link OperandType::TENSOR_INT32}. The dimensions - * to reduce. Must be in the range - * [-rank(input_tensor), rank(input_tensor)). - * - * NOTE: When the operation was introduced, the documentation - * incorrectly stated that if dimensions were empty, the operation - * would reduce across all dimensions. This behavior was never - * implemented. - * - * * 2: An {@link OperandType::INT32} scalar, keep_dims. If positive, - * retains reduced dimensions with length 1. 
- * - * Outputs: - * * 0: A tensor of the same {@link OperandType} as input0. - * For a {@link OperandType::TENSOR_QUANT8_ASYMM} and - * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor, - * the scale and zeroPoint must be the same as input0. - * If all dimensions are reduced and keep_dims is false, the output - * shape is [1]. - */ - MEAN = 31, - - /** - * Pads a tensor. - * - * This operation pads a tensor according to the specified paddings. - * - * Supported tensor {@link OperandType}: - * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2) - * * {@link OperandType::TENSOR_FLOAT32} - * * {@link OperandType::TENSOR_QUANT8_ASYMM} - * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3) - * (full support since HAL version 1.2, see the output section) - * - * Supported tensor rank: up to 4 - * - * Inputs: - * * 0: An n-D tensor, specifying the tensor to be padded. - * * 1: A 2-D Tensor of {@link OperandType::TENSOR_INT32}, the paddings - * for each spatial dimension of the input tensor. The shape of the - * tensor must be {rank(input0), 2}. - * padding[i, 0] specifies the number of elements to be padded in the - * front of dimension i. - * padding[i, 1] specifies the number of elements to be padded after the - * end of dimension i. - * - * Outputs: - * * 0: A tensor of the same {@link OperandType} as input0. The - * output tensor has the same rank as input0, and each - * dimension of the output tensor has the same size as the - * corresponding dimension of the input tensor plus the size - * of the padding: - * output0.dimension[i] = - * padding[i, 0] + input0.dimension[i] + padding[i, 1] - * For a {@link OperandType::TENSOR_QUANT8_ASYMM} and - * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor, - * the scale and zeroPoint must be the same as input0. - * - * NOTE: Before HAL version 1.2, the pad value for - * {@link OperandType::TENSOR_QUANT8_ASYMM} is undefined. 
- * Since HAL version 1.2, the pad value is always the logical zero. - */ - PAD = 32, - - /** - * SpaceToBatch for N-Dimensional tensors. - * - * This operation divides "spatial" dimensions [1, ..., M] of the input into - * a grid of blocks of shape block_shape, and interleaves these blocks with - * the "batch" dimension (0) such that in the output, the spatial dimensions - * [1, ..., M] correspond to the position within the grid, and the batch - * dimension combines both the position within a spatial block and the - * original batch position. Prior to division into blocks, the spatial - * dimensions of the input are optionally zero padded according to paddings. - * - * Supported tensor {@link OperandType}: - * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2) - * * {@link OperandType::TENSOR_FLOAT32} - * * {@link OperandType::TENSOR_QUANT8_ASYMM} - * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3) - * (full support since HAL version 1.2, see the output section) - * - * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout. - * With the default data layout NHWC, the data is stored in the order of: - * [batch, height, width, channels]. Alternatively, the data layout could - * be NCHW, the data storage order of: [batch, channels, height, width]. - * NCHW is supported since HAL version 1.2. - * - * Inputs: - * * 0: An n-D tensor, specifying the input. - * * 1: A 1-D Tensor of {@link OperandType::TENSOR_INT32}, the block - * sizes for each spatial dimension of the input tensor. All values - * must be >= 1. - * * 2: A 2-D Tensor of {@link OperandType::TENSOR_INT32}, the paddings - * for each spatial dimension of the input tensor. All values must be - * >= 0. The shape of the tensor must be {M, 2}, where M is the number - * of spatial dimensions. - * padding[i, 0] specifies the number of element to be padded in the - * front of dimension i. 
padding[i, 1] specifies the number of elements to be padded after the
- * For a {@link OperandType::TENSOR_QUANT8_ASYMM} and - * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor, - * the scale and zeroPoint must be the same as input0. - * If all input dimensions are equal to 1 and are to be squeezed, the - * output shape is [1]. - */ - SQUEEZE = 34, - - /** - * Extracts a strided slice of a tensor. - * - * Roughly speaking, this op extracts a slice of size (end - begin) / stride - * from the given input tensor. Starting at the location specified by begin - * the slice continues by adding stride to the index until all dimensions - * are not less than end. Note that a stride can be negative, which causes a - * reverse slice. - * - * Supported tensor {@link OperandType}: - * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2) - * * {@link OperandType::TENSOR_FLOAT32} - * * {@link OperandType::TENSOR_QUANT8_ASYMM} - * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3) - * - * Supported tensor rank: up to 4 - * - * Inputs: - * * 0: An n-D tensor, specifying the tensor to be sliced. - * * 1: begin, a 1-D tensor of {@link OperandType::TENSOR_INT32}. The - * starts of the dimensions of the input tensor to be sliced. The - * length must be of rank(input0). - * * 2: end, a 1-D tensor of {@link OperandType::TENSOR_INT32}. The - * ends of the dimensions of the input tensor to be sliced. The length - * must be of rank(input0). - * * 3: strides, a 1-D tensor of {@link OperandType::TENSOR_INT32}. The - * strides of the dimensions of the input tensor to be sliced. The - * length must be of rank(input0). The entries must be non-zero. - * * 4: begin_mask, an {@link OperandType::INT32} scalar. If the ith bit - * of begin_mask is set, begin[i] is ignored and the fullest possible - * range in that dimension is used instead. - * * 5: end_mask, an {@link OperandType::INT32} scalar. If the ith bit of - * end_mask is set, end[i] is ignored and the fullest possible range in - * that dimension is used instead. 
- * * 6: shrink_axis_mask, an {@link OperandType::INT32} scalar. If the - * ith bit of shrink_axis_mask is set, the ith dimension specification - * shrinks the dimensionality by 1, taking on the value at index - * begin[i]. In this case, the ith specification must define a - * slice of size 1, e.g. begin[i] = x, end[i] = x + 1. - * - * Outputs: - * * 0: A tensor of the same {@link OperandType} as input0 and rank (n - k), - * where k is the number of bits set in shrink_axis_mask. - * For a {@link OperandType::TENSOR_QUANT8_ASYMM} and - * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor, - * the scale and zeroPoint must be the same as input0. - * If shrink_axis_mask is true for all input dimensions, the output - * shape is [1]. - */ - STRIDED_SLICE = 35, - - /** - * Element-wise subtraction of two tensors. - * - * Takes two input tensors of identical {@link OperandType} and compatible - * dimensions. The output is the result of subtracting the second input - * tensor from the first one, optionally modified by an activation function. - * - * Two dimensions are compatible when: - * 1. they are equal, or - * 2. one of them is 1 - * - * The size of the output is the maximum size along each dimension of the - * input operands. It starts with the trailing dimensions, and works its way - * forward. - * - * Example: - * input1.dimension = {4, 1, 2} - * input2.dimension = {5, 4, 3, 1} - * output.dimension = {5, 4, 3, 2} - * - * Since HAL version 1.2, generic zero-sized input tensor is supported. Zero - * dimension is only compatible with 0 or 1. The size of the output - * dimension is zero if either of corresponding input dimension is zero. 
- * - * Supported tensor {@link OperandType}: - * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2) - * * {@link OperandType::TENSOR_FLOAT32} - * * {@link OperandType::TENSOR_QUANT8_ASYMM} (since HAL version 1.2) - * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3) - * * {@link OperandType::TENSOR_INT32} (since HAL version 1.3) - * - * Supported tensor rank: up to 4 - * - * Inputs: - * * 0: An n-D tensor, specifying the first input. - * * 1: A tensor of the same {@link OperandType}, and compatible dimensions - * as input0. - * * 2: An {@link OperandType::INT32} scalar, and has to be one of the - * {@link FusedActivationFunc} values. Specifies the activation to - * invoke on the result. - * For a {@link OperandType::TENSOR_INT32} tensor, - * the {@link FusedActivationFunc} must be "NONE". - * - * Outputs: - * * 0: A tensor of the same {@link OperandType} as input0. - * For a {@link OperandType::TENSOR_QUANT8_ASYMM} and - * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor, - * the scale and zeroPoint can be different from inputs' scale and zeroPoint. - */ - SUB = 36, - - /** - * Transposes the input tensor, permuting the dimensions according to the - * perm tensor. - * - * The returned tensor's dimension i corresponds to the input dimension - * perm[i]. If perm is not given, it is set to (n-1...0), where n is the - * rank of the input tensor. Hence by default, this operation performs a - * regular matrix transpose on 2-D input Tensors. - * - * Supported tensor {@link OperandType}: - * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2) - * * {@link OperandType::TENSOR_FLOAT32} - * * {@link OperandType::TENSOR_QUANT8_ASYMM} - * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3) - * - * Supported tensor rank: up to 4 - * - * Inputs: - * * 0: An n-D tensor, specifying the tensor to be transposed. - * Since HAL version 1.2, this tensor may be zero-sized. 
- * * 1: An optional 1-D Tensor of {@link OperandType::TENSOR_INT32}, - * the permutation of the dimensions of the input tensor. - * - * Outputs: - * * 0: A tensor of the same {@link OperandType} as input0. - * For a {@link OperandType::TENSOR_QUANT8_ASYMM} and - * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor, - * the scale and zeroPoint must be the same as input0. - */ - TRANSPOSE = 37, - - /** - * Computes the absolute value of a tensor, element-wise. - * - * Supported tensor {@link OperandType}: - * * {@link OperandType::TENSOR_FLOAT16} - * * {@link OperandType::TENSOR_FLOAT32} - * * {@link OperandType::TENSOR_INT32} (since HAL version 1.3) - * - * Supported tensor rank: from 1. - * - * Inputs: - * * 0: A tensor. - * - * Outputs: - * * 0: The output tensor of same shape as input0. - */ - ABS = 38, - - /** - * Returns the index of the largest element along an axis. - * - * Supported tensor {@link OperandType}: - * * {@link OperandType::TENSOR_FLOAT16} - * * {@link OperandType::TENSOR_FLOAT32} - * * {@link OperandType::TENSOR_INT32} - * * {@link OperandType::TENSOR_QUANT8_ASYMM} - * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3) - * - * Supported tensor rank: from 1 - * - * Inputs: - * * 0: An n-D tensor specifying the input. Must be non-empty. - * * 1: An {@link OperandType::INT32} scalar specifying the axis to - * reduce across. Negative index is used to specify axis from the - * end (e.g. -1 for the last axis). Must be in the range [-n, n). - * - * Outputs: - * * 0: An (n - 1)-D {@link OperandType::TENSOR_INT32} tensor. - * If input is 1-dimensional, the output shape is [1]. - */ - // There is no underscore in ARG_MAX to avoid name conflict with - // the macro defined in libc/kernel/uapi/linux/limits.h. - ARGMAX = 39, - - /** - * Returns the index of the smallest element along an axis. 
- * - * Supported tensor {@link OperandType}: - * * {@link OperandType::TENSOR_FLOAT16} - * * {@link OperandType::TENSOR_FLOAT32} - * * {@link OperandType::TENSOR_INT32} - * * {@link OperandType::TENSOR_QUANT8_ASYMM} - * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3) - * - * Supported tensor rank: from 1 - * - * Inputs: - * * 0: An n-D tensor specifying the input. Must be non-empty. - * * 1: An {@link OperandType::INT32} scalar specifying the axis to - * reduce across. Negative index is used to specify axis from the - * end (e.g. -1 for the last axis). Must be in the range [-n, n). - * - * Outputs: - * * 0: An (n - 1)-D {@link OperandType::TENSOR_INT32} tensor. - * If input is 1-dimensional, the output shape is [1]. - */ - ARGMIN = 40, // See ARGMAX for naming discussion. - - /** - * Transform axis-aligned bounding box proposals using bounding box deltas. - * - * Given the positions of bounding box proposals and the corresponding - * bounding box deltas for each class, return the refined bounding box - * regions. The resulting bounding boxes are cliped against the edges of - * the image. - * - * Supported tensor {@link OperandType}: - * * {@link OperandType::TENSOR_FLOAT16} - * * {@link OperandType::TENSOR_FLOAT32} - * * {@link OperandType::TENSOR_QUANT16_ASYMM} - * - * Inputs: - * * 0: A 2-D Tensor of shape [num_rois, 4], specifying the locations of the - * bounding box proposals, each line with format [x1, y1, x2, y2]. - * For tensor of type {@link OperandType::TENSOR_QUANT16_ASYMM}, - * the zeroPoint must be 0 and the scale must be 0.125. Zero num_rois - * is supported for this tensor. - * * 1: A 2-D Tensor of shape [num_rois, num_classes * 4], specifying the - * bounding box delta for each region of interest and each class. 
The - * bounding box deltas are organized in the following order - * [dx, dy, dw, dh], where dx and dy is the relative correction factor - * for the center position of the bounding box with respect to the width - * and height, dw and dh is the log-scale relative correction factor - * for the width and height. For input0 of type - * {@link OperandType::TENSOR_QUANT16_ASYMM}, this tensor should be - * of {@link OperandType::TENSOR_QUANT8_ASYMM} or - * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED}. Zero num_rois is - * supported for this tensor. - * * 2: An 1-D {@link OperandType::TENSOR_INT32} tensor, of shape - * [num_rois], specifying the batch index of each box. Boxes with - * the same batch index are grouped together. Zero num_rois is - * supported for this tensor. - * * 3: A 2-D Tensor of shape [batches, 2], specifying the information of - * each image in the batch, each line with format - * [image_height, image_width]. - * - * Outputs: - * * 0: A tensor of the same {@link OperandType} as input0, with shape - * [num_rois, num_classes * 4], specifying the coordinates of each - * output bounding box for each class, with format [x1, y1, x2, y2]. - * For type of {@link OperandType::TENSOR_QUANT16_ASYMM}, the - * scale must be 0.125 and the zero point must be 0. - */ - AXIS_ALIGNED_BBOX_TRANSFORM = 41, - - /** - * A recurrent neural network layer that applies an LSTM cell to a - * sequence of inputs in forward and backward directions. - * - * The op supports cross-linking via an auxiliary input. 
Regular cell feeds - * one input into the two RNN cells in the following way: - * - * INPUT (INPUT_REVERSED) - * | | - * --------------------- - * | FW_LSTM BW_LSTM | - * --------------------- - * | | - * FW_OUT BW_OUT - * - * An op with cross-linking takes two inputs and feeds them into the RNN - * cells in the following way: - * - * AUX_INPUT (AUX_INPUT_REVERSED) - * | | - * INPUT | (INPUT_R'D.)| - * | | | | - * ----------------------- - * | \ / \ / | - * | FW_LSTM BW_LSTM | - * ----------------------- - * | | - * FW_OUT BW_OUT - * - * The cross-linking mode is enabled iff auxiliary input and auxiliary - * weights are present. While stacking this op on top of itself, this - * allows to connect both forward and backward outputs from previous cell - * to the next cell's input. - * - * Since HAL version 1.3 parallel linking mode is supported. The mode is - * enabled if auxiliary input is present but auxiliary weights are omitted. - * In this case, the cell feeds inputs into the RNN in the following way: - * - * INPUT (AUX_INPUT_REVERSED) - * | | - * --------------------- - * | FW_LSTM BW_LSTM | - * --------------------- - * | | - * FW_OUT BW_OUT - * - * While stacking this op on top of itself, this allows to connect both - * forward and backward outputs from previous cell to the next cell's - * corresponding inputs. - * - * Supported tensor {@link OperandType}: - * * {@link OperandType::TENSOR_FLOAT16} - * * {@link OperandType::TENSOR_FLOAT32} - * - * Supported tensor rank: 3, either time-major or batch-major. - * - * All input and output tensors must be of the same type. - * - * Inputs: - * * 0: The input. - * A 3-D tensor of shape: - * If time-major: [max_time, batch_size, input_size] - * If batch-major: [batch_size, max_time, input_size] - * where "max_time" is the number of timesteps (sequence length), - * "batch_size" corresponds to the batching dimension, and - * "input_size" is the size of the input. - * * 1: The forward input-to-input weights. Optional. 
- * A 2-D tensor of shape [fw_num_units, input_size], where “fw_num_units” - * corresponds to the number of forward cell units. - * * 2: The forward input-to-forget weights. - * A 2-D tensor of shape [fw_num_units, input_size]. - * * 3: The forward input-to-cell weights. - * A 2-D tensor of shape [fw_num_units, input_size]. - * * 4: The forward input-to-output weights. - * A 2-D tensor of shape [fw_num_units, input_size]. - * * 5: The forward recurrent-to-input weights. Optional. - * A 2-D tensor of shape [fw_num_units, fw_output_size], where “fw_output_size” - * corresponds to either the number of cell units (i.e., fw_num_units), - * or the second dimension of the “fw_projection_weights”, if defined. - * * 6: The forward recurrent-to-forget weights. - * A 2-D tensor of shape [fw_num_units, fw_output_size]. - * * 7: The forward recurrent-to-cell weights. - * A 2-D tensor of shape [fw_num_units, fw_output_size]. - * * 8: The forward recurrent-to-output weights. - * A 2-D tensor of shape [fw_num_units, fw_output_size]. - * * 9: The forward cell-to-input weights. Optional. - * A 1-D tensor of shape [fw_num_units]. - * * 10: The forward cell-to-forget weights. Optional. - * A 1-D tensor of shape [fw_num_units]. - * * 11: The forward cell-to-output weights. Optional. - * A 1-D tensor of shape [fw_num_units]. - * * 12: The forward input gate bias. Optional. - * A 1-D tensor of shape [fw_num_units]. - * * 13: The forward forget gate bias. - * A 1-D tensor of shape [fw_num_units]. - * * 14: The forward cell gate bias. - * A 1-D tensor of shape [fw_num_units]. - * * 15: The forward output gate bias. - * A 1-D tensor of shape [fw_num_units]. - * * 16: The forward projection weights. Optional. - * A 2-D tensor of shape [fw_output_size, fw_num_units]. - * * 17: The forward projection bias. Optional. - * A 1-D tensor of shape [fw_output_size]. - * * 18: The backward input-to-input weights. Optional. 
- * A 2-D tensor of shape [bw_num_units, input_size], where “bw_num_units” - * corresponds to the number of backward cell units. - * * 19: The backward input-to-forget weights. - * A 2-D tensor of shape [bw_num_units, input_size]. - * * 20: The backward input-to-cell weights. - * A 2-D tensor of shape [bw_num_units, input_size]. - * * 21: The backward input-to-output weights. - * A 2-D tensor of shape [bw_num_units, input_size]. - * * 22: The backward recurrent-to-input weights. Optional. - * A 2-D tensor of shape [bw_num_units, bw_output_size], where “bw_output_size” - * corresponds to either the number of cell units (i.e., “bw_num_units”), - * or the second dimension of the “bw_projection_weights”, if defined. - * * 23: The backward recurrent-to-forget weights. - * A 2-D tensor of shape [bw_num_units, bw_output_size]. - * * 24: The backward recurrent-to-cell weights. - * A 2-D tensor of shape [bw_num_units, bw_output_size]. - * * 25: The backward recurrent-to-output weights. - * A 2-D tensor of shape [bw_num_units, bw_output_size]. - * * 26: The backward cell-to-input weights. Optional. - * A 1-D tensor of shape [bw_num_units]. - * * 27: The backward cell-to-forget weights. Optional. - * A 1-D tensor of shape [bw_num_units]. - * * 28: The backward cell-to-output weights. Optional. - * A 1-D tensor of shape [bw_num_units]. - * * 29: The backward input gate bias. Optional. - * A 1-D tensor of shape [bw_num_units]. - * * 30: The backward forget gate bias. - * A 1-D tensor of shape [bw_num_units]. - * * 31: The backward cell gate bias. - * A 1-D tensor of shape [bw_num_units]. - * * 32: The backward output gate bias. - * A 1-D tensor of shape [bw_num_units]. - * * 33: The backward projection weights. Optional. - * A 2-D tensor of shape [bw_output_size, bw_num_units]. - * * 34: The backward projection bias. Optional. - * A 1-D tensor of shape [bw_output_size]. - * * 35: The forward input activation state. - * A 2-D tensor of shape [batch_size, bw_output_size]. 
- * * 36: The forward input cell state. - * A 2-D tensor of shape [batch_size, bw_num_units]. - * * 37: The backward input activation state. - * A 2-D tensor of shape [batch_size, bw_output_size]. - * * 38: The backward input cell state. - * A 2-D tensor of shape [batch_size, bw_num_units]. - * * 39: The auxiliary input. Optional. - * A 3-D tensor of shape [max_time, batch_size, aux_input_size], - * where “batch_size” corresponds to the batching dimension, and - * “aux_input_size” is the size of the auxiliary input. Optional. See - * the docs above for the usage modes explanation. - * * 40: The forward auxiliary input-to-input weights. - * Optional. See the docs above for the usage modes explanation. - * A 2-D tensor of shape [fw_num_units, aux_input_size]. - * * 41: The forward auxiliary input-to-forget weights. - * Optional. See the docs above for the usage modes explanation. - * A 2-D tensor of shape [fw_num_units, aux_input_size]. - * * 42: The forward auxiliary input-to-cell weights. - * Optional. See the docs above for the usage modes explanation. - * A 2-D tensor of shape [fw_num_units, aux_input_size]. - * * 43: The forward auxiliary input-to-output weights. - * Optional. See the docs above for the usage modes explanation. - * A 2-D tensor of shape [fw_num_units, aux_input_size]. - * * 44: The backward auxiliary input-to-input weights. - * Optional. See the docs above for the usage modes explanation. - * A 2-D tensor of shape [bw_num_units, aux_input_size]. - * * 45: The backward auxiliary input-to-forget weights. - * Optional. See the docs above for the usage modes explanation. - * A 2-D tensor of shape [bw_num_units, aux_input_size]. - * * 46: The backward auxiliary input-to-cell weights. - * Optional. See the docs above for the usage modes explanation. - * A 2-D tensor of shape [bw_num_units, aux_input_size]. - * * 47: The backward auxiliary input-to-output weights. - * Optional. See the docs above for the usage modes explanation. 
- * A 2-D tensor of shape [bw_num_units, aux_input_size]. - * * 48: The activation function. - * A value indicating the activation function: - * <ul> - * <li>0: None; - * <li>1: Relu; - * <li>3: Relu6; - * <li>4: Tanh; - * <li>6: Sigmoid. - * </ul> - * * 49: The clipping threshold for the cell state, such - * that values are bound within [-cell_clip, cell_clip]. If set to 0.0 - * then clipping is disabled. - * If all the input tensors have type {@link OperandType::TENSOR_FLOAT32}, - * this scalar must be of the type {@link OperandType::FLOAT32}, - * otherwise if all the input tensors have the type - * {@link OperandType::TENSOR_FLOAT16}, this scalar must be - * of type {@link OperandType::FLOAT16}. - * * 50: The clipping threshold for the output from the - * projection layer, such that values are bound within - * [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled. - * If all the input tensors have type {@link OperandType::TENSOR_FLOAT32}, - * this scalar must be of the type {@link OperandType::FLOAT32}, - * otherwise if all the input tensors have the type - * {@link OperandType::TENSOR_FLOAT16}, this scalar must be - * of type {@link OperandType::FLOAT16}. - * * 51: merge_outputs - * An {@link OperandType::BOOL} scalar specifying if the outputs - * from forward and backward cells should be merged. - * * 52: time_major - * An {@link OperandType::BOOL} scalar specifying the shape format - * of input and output tensors. - * * 53: The forward input layer normalization weights. Optional. - * A 1-D tensor of shape [fw_num_units]. Used to rescale normalized inputs - * to activation at input gate. - * * 54: The forward forget layer normalization weights. Optional. - * A 1-D tensor of shape [fw_num_units]. Used to rescale normalized inputs - * to activation at forget gate. - * * 55: The forward cell layer normalization weights. Optional. - * A 1-D tensor of shape [fw_num_units]. Used to rescale normalized inputs - * to activation at cell gate. 
- * * 56: The forward output layer normalization weights. Optional. - * A 1-D tensor of shape [fw_num_units]. Used to rescale normalized inputs - * to activation at output gate. - * * 57: The backward input layer normalization weights. Optional. - * A 1-D tensor of shape [bw_num_units]. Used to rescale normalized inputs - * to activation at input gate. - * * 58: The backward forget layer normalization weights. Optional. - * A 1-D tensor of shape [bw_num_units]. Used to rescale normalized inputs - * to activation at forget gate. - * * 59: The backward cell layer normalization weights. Optional. - * A 1-D tensor of shape [bw_num_units]. Used to rescale normalized inputs - * to activation at cell gate. - * * 60: The backward output layer normalization weights. Optional. - * A 1-D tensor of shape [bw_num_units]. Used to rescale normalized inputs - * to activation at output gate. - * - * Outputs: - * * 0: The forward output. - * A 3-D tensor of shape: - * If time-major and not merge_outputs: - * [max_time, batch_size, fw_output_size] - * If time-major and merge_outputs: - * [max_time, batch_size, fw_output_size + bw_output_size] - * If batch-major and not merge_outputs: - * [batch_size, max_time, fw_output_size] - * If batch-major and merge_outputs: - * [batch_size, max_time, fw_output_size + bw_output_size] - * * 1: The backward output. Unused if merge_outputs is true. - * A 3-D tensor of shape: - * If time-major: [max_time, batch_size, bw_output_size] - * If batch-major: [batch_size, max_time, bw_output_size] - * * 2: The forward activation state output. - * A 2-D tensor of shape [batch_size, fw_output_size] containing an - * activation state from the last time step in the sequence. This - * output is optional and can be omitted. If this output is present - * then outputs 3-5 must be present as well. - * Available since HAL version 1.3. - * * 3: The forward cell state output. 
- * A tensor of shape [batch_size, fw_cell_size] containing a cell state - * from the last time step in the sequence. This output is optional - * and can be omitted. If this output is present - * then outputs 2, 4, 5 must be present as well. - * Available since HAL version 1.3. - * * 4: The backward activation state output. - * A 2-D tensor of shape [batch_size, bw_output_size] containing an - * activation state from the last time step in the sequence. This - * output is optional and can be omitted. If this output is present - * then outputs 2, 3, 5 must be present as well. - * Available since HAL version 1.3. - * * 5: The backward cell state output. - * A tensor of shape [batch_size, bw_cell_size] containing a cell state - * from the last time step in the sequence. This output is optional - * and can be omitted. If this output is present - * then outputs 2-4 must be present as well. - * Available since HAL version 1.3. - */ - BIDIRECTIONAL_SEQUENCE_LSTM = 42, - - /** - * A recurrent neural network layer that applies a basic RNN cell to a - * sequence of inputs in forward and backward directions. 
- * - * This Op unrolls the input along the sequence dimension, and implements - * the following operation for each element in the sequence s = - * 1...sequence_length: - * fw_outputs[s] = fw_state = activation(inputs[s] * fw_input_weights’ + - * fw_state * fw_recurrent_weights’ + fw_bias) - * - * And for each element in sequence t = sequence_length : 1 - * bw_outputs[t] = bw_state = activation(inputs[t] * bw_input_weights’ + - * bw_state * bw_recurrent_weights’ + bw_bias) - * - * Where: - * * “{fw,bw}_input_weights” is a weight matrix that multiplies the inputs; - * * “{fw,bw}_recurrent_weights” is a weight matrix that multiplies the - * current “state” which itself is the output from the previous time step - * computation; - * * “{fw,bw}_bias” is a bias vector (added to each output vector in the - * batch); - * * “activation” is the function passed as the “fused_activation_function” - * argument (if not “NONE”). - * - * The op supports cross-linking via an auxiliary input. Regular cell feeds - * one input into the two RNN cells in the following way: - * - * INPUT (INPUT_REVERSED) - * | | - * --------------------- - * | FW_RNN BW_RNN | - * --------------------- - * | | - * FW_OUT BW_OUT - * - * An op with cross-linking takes two inputs and feeds them into the RNN - * cells in the following way: - * - * AUX_INPUT (AUX_INPUT_REVERSED) - * | | - * INPUT | (INPUT_R'D.)| - * | | | | - * ----------------------- - * | \ / \ / | - * | FW_RNN BW_RNN | - * ----------------------- - * | | - * FW_OUT BW_OUT - * - * The cross-linking mode is enabled iff auxiliary input and auxiliary - * weights are present. While stacking this op on top of itself, this - * allows to connect both forward and backward outputs from previous cell - * to the next cell's input. - * - * Since HAL version 1.3 parallel linking mode is supported. The mode is - * enabled if auxiliary input is present but auxiliary weights are omitted. 
- * In this case, the cell feeds inputs into the RNN in the following way: - * - * INPUT (AUX_INPUT_REVERSED) - * | | - * --------------------- - * | FW_RNN BW_RNN | - * --------------------- - * | | - * FW_OUT BW_OUT - * - * While stacking this op on top of itself, this allows to connect both - * forward and backward outputs from previous cell to the next cell's - * corresponding inputs. - * - * Supported tensor {@link OperandType}: - * * {@link OperandType::TENSOR_FLOAT16} - * * {@link OperandType::TENSOR_FLOAT32} - * - * The input tensors must all be the same type. - * - * Inputs: - * * 0: input. - * A 3-D tensor. The shape is defined by the input 6 (timeMajor). If - * it is set to true, then the input has a shape [maxTime, batchSize, - * inputSize], otherwise the input has a shape [batchSize, maxTime, - * inputSize]. - * * 1: fwWeights. - * A 2-D tensor of shape [fwNumUnits, inputSize]. - * * 2: fwRecurrentWeights. - * A 2-D tensor of shape [fwNumUnits, fwNumUnits]. - * * 3: fwBias. - * A 1-D tensor of shape [fwNumUnits]. - * * 4: fwHiddenState. - * A 2-D tensor of shape [batchSize, fwNumUnits]. Specifies a hidden - * state input for the first time step of the computation. - * * 5: bwWeights. - * A 2-D tensor of shape [bwNumUnits, inputSize]. - * * 6: bwRecurrentWeights. - * A 2-D tensor of shape [bwNumUnits, bwNumUnits]. - * * 7: bwBias. - * A 1-D tensor of shape [bwNumUnits]. - * * 8: bwHiddenState - * A 2-D tensor of shape [batchSize, bwNumUnits]. Specifies a hidden - * state input for the first time step of the computation. - * * 9: auxInput. - * A 3-D tensor. The shape is defined by the input 6 (timeMajor). If - * it is set to true, then the input has a shape [maxTime, batchSize, - * auxInputSize], otherwise the input has a shape [batchSize, maxTime, - * auxInputSize]. Can be omitted. See the docs above for the usage - * modes explanation. - * * 10:fwAuxWeights. - * A 2-D tensor of shape [fwNumUnits, auxInputSize]. Can be omitted. 
- * See the docs above for the usage modes explanation. - * * 11:bwAuxWeights. - * A 2-D tensor of shape [bwNumUnits, auxInputSize]. Can be omitted. - * See the docs above for the usage modes explanation. - * * 12:fusedActivationFunction. - * A {@link FusedActivationFunc} value indicating the activation function. If - * “NONE” is specified then it results in a linear activation. - * * 13:timeMajor - * An {@link OperandType::BOOL} scalar specifying the shape format - * of input and output tensors. - * * 14:mergeOutputs - * An {@link OperandType::BOOL} scalar specifying if the outputs - * from forward and backward cells are separate (if set to false) or - * concatenated (if set to true). - * Outputs: - * * 0: fwOutput. - * A 3-D tensor. The first two dimensions of the shape are defined by - * the input 6 (timeMajor) and the third dimension is defined by the - * input 14 (mergeOutputs). If timeMajor is set to true, then the first - * two dimensions are [maxTime, batchSize], otherwise they are set to - * [batchSize, maxTime]. If mergeOutputs is set to true, then the third - * dimension is equal to (fwNumUnits + bwNumUnits), otherwise it is set - * to fwNumUnits. - * * 1: bwOutput. - * A 3-D tensor. If the input 14 (mergeOutputs) is set to true, then - * this tensor is not produced. The shape is defined by the input 6 - * (timeMajor). If it is set to true, then the shape is set to - * [maxTime, batchSize, bwNumUnits], otherwise the shape is set to - * [batchSize, maxTime, bwNumUnits]. - * * 2: The forward hidden state output. - * A 2-D tensor of shape [batchSize, fwNumUnits] containing a hidden - * state from the last time step in the sequence. This output is - * optional and can be omitted. If this output is present then output - * 3 must be present as well. - * Available since HAL version 1.3. - * * 3: The backward hidden state output. - * A 2-D tensor of shape [batchSize, bwNumUnits] containing a hidden - * state from the last time step in the sequence. 
This output is - * optional and can be omitted. If this output is present then output - * 2 must be present as well. - * Available since HAL version 1.3. - */ - BIDIRECTIONAL_SEQUENCE_RNN = 43, - - /** - * Greedily selects a subset of bounding boxes in descending order of score. - * - * This op applies NMS algorithm to each class. In each loop of execution, - * the box with maximum score gets selected and removed from the pending set. - * The scores of the rest of boxes are lowered according to the - * intersection-over-union (IOU) overlapping with the previously selected - * boxes and a specified NMS kernel method. Any boxes with score less - * than a threshold are removed from the pending set. - * - * Three NMS kernels are supported: - * * Hard: score_new = score_old * (1 if IoU < threshold else 0) - * * Linear: score_new = score_old * (1 if IoU < threshold else 1 - IoU) - * * Gaussian: score_new = score_old * exp(- IoU^2 / sigma) - * - * Axis-aligned bounding boxes are represented by its upper-left corner - * coordinate (x1,y1) and lower-right corner coordinate (x2,y2). A valid - * bounding box should satisfy x1 <= x2 and y1 <= y2. - * - * Supported tensor {@link OperandType}: - * * {@link OperandType::TENSOR_FLOAT16} - * * {@link OperandType::TENSOR_FLOAT32} - * * {@link OperandType::TENSOR_QUANT8_ASYMM} - * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3) - * - * Inputs: - * * 0: A 2-D Tensor of shape [num_rois, num_classes], specifying the score - * of each bounding box proposal. The boxes are grouped by batches in the - * first dimension. Zero num_rois is supported for this tensor. - * * 1: A 2-D Tensor specifying the bounding boxes of shape - * [num_rois, num_classes * 4], organized in the order [x1, y1, x2, y2]. - * The boxes are grouped by batches in the first dimension. The sequential - * order of the boxes corresponds with input0. 
For input0 of type
 - * {@link OperandType::TENSOR_QUANT8_ASYMM}, this tensor should be of
 - * {@link OperandType::TENSOR_QUANT16_ASYMM}, with zeroPoint of 0 and
 - * scale of 0.125.
 - * For input0 of type {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED},
 - * this tensor should be of {@link OperandType::TENSOR_QUANT16_ASYMM},
 - * with zeroPoint of -128 and scale of 0.125.
 - * Zero num_rois is supported for this tensor.
 - * * 2: A 1-D {@link OperandType::TENSOR_INT32} tensor, of shape
 - * [num_rois], specifying the batch index of each box. Boxes with
 - * the same batch index are grouped together.
 - * * 3: An {@link OperandType::FLOAT32} scalar, score_threshold. Boxes
 - * with scores lower than the threshold are filtered before sending
 - * to the NMS algorithm.
 - * * 4: An {@link OperandType::INT32} scalar, specifying the maximum
 - * number of selected bounding boxes for each image. Set to a negative
 - * value for unlimited number of output bounding boxes.
 - * * 5: An {@link OperandType::INT32} scalar, specifying the NMS
 - * kernel method, options are 0:hard, 1:linear, 2:gaussian.
 - * * 6: An {@link OperandType::FLOAT32} scalar, specifying the IoU
 - * threshold in hard and linear NMS kernel. This field is ignored if
 - * gaussian kernel is selected.
 - * * 7: An {@link OperandType::FLOAT32} scalar, specifying the sigma in
 - * gaussian NMS kernel. This field is ignored if gaussian kernel is
 - * not selected.
 - * * 8: An {@link OperandType::FLOAT32} scalar, nms_score_threshold.
 - * Boxes with scores lower than the threshold are dropped during the
 - * score updating phase in soft NMS.
 - *
 - * Outputs:
 - * * 0: A 1-D Tensor of the same {@link OperandType} as input0, with shape
 - * [num_output_rois], specifying the score of each output box. The boxes
 - * are grouped by batches, but the sequential order in each batch is not
 - * guaranteed. For type of {@link OperandType::TENSOR_QUANT8_ASYMM}
 - * or {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED},
 - * the scale and zero point must be the same as input0.
 - * * 1: A 2-D Tensor of the same {@link OperandType} as input1, with shape
 - * [num_output_rois, 4], specifying the coordinates of each
 - * output bounding box with the same format as input1. The sequential
 - * order of the boxes corresponds with output0. For type of
 - * {@link OperandType::TENSOR_QUANT16_ASYMM}, the scale must be
 - * 0.125 and the zero point must be 0.
 - * * 2: A 1-D {@link OperandType::TENSOR_INT32} tensor, of shape
 - * [num_output_rois], specifying the class of each output box. The
 - * sequential order of the boxes corresponds with output0.
 - * * 3: A 1-D {@link OperandType::TENSOR_INT32} tensor, of shape
 - * [num_output_rois], specifying the batch index of each box. Boxes
 - * with the same batch index are grouped together.
 - */
 - BOX_WITH_NMS_LIMIT = 44,
 -
 - /**
 - * Casts a tensor to a type.
 - *
 - * This operation ignores the scale and zeroPoint of quantized tensors,
 - * e.g. it treats a {@link OperandType::TENSOR_QUANT8_ASYMM} input
 - * as a tensor of uint8 values.
 - *
 - * Supported tensor {@link OperandType}:
 - * * {@link OperandType::TENSOR_FLOAT16}
 - * * {@link OperandType::TENSOR_FLOAT32}
 - * * {@link OperandType::TENSOR_INT32}
 - * * {@link OperandType::TENSOR_QUANT8_ASYMM}
 - * Since HAL version 1.3, casting tensors of the following
 - * {@link OperandType} to the same {@link OperandType} is supported:
 - * * {@link OperandType::TENSOR_BOOL8}
 - * * {@link OperandType::TENSOR_INT32}
 - * * {@link OperandType::TENSOR_QUANT16_ASYMM}
 - * * {@link OperandType::TENSOR_QUANT16_SYMM}
 - * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED}
 - * * {@link OperandType::TENSOR_QUANT8_SYMM}
 - *
 - * Supported tensor rank: from 1
 - *
 - * Inputs:
 - * * 0: A tensor.
 - *
 - * Outputs:
 - * * 0: A tensor with the same shape as input0. 
- */ - CAST = 45, - - /** - * Shuffle the channels of the input tensor. - * - * Given an input tensor and a integer value of num_groups, CHANNEL_SHUFFLE - * divide the channel dimension into num_groups groups, and reorganize the - * channels by grouping channels with the same index in each group. - * - * Along the channel dimension, the output is calculated using this formula: - * - * output_channel[k * num_groups + g] = input_channel[g * group_size + k] - * - * where group_size = num_channels / num_groups - * - * The number of channels must be divisible by num_groups. - * - * Supported tensor {@link OperandType}: - * * {@link OperandType::TENSOR_FLOAT16} - * * {@link OperandType::TENSOR_FLOAT32} - * * {@link OperandType::TENSOR_QUANT8_ASYMM} - * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3) - * - * Supported tensor rank: up to 4 - * - * Inputs: - * * 0: An n-D tensor, specifying the tensor to be shuffled. - * * 1: An {@link OperandType::INT32} scalar, specifying the number of - * groups. - * * 2: An {@link OperandType::INT32} scalar, specifying the dimension - * channel shuffle would be performed on. Negative index is used to - * specify axis from the end (e.g. -1 for the last axis). Must be in - * the range [-n, n). - * - * Outputs: - * * 0: A tensor of the same {@link OperandType} and same shape as input0. - * For a {@link OperandType::TENSOR_QUANT8_ASYMM} and - * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor, - * the scale and zeroPoint must be the same as input0. - */ - CHANNEL_SHUFFLE = 46, - - /** - * Apply postprocessing steps to bounding box detections. - * - * Bounding box detections are generated by applying transformation on a set - * of predefined anchors with the bounding box deltas from bounding box - * regression. A final step of hard NMS is applied to limit the number of - * returned boxes. 
- * - * Supported tensor {@link OperandType}: - * * {@link OperandType::TENSOR_FLOAT16} - * * {@link OperandType::TENSOR_FLOAT32} - * - * Inputs: - * * 0: A 3-D Tensor of shape [batches, num_anchors, num_classes], specifying - * the score of each anchor with each class. Class 0 for each - * [batches, num_anchors, 0] is background and will be ignored. - * * 1: A 3-D Tensor of shape [batches, num_anchors, length_box_encoding], with - * the first four values in length_box_encoding specifying the bounding - * box deltas. The box deltas are encoded in the order of [dy, dx, dh, dw], - * where dy and dx is the linear-scale relative correction factor for the - * center position of the bounding box with respect to the width and height, - * dh and dw is the log-scale relative correction factor for the width and - * height. All the entries in length_box_encoding beyond the first four - * values are ignored in this operation. - * * 2: A 2-D Tensor of shape [num_anchors, 4], specifying the shape of each - * predefined anchor, with format [ctr_y, ctr_x, h, w], where ctr_y and - * ctr_x are the center position of the box, and h and w are the height - * and the width. - * * 3: An {@link OperandType::FLOAT32} scalar, specifying the scaling - * factor for dy in bounding box deltas. - * * 4: An {@link OperandType::FLOAT32} scalar, specifying the scaling - * factor for dx in bounding box deltas. - * * 5: An {@link OperandType::FLOAT32} scalar, specifying the scaling - * factor for dh in bounding box deltas. - * * 6: An {@link OperandType::FLOAT32} scalar, specifying the scaling - * factor for dw in bounding box deltas. - * * 7: An {@link OperandType::BOOL} scalar, set to true to use regular - * multi-class NMS algorithm that do NMS separately for each class, - * set to false for a faster algorithm that only do one single NMS - * using the highest class score.. - * * 8: An {@link OperandType::INT32} scalar, max_num_detections, specifying - * the maximum number of boxes for the output. 
Boxes with the lowest - * scores are discarded to meet the limit. - * * 9: An {@link OperandType::INT32} scalar, only used when input7 is - * set to false, specifying the maximum number of classes per detection. - * * 10: An {@link OperandType::INT32} scalar, only used when input7 is - * set to true, specifying the maximum number of detections when - * applying NMS algorithm for each single class. - * * 11: A scalar, score_threshold. Boxes with scores lower than the - * threshold are filtered before sending to the NMS algorithm. The - * scalar must be of {@link OperandType::FLOAT16} if input0 is of - * {@link OperandType::TENSOR_FLOAT16} and of - * {@link OperandType::FLOAT32} if input0 is of - * {@link OperandType::TENSOR_FLOAT32}. - * * 12: A scalar, specifying the IoU threshold for hard NMS. The scalar - * must be of {@link OperandType::FLOAT16} if input0 is of - * {@link OperandType::TENSOR_FLOAT16} and of - * {@link OperandType::FLOAT32} if input0 is of - * {@link OperandType::TENSOR_FLOAT32}. - * * 13: An {@link OperandType::BOOL} scalar, set to true to include - * background class in the list of label map for the output, set - * to false to not include the background. When the background - * class is included, it has label 0 and the output classes start - * at 1 in the label map, otherwise, the output classes start at 0. - * - * Outputs: - * * 0: A 2-D tensor of the same {@link OperandType} as input0, with shape - * [batches, max_num_detections], specifying the score of each output - * detections. - * * 1: A 3-D tensor of shape [batches, max_num_detections, 4], specifying the - * coordinates of each output bounding box, with format - * [y1, x1, y2, x2]. - * * 2: A 2-D {@link OperandType::TENSOR_INT32} tensor, of shape - * [batches, max_num_detections], specifying the class label for each - * output detection. - * * 3: An 1-D {@link OperandType::TENSOR_INT32} tensor, of shape [batches], - * specifying the number of valid output detections for each batch. 
- */ - DETECTION_POSTPROCESSING = 47, - - /** - * For input tensors x and y, computes x == y elementwise. - * - * Supported tensor {@link OperandType}: - * * {@link OperandType::TENSOR_BOOL8} - * * {@link OperandType::TENSOR_FLOAT16} - * * {@link OperandType::TENSOR_FLOAT32} - * * {@link OperandType::TENSOR_INT32} - * * {@link OperandType::TENSOR_QUANT8_ASYMM} - * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3) - * - * Supported tensor rank: from 1 - * - * This operation supports broadcasting. - * - * Inputs: - * * 0: A tensor. - * * 1: A tensor of the same {@link OperandType} and dimensions compatible - * with input0. - * - * Outputs: - * * 0: A tensor of {@link OperandType::TENSOR_BOOL8}. - */ - EQUAL = 48, - - /** - * Computes exponential of x element-wise. - * - * Supported tensor {@link OperandType}: - * * {@link OperandType::TENSOR_FLOAT16} - * * {@link OperandType::TENSOR_FLOAT32} - * - * Supported tensor rank: from 1. - * - * Inputs: - * * 0: A tensor. - * - * Outputs: - * * 0: The output tensor of same shape as input0. - */ - EXP = 49, - - /** - * Inserts a dimension of 1 into a tensor's shape. - * - * Given a tensor input, this operation inserts a dimension of 1 at the - * given dimension index of input's shape. The dimension index starts at - * zero; if you specify a negative dimension index, it is counted backward - * from the end. - * - * Supported tensor {@link OperandType}: - * * {@link OperandType::TENSOR_FLOAT16} - * * {@link OperandType::TENSOR_FLOAT32} - * * {@link OperandType::TENSOR_INT32} - * * {@link OperandType::TENSOR_QUANT8_ASYMM} - * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3) - * - * Supported tensor rank: from 1 - * - * Inputs: - * * 0: An n-D tensor. - * * 1: An {@link OperandType::INT32} scalar specifying the dimension - * index to expand. Must be in the range [-(n + 1), (n + 1)). 
 - *
 - * Outputs:
 - * * 0: An (n + 1)-D tensor with the same {@link OperandType} and data as
 - * input0.
 - * For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
 - * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
 - * the scale and zeroPoint must be the same as input0.
 - */
 - EXPAND_DIMS = 50,
 -
 - /**
 - * Gathers values along an axis.
 - *
 - * Produces an output tensor with shape
 - * input0.dimension[:axis] + indices.dimension + input0.dimension[axis + 1:]
 - * where:
 - * # Vector indices (output is rank(input0)).
 - * output[a_0, ..., a_n, i, b_0, ..., b_n] =
 - * input0[a_0, ..., a_n, indices[i], b_0, ..., b_n]
 - *
 - * # Higher rank indices (output is rank(input0) + rank(indices) - 1).
 - * output[a_0, ..., a_n, i, ..., j, b_0, ... b_n] =
 - * input0[a_0, ..., a_n, indices[i, ..., j], b_0, ..., b_n]
 - *
 - * Supported tensor {@link OperandType}:
 - * * {@link OperandType::TENSOR_FLOAT16}
 - * * {@link OperandType::TENSOR_FLOAT32}
 - * * {@link OperandType::TENSOR_INT32}
 - * * {@link OperandType::TENSOR_QUANT8_ASYMM}
 - * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3)
 - *
 - * Supported tensor rank: from 1
 - *
 - * Inputs:
 - * * 0: An n-D tensor from which to gather values.
 - * * 1: An {@link OperandType::INT32} scalar specifying the axis.
 - * Negative index is used to specify axis from the end
 - * (e.g. -1 for the last axis). Must be in the range [-n, n).
 - * * 2: A k-D tensor {@link OperandType::TENSOR_INT32} of indices.
 - * The values must be in the bounds of the corresponding dimensions
 - * of input0.
 - *
 - * Outputs:
 - * * 0: An (n + k - 1)-D tensor with the same {@link OperandType} as input0.
 - * For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
 - * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
 - * the scale and zeroPoint must be the same as input0.
 - */
 - GATHER = 51,
 -
 - /**
 - * Generate axis-aligned bounding box proposals. 
- * - * Bounding box proposals are generated by applying transformation on a set - * of predefined anchors with the bounding box deltas from bounding box - * regression. A final step of hard NMS is applied to limit the number of - * returned boxes. - * - * Axis-aligned bounding boxes are represented by its upper-left corner - * coordinate (x1,y1) and lower-right corner coordinate (x2,y2). A valid - * bounding box should satisfy x1 <= x2 and y1 <= y2. - * - * Supported tensor {@link OperandType}: - * * {@link OperandType::TENSOR_FLOAT16} - * * {@link OperandType::TENSOR_FLOAT32} - * * {@link OperandType::TENSOR_QUANT8_ASYMM} - * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3) - * - * Inputs: - * * 0: A 4-D Tensor specifying the score of each anchor at each - * location. With "NHWC" data layout, the tensor shape is - * [batches, height, width, num_anchors]. With "NCHW" data layout, - * the tensor shape is [batches, num_anchors, height, width]. - * * 1: A 4-D Tensor specifying the bounding box deltas. With "NHWC" data - * layout, the tensor shape is [batches, height, width, num_anchors * 4]. - * With "NCHW" data layout, the tensor shape is - * [batches, num_anchors * 4, height, width]. The box deltas are encoded - * in the order of [dx, dy, dw, dh], where dx and dy is the linear-scale - * relative correction factor for the center position of the bounding box - * with respect to the width and height, dw and dh is the log-scale - * relative correction factor for the width and height. The last - * dimensions is the channel dimension. - * * 2: A 2-D Tensor of shape [num_anchors, 4], specifying the shape of each - * predefined anchor, with format [x1, y1, x2, y2]. For input0 of type - * {@link OperandType::TENSOR_QUANT8_ASYMM} or - * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED}, this tensor should be of - * {@link OperandType::TENSOR_QUANT16_SYMM}, with scale of 0.125. 
- * * 3: A 2-D Tensor of shape [batches, 2], specifying the size of - * each image in the batch, with format [image_height, image_width]. - * For input0 of type {@link OperandType::TENSOR_QUANT8_ASYMM} or - * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED}, this - * tensor should be of {@link OperandType::TENSOR_QUANT16_SYMM}, with - * scale of 0.125. - * * 4: An {@link OperandType::FLOAT32} scalar, specifying the ratio - * from the height of original image to the height of feature map. - * * 5: An {@link OperandType::FLOAT32} scalar, specifying the ratio - * from the width of original image to the width of feature map. - * * 6: An {@link OperandType::INT32} scalar, specifying the maximum - * number of boxes before going into the hard NMS algorithm. Boxes - * with the lowest scores are discarded to meet the limit. Set to - * a non-positive value for unlimited number. - * * 7: An {@link OperandType::INT32} scalar, specifying the maximum - * number of boxes returning from the hard NMS algorithm. Boxes - * with the lowest scores are discarded to meet the limit. Set to - * a non-positive value for unlimited number. - * * 8: An {@link OperandType::FLOAT32} scalar, specifying the IoU - * threshold for hard NMS. - * * 9: An {@link OperandType::FLOAT32} scalar, min_size. Boxes with - * height or width lower than the absolute threshold are filtered out. - * * 10: An {@link OperandType::BOOL} scalar, set to true to specify - * NCHW data layout for input0 and input1. Set to false for NHWC. - * - * Outputs: - * * 0: A tensor of the same {@link OperandType} as input0, of shape - * [num_output_rois], specifying the score of each output box. - * The boxes are grouped by batches, but the sequential order in - * each batch is not guaranteed. For type of - * {@link OperandType::TENSOR_QUANT8_ASYMM} or - * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED}, the scale and zero - * point must be the same as input0. 
- * * 1: A tensor of the same {@link OperandType} as input3, of shape - * [num_output_rois, 4], specifying the coordinates of each output - * bounding box for each class, with format [x1, y1, x2, y2]. - * The sequential order of the boxes corresponds with output0. - * For type of {@link OperandType::TENSOR_QUANT16_ASYMM}, the - * scale must be 0.125 and the zero point must be 0. - * * 2: A 1-D {@link OperandType::TENSOR_INT32} tensor, of shape - * [num_output_rois], specifying the batch index of each box. Boxes - * with the same batch index are grouped together. - */ - GENERATE_PROPOSALS = 52, - - /** - * For input tensors x and y, computes x > y elementwise. - * - * Supported tensor {@link OperandType}: - * * {@link OperandType::TENSOR_BOOL8} - * * {@link OperandType::TENSOR_FLOAT16} - * * {@link OperandType::TENSOR_FLOAT32} - * * {@link OperandType::TENSOR_INT32} - * * {@link OperandType::TENSOR_QUANT8_ASYMM} - * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3) - * - * Supported tensor rank: from 1 - * - * This operation supports broadcasting. - * - * Inputs: - * * 0: A tensor. - * * 1: A tensor of the same {@link OperandType} and dimensions compatible - * with input0. - * - * Outputs: - * * 0: A tensor of {@link OperandType::TENSOR_BOOL8}. - */ - GREATER = 53, - /** - * For input tensors x and y, computes x >= y elementwise. - * - * Supported tensor {@link OperandType}: - * * {@link OperandType::TENSOR_BOOL8} - * * {@link OperandType::TENSOR_FLOAT16} - * * {@link OperandType::TENSOR_FLOAT32} - * * {@link OperandType::TENSOR_INT32} - * * {@link OperandType::TENSOR_QUANT8_ASYMM} - * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3) - * - * Supported tensor rank: from 1 - * - * This operation supports broadcasting. - * - * Inputs: - * * 0: A tensor. - * * 1: A tensor of the same {@link OperandType} and dimensions compatible - * with input0. - * - * Outputs: - * * 0: A tensor of {@link OperandType::TENSOR_BOOL8}. 
- */ - GREATER_EQUAL = 54, - - /** - * Performs a grouped 2-D convolution operation. - * - * Given an input tensor of shape [batches, height, width, depth_in] and a - * filter tensor of shape [depth_out, filter_height, filter_width, depth_group] - * containing depth_out convolutional filters of depth depth_group, GROUPED_CONV - * applies a group of different filters to each input channel group, then - * concatenates the results together. - * - * Specifically, the input channels are divided into num_groups groups, each with - * depth depth_group, i.e. depth_in = num_groups * depth_group. The convolutional - * filters are also divided into num_groups groups, i.e. depth_out is divisible - * by num_groups. GROUPED_CONV applies each group of filters to the corresponding - * input channel group, and the result are concatenated together. - * - * The output dimensions are functions of the filter dimensions, stride, and - * padding. - * - * The values in the output tensor are computed as: - * - * output[b, i, j, g * channel_multiplier + q] = - * sum_{di, dj, dk} ( - * input[b, strides[1] * i + di, strides[2] * j + dj, - * g * depth_group + dk] * - * filter[g * channel_multiplier + q, di, dj, dk] - * ) + bias[channel] - * - * where channel_multiplier = depth_out / num_groups - * - * Supported tensor {@link OperandType} configurations: - * * 16 bit floating point: - * * * {@link OperandType::TENSOR_FLOAT16} for input, filter, output, and bias. - * - * * 32 bit floating point: - * * * {@link OperandType::TENSOR_FLOAT32} for input, filter, output, and bias. - * - * * Quantized: - * * * {@link OperandType::TENSOR_QUANT8_ASYMM} for input, filter, and output. - * * * {@link OperandType::TENSOR_INT32} for bias (with scale set to - * * * input.scale * filter.scale). - * - * * Quantized signed (since HAL version 1.3): - * * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} for input, filter, and output. 
- * * * {@link OperandType::TENSOR_INT32} for bias (with scale set to - * * * input.scale * filter.scale). - * - * * Quantized with symmetric per channel quantization for the filter: - * * * {@link OperandType::TENSOR_QUANT8_ASYMM} for input, and output. - * * * {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter. - * * * {@link OperandType::TENSOR_INT32} for bias (scale set to 0.0, - * * * each value scaling is separate and equal to input.scale * filter.scales[channel]). - * - * * Quantized signed with filter symmetric per channel quantization - * (since HAL version 1.3): - * * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} for input, and output. - * * * {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter. - * * * {@link OperandType::TENSOR_INT32} for bias (scale set to 0.0, - * * * each value scaling is separate and equal to input.scale * filter.scales[channel]). - * - * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout. - * With the default data layout NHWC, the data is stored in the order of: - * [batch, height, width, channels]. Alternatively, the data layout could - * be NCHW, the data storage order of: [batch, channels, height, width]. - * - * Both explicit padding and implicit padding are supported. - * - * Inputs (explicit padding): - * * 0: A 4-D tensor, of shape [batches, height, width, depth_in], - * specifying the input, where depth_in = num_groups * depth_group. - * * 1: A 4-D tensor, of shape - * [depth_out, filter_height, filter_width, depth_group], specifying - * the filter, where depth_out must be divisible by num_groups. For - * tensor of type {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL} - * the channel dimension (channelDim at - * {@link SymmPerChannelQuantParams}) must be set to 0. - * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input - * tensor of type {@link OperandType::TENSOR_FLOAT32} or - * {@link OperandType::TENSOR_FLOAT16}, the bias must be of the same type. 
- * For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM} and - * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} - * the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint - * of 0 and bias_scale == input_scale * filter_scale. For filter tensor - * of {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL}, the bias - * should be of {@link OperandType::TENSOR_INT32}, with zeroPoint of - * 0 and bias_scale of 0. The actual scale of each value 'i' is equal to - * bias_scale[i] = input_scale * filter_scale[i]. - * * 3: An {@link OperandType::INT32} scalar, specifying the padding on - * the left, in the ‘width’ dimension. - * * 4: An {@link OperandType::INT32} scalar, specifying the padding on - * the right, in the ‘width’ dimension. - * * 5: An {@link OperandType::INT32} scalar, specifying the padding on - * the top, in the ‘height’ dimension. - * * 6: An {@link OperandType::INT32} scalar, specifying the padding on - * the bottom, in the ‘height’ dimension. - * * 7: An {@link OperandType::INT32} scalar, specifying the stride when - * walking through input in the ‘width’ dimension. - * * 8: An {@link OperandType::INT32} scalar, specifying the stride when - * walking through input in the ‘height’ dimension. - * * 9: An {@link OperandType::INT32} scalar, specifying the number of - * groups. - * * 10: An {@link OperandType::INT32} scalar, and has to be one of the - * {@link FusedActivationFunc} values. Specifies the activation to - * invoke on the result. - * * 11: An {@link OperandType::BOOL} scalar, set to true to specify - * NCHW data layout for input0 and output0. Set to false for NHWC. - * - * Inputs (implicit padding): - * * 0: A 4-D tensor, of shape [batches, height, width, depth_in], - * specifying the input, where depth_in = num_groups * depth_group. - * * 1: A 4-D tensor, of shape - * [depth_out, filter_height, filter_width, depth_group], specifying - * the filter, where depth_out must be divisible by num_groups. 
For - * tensor of type {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL} - * the channel dimension (SymmPerChannelQuantParams::channelDim) - * must be set to 0. - * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input - * tensor of type {@link OperandType::TENSOR_FLOAT32} or - * {@link OperandType::TENSOR_FLOAT16}, the bias must be of the same - * {@link OperandType::TENSOR_FLOAT16}, the bias must be of the same type. - * For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM} and - * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} - * the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint - * of 0 and bias_scale == input_scale * filter_scale. For filter tensor - * of {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL}, the bias - * should be of {@link OperandType::TENSOR_INT32}, with zeroPoint of - * 0 and bias_scale of 0. The actual scale of each value 'i' is equal to - * bias_scale[i] = input_scale * filter_scale[i]. - * * 3: An {@link OperandType::INT32} scalar, specifying the implicit - * padding scheme, has to be one of the - * following values: {0 (NONE), 1 (SAME), 2 (VALID)}. - * * 4: An {@link OperandType::INT32} scalar, specifying the stride when - * walking through input in the ‘width’ dimension. - * * 5: An {@link OperandType::INT32} scalar, specifying the stride when - * walking through input in the ‘height’ dimension. - * * 6: An {@link OperandType::INT32} scalar, specifying the number of - * groups. - * * 7: An {@link OperandType::INT32} scalar, and has to be one of the - * {@link FusedActivationFunc} values. Specifies the activation to - * invoke on the result. - * * 8: An {@link OperandType::BOOL} scalar, set to true to specify - * NCHW data layout for input0 and output0. Set to false for NHWC. - * - * Outputs: - * * 0: The output 4-D tensor, of shape - * [batches, out_height, out_width, depth_out]. 
- * For a {@link OperandType::TENSOR_QUANT8_ASYMM} and - * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor, - * the scale and zeroPoint can be different from inputs' scale and zeroPoint. - */ - GROUPED_CONV_2D = 55, - - /** - * Localize the maximum keypoints from heatmaps. - * - * This operation approximates the accurate maximum keypoint scores and - * indices after bicubic upscaling by using Taylor expansion up to the - * quadratic term. - * - * The bounding box is represented by its upper-left corner coordinate - * (x1,y1) and lower-right corner coordinate (x2,y2) in the original image. - * A valid bounding box should satisfy x1 <= x2 and y1 <= y2. - * - * Supported tensor {@link OperandType}: - * * {@link OperandType::TENSOR_FLOAT16} - * * {@link OperandType::TENSOR_FLOAT32} - * * {@link OperandType::TENSOR_QUANT8_ASYMM} - * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3) - * - * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout. - * With the default data layout NHWC, the data is stored in the order of: - * [batch, height, width, channels]. Alternatively, the data layout could - * be NCHW, the data storage order of: [batch, channels, height, width]. - * - * Inputs: - * * 0: A 4-D Tensor of shape - * [num_boxes, heatmap_size, heatmap_size, num_keypoints], - * specifying the heatmaps, the height and width of heatmaps should - * be the same, and must be greater than or equal to 2. - * * 1: A 2-D Tensor of shape [num_boxes, 4], specifying the bounding boxes, - * each with format [x1, y1, x2, y2]. For input0 of type - * {@link OperandType::TENSOR_QUANT8_ASYMM}, this tensor should - * be of {@link OperandType::TENSOR_QUANT16_ASYMM}, with zeroPoint - * of 0 and scale of 0.125. - * For input0 of type - * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED}, this tensor - * should be of {@link OperandType::TENSOR_QUANT16_ASYMM}, with - * zeroPoint of -128 and scale of 0.125. 
- * * 2: An {@link OperandType::BOOL} scalar, set to true to specify - * NCHW data layout for input0. Set to false for NHWC. - * - * Outputs: - * * 0: A tensor of the same {@link OperandType} as input0, with shape - * [num_boxes, num_keypoints], specifying score of the keypoints. - * For a {@link OperandType::TENSOR_QUANT8_ASYMM} or - * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor, - * the scale and zeroPoint can be different from input0 scale and zeroPoint. - * * 1: A tensor of the same {@link OperandType} as input1, with shape - * [num_boxes, num_keypoints, 2], specifying the location of - * the keypoints, the second dimension is organized as - * [keypoint_x, keypoint_y]. - * For type of {@link OperandType::TENSOR_QUANT16_ASYMM}, the - * scale must be 0.125 and the zero point must be 0. - */ - HEATMAP_MAX_KEYPOINT = 56, - - /** - * Applies instance normalization to the input tensor. - * - * The values in the output tensor are computed as: - * - * output[b, h, w, c] = - * (input[b, h, w, c] - mean[b, c]) * gamma / - * sqrt(var[b, c] + epsilon) + beta - * - * Where the mean and variance are computed across the spatial dimensions: - * - * mean[b, c] = - * sum_{h, w}(input[b, h, w, c]) / sum(1) - * - * var[b, c] = - * sum_{h, w}(pow(input[b, h, w, c] - mean[b, c], 2)) / sum(1) - * - * Supported tensor {@link OperandType}: - * * {@link OperandType::TENSOR_FLOAT16} - * * {@link OperandType::TENSOR_FLOAT32} - * - * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout. - * With the default data layout NHWC, the data is stored in the order of: - * [batch, height, width, channels]. Alternatively, the data layout could - * be NCHW, the data storage order of: [batch, channels, height, width]. - * - * Inputs: - * * 0: An n-D tensor, specifying the tensor to be normalized. - * * 1: A scalar, specifying gamma, the scale applied to the normalized - * tensor. 
The scalar must be of {@link OperandType::FLOAT16} if - * input0 is of {@link OperandType::TENSOR_FLOAT16} and of - * {@link OperandType::FLOAT32} if input0 is of - * {@link OperandType::TENSOR_FLOAT32}. - * * 2: A scalar, specifying beta, the offset applied to the normalized - * tensor. The scalar must be of {@link OperandType::FLOAT16} if - * input0 is of {@link OperandType::TENSOR_FLOAT16} and of - * {@link OperandType::FLOAT32} if input0 is of - * {@link OperandType::TENSOR_FLOAT32}. - * * 3: A scalar, specifying epsilon, the small value added to variance to - * avoid dividing by zero. The scalar must be of {@link OperandType::FLOAT16} if - * input0 is of {@link OperandType::TENSOR_FLOAT16} and of - * {@link OperandType::FLOAT32} if input0 is of - * {@link OperandType::TENSOR_FLOAT32}. - * * 4: An {@link OperandType::BOOL} scalar, set to true to specify - * NCHW data layout for input0 and output0. Set to false for NHWC. - * - * Outputs: - * * 0: A tensor of the same {@link OperandType} and same shape as input0. - */ - INSTANCE_NORMALIZATION = 57, - - /** - * For input tensors x and y, computes x < y elementwise. - * - * Supported tensor {@link OperandType}: - * * {@link OperandType::TENSOR_BOOL8} - * * {@link OperandType::TENSOR_FLOAT16} - * * {@link OperandType::TENSOR_FLOAT32} - * * {@link OperandType::TENSOR_INT32} - * * {@link OperandType::TENSOR_QUANT8_ASYMM} - * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3) - * - * Supported tensor rank: from 1 - * - * This operation supports broadcasting. - * - * Inputs: - * * 0: A tensor. - * * 1: A tensor of the same {@link OperandType} and dimensions compatible - * with input0. - * - * Outputs: - * * 0: A tensor of {@link OperandType::TENSOR_BOOL8}. - */ - LESS = 58, - - /** - * For input tensors x and y, computes x <= y elementwise. 
- * - * Supported tensor {@link OperandType}: - * * {@link OperandType::TENSOR_BOOL8} - * * {@link OperandType::TENSOR_FLOAT16} - * * {@link OperandType::TENSOR_FLOAT32} - * * {@link OperandType::TENSOR_INT32} - * * {@link OperandType::TENSOR_QUANT8_ASYMM} - * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3) - * - * Supported tensor rank: from 1 - * - * This operation supports broadcasting. - * - * Inputs: - * * 0: A tensor. - * * 1: A tensor of the same {@link OperandType} and dimensions compatible - * with input0. - * - * Outputs: - * * 0: A tensor of {@link OperandType::TENSOR_BOOL8}. - */ - LESS_EQUAL = 59, - - /** - * Computes natural logarithm of x element-wise. - * - * Supported tensor {@link OperandType}: - * * {@link OperandType::TENSOR_FLOAT16} - * * {@link OperandType::TENSOR_FLOAT32} - * - * Supported tensor rank: from 1. - * - * Inputs: - * * 0: A tensor. - * - * Outputs: - * * 0: The output tensor of same shape as input0. - */ - LOG = 60, - - /** - * Returns the truth value of x AND y element-wise. - * - * Supported tensor {@link OperandType}: - * * {@link OperandType::TENSOR_BOOL8} - * - * Supported tensor rank: from 1 - * - * This operation supports broadcasting. - * - * Inputs: - * * 0: A tensor of {@link OperandType::TENSOR_BOOL8}. - * * 1: A tensor of {@link OperandType::TENSOR_BOOL8} and dimensions - * compatible with input0. - * - * Outputs: - * * 0: A tensor of {@link OperandType::TENSOR_BOOL8}. - */ - LOGICAL_AND = 61, - - /** - * Computes the truth value of NOT x element-wise. - * - * Supported tensor {@link OperandType}: - * * {@link OperandType::TENSOR_BOOL8} - * - * Supported tensor rank: from 1. - * - * Inputs: - * * 0: A tensor. - * - * Outputs: - * * 0: The output tensor of same shape as input0. - */ - LOGICAL_NOT = 62, - - /** - * Returns the truth value of x OR y element-wise. 
- * - * Supported tensor {@link OperandType}: - * * {@link OperandType::TENSOR_BOOL8} - * - * Supported tensor rank: from 1 - * - * This operation supports broadcasting. - * - * Inputs: - * * 0: A tensor of {@link OperandType::TENSOR_BOOL8}. - * * 1: A tensor of {@link OperandType::TENSOR_BOOL8} and dimensions - * compatible with input0. - * - * Outputs: - * * 0: A tensor of {@link OperandType::TENSOR_BOOL8}. - */ - LOGICAL_OR = 63, - - /** - * Computes the log softmax activations given logits. - * - * The output is calculated using this formula: - * - * output = logits * beta - log(reduce_sum(exp(logits * beta), axis)) - * - * Supported tensor {@link OperandType}: - * * {@link OperandType::TENSOR_FLOAT16} - * * {@link OperandType::TENSOR_FLOAT32} - * - * Supported tensor rank: from 1. - * - * Inputs: - * * 0: A tensor specifying the input logits. - * * 1: A scalar, specifying the positive scaling factor for the exponent, - * beta. - * For input tensor of {@link OperandType::TENSOR_FLOAT16}, the beta - * value must be of {@link OperandType::FLOAT16}. - * For input tensor of {@link OperandType::TENSOR_FLOAT32}, the beta - * value must be of {@link OperandType::FLOAT32}. - * * 2: An {@link OperandType::INT32} scalar specifying the axis to - * reduce across. Negative index is used to specify axis from the - * end (e.g. -1 for the last axis). Must be in the range [-n, n). - * - * Outputs: - * * 0: The output tensor of the same {@link OperandType} and shape as - * input0. - */ - LOG_SOFTMAX = 64, - - /** - * Returns the element-wise maximum of two tensors. - * - * Supported tensor {@link OperandType}: - * * {@link OperandType::TENSOR_FLOAT16} - * * {@link OperandType::TENSOR_FLOAT32} - * * {@link OperandType::TENSOR_INT32} - * * {@link OperandType::TENSOR_QUANT8_ASYMM} - * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3) - * - * Supported tensor rank: from 1. - * - * Inputs: - * * 0: A tensor. 
- * * 1: A tensor of the same {@link OperandType} and compatible dimensions - * with input0. - * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor, - * the scales and zeroPoint can be different from input0 scale and zeroPoint. - * - * Outputs: - * * 0: A tensor of the same {@link OperandType} as input0. - * For a {@link OperandType::TENSOR_QUANT8_ASYMM} and - * {@link OperandType::TENSOR_QUANT8_ASYMM} tensor, - * the scale and zeroPoint can be different from inputs' scale and zeroPoint. - */ - MAXIMUM = 65, - - /** - * Returns the element-wise minimum of two tensors. - * - * Supported tensor {@link OperandType}: - * * {@link OperandType::TENSOR_FLOAT16} - * * {@link OperandType::TENSOR_FLOAT32} - * * {@link OperandType::TENSOR_INT32} - * * {@link OperandType::TENSOR_QUANT8_ASYMM} - * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3) - * - * Supported tensor rank: from 1. - * - * Inputs: - * * 0: A tensor. - * * 1: A tensor of the same {@link OperandType} and compatible dimensions - * with input0. - * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor, - * the scales and zeroPoint can be different from input0 scale and zeroPoint. - * - * Outputs: - * * 0: A tensor of the same {@link OperandType} as input0. - * For a {@link OperandType::TENSOR_QUANT8_ASYMM} and - * {@link OperandType::TENSOR_QUANT8_ASYMM} tensor, - * the scale and zeroPoint can be different from inputs' scale and zeroPoint. - */ - MINIMUM = 66, - - /** - * Computes numerical negative value element-wise. - * - * Supported tensor {@link OperandType}: - * * {@link OperandType::TENSOR_FLOAT16} - * * {@link OperandType::TENSOR_FLOAT32} - * * {@link OperandType::TENSOR_INT32} - * - * Supported tensor rank: from 1. - * - * Inputs: - * * 0: A tensor. - * - * Outputs: - * * 0: The output tensor of same shape as input0. - */ - NEG = 67, - - /** - * For input tensors x and y, computes x != y elementwise. 
- * - * Supported tensor {@link OperandType}: - * * {@link OperandType::TENSOR_BOOL8} - * * {@link OperandType::TENSOR_FLOAT16} - * * {@link OperandType::TENSOR_FLOAT32} - * * {@link OperandType::TENSOR_INT32} - * * {@link OperandType::TENSOR_QUANT8_ASYMM} - * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3) - * - * Supported tensor rank: from 1 - * - * This operation supports broadcasting. - * - * Inputs: - * * 0: A tensor. - * * 1: A tensor of the same {@link OperandType} and dimensions compatible - * with input0. - * - * Outputs: - * * 0: A tensor of {@link OperandType::TENSOR_BOOL8}. - */ - NOT_EQUAL = 68, - - /** - * Pads a tensor with the given constant value according to the specified - * paddings. - * - * Supported tensor {@link OperandType}: - * * {@link OperandType::TENSOR_FLOAT16} - * * {@link OperandType::TENSOR_FLOAT32} - * * {@link OperandType::TENSOR_QUANT8_ASYMM} - * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3) - * - * Supported tensor rank: up to 4 - * - * Inputs: - * * 0: An n-D tensor, specifying the tensor to be padded. - * * 1: A 2-D Tensor of {@link OperandType::TENSOR_INT32}, the paddings - * for each spatial dimension of the input tensor. The shape of the - * tensor must be {rank(input0), 2}. - * padding[i, 0] specifies the number of elements to be padded in the - * front of dimension i. - * padding[i, 1] specifies the number of elements to be padded after - * the end of dimension i. - * * 2: A scalar specifying the value to use for padding input0. - * For input tensor of {@link OperandType::TENSOR_FLOAT16}, the - * pad value must be of {@link OperandType::FLOAT16}. - * For input tensor of {@link OperandType::TENSOR_FLOAT32}, the - * pad value must be of {@link OperandType::FLOAT32}. - * For input tensor of {@link OperandType::TENSOR_QUANT8_ASYMM} and - * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED}, - * the pad value must be of {@link OperandType::INT32}. 
The - * scale and zeroPoint are assumed to be the same as in input0. - * - * Outputs: - * * 0: A tensor of the same {@link OperandType} as input0. The - * output tensor has the same rank as input0, and each - * dimension of the output tensor has the same size as the - * corresponding dimension of the input tensor plus the size - * of the padding: - * output0.dimension[i] = - * padding[i, 0] + input0.dimension[i] + padding[i, 1] - * For a {@link OperandType::TENSOR_QUANT8_ASYMM} and - * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor, - * the scale and zeroPoint must be the same as input0. - */ - PAD_V2 = 69, - - /** - * Computes the power of one value to another. - * - * Given a tensor base and a tensor exponent, this operation computes - * base^exponent elementwise. - * - * This operations supports broadcasting. The size of the output is the - * maximum size along each dimension of the input operands. It starts with - * the trailing dimensions, and works its way forward. - * - * For example: - * base.dimension = {4, 1, 2} - * exponent.dimension = {5, 4, 3, 1} - * output.dimension = {5, 4, 3, 2} - * - * Supported tensor {@link OperandType}: - * * {@link OperandType::TENSOR_FLOAT16} - * * {@link OperandType::TENSOR_FLOAT32} - * - * Supported tensor rank: from 1 - * - * Inputs: - * * 0: A tensor specifying the base. - * * 1: A tensor specifying the exponent. - * - * Outputs: - * * 0: An output tensor. - */ - POW = 70, - - /** - * Parametric Rectified Linear Unit. - * - * It follows: f(x) = alpha * x for x < 0, f(x) = x for x >= 0, where alpha - * is a learned array with the same {@link OperandType} and compatible - * dimensions as input x. - * - * Two dimensions are compatible when: - * 1. they are equal, or - * 2. one of them is 1 - * - * The size of the output is the maximum size along each dimension of the - * input operands. It starts with the trailing dimensions, and works its way - * forward. 
- * - * Example: - * input.dimension = {4, 1, 2} - * alpha.dimension = {5, 4, 3, 1} - * output.dimension = {5, 4, 3, 2} - * - * Supported tensor {@link OperandType}: - * * {@link OperandType::TENSOR_FLOAT16} - * * {@link OperandType::TENSOR_FLOAT32} - * * {@link OperandType::TENSOR_QUANT8_ASYMM} - * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3) - * - * Supported tensor rank: from 1 - * - * Inputs: - * * 0: A tensor, specifying the input. - * * 1: A tensor of the same {@link OperandType}, and compatible dimensions - * as input0, specifying the alpha. - * - * Outputs: - * * 0: A tensor of the same {@link OperandType} as input0. - * For a {@link OperandType::TENSOR_QUANT8_ASYMM} and - * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor, - * the scales and zeroPoint can be different from input0 scale and zeroPoint. - */ - PRELU = 71, - - /** - * Quantizes the input tensor. - * - * The formula for {@link OperandType::TENSOR_QUANT8_ASYMM} output tensor is: - * - * output = max(0, min(255, round(input / scale) + zeroPoint) - * - * The formula for {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} output - * tensor is: - * - * output = max(-128, min(127, round(input / scale) + zeroPoint) - * - * Supported input tensor {@link OperandType}: - * * {@link OperandType::TENSOR_FLOAT16} - * * {@link OperandType::TENSOR_FLOAT32} - * - * Supported output tensor {@link OperandType}: - * * {@link OperandType::TENSOR_QUANT8_ASYMM} - * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3) - * - * Supported tensor rank: from 1 - * - * Inputs: - * * 0: A tensor, may be zero-sized. - * - * Outputs: - * * 0: The output tensor of same shape as input0, but with - * {@link OperandType::TENSOR_QUANT8_ASYMM} or. - * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED}. - */ - QUANTIZE = 72, - - /** - * A version of quantized LSTM, using 16 bit quantization for internal - * state. 
- * - * There is no projection layer, so cell state size is equal to the output - * size. - * - * Inputs: - * * 0: A 2-D tensor of type {@link OperandType::TENSOR_QUANT8_ASYMM} - * and shape [numBatches, inputSize] specifying the input to the LSTM - * cell. Tensor is quantized with a fixed quantization range of - * [-1, 127/128] (scale = 1/128, zeroPoint = 128). - * * 1: The input-to-input weights. - * A 2-D tensor of type {@link OperandType::TENSOR_QUANT8_ASYMM} - * and shape [outputSize, inputSize] specifying input-to-input part of - * weights for fully-connected layer inside the LSTM cell. - * Quantization zero point and scale must be the same across all the - * weights. - * * 2: The input-to-forget weights. - * A 2-D tensor of type {@link OperandType::TENSOR_QUANT8_ASYMM} - * and shape [outputSize, inputSize] specifying input-to-forget part of - * weights for fully-connected layer inside the LSTM cell. - * Quantization zero point and scale must be the same across all the - * weights. - * * 3: The input-to-cell weights. - * A 2-D tensor of type {@link OperandType::TENSOR_QUANT8_ASYMM} - * and shape [outputSize, inputSize] specifying input-to-cell part of - * weights for fully-connected layer inside the LSTM cell. - * Quantization zero point and scale must be the same across all the - * weights. - * * 4: The input-to-output weights. - * A 2-D tensor of type {@link OperandType::TENSOR_QUANT8_ASYMM} - * and shape [outputSize, inputSize] specifying input-to-output part of - * weights for fully-connected layer inside the LSTM cell. - * Quantization zero point and scale must be the same across all the - * weights. - * * 5: The recurrent-to-input weights. - * A 2-D tensor of type {@link OperandType::TENSOR_QUANT8_ASYMM} - * and shape [outputSize, outputSize] specifying recurrent-to-input part - * of weights for fully-connected layer inside the LSTM cell. - * Quantization zero point and scale must be the same across all the - * weights. 
- * * 6: The recurrent-to-forget weights. - * A 2-D tensor of type {@link OperandType::TENSOR_QUANT8_ASYMM} - * and shape [outputSize, outputSize] specifying recurrent-to-forget - * part of weights for fully-connected layer inside the LSTM cell. - * Quantization zero point and scale must be the same across all the - * weights. - * * 7: The recurrent-to-cell weights. - * A 2-D tensor of type {@link OperandType::TENSOR_QUANT8_ASYMM} - * and shape [outputSize, outputSize] specifying recurrent-to-cell part - * of weights for fully-connected layer inside the LSTM cell. - * Quantization zero point and scale must be the same across all the - * weights. - * * 8: The recurrent-to-output weights. - * A 2-D tensor of type {@link OperandType::TENSOR_QUANT8_ASYMM} - * and shape [outputSize, outputSize] specifying recurrent-to-output - * part of weights for fully-connected layer inside the LSTM cell. - * Quantization zero point and scale must be the same across all the - * weights. - * * 9: The input gate bias. - * A 1-D tensor of type {@link OperandType::TENSOR_INT32} and shape - * [outputSize] specifying the bias for the fully-connected layer - * inside the LSTM cell. Bias is quantized with scale being a product - * of input and weights scales and zeroPoint equal to 0. - * * 10:The forget gate bias. - * A 1-D tensor of type {@link OperandType::TENSOR_INT32} and shape - * [outputSize] specifying the bias for the fully-connected layer - * inside the LSTM cell. Bias is quantized with scale being a product - * of input and weights scales and zeroPoint equal to 0. - * * 11:The cell bias. - * A 1-D tensor of type {@link OperandType::TENSOR_INT32} and shape - * [outputSize] specifying the bias for the fully-connected layer - * inside the LSTM cell. Bias is quantized with scale being a product - * of input and weights scales and zeroPoint equal to 0. - * * 12:The output gate bias. 
- * A 1-D tensor of type {@link OperandType::TENSOR_INT32} and shape - * [outputSize] specifying the bias for the fully-connected layer - * inside the LSTM cell. Bias is quantized with scale being a product - * of input and weights scales and zeroPoint equal to 0. - * * 13: A 2-D tensor of type {@link OperandType::TENSOR_QUANT16_SYMM} - * and shape [numBatches, outputSize] specifying the cell state from the - * previous time step of the LSTM cell. It is quantized using a - * quantization range of [-2^4, 2^4 * 32767/32768] (scale = 2^4 / - * 32768, zeroPoint = 0). - * * 14: A 2-D tensor of type {@link OperandType::TENSOR_QUANT8_ASYMM} - * and shape [numBathes, outputSize] specifying the output of the LSTM - * cell from previous time-step. Tensor is quantized with a fixed - * quantization range of [-1, 127/128] (scale = 1/128, zeroPoint = - * 128). - * - * - * Outputs: - * * 0: A 2-D tensor of type {@link OperandType::TENSOR_QUANT16_SYMM} - * and shape [numBatches, outputSize] which contains a cell state from - * the current time step. Tensor is quantized using a quantization - * range of [-2^4, 2^4 * 32767/32768] (scale = 2^4 / 32768, zeroPoint = - * 0). - * * 1: A 2-D tensor of type {@link OperandType::TENSOR_QUANT8_ASYMM} - * and shape [numBathes, outputSize] which contains the output value. - * Tensor is quantized with a fixed quantization range of [-1, 127/128] - * (scale = 1/128, zeroPoint = 128). - */ - QUANTIZED_16BIT_LSTM = 73, - - /** - * Draws samples from a multinomial distribution. - * - * Supported tensor {@link OperandType}: - * * {@link OperandType::TENSOR_FLOAT16} - * * {@link OperandType::TENSOR_FLOAT32} - * - * Inputs: - * * 0: A 2-D tensor with shape [batches, classes], specifying the - * unnormalized log-probabilities for all classes. - * * 1: A scalar {@link OperandType::INT32}, specifying the number of - * independent samples to draw for each row slice. 
- * * 2: A 1-D {@link OperandType::TENSOR_INT32} tensor with shape [2], - * specifying seeds used to initialize the random distribution. If both - * provided seeds are 0, both will be randomly generated. - * Outputs: - * * 0: A 2-D {@link OperandType::TENSOR_INT32} tensor with shape - * [batches, samples], containing the drawn samples. - */ - RANDOM_MULTINOMIAL = 74, - - /** - * Reduces a tensor by computing the "logical and" of elements along given - * dimensions. - * - * If keep_dims is true, the reduced dimensions are - * retained with length 1. Otherwise, the rank of the tensor is reduced by - * 1 for each entry in dimensions. - * - * Supported tensor {@link OperandType}: - * * {@link OperandType::TENSOR_BOOL8} - * - * Supported tensor rank: up to 4 - * - * Inputs: - * * 0: An n-D tensor. - * * 1: A 1-D tensor of {@link OperandType::TENSOR_INT32}. The dimensions - * to reduce. Dimension values must be in the range [-n, n). - * * 2: An {@link OperandType::BOOL} scalar, keep_dims. If true, - * retains reduced dimensions with length 1. - * - * Outputs: - * * 0: A tensor of the same {@link OperandType} as input0. - * If all dimensions are reduced and keep_dims is false, the output - * shape is [1]. - */ - REDUCE_ALL = 75, - - /** - * Reduces a tensor by computing the "logical or" of elements along given - * dimensions. - * - * If keep_dims is true, the reduced dimensions are - * retained with length 1. Otherwise, the rank of the tensor is reduced by - * 1 for each entry in dimensions. - * - * Supported tensor {@link OperandType}: - * * {@link OperandType::TENSOR_BOOL8} - * - * Supported tensor rank: up to 4 - * - * Inputs: - * * 0: An n-D tensor. - * * 1: A 1-D tensor of {@link OperandType::TENSOR_INT32}. The dimensions - * to reduce. Dimension values must be in the range [-n, n). - * * 2: An {@link OperandType::BOOL} scalar, keep_dims. If true, - * retains reduced dimensions with length 1. 
- * - * Outputs: - * * 0: A tensor of the same {@link OperandType} as input0. - * If all dimensions are reduced and keep_dims is false, the output - * shape is [1]. - */ - REDUCE_ANY = 76, - - /** - * Reduces a tensor by computing the maximum of elements along given - * dimensions. - * - * If keep_dims is true, the reduced dimensions are - * retained with length 1. Otherwise, the rank of the tensor is reduced by - * 1 for each entry in dimensions. - * - * Supported tensor {@link OperandType}: - * * {@link OperandType::TENSOR_FLOAT16} - * * {@link OperandType::TENSOR_FLOAT32} - * * {@link OperandType::TENSOR_QUANT8_ASYMM} - * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3) - * - * Supported tensor rank: up to 4 - * - * Inputs: - * * 0: An n-D tensor. - * * 1: A 1-D tensor of {@link OperandType::TENSOR_INT32}. The dimensions - * to reduce. Dimension values must be in the range [-n, n). - * * 2: An {@link OperandType::BOOL} scalar, keep_dims. If true, - * retains reduced dimensions with length 1. - * - * Outputs: - * * 0: A tensor of the same {@link OperandType} as input0. - * If all dimensions are reduced and keep_dims is false, the output - * shape is [1]. - * For a {@link OperandType::TENSOR_QUANT8_ASYMM} and - * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor, - * the scale and zeroPoint must be the same as input0. - */ - REDUCE_MAX = 77, - - /** - * Reduces a tensor by computing the minimum of elements along given - * dimensions. - * - * If keep_dims is true, the reduced dimensions are - * retained with length 1. Otherwise, the rank of the tensor is reduced by - * 1 for each entry in dimensions. - * - * Supported tensor {@link OperandType}: - * * {@link OperandType::TENSOR_FLOAT16} - * * {@link OperandType::TENSOR_FLOAT32} - * * {@link OperandType::TENSOR_QUANT8_ASYMM} - * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3) - * - * Supported tensor rank: up to 4 - * - * Inputs: - * * 0: An n-D tensor. 
- * * 1: A 1-D tensor of {@link OperandType::TENSOR_INT32}. The dimensions - * to reduce. Dimension values must be in the range [-n, n). - * * 2: An {@link OperandType::BOOL} scalar, keep_dims. If true, - * retains reduced dimensions with length 1. - * - * Outputs: - * * 0: A tensor of the same {@link OperandType} as input0. - * If all dimensions are reduced and keep_dims is false, the output - * shape is [1]. - * For a {@link OperandType::TENSOR_QUANT8_ASYMM} and - * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor, - * the scale and zeroPoint must be the same as input0. - */ - REDUCE_MIN = 78, - - /** - * Reduces a tensor by multiplying elements along given dimensions. - * - * If keep_dims is true, the reduced dimensions are - * retained with length 1. Otherwise, the rank of the tensor is reduced by - * 1 for each entry in dimensions. - * - * Supported tensor {@link OperandType}: - * * {@link OperandType::TENSOR_FLOAT16} - * * {@link OperandType::TENSOR_FLOAT32} - * - * Supported tensor rank: up to 4 - * - * Inputs: - * * 0: An n-D tensor. - * * 1: A 1-D tensor of {@link OperandType::TENSOR_INT32}. The dimensions - * to reduce. Dimension values must be in the range [-n, n). - * * 2: An {@link OperandType::BOOL} scalar, keep_dims. If true, - * retains reduced dimensions with length 1. - * - * Outputs: - * * 0: A tensor of the same {@link OperandType} as input0. - * If all dimensions are reduced and keep_dims is false, the output - * shape is [1]. - */ - REDUCE_PROD = 79, - - /** - * Reduces a tensor by summing elements along given dimensions. - * - * If keep_dims is true, the reduced dimensions are - * retained with length 1. Otherwise, the rank of the tensor is reduced by - * 1 for each entry in dimensions. - * - * Supported tensor {@link OperandType}: - * * {@link OperandType::TENSOR_FLOAT16} - * * {@link OperandType::TENSOR_FLOAT32} - * - * Supported tensor rank: up to 4 - * - * Inputs: - * * 0: An n-D tensor. 
- * * 1: A 1-D tensor of {@link OperandType::TENSOR_INT32}. The dimensions - * to reduce. Dimension values must be in the range [-n, n). - * * 2: An {@link OperandType::BOOL} scalar, keep_dims. If true, - * retains reduced dimensions with length 1. - * - * Outputs: - * * 0: A tensor of the same {@link OperandType} as input0. - * If all dimensions are reduced and keep_dims is false, the output - * shape is [1]. - */ - REDUCE_SUM = 80, - - /** - * Select and scale the feature map of each region of interest to a unified - * output size by average pooling sampling points from bilinear interpolation. - * - * The region of interest is represented by its upper-left corner coordinate - * (x1,y1) and lower-right corner coordinate (x2,y2) in the original image. - * A spatial scaling factor is applied to map into feature map coordinate. - * A valid region of interest should satisfy x1 <= x2 and y1 <= y2. - * - * No rounding is applied in this operation. The sampling points are unified - * distributed in the pooling bin and their values are calculated by bilinear - * interpolation. - * - * Supported tensor {@link OperandType}: - * * {@link OperandType::TENSOR_FLOAT16} - * * {@link OperandType::TENSOR_FLOAT32} - * * {@link OperandType::TENSOR_QUANT8_ASYMM} - * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3) - * - * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout. - * With the default data layout NHWC, the data is stored in the order of: - * [batch, height, width, channels]. Alternatively, the data layout could - * be NCHW, the data storage order of: [batch, channels, height, width]. - * - * Inputs: - * * 0: A 4-D tensor, specifying the feature map. - * * 1: A 2-D Tensor of shape [num_rois, 4], specifying the locations of - * the regions of interest, each line with format [x1, y1, x2, y2]. 
- * For input0 of type {@link OperandType::TENSOR_QUANT8_ASYMM}, - * this tensor should be of {@link OperandType::TENSOR_QUANT16_ASYMM}, - * with zeroPoint of 0 and scale of 0.125. Zero num_rois is - * supported for this tensor. - * * 2: An 1-D {@link OperandType::TENSOR_INT32} tensor, of shape - * [num_rois], specifying the batch index of each box. Boxes with - * the same batch index are grouped together. Zero num_rois is - * supported for this tensor. - * * 3: An {@link OperandType::INT32} scalar, specifying the output - * height of the output tensor. - * * 4: An {@link OperandType::INT32} scalar, specifying the output - * width of the output tensor. - * * 5: An {@link OperandType::FLOAT32} scalar, specifying the ratio - * from the height of original image to the height of feature map. - * * 6: An {@link OperandType::FLOAT32} scalar, specifying the ratio - * from the width of original image to the width of feature map. - * * 7: An {@link OperandType::INT32} scalar, specifying the number of - * sampling points in height dimension used to compute the output. - * Set to 0 for adaptive value of ceil(roi_height/out_height). - * * 8: An {@link OperandType::INT32} scalar, specifying the number of - * sampling points in width dimension used to compute the output. - * Set to 0 for adaptive value of ceil(roi_width/out_width). - * * 9: An {@link OperandType::BOOL} scalar, set to true to specify - * NCHW data layout for input0 and output0. Set to false for NHWC. - * - * Outputs: - * * 0: A tensor of the same {@link OperandType} as input0. The output - * shape is [num_rois, out_height, out_width, depth]. - * For a {@link OperandType::TENSOR_QUANT8_ASYMM} and - * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor, - * the scale and zeroPoint can be different from the input0 scale and zeroPoint. - */ - ROI_ALIGN = 81, - - /** - * Select and scale the feature map of each region of interest to a unified - * output size by max-pooling. 
- * - * The region of interest is represented by its upper-left corner coordinate - * (x1,y1) and lower-right corner coordinate (x2,y2) in the original image. - * A spatial scaling factor is applied to map into feature map coordinate. - * A valid region of interest should satisfy x1 <= x2 and y1 <= y2. - * - * Rounding is applied in this operation to ensure integer boundary for - * regions of interest and pooling bins. - * - * Supported tensor {@link OperandType}: - * * {@link OperandType::TENSOR_FLOAT16} - * * {@link OperandType::TENSOR_FLOAT32} - * * {@link OperandType::TENSOR_QUANT8_ASYMM} - * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3) - * - * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout. - * With the default data layout NHWC, the data is stored in the order of: - * [batch, height, width, channels]. Alternatively, the data layout could - * be NCHW, the data storage order of: [batch, channels, height, width]. - * - * Inputs: - * * 0: A 4-D tensor, specifying the feature map. - * * 1: A 2-D Tensor of shape [num_rois, 4], specifying the locations of - * the regions of interest, each line with format [x1, y1, x2, y2]. - * For input0 of type {@link OperandType::TENSOR_QUANT8_ASYMM} and - * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor, - * this tensor should be of {@link OperandType::TENSOR_QUANT16_ASYMM}, - * with zeroPoint of 0 and scale of 0.125. - * * 2: An 1-D {@link OperandType::TENSOR_INT32} tensor, of shape - * [num_rois], specifying the batch index of each box. Boxes with - * the same batch index are grouped together. - * * 3: An {@link OperandType::INT32} scalar, specifying the output - * height of the output tensor. - * * 4: An {@link OperandType::INT32} scalar, specifying the output - * width of the output tensor. - * * 5: An {@link OperandType::FLOAT32} scalar, specifying the ratio - * from the height of original image to the height of feature map. 
- * * 6: An {@link OperandType::FLOAT32} scalar, specifying the ratio - * from the width of original image to the width of feature map. - * * 7: An {@link OperandType::BOOL} scalar, set to true to specify - * NCHW data layout for input0 and output0. Set to false for NHWC. - * - * Outputs: - * * 0: A tensor of the same {@link OperandType} as input0. The output - * shape is [num_rois, out_height, out_width, depth]. - * For input0 of type {@link OperandType::TENSOR_QUANT8_ASYMM} and - * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor, - * the scale and zeroPoint must be the same as input0. - */ - ROI_POOLING = 82, - - /** - * Computes reciprocal of square root of x element-wise. - * - * Supported tensor {@link OperandType}: - * * {@link OperandType::TENSOR_FLOAT16} - * * {@link OperandType::TENSOR_FLOAT32} - * - * Supported tensor rank: from 1. - * - * Inputs: - * * 0: A tensor. - * - * Outputs: - * * 0: The output tensor of same shape as input0. - */ - RSQRT = 83, - - /** - * Using a tensor of booleans c and input tensors x and y select values - * elementwise from both input tensors: - * - * O[i] = C[i] ? x[i] : y[i]. - * - * Supported tensor {@link OperandType}: - * * {@link OperandType::TENSOR_FLOAT16} - * * {@link OperandType::TENSOR_FLOAT32} - * * {@link OperandType::TENSOR_INT32} - * * {@link OperandType::TENSOR_QUANT8_ASYMM} - * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3) - * - * Supported tensor rank: from 1 - * - * Inputs: - * * 0: A tensor of type {@link OperandType::TENSOR_BOOL8} acting as a - * mask that chooses, based on the value at each element, whether the - * corresponding element in the output should be taken from input1 (if - * true) or input2 (if false). - * * 1: An input tensor of the same shape as input0. - * * 2: An input tensor of the same shape and type as input1. 
- * For a {@link OperandType::TENSOR_QUANT8_ASYMM} - * and {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor, - * the scales and zeroPoint can be different from input1 scale and zeroPoint. - * - * Outputs: - * * 0: A tensor of the same type and shape as input1 and input2. - * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor, - * the scale and zeroPoint can be different from inputs' scale and zeroPoint. - */ - SELECT = 84, - - /** - * Computes sin of x element-wise. - * - * Supported tensor {@link OperandType}: - * * {@link OperandType::TENSOR_FLOAT16} - * * {@link OperandType::TENSOR_FLOAT32} - * - * Supported tensor rank: from 1. - * - * Inputs: - * * 0: A tensor. - * - * Outputs: - * * 0: The output tensor of same shape as input0. - */ - SIN = 85, - - /** - * Extracts a slice of specified size from the input tensor starting at a - * specified location. - * - * The starting location is specified as a 1-D tensor containing offsets - * for each dimension. The size is specified as a 1-D tensor containing - * either size of a slice along corresponding dimension or -1. In the latter - * case, all the remaining elements in dimension are included in the slice. - * - * A sum of begin offset and a size of a slice must not exceed size of a - * corresponding dimension. - * - * Supported tensor {@link OperandType}: - * * {@link OperandType::TENSOR_FLOAT16} - * * {@link OperandType::TENSOR_FLOAT32} - * * {@link OperandType::TENSOR_INT32} - * * {@link OperandType::TENSOR_QUANT8_ASYMM} - * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3) - * - * Supported tensor rank: from 1 - * - * Inputs: - * * 0: An n-D tensor to take slice from, may be zero-sized. - * * 1: A 1-D tensor of type {@link OperandType::TENSOR_INT32} specifying - * the beginning indices of the slice in each dimension. - * * 2: A 1-D tensor of type {@link OperandType::TENSOR_INT32} specifying - * the size of the slice in each dimension. 
- * - * Outputs: - * * 0: An n-D tensor of the same type as the input containing the slice. - * For a {@link OperandType::TENSOR_QUANT8_ASYMM} and - * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor, - * its scale and zeroPoint has to be same as the input0 scale and zeroPoint. - */ - SLICE = 86, - - /** - * Splits a tensor along a given axis into num_splits subtensors. - * - * Supported tensor {@link OperandType}: - * * {@link OperandType::TENSOR_FLOAT16} - * * {@link OperandType::TENSOR_FLOAT32} - * * {@link OperandType::TENSOR_INT32} - * * {@link OperandType::TENSOR_QUANT8_ASYMM} - * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3) - * - * Supported tensor rank: from 1 - * - * Inputs: - * * 0: An n-D tensor to split. - * * 1: An {@link OperandType::INT32} scalar specifying the axis along - * which to split. - * * 2: An {@link OperandType::INT32} scalar indicating the number of - * splits along given axis. Must evenly divide axis size. - * - * Outputs: - * * 0 ~ (num_splits - 1): Resulting subtensors. - * For a {@link OperandType::TENSOR_QUANT8_ASYMM} and - * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor, - * the scale and zeroPoint must be the same as input0. - */ - SPLIT = 87, - - /** - * Computes square root of x element-wise. - * - * Supported tensor {@link OperandType}: - * * {@link OperandType::TENSOR_FLOAT16} - * * {@link OperandType::TENSOR_FLOAT32} - * - * Supported tensor rank: from 1. - * - * Inputs: - * * 0: A tensor. - * - * Outputs: - * * 0: The output tensor of same shape as input0. - */ - SQRT = 88, - - /** - * Constructs a tensor by tiling a given tensor. - * - * This operation creates a new tensor by replicating `input` `multiples` - * times. The output tensor's i-th dimension has `input.dims(i) * multiples[i]` - * elements, and the values of `input` are replicated `multiples[i]` times - * along the i-th dimension. - * For example, tiling `[a b c d]` by `[2]` produces `[a b c d a b c d]`. 
- * - * Supported tensor {@link OperandType}: - * * {@link OperandType::TENSOR_FLOAT16} - * * {@link OperandType::TENSOR_FLOAT32} - * * {@link OperandType::TENSOR_INT32} - * * {@link OperandType::TENSOR_QUANT8_ASYMM} - * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3) - * - * Supported tensor rank: from 1 - * - * Inputs: - * * 0: input, an n-D tensor specifying the input. - * * 1: multiples, a 1-D tensor of {@link OperandType::TENSOR_INT32}. - * The length of multiples must be n. - * - * Outputs: - * * 0: A tiled tensor of the same {@link OperandType} and rank as `input`. - * For a {@link OperandType::TENSOR_QUANT8_ASYMM} and - * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor, - * the scale and zeroPoint must be the same as input0. - */ - TILE = 89, - - /** - * Finds values and indices of the k largest entries for the last dimension. - * - * Resulting values in each dimensions are sorted in descending order. If - * two values are equal, the one with larger index appears first. - * - * Supported tensor {@link OperandType}: - * * {@link OperandType::TENSOR_FLOAT16} - * * {@link OperandType::TENSOR_FLOAT32} - * * {@link OperandType::TENSOR_INT32} - * * {@link OperandType::TENSOR_QUANT8_ASYMM} - * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since HAL version 1.3) - * - * Supported tensor rank: from 1 - * - * Inputs: - * * 0: input, an n-D tensor specifying the input. - * * 1: k, an {@link OperandType::INT32} scalar, specifying the number of - * top elements to look for along the last dimension. - * - * Outputs: - * * 0: An n-D tensor of the same type as the input, containing the k - * largest elements along each last dimensional slice. - * For a {@link OperandType::TENSOR_QUANT8_ASYMM} and - * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor, - * the scale and zeroPoint must be the same as input0. 
- * * 1: An n-D tensor of type {@link OperandType::TENSOR_INT32} - * containing the indices of values within the last dimension of input. - */ - TOPK_V2 = 90, - - /** - * Performs the transpose of 2-D convolution operation. - * - * This operation is sometimes called "deconvolution" after Deconvolutional - * Networks, but is actually the transpose (gradient) of - * {@link OperandType::CONV_2D} rather than an actual deconvolution. - * - * The output dimensions are functions of the filter dimensions, stride, and - * padding. - * - * Supported tensor {@link OperandType} configurations: - * * 16 bit floating point: - * * * {@link OperandType::TENSOR_FLOAT16} for input, filter, output, and bias. - * - * * 32 bit floating point: - * * * {@link OperandType::TENSOR_FLOAT32} for input, filter, output, and bias. - * - * * Quantized: - * * * {@link OperandType::TENSOR_QUANT8_ASYMM} for input, filter, and output. - * * * {@link OperandType::TENSOR_INT32} for bias (with scale set to - * * * input.scale * filter.scale). - * - * * Quantized with symmetric per channel quantization for the filter: - * * * {@link OperandType::TENSOR_QUANT8_ASYMM} for input, and output. - * * * {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter. - * * * {@link OperandType::TENSOR_INT32} for bias (scale set to 0.0, - * * * each value scaling is separate and equal to input.scale * filter.scales[channel]). - * - * Available since HAL version 1.3: - * * Quantized signed (since HAL version 1.3): - * * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} for input, filter, and output. - * * * {@link OperandType::TENSOR_INT32} for bias (with scale set to - * * * input.scale * filter.scale). - * - * * Quantized signed with filter symmetric per channel quantization - * (since HAL version 1.3): - * * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} for input, and output. - * * * {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter. 
- * * * {@link OperandType::TENSOR_INT32} for bias (scale set to 0.0, - * * * each value scaling is separate and equal to input.scale * filter.scales[channel]). - * - * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout. - * With the default data layout NHWC, the data is stored in the order of: - * [batch, height, width, channels]. Alternatively, the data layout could - * be NCHW, the data storage order of: [batch, channels, height, width]. - * - * Both explicit padding and implicit padding are supported. - * - * Inputs (explicit padding): - * * 0: A 4-D tensor, of shape [batches, height, width, depth_in], - * specifying the input. - * * 1: A 4-D tensor, of shape - * [depth_out, filter_height, filter_width, depth_in], specifying the - * filter. For tensor of type - * {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL} the channel - * dimension (SymmPerChannelQuantParams::channelDim) must be set to 0. - * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input - * tensor of type {@link OperandType::TENSOR_FLOAT32} or - * {@link OperandType::TENSOR_FLOAT16}, the bias must be of the - * same type. - * For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM} - * and {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED}, - * the bias should be of {@link OperandType::TENSOR_INT32}, - * with zeroPoint of 0 and bias_scale == input_scale * filter_scale. - * For filter tensor of {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL}, - * the bias must be of {@link OperandType::TENSOR_INT32}, with zeroPoint of 0 - * and bias_scale of 0. The actual scale of each value 'i' is equal to - * bias_scale[i] = input_scale * filter_scale[i]. - * * 3: An {@link OperandType::INT32} scalar, specifying the padding on - * the left, in the ‘width’ dimension. - * * 4: An {@link OperandType::INT32} scalar, specifying the padding on - * the right, in the ‘width’ dimension. 
- * * 5: An {@link OperandType::INT32} scalar, specifying the padding on - * the top, in the ‘height’ dimension. - * * 6: An {@link OperandType::INT32} scalar, specifying the padding on - * the bottom, in the ‘height’ dimension. - * * 7: An {@link OperandType::INT32} scalar, specifying the stride when - * walking through input in the ‘width’ dimension. - * * 8: An {@link OperandType::INT32} scalar, specifying the stride when - * walking through input in the ‘height’ dimension. - * * 9: An {@link OperandType::INT32} scalar, and has to be one of the - * {@link FusedActivationFunc} values. Specifies the activation to - * invoke on the result. - * * 10: An {@link OperandType::BOOL} scalar, set to true to specify - * NCHW data layout for input0 and output0. Set to false for NHWC. - * - * Inputs (implicit padding): - * * 0: A 4-D tensor, of shape [batches, height, width, depth_in], - * specifying the input. - * * 1: A 4-D tensor, of shape - * [depth_out, filter_height, filter_width, depth_in], specifying the - * filter. For tensor of type - * {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL} the channel - * dimension (SymmPerChannelQuantParams::channelDim) must be set to 0. - * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input - * tensor of type {@link OperandType::TENSOR_FLOAT32} or - * {@link OperandType::TENSOR_FLOAT16}, the bias should be of the - * same type. - * For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM} - * and {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED}, - * the bias should be of {@link OperandType::TENSOR_INT32}, - * with zeroPoint of 0 and bias_scale == input_scale * filter_scale. - * For filter tensor of {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL}, - * the bias must be of {@link OperandType::TENSOR_INT32}, with zeroPoint of 0 - * and bias_scale of 0. The actual scale of each value 'i' is equal to - * bias_scale[i] = input_scale * filter_scale[i]. 
- * * 3: An {@link OperandType::TENSOR_INT32} tensor, specifying the output - * tensor shape. - * * 4: An {@link OperandType::INT32} scalar, specifying the implicit - * padding scheme, has to be one of the - * following values: {0 (NONE), 1 (SAME), 2 (VALID)}. - * * 5: An {@link OperandType::INT32} scalar, specifying the stride when - * walking through input in the ‘width’ dimension. - * * 6: An {@link OperandType::INT32} scalar, specifying the stride when - * walking through input in the ‘height’ dimension. - * * 7: An {@link OperandType::INT32} scalar, and has to be one of the - * {@link FusedActivationFunc} values. Specifies the activation to - * invoke on the result. - * * 8: An {@link OperandType::BOOL} scalar, set to true to specify - * NCHW data layout for input0 and output0. Set to false for NHWC. - * - * Outputs: - * * 0: The output 4-D tensor, of shape - * [batches, out_height, out_width, depth_out]. - * For a {@link OperandType::TENSOR_QUANT8_ASYMM} and - * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor, - * the scale and zeroPoint can be different from inputs' scale and zeroPoint. - */ - TRANSPOSE_CONV_2D = 91, - - /** - * A recurrent neural network specified by an LSTM cell. - * - * Performs (fully) dynamic unrolling of input. - * - * This Op unrolls the input along the time dimension, and implements the - * following operation for each element in the sequence - * s = 1...sequence_length: - * outputs[s] = projection(state = activation(LSTMOp(inputs[s]))) - * - * Where LSTMOp is the LSTM op as in {@link OperandType::LSTM}, - * the "projection" is an optional projection layer from state and output - * and the “activation” is the function passed as the - * “fused_activation_function” argument (if not “NONE”). - * - * Supported tensor {@link OperandType}: - * * {@link OperandType::TENSOR_FLOAT16} - * * {@link OperandType::TENSOR_FLOAT32} - * - * Supported tensor rank: 3, either time-major or batch-major. 
- * - * All input and output tensors must be of the same type. - * - * Inputs: - * * 0: The input (\f$x_t\f$). - * A 3-D tensor of shape: - * If time-major: [max_time, batch_size, input_size] - * If batch-major: [batch_size, max_time, input_size] - * where “max_time” is the number of timesteps (sequence length), - * “batch_size” corresponds to the batching dimension, and - * “input_size” is the size of the input. - * * 1: The input-to-input weights (\f$W_{xi}\f$). Optional. - * A 2-D tensor of shape [num_units, input_size], where “num_units” - * corresponds to the number of cell units. - * * 2: The input-to-forget weights (\f$W_{xf}\f$). - * A 2-D tensor of shape [num_units, input_size]. - * * 3: The input-to-cell weights (\f$W_{xc}\f$). - * A 2-D tensor of shape [num_units, input_size]. - * * 4: The input-to-output weights (\f$W_{xo}\f$). - * A 2-D tensor of shape [num_units, input_size]. - * * 5: The recurrent-to-input weights (\f$W_{hi}\f$). Optional. - * A 2-D tensor of shape [num_units, output_size], where “output_size” - * corresponds to either the number of cell units (i.e., “num_units”), - * or the second dimension of the “projection_weights”, if defined. - * * 6: The recurrent-to-forget weights (\f$W_{hf}\f$). - * A 2-D tensor of shape [num_units, output_size]. - * * 7: The recurrent-to-cell weights (\f$W_{hc}\f$). - * A 2-D tensor of shape [num_units, output_size]. - * * 8: The recurrent-to-output weights (\f$W_{ho}\f$). - * A 2-D tensor of shape [num_units, output_size]. - * * 9: The cell-to-input weights (\f$W_{ci}\f$). Optional. - * A 1-D tensor of shape [num_units]. - * * 10:The cell-to-forget weights (\f$W_{cf}\f$). Optional. - * A 1-D tensor of shape [num_units]. - * * 11:The cell-to-output weights (\f$W_{co}\f$). Optional. - * A 1-D tensor of shape [num_units]. - * * 12:The input gate bias (\f$b_i\f$). Optional. - * A 1-D tensor of shape [num_units]. - * * 13:The forget gate bias (\f$b_f\f$). - * A 1-D tensor of shape [num_units]. 
- * * 14:The cell bias (\f$b_c\f$). - * A 1-D tensor of shape [num_units]. - * * 15:The output gate bias (\f$b_o\f$). - * A 1-D tensor of shape [num_units]. - * * 16:The projection weights (\f$W_{proj}\f$). Optional. - * A 2-D tensor of shape [output_size, num_units]. - * * 17:The projection bias (\f$b_{proj}\f$). Optional. - * A 1-D tensor of shape [output_size]. - * * 18:The output state (in) (\f$h_{t-1}\f$). - * A 2-D tensor of shape [batch_size, output_size]. - * * 19:The cell state (in) (\f$C_{t-1}\f$). - * A 2-D tensor of shape [batch_size, num_units]. - * * 20:The activation function (\f$g\f$). - * A value indicating the activation function: - * <ul> - * <li>0: None; - * <li>1: Relu; - * <li>3: Relu6; - * <li>4: Tanh; - * <li>6: Sigmoid. - * </ul> - * * 21:The clipping threshold (\f$t_{cell}\f$) for the cell state, such - * that values are bound within [-cell_clip, cell_clip]. If set to 0.0 - * then clipping is disabled. - * * 22:The clipping threshold (\f$t_{proj}\f$) for the output from the - * projection layer, such that values are bound within - * [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled. - * * 23:Time-major if true, batch-major if false. - * * 24:The input layer normalization weights. Optional. - * A 1-D tensor of shape [num_units]. Used to rescale normalized inputs - * to activation at input gate. - * * 25:The forget layer normalization weights. Optional. - * A 1-D tensor of shape [num_units]. Used to rescale normalized inputs - * to activation at forget gate. - * * 26:The cell layer normalization weights. Optional. - * A 1-D tensor of shape [num_units]. Used to rescale normalized inputs - * to activation at cell gate. - * * 27:The output layer normalization weights. Optional. - * A 1-D tensor of shape [num_units]. Used to rescale normalized inputs - * to activation at output gate. - * - * Outputs: - * * 0: The output (\f$o_t\f$). 
- * A 3-D tensor of shape: - * If time-major: [max_time, batch_size, output_size] - * If batch-major: [batch_size, max_time, output_size] - * * 1: A tensor of shape [batch_size, output_size] containing a hidden - * state from the last time step in the sequence. This output is - * optional and can be omitted. If this output is present then - * output #2 must be present as well. - * Available since HAL version 1.3. - * * 2: A tensor of shape [batch_size, cell_size] containing a cell state - * from the last time step in the sequence. This output is optional - * and can be omitted. - * Available since HAL version 1.3. - */ - UNIDIRECTIONAL_SEQUENCE_LSTM = 92, - - /** - * A recurrent neural network layer that applies a basic RNN cell to a - * sequence of inputs. - * - * This layer unrolls the input along the sequence dimension, and implements - * the following operation - * for each element in the sequence s = 1...sequence_length: - * outputs[s] = state = activation(inputs[s] * input_weights’ + state * - * recurrent_weights’ + bias) - * - * Where: - * * “input_weights” is a weight matrix that multiplies the inputs; - * * “recurrent_weights” is a weight matrix that multiplies the current - * “state” which itself is the output from the previous time step - * computation; - * * “bias” is a bias vector (added to each output vector in the batch); - * * “activation” is the function passed as the “fused_activation_function” - * argument (if not “NONE”). - * - * Supported tensor {@link OperandType}: - * * {@link OperandType::TENSOR_FLOAT16} - * * {@link OperandType::TENSOR_FLOAT32} - * - * The input tensors must all be the same type. - * - * Inputs: - * * 0: input. - * A 3-D tensor. The shape is defined by the input 6 (timeMajor). If - * it is set to 1, then the input has a shape [maxTime, batchSize, - * inputSize], otherwise the input has a shape [batchSize, maxTime, - * inputSize]. - * * 1: weights. - * A 2-D tensor of shape [numUnits, inputSize]. - * * 2: recurrent_weights. 
- * A 2-D tensor of shape [numUnits, numUnits]. - * * 3: bias. - * A 1-D tensor of shape [numUnits]. - * * 4: hidden state - * A 2-D tensor of shape [batchSize, numUnits]. Specifies a hidden - * state input for the first time step of the computation. - * * 5: fusedActivationFunction. - * A {@link FusedActivationFunc} value indicating the activation function. If - * “NONE” is specified then it results in a linear activation. - * * 6: timeMajor - * An {@link OperandType::INT32} scalar specifying the shape format - * of input and output tensors. Must be set to either 0 or 1. - * Outputs: - * * 0: output. - * A 3-D tensor. The shape is defined by the input 6 (timeMajor). If - * it is set to 1, then the output has a shape [maxTime, batchSize, - * numUnits], otherwise the output has a shape [batchSize, maxTime, - * numUnits]. - */ - UNIDIRECTIONAL_SEQUENCE_RNN = 93, - - /** - * Resizes images to given size using the nearest neighbor interpretation. - * - * Resized images must be distorted if their output aspect ratio is not the - * same as input aspect ratio. The corner pixels of output may not be the - * same as corner pixels of input. - * - * Supported tensor {@link OperandType}: - * * {@link OperandType::TENSOR_FLOAT16} - * * {@link OperandType::TENSOR_FLOAT32} - * * {@link OperandType::TENSOR_QUANT8_ASYMM} - * - * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout. - * With the default data layout NHWC, the data is stored in the order of: - * [batch, height, width, channels]. Alternatively, the data layout could - * be NCHW, the data storage order of: [batch, channels, height, width]. - * - * Both resizing by shape and resizing by scale are supported. - * - * Inputs (resizing by shape): - * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying - * the input. Zero batches is supported for this tensor. - * * 1: An {@link OperandType::INT32} scalar, specifying the output - * width of the output tensor. 
- * * 2: An {@link OperandType::INT32} scalar, specifying the output - * height of the output tensor. - * * 3: An {@link OperandType::BOOL} scalar, default to false. - * Set to true to specify NCHW data layout for input0 and output0. - * * 4: Align corners. An optional {@link OperandType::BOOL} - * scalar, default to false. If True, the centers of the 4 corner - * pixels of the input and output tensors are aligned, preserving the - * values at the corner pixels. - * Available since HAL version 1.3. - * * 5: Half pixel centers. An optional {@link OperandType::BOOL} - * scalar, default to false. If True, the pixel centers are assumed to - * be at (0.5, 0.5). This is the default behavior of image.resize in - * TF 2.0. If this parameter is True, then align_corners parameter - * must be False. - * Available since HAL version 1.3. - * - * Inputs (resizing by scale): - * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying - * the input. Zero batches is supported for this tensor. - * * 1: A scalar, specifying width_scale, the scaling factor of the width - * dimension from the input tensor to the output tensor. The output - * width is calculated as new_width = floor(width * width_scale). - * The scalar must be of {@link OperandType::FLOAT16} if input0 is - * of {@link OperandType::TENSOR_FLOAT16} and of - * {@link OperandType::FLOAT32} otherwise. - * * 2: A scalar, specifying height_scale, the scaling factor of the height - * dimension from the input tensor to the output tensor. The output - * height is calculated as new_height = floor(height * height_scale). - * The scalar must be of {@link OperandType::FLOAT16} if input0 is - * of {@link OperandType::TENSOR_FLOAT16} and of - * {@link OperandType::FLOAT32} otherwise. - * * 3: An {@link OperandType::BOOL} scalar, default to false. - * Set to true to specify NCHW data layout for input0 and output0. - * * 4: Align corners. An optional {@link OperandType::BOOL} - * scalar, default to false. 
If True, the centers of the 4 corner - * pixels of the input and output tensors are aligned, preserving the - * values at the corner pixels. - * Available since HAL version 1.3. - * * 5: Half pixel centers. An optional {@link OperandType::BOOL} - * scalar, default to false. If True, the pixel centers are assumed to - * be at (0.5, 0.5). This is the default behavior of image.resize in - * TF 2.0. If this parameter is True, then align_corners parameter - * must be False. - * Available since HAL version 1.3. - * - * Outputs: - * * 0: The output 4-D tensor, of shape - * [batches, new_height, new_width, depth]. - * For a {@link OperandType::TENSOR_QUANT8_ASYMM} and - * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor, - * the scale and zeroPoint must be the same as input0. - */ - RESIZE_NEAREST_NEIGHBOR = 94, - - /** - * Quantized version of {@link OperationType::LSTM}. - * - * The input and the output use asymmetric quantized types, while the rest - * use symmetric ones. - * - * Inputs: - * * 0: The input to the LSTM cell. - * Type: {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} - * Shape: [batchSize, inputSize] - * * 1: The input-to-input weights. Optional. - * Type: {@link OperandType::TENSOR_QUANT8_SYMM} - * Shape: [numUnits, inputSize] - * * 2: The input-to-forget weights. - * Type: {@link OperandType::TENSOR_QUANT8_SYMM} - * Shape: [numUnits, inputSize] - * * 3: The input-to-cell weights. - * Type: {@link OperandType::TENSOR_QUANT8_SYMM} - * Shape: [numUnits, inputSize] - * * 4: The input-to-output weights. - * Type: {@link OperandType::TENSOR_QUANT8_SYMM} - * Shape: [numUnits, inputSize] - * * 5: The recurrent-to-input weights. Optional. - * Type: {@link OperandType::TENSOR_QUANT8_SYMM} - * Shape: [numUnits, outputSize] - * * 6: The recurrent-to-forget weights. - * Type: {@link OperandType::TENSOR_QUANT8_SYMM} - * Shape: [numUnits, outputSize] - * * 7: The recurrent-to-cell weights. 
- * Type: {@link OperandType::TENSOR_QUANT8_SYMM} - * Shape: [numUnits, outputSize] - * * 8: The recurrent-to-output weights. - * Type: {@link OperandType::TENSOR_QUANT8_SYMM} - * Shape: [numUnits, outputSize] - * * 9: The cell-to-input weights (for peephole). Optional. - * Type: {@link OperandType::TENSOR_QUANT16_SYMM} - * Shape: [numUnits] - * * 10: The cell-to-forget weights (for peephole). Optional. - * Type: {@link OperandType::TENSOR_QUANT16_SYMM} - * Shape: [numUnits] - * * 11: The cell-to-output weights (for peephole). Optional. - * Type: {@link OperandType::TENSOR_QUANT16_SYMM} - * Shape: [numUnits] - * * 12: The input gate bias. Quantized with scale being the - * product of input and weights scales and zeroPoint equal to 0. - * Optional. - * Type: {@link OperandType::TENSOR_INT32} - * Shape: [numUnits] - * * 13: The forget gate bias. Quantized with scale being the - * product of input and weights scales and zeroPoint equal to 0. - * Type: {@link OperandType::TENSOR_INT32} - * Shape: [numUnits] - * * 14: The cell bias. Quantized with scale being the - * product of input and weights scales and zeroPoint equal to 0. - * Type: {@link OperandType::TENSOR_INT32} - * Shape: [numUnits] - * * 15: The output gate bias. Quantized with scale being the - * product of input and weights scales and zeroPoint equal to 0. - * Type: {@link OperandType::TENSOR_INT32} - * Shape: [numUnits] - * * 16: The projection weights. Optional. - * Type: {@link OperandType::TENSOR_QUANT8_SYMM} - * Shape: [outputSize, numUnits] - * * 17: The projection bias. Quantized with scale being the - * product of input and weights scales and zeroPoint equal to 0. - * Optional. - * Type: {@link OperandType::TENSOR_INT32} - * Shape: [outputSize] - * * 18: The output from the previous time step. - * Type: {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} - * Shape: [batchSize, outputSize] - * * 19: The cell state from the previous time step. 
- * Type: {@link OperandType::TENSOR_QUANT16_SYMM} - * Shape: [batchSize, numUnits] - * * 20: The input layer normalization weights. Used to rescale - * normalized inputs to activation at input gate. Optional. - * Type: {@link OperandType::TENSOR_QUANT16_SYMM} - * Shape: [numUnits] - * * 21: The forget layer normalization weights. Used to - * rescale normalized inputs to activation at forget gate. Optional. - * Type: {@link OperandType::TENSOR_QUANT16_SYMM} - * Shape: [numUnits] - * * 22: The cell layer normalization weights. Used to rescale - * normalized inputs to activation at cell gate. Optional. - * Type: {@link OperandType::TENSOR_QUANT16_SYMM} - * Shape: [numUnits] - * * 23: The output layer normalization weights. Used to - * rescale normalized inputs to activation at output gate. Optional. - * Type: {@link OperandType::TENSOR_QUANT16_SYMM} - * Shape: [numUnits] - * * 24: The cell clip. If provided the cell state is clipped - * by this value prior to the cell output activation. Optional. - * Type: {@link OperandType::FLOAT32}. - * * 25: The projection clip. If provided and projection is enabled, - * this is used for clipping the projected values. Optional. - * Type: {@link OperandType::FLOAT32}. - * * 26: The scale of the intermediate result of matmul, - * i.e. input to layer normalization, at input gate. - * Type: {@link OperandType::FLOAT32}. - * * 27: The scale of the intermediate result of matmul, - * i.e. input to layer normalization, at forget gate. - * Type: {@link OperandType::FLOAT32}. - * * 28: The scale of the intermediate result of matmul, - * i.e. input to layer normalization, at cell gate. - * Type: {@link OperandType::FLOAT32}. - * * 29: The scale of the intermediate result of matmul, - * i.e. input to layer normalization, at output gate. - * Type: {@link OperandType::FLOAT32}. - * * 30: The zero point of the hidden state, i.e. input to - * projection. - * Type: {@link OperandType::INT32}. - * * 31: The scale of the hidden state, i.e. 
input to - * projection. - * Type: {@link OperandType::FLOAT32}. - * - * Outputs: - * * 0: The output state (out). - * Type: {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} - * Shape: [batchSize, outputSize] - * * 1: The cell state (out). - * Type: {@link OperandType::TENSOR_QUANT16_SYMM} - * Shape: [batchSize, numUnits] - * * 2: The output. This is effectively the same as the current - * "output state (out)" value. - * Type: {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} - * Shape: [batchSize, outputSize] - */ - QUANTIZED_LSTM = 95, - - /** - * Executes one of the two referenced subgraphs as determined by a boolean - * value. - * - * The inputs and outputs of the two referenced subgraphs must agree with the - * signature of this operation. That is, if the operation has (3 + n) inputs - * and m outputs, both subgraphs must have n inputs and m outputs with the same - * types, ranks, dimensions, scales, - * zeroPoints, and extraParams as the corresponding operation - * inputs and outputs. - * All of the operands mentioned must have fully specified dimensions. - * - * Inputs: - * * 0: A value of type {@link OperandType::TENSOR_BOOL8} and shape [1] - * that determines which of the two referenced subgraphs to execute. - * The operand must have fully specified dimensions. - * * 1: A {@link OperandType::SUBGRAPH} reference to the subgraph to be - * executed if the condition is true. - * * 2: A {@link OperandType::SUBGRAPH} reference to the subgraph to be - * executed if the condition is false. - * * 3 ~ (n + 2): Inputs to be passed to the subgraph selected for execution. - * - * Outputs: - * * 0 ~ (m - 1): Outputs produced by the selected subgraph. - */ - IF = 96, - - /** - * Executes the body subgraph until the condition subgraph outputs false. - * - * The inputs to this operation are the condition subgraph, the body subgraph, - * and operand values for the first iteration of the loop. 
The values are - * implicitly split into three groups of input-output, state-only, and - * input-only values, as described below. - * - * The outputs of this operation are the final values of input-output - * operands. - * - * Both the condition and body subgraph receive (m + k + n) inputs. - * * The first m (m >= 1) inputs are input-output operands. For the first - * iteration, these are initialized from the corresponding inputs of the - * WHILE operation. In subsequent iterations, their values come from the - * corresponding outputs of the body subgraph produced during the previous - * iteration. - * * The next k (k >= 0) inputs are state-only operands. They are similar to - * the input-output operands, except that their values are no longer - * available after the loop terminates. - * * The last n (n >= 0) inputs are input-only operands. Their values come - * from the corresponding inputs of the WHILE operation. - * - * The body subgraph produces (m + k) outputs. - * * The first m outputs are input-output operands. They become the outputs - * of the WHILE operation when a termination condition is reached. - * * The last k outputs are state-only operands. Their values are no longer - * available after the loop terminates. - * - * The numbers m, k, and n are inferred by the driver as follows: - * m = (WHILE operation output count) - * k = (body subgraph output count) - m - * n = (body subgraph input count) - m - k - * - * The pseudo-code below illustrates the flow of a WHILE operation with - * inputs condition, body, initial_input_output, initial_state, input_only - * (m = 1, k = 1, n = 1): - * - * input_output = initial_input_output - * state = initial_state - * while condition(input_output, state, input_only): - * input_output, state = body(input_output, state, input_only) - * return input_output - * - * Inputs: - * * 0: A {@link OperandType::SUBGRAPH} reference to the condition - * subgraph. 
The subgraph must have (m + k + n) inputs with - * the same types, ranks, dimensions, - * scales, zeroPoints, and extraParams as the - * corresponding inputs of the WHILE operation and exactly one output - * of {@link OperandType::TENSOR_BOOL8} and shape [1]. - * All of the operands mentioned must have fully specified dimensions. - * * 1: A {@link OperandType::SUBGRAPH} reference to the body subgraph. - * The subgraph must have (m + k + n) inputs and (m + k) outputs with - * the same types, ranks, dimensions, - * scales, zeroPoints, and extraParams as the - * corresponding inputs and outputs of the WHILE operation. - * All of the operands mentioned must have fully specified dimensions. - * * (m inputs): Initial values for input-output operands. - * * (k inputs): Initial values for state-only operands. - * * (n inputs): Values for input-only operands. - * - * Outputs: - * * 0 ~ (m - 1): Outputs produced by the loop. - */ - WHILE = 97, - - /** - * Computes exponential linear activation on the input tensor element-wise. - * - * The output is calculated using the following formula: - * - * ELU(x) = max(0, x) + min(0, alpha * (exp(x) - 1)) - * - * Supported tensor {@link OperandType}: - * * {@link OperandType::TENSOR_FLOAT16} - * * {@link OperandType::TENSOR_FLOAT32} - * - * Supported tensor rank: from 1. - * - * Inputs: - * * 0: A tensor, specifying the input. May be zero-sized. - * * 1: A scalar, specifying the alpha parameter. - * For input tensor of {@link OperandType::TENSOR_FLOAT16}, - * the alpha value must be of {@link OperandType::FLOAT16}. - * For input tensor of {@link OperandType::TENSOR_FLOAT32}, - * the alpha value must be of {@link OperandType::FLOAT32}. - * - * Outputs: - * * 0: The output tensor of same shape and type as input0. - */ - ELU = 98, - - /** - * Computes hard-swish activation on the input tensor element-wise. 
- * - * Hard swish activation is introduced in - * https://arxiv.org/pdf/1905.02244.pdf - * - * The output is calculated using the following formula: - * - * h-swish(x) = x * max(0, min(6, (x + 3))) / 6 - - * Supported tensor {@link OperandType}: - * * {@link OperandType::TENSOR_FLOAT16} - * * {@link OperandType::TENSOR_FLOAT32} - * * {@link OperandType::TENSOR_QUANT8_ASYMM} - * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} - * - * Supported tensor rank: from 1. - * - * Inputs: - * * 0: A tensor, specifying the input. May be zero-sized. - * - * Outputs: - * * 0: The output tensor of same shape and type as input0. - * Scale and zero point of this tensor may be different from the input - * tensor's parameters. - */ - HARD_SWISH = 99, - - /** - * Creates a tensor filled with a scalar value. - * - * Supported output tensor {@link OperandType}: - * * {@link OperandType::TENSOR_FLOAT16} - * * {@link OperandType::TENSOR_FLOAT32} - * * {@link OperandType::TENSOR_INT32} - * - * Supported tensor rank: from 1. - * - * Inputs: - * * 0: A 1-D tensor, specifying the desired output tensor shape. - * * 1: A scalar, specifying the value to fill the output tensors with. - * For output tensor of {@link OperandType::TENSOR_FLOAT16}, - * the scalar must be of {@link OperandType::FLOAT16}. - * For output tensor of {@link OperandType::TENSOR_FLOAT32}, - * the scalar must be of {@link OperandType::FLOAT32}. - * For output tensor of {@link OperandType::TENSOR_INT32}, - * the scalar must be of {@link OperandType::INT32}. - * - * Outputs: - * * 0: The output tensor. - */ - FILL = 100, - - /** - * Returns the rank of a tensor. - * - * The rank of a tensor is the number of dimensions in it. Also known as - * "order", "degree", "ndims". 
- * - * Supported tensor {@link OperandType}: - * * {@link OperandType::TENSOR_FLOAT16} - * * {@link OperandType::TENSOR_FLOAT32} - * * {@link OperandType::TENSOR_INT32} - * * {@link OperandType::TENSOR_QUANT8_ASYMM} - * * {@link OperandType::TENSOR_QUANT16_SYMM} - * * {@link OperandType::TENSOR_BOOL8} - * * {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL} - * * {@link OperandType::TENSOR_QUANT16_ASYMM} - * * {@link OperandType::TENSOR_QUANT8_SYMM} - * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} - * - * Supported tensor rank: from 1. - * - * Inputs: - * * 0: The input tensor. - * - * Outputs: - * * 0: A scalar of {@link OperandType::INT32}, specifying the rank - * of the input tensor. - */ - RANK = 101, - - /** - * DEPRECATED. Since HAL version 1.2, extensions are the preferred - * alternative to OEM operation and data types. - * - * This operation is OEM specific. It should only be used for OEM - * applications. - */ - OEM_OPERATION = 10000, -}; - -} // namespace android::nn - -#endif // ANDROID_FRAMEWORKS_ML_NN_COMMON_NNAPI_OPERATION_TYPES_H
diff --git a/common/include/nnapi/Result.h b/common/include/nnapi/Result.h deleted file mode 100644 index 157eb78..0000000 --- a/common/include/nnapi/Result.h +++ /dev/null
@@ -1,158 +0,0 @@ -/* - * Copyright (C) 2020 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef ANDROID_FRAMEWORKS_ML_NN_COMMON_NNAPI_RESULT_H -#define ANDROID_FRAMEWORKS_ML_NN_COMMON_NNAPI_RESULT_H - -#include <android-base/expected.h> - -#include <optional> -#include <sstream> -#include <string> -#include <tuple> -#include <utility> - -namespace android::nn { - -/** - * Type alias for `::android::base::expected` where the unexpected state is represented by a - * std::string describing the error. - * - * See the following file for more information on ::android::base::expected: - * system/libbase/include/android-base/expected.h - */ -template <typename Type> -using Result = base::expected<Type, std::string>; - -namespace detail { - -template <typename... Ts> -class ErrorBuilder { - public: - template <typename... Us> - explicit ErrorBuilder(Us&&... args) : mArgs(std::forward<Us>(args)...) {} - - template <typename T, typename E> - operator base::expected<T, E>() /* NOLINT(google-explicit-constructor) */ { - return std::apply( - [this](Ts&&... 
args) { - return base::unexpected<E>(E{std::move(mStream).str(), std::move(args)...}); - }, - std::move(mArgs)); - } - - template <typename T> - ErrorBuilder operator<<(const T& t) { - mStream << t; - return std::move(*this); - } - - private: - std::tuple<Ts...> mArgs; - std::ostringstream mStream; -}; - -} // namespace detail - -/** - * Creates an error builder for the case where no arguments are provided. - */ -template <typename... Types> -inline detail::ErrorBuilder<std::decay_t<Types>...> error(Types&&... args) { - return detail::ErrorBuilder<std::decay_t<Types>...>(std::forward<Types>(args)...); -} - -/** - * Helper macro that will create an error builder already populated with the file name and line - * number. - * - * This macro uses the following customization points: - * * `::android::nn::error` is a set of functions that can be customized to return a specialized - * error builder object. Customization is based on the types of arguments passed and the number - * of arguments passed to `error`. - * - * Usage at error site: - * if (errorDetected) { - * return NN_ERROR() << "<error_message>"; - * } - * return <regular_return_value>; - */ -#define NN_ERROR(...) 
\ - [&] { \ - using ::android::nn::error; \ - return error(__VA_ARGS__) << __FILE__ << ":" << __LINE__ << ": "; \ - }() - -template <typename T, typename E> -bool nnTryHasValue(const base::expected<T, E>& o) { - return o.has_value(); -} - -template <typename T, typename E> -T nnTryGetValue(base::expected<T, E> o) { - return std::move(o).value(); -} - -template <typename T, typename E> -base::unexpected<E> nnTryGetError(base::expected<T, E> o) { - return base::unexpected(std::move(o).error()); -} - -template <typename T> -bool nnTryHasValue(const std::optional<T>& o) { - return o.has_value(); -} - -template <typename T> -T nnTryGetValue(std::optional<T> o) { - return std::move(o).value(); -} - -template <typename T> -std::nullopt_t nnTryGetError(std::optional<T> /*o*/) { - return std::nullopt; -} - -/** - * A macro that will exit from the current function if `expr` is unexpected or return the expected - * value from the macro if `expr` is expected. - * - * This macro can currently be used on `::android::nn::Result`, `::android::base::expected`, or - * `std::optional` values. 
To enable this macro to be used with other values, implement the - * following functions for the type: - * * `::android::nn::nnTryHasValue` returns `true` if the `expr` holds a successful value, false if - * the `expr` value holds an error - * * `::android::nn::nnTryGetError` returns the successful value of `expr` or crashes - * * `::android::nn::nnTryGetValue` returns the error value of `expr` or crashes - * - * Usage at call site: - * const auto [a, b, c] = NN_TRY(failableFunction(args)); - */ -#define NN_TRY(expr) \ - ({ \ - using ::android::nn::nnTryHasValue; \ - using ::android::nn::nnTryGetValue; \ - using ::android::nn::nnTryGetError; \ - auto nnTryTemporaryResult = expr; \ - if (!nnTryHasValue(nnTryTemporaryResult)) { \ - return nnTryGetError(std::move(nnTryTemporaryResult)); \ - } \ - nnTryGetValue(std::move(nnTryTemporaryResult)); \ - }) - -} // namespace android::nn - -#endif // ANDROID_FRAMEWORKS_ML_NN_COMMON_NNAPI_RESULT_H
diff --git a/common/include/nnapi/SharedMemory.h b/common/include/nnapi/SharedMemory.h deleted file mode 100644 index 27b2ca2..0000000 --- a/common/include/nnapi/SharedMemory.h +++ /dev/null
@@ -1,113 +0,0 @@ -/* - * Copyright (C) 2020 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef ANDROID_FRAMEWORKS_ML_NN_COMMON_NNAPI_SHARED_MEMORY_H -#define ANDROID_FRAMEWORKS_ML_NN_COMMON_NNAPI_SHARED_MEMORY_H - -#include <android-base/unique_fd.h> - -#include <any> -#include <memory> -#include <optional> -#include <string> -#include <variant> -#include <vector> - -#include "nnapi/Result.h" -#include "nnapi/Types.h" - -namespace android::nn { - -class MutableMemoryBuilder { - public: - explicit MutableMemoryBuilder(uint32_t poolIndex); - - DataLocation append(size_t length, size_t alignment = kMinMemoryAlignment, - size_t padding = kMinMemoryPadding); - bool empty() const; - - GeneralResult<SharedMemory> finish(); - - private: - uint32_t mPoolIndex; - size_t mSize = 0; -}; - -class ConstantMemoryBuilder { - public: - explicit ConstantMemoryBuilder(uint32_t poolIndex); - - DataLocation append(const void* data, size_t length); - bool empty() const; - - GeneralResult<SharedMemory> finish(); - - private: - struct LazyCopy { - const void* data; - size_t length; - size_t offset; - }; - - MutableMemoryBuilder mBuilder; - std::vector<LazyCopy> mSlices; -}; - -GeneralResult<base::unique_fd> dupFd(int fd); - -// Precondition: `*ForwardFdIt` must be convertible to `int` -template <typename ForwardFdIt> -GeneralResult<std::vector<base::unique_fd>> dupFds(ForwardFdIt first, ForwardFdIt last) { - 
std::vector<base::unique_fd> fds; - fds.reserve(std::distance(first, last)); - for (; first != last; ++first) { - const int fd = *first; - fds.push_back(NN_TRY(dupFd(fd))); - } - return fds; -} - -// Precondition: size > 0 -GeneralResult<SharedMemory> createSharedMemory(size_t size); - -// Duplicates `fd` and takes ownership of the duplicate. -// Precondition: size > 0 -GeneralResult<SharedMemory> createSharedMemoryFromFd(size_t size, int prot, int fd, size_t offset); - -// Precondition: ahwb != nullptr -GeneralResult<SharedMemory> createSharedMemoryFromAHWB(AHardwareBuffer* ahwb, bool takeOwnership); - -// Precondition: memory != nullptr -size_t getSize(const SharedMemory& memory); - -bool isAhwbBlob(const Memory::HardwareBuffer& memory); - -// Precondition: memory != nullptr -bool isAhwbBlob(const SharedMemory& memory); - -struct Mapping { - std::variant<const void*, void*> pointer; - size_t size; - std::any context; -}; - -GeneralResult<Mapping> map(const SharedMemory& memory); - -bool flush(const Mapping& mapping); - -} // namespace android::nn - -#endif // ANDROID_FRAMEWORKS_ML_NN_COMMON_NNAPI_SHARED_MEMORY_H
diff --git a/common/include/nnapi/TypeUtils.h b/common/include/nnapi/TypeUtils.h deleted file mode 100644 index 78706c7..0000000 --- a/common/include/nnapi/TypeUtils.h +++ /dev/null
@@ -1,249 +0,0 @@ -/* - * Copyright (C) 2020 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef ANDROID_FRAMEWORKS_ML_NN_COMMON_NNAPI_TYPE_UTILS_H -#define ANDROID_FRAMEWORKS_ML_NN_COMMON_NNAPI_TYPE_UTILS_H - -#include <android-base/logging.h> -#include <android-base/macros.h> - -#include <ostream> -#include <utility> -#include <vector> - -#include "nnapi/OperandTypes.h" -#include "nnapi/OperationTypes.h" -#include "nnapi/Result.h" -#include "nnapi/Types.h" - -namespace android::nn { - -enum class HalVersion : int32_t { - UNKNOWN, - V1_0, - V1_1, - V1_2, - V1_3, - AIDL_UNSTABLE, - LATEST = V1_3, -}; - -bool isExtension(OperandType type); -bool isExtension(OperationType type); - -bool isNonExtensionScalar(OperandType operandType); - -size_t getNonExtensionSize(OperandType operandType); - -inline uint16_t getExtensionPrefix(uint32_t type) { - return static_cast<uint16_t>(type >> kExtensionTypeBits); -} - -inline uint16_t getTypeWithinExtension(uint32_t type) { - return static_cast<uint16_t>(type & kTypeWithinExtensionMask); -} - -std::optional<size_t> getNonExtensionSize(OperandType operandType, const Dimensions& dimensions); -std::optional<size_t> getNonExtensionSize(const Operand& operand); - -size_t getOffsetFromInts(int lower, int higher); -std::pair<int32_t, int32_t> getIntsFromOffset(size_t offset); - -Result<std::vector<uint32_t>> countNumberOfConsumers(size_t numberOfOperands, - const 
std::vector<nn::Operation>& operations); - -// Combine two tensor dimensions, both may have unspecified dimensions or rank. -Result<Dimensions> combineDimensions(const Dimensions& lhs, const Dimensions& rhs); - -// Returns the operandValues's size and a size for each pool in the provided model. -std::pair<size_t, std::vector<size_t>> getMemorySizes(const Model& model); - -// Round up "size" to the nearest multiple of "multiple". "multiple" must be a power of 2. -size_t roundUp(size_t size, size_t multiple); - -// Returns the alignment for data of the specified length. It aligns object of length: -// 2, 3 on a 2 byte boundary, -// 4+ on a 4 byte boundary. -// We may want to have different alignments for tensors. -// TODO: This is arbitrary, more a proof of concept. We need to determine what this should be. -// -// Note that Types.cpp ensures `new` has sufficient alignment for all alignments returned by this -// function. If this function is changed to return different alignments (e.g., 8 byte boundary -// alignment), the code check in Types.cpp similarly needs to be updated. -size_t getAlignmentForLength(size_t length); - -// Set of output utility functions. 
-std::ostream& operator<<(std::ostream& os, const DeviceStatus& deviceStatus); -std::ostream& operator<<(std::ostream& os, const ExecutionPreference& executionPreference); -std::ostream& operator<<(std::ostream& os, const DeviceType& deviceType); -std::ostream& operator<<(std::ostream& os, const MeasureTiming& measureTiming); -std::ostream& operator<<(std::ostream& os, const OperandType& operandType); -std::ostream& operator<<(std::ostream& os, const Operand::LifeTime& lifetime); -std::ostream& operator<<(std::ostream& os, const OperationType& operationType); -std::ostream& operator<<(std::ostream& os, const Request::Argument::LifeTime& lifetime); -std::ostream& operator<<(std::ostream& os, const Priority& priority); -std::ostream& operator<<(std::ostream& os, const ErrorStatus& errorStatus); -std::ostream& operator<<(std::ostream& os, const FusedActivationFunc& activation); -std::ostream& operator<<(std::ostream& os, const OutputShape& outputShape); -std::ostream& operator<<(std::ostream& os, const Timing& timing); -std::ostream& operator<<(std::ostream& os, const Capabilities::PerformanceInfo& performanceInfo); -std::ostream& operator<<(std::ostream& os, - const Capabilities::OperandPerformance& operandPerformance); -std::ostream& operator<<(std::ostream& os, - const Capabilities::OperandPerformanceTable& operandPerformances); -std::ostream& operator<<(std::ostream& os, const Capabilities& capabilities); -std::ostream& operator<<(std::ostream& os, - const Extension::OperandTypeInformation& operandTypeInformation); -std::ostream& operator<<(std::ostream& os, const Extension& extension); -std::ostream& operator<<(std::ostream& os, const DataLocation& location); -std::ostream& operator<<(std::ostream& os, - const Operand::SymmPerChannelQuantParams& symmPerChannelQuantParams); -std::ostream& operator<<(std::ostream& os, const Operand::ExtraParams& extraParams); -std::ostream& operator<<(std::ostream& os, const Operand& operand); -std::ostream& 
operator<<(std::ostream& os, const Operation& operation); -std::ostream& operator<<(std::ostream& os, const SharedHandle& handle); -std::ostream& operator<<(std::ostream& os, const Memory& memory); -std::ostream& operator<<(std::ostream& os, const SharedMemory& memory); -std::ostream& operator<<(std::ostream& os, const MemoryPreference& memoryPreference); -std::ostream& operator<<(std::ostream& os, const Model::Subgraph& subgraph); -std::ostream& operator<<(std::ostream& os, const Model::OperandValues& operandValues); -std::ostream& operator<<(std::ostream& os, - const Model::ExtensionNameAndPrefix& extensionNameAndPrefix); -std::ostream& operator<<(std::ostream& os, const Model& model); -std::ostream& operator<<(std::ostream& os, const BufferDesc& bufferDesc); -std::ostream& operator<<(std::ostream& os, const BufferRole& bufferRole); -std::ostream& operator<<(std::ostream& os, const Request::Argument& requestArgument); -std::ostream& operator<<(std::ostream& os, const Request::MemoryPool& memoryPool); -std::ostream& operator<<(std::ostream& os, const Request& request); -std::ostream& operator<<(std::ostream& os, const SyncFence::FenceState& fenceState); -std::ostream& operator<<(std::ostream& os, const TimePoint& timePoint); -std::ostream& operator<<(std::ostream& os, const OptionalTimePoint& optionalTimePoint); -std::ostream& operator<<(std::ostream& os, const Duration& timeoutDuration); -std::ostream& operator<<(std::ostream& os, const OptionalDuration& optionalTimeoutDuration); -std::ostream& operator<<(std::ostream& os, const Version& version); -std::ostream& operator<<(std::ostream& os, const HalVersion& halVersion); - -bool operator==(const Timing& a, const Timing& b); -bool operator!=(const Timing& a, const Timing& b); -bool operator==(const Capabilities::PerformanceInfo& a, const Capabilities::PerformanceInfo& b); -bool operator!=(const Capabilities::PerformanceInfo& a, const Capabilities::PerformanceInfo& b); -bool operator==(const 
Capabilities::OperandPerformance& a, - const Capabilities::OperandPerformance& b); -bool operator!=(const Capabilities::OperandPerformance& a, - const Capabilities::OperandPerformance& b); -bool operator==(const Capabilities& a, const Capabilities& b); -bool operator!=(const Capabilities& a, const Capabilities& b); -bool operator==(const Extension::OperandTypeInformation& a, - const Extension::OperandTypeInformation& b); -bool operator!=(const Extension::OperandTypeInformation& a, - const Extension::OperandTypeInformation& b); -bool operator==(const Extension& a, const Extension& b); -bool operator!=(const Extension& a, const Extension& b); -bool operator==(const MemoryPreference& a, const MemoryPreference& b); -bool operator!=(const MemoryPreference& a, const MemoryPreference& b); -bool operator==(const Operand::SymmPerChannelQuantParams& a, - const Operand::SymmPerChannelQuantParams& b); -bool operator!=(const Operand::SymmPerChannelQuantParams& a, - const Operand::SymmPerChannelQuantParams& b); -bool operator==(const Operand& a, const Operand& b); -bool operator!=(const Operand& a, const Operand& b); -bool operator==(const Operation& a, const Operation& b); -bool operator!=(const Operation& a, const Operation& b); - -// The NN_RET_CHECK family of macros defined below is similar to the CHECK family defined in -// system/libbase/include/android-base/logging.h -// -// The difference is that NN_RET_CHECK macros use LOG(ERROR) instead of LOG(FATAL) -// and return false instead of aborting. - -// Logs an error and returns false. Append context using << after. For example: -// -// NN_RET_CHECK_FAIL() << "Something went wrong"; -// -// The containing function must return a bool. -#define NN_RET_CHECK_FAIL() \ - return ::android::nn::FalseyErrorStream() \ - << "NN_RET_CHECK failed (" << __FILE__ << ":" << __LINE__ << "): " - -// Logs an error and returns false if condition is false. Extra logging can be appended using << -// after. 
For example: -// -// NN_RET_CHECK(false) << "Something went wrong"; -// -// The containing function must return a bool. -#define NN_RET_CHECK(condition) \ - while (UNLIKELY(!(condition))) NN_RET_CHECK_FAIL() << #condition << " " - -// Helper for NN_CHECK_xx(x, y) macros. -#define NN_RET_CHECK_OP(LHS, RHS, OP) \ - for (auto _values = ::android::base::MakeEagerEvaluator(LHS, RHS); \ - UNLIKELY(!(_values.lhs.v OP _values.rhs.v)); \ - /* empty */) \ - NN_RET_CHECK_FAIL() \ - << #LHS << " " << #OP << " " << #RHS << " (" << #LHS << " = " \ - << ::android::base::LogNullGuard<decltype(_values.lhs.v)>::Guard(_values.lhs.v) \ - << ", " << #RHS << " = " \ - << ::android::base::LogNullGuard<decltype(_values.rhs.v)>::Guard(_values.rhs.v) \ - << ") " - -// Logs an error and returns false if a condition between x and y does not hold. Extra logging can -// be appended using << after. For example: -// -// NN_RET_CHECK_EQ(a, b) << "Something went wrong"; -// -// The values must implement the appropriate comparison operator as well as -// `operator<<(std::ostream&, ...)`. -// The containing function must return a bool. -#define NN_RET_CHECK_EQ(x, y) NN_RET_CHECK_OP(x, y, ==) -#define NN_RET_CHECK_NE(x, y) NN_RET_CHECK_OP(x, y, !=) -#define NN_RET_CHECK_LE(x, y) NN_RET_CHECK_OP(x, y, <=) -#define NN_RET_CHECK_LT(x, y) NN_RET_CHECK_OP(x, y, <) -#define NN_RET_CHECK_GE(x, y) NN_RET_CHECK_OP(x, y, >=) -#define NN_RET_CHECK_GT(x, y) NN_RET_CHECK_OP(x, y, >) - -// Ensure that every user of FalseyErrorStream is linked to the -// correct instance, using the correct LOG_TAG -namespace { - -// A wrapper around LOG(ERROR) that can be implicitly converted to bool (always evaluates to false). -// Used to implement stream logging in NN_RET_CHECK. 
-class FalseyErrorStream { - DISALLOW_COPY_AND_ASSIGN(FalseyErrorStream); - - public: - FalseyErrorStream() {} - - template <typename T> - FalseyErrorStream& operator<<(const T& value) { - mBuffer << value; - return *this; - } - - ~FalseyErrorStream() { LOG(ERROR) << mBuffer.str(); } - - operator bool() const { return false; } - - operator Result<Version>() const { return error() << mBuffer.str(); } - - private: - std::ostringstream mBuffer; -}; - -} // namespace - -} // namespace android::nn - -#endif // ANDROID_FRAMEWORKS_ML_NN_COMMON_NNAPI_TYPE_UTILS_H
diff --git a/common/include/nnapi/Types.h b/common/include/nnapi/Types.h deleted file mode 100644 index 4b4514e..0000000 --- a/common/include/nnapi/Types.h +++ /dev/null
@@ -1,1001 +0,0 @@ -/* - * Copyright (C) 2020 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef ANDROID_FRAMEWORKS_ML_NN_COMMON_NNAPI_TYPES_H -#define ANDROID_FRAMEWORKS_ML_NN_COMMON_NNAPI_TYPES_H - -#include <android-base/chrono_utils.h> -#include <android-base/expected.h> -#include <android-base/unique_fd.h> - -#include <array> -#include <chrono> -#include <limits> -#include <memory> -#include <optional> -#include <string> -#include <type_traits> -#include <utility> -#include <variant> -#include <vector> - -#include "nnapi/OperandTypes.h" -#include "nnapi/OperationTypes.h" -#include "nnapi/Result.h" - -// Forward declare AHardwareBuffer -extern "C" typedef struct AHardwareBuffer AHardwareBuffer; - -namespace android::nn { - -// Forward declarations - -class IBuffer; -class IBurst; -class IDevice; -class IExecution; -class IPreparedModel; -struct Memory; - -// Constants - -constexpr float kDefaultExecTime = std::numeric_limits<float>::max(); -constexpr float kDefaultPowerUsage = std::numeric_limits<float>::max(); -constexpr uint32_t kByteSizeOfCacheToken = 32; -constexpr uint32_t kMaxNumberOfCacheFiles = 32; - -/** - * Numeric values of extension operand and operation types have the - * following structure: - * - 16 high bits represent the "prefix", which corresponds uniquely to the - * extension name. - * - 16 low bits represent the type ID within the extension. 
- */ -constexpr uint8_t kExtensionTypeBits = 16; -constexpr uint8_t kExtensionPrefixBits = 16; -constexpr uint32_t kTypeWithinExtensionMask = 0xFFFF; - -constexpr uint32_t kDefaultRequestMemoryAlignment = 64; -constexpr uint32_t kDefaultRequestMemoryPadding = 64; -constexpr uint32_t kMinMemoryAlignment = alignof(std::max_align_t); -constexpr uint32_t kMinMemoryPadding = 1; -constexpr auto kLoopTimeoutDefault = std::chrono::seconds{2}; -constexpr auto kLoopTimeoutMaximum = std::chrono::seconds{15}; - -// Aliases - -using SharedBuffer = std::shared_ptr<const IBuffer>; -using SharedBurst = std::shared_ptr<const IBurst>; -using SharedDevice = std::shared_ptr<const IDevice>; -using SharedExecution = std::shared_ptr<const IExecution>; -using SharedMemory = std::shared_ptr<const Memory>; -using SharedPreparedModel = std::shared_ptr<const IPreparedModel>; - -// Canonical types - -/** - * Status of a device. - */ -enum class DeviceStatus { - AVAILABLE = 0, - BUSY = 1, - OFFLINE = 2, - UNKNOWN = 3, -}; - -/** - * Execution preferences. - */ -enum class ExecutionPreference { - /** - * Prefer executing in a way that minimizes battery drain. - * This is desirable for compilations that will be executed often. - */ - LOW_POWER = 0, - /** - * Prefer returning a single answer as fast as possible, even if this causes - * more power consumption. - */ - FAST_SINGLE_ANSWER = 1, - /** - * Prefer maximizing the throughput of successive frames, for example when - * processing successive frames coming from the camera. - */ - SUSTAINED_SPEED = 2, - DEFAULT = FAST_SINGLE_ANSWER, -}; - -/** - * Device types. - * - * The type of NNAPI device. - */ -enum class DeviceType { - /** The device type cannot be provided. */ - UNKNOWN = 0, - /** The device does not fall into any category below. */ - OTHER = 1, - /** The device runs NNAPI models on single or multi-core CPU. */ - CPU = 2, - /** The device can run NNAPI models and also accelerate graphics APIs such - * as OpenGL ES and Vulkan. 
*/ - GPU = 3, - /** Dedicated accelerator for Machine Learning workloads. */ - ACCELERATOR = 4, -}; - -/** - * Specifies whether or not to measure timing information during execution. - */ -enum class MeasureTiming { - NO = 0, - YES = 1, -}; - -/** - * Priority given to a prepared model for execution. - */ -enum class Priority { - LOW = 0, - MEDIUM = 1, - HIGH = 2, - DEFAULT = MEDIUM, -}; - -// TODO: Should more errors from NeuralNetworks.h be incorporated? The left name shows errors that -// appear in NeuralNetworks.h but not in the HAL, and the right column shows what these values could -// map to: -// * OUT_OF_MEMORY ==> GENERAL_FAILURE / RESOURCE_EXHAUSTED_* -// * INCOMPLETE ==> GENERAL_FAILURE -// * UNEXPECTED_NULL ==> INVALID_ARGUMENT -// * UNMAPPABLE ==> GENERAL_FAILURE -// * BAD_STATE ==> INVALID_ARGUMENT -enum class ErrorStatus { - NONE = 0, - DEVICE_UNAVAILABLE = 1, - GENERAL_FAILURE = 2, - OUTPUT_INSUFFICIENT_SIZE = 3, - INVALID_ARGUMENT = 4, - MISSED_DEADLINE_TRANSIENT = 5, - MISSED_DEADLINE_PERSISTENT = 6, - RESOURCE_EXHAUSTED_TRANSIENT = 7, - RESOURCE_EXHAUSTED_PERSISTENT = 8, - DEAD_OBJECT = 10000, -}; - -struct GeneralError { - std::string message; - ErrorStatus code = ErrorStatus::GENERAL_FAILURE; -}; - -template <typename Type> -using GeneralResult = base::expected<Type, GeneralError>; - -/** - * Fused activation function types. - */ -enum class FusedActivationFunc : int32_t { - /** NO fused activation function. */ - NONE = 0, - /** Fused ReLU activation function. */ - RELU = 1, - /** Fused ReLU1 activation function. */ - RELU1 = 2, - /** Fused ReLU6 activation function. */ - RELU6 = 3, -}; - -using Dimension = uint32_t; -using Dimensions = std::vector<Dimension>; - -using CacheToken = std::array<uint8_t, kByteSizeOfCacheToken>; - -/** - * Describes the shape information of an output operand after execution. - */ -struct OutputShape { - /** - * Dimensions of the operand. 
- */ - std::vector<uint32_t> dimensions; - - /** - * Whether the provided buffer size is sufficient for the output. - */ - bool isSufficient = false; -}; - -struct ExecutionError { - std::string message; - ErrorStatus code = ErrorStatus::GENERAL_FAILURE; - // OutputShapes for code == OUTPUT_INSUFFICIENT_SIZE - std::vector<OutputShape> outputShapes = {}; -}; - -template <typename Type> -using ExecutionResult = base::expected<Type, ExecutionError>; - -/** - * The capabilities of a driver. - * - * This represents performance of non-extension operations. - * - * Performance of an operation other than {@link OperationType::IF} and - * {@link OperationType::WHILE} comes from the type of its first operand. - */ -struct Capabilities { - /** - * Performance information for the reference workload. - * - * Used by a driver to report its performance characteristics. - */ - struct PerformanceInfo { - /** - * Ratio of the time taken by the driver to execute the - * workload compared to the time the CPU would take for the - * same workload. A lower number is better. - */ - float execTime = kDefaultExecTime; - - /** - * Ratio of the energy used by the driver compared to what - * the CPU would use for doing the same workload. A lower number - * is better. - */ - float powerUsage = kDefaultPowerUsage; - }; - - /** - * Driver performance when operating on a particular data type. - * In the case of float32 data, this is used when the calculations - * are not relaxed. 
- */ - struct OperandPerformance { - OperandType type{}; - PerformanceInfo info; - }; - - class OperandPerformanceTable { - public: - static Result<OperandPerformanceTable> create( - std::vector<OperandPerformance> operandPerformances); - - PerformanceInfo lookup(OperandType type) const; - const std::vector<OperandPerformance>& asVector() const; - - private: - explicit OperandPerformanceTable(std::vector<OperandPerformance> operandPerformances); - std::vector<OperandPerformance> mSorted; - }; - - /** - * Driver performance when operating on float32 data but performing - * calculations with range and/or precision as low as that of the IEEE - * 754 16-bit floating-point format. - */ - PerformanceInfo relaxedFloat32toFloat16PerformanceScalar; - PerformanceInfo relaxedFloat32toFloat16PerformanceTensor; - - /** - * Performance by operand type. Must be sorted by OperandType. - * - * If a particular {@link OperandType} is not present in operandPerformance, - * its performance is treated as - * { .execTime = FLT_MAX, .powerUsage = FLT_MAX }. - * - * Performance does not apply to {@link OperandType::SUBGRAPH}, and a driver - * must not report operand performance for {@link OperandType::SUBGRAPH}. - */ - OperandPerformanceTable operandPerformance; - - /** - * Performance of an {@link OperationType::IF} operation is the sum of - * {@link Capabilities::ifPerformance} and the mean of performance for the - * two branch subgraphs, where performance for a subgraph is the sum of the - * performance of all operations within the subgraph. - */ - PerformanceInfo ifPerformance; - - /** - * Performance of a {@link OperationType::WHILE} operation is the sum of - * {@link Capabilities::whilePerformance}, performance for the condition - * subgraph and performance for the body subgraph, where performance for a - * subgraph is the sum of the performance of all operations within the - * subgraph. - */ - PerformanceInfo whilePerformance; -}; - -/** - * Information about an extension. 
- */ -struct Extension { - /** - * Information about an extension operand type. - */ - struct OperandTypeInformation { - /** - * The extension operand type. - */ - uint16_t type = 0; - - /** - * Indicates whether the extension operand type represents a tensor or - * a scalar. - */ - bool isTensor = false; - - /** - * The byte size of the operand (if scalar) or of a single element (if - * tensor). - */ - uint32_t byteSize = 0; - }; - - /** - * The extension name. - * - * The name must consist of lowercase latin letters, numbers, periods, and - * underscore signs. The name must contain at least one period. - * - * The name must start with the reverse domain name of the vendor. - * - * Example: com.google.test_extension - */ - std::string name; - - /** - * Information about operand types defined by the extension. - */ - std::vector<OperandTypeInformation> operandTypes; -}; - -/** - * Describes one operation of the model's graph. - */ -struct Operation { - /** - * The operation type. - */ - OperationType type{}; - - /** - * Describes the table that contains the indexes of the inputs of the - * operation. The offset is the index in the operandIndexes table. - */ - std::vector<uint32_t> inputs; - - /** - * Describes the table that contains the indexes of the outputs of the - * operation. The offset is the index in the operandIndexes table. - */ - std::vector<uint32_t> outputs; -}; - -/** - * Describes the location of a data object. - */ -struct DataLocation { - /** - * The address of the memory where the data is found. - * - * This field is only active when lifetime is POINTER. - */ - std::variant<const void*, void*> pointer; - - /** - * The index of the memory pool where this location is found. - */ - uint32_t poolIndex = 0; - - /** - * Offset in bytes from the start of the pool. - */ - uint32_t offset = 0; - - /** - * The length of the data in bytes. - */ - uint32_t length = 0; - - /** - * The end padding of the specified memory region in bytes. 
- */ - uint32_t padding = 0; -}; - -/** - * Describes one operand of the model's graph. - */ -struct Operand { - /** - * How an operand is used. - */ - enum class LifeTime { - /** - * The operand is internal to the model. It's created by an operation and - * consumed by other operations. It must be an output operand of - * exactly one operation. - */ - TEMPORARY_VARIABLE = 0, - - /** - * The operand is an input of a subgraph. It must not be an output - * operand of any operation. - * - * An operand can't be both input and output of a subgraph. - */ - SUBGRAPH_INPUT = 1, - - /** - * The operand is an output of a subgraph. It must be an output - * operand of exactly one operation. - * - * An operand can't be both input and output of a subgraph. - */ - SUBGRAPH_OUTPUT = 2, - - /** - * The operand is a constant found in Model::operandValues. It must - * not be an output operand of any operation. - */ - CONSTANT_COPY = 3, - - /** - * The operand is a constant that was specified via a Memory - * object. It must not be an output operand of any operation. - */ - CONSTANT_REFERENCE = 4, - - /** - * The operand does not have a value. This is valid only for optional - * arguments of operations. - */ - NO_VALUE = 5, - - /** - * The operand is a reference to a subgraph. It must be an input to one - * or more {@link OperationType::IF} or {@link OperationType::WHILE} - * operations. - */ - SUBGRAPH = 6, - - /** - * This operand is a constant found in a user buffer. It must not be an - * output operand of any operation. - */ - POINTER = 7, - }; - - /** - * No additional parameters. - */ - using NoParams = std::monostate; - - /** - * Parameters for TENSOR_QUANT8_SYMM_PER_CHANNEL operand. - */ - struct SymmPerChannelQuantParams { - /** Array of scaling values for each channel. Each value must be greater than zero. */ - std::vector<float> scales; - /** Index of the channel dimension */ - uint32_t channelDim = 0; - }; - - /** - * Extension operand parameters. 
- * - * The framework treats this as an opaque data blob. - * The format is up to individual extensions. - */ - using ExtensionParams = std::vector<uint8_t>; - - /** - * Additional parameters specific to a particular operand type. - */ - using ExtraParams = std::variant<NoParams, SymmPerChannelQuantParams, ExtensionParams>; - - /** - * The data type. - * - * Besides the values listed in {@link OperationType}, any value equal or over - * (1 << kExtensionTypeBits) is possible and should be interpreted - * as an extension type according to {@link Model::extensionNameToPrefix}. - */ - OperandType type{}; - - /** - * Dimensions of the operand. - * - * For a scalar operand, dimensions.size() must be 0. - * - * A tensor operand with all dimensions specified has "fully - * specified" dimensions. Whenever possible (i.e., whenever the - * dimensions are known at model construction time), a tensor - * operand should have (but is not required to have) fully - * specified dimensions, in order to enable the best possible - * performance. - * - * If a tensor operand's dimensions are not fully specified, the - * dimensions of the operand are deduced from the operand - * dimensions and values of the operation for which that operand - * is an output or from the corresponding {@link OperationType::IF} or - * {@link OperationType::WHILE} operation input operand dimensions in the - * case of referenced subgraph input operands. - * - * In the following situations, a tensor operand's dimensions must - * be fully specified: - * - * - The operand has lifetime CONSTANT_COPY, CONSTANT_REFERENCE, or - * POINTER. - * - * - The operand has lifetime SUBGRAPH_INPUT and belongs to the main - * subgraph. Fully specified dimensions must either be present in the - * Operand or they must be provided in the corresponding - * RequestArgument. 
- * EXCEPTION: If the input is optional and omitted - * (by setting the hasNoValue field of the corresponding - * RequestArgument to true) then it need not have fully - * specified dimensions. - * - * A tensor operand with some number of unspecified dimensions is - * represented by setting each unspecified dimension to 0. - * - * A tensor operand with unspecified rank is represented by providing - * an empty dimensions vector. - */ - Dimensions dimensions; - - /** - * Quantized scale of the operand. - * - * Must be 0 when not applicable to an operand type. - * - * See {@link OperandType}. - */ - float scale = 0.0f; - - /** - * Quantized zero-point offset of the operand. - * - * Must be 0 when not applicable to an operand type. - * - * See {@link OperandType}. - */ - int32_t zeroPoint = 0; - - /** - * How the operand is used. - */ - LifeTime lifetime{}; - - /** - * Where to find the data for this operand. - * If the lifetime is TEMPORARY_VARIABLE, SUBGRAPH_INPUT, SUBGRAPH_OUTPUT, - * or NO_VALUE: - * - All the fields must be 0. - * If the lifetime is CONSTANT_COPY: - * - location.pointer is null. - * - location.poolIndex is 0. - * - location.offset is the offset in bytes into Model::operandValues. - * - location.length is set. - * - location.padding is 0. - * If the lifetime is CONSTANT_REFERENCE: - * - location.pointer is null. - * - location.poolIndex is set. - * - location.offset is the offset in bytes into the specified pool. - * - location.length is set. - * - location.padding is set. - * If the lifetime is SUBGRAPH: - * - location.pointer is null. - * - location.poolIndex is 0. - * - location.offset is the index of the referenced subgraph in - * {@link Model::referenced}. - * - location.length is 0. - * - location.padding is 0. - * If the lifetime is POINTER: - * - location.pointer is non-null. - * - location.poolIndex is 0. - * - location.offset is 0. - * - location.length is set. - * - location.padding is 0. 
- */ - DataLocation location; - - /** - * Additional parameters specific to a particular operand type. - */ - ExtraParams extraParams; -}; - -struct Handle { - std::vector<base::unique_fd> fds; - std::vector<int> ints; -}; - -using SharedHandle = std::shared_ptr<const Handle>; - -struct Memory { - struct Ashmem { - base::unique_fd fd; - size_t size; - }; - - struct Fd { - size_t size; - int prot; - base::unique_fd fd; - size_t offset; - }; - - // RAII wrapper for AHardwareBuffer - struct HardwareBuffer { - using Deleter = std::add_pointer_t<void(AHardwareBuffer*)>; - using Handle = std::unique_ptr<AHardwareBuffer, Deleter>; - Handle handle; - }; - - struct Unknown { - Handle handle; - size_t size; - std::string name; - }; - - std::variant<Ashmem, Fd, HardwareBuffer, Unknown> handle; -}; - -/** - * A Neural Network Model. - * - * This includes not only the execution graph, but also constant data such as - * weights or scalars added at construction time. The only information that - * may not be known is the shape of the input tensors. - */ -struct Model { - /** - * An excerpt of the execution graph. - */ - struct Subgraph { - /** - * All operands included in the subgraph. - */ - std::vector<Operand> operands; - - /** - * All operations included in the subgraph. - * - * The operations are sorted into execution order. Every operand - * with lifetime SUBGRAPH_OUTPUT or TEMPORARY_VARIABLE must be - * written before it is read. - */ - std::vector<Operation> operations; - - /** - * Input indexes of the subgraph. There must be at least one. - * - * Each value corresponds to the index of the operand in "operands". - */ - std::vector<uint32_t> inputIndexes; - - /** - * Output indexes of the subgraph. There must be at least one. - * - * Each value corresponds to the index of the operand in "operands". 
- */ - std::vector<uint32_t> outputIndexes; - }; - - class OperandValues { - public: - OperandValues(); - OperandValues(const uint8_t* data, size_t length); - - // Append a segment of memory (starting at `data` with `length` number of bytes) to the back - // of `OperandValues`, adding padding as necessary so that the appended data is aligned. - // Refer to `getAlignmentForLength` for more information on alignment (such as what the - // current alignments are for different data lengths). - DataLocation append(const uint8_t* data, size_t length); - - const uint8_t* data() const; - size_t size() const; - - private: - std::vector<uint8_t> mData; - }; - - /** - * A correspondence between an extension name and a prefix of operand and - * operation type values. - */ - struct ExtensionNameAndPrefix { - /** - * The extension name. - * - * See {@link Extension::name} for the format specification. - */ - std::string name; - - /** - * The unique extension identifier within the model. - * - * See {@link Model::extensionNameToPrefix}. - */ - uint16_t prefix = 0; - }; - - /** - * The top-level subgraph. - */ - Subgraph main; - - /** - * Referenced subgraphs. - * - * Each subgraph is referenced by the main subgraph or at least one other - * referenced subgraph. - * - * There must be no reference cycles. - */ - std::vector<Subgraph> referenced; - - /** - * A byte buffer containing operand data that were copied into the model. - * - * An operand's value must be located here if and only if Operand::lifetime - * equals Operand::LifeTime::CONSTANT_COPY. - */ - OperandValues operandValues; - - /** - * A collection of shared memory pools containing operand values. - * - * An operand's value must be located here if and only if Operand::lifetime - * equals Operand::LifeTime::CONSTANT_REFERENCE. - */ - std::vector<SharedMemory> pools; - - /** - * 'true' indicates TENSOR_FLOAT32 may be calculated with range and/or - * precision as low as that of the IEEE 754 16-bit floating-point format. 
- * 'false' indicates TENSOR_FLOAT32 must be calculated using at least the - * range and precision of the IEEE 754 32-bit floating-point format. - */ - bool relaxComputationFloat32toFloat16 = false; - - /** - * The mapping between extension names and prefixes of operand and - * operation type values. - * - * An operand or operation whose numeric type value is equal to or greater - * than (1 << kExtensionTypeBits) should be interpreted - * as an extension operand. The low - * {@link kExtensionTypeBits} bits of the value correspond to the type ID - * within the extension and the high {@link kExtensionPrefixBits} bits encode - * the "prefix", which maps uniquely to the extension name. - * - * For example, if a model contains an operation whose value is - * 0xAAAABBBB and extensionNameToPrefix contains an entry with - * prefix=0xAAAA and name="vendor.test.test_extension", then - * the operation should be interpreted as the operation 0xBBBB - * of the extension named vendor.test.test_extension. - * - * This is a one-to-one correspondence. That is, there must be at most one - * prefix corresponding to each extension name and at most one extension - * name corresponding to each prefix. - */ - std::vector<ExtensionNameAndPrefix> extensionNameToPrefix; -}; - -/** - * A buffer descriptor. Describes the properties of a buffer. - */ -struct BufferDesc { - /** - * Dimensions of the buffer. May have unknown dimensions or rank. A buffer with some number - * of unspecified dimensions is represented by setting each unspecified dimension to 0. A - * buffer with unspecified rank is represented by providing an empty dimensions vector. - */ - Dimensions dimensions; -}; - -/** - * Describes a role of an input or output to a prepared model. - */ -struct BufferRole { - /** - * The index of the IPreparedModel within the "preparedModel" argument passed in - * IDevice::allocate. - */ - uint32_t modelIndex = 0; - - /** - * The index of the input or output operand. 
- */ - uint32_t ioIndex = 0; - - /** - * A floating-point value within the range (0.0, 1.0]. Describes how likely the - * buffer is to be used in the specified role. This is provided as a hint to - * optimize the case when multiple roles prefer different buffer locations or data - * layouts. - */ - float probability = 0.0f; -}; - -/** - * Inputs to be sent to and outputs to be retrieved from a prepared model. - * - * A Request serves two primary tasks: - * 1) Provides the input and output data to be used when executing the model. - * 2) Specifies any updates to the input operand metadata that were left - * unspecified at model preparation time. - * - * An output must not overlap with any other output, with an input, or - * with an operand of lifetime CONSTANT_REFERENCE. - */ -struct Request { - /** - * Metadata information specifying the location of the input or output data and - * any updates to the input or output operand. - */ - struct Argument { - enum class LifeTime { - POOL = 0, - NO_VALUE = 1, - POINTER = 2, - }; - - LifeTime lifetime{}; - - /** - * The location within one of the memory pools passed in the Request. - */ - DataLocation location; - - /** - * Updated dimension information. - * - * If dimensions.size() > 0, dimension information was provided - * along with the argument. This can be the case for models that - * accept inputs of varying size. This can't change the rank, just - * the value of the dimensions that were unspecified in the - * model. If dimensions.size() > 0, then all dimensions must be - * specified here; and any dimension that was specified in the - * model must have the same value here. - * - * If the dimensions in the model are not fully specified, then - * they must be fully specified here, unless hasNoValue is set to - * true. If the dimensions in the model are fully specified, then - * either dimensions.size() may be 0, or the dimensions in the - * model must be identical to the dimensions here. 
- */ - Dimensions dimensions; - }; - - /** - * Specifies a driver-managed buffer. It is the token corresponding to an - * IBuffer returned from IDevice::allocate, and is specific to the IDevice - * object. - */ - enum class MemoryDomainToken : uint32_t {}; - - /** - * A memory pool. - */ - using MemoryPool = std::variant<SharedMemory, MemoryDomainToken, SharedBuffer>; - - /** - * Input data and information to be used in the execution of a prepared - * model. - * - * The index of the input corresponds to the index in Model::main::inputIndexes. - * E.g., inputs[i] corresponds to Model::main::inputIndexes[i]. - */ - std::vector<Argument> inputs; - - /** - * Output data and information to be used in the execution of a prepared - * model. - * - * The index of the output corresponds to the index in Model::main::outputIndexes. - * E.g., outputs[i] corresponds to Model::main::outputIndexes[i]. - */ - std::vector<Argument> outputs; - - /** - * A collection of memory pools containing operand data for both the - * inputs and the outputs to a model. - */ - std::vector<MemoryPool> pools; -}; - -// Representation of sync_fence. -class SyncFence { - public: - static SyncFence createAsSignaled(); - static SyncFence create(base::unique_fd fd); - static Result<SyncFence> create(SharedHandle syncFence); - - // The function syncWait() has the same semantics as the system function - // ::sync_wait(), except that the syncWait() return value is semantically - // richer. 
- enum class FenceState { - ACTIVE, // fence has not been signaled - SIGNALED, // fence has been signaled - ERROR, // fence has been placed in the error state - UNKNOWN, // either bad argument passed to syncWait(), or internal error - }; - using Timeout = std::chrono::duration<int, std::milli>; - using OptionalTimeout = std::optional<Timeout>; - - FenceState syncWait(OptionalTimeout optionalTimeout) const; - - SharedHandle getSharedHandle() const; - bool hasFd() const; - int getFd() const; - - private: - explicit SyncFence(SharedHandle syncFence); - - SharedHandle mSyncFence; -}; - -using Clock = base::boot_clock; - -using Duration = std::chrono::nanoseconds; -using OptionalDuration = std::optional<Duration>; - -using TimePoint = std::chrono::time_point<Clock, Duration>; -using OptionalTimePoint = std::optional<TimePoint>; - -/** - * Timing information measured during execution. Each time is a duration from - * the beginning of some task to the end of that task, including time when that - * task is not active (for example, preempted by some other task, or - * waiting for some resource to become available). - * - * Times are measured in nanoseconds. - */ -struct Timing { - /** Execution time on device (not driver, which runs on host processor). */ - OptionalDuration timeOnDevice; - /** Execution time in driver (including time on device). */ - OptionalDuration timeInDriver; -}; - -// Returns status, timingLaunched, timingFenced -using ExecuteFencedInfoCallback = std::function<GeneralResult<std::pair<Timing, Timing>>()>; - -enum class Version { ANDROID_OC_MR1, ANDROID_P, ANDROID_Q, ANDROID_R, ANDROID_S, CURRENT_RUNTIME }; - -// Describes the memory preference of an operand. -struct MemoryPreference { - // Must be a power of 2. - // For pointer buffers, the alignment is satisfied if the address of the pointer is a multiple - // of the "alignment" value. 
For memory pools, the alignment is satisfied if the offset of the - // sub-region specified by DataLocation is a multiple of the "alignment" value. - uint32_t alignment; - // Must be a power of 2. - // For both pointer buffers and memory pools, the padding is satisfied if the padded length is - // greater than or equal to the raw size of the operand (i.e. the size of an element multiplied - // by the number of elements) rounding up to a multiple of the "padding" value. In DataLocation, - // the padded length equals to the sum of the length and padding fields. - uint32_t padding; -}; - -} // namespace android::nn - -#endif // ANDROID_FRAMEWORKS_ML_NN_COMMON_NNAPI_TYPES_H
diff --git a/common/include/nnapi/Validation.h b/common/include/nnapi/Validation.h deleted file mode 100644 index 9351b4c..0000000 --- a/common/include/nnapi/Validation.h +++ /dev/null
@@ -1,134 +0,0 @@ -/* - * Copyright (C) 2020 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef ANDROID_FRAMEWORKS_ML_NN_COMMON_NNAPI_VALIDATION_H -#define ANDROID_FRAMEWORKS_ML_NN_COMMON_NNAPI_VALIDATION_H - -#include <memory> -#include <set> -#include <tuple> -#include <vector> - -#include "nnapi/Result.h" -#include "nnapi/Types.h" - -namespace android::nn { - -// Utility functions - -Version combineVersions(Version lhs, Version rhs); - -Result<Version> validate(const DeviceStatus& deviceStatus); -Result<Version> validate(const ExecutionPreference& executionPreference); -Result<Version> validate(const DeviceType& deviceType); -Result<Version> validate(const MeasureTiming& measureTiming); -Result<Version> validate(const OperandType& operandType); -Result<Version> validate(const Priority& priority); -Result<Version> validate(const ErrorStatus& errorStatus); -Result<Version> validate(const FusedActivationFunc& activation); -Result<Version> validate(const OutputShape& outputShape); -Result<Version> validate(const Timing& timing); -Result<Version> validate(const Capabilities& capabilities); -Result<Version> validate(const Extension& extension); -Result<Version> validate(const SharedHandle& handle); -Result<Version> validate(const SharedMemory& memory); -Result<Version> validate(const Model& model); -Result<Version> validate(const BufferDesc& bufferDesc); -Result<Version> validate(const BufferRole& bufferRole); 
-Result<Version> validate(const Request& request); -Result<Version> validate(const OptionalTimePoint& optionalTimePoint); -Result<Version> validate(const OptionalDuration& optionalTimeoutDuration); -Result<Version> validate(const CacheToken& cacheToken); -Result<Version> validate(const SyncFence& syncFence); - -Result<Version> validate(const std::vector<OutputShape>& outputShapes); -Result<Version> validate(const std::vector<Extension>& extensions); -Result<Version> validate(const std::vector<SharedHandle>& handles); -Result<Version> validate(const std::vector<BufferRole>& bufferRoles); -Result<Version> validate(const std::vector<SyncFence>& syncFences); - -// Validate request applied to model. -Result<Version> validateRequestForModel(const Request& request, const Model& model, - bool allowUnspecifiedOutput = true); - -// Validate memory descriptor. -enum class IOType { INPUT, OUTPUT }; -using PreparedModelRole = std::tuple<const IPreparedModel*, IOType, uint32_t>; - -// Verifies that the input arguments to IDevice::allocate are valid. -// Optionally, this function can return a flattened prepared model roles and a combined operand. -// Pass nullptr if either value is not needed. -// IMPORTANT: This function cannot validate dimensions and extraParams with extension operand type. -// Each driver should do their own validation of extension type dimensions and extraParams. -Result<Version> validateMemoryDesc( - const BufferDesc& desc, const std::vector<SharedPreparedModel>& preparedModels, - const std::vector<BufferRole>& inputRoles, const std::vector<BufferRole>& outputRoles, - const std::function<const Model*(const SharedPreparedModel&)>& getModel, - std::set<PreparedModelRole>* preparedModelRoles, Operand* combinedOperand); - -Result<void> validateOperandSymmPerChannelQuantParams( - const Operand& operand, const Operand::SymmPerChannelQuantParams& channelQuant, - const char* tag); - -// Validates an operand type. 
-// -// extensionOperandTypeInfo must be nullptr iff the type is not an extension type. -// -// If allowPartial is true, the dimensions may be underspecified. -Result<void> validateOperandType(const Operand& type, - const Extension::OperandTypeInformation* extensionOperandTypeInfo, - const char* tag, bool allowPartial); -Result<void> validateOperandList(const std::vector<uint32_t>& list, size_t operandCount, - const char* tag); - -// Validates the operation, and ensures it uses subgraphs in a valid way, but does not validate any -// subgraphs or operands themselves. -// -// This function is currently used by ModelBuilder. -Result<void> validateOperationButNotOperands(const Operation& operation, - const std::vector<Operand>& operands, - const std::vector<Model::Subgraph>& subgraphs); - -// Forward declaration for a utility class for caching a referenced subgraph's version. -struct SubgraphVersionCache; - -// Function to create an opaque handle to a utility class for caching a referenced subgraph's -// version. -std::unique_ptr<SubgraphVersionCache, void (*)(SubgraphVersionCache*)> createSubgraphVersionCache( - size_t subgraphCount); - -// Validate the operation or operand, also validating any subgraphs and operands it may use, -// recursively. -// -// `subgraphVersionCache` is used to cache validation information for `subgraphs`, which would -// otherwise be unnecessarily re-validated. For this reason, `subgraphVersionCache` must be non-null -// and must have been created with the number of referenced subgraphs in `subgraphs`. The provided -// subgraphs must not form a reference cycle. -// -// These functions are currently used by MetaModel. 
-Result<Version> validateOperationAndAnythingItDependsOn( - const Operation& operation, const std::vector<Operand>& operands, size_t operandValuesSize, - const std::vector<size_t>& poolSizes, const std::vector<Model::Subgraph>& subgraphs, - SubgraphVersionCache* subgraphVersionCache); -Result<Version> validateOperandAndAnythingItDependsOn(const Operand& operand, - size_t operandValuesSize, - const std::vector<size_t>& poolSizes, - const std::vector<Model::Subgraph>& subgraphs, - SubgraphVersionCache* subgraphVersionCache); - -} // namespace android::nn - -#endif // ANDROID_FRAMEWORKS_ML_NN_COMMON_NNAPI_VALIDATION_H
diff --git a/common/operations/Activation.cpp b/common/operations/Activation.cpp index 5b39448..ff5a55d 100644 --- a/common/operations/Activation.cpp +++ b/common/operations/Activation.cpp
@@ -16,28 +16,28 @@ #define LOG_TAG "Operations" -#include <algorithm> -#include <limits> -#include <vector> - -#include "ActivationFunctor.h" -#include "OperationResolver.h" -#include "OperationsUtils.h" -#include "Tracing.h" - -#ifdef NN_INCLUDE_CPU_IMPLEMENTATION #include <tensorflow/lite/kernels/internal/optimized/legacy_optimized_ops.h> #include <tensorflow/lite/kernels/internal/optimized/optimized_ops.h> #include <tensorflow/lite/kernels/internal/reference/integer_ops/logistic.h> #include <tensorflow/lite/kernels/internal/reference/integer_ops/tanh.h> #include <tensorflow/lite/kernels/internal/reference/reference_ops.h> +#include <algorithm> +#include <limits> +#include <vector> + +#include "ActivationFunctor.h" #include "CpuOperationUtils.h" -#endif // NN_INCLUDE_CPU_IMPLEMENTATION +#include "HalInterfaces.h" +#include "OperationResolver.h" +#include "OperationsUtils.h" +#include "Tracing.h" namespace android { namespace nn { +using namespace hal; + namespace activation { constexpr uint32_t kNumInputs = 1; @@ -46,7 +46,6 @@ constexpr uint32_t kNumOutputs = 1; constexpr uint32_t kOutputTensor = 0; -#ifdef NN_INCLUDE_CPU_IMPLEMENTATION namespace { template <typename T> @@ -275,8 +274,7 @@ NNTRACE_COMP_SWITCH("reference_integer_ops::Tanh"); tflite::reference_integer_ops::Tanh(inputShape.offset, input_range_radius, input_multiplier, - input_left_shift, convertShapeToTflshape(inputShape), - inputData, convertShapeToTflshape(outputShape), outputData); + input_left_shift, numElements, inputData, outputData); return true; } @@ -357,55 +355,47 @@ } } // namespace -#endif // NN_INCLUDE_CPU_IMPLEMENTATION -Result<Version> validate(OperationType opType, const IOperationValidationContext* context) { +bool validate(OperationType opType, const IOperationValidationContext* context) { NN_RET_CHECK_EQ(context->getNumInputs(), kNumInputs); NN_RET_CHECK_EQ(context->getNumOutputs(), kNumOutputs); auto inputType = context->getInputType(kInputTensor); - auto minSupportedVersion = 
Version::ANDROID_OC_MR1; if (inputType == OperandType::TENSOR_FLOAT32) { - minSupportedVersion = Version::ANDROID_OC_MR1; + NN_RET_CHECK(validateHalVersion(context, HalVersion::V1_0)); } else if (inputType == OperandType::TENSOR_FLOAT16) { - minSupportedVersion = Version::ANDROID_Q; + NN_RET_CHECK(validateHalVersion(context, HalVersion::V1_2)); } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) { if (opType == OperationType::TANH) { - minSupportedVersion = Version::ANDROID_Q; + NN_RET_CHECK(validateHalVersion(context, HalVersion::V1_2)); } else { - minSupportedVersion = Version::ANDROID_OC_MR1; + NN_RET_CHECK(validateHalVersion(context, HalVersion::V1_0)); } } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - minSupportedVersion = Version::ANDROID_R; + NN_RET_CHECK(validateHalVersion(context, HalVersion::V1_3)); } else { - NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation " << opType; + NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation " << getOperationName(opType); } const Shape& input = context->getInputShape(kInputTensor); if (hasKnownRank(input)) { NN_RET_CHECK_LE(getNumberOfDimensions(input), 4); } - NN_RET_CHECK(validateInputTypes(context, {inputType})); - NN_RET_CHECK(validateOutputTypes(context, {inputType})); - return minSupportedVersion; + return validateInputTypes(context, {inputType}) && validateOutputTypes(context, {inputType}); } -Result<Version> validateHardSwish(const IOperationValidationContext* context) { +bool validateHardSwish(const IOperationValidationContext* context) { NN_RET_CHECK_EQ(context->getNumInputs(), kNumInputs); NN_RET_CHECK_EQ(context->getNumOutputs(), kNumOutputs); auto inputType = context->getInputType(kInputTensor); - auto minSupportedVersion = Version::ANDROID_OC_MR1; if (inputType == OperandType::TENSOR_FLOAT16 || inputType == OperandType::TENSOR_FLOAT32 || inputType == OperandType::TENSOR_QUANT8_ASYMM || inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - 
minSupportedVersion = Version::ANDROID_R; + NN_RET_CHECK(validateHalVersion(context, HalVersion::V1_3)); } else { NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation ELU"; } - NN_RET_CHECK(validateInputTypes(context, {inputType})); - NN_RET_CHECK(validateOutputTypes(context, {inputType})); - return minSupportedVersion; + return validateInputTypes(context, {inputType}) && validateOutputTypes(context, {inputType}); } -#ifdef NN_INCLUDE_CPU_IMPLEMENTATION bool prepare(OperationType opType, IOperationExecutionContext* context) { Shape input = context->getInputShape(kInputTensor); if (opType != OperationType::HARD_SWISH) { @@ -623,7 +613,6 @@ NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation TANH"; } } -#endif // NN_INCLUDE_CPU_IMPLEMENTATION } // namespace activation
diff --git a/common/operations/ArgMinMax.cpp b/common/operations/ArgMinMax.cpp index b508950..f53ba47 100644 --- a/common/operations/ArgMinMax.cpp +++ b/common/operations/ArgMinMax.cpp
@@ -19,12 +19,16 @@ #define LOG_TAG "Operations" #include "CpuOperationUtils.h" +#include "HalInterfaces.h" #include "Operations.h" + #include "Tracing.h" namespace android { namespace nn { +using namespace hal; + template <typename In, typename Out> static void argMinMaxImpl(const In* inputData, const Shape& inputShape, int32_t axis, bool isArgMin, Out* outputData, const Shape& outputShape) {
diff --git a/common/operations/BidirectionalSequenceLSTM.cpp b/common/operations/BidirectionalSequenceLSTM.cpp index 6be67b0..12ac43f 100644 --- a/common/operations/BidirectionalSequenceLSTM.cpp +++ b/common/operations/BidirectionalSequenceLSTM.cpp
@@ -18,13 +18,12 @@ #include "BidirectionalSequenceLSTM.h" -#include <tensorflow/lite/kernels/internal/tensor_utils.h> - #include <algorithm> #include <vector> #include "CpuExecutor.h" #include "CpuOperationUtils.h" +#include "HalInterfaces.h" #include "OperationsUtils.h" #include "Tracing.h" @@ -33,6 +32,8 @@ namespace { +using namespace hal; + template <typename T> inline T* GetBuffer(RunTimeOperandInfo* operand) { return reinterpret_cast<T*>(operand->buffer);
diff --git a/common/operations/BidirectionalSequenceLSTM.h b/common/operations/BidirectionalSequenceLSTM.h index d697867..184b65d 100644 --- a/common/operations/BidirectionalSequenceLSTM.h +++ b/common/operations/BidirectionalSequenceLSTM.h
@@ -17,6 +17,8 @@ #ifndef ANDROID_FRAMEWORKS_ML_NN_COMMON_OPERATIONS_BIDIRECTIONAL_SEQUENCE_LSTM_H #define ANDROID_FRAMEWORKS_ML_NN_COMMON_OPERATIONS_BIDIRECTIONAL_SEQUENCE_LSTM_H +#include <tensorflow/lite/kernels/internal/tensor_utils.h> + #include <algorithm> #include <cmath> #include <vector> @@ -32,11 +34,12 @@ class BidirectionalSequenceLSTM { public: - BidirectionalSequenceLSTM(const Operation& operation, RunTimeOperandInfo* operands); + BidirectionalSequenceLSTM(const hal::Operation& operation, RunTimeOperandInfo* operands); - bool Prepare(const Operation& operation, RunTimeOperandInfo* operands, Shape* fwOutputShape, - Shape* bwOutputShape, Shape* fwOutputActivationState, Shape* fwOutputCellState, - Shape* bwOutputActivationState, Shape* bwOutputCellState); + bool Prepare(const hal::Operation& operation, RunTimeOperandInfo* operands, + Shape* fwOutputShape, Shape* bwOutputShape, Shape* fwOutputActivationState, + Shape* fwOutputCellState, Shape* bwOutputActivationState, + Shape* bwOutputCellState); bool Eval(); // Input Tensors of size {max_time, n_batch, n_input}
diff --git a/common/operations/BidirectionalSequenceRNN.cpp b/common/operations/BidirectionalSequenceRNN.cpp index 0598536..98917c0 100644 --- a/common/operations/BidirectionalSequenceRNN.cpp +++ b/common/operations/BidirectionalSequenceRNN.cpp
@@ -20,6 +20,7 @@ #include <utility> #include <vector> +#include "HalInterfaces.h" #include "OperationResolver.h" #include "RNN.h" @@ -58,9 +59,10 @@ constexpr uint32_t kFwOutputHiddenStateTensor = 2; constexpr uint32_t kBwOutputHiddenStateTensor = 3; -#ifdef NN_INCLUDE_CPU_IMPLEMENTATION namespace { +using namespace hal; + template <typename T> void transposeFirstTwoDims(const T* input, const Shape& inputShape, T* output) { const uint32_t firstDimSize = getSizeOfDimension(inputShape, 0); @@ -313,9 +315,8 @@ } } // namespace -#endif // NN_INCLUDE_CPU_IMPLEMENTATION -Result<Version> validate(const IOperationValidationContext* context) { +bool validate(const IOperationValidationContext* context) { NN_RET_CHECK_EQ(context->getNumInputs(), kNumInputs); // Exact number is dependent on the mergeOutputs parameter and checked // during preparation. @@ -325,8 +326,9 @@ OperandType inputType = context->getInputType(kInputTensor); if (inputType != OperandType::TENSOR_FLOAT16 && inputType != OperandType::TENSOR_FLOAT32) { - return NN_ERROR() << "Unsupported input operand type for UNIDIRECTIONAL_SEQUENCE_RNN op: " - << inputType; + LOG(ERROR) << "Unsupported input operand type for UNIDIRECTIONAL_SEQUENCE_RNN op: " + << toString(inputType); + return false; } NN_RET_CHECK(validateInputTypes( context, {inputType, inputType, inputType, inputType, inputType, inputType, inputType, @@ -336,14 +338,13 @@ std::vector<OperandType> outExpectedTypes(numOutputs, inputType); NN_RET_CHECK(validateOutputTypes(context, outExpectedTypes)); - Version minSupportedVersion = Version::ANDROID_Q; + HalVersion minSupportedHalVersion = HalVersion::V1_2; if (numOutputs == kNumOutputsWithState || numOutputs == kNumOutputsMergedWithState) { - minSupportedVersion = Version::ANDROID_R; + minSupportedHalVersion = HalVersion::V1_3; } - return minSupportedVersion; + return validateHalVersion(context, minSupportedHalVersion); } -#ifdef NN_INCLUDE_CPU_IMPLEMENTATION bool prepare(IOperationExecutionContext* 
context) { const bool mergeOutputs = context->getInputValue<bool>(kMergeOutputsParam); const int32_t numOutputs = context->getNumOutputs(); @@ -471,7 +472,6 @@ } return true; } -#endif // NN_INCLUDE_CPU_IMPLEMENTATION } // namespace bidirectional_sequence_rnn
diff --git a/common/operations/Broadcast.cpp b/common/operations/Broadcast.cpp index 84d3335..17094af 100644 --- a/common/operations/Broadcast.cpp +++ b/common/operations/Broadcast.cpp
@@ -18,16 +18,6 @@ #define LOG_TAG "Operations" -#include <algorithm> -#include <vector> - -#include "IndexedShapeWrapper.h" -#include "OperationResolver.h" -#include "Tracing.h" -#include "nnapi/Types.h" -#include "nnapi/Validation.h" - -#ifdef NN_INCLUDE_CPU_IMPLEMENTATION #include <tensorflow/lite/kernels/internal/optimized/integer_ops/add.h> #include <tensorflow/lite/kernels/internal/optimized/integer_ops/mul.h> #include <tensorflow/lite/kernels/internal/optimized/legacy_optimized_ops.h> @@ -35,12 +25,20 @@ #include <tensorflow/lite/kernels/internal/reference/integer_ops/mul.h> #include <tensorflow/lite/kernels/internal/types.h> +#include <algorithm> +#include <vector> + #include "CpuOperationUtils.h" -#endif // NN_INCLUDE_CPU_IMPLEMENTATION +#include "HalInterfaces.h" +#include "IndexedShapeWrapper.h" +#include "OperationResolver.h" +#include "Tracing.h" namespace android { namespace nn { +using namespace hal; + namespace broadcast { constexpr uint32_t kNumInputs = 3; @@ -51,21 +49,20 @@ constexpr uint32_t kNumOutputs = 1; constexpr uint32_t kOutputTensor = 0; -#ifdef NN_INCLUDE_CPU_IMPLEMENTATION namespace { #define ANDROID_NN_MACRO_DISPATCH(macro) \ switch (activation) { \ - case static_cast<int32_t>(FusedActivationFunc::NONE): \ + case (int32_t)FusedActivationFunc::NONE: \ macro(kNone); \ break; \ - case static_cast<int32_t>(FusedActivationFunc::RELU): \ + case (int32_t)FusedActivationFunc::RELU: \ macro(kRelu); \ break; \ - case static_cast<int32_t>(FusedActivationFunc::RELU1): \ + case (int32_t)FusedActivationFunc::RELU1: \ macro(kRelu1); \ break; \ - case static_cast<int32_t>(FusedActivationFunc::RELU6): \ + case (int32_t)FusedActivationFunc::RELU6: \ macro(kRelu6); \ break; \ default: \ @@ -211,7 +208,7 @@ bool executeInt32(const int32_t* aData, const Shape& aShape, const int32_t* bData, const Shape& bShape, int32_t activation, int32_t* outputData, const Shape& outputShape, int32_t func(int32_t, int32_t)) { - 
NN_RET_CHECK_EQ(static_cast<FusedActivationFunc>(activation), FusedActivationFunc::NONE); + NN_RET_CHECK_EQ(activation, ANEURALNETWORKS_FUSED_NONE); IndexedShapeWrapper aShapeIndexed(aShape); IndexedShapeWrapper bShapeIndexed(bShape); IndexedShapeWrapper outputShapeIndexed(outputShape); @@ -437,22 +434,21 @@ } } // namespace -#endif // NN_INCLUDE_CPU_IMPLEMENTATION -Result<Version> validate(OperationType opType, const IOperationValidationContext* context) { - auto minSupportedVersion = (opType == OperationType::DIV || opType == OperationType::SUB) - ? Version::ANDROID_P - : Version::ANDROID_OC_MR1; +bool validate(OperationType opType, const IOperationValidationContext* context) { + const HalVersion opIntroducedAt = (opType == OperationType::DIV || opType == OperationType::SUB) + ? HalVersion::V1_1 + : HalVersion::V1_0; NN_RET_CHECK_EQ(context->getNumInputs(), kNumInputs); NN_RET_CHECK_EQ(context->getNumOutputs(), kNumOutputs); auto inputType = context->getInputType(kInputTensor1); if (inputType == OperandType::TENSOR_FLOAT32) { - minSupportedVersion = combineVersions(minSupportedVersion, Version::ANDROID_OC_MR1); + NN_RET_CHECK(validateHalVersion(context, std::max(HalVersion::V1_0, opIntroducedAt))); } else if (inputType == OperandType::TENSOR_FLOAT16) { - minSupportedVersion = combineVersions(minSupportedVersion, Version::ANDROID_Q); + NN_RET_CHECK(validateHalVersion(context, std::max(HalVersion::V1_2, opIntroducedAt))); } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) { if (opType == OperationType::SUB) { - minSupportedVersion = combineVersions(minSupportedVersion, Version::ANDROID_Q); + NN_RET_CHECK(validateHalVersion(context, std::max(HalVersion::V1_2, opIntroducedAt))); } else if (opType == OperationType::DIV) { NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation DIV"; } else if (opType == OperationType::MUL) { @@ -460,15 +456,15 @@ Shape input1 = context->getInputShape(kInputTensor1); Shape input2 = context->getInputShape(kInputTensor2); 
NN_RET_CHECK_GT(output.scale, input1.scale * input2.scale); - minSupportedVersion = combineVersions(minSupportedVersion, Version::ANDROID_OC_MR1); + NN_RET_CHECK(validateHalVersion(context, std::max(HalVersion::V1_0, opIntroducedAt))); } else { - minSupportedVersion = combineVersions(minSupportedVersion, Version::ANDROID_OC_MR1); + NN_RET_CHECK(validateHalVersion(context, std::max(HalVersion::V1_0, opIntroducedAt))); } } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED || inputType == OperandType::TENSOR_INT32) { - minSupportedVersion = combineVersions(minSupportedVersion, Version::ANDROID_R); + NN_RET_CHECK(validateHalVersion(context, std::max(HalVersion::V1_3, opIntroducedAt))); } else { - NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation " << opType; + NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation " << getOperationName(opType); } const Shape& input1 = context->getInputShape(kInputTensor1); const Shape& input2 = context->getInputShape(kInputTensor2); @@ -476,12 +472,10 @@ NN_RET_CHECK_LE(getNumberOfDimensions(input1), 4); NN_RET_CHECK_LE(getNumberOfDimensions(input2), 4); } - NN_RET_CHECK(validateInputTypes(context, {inputType, inputType, OperandType::INT32})); - NN_RET_CHECK(validateOutputTypes(context, {inputType})); - return minSupportedVersion; + return validateInputTypes(context, {inputType, inputType, OperandType::INT32}) && + validateOutputTypes(context, {inputType}); } -#ifdef NN_INCLUDE_CPU_IMPLEMENTATION bool prepare(IOperationExecutionContext* context) { Shape input1 = context->getInputShape(kInputTensor1); Shape input2 = context->getInputShape(kInputTensor2); @@ -683,7 +677,6 @@ NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation DIV"; } } -#endif // NN_INCLUDE_CPU_IMPLEMENTATION } // namespace broadcast
diff --git a/common/operations/Cast.cpp b/common/operations/Cast.cpp index aef3baf..77e35af 100644 --- a/common/operations/Cast.cpp +++ b/common/operations/Cast.cpp
@@ -20,6 +20,7 @@ #include <algorithm> +#include "HalInterfaces.h" #include "Operations.h" #include "Tracing.h" @@ -29,6 +30,8 @@ namespace { +using namespace hal; + template <typename FromT, typename ToT> void copyCast(const FromT* in, ToT* out, int numElements) { std::transform(in, in + numElements, out, [](FromT a) -> ToT {
diff --git a/common/operations/ChannelShuffle.cpp b/common/operations/ChannelShuffle.cpp index efa0873..7abf224 100644 --- a/common/operations/ChannelShuffle.cpp +++ b/common/operations/ChannelShuffle.cpp
@@ -16,6 +16,7 @@ #define LOG_TAG "Operations" +#include "HalInterfaces.h" #include "OperationResolver.h" #include "OperationsUtils.h" #include "Tracing.h" @@ -24,6 +25,8 @@ namespace nn { namespace channel_shuffle { +using namespace hal; + constexpr char kOperationName[] = "CHANNEL_SHUFFLE"; constexpr uint32_t kNumInputs = 3; @@ -57,7 +60,7 @@ return true; } -Result<Version> validate(const IOperationValidationContext* context) { +bool validate(const IOperationValidationContext* context) { NN_RET_CHECK_EQ(context->getNumInputs(), kNumInputs); NN_RET_CHECK_EQ(context->getNumOutputs(), kNumOutputs); auto inputType = context->getInputType(kInputTensor); @@ -73,9 +76,9 @@ NN_RET_CHECK(validateInputTypes(context, {inputType, OperandType::INT32, OperandType::INT32})); NN_RET_CHECK(validateOutputTypes(context, {inputType})); if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - return Version::ANDROID_R; + return validateHalVersion(context, HalVersion::V1_3); } else { - return Version::ANDROID_Q; + return validateHalVersion(context, HalVersion::V1_2); } }
diff --git a/common/operations/Comparisons.cpp b/common/operations/Comparisons.cpp index b490c92..a8f8622 100644 --- a/common/operations/Comparisons.cpp +++ b/common/operations/Comparisons.cpp
@@ -19,6 +19,7 @@ #include <functional> #include <vector> +#include "HalInterfaces.h" #include "IndexedShapeWrapper.h" #include "OperationResolver.h" #include "OperationsUtils.h" @@ -36,6 +37,8 @@ namespace { +using namespace hal; + template <typename DataType, typename ComparisonType> bool compute(const std::function<bool(ComparisonType, ComparisonType)>& func, const DataType* aData, const Shape& aShape, const DataType* bData, const Shape& bShape, bool8* outputData, @@ -123,7 +126,7 @@ } // namespace -Result<Version> validate(const IOperationValidationContext* context) { +bool validate(const IOperationValidationContext* context) { NN_RET_CHECK_EQ(context->getNumInputs(), kNumInputs); NN_RET_CHECK_EQ(context->getNumOutputs(), kNumOutputs); OperandType inputType = context->getInputType(kInputTensor1); @@ -132,13 +135,13 @@ inputType == OperandType::TENSOR_FLOAT32 || inputType == OperandType::TENSOR_INT32 || inputType == OperandType::TENSOR_QUANT8_ASYMM || inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) - << "Unsupported input operand type for comparison op: " << inputType; + << "Unsupported input operand type for comparison op: " << toString(inputType); NN_RET_CHECK(validateInputTypes(context, {inputType, inputType})); NN_RET_CHECK(validateOutputTypes(context, {OperandType::TENSOR_BOOL8})); if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - return Version::ANDROID_R; + return validateHalVersion(context, HalVersion::V1_3); } else { - return Version::ANDROID_Q; + return validateHalVersion(context, HalVersion::V1_2); } }
diff --git a/common/operations/Concatenation.cpp b/common/operations/Concatenation.cpp index 7844b65..08c9c61 100644 --- a/common/operations/Concatenation.cpp +++ b/common/operations/Concatenation.cpp
@@ -14,25 +14,22 @@ * limitations under the License. */ +#include "OperationsUtils.h" #define LOG_TAG "Operations" -#include <algorithm> -#include <iterator> -#include <vector> - -#include "OperationResolver.h" -#include "OperationsUtils.h" -#include "Tracing.h" -#include "nnapi/Validation.h" - -#ifdef NN_INCLUDE_CPU_IMPLEMENTATION #include <tensorflow/lite/kernels/internal/optimized/legacy_optimized_ops.h> #include <tensorflow/lite/kernels/internal/reference/legacy_reference_ops.h> #include <tensorflow/lite/kernels/internal/reference/reference_ops.h> #include <tensorflow/lite/kernels/internal/types.h> +#include <algorithm> +#include <iterator> +#include <vector> + #include "CpuOperationUtils.h" -#endif // NN_INCLUDE_CPU_IMPLEMENTATION +#include "HalInterfaces.h" +#include "OperationResolver.h" +#include "Tracing.h" namespace android { namespace nn { @@ -43,9 +40,10 @@ constexpr uint32_t kNumOutputs = 1; constexpr uint32_t kOutputTensor = 0; -#ifdef NN_INCLUDE_CPU_IMPLEMENTATION namespace { +using namespace hal; + template <typename T> bool concatenation(const std::vector<const T*>& inputDataPtrs, const std::vector<Shape>& inputShapes, int32_t axis, T* outputData, @@ -139,32 +137,30 @@ } } // namespace -#endif // NN_INCLUDE_CPU_IMPLEMENTATION -Result<Version> validate(const IOperationValidationContext* context) { +bool validate(const IOperationValidationContext* context) { uint32_t inputCount = context->getNumInputs(); NN_RET_CHECK_GE(inputCount, 2); NN_RET_CHECK_EQ(context->getNumOutputs(), kNumOutputs); const OperandType inputType = context->getInputType(0); - auto minSupportedVersion = Version::ANDROID_OC_MR1; if (inputType == OperandType::TENSOR_FLOAT32 || inputType == OperandType::TENSOR_QUANT8_ASYMM) { - minSupportedVersion = Version::ANDROID_OC_MR1; + NN_RET_CHECK(validateHalVersion(context, HalVersion::V1_0)); } else if (inputType == OperandType::TENSOR_FLOAT16) { - minSupportedVersion = Version::ANDROID_Q; + NN_RET_CHECK(validateHalVersion(context, 
HalVersion::V1_2)); } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - minSupportedVersion = Version::ANDROID_R; + NN_RET_CHECK(validateHalVersion(context, HalVersion::V1_3)); } else { NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation " << kOperationName; } std::vector<OperandType> inExpectedTypes(inputCount - 1, inputType); inExpectedTypes.push_back(OperandType::INT32); - if (inputType == OperandType::TENSOR_QUANT8_ASYMM) { + if (context->getHalVersion() < HalVersion::V1_2 && + inputType == OperandType::TENSOR_QUANT8_ASYMM) { const Shape& output = context->getOutputShape(kOutputTensor); for (uint32_t i = 0; i < inputCount - 1; ++i) { const Shape& input = context->getInputShape(i); - if (input.scale != output.scale || input.offset != output.offset) { - minSupportedVersion = combineVersions(minSupportedVersion, Version::ANDROID_Q); - } + NN_RET_CHECK_EQ(input.scale, output.scale); + NN_RET_CHECK_EQ(input.offset, output.offset); } } for (uint32_t i = 0; i < inputCount - 1; ++i) { @@ -173,12 +169,10 @@ NN_RET_CHECK_LE(inputRank, 4); } } - NN_RET_CHECK(validateInputTypes(context, inExpectedTypes)); - NN_RET_CHECK(validateOutputTypes(context, {inputType})); - return minSupportedVersion; + return validateInputTypes(context, inExpectedTypes) && + validateOutputTypes(context, {inputType}); } -#ifdef NN_INCLUDE_CPU_IMPLEMENTATION bool prepare(IOperationExecutionContext* context) { uint32_t numInputs = context->getNumInputs(); NN_RET_CHECK_GE(numInputs, 2); @@ -226,7 +220,6 @@ NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation " << kOperationName; } } -#endif // NN_INCLUDE_CPU_IMPLEMENTATION } // namespace concatenation
diff --git a/common/operations/Conv2D.cpp b/common/operations/Conv2D.cpp index 9eb0f8b..8246266 100644 --- a/common/operations/Conv2D.cpp +++ b/common/operations/Conv2D.cpp
@@ -16,24 +16,22 @@ #define LOG_TAG "Operations" +#include <tensorflow/lite/kernels/internal/optimized/legacy_optimized_ops.h> +#include <tensorflow/lite/kernels/internal/reference/integer_ops/conv.h> +#include <tensorflow/lite/kernels/internal/types.h> + #include <algorithm> #include <iterator> #include <memory> #include <vector> -#include "LegacyUtils.h" +#include "CpuOperationUtils.h" +#include "HalInterfaces.h" #include "OperationResolver.h" #include "Operations.h" #include "OperationsUtils.h" #include "Tracing.h" - -#ifdef NN_INCLUDE_CPU_IMPLEMENTATION -#include <tensorflow/lite/kernels/internal/optimized/legacy_optimized_ops.h> -#include <tensorflow/lite/kernels/internal/reference/integer_ops/conv.h> -#include <tensorflow/lite/kernels/internal/types.h> - -#include "CpuOperationUtils.h" -#endif // NN_INCLUDE_CPU_IMPLEMENTATION +#include "Utils.h" namespace android { namespace nn { @@ -51,6 +49,8 @@ namespace { +using namespace hal; + // If possible we will use this static buffer for the tensor. constexpr size_t kStaticBufferSize = 1605632; char static_scratch_buffer[kStaticBufferSize]; @@ -129,7 +129,6 @@ } }; -#ifdef NN_INCLUDE_CPU_IMPLEMENTATION #define ANDROID_NN_CONV_PARAMETERS(Type) \ uint32_t height = getSizeOfDimension(inputShape, 1); \ uint32_t width = getSizeOfDimension(inputShape, 2); \ @@ -175,22 +174,6 @@ im2colGuard.reset(im2colData); \ } -bool needim2colData(const Shape& filterShape, int32_t stride_width, int32_t stride_height, - int32_t dilation_width_factor, int32_t dilation_height_factor) { - // Within tflite::optimized_ops::Conv, the following tests are performed, - // and in the case (!need_dilated_im2col && !need_im2col), then the - // method doesn't expect to receive outputData. In debug mode this is - // asserted and fails tests, so we need to perform this check as the caller - // also. 
See: - // tensorflow/lite/kernels/internal/optimized/legacy_optimized_ops.h:2655 - const int filter_width = getSizeOfDimension(filterShape, 2); - const int filter_height = getSizeOfDimension(filterShape, 1); - const bool need_dilated_im2col = dilation_width_factor != 1 || dilation_height_factor != 1; - const bool need_im2col = - stride_width != 1 || stride_height != 1 || filter_width != 1 || filter_height != 1; - return need_dilated_im2col || need_im2col; -} - bool convNhwc(const float* inputData, const Shape& inputShape, const float* filterData, const Shape& filterShape, const float* biasData, const Shape& biasShape, int32_t padding_left, int32_t padding_right, int32_t padding_top, @@ -207,16 +190,12 @@ // Prevent concurrent executions that may access the scratch buffer. std::unique_lock<std::mutex> lock(executionMutex); NNTRACE_COMP_SWITCH("optimized_ops::Conv"); - - const bool need_im2colData = needim2colData(filterShape, stride_width, stride_height, - dilation_width_factor, dilation_height_factor); - - tflite::optimized_ops::Conv( - inputData, convertShapeToDims(inputShape), filterData, convertShapeToDims(filterShape), - biasData, convertShapeToDims(biasShape), stride_width, stride_height, - dilation_width_factor, dilation_height_factor, paddingWidth, paddingHeight, - output_activation_min, output_activation_max, outputData, - convertShapeToDims(outputShape), need_im2colData ? 
im2colData : nullptr, im2colDim); + tflite::optimized_ops::Conv(inputData, convertShapeToDims(inputShape), filterData, + convertShapeToDims(filterShape), biasData, + convertShapeToDims(biasShape), stride_width, stride_height, + dilation_width_factor, dilation_height_factor, paddingWidth, + paddingHeight, output_activation_min, output_activation_max, + outputData, convertShapeToDims(outputShape), im2colData, im2colDim); return true; } @@ -257,18 +236,13 @@ gemm_context.set_max_num_threads(0); NNTRACE_COMP_SWITCH("optimized_ops::Conv"); - - const bool need_im2colData = needim2colData(filterShape, stride_width, stride_height, - dilation_width_factor, dilation_height_factor); - - tflite::optimized_ops::Conv(inputData, convertShapeToDims(inputShape), inputOffset, filterData, - convertShapeToDims(filterShape), filterOffset, biasData, - convertShapeToDims(biasShape), stride_width, stride_height, - dilation_width_factor, dilation_height_factor, paddingWidth, - paddingHeight, outputOffset, output_multiplier, output_shift, - output_activation_min, output_activation_max, outputData, - convertShapeToDims(outputShape), - need_im2colData ? 
im2colData : nullptr, im2colDim, &gemm_context); + tflite::optimized_ops::Conv( + inputData, convertShapeToDims(inputShape), inputOffset, filterData, + convertShapeToDims(filterShape), filterOffset, biasData, convertShapeToDims(biasShape), + stride_width, stride_height, dilation_width_factor, dilation_height_factor, + paddingWidth, paddingHeight, outputOffset, output_multiplier, output_shift, + output_activation_min, output_activation_max, outputData, + convertShapeToDims(outputShape), im2colData, im2colDim, &gemm_context); return true; } @@ -527,11 +501,10 @@ } #undef ANDROID_NN_CONV_PARAMETERS -#endif // NN_INCLUDE_CPU_IMPLEMENTATION } // namespace -Result<Version> validate(const IOperationValidationContext* context) { +bool validate(const IOperationValidationContext* context) { const uint32_t numInputs = context->getNumInputs(); NN_RET_CHECK( std::binary_search(std::begin(kNumInputsArray), std::end(kNumInputsArray), numInputs)); @@ -568,9 +541,7 @@ OperandType::INT32}; if (filterType == OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL) { - NN_RET_CHECK_EQ(std::get<Operand::SymmPerChannelQuantParams>( - context->getInputExtraParams(kFilterTensor)) - .channelDim, + NN_RET_CHECK_EQ(context->getInputExtraParams(kFilterTensor).channelQuant().channelDim, 0) << "Unsupported filter tensor channel dimension for operation " << kOperationName; @@ -617,22 +588,19 @@ } } - auto minSupportedVersion = Version::ANDROID_OC_MR1; if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - minSupportedVersion = Version::ANDROID_R; + NN_RET_CHECK(validateHalVersion(context, HalVersion::V1_3)); } else if (inputType == OperandType::TENSOR_FLOAT16 || filterType == OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL || withLayout || withDilation || !meetsQuantizedScaleConstraintBeforeV1_2) { - minSupportedVersion = Version::ANDROID_Q; + NN_RET_CHECK(validateHalVersion(context, HalVersion::V1_2)); } else { - minSupportedVersion = Version::ANDROID_OC_MR1; + NN_RET_CHECK(validateHalVersion(context, 
HalVersion::V1_0)); } - NN_RET_CHECK(validateInputTypes(context, inExpectedTypes)); - NN_RET_CHECK(validateOutputTypes(context, {inputType})); - return minSupportedVersion; + return validateInputTypes(context, inExpectedTypes) && + validateOutputTypes(context, {inputType}); } -#ifdef NN_INCLUDE_CPU_IMPLEMENTATION bool prepare(IOperationExecutionContext* context) { Shape input = context->getInputShape(kInputTensor); Shape filter = context->getInputShape(kFilterTensor); @@ -734,9 +702,7 @@ context->getInputShape(kInputTensor), context->getInputBuffer<int8_t>(kFilterTensor), context->getInputShape(kFilterTensor), - std::get<Operand::SymmPerChannelQuantParams>( - context->getInputExtraParams(kFilterTensor)) - .scales.data(), + context->getInputExtraParams(kFilterTensor).channelQuant().scales.data(), context->getInputBuffer<int32_t>(kBiasTensor), context->getInputShape(kBiasTensor), param.padding_left, param.padding_right, param.padding_top, param.padding_bottom, @@ -767,9 +733,7 @@ context->getInputShape(kInputTensor), context->getInputBuffer<int8_t>(kFilterTensor), context->getInputShape(kFilterTensor), - std::get<Operand::SymmPerChannelQuantParams>( - context->getInputExtraParams(kFilterTensor)) - .scales.data(), + context->getInputExtraParams(kFilterTensor).channelQuant().scales.data(), context->getInputBuffer<int32_t>(kBiasTensor), context->getInputShape(kBiasTensor), param.padding_left, param.padding_right, param.padding_top, param.padding_bottom, @@ -797,7 +761,6 @@ NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation " << kOperationName; } } -#endif // NN_INCLUDE_CPU_IMPLEMENTATION } // namespace conv_2d
diff --git a/common/operations/DepthwiseConv2D.cpp b/common/operations/DepthwiseConv2D.cpp index ed3cae7..32e8b55 100644 --- a/common/operations/DepthwiseConv2D.cpp +++ b/common/operations/DepthwiseConv2D.cpp
@@ -16,19 +16,16 @@ #define LOG_TAG "Operations" -#include <algorithm> -#include <vector> - -#include "OperationResolver.h" -#include "Operations.h" -#include "Tracing.h" - -#ifdef NN_INCLUDE_CPU_IMPLEMENTATION #include <tensorflow/lite/kernels/internal/optimized/depthwiseconv_uint8.h> #include <tensorflow/lite/kernels/internal/reference/depthwiseconv_float.h> +#include <algorithm> +#include <vector> + #include "CpuOperationUtils.h" -#endif // NN_INCLUDE_CPU_IMPLEMENTATION +#include "OperationResolver.h" +#include "Operations.h" +#include "Tracing.h" namespace android { namespace nn { @@ -43,9 +40,10 @@ constexpr uint32_t kNumOutputs = 1; constexpr uint32_t kOutputTensor = 0; -#ifdef NN_INCLUDE_CPU_IMPLEMENTATION namespace { +using namespace hal; + struct DepthwiseConv2dParam { int32_t padding_left, padding_right; int32_t padding_top, padding_bottom; @@ -416,9 +414,8 @@ #undef ANDROID_NN_DEPTHWISE_CONV_PARAMETERS } // namespace -#endif // NN_INCLUDE_CPU_IMPLEMENTATION -Result<Version> validate(const IOperationValidationContext* context) { +bool validate(const IOperationValidationContext* context) { const uint32_t numInputs = context->getNumInputs(); NN_RET_CHECK( std::binary_search(std::begin(kNumInputsArray), std::end(kNumInputsArray), numInputs)); @@ -446,9 +443,7 @@ filterType == inputType) << "Unsupported filter tensor type for operation " << kOperationName; if (filterType == OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL) { - NN_RET_CHECK_EQ(std::get<Operand::SymmPerChannelQuantParams>( - context->getInputExtraParams(kFilterTensor)) - .channelDim, + NN_RET_CHECK_EQ(context->getInputExtraParams(kFilterTensor).channelQuant().channelDim, 3) << "Unsupported filter tensor channel dimension for operation " << kOperationName; @@ -500,22 +495,19 @@ } } - auto minSupportedVersion = Version::ANDROID_OC_MR1; if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - minSupportedVersion = Version::ANDROID_R; + NN_RET_CHECK(validateHalVersion(context, HalVersion::V1_3)); } 
else if (inputType == OperandType::TENSOR_FLOAT16 || filterType == OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL || withLayout || withDilation || !meetsQuantizedScaleConstraintBeforeV1_2) { - minSupportedVersion = Version::ANDROID_Q; + NN_RET_CHECK(validateHalVersion(context, HalVersion::V1_2)); } else { - minSupportedVersion = Version::ANDROID_OC_MR1; + NN_RET_CHECK(validateHalVersion(context, HalVersion::V1_0)); } - NN_RET_CHECK(validateInputTypes(context, inExpectedTypes)); - NN_RET_CHECK(validateOutputTypes(context, {inputType})); - return minSupportedVersion; + return validateInputTypes(context, inExpectedTypes) && + validateOutputTypes(context, {inputType}); } -#ifdef NN_INCLUDE_CPU_IMPLEMENTATION bool prepare(IOperationExecutionContext* context) { Shape input = context->getInputShape(kInputTensor); Shape filter = context->getInputShape(kFilterTensor); @@ -615,9 +607,7 @@ context->getInputShape(kInputTensor), context->getInputBuffer<int8_t>(kFilterTensor), context->getInputShape(kFilterTensor), - std::get<Operand::SymmPerChannelQuantParams>( - context->getInputExtraParams(kFilterTensor)) - .scales.data(), + context->getInputExtraParams(kFilterTensor).channelQuant().scales.data(), context->getInputBuffer<int32_t>(kBiasTensor), context->getInputShape(kBiasTensor), param.padding_left, param.padding_right, param.padding_top, param.padding_bottom, @@ -649,9 +639,7 @@ context->getInputShape(kInputTensor), context->getInputBuffer<int8_t>(kFilterTensor), context->getInputShape(kFilterTensor), - std::get<Operand::SymmPerChannelQuantParams>( - context->getInputExtraParams(kFilterTensor)) - .scales.data(), + context->getInputExtraParams(kFilterTensor).channelQuant().scales.data(), context->getInputBuffer<int32_t>(kBiasTensor), context->getInputShape(kBiasTensor), param.padding_left, param.padding_right, param.padding_top, param.padding_bottom, @@ -680,7 +668,6 @@ NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation " << kOperationName; } } -#endif // 
NN_INCLUDE_CPU_IMPLEMENTATION } // namespace depthwise_conv_2d
diff --git a/common/operations/Dequantize.cpp b/common/operations/Dequantize.cpp index 931bcc6..2fb2d5c 100644 --- a/common/operations/Dequantize.cpp +++ b/common/operations/Dequantize.cpp
@@ -14,11 +14,12 @@ * limitations under the License. */ +#include "OperationsUtils.h" #define LOG_TAG "Operations" +#include "HalInterfaces.h" #include "IndexedShapeWrapper.h" #include "OperationResolver.h" -#include "OperationsUtils.h" namespace android { namespace nn { @@ -32,6 +33,8 @@ namespace { +using namespace hal; + template <typename InputType, typename OutputType> bool compute(const InputType* inputData, const Shape& inputShape, OutputType* outputData) { const int numElements = getNumberOfElements(inputShape); @@ -49,8 +52,7 @@ // First we calculate a stride which is the number of elements we need to // skip to change an index along a dimension with different quantization // scales. - const int channelDim = - std::get<Operand::SymmPerChannelQuantParams>(inputShape.extraParams).channelDim; + const int channelDim = inputShape.extraParams.channelQuant().channelDim; int stride = 1; for (int i = getNumberOfDimensions(inputShape) - 1; i > channelDim; --i) { stride *= getSizeOfDimension(inputShape, i); @@ -65,8 +67,7 @@ // size of the dimension (so that we don't have an overflow if the // channelDim is not 0). 
const int scaleIndex = (i / stride) % getSizeOfDimension(inputShape, channelDim); - const float scale = std::get<Operand::SymmPerChannelQuantParams>(inputShape.extraParams) - .scales[scaleIndex]; + const float scale = inputShape.extraParams.channelQuant().scales[scaleIndex]; const int32_t value = inputData[i]; outputData[i] = static_cast<OutputType>(scale * (value - zeroPoint)); } @@ -75,7 +76,7 @@ } // namespace -Result<Version> validate(const IOperationValidationContext* context) { +bool validate(const IOperationValidationContext* context) { NN_RET_CHECK_EQ(context->getNumInputs(), kNumInputs); NN_RET_CHECK_EQ(context->getNumOutputs(), kNumOutputs); @@ -89,18 +90,18 @@ if (inputType == OperandType::TENSOR_QUANT8_ASYMM && outputType == OperandType::TENSOR_FLOAT32) { - return Version::ANDROID_OC_MR1; + return validateHalVersion(context, HalVersion::V1_0); } NN_RET_CHECK(inputType == OperandType::TENSOR_QUANT8_ASYMM || inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED || inputType == OperandType::TENSOR_QUANT8_SYMM || inputType == OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL) - << "Unsupported input operand type for DEQUANTIZE op: " << inputType; + << "Unsupported input operand type for DEQUANTIZE op: " << toString(inputType); NN_RET_CHECK(outputType == OperandType::TENSOR_FLOAT16 || outputType == OperandType::TENSOR_FLOAT32) - << "Unsupported output operand type for DEQUANTIZE op: " << outputType; - return Version::ANDROID_Q; + << "Unsupported output operand type for DEQUANTIZE op: " << toString(outputType); + return validateHalVersion(context, HalVersion::V1_2); } bool prepare(IOperationExecutionContext* context) { @@ -154,7 +155,7 @@ } } NN_RET_CHECK_FAIL() << "Unsupported tensor types combination for dequantize op. (input type: " - << inputType << " output type: " << outputType << ")"; + << toString(inputType) << " output type: " << toString(outputType) << ")"; } } // namespace dequantize
diff --git a/common/operations/Elementwise.cpp b/common/operations/Elementwise.cpp index 8510003..82a2687 100644 --- a/common/operations/Elementwise.cpp +++ b/common/operations/Elementwise.cpp
@@ -18,6 +18,7 @@ #include <cmath> +#include "HalInterfaces.h" #include "OperationResolver.h" #include "OperationsUtils.h" #include "Tracing.h" @@ -34,6 +35,8 @@ namespace { +using namespace hal; + template <typename IntermediateType, typename T> inline bool compute(IntermediateType func(IntermediateType), const T* input, const Shape& shape, T* output) { @@ -82,7 +85,7 @@ } } -Result<Version> validate(const IOperationValidationContext* context) { +bool validate(const IOperationValidationContext* context) { NN_RET_CHECK_EQ(context->getNumInputs(), kNumInputs); NN_RET_CHECK_EQ(context->getNumOutputs(), kNumOutputs); OperandType inputType = context->getInputType(kInputTensor); @@ -91,10 +94,10 @@ << "Unsupported tensor type for elementwise operation"; NN_RET_CHECK(validateInputTypes(context, {inputType})); NN_RET_CHECK(validateOutputTypes(context, {inputType})); - return Version::ANDROID_Q; + return validateHalVersion(context, HalVersion::V1_2); } -Result<Version> validateAbs(const IOperationValidationContext* context) { +bool validateAbs(const IOperationValidationContext* context) { NN_RET_CHECK_EQ(context->getNumInputs(), kNumInputs); NN_RET_CHECK_EQ(context->getNumOutputs(), kNumOutputs); OperandType inputType = context->getInputType(kInputTensor); @@ -103,10 +106,11 @@ << "Unsupported tensor type for operation ABS"; NN_RET_CHECK(validateInputTypes(context, {inputType})); NN_RET_CHECK(validateOutputTypes(context, {inputType})); - return inputType == OperandType::TENSOR_INT32 ? Version::ANDROID_R : Version::ANDROID_Q; + return validateHalVersion(context, (inputType == OperandType::TENSOR_INT32 ? 
HalVersion::V1_3 + : HalVersion::V1_2)); } -Result<Version> validateFloor(const IOperationValidationContext* context) { +bool validateFloor(const IOperationValidationContext* context) { NN_RET_CHECK_EQ(context->getNumInputs(), kNumInputs); NN_RET_CHECK_EQ(context->getNumOutputs(), kNumOutputs); @@ -122,7 +126,9 @@ NN_RET_CHECK_LE(getNumberOfDimensions(input), 4); } - return inputType == OperandType::TENSOR_FLOAT16 ? Version::ANDROID_Q : Version::ANDROID_OC_MR1; + return validateHalVersion( + context, + (inputType == OperandType::TENSOR_FLOAT16 ? HalVersion::V1_2 : HalVersion::V1_0)); } bool prepare(IOperationExecutionContext* context) {
diff --git a/common/operations/Elu.cpp b/common/operations/Elu.cpp index 98e0662..07304e7 100644 --- a/common/operations/Elu.cpp +++ b/common/operations/Elu.cpp
@@ -20,6 +20,7 @@ #include <cmath> #include <vector> +#include "HalInterfaces.h" #include "IndexedShapeWrapper.h" #include "OperationResolver.h" #include "OperationsUtils.h" @@ -29,6 +30,8 @@ namespace nn { namespace elu { +using namespace hal; + constexpr uint32_t kNumInputs = 2; constexpr uint32_t kInputTensor = 0; constexpr uint32_t kAlphaScalar = 1; @@ -52,21 +55,19 @@ } // namespace -Result<Version> validate(const IOperationValidationContext* context) { +bool validate(const IOperationValidationContext* context) { NN_RET_CHECK_EQ(context->getNumInputs(), kNumInputs); NN_RET_CHECK_EQ(context->getNumOutputs(), kNumOutputs); auto inputType = context->getInputType(kInputTensor); - auto minSupportedVersion = Version::ANDROID_OC_MR1; if (inputType == OperandType::TENSOR_FLOAT16 || inputType == OperandType::TENSOR_FLOAT32) { - minSupportedVersion = Version::ANDROID_R; + NN_RET_CHECK(validateHalVersion(context, HalVersion::V1_3)); } else { NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation ELU"; } auto scalarType = inputType == OperandType::TENSOR_FLOAT16 ? OperandType::FLOAT16 : OperandType::FLOAT32; - NN_RET_CHECK(validateInputTypes(context, {inputType, scalarType})); - NN_RET_CHECK(validateOutputTypes(context, {inputType})); - return minSupportedVersion; + return validateInputTypes(context, {inputType, scalarType}) && + validateOutputTypes(context, {inputType}); } bool prepare(IOperationExecutionContext* context) {
diff --git a/common/operations/EmbeddingLookup.cpp b/common/operations/EmbeddingLookup.cpp index b967f1f..12e4a65 100644 --- a/common/operations/EmbeddingLookup.cpp +++ b/common/operations/EmbeddingLookup.cpp
@@ -19,12 +19,16 @@ #include "EmbeddingLookup.h" #include "CpuExecutor.h" +#include "HalInterfaces.h" #include "Operations.h" + #include "Tracing.h" namespace android { namespace nn { +using namespace hal; + EmbeddingLookup::EmbeddingLookup(const Operation& operation, RunTimeOperandInfo* operands) { value_ = GetInput(operation, operands, kValueTensor); lookup_ = GetInput(operation, operands, kLookupTensor);
diff --git a/common/operations/EmbeddingLookup.h b/common/operations/EmbeddingLookup.h index 0388b35..9a82dda 100644 --- a/common/operations/EmbeddingLookup.h +++ b/common/operations/EmbeddingLookup.h
@@ -19,7 +19,7 @@ #include <vector> -#include "nnapi/Types.h" +#include "HalInterfaces.h" namespace android { namespace nn { @@ -28,7 +28,7 @@ class EmbeddingLookup { public: - EmbeddingLookup(const Operation& operation, RunTimeOperandInfo* operands); + EmbeddingLookup(const hal::Operation& operation, RunTimeOperandInfo* operands); bool Eval();
diff --git a/common/operations/EmbeddingLookupTest.cpp b/common/operations/EmbeddingLookupTest.cpp index e1261c8..10e2e33 100644 --- a/common/operations/EmbeddingLookupTest.cpp +++ b/common/operations/EmbeddingLookupTest.cpp
@@ -14,14 +14,13 @@ * limitations under the License. */ -#include <gmock/gmock.h> -#include <gtest/gtest.h> - -#include <vector> - #include "EmbeddingLookup.h" + #include "NeuralNetworksWrapper.h" +#include <gmock/gmock-matchers.h> +#include <gtest/gtest.h> + using ::testing::FloatNear; using ::testing::Matcher;
diff --git a/common/operations/ExpandDims.cpp b/common/operations/ExpandDims.cpp index 435b3c7..2f546c9 100644 --- a/common/operations/ExpandDims.cpp +++ b/common/operations/ExpandDims.cpp
@@ -18,7 +18,7 @@ #include "ExpandDims.h" -#include "LegacyUtils.h" +#include "Utils.h" namespace android { namespace nn {
diff --git a/common/operations/Fill.cpp b/common/operations/Fill.cpp index 1689d4c..a6b3906 100644 --- a/common/operations/Fill.cpp +++ b/common/operations/Fill.cpp
@@ -14,10 +14,11 @@ * limitations under the License. */ +#include "OperationsUtils.h" #define LOG_TAG "Operations" +#include "HalInterfaces.h" #include "OperationResolver.h" -#include "OperationsUtils.h" namespace android { namespace nn { @@ -32,6 +33,8 @@ namespace { +using namespace hal; + template <typename T> bool executeTyped(IOperationExecutionContext* context) { T* output = context->getOutputBuffer<T>(kOutputTensor); @@ -55,13 +58,13 @@ *valueType = OperandType::INT32; return true; default: - NN_RET_CHECK_FAIL() << "Unsupported value type for fill op: " << outputType; + NN_RET_CHECK_FAIL() << "Unsupported value type for fill op: " << toString(outputType); } } } // namespace -Result<Version> validate(const IOperationValidationContext* context) { +bool validate(const IOperationValidationContext* context) { NN_RET_CHECK_EQ(context->getNumInputs(), kNumInputs); NN_RET_CHECK_EQ(context->getNumOutputs(), kNumOutputs); // Check output type first because input value type is dependent on the @@ -70,14 +73,14 @@ NN_RET_CHECK(outputType == OperandType::TENSOR_FLOAT16 || outputType == OperandType::TENSOR_FLOAT32 || outputType == OperandType::TENSOR_INT32) - << "Unsupported output type for fill op: " << outputType; + << "Unsupported output type for fill op: " << toString(outputType); NN_RET_CHECK(validateOutputTypes(context, {outputType})); OperandType valueType; NN_RET_CHECK(getValueType(outputType, &valueType)); NN_RET_CHECK(validateInputTypes(context, {OperandType::TENSOR_INT32, valueType})); - return Version::ANDROID_R; + return validateHalVersion(context, HalVersion::V1_3); } bool prepare(IOperationExecutionContext* context) {
diff --git a/common/operations/FullyConnected.cpp b/common/operations/FullyConnected.cpp index 18832d3..9bdd0ba 100644 --- a/common/operations/FullyConnected.cpp +++ b/common/operations/FullyConnected.cpp
@@ -14,21 +14,19 @@ * limitations under the License. */ +#include "tensorflow/lite/kernels/internal/types.h" #define LOG_TAG "Operations" -#include <vector> - -#include "OperationResolver.h" -#include "Tracing.h" - -#ifdef NN_INCLUDE_CPU_IMPLEMENTATION #include <tensorflow/lite/kernels/internal/optimized/legacy_optimized_ops.h> #include <tensorflow/lite/kernels/internal/reference/integer_ops/fully_connected.h> #include <tensorflow/lite/kernels/internal/reference/reference_ops.h> -#include <tensorflow/lite/kernels/internal/types.h> + +#include <vector> #include "CpuOperationUtils.h" -#endif // NN_INCLUDE_CPU_IMPLEMENTATION +#include "HalInterfaces.h" +#include "OperationResolver.h" +#include "Tracing.h" namespace android { namespace nn { @@ -47,7 +45,8 @@ namespace { -#ifdef NN_INCLUDE_CPU_IMPLEMENTATION +using namespace hal; + // executionMutex is used to protect concurrent access of non-threadsafe resources // like gemmlowp::GemmContext. // std::mutex is safe for pthreads on Android. @@ -180,7 +179,6 @@ return true; } -#endif // NN_INCLUDE_CPU_IMPLEMENTATION bool validateShapes(const Shape& input, const Shape& weights, const Shape& bias, Shape* output = nullptr) { @@ -222,15 +220,14 @@ } // namespace -Result<Version> validate(const IOperationValidationContext* context) { +bool validate(const IOperationValidationContext* context) { NN_RET_CHECK_EQ(context->getNumInputs(), kNumInputs); NN_RET_CHECK_EQ(context->getNumOutputs(), kNumOutputs); auto inputType = context->getInputType(kInputTensor); std::vector<OperandType> inExpectedTypes; std::vector<OperandType> outExpectedTypes; - auto minSupportedVersion = Version::ANDROID_OC_MR1; if (inputType == OperandType::TENSOR_FLOAT32) { - minSupportedVersion = Version::ANDROID_OC_MR1; + NN_RET_CHECK(validateHalVersion(context, HalVersion::V1_0)); inExpectedTypes = { OperandType::TENSOR_FLOAT32, OperandType::TENSOR_FLOAT32, @@ -238,7 +235,7 @@ OperandType::INT32, }; } else if (inputType == OperandType::TENSOR_FLOAT16) { - 
minSupportedVersion = Version::ANDROID_Q; + NN_RET_CHECK(validateHalVersion(context, HalVersion::V1_2)); inExpectedTypes = { OperandType::TENSOR_FLOAT16, OperandType::TENSOR_FLOAT16, @@ -255,9 +252,9 @@ bool meetsQuantizedScaleConstraintBeforeV1_2 = (outputScale > inputScale * weightsScale); if (!meetsQuantizedScaleConstraintBeforeV1_2) { - minSupportedVersion = Version::ANDROID_Q; + NN_RET_CHECK(validateHalVersion(context, HalVersion::V1_2)); } else { - minSupportedVersion = Version::ANDROID_OC_MR1; + NN_RET_CHECK(validateHalVersion(context, HalVersion::V1_0)); } inExpectedTypes = { @@ -267,7 +264,7 @@ OperandType::INT32, }; } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - minSupportedVersion = Version::ANDROID_R; + NN_RET_CHECK(validateHalVersion(context, HalVersion::V1_3)); inExpectedTypes = { OperandType::TENSOR_QUANT8_ASYMM_SIGNED, @@ -277,6 +274,7 @@ }; } else { NN_RET_CHECK_FAIL() << "Unsupported input tensor type for operation " << kOperationName; + return false; } NN_RET_CHECK(validateInputTypes(context, inExpectedTypes)); NN_RET_CHECK(validateOutputTypes(context, {inputType})); @@ -288,10 +286,9 @@ NN_RET_CHECK(validateShapes(input, weights, bias)); } - return minSupportedVersion; + return true; } -#ifdef NN_INCLUDE_CPU_IMPLEMENTATION bool prepare(IOperationExecutionContext* context) { Shape input = context->getInputShape(kInputTensor); Shape weights = context->getInputShape(kWeightsTensor); @@ -349,7 +346,6 @@ NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation " << kOperationName; } } -#endif // NN_INCLUDE_CPU_IMPLEMENTATION } // namespace fully_connected
diff --git a/common/operations/Gather.cpp b/common/operations/Gather.cpp index 5571a65..d496d6a 100644 --- a/common/operations/Gather.cpp +++ b/common/operations/Gather.cpp
@@ -16,6 +16,7 @@ #define LOG_TAG "Operations" +#include "HalInterfaces.h" #include "OperationResolver.h" #include "OperationsUtils.h" #include "Tracing.h" @@ -36,6 +37,8 @@ namespace { +using namespace hal; + template <typename T> inline bool eval(const T* inputData, const Shape& inputShape, int32_t axis, const int32_t* indicesData, const Shape& indicesShape, T* outputData) { @@ -59,7 +62,7 @@ } // namespace -Result<Version> validate(const IOperationValidationContext* context) { +bool validate(const IOperationValidationContext* context) { NN_RET_CHECK_EQ(context->getNumInputs(), kNumInputs); NN_RET_CHECK_EQ(context->getNumOutputs(), kNumOutputs); OperandType inputType = context->getInputType(kInputTensor); @@ -73,9 +76,9 @@ {inputType, OperandType::INT32, OperandType::TENSOR_INT32})); NN_RET_CHECK(validateOutputTypes(context, {inputType})); if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - return Version::ANDROID_R; + return validateHalVersion(context, HalVersion::V1_3); } else { - return Version::ANDROID_Q; + return validateHalVersion(context, HalVersion::V1_2); } }
diff --git a/common/operations/GenerateProposals.cpp b/common/operations/GenerateProposals.cpp index 15f2b52..4e3aa3f 100644 --- a/common/operations/GenerateProposals.cpp +++ b/common/operations/GenerateProposals.cpp
@@ -23,21 +23,20 @@ #include <utility> #include <vector> +#include "CpuOperationUtils.h" +#include "HalInterfaces.h" #include "OperationResolver.h" #include "OperationsUtils.h" #include "Tracing.h" -#ifdef NN_INCLUDE_CPU_IMPLEMENTATION -#include "CpuOperationUtils.h" -#endif // NN_INCLUDE_CPU_IMPLEMENTATION - namespace android { namespace nn { namespace bbox_ops { -#ifdef NN_INCLUDE_CPU_IMPLEMENTATION namespace { +using namespace hal; + struct BoxEncodingCorner { float x1, y1, x2, y2; }; @@ -187,7 +186,6 @@ } } // namespace -#endif // NN_INCLUDE_CPU_IMPLEMENTATION namespace axis_aligned_bbox_transform { @@ -202,7 +200,7 @@ constexpr uint32_t kNumOutputs = 1; constexpr uint32_t kOutputTensor = 0; -Result<Version> validate(const IOperationValidationContext* context) { +bool validate(const IOperationValidationContext* context) { NN_RET_CHECK_EQ(context->getNumInputs(), kNumInputs); NN_RET_CHECK_EQ(context->getNumOutputs(), kNumOutputs); std::vector<OperandType> inExpectedTypes; @@ -216,17 +214,18 @@ inExpectedTypes = {OperandType::TENSOR_QUANT16_ASYMM, deltaInputType, OperandType::TENSOR_INT32, OperandType::TENSOR_QUANT16_ASYMM}; } else { - return NN_ERROR() << "Unsupported input tensor type for operation " << kOperationName; + LOG(ERROR) << "Unsupported input tensor type for operation " << kOperationName; + return false; } } else { - return NN_ERROR() << "Unsupported input tensor type for operation " << kOperationName; + LOG(ERROR) << "Unsupported input tensor type for operation " << kOperationName; + return false; } NN_RET_CHECK(validateInputTypes(context, inExpectedTypes)); NN_RET_CHECK(validateOutputTypes(context, {inputType})); - return Version::ANDROID_Q; + return validateHalVersion(context, HalVersion::V1_2); } -#ifdef NN_INCLUDE_CPU_IMPLEMENTATION bool prepare(IOperationExecutionContext* context) { Shape roiShape = context->getInputShape(kRoiTensor); Shape bboxDeltasShape = context->getInputShape(kDeltaTensor); @@ -328,7 +327,6 @@ NN_RET_CHECK_FAIL() << 
"Unsupported tensor type for operation " << kOperationName; } } -#endif // NN_INCLUDE_CPU_IMPLEMENTATION } // namespace axis_aligned_bbox_transform @@ -353,7 +351,6 @@ constexpr uint32_t kOutputClassTensor = 2; constexpr uint32_t kOutputBatchesTensor = 3; -#ifdef NN_INCLUDE_CPU_IMPLEMENTATION namespace { // TODO(xusongw): Reduce code duplication with hard/soft nms path. @@ -708,9 +705,8 @@ } } // namespace -#endif // NN_INCLUDE_CPU_IMPLEMENTATION -Result<Version> validate(const IOperationValidationContext* context) { +bool validate(const IOperationValidationContext* context) { NN_RET_CHECK_EQ(context->getNumInputs(), kNumInputs); NN_RET_CHECK_EQ(context->getNumOutputs(), kNumOutputs); std::vector<OperandType> inExpectedTypes; @@ -749,13 +745,12 @@ NN_RET_CHECK(validateInputTypes(context, inExpectedTypes)); NN_RET_CHECK(validateOutputTypes(context, outExpectedTypes)); if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - return Version::ANDROID_R; + return validateHalVersion(context, HalVersion::V1_3); } else { - return Version::ANDROID_Q; + return validateHalVersion(context, HalVersion::V1_2); } } -#ifdef NN_INCLUDE_CPU_IMPLEMENTATION bool prepare(IOperationExecutionContext* context) { Shape scoreShape = context->getInputShape(kScoreTensor); Shape roiShape = context->getInputShape(kRoiTensor); @@ -908,7 +903,6 @@ NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation " << kOperationName; } } -#endif // NN_INCLUDE_CPU_IMPLEMENTATION } // namespace box_with_nms_limit @@ -934,7 +928,6 @@ constexpr uint32_t kOutputRoiTensor = 1; constexpr uint32_t kOutputBatchesTensor = 2; -#ifdef NN_INCLUDE_CPU_IMPLEMENTATION namespace { void filterBoxes(const float* roiBase, const float* imageInfoBase, float minSize, @@ -1222,9 +1215,8 @@ } } // namespace -#endif // NN_INCLUDE_CPU_IMPLEMENTATION -Result<Version> validate(const IOperationValidationContext* context) { +bool validate(const IOperationValidationContext* context) { NN_RET_CHECK_EQ(context->getNumInputs(), 
kNumInputs); NN_RET_CHECK_EQ(context->getNumOutputs(), kNumOutputs); std::vector<OperandType> inExpectedTypes; @@ -1279,13 +1271,12 @@ NN_RET_CHECK(validateInputTypes(context, inExpectedTypes)); NN_RET_CHECK(validateOutputTypes(context, outExpectedTypes)); if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - return Version::ANDROID_R; + return validateHalVersion(context, HalVersion::V1_3); } else { - return Version::ANDROID_Q; + return validateHalVersion(context, HalVersion::V1_2); } } -#ifdef NN_INCLUDE_CPU_IMPLEMENTATION bool prepare(IOperationExecutionContext* context) { bool useNchw = context->getInputValue<bool>(kLayoutScalar); Shape scoreShape = context->getInputShape(kScoreTensor); @@ -1415,7 +1406,6 @@ NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation " << kOperationName; } } -#endif // NN_INCLUDE_CPU_IMPLEMENTATION } // namespace generate_proposals @@ -1445,7 +1435,6 @@ constexpr uint32_t kOutputClassTensor = 2; constexpr uint32_t kOutputDetectionTensor = 3; -#ifdef NN_INCLUDE_CPU_IMPLEMENTATION namespace { bool detectionPostprocessFloat32( @@ -1582,9 +1571,8 @@ } } // namespace -#endif // NN_INCLUDE_CPU_IMPLEMENTATION -Result<Version> validate(const IOperationValidationContext* context) { +bool validate(const IOperationValidationContext* context) { NN_RET_CHECK_EQ(context->getNumInputs(), kNumInputs); NN_RET_CHECK_EQ(context->getNumOutputs(), kNumOutputs); std::vector<OperandType> inExpectedTypes; @@ -1612,10 +1600,9 @@ NN_RET_CHECK(validateInputTypes(context, inExpectedTypes)); NN_RET_CHECK(validateOutputTypes( context, {inputType, inputType, OperandType::TENSOR_INT32, OperandType::TENSOR_INT32})); - return Version::ANDROID_Q; + return validateHalVersion(context, HalVersion::V1_2); } -#ifdef NN_INCLUDE_CPU_IMPLEMENTATION bool prepare(IOperationExecutionContext* context) { Shape scoreShape = context->getInputShape(kScoreTensor); Shape deltasShape = context->getInputShape(kDeltaTensor); @@ -1752,7 +1739,6 @@ NN_RET_CHECK_FAIL() << 
"Unsupported tensor type for operation " << kOperationName; } } -#endif // NN_INCLUDE_CPU_IMPLEMENTATION } // namespace detection_postprocess
diff --git a/common/operations/HashtableLookup.cpp b/common/operations/HashtableLookup.cpp index 4773a17..287c866 100644 --- a/common/operations/HashtableLookup.cpp +++ b/common/operations/HashtableLookup.cpp
@@ -19,7 +19,9 @@ #include "HashtableLookup.h" #include "CpuExecutor.h" +#include "HalInterfaces.h" #include "Operations.h" + #include "Tracing.h" namespace android { @@ -27,6 +29,8 @@ namespace { +using namespace hal; + int greater(const void* a, const void* b) { return *static_cast<const int*>(a) - *static_cast<const int*>(b); }
diff --git a/common/operations/HashtableLookup.h b/common/operations/HashtableLookup.h index 1ae554f..c0921e0 100644 --- a/common/operations/HashtableLookup.h +++ b/common/operations/HashtableLookup.h
@@ -19,7 +19,7 @@ #include <vector> -#include "nnapi/Types.h" +#include "HalInterfaces.h" namespace android { namespace nn { @@ -28,7 +28,7 @@ class HashtableLookup { public: - HashtableLookup(const Operation& operation, RunTimeOperandInfo* operands); + HashtableLookup(const hal::Operation& operation, RunTimeOperandInfo* operands); bool Eval();
diff --git a/common/operations/HashtableLookupTest.cpp b/common/operations/HashtableLookupTest.cpp index 40e45f8..ff62006 100644 --- a/common/operations/HashtableLookupTest.cpp +++ b/common/operations/HashtableLookupTest.cpp
@@ -14,14 +14,13 @@ * limitations under the License. */ -#include <gmock/gmock.h> -#include <gtest/gtest.h> - -#include <vector> - #include "HashtableLookup.h" + #include "NeuralNetworksWrapper.h" +#include <gmock/gmock-matchers.h> +#include <gtest/gtest.h> + using ::testing::FloatNear; using ::testing::Matcher;
diff --git a/common/operations/HeatmapMaxKeypoint.cpp b/common/operations/HeatmapMaxKeypoint.cpp index b9118e4..3608ca5 100644 --- a/common/operations/HeatmapMaxKeypoint.cpp +++ b/common/operations/HeatmapMaxKeypoint.cpp
@@ -21,14 +21,12 @@ #include <cmath> #include <vector> +#include "CpuOperationUtils.h" +#include "HalInterfaces.h" #include "OperationResolver.h" #include "OperationsUtils.h" #include "Tracing.h" -#ifdef NN_INCLUDE_CPU_IMPLEMENTATION -#include "CpuOperationUtils.h" -#endif // NN_INCLUDE_CPU_IMPLEMENTATION - namespace android { namespace nn { namespace heatmap_max_keypoint { @@ -44,9 +42,10 @@ constexpr uint32_t kOutputScoreTensor = 0; constexpr uint32_t kOutputKeypointTensor = 1; -#ifdef NN_INCLUDE_CPU_IMPLEMENTATION namespace { +using namespace hal; + // This function uses Taylor expansion up to the quatratic term to approximate bicubic // upscaling result. // 2nd order Taylor expansion: D(x) = D - b'x + 1/2 * x'Ax @@ -227,15 +226,14 @@ } } // namespace -#endif // NN_INCLUDE_CPU_IMPLEMENTATION -Result<Version> validate(const IOperationValidationContext* context) { +bool validate(const IOperationValidationContext* context) { NN_RET_CHECK_EQ(context->getNumInputs(), kNumInputs); NN_RET_CHECK_EQ(context->getNumOutputs(), kNumOutputs); std::vector<OperandType> inExpectedTypes; std::vector<OperandType> outExpectedTypes; auto inputType = context->getInputType(kHeatmapTensor); - auto minSupportedVersion = Version::ANDROID_Q; + auto minSupportedHalVersion = HalVersion::V1_2; if (inputType == OperandType::TENSOR_FLOAT32 || inputType == OperandType::TENSOR_FLOAT16) { inExpectedTypes = {inputType, inputType, OperandType::BOOL}; outExpectedTypes = {inputType, inputType}; @@ -248,16 +246,16 @@ OperandType::TENSOR_QUANT16_ASYMM, OperandType::BOOL}; outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM_SIGNED, OperandType::TENSOR_QUANT16_ASYMM}; - minSupportedVersion = Version::ANDROID_R; + minSupportedHalVersion = HalVersion::V1_3; } else { - return NN_ERROR() << "Unsupported input tensor type for operation " << kOperationName; + LOG(ERROR) << "Unsupported input tensor type for operation " << kOperationName; + return false; } NN_RET_CHECK(validateInputTypes(context, 
inExpectedTypes)); NN_RET_CHECK(validateOutputTypes(context, outExpectedTypes)); - return minSupportedVersion; + return validateHalVersion(context, minSupportedHalVersion); } -#ifdef NN_INCLUDE_CPU_IMPLEMENTATION bool prepare(IOperationExecutionContext* context) { bool layout = context->getInputValue<bool>(kLayoutScalar); Shape heatmapShape = context->getInputShape(kHeatmapTensor); @@ -361,7 +359,6 @@ NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation " << kOperationName; } } -#endif // NN_INCLUDE_CPU_IMPLEMENTATION } // namespace heatmap_max_keypoint
diff --git a/common/operations/InstanceNormalization.cpp b/common/operations/InstanceNormalization.cpp index 54bcfc6..75b907b 100644 --- a/common/operations/InstanceNormalization.cpp +++ b/common/operations/InstanceNormalization.cpp
@@ -19,13 +19,11 @@ #include <cmath> #include <vector> +#include "CpuOperationUtils.h" +#include "HalInterfaces.h" #include "OperationResolver.h" #include "Tracing.h" -#ifdef NN_INCLUDE_CPU_IMPLEMENTATION -#include "CpuOperationUtils.h" -#endif // NN_INCLUDE_CPU_IMPLEMENTATION - namespace android { namespace nn { namespace instance_normalization { @@ -42,9 +40,10 @@ constexpr uint32_t kNumOutputs = 1; constexpr uint32_t kOutputTensor = 0; -#ifdef NN_INCLUDE_CPU_IMPLEMENTATION namespace { +using namespace hal; + template <typename T> inline bool instanceNormNhwc(const T* inputData, const Shape& inputShape, T gamma, T beta, T epsilon, T* outputData, const Shape& outputShape) { @@ -102,9 +101,8 @@ } } // namespace -#endif // NN_INCLUDE_CPU_IMPLEMENTATION -Result<Version> validate(const IOperationValidationContext* context) { +bool validate(const IOperationValidationContext* context) { NN_RET_CHECK_EQ(context->getNumInputs(), kNumInputs); NN_RET_CHECK_EQ(context->getNumOutputs(), kNumOutputs); std::vector<OperandType> inExpectedTypes; @@ -116,14 +114,14 @@ inExpectedTypes = {OperandType::TENSOR_FLOAT16, OperandType::FLOAT16, OperandType::FLOAT16, OperandType::FLOAT16, OperandType::BOOL}; } else { - return NN_ERROR() << "Unsupported input tensor type for operation " << kOperationName; + LOG(ERROR) << "Unsupported input tensor type for operation " << kOperationName; + return false; } NN_RET_CHECK(validateInputTypes(context, inExpectedTypes)); NN_RET_CHECK(validateOutputTypes(context, {inputType})); - return Version::ANDROID_Q; + return validateHalVersion(context, HalVersion::V1_2); } -#ifdef NN_INCLUDE_CPU_IMPLEMENTATION bool prepare(IOperationExecutionContext* context) { Shape input = context->getInputShape(kInputTensor); NN_RET_CHECK_EQ(getNumberOfDimensions(input), 4); @@ -154,7 +152,6 @@ NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation " << kOperationName; } } -#endif // NN_INCLUDE_CPU_IMPLEMENTATION } // namespace instance_normalization
diff --git a/common/operations/L2Normalization.cpp b/common/operations/L2Normalization.cpp index 1715beb..1f0c9d0 100644 --- a/common/operations/L2Normalization.cpp +++ b/common/operations/L2Normalization.cpp
@@ -16,18 +16,16 @@ #define LOG_TAG "Operations" -#include <algorithm> -#include <vector> - -#include "OperationResolver.h" -#include "Tracing.h" - -#ifdef NN_INCLUDE_CPU_IMPLEMENTATION #include <tensorflow/lite/kernels/internal/optimized/optimized_ops.h> #include <tensorflow/lite/kernels/internal/reference/integer_ops/l2normalization.h> +#include <algorithm> +#include <vector> + #include "CpuOperationUtils.h" -#endif // NN_INCLUDE_CPU_IMPLEMENTATION +#include "HalInterfaces.h" +#include "OperationResolver.h" +#include "Tracing.h" namespace android { namespace nn { @@ -42,9 +40,10 @@ constexpr uint32_t kNumOutputs = 1; constexpr uint32_t kOutputTensor = 0; -#ifdef NN_INCLUDE_CPU_IMPLEMENTATION namespace { +using namespace hal; + inline bool l2normFloat32Impl(const float* inputData, const Shape& inputShape, int32_t axis, float* outputData, const Shape& outputShape) { NNTRACE_TRANS("l2normFloat32"); @@ -199,41 +198,37 @@ } } // namespace -#endif // NN_INCLUDE_CPU_IMPLEMENTATION -Result<Version> validate(const IOperationValidationContext* context) { +bool validate(const IOperationValidationContext* context) { NN_RET_CHECK(context->getNumInputs() == kNumInputs || context->getNumInputs() == kNumInputs - 1); NN_RET_CHECK_EQ(context->getNumOutputs(), kNumOutputs); const OperandType inputType = context->getInputType(kInputTensor); std::vector<OperandType> inExpectedTypes = {inputType}; - auto minSupportedVersion = Version::ANDROID_OC_MR1; if (inputType == OperandType::TENSOR_FLOAT16 || inputType == OperandType::TENSOR_QUANT8_ASYMM) { - minSupportedVersion = Version::ANDROID_Q; + NN_RET_CHECK(validateHalVersion(context, HalVersion::V1_2)); } else if (inputType == OperandType::TENSOR_FLOAT32) { - minSupportedVersion = Version::ANDROID_OC_MR1; + NN_RET_CHECK(validateHalVersion(context, HalVersion::V1_0)); } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - minSupportedVersion = Version::ANDROID_R; + NN_RET_CHECK(validateHalVersion(context, HalVersion::V1_3)); 
} else { NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation " << kOperationName; } if (context->getNumInputs() == kNumInputs) { inExpectedTypes.push_back(OperandType::INT32); - minSupportedVersion = Version::ANDROID_Q; + NN_RET_CHECK(validateHalVersion(context, HalVersion::V1_2)); } else if (context->getInputShape(kInputTensor).dimensions.size() != 4) { - minSupportedVersion = Version::ANDROID_Q; + NN_RET_CHECK(validateHalVersion(context, HalVersion::V1_2)); } const Shape& input = context->getInputShape(kInputTensor); if (hasKnownRank(input)) { NN_RET_CHECK_LE(getNumberOfDimensions(input), 4); } - NN_RET_CHECK(validateInputTypes(context, inExpectedTypes)); - NN_RET_CHECK(validateOutputTypes(context, {inputType})); - return minSupportedVersion; + return validateInputTypes(context, inExpectedTypes) && + validateOutputTypes(context, {inputType}); } -#ifdef NN_INCLUDE_CPU_IMPLEMENTATION bool prepare(IOperationExecutionContext* context) { const Shape& input = context->getInputShape(kInputTensor); int32_t numDimensions = getNumberOfDimensions(input); @@ -289,7 +284,6 @@ NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation " << kOperationName; } } -#endif // NN_INCLUDE_CPU_IMPLEMENTATION } // namespace l2_norm
diff --git a/common/operations/LSHProjection.cpp b/common/operations/LSHProjection.cpp index c3f051d..bdb106e 100644 --- a/common/operations/LSHProjection.cpp +++ b/common/operations/LSHProjection.cpp
@@ -18,18 +18,19 @@ #include "LSHProjection.h" -#include <utils/hash/farmhash.h> - -#include <memory> - #include "CpuExecutor.h" -#include "LegacyUtils.h" +#include "HalInterfaces.h" #include "Tracing.h" -#include "nnapi/Types.h" +#include "Utils.h" + +#include <utils/hash/farmhash.h> +#include <memory> namespace android { namespace nn { +using namespace hal; + LSHProjection::LSHProjection(const Operation& operation, RunTimeOperandInfo* operands) { input_ = GetInput(operation, operands, kInputTensor); weight_ = GetInput(operation, operands, kWeightTensor); @@ -111,7 +112,7 @@ int64_t hash_signature = farmhash::Fingerprint64(key.get(), key_bytes); double running_value = static_cast<double>(hash_signature); input_ptr += input_item_bytes; - if (weight->lifetime == Operand::LifeTime::NO_VALUE) { + if (weight->lifetime == OperandLifeTime::NO_VALUE) { score += running_value; } else { score += static_cast<double>(reinterpret_cast<T*>(weight->buffer)[i]) * running_value;
diff --git a/common/operations/LSHProjection.h b/common/operations/LSHProjection.h index 3a953a0..520f58a 100644 --- a/common/operations/LSHProjection.h +++ b/common/operations/LSHProjection.h
@@ -19,7 +19,7 @@ #include <vector> -#include "nnapi/Types.h" +#include "HalInterfaces.h" namespace android { namespace nn { @@ -36,9 +36,9 @@ class LSHProjection { public: - LSHProjection(const Operation& operation, RunTimeOperandInfo* operands); + LSHProjection(const hal::Operation& operation, RunTimeOperandInfo* operands); - static bool Prepare(const Operation& operation, RunTimeOperandInfo* operands, + static bool Prepare(const hal::Operation& operation, RunTimeOperandInfo* operands, Shape* outputShape); template <typename T> bool Eval();
diff --git a/common/operations/LSHProjectionTest.cpp b/common/operations/LSHProjectionTest.cpp index 97313d6..8e27b64 100644 --- a/common/operations/LSHProjectionTest.cpp +++ b/common/operations/LSHProjectionTest.cpp
@@ -14,14 +14,14 @@ * limitations under the License. */ -#include <gmock/gmock.h> -#include <gtest/gtest.h> - -#include <vector> - #include "LSHProjection.h" + #include "NeuralNetworksWrapper.h" +#include <gmock/gmock.h> +#include <gmock/gmock-matchers.h> +#include <gtest/gtest.h> + using ::testing::FloatNear; using ::testing::Matcher;
diff --git a/common/operations/LSTM.cpp b/common/operations/LSTM.cpp index db7eb80..ba5d46a 100644 --- a/common/operations/LSTM.cpp +++ b/common/operations/LSTM.cpp
@@ -18,22 +18,22 @@ #include "LSTM.h" -#include <tensorflow/lite/kernels/internal/reference/portable_tensor_utils.h> - #include <vector> #include "CpuExecutor.h" #include "CpuOperationUtils.h" -#include "LegacyUtils.h" +#include "HalInterfaces.h" #include "OperationsUtils.h" #include "Tracing.h" -#include "nnapi/Types.h" +#include "Utils.h" namespace android { namespace nn { namespace { +using namespace hal; + template <typename T> inline T* GetBuffer(RunTimeOperandInfo* operand) { return reinterpret_cast<T*>(operand->buffer); @@ -113,7 +113,7 @@ } else { // For LSTM from HAL v1.0 assign operands with no values static RunTimeOperandInfo no_value; - no_value.lifetime = Operand::LifeTime::NO_VALUE; + no_value.lifetime = OperandLifeTime::NO_VALUE; input_layer_norm_weights_ = &no_value; forget_layer_norm_weights_ = &no_value; @@ -221,8 +221,8 @@ // omitted ones can be omited in case CIFG LSTM is used. params->use_layer_norm = !IsNullInput(output_layer_norm_weights); - params->use_projection_weight = (projection_weights->lifetime != Operand::LifeTime::NO_VALUE); - params->use_projection_bias = (projection_bias->lifetime != Operand::LifeTime::NO_VALUE); + params->use_projection_weight = (projection_weights->lifetime != OperandLifeTime::NO_VALUE); + params->use_projection_bias = (projection_bias->lifetime != OperandLifeTime::NO_VALUE); // Make sure the input gate bias is present only when not a CIFG-LSTM. if (params->use_cifg) { @@ -477,8 +477,8 @@ : nullptr; float* outputCurrentTimeStep = outputData + (forwardSequence ? 0 : batchOutputSize * (maxTime - 1)); - const int batchInputDelta = (forwardSequence ? 1 : -1) * static_cast<int>(batchInputSize); - const int batchOutputDelta = (forwardSequence ? 1 : -1) * static_cast<int>(batchOutputSize); + const int batchInputDelta = forwardSequence ? batchInputSize : -batchInputSize; + const int batchOutputDelta = forwardSequence ? 
batchOutputSize : -batchOutputSize; for (int t = 0; t < maxTime; ++t) { LSTMStep(params, inputCurrentTimeStep, batchInputShape, input_to_input_weights_buffer, @@ -715,8 +715,8 @@ : nullptr; float* outputCurrentTimeStep = outputData + (forwardSequence ? 0 : batchOutputSize * (maxTime - 1)); - const int batchInputDelta = (forwardSequence ? 1 : -1) * static_cast<int>(batchInputSize); - const int batchOutputDelta = (forwardSequence ? 1 : -1) * static_cast<int>(batchOutputSize); + const int batchInputDelta = forwardSequence ? batchInputSize : -batchInputSize; + const int batchOutputDelta = forwardSequence ? batchOutputSize : -batchOutputSize; for (int t = 0; t < maxTime; ++t) { LSTMStep(params, inputCurrentTimeStep, batchInputShape, @@ -832,53 +832,56 @@ // For each batch and cell: compute input_weight * input. if (!params.use_cifg) { - tflite::tensor_utils::MatrixBatchVectorMultiplyAccumulate(input_to_input_weights_buffer, - n_cell, n_input, input_buffer, - n_batch, input_gate_scratch); + tflite::tensor_utils::MatrixBatchVectorMultiplyAccumulate( + input_to_input_weights_buffer, n_cell, n_input, input_buffer, n_batch, + input_gate_scratch, /*result_stride*/ 1); } - tflite::tensor_utils::MatrixBatchVectorMultiplyAccumulate(input_to_forget_weights_buffer, - n_cell, n_input, input_buffer, - n_batch, forget_gate_scratch); tflite::tensor_utils::MatrixBatchVectorMultiplyAccumulate( - input_to_cell_weights_buffer, n_cell, n_input, input_buffer, n_batch, cell_scratch); - tflite::tensor_utils::MatrixBatchVectorMultiplyAccumulate(input_to_output_weights_buffer, - n_cell, n_input, input_buffer, - n_batch, output_gate_scratch); + input_to_forget_weights_buffer, n_cell, n_input, input_buffer, n_batch, + forget_gate_scratch, /*result_stride*/ 1); + tflite::tensor_utils::MatrixBatchVectorMultiplyAccumulate(input_to_cell_weights_buffer, n_cell, + n_input, input_buffer, n_batch, + cell_scratch, /*result_stride*/ 1); + tflite::tensor_utils::MatrixBatchVectorMultiplyAccumulate( + 
input_to_output_weights_buffer, n_cell, n_input, input_buffer, n_batch, + output_gate_scratch, /*result_stride*/ 1); // If auxiliary input is available then compute aux_input_weight * aux_input if (aux_input_buffer != nullptr) { if (!params.use_cifg) { tflite::tensor_utils::MatrixBatchVectorMultiplyAccumulate( aux_input_to_input_weights_buffer, n_cell, n_aux_input, aux_input_buffer, - n_batch, input_gate_scratch); + n_batch, input_gate_scratch, + /*result_stride=*/1); } tflite::tensor_utils::MatrixBatchVectorMultiplyAccumulate( aux_input_to_forget_weights_buffer, n_cell, n_aux_input, aux_input_buffer, n_batch, - forget_gate_scratch); + forget_gate_scratch, /*result_stride=*/1); tflite::tensor_utils::MatrixBatchVectorMultiplyAccumulate( aux_input_to_cell_weights_buffer, n_cell, n_aux_input, aux_input_buffer, n_batch, - cell_scratch); + cell_scratch, /*result_stride=*/1); tflite::tensor_utils::MatrixBatchVectorMultiplyAccumulate( aux_input_to_output_weights_buffer, n_cell, n_aux_input, aux_input_buffer, n_batch, - output_gate_scratch); + output_gate_scratch, /*result_stride=*/1); } // For each batch and cell: compute recurrent_weight * output_state. 
if (!params.use_cifg) { tflite::tensor_utils::MatrixBatchVectorMultiplyAccumulate( recurrent_to_input_weights_buffer, n_cell, n_output, output_state_in_buffer, - n_batch, input_gate_scratch); + n_batch, input_gate_scratch, + /*result_stride*/ 1); } tflite::tensor_utils::MatrixBatchVectorMultiplyAccumulate( recurrent_to_forget_weights_buffer, n_cell, n_output, output_state_in_buffer, n_batch, - forget_gate_scratch); + forget_gate_scratch, /*result_stride*/ 1); tflite::tensor_utils::MatrixBatchVectorMultiplyAccumulate( recurrent_to_cell_weights_buffer, n_cell, n_output, output_state_in_buffer, n_batch, - cell_scratch); + cell_scratch, /*result_stride*/ 1); tflite::tensor_utils::MatrixBatchVectorMultiplyAccumulate( recurrent_to_output_weights_buffer, n_cell, n_output, output_state_in_buffer, n_batch, - output_gate_scratch); + output_gate_scratch, /*result_stride*/ 1); // For each batch and cell: update input gate. if (!params.use_cifg) { @@ -939,8 +942,8 @@ cell_scratch, input_gate_scratch, n_batch * n_cell, cell_state_out_buffer); } if (params.cell_clip > 0.0) { - tflite::tensor_utils::CwiseClipping(cell_state_out_buffer, n_batch * n_cell, - params.cell_clip); + tflite::tensor_utils::ClipVector(cell_state_out_buffer, n_batch * n_cell, params.cell_clip, + cell_state_out_buffer); } // For each batch and cell: update the output gate. @@ -975,10 +978,11 @@ } tflite::tensor_utils::MatrixBatchVectorMultiplyAccumulate( projection_weights_buffer, n_output, n_cell, output_gate_scratch, n_batch, - output_buffer); + output_buffer, + /*result_stride*/ 1); if (params.proj_clip > 0.0) { - tflite::tensor_utils::CwiseClipping(output_buffer, n_batch * n_output, - params.proj_clip); + tflite::tensor_utils::ClipVector(output_buffer, n_batch * n_output, params.proj_clip, + output_buffer); } } else { std::copy_n(output_gate_scratch, n_batch * n_output, output_buffer);
diff --git a/common/operations/LSTM.h b/common/operations/LSTM.h index dc6a43c..b48c3df 100644 --- a/common/operations/LSTM.h +++ b/common/operations/LSTM.h
@@ -24,7 +24,7 @@ #include <vector> #include "ActivationFunctor.h" -#include "nnapi/Types.h" +#include "HalInterfaces.h" namespace android { namespace nn { @@ -48,9 +48,9 @@ class LSTMCell { public: - LSTMCell(const Operation& operation, RunTimeOperandInfo* operands); + LSTMCell(const hal::Operation& operation, RunTimeOperandInfo* operands); - bool Prepare(const Operation& operation, RunTimeOperandInfo* operands, Shape* scratchShape, + bool Prepare(const hal::Operation& operation, RunTimeOperandInfo* operands, Shape* scratchShape, Shape* outputStateShape, Shape* cellStateShape, Shape* outputShape); bool Eval();
diff --git a/common/operations/LSTMTest.cpp b/common/operations/LSTMTest.cpp index 7ef0571..6fbc5d1 100644 --- a/common/operations/LSTMTest.cpp +++ b/common/operations/LSTMTest.cpp
@@ -14,13 +14,14 @@ * limitations under the License. */ -#include <gmock/gmock.h> -#include <gtest/gtest.h> +#include "LSTM.h" + +#include "NeuralNetworksWrapper.h" #include <vector> -#include "LSTM.h" -#include "NeuralNetworksWrapper.h" +#include <gmock/gmock-matchers.h> +#include <gtest/gtest.h> namespace android { namespace nn {
diff --git a/common/operations/LayerNormLSTMTest.cpp b/common/operations/LayerNormLSTMTest.cpp index deed9e2..1cac0df 100644 --- a/common/operations/LayerNormLSTMTest.cpp +++ b/common/operations/LayerNormLSTMTest.cpp
@@ -13,17 +13,17 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -#include <android-base/logging.h> -#include <gmock/gmock.h> -#include <gtest/gtest.h> +#include "LSTM.h" +#include "NeuralNetworksWrapper.h" + +#include <android-base/logging.h> +#include <gmock/gmock-matchers.h> +#include <gtest/gtest.h> #include <sstream> #include <string> #include <vector> -#include "LSTM.h" -#include "NeuralNetworksWrapper.h" - namespace android { namespace nn { namespace wrapper {
diff --git a/common/operations/LocalResponseNormalization.cpp b/common/operations/LocalResponseNormalization.cpp index d286924..40220e1 100644 --- a/common/operations/LocalResponseNormalization.cpp +++ b/common/operations/LocalResponseNormalization.cpp
@@ -16,18 +16,16 @@ #define LOG_TAG "Operations" +#include <tensorflow/lite/kernels/internal/optimized/optimized_ops.h> + #include <algorithm> #include <vector> +#include "CpuOperationUtils.h" +#include "HalInterfaces.h" #include "OperationResolver.h" #include "Tracing.h" -#ifdef NN_INCLUDE_CPU_IMPLEMENTATION -#include <tensorflow/lite/kernels/internal/optimized/optimized_ops.h> - -#include "CpuOperationUtils.h" -#endif // NN_INCLUDE_CPU_IMPLEMENTATION - namespace android { namespace nn { namespace local_response_norm { @@ -45,9 +43,10 @@ constexpr uint32_t kNumOutputs = 1; constexpr uint32_t kOutputTensor = 0; -#ifdef NN_INCLUDE_CPU_IMPLEMENTATION namespace { +using namespace hal; + inline bool localResponseNormFloat32Impl(const float* inputData, const Shape& inputShape, int32_t radius, float bias, float alpha, float beta, int32_t axis, float* outputData, @@ -88,7 +87,6 @@ const Shape& outputShape) { int32_t ndim = getNumberOfDimensions(inputShape); NN_CHECK(handleNegativeAxis(inputShape, &axis)); - radius = std::min(radius, static_cast<int32_t>(inputShape.dimensions[axis])); // TFLite optimized implementation only supports computation along the last axis if (axis == ndim - 1) { NNTRACE_COMP("optimized_ops::LocalResponseNormalization::float"); @@ -134,9 +132,8 @@ } } // namespace -#endif // NN_INCLUDE_CPU_IMPLEMENTATION -Result<Version> validate(const IOperationValidationContext* context) { +bool validate(const IOperationValidationContext* context) { NN_RET_CHECK(context->getNumInputs() == kNumInputs || context->getNumInputs() == kNumInputs - 1); NN_RET_CHECK_EQ(context->getNumOutputs(), kNumOutputs); @@ -144,16 +141,15 @@ const OperandType inputType = context->getInputType(kInputTensor); std::vector<OperandType> inExpectedTypes; std::vector<OperandType> outExpectedTypes; - auto minSupportedVersion = Version::ANDROID_OC_MR1; if (inputType == OperandType::TENSOR_FLOAT32) { - minSupportedVersion = Version::ANDROID_OC_MR1; + NN_RET_CHECK(validateHalVersion(context, 
HalVersion::V1_0)); inExpectedTypes = { OperandType::TENSOR_FLOAT32, OperandType::INT32, OperandType::FLOAT32, OperandType::FLOAT32, OperandType::FLOAT32, }; outExpectedTypes = {OperandType::TENSOR_FLOAT32}; } else if (inputType == OperandType::TENSOR_FLOAT16) { - minSupportedVersion = Version::ANDROID_Q; + NN_RET_CHECK(validateHalVersion(context, HalVersion::V1_2)); inExpectedTypes = { OperandType::TENSOR_FLOAT16, OperandType::INT32, OperandType::FLOAT16, OperandType::FLOAT16, OperandType::FLOAT16, @@ -165,21 +161,19 @@ if (context->getNumInputs() == kNumInputs) { inExpectedTypes.push_back(OperandType::INT32); - minSupportedVersion = Version::ANDROID_Q; + NN_RET_CHECK(validateHalVersion(context, HalVersion::V1_2)); } else if (context->getInputShape(kInputTensor).dimensions.size() != 4) { - minSupportedVersion = Version::ANDROID_Q; + NN_RET_CHECK(validateHalVersion(context, HalVersion::V1_2)); } const Shape& input = context->getInputShape(kInputTensor); if (hasKnownRank(input)) { NN_RET_CHECK_LE(getNumberOfDimensions(input), 4); } - NN_RET_CHECK(validateInputTypes(context, inExpectedTypes)); - NN_RET_CHECK(validateOutputTypes(context, {inputType})); - return minSupportedVersion; + return validateInputTypes(context, inExpectedTypes) && + validateOutputTypes(context, {inputType}); } -#ifdef NN_INCLUDE_CPU_IMPLEMENTATION bool prepare(IOperationExecutionContext* context) { const Shape& input = context->getInputShape(kInputTensor); int32_t numDimensions = getNumberOfDimensions(input); @@ -189,8 +183,6 @@ NN_RET_CHECK_LE(numDimensions, 4); NN_RET_CHECK_GE(axis, -numDimensions); NN_RET_CHECK_LT(axis, numDimensions); - const int32_t radius = context->getInputValue<int32_t>(kRadiusScalar); - NN_RET_CHECK_GE(radius, 0); return context->setOutputShape(kOutputTensor, input); } @@ -204,7 +196,6 @@ NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation " << kOperationName; } } -#endif // NN_INCLUDE_CPU_IMPLEMENTATION } // namespace local_response_norm
diff --git a/common/operations/LogSoftmax.cpp b/common/operations/LogSoftmax.cpp index 6fe934a..4132ef9 100644 --- a/common/operations/LogSoftmax.cpp +++ b/common/operations/LogSoftmax.cpp
@@ -16,18 +16,19 @@ #define LOG_TAG "Operations" -#include <algorithm> -#include <cmath> -#include <vector> - +#include "HalInterfaces.h" #include "OperationResolver.h" #include "OperationsUtils.h" #include "Tracing.h" +#include <cmath> + namespace android { namespace nn { namespace log_softmax { +using namespace hal; + constexpr char kOperationName[] = "LOG_SOFTMAX"; constexpr uint32_t kNumInputs = 3; @@ -70,7 +71,7 @@ return true; } -Result<Version> validate(const IOperationValidationContext* context) { +bool validate(const IOperationValidationContext* context) { NN_RET_CHECK_EQ(context->getNumInputs(), kNumInputs); NN_RET_CHECK_EQ(context->getNumOutputs(), kNumOutputs); OperandType inputType = context->getInputType(kInputTensor); @@ -83,11 +84,12 @@ inExpectedTypes = {OperandType::TENSOR_FLOAT16, OperandType::FLOAT16, OperandType::INT32}; outExpectedTypes = {OperandType::TENSOR_FLOAT16}; } else { - return NN_ERROR() << "Unsupported input tensor type for operation " << kOperationName; + LOG(ERROR) << "Unsupported input tensor type for operation " << kOperationName; + return false; } NN_RET_CHECK(validateInputTypes(context, inExpectedTypes)); NN_RET_CHECK(validateOutputTypes(context, outExpectedTypes)); - return Version::ANDROID_Q; + return validateHalVersion(context, HalVersion::V1_2); } bool prepare(IOperationExecutionContext* context) {
diff --git a/common/operations/LogicalAndOr.cpp b/common/operations/LogicalAndOr.cpp index e1927a5..6ada724 100644 --- a/common/operations/LogicalAndOr.cpp +++ b/common/operations/LogicalAndOr.cpp
@@ -16,9 +16,7 @@ #define LOG_TAG "Operations" -#include <functional> -#include <vector> - +#include "HalInterfaces.h" #include "IndexedShapeWrapper.h" #include "OperationResolver.h" #include "OperationsUtils.h" @@ -36,6 +34,8 @@ namespace { +using namespace hal; + bool compute(const std::function<bool(bool, bool)>& func, const bool8* aData, const Shape& aShape, const bool8* bData, const Shape& bShape, bool8* outputData, const Shape& outputShape) { IndexedShapeWrapper aShapeIndexed(aShape); @@ -60,7 +60,7 @@ } // namespace -Result<Version> validate(const IOperationValidationContext* context) { +bool validate(const IOperationValidationContext* context) { NN_RET_CHECK_EQ(context->getNumInputs(), kNumInputs); NN_RET_CHECK_EQ(context->getNumOutputs(), kNumOutputs); OperandType inputType = context->getInputType(kInputTensor1); @@ -68,7 +68,7 @@ << "Unsupported tensor type for a logical operation"; NN_RET_CHECK(validateInputTypes(context, {inputType, inputType})); NN_RET_CHECK(validateOutputTypes(context, {inputType})); - return Version::ANDROID_Q; + return validateHalVersion(context, HalVersion::V1_2); } bool prepare(IOperationExecutionContext* context) {
diff --git a/common/operations/LogicalNot.cpp b/common/operations/LogicalNot.cpp index b93e71b..8b41813 100644 --- a/common/operations/LogicalNot.cpp +++ b/common/operations/LogicalNot.cpp
@@ -16,6 +16,7 @@ #define LOG_TAG "Operations" +#include "HalInterfaces.h" #include "OperationResolver.h" #include "OperationsUtils.h" @@ -31,6 +32,8 @@ namespace { +using namespace hal; + bool compute(const bool8* input, const Shape& shape, bool8* output) { const auto size = getNumberOfElements(shape); for (uint32_t i = 0; i < size; ++i) { @@ -41,7 +44,7 @@ } // namespace -Result<Version> validate(const IOperationValidationContext* context) { +bool validate(const IOperationValidationContext* context) { NN_RET_CHECK_EQ(context->getNumInputs(), kNumInputs); NN_RET_CHECK_EQ(context->getNumOutputs(), kNumOutputs); OperandType inputType = context->getInputType(kInputTensor); @@ -49,7 +52,7 @@ << "Unsupported tensor type for LOGICAL_NOT"; NN_RET_CHECK(validateInputTypes(context, {inputType})); NN_RET_CHECK(validateOutputTypes(context, {inputType})); - return Version::ANDROID_Q; + return validateHalVersion(context, HalVersion::V1_2); } bool prepare(IOperationExecutionContext* context) {
diff --git a/common/operations/MaximumMinimum.cpp b/common/operations/MaximumMinimum.cpp index 0b27e27..91a4bb0 100644 --- a/common/operations/MaximumMinimum.cpp +++ b/common/operations/MaximumMinimum.cpp
@@ -16,11 +16,11 @@ #define LOG_TAG "Operations" -#include "MaximumMinimum.h" - #include <algorithm> #include <vector> +#include "MaximumMinimum.h" +#include "HalInterfaces.h" #include "IndexedShapeWrapper.h" #include "OperationsUtils.h" #include "Tracing.h" @@ -31,6 +31,8 @@ namespace { +using namespace hal; + template <typename T> bool evalGeneric(const T* aData, const Shape& aShape, const T* bData, const Shape& bShape, bool isMinimum, T* outputData, const Shape& outputShape) { @@ -122,7 +124,7 @@ reinterpret_cast<int8_t*>(output), outputShape); } default: { - LOG(ERROR) << "Unsupported data type: " << shape1.type; + LOG(ERROR) << "Unsupported data type: " << toString(shape1.type); return false; } }
diff --git a/common/operations/Multinomial.cpp b/common/operations/Multinomial.cpp index a4cc3bf..7e1d2c6 100644 --- a/common/operations/Multinomial.cpp +++ b/common/operations/Multinomial.cpp
@@ -18,29 +18,27 @@ #include "Multinomial.h" -#include <algorithm> -#include <limits> -#include <vector> - #include "CpuExecutor.h" +#include "CpuOperationUtils.h" +#include "HalInterfaces.h" #include "Tracing.h" -#ifdef NN_INCLUDE_CPU_IMPLEMENTATION -#include <tensorflow/lite/kernels/internal/tensor_utils.h> - -#include <unsupported/Eigen/CXX11/Tensor> - -#include "CpuOperationUtils.h" #include "guarded_philox_random.h" #include "philox_random.h" #include "simple_philox.h" -#endif // NN_INCLUDE_CPU_IMPLEMENTATION + +#include <algorithm> +#include <limits> +#include <unsupported/Eigen/CXX11/Tensor> +#include <vector> namespace android { namespace nn { namespace { +using namespace hal; + template <typename T> inline T* GetBuffer(RunTimeOperandInfo* operand) { return reinterpret_cast<T*>(operand->buffer);
diff --git a/common/operations/Multinomial.h b/common/operations/Multinomial.h index ba0b114..0f5434e 100644 --- a/common/operations/Multinomial.h +++ b/common/operations/Multinomial.h
@@ -17,11 +17,13 @@ #ifndef ANDROID_FRAMEWORKS_ML_NN_COMMON_OPERATIONS_MULTINOMIAL_H #define ANDROID_FRAMEWORKS_ML_NN_COMMON_OPERATIONS_MULTINOMIAL_H +#include <tensorflow/lite/kernels/internal/tensor_utils.h> + #include <algorithm> #include <cmath> #include <vector> -#include "nnapi/Types.h" +#include "HalInterfaces.h" namespace android { namespace nn { @@ -31,9 +33,9 @@ class Multinomial { public: - Multinomial(const Operation& operation, RunTimeOperandInfo* operands); + Multinomial(const hal::Operation& operation, RunTimeOperandInfo* operands); - static bool Prepare(const Operation& operation, RunTimeOperandInfo* operands, + static bool Prepare(const hal::Operation& operation, RunTimeOperandInfo* operands, Shape* outputShape); bool Eval();
diff --git a/common/operations/MultinomialTest.cpp b/common/operations/MultinomialTest.cpp index 85621a8..e34de63 100644 --- a/common/operations/MultinomialTest.cpp +++ b/common/operations/MultinomialTest.cpp
@@ -14,17 +14,17 @@ * limitations under the License. */ -#include <gmock/gmock.h> -#include <gtest/gtest.h> - -#include <unsupported/Eigen/CXX11/Tensor> -#include <vector> - #include "Multinomial.h" + +#include "HalInterfaces.h" #include "NeuralNetworksWrapper.h" #include "philox_random.h" #include "simple_philox.h" +#include <gmock/gmock-matchers.h> +#include <gtest/gtest.h> +#include <unsupported/Eigen/CXX11/Tensor> + namespace android { namespace nn { namespace wrapper {
diff --git a/common/operations/Neg.cpp b/common/operations/Neg.cpp index 39b58b9..48d962c 100644 --- a/common/operations/Neg.cpp +++ b/common/operations/Neg.cpp
@@ -16,12 +16,13 @@ #define LOG_TAG "Operations" -#include <cmath> - +#include "HalInterfaces.h" #include "OperationResolver.h" #include "OperationsUtils.h" #include "Tracing.h" +#include <cmath> + namespace android { namespace nn { namespace neg { @@ -36,6 +37,8 @@ namespace { +using namespace hal; + template <typename T> inline bool compute(const T* input, const Shape& shape, T* output) { const auto size = getNumberOfElements(shape); @@ -47,7 +50,7 @@ } // namespace -Result<Version> validate(const IOperationValidationContext* context) { +bool validate(const IOperationValidationContext* context) { NN_RET_CHECK_EQ(context->getNumInputs(), kNumInputs); NN_RET_CHECK_EQ(context->getNumOutputs(), kNumOutputs); OperandType inputType = context->getInputType(kInputTensor); @@ -56,7 +59,7 @@ << "Unsupported tensor type for operation " << kOperationName; NN_RET_CHECK(validateInputTypes(context, {inputType})); NN_RET_CHECK(validateOutputTypes(context, {inputType})); - return Version::ANDROID_Q; + return validateHalVersion(context, HalVersion::V1_2); } bool prepare(IOperationExecutionContext* context) {
diff --git a/common/operations/PRelu.cpp b/common/operations/PRelu.cpp index c3ca211..a799a84 100644 --- a/common/operations/PRelu.cpp +++ b/common/operations/PRelu.cpp
@@ -19,19 +19,20 @@ #include <algorithm> #include <vector> +#include "HalInterfaces.h" #include "IndexedShapeWrapper.h" #include "OperationResolver.h" #include "OperationsUtils.h" #include "Tracing.h" -#ifdef NN_INCLUDE_CPU_IMPLEMENTATION #include <tensorflow/lite/kernels/internal/optimized/legacy_optimized_ops.h> -#endif // NN_INCLUDE_CPU_IMPLEMENTATION namespace android { namespace nn { namespace prelu { +using namespace hal; + constexpr char kOperationName[] = "PRELU"; constexpr uint32_t kNumInputs = 2; @@ -41,7 +42,6 @@ constexpr uint32_t kNumOutputs = 1; constexpr uint32_t kOutputTensor = 0; -#ifdef NN_INCLUDE_CPU_IMPLEMENTATION template <typename T> inline bool eval(const std::function<T(const T&, const T&)>& func, const T* aData, const Shape& aShape, const T* bData, const Shape& bShape, T* outputData, @@ -97,9 +97,8 @@ }, aData, aShape, bData, bShape, outputData, outputShape); } -#endif // NN_INCLUDE_CPU_IMPLEMENTATION -Result<Version> validate(const IOperationValidationContext* context) { +bool validate(const IOperationValidationContext* context) { NN_RET_CHECK_EQ(context->getNumInputs(), kNumInputs); NN_RET_CHECK_EQ(context->getNumOutputs(), kNumOutputs); auto inputType = context->getInputType(kInputTensor); @@ -111,13 +110,12 @@ NN_RET_CHECK(validateInputTypes(context, {inputType, inputType})); NN_RET_CHECK(validateOutputTypes(context, {inputType})); if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - return Version::ANDROID_R; + return validateHalVersion(context, HalVersion::V1_3); } else { - return Version::ANDROID_Q; + return validateHalVersion(context, HalVersion::V1_2); } } -#ifdef NN_INCLUDE_CPU_IMPLEMENTATION bool prepare(IOperationExecutionContext* context) { Shape input = context->getInputShape(kInputTensor); Shape alpha = context->getInputShape(kAlphaTensor); @@ -171,7 +169,6 @@ NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation " << kOperationName; } } -#endif // NN_INCLUDE_CPU_IMPLEMENTATION } // namespace prelu
diff --git a/common/operations/Pooling.cpp b/common/operations/Pooling.cpp index 2ab327f..3ffa70f 100644 --- a/common/operations/Pooling.cpp +++ b/common/operations/Pooling.cpp
@@ -16,22 +16,21 @@ #define LOG_TAG "Operations" -#include <vector> - -#include "OperationResolver.h" -#include "Tracing.h" -#include "nnapi/Validation.h" - -#ifdef NN_INCLUDE_CPU_IMPLEMENTATION #include <tensorflow/lite/kernels/internal/optimized/optimized_ops.h> #include <tensorflow/lite/kernels/internal/reference/integer_ops/pooling.h> +#include <vector> + #include "CpuOperationUtils.h" -#endif // NN_INCLUDE_CPU_IMPLEMENTATION +#include "HalInterfaces.h" +#include "OperationResolver.h" +#include "Tracing.h" namespace android { namespace nn { +using namespace hal; + namespace pooling { constexpr uint32_t kInputTensor = 0; @@ -39,7 +38,6 @@ constexpr uint32_t kNumOutputs = 1; constexpr uint32_t kOutputTensor = 0; -#ifdef NN_INCLUDE_CPU_IMPLEMENTATION namespace { struct PoolingParam { @@ -291,23 +289,21 @@ } } // namespace -#endif // NN_INCLUDE_CPU_IMPLEMENTATION -Result<Version> validate(OperationType opType, const IOperationValidationContext* context) { +bool validate(OperationType opType, const IOperationValidationContext* context) { NN_RET_CHECK_EQ(context->getNumOutputs(), kNumOutputs); auto inputCount = context->getNumInputs(); NN_RET_CHECK(inputCount == 11 || inputCount == 10 || inputCount == 8 || inputCount == 7); auto inputType = context->getInputType(kInputTensor); std::vector<OperandType> inExpectedTypes; - auto minSupportedVersion = Version::ANDROID_OC_MR1; if (inputType == OperandType::TENSOR_FLOAT32) { - minSupportedVersion = Version::ANDROID_OC_MR1; + NN_RET_CHECK(validateHalVersion(context, HalVersion::V1_0)); inExpectedTypes = { inputType, OperandType::INT32, OperandType::INT32, OperandType::INT32, OperandType::INT32, OperandType::INT32, OperandType::INT32, }; } else if (inputType == OperandType::TENSOR_FLOAT16) { - minSupportedVersion = Version::ANDROID_Q; + NN_RET_CHECK(validateHalVersion(context, HalVersion::V1_2)); inExpectedTypes = { OperandType::TENSOR_FLOAT16, OperandType::INT32, OperandType::INT32, OperandType::INT32, OperandType::INT32, 
OperandType::INT32, @@ -315,7 +311,7 @@ }; } else if (opType != OperationType::L2_POOL_2D && inputType == OperandType::TENSOR_QUANT8_ASYMM) { - minSupportedVersion = Version::ANDROID_OC_MR1; + NN_RET_CHECK(validateHalVersion(context, HalVersion::V1_0)); inExpectedTypes = { OperandType::TENSOR_QUANT8_ASYMM, OperandType::INT32, @@ -327,7 +323,7 @@ }; } else if (opType != OperationType::L2_POOL_2D && inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - minSupportedVersion = Version::ANDROID_R; + NN_RET_CHECK(validateHalVersion(context, HalVersion::V1_3)); inExpectedTypes = { OperandType::TENSOR_QUANT8_ASYMM_SIGNED, OperandType::INT32, @@ -338,7 +334,8 @@ OperandType::INT32, }; } else { - NN_RET_CHECK_FAIL() << "Unsupported input tensor type for operation " << opType; + NN_RET_CHECK_FAIL() << "Unsupported input tensor type for operation " + << getOperationName(opType); } if (inputCount >= 10) { @@ -348,16 +345,14 @@ } if (inputCount == 11 || inputCount == 8) { inExpectedTypes.push_back(OperandType::BOOL); - minSupportedVersion = combineVersions(minSupportedVersion, Version::ANDROID_Q); + NN_RET_CHECK(validateHalVersion(context, HalVersion::V1_2)); } else { - minSupportedVersion = combineVersions(minSupportedVersion, Version::ANDROID_OC_MR1); + NN_RET_CHECK(validateHalVersion(context, HalVersion::V1_0)); } - NN_RET_CHECK(validateInputTypes(context, inExpectedTypes)); - NN_RET_CHECK(validateOutputTypes(context, {inputType})); - return minSupportedVersion; + return validateInputTypes(context, inExpectedTypes) && + validateOutputTypes(context, {inputType}); } -#ifdef NN_INCLUDE_CPU_IMPLEMENTATION bool prepare(IOperationExecutionContext* context) { Shape input = context->getInputShape(kInputTensor); NN_RET_CHECK_EQ(getNumberOfDimensions(input), 4); @@ -437,7 +432,6 @@ NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation MAX_POOL_2D"; } } -#endif // NN_INCLUDE_CPU_IMPLEMENTATION #undef POOLING_DISPATCH_INPUT_TYPE
diff --git a/common/operations/Pow.cpp b/common/operations/Pow.cpp index 2506549..40c4adf 100644 --- a/common/operations/Pow.cpp +++ b/common/operations/Pow.cpp
@@ -17,19 +17,20 @@ #define LOG_TAG "Operations" #include "Pow.h" - -#include <cmath> -#include <vector> - +#include "HalInterfaces.h" #include "IndexedShapeWrapper.h" #include "OperationsUtils.h" +#include <cmath> + namespace android { namespace nn { namespace pow { namespace { +using namespace hal; + template <typename T> bool evalGeneric(const T* baseData, const Shape& baseShape, const T* exponentData, const Shape& exponentShape, T* outputData, const Shape& outputShape) { @@ -80,7 +81,7 @@ reinterpret_cast<float*>(outputData), outputShape); } break; default: { - LOG(ERROR) << "Unsupported data type: " << baseShape.type; + LOG(ERROR) << "Unsupported data type: " << toString(baseShape.type); return false; } }
diff --git a/common/operations/QLSTM.cpp b/common/operations/QLSTM.cpp index b6ed41c..3b2dd05 100644 --- a/common/operations/QLSTM.cpp +++ b/common/operations/QLSTM.cpp
@@ -20,10 +20,7 @@ #include "CpuExecutor.h" #include "OperationsUtils.h" - -#ifdef NN_INCLUDE_CPU_IMPLEMENTATION #include "QuantUtils.h" -#endif // NN_INCLUDE_CPU_IMPLEMENTATION namespace android { namespace nn { @@ -104,7 +101,9 @@ } // namespace -Result<Version> validate(const IOperationValidationContext* context) { +using hal::OperandType; + +bool validate(const IOperationValidationContext* context) { NN_RET_CHECK_EQ(context->getNumInputs(), kNumInputs); NN_RET_CHECK_EQ(context->getNumOutputs(), kNumOutputs); @@ -152,7 +151,7 @@ outExpectedTypes.push_back(OperandType::TENSOR_QUANT8_ASYMM_SIGNED); NN_RET_CHECK(validateOutputTypes(context, outExpectedTypes)); - return Version::ANDROID_R; + return validateHalVersion(context, HalVersion::V1_3); } bool prepare(IOperationExecutionContext* context) { @@ -363,7 +362,6 @@ context->setOutputShape(kOutputTensor, outputShape); } -#ifdef NN_INCLUDE_CPU_IMPLEMENTATION bool execute(IOperationExecutionContext* context) { // Gets the inputs. const Shape inputShape = context->getInputShape(kInputTensor); @@ -796,7 +794,6 @@ return true; } -#endif // NN_INCLUDE_CPU_IMPLEMENTATION } // namespace qlstm
diff --git a/common/operations/Quantize.cpp b/common/operations/Quantize.cpp index f6fac4f..fa04bdd 100644 --- a/common/operations/Quantize.cpp +++ b/common/operations/Quantize.cpp
@@ -14,16 +14,17 @@ * limitations under the License. */ +#include "OperationsUtils.h" #define LOG_TAG "Operations" +#include "HalInterfaces.h" +#include "IndexedShapeWrapper.h" +#include "OperationResolver.h" +#include "Tracing.h" + #include <algorithm> #include <cmath> -#include "IndexedShapeWrapper.h" -#include "OperationResolver.h" -#include "OperationsUtils.h" -#include "Tracing.h" - namespace android { namespace nn { namespace quantize { @@ -36,6 +37,8 @@ namespace { +using namespace hal; + template <typename T> bool quantizeToQuant8(const T* inputData, uint8_t* outputData, const Shape& outputShape) { NNTRACE_COMP("quantizeToQuant8"); @@ -63,7 +66,7 @@ } // namespace -Result<Version> validate(const IOperationValidationContext* context) { +bool validate(const IOperationValidationContext* context) { NN_RET_CHECK_EQ(context->getNumInputs(), kNumInputs); NN_RET_CHECK_EQ(context->getNumOutputs(), kNumOutputs); @@ -72,14 +75,14 @@ NN_RET_CHECK(inputType == OperandType::TENSOR_FLOAT16 || inputType == OperandType::TENSOR_FLOAT32) - << "Unsupported input operand type for QUANTIZE op: " << inputType; + << "Unsupported input operand type for QUANTIZE op: " << toString(inputType); NN_RET_CHECK(outputType == OperandType::TENSOR_QUANT8_ASYMM || outputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) - << "Unsupported output operand type for QUANTIZE op: " << outputType; + << "Unsupported output operand type for QUANTIZE op: " << toString(outputType); if (outputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - return Version::ANDROID_R; + return validateHalVersion(context, HalVersion::V1_3); } else { - return Version::ANDROID_Q; + return validateHalVersion(context, HalVersion::V1_2); } } @@ -118,7 +121,8 @@ } } NN_RET_CHECK_FAIL() << "Unsupported tensor types combination for QUANTIZE op. 
(input type: " - << inputType << " output type: " << context->getOutputType(kOutputTensor) + << toString(inputType) + << " output type: " << toString(context->getOutputType(kOutputTensor)) << ")"; }
diff --git a/common/operations/QuantizedLSTM.cpp b/common/operations/QuantizedLSTM.cpp index 590bdb9..e059026 100644 --- a/common/operations/QuantizedLSTM.cpp +++ b/common/operations/QuantizedLSTM.cpp
@@ -18,21 +18,24 @@ #include "QuantizedLSTM.h" -#include <public/gemmlowp.h> -#include <tensorflow/lite/kernels/internal/reference/legacy_reference_ops.h> - -#include <algorithm> -#include <vector> - #include "CpuExecutor.h" #include "CpuOperationUtils.h" +#include "HalInterfaces.h" + #include "Tracing.h" +#include <public/gemmlowp.h> +#include <tensorflow/lite/kernels/internal/reference/legacy_reference_ops.h> +#include <algorithm> +#include <vector> + namespace android { namespace nn { namespace { +using namespace hal; + template <typename T> inline T* GetBuffer(RunTimeOperandInfo* operand) { return reinterpret_cast<T*>(operand->buffer);
diff --git a/common/operations/QuantizedLSTM.h b/common/operations/QuantizedLSTM.h index 61963c0..76e74c6 100644 --- a/common/operations/QuantizedLSTM.h +++ b/common/operations/QuantizedLSTM.h
@@ -28,9 +28,9 @@ class QuantizedLSTMCell { public: - QuantizedLSTMCell(const Operation& operation, RunTimeOperandInfo* operands); + QuantizedLSTMCell(const hal::Operation& operation, RunTimeOperandInfo* operands); - static bool prepare(const Operation& operation, RunTimeOperandInfo* operands, + static bool prepare(const hal::Operation& operation, RunTimeOperandInfo* operands, Shape* cellStateShape, Shape* outputShape); bool eval();
diff --git a/common/operations/QuantizedLSTMTest.cpp b/common/operations/QuantizedLSTMTest.cpp index fd1eb4a..0db5106 100644 --- a/common/operations/QuantizedLSTMTest.cpp +++ b/common/operations/QuantizedLSTMTest.cpp
@@ -14,14 +14,13 @@ * limitations under the License. */ -#include <gmock/gmock.h> -#include <gtest/gtest.h> - -#include <iostream> -#include <vector> +#include "QuantizedLSTM.h" #include "NeuralNetworksWrapper.h" -#include "QuantizedLSTM.h" + +#include <gmock/gmock-matchers.h> +#include <gtest/gtest.h> +#include <iostream> namespace android { namespace nn {
diff --git a/common/operations/RNN.cpp b/common/operations/RNN.cpp index bc5a819..259c091 100644 --- a/common/operations/RNN.cpp +++ b/common/operations/RNN.cpp
@@ -16,17 +16,21 @@ #define LOG_TAG "Operations" -#include "RNN.h" - #include <vector> +#include "RNN.h" + #include "CpuExecutor.h" #include "CpuOperationUtils.h" +#include "HalInterfaces.h" + #include "Tracing.h" namespace android { namespace nn { +using namespace hal; + RNN::RNN(const Operation& operation, RunTimeOperandInfo* operands) { NNTRACE_TRANS("RNN::RNN"); input_ = GetInput(operation, operands, kInputTensor);
diff --git a/common/operations/RNN.h b/common/operations/RNN.h index 0a5765b..245eb1d 100644 --- a/common/operations/RNN.h +++ b/common/operations/RNN.h
@@ -20,7 +20,7 @@ #include <vector> #include "ActivationFunctor.h" -#include "nnapi/Types.h" +#include "HalInterfaces.h" namespace android { namespace nn { @@ -30,9 +30,9 @@ class RNN { public: - RNN(const Operation& operation, RunTimeOperandInfo* operands); + RNN(const hal::Operation& operation, RunTimeOperandInfo* operands); - static bool Prepare(const Operation& operation, RunTimeOperandInfo* operands, + static bool Prepare(const hal::Operation& operation, RunTimeOperandInfo* operands, Shape* hiddenStateShape, Shape* outputShape); bool Eval();
diff --git a/common/operations/RNNTest.cpp b/common/operations/RNNTest.cpp index 5f02afd..66acac7 100644 --- a/common/operations/RNNTest.cpp +++ b/common/operations/RNNTest.cpp
@@ -14,13 +14,12 @@ * limitations under the License. */ -#include <gmock/gmock.h> -#include <gtest/gtest.h> - -#include <vector> +#include "RNN.h" #include "NeuralNetworksWrapper.h" -#include "RNN.h" + +#include <gmock/gmock-matchers.h> +#include <gtest/gtest.h> namespace android { namespace nn {
diff --git a/common/operations/Rank.cpp b/common/operations/Rank.cpp index 23f5c1e..5f74437 100644 --- a/common/operations/Rank.cpp +++ b/common/operations/Rank.cpp
@@ -16,9 +16,10 @@ #define LOG_TAG "Operations" -#include "LegacyUtils.h" +#include "HalInterfaces.h" #include "OperationResolver.h" #include "OperationsUtils.h" +#include "Utils.h" namespace android { namespace nn { @@ -30,23 +31,23 @@ constexpr uint32_t kNumOutputs = 1; constexpr uint32_t kOutputScalar = 0; -Result<Version> validate(const IOperationValidationContext* context) { +bool validate(const IOperationValidationContext* context) { NN_RET_CHECK_EQ(context->getNumInputs(), kNumInputs); NN_RET_CHECK_EQ(context->getNumOutputs(), kNumOutputs); - OperandType inputType = context->getInputType(kInputTensor); - NN_RET_CHECK(inputType == OperandType::TENSOR_FLOAT16 || - inputType == OperandType::TENSOR_FLOAT32 || - inputType == OperandType::TENSOR_INT32 || - inputType == OperandType::TENSOR_QUANT8_ASYMM || - inputType == OperandType::TENSOR_QUANT16_SYMM || - inputType == OperandType::TENSOR_BOOL8 || - inputType == OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL || - inputType == OperandType::TENSOR_QUANT16_ASYMM || - inputType == OperandType::TENSOR_QUANT8_SYMM || - inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) - << "Incorrect input type for a RANK op: " << inputType; - NN_RET_CHECK(validateOutputTypes(context, {OperandType::INT32})); - return Version::ANDROID_R; + hal::OperandType inputType = context->getInputType(kInputTensor); + NN_RET_CHECK(inputType == hal::OperandType::TENSOR_FLOAT16 || + inputType == hal::OperandType::TENSOR_FLOAT32 || + inputType == hal::OperandType::TENSOR_INT32 || + inputType == hal::OperandType::TENSOR_QUANT8_ASYMM || + inputType == hal::OperandType::TENSOR_QUANT16_SYMM || + inputType == hal::OperandType::TENSOR_BOOL8 || + inputType == hal::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL || + inputType == hal::OperandType::TENSOR_QUANT16_ASYMM || + inputType == hal::OperandType::TENSOR_QUANT8_SYMM || + inputType == hal::OperandType::TENSOR_QUANT8_ASYMM_SIGNED) + << "Incorrect input type for a RANK op: " << toString(inputType); + 
NN_RET_CHECK(validateOutputTypes(context, {hal::OperandType::INT32})); + return validateHalVersion(context, HalVersion::V1_3); } bool prepare(IOperationExecutionContext* context) {
diff --git a/common/operations/Reduce.cpp b/common/operations/Reduce.cpp index 91795d9..220a4dc 100644 --- a/common/operations/Reduce.cpp +++ b/common/operations/Reduce.cpp
@@ -16,18 +16,17 @@ #define LOG_TAG "Operations" +#include <tensorflow/lite/kernels/internal/reference/reference_ops.h> + #include <algorithm> #include <limits> #include <vector> +#include "HalInterfaces.h" #include "OperationResolver.h" #include "OperationsUtils.h" #include "Tracing.h" -#ifdef NN_INCLUDE_CPU_IMPLEMENTATION -#include <tensorflow/lite/kernels/internal/reference/reference_ops.h> -#endif // NN_INCLUDE_CPU_IMPLEMENTATION - namespace android { namespace nn { namespace reduce { @@ -45,9 +44,10 @@ constexpr _Float16 kFloat16Max = 65504; constexpr _Float16 kFloat16Lowest = -kFloat16Max; -#ifdef NN_INCLUDE_CPU_IMPLEMENTATION namespace { +using namespace hal; + template <typename T> inline bool compute(IOperationExecutionContext* context, T init, T func(T, T)) { const Shape inputShape = context->getInputShape(kInputTensor); @@ -68,9 +68,8 @@ } } // namespace -#endif // NN_INCLUDE_CPU_IMPLEMENTATION -Result<Version> validateProdSum(const IOperationValidationContext* context) { +bool validateProdSum(const IOperationValidationContext* context) { NN_RET_CHECK_EQ(context->getNumInputs(), kNumInputs); NN_RET_CHECK_EQ(context->getNumOutputs(), kNumOutputs); OperandType inputType = context->getInputType(kInputTensor); @@ -84,10 +83,10 @@ if (hasKnownRank(input)) { NN_RET_CHECK_LE(getNumberOfDimensions(input), 4); } - return Version::ANDROID_Q; + return validateHalVersion(context, HalVersion::V1_2); } -Result<Version> validateMaxMin(const IOperationValidationContext* context) { +bool validateMaxMin(const IOperationValidationContext* context) { NN_RET_CHECK_EQ(context->getNumInputs(), kNumInputs); NN_RET_CHECK_EQ(context->getNumOutputs(), kNumOutputs); OperandType inputType = context->getInputType(kInputTensor); @@ -99,18 +98,18 @@ NN_RET_CHECK( validateInputTypes(context, {inputType, OperandType::TENSOR_INT32, OperandType::BOOL})); NN_RET_CHECK(validateOutputTypes(context, {inputType})); - auto minVersion = Version::ANDROID_Q; + auto minHalVersion = HalVersion::V1_2; 
if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - minVersion = Version::ANDROID_R; + minHalVersion = HalVersion::V1_3; } const Shape& input = context->getInputShape(kInputTensor); if (hasKnownRank(input)) { NN_RET_CHECK_LE(getNumberOfDimensions(input), 4); } - return minVersion; + return validateHalVersion(context, minHalVersion); } -Result<Version> validateLogical(const IOperationValidationContext* context) { +bool validateLogical(const IOperationValidationContext* context) { NN_RET_CHECK_EQ(context->getNumInputs(), kNumInputs); NN_RET_CHECK_EQ(context->getNumOutputs(), kNumOutputs); OperandType inputType = context->getInputType(kInputTensor); @@ -123,10 +122,9 @@ if (hasKnownRank(input)) { NN_RET_CHECK_LE(getNumberOfDimensions(input), 4); } - return Version::ANDROID_Q; + return validateHalVersion(context, HalVersion::V1_2); } -#ifdef NN_INCLUDE_CPU_IMPLEMENTATION bool prepare(IOperationExecutionContext* context) { Shape inputShape = context->getInputShape(kInputTensor); const uint32_t inputRank = getNumberOfDimensions(inputShape); @@ -252,7 +250,6 @@ NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation REDUCE_ALL"; } } -#endif // NN_INCLUDE_CPU_IMPLEMENTATION } // namespace reduce
diff --git a/common/operations/Reshape.cpp b/common/operations/Reshape.cpp index 35dee5f..48c293e 100644 --- a/common/operations/Reshape.cpp +++ b/common/operations/Reshape.cpp
@@ -18,14 +18,14 @@ #define LOG_TAG "Operations" -#include <tensorflow/lite/kernels/internal/optimized/legacy_optimized_ops.h> -#include <tensorflow/lite/kernels/internal/reference/reference_ops.h> - #include <vector> #include "CpuOperationUtils.h" -#include "LegacyUtils.h" #include "Operations.h" + +#include <tensorflow/lite/kernels/internal/optimized/legacy_optimized_ops.h> +#include <tensorflow/lite/kernels/internal/reference/reference_ops.h> + #include "Tracing.h" namespace android {
diff --git a/common/operations/ResizeImageOps.cpp b/common/operations/ResizeImageOps.cpp index a9ffab5..c33abaf 100644 --- a/common/operations/ResizeImageOps.cpp +++ b/common/operations/ResizeImageOps.cpp
@@ -16,23 +16,22 @@ #define LOG_TAG "Operations" +#include <tensorflow/lite/kernels/internal/reference/reference_ops.h> + #include <algorithm> #include <functional> #include <vector> +#include "CpuOperationUtils.h" +#include "HalInterfaces.h" #include "OperationResolver.h" #include "Tracing.h" -#include "nnapi/Validation.h" - -#ifdef NN_INCLUDE_CPU_IMPLEMENTATION -#include <tensorflow/lite/kernels/internal/reference/reference_ops.h> - -#include "CpuOperationUtils.h" -#endif // NN_INCLUDE_CPU_IMPLEMENTATION namespace android { namespace nn { +using namespace hal; + namespace resize_image { constexpr uint32_t kNumInputs = 4; @@ -48,7 +47,6 @@ constexpr uint32_t kNumOutputs = 1; constexpr uint32_t kOutputTensor = 0; -#ifdef NN_INCLUDE_CPU_IMPLEMENTATION namespace { inline float scaleHalfPixel(const int x, const float scale) { @@ -172,35 +170,33 @@ } } // namespace -#endif // NN_INCLUDE_CPU_IMPLEMENTATION -Result<Version> validate(OperationType opType, const IOperationValidationContext* context) { +bool validate(OperationType opType, const IOperationValidationContext* context) { const auto numInputs = context->getNumInputs(); if (opType == OperationType::RESIZE_BILINEAR) { NN_RET_CHECK(numInputs >= kNumInputs - 1 && numInputs <= kNumInputs + kNumOptionalInputs); } else if (opType == OperationType::RESIZE_NEAREST_NEIGHBOR) { NN_RET_CHECK(numInputs >= kNumInputs && numInputs <= kNumInputs + kNumOptionalInputs); } else { - NN_RET_CHECK_FAIL() << "Unsupported operation " << opType; + NN_RET_CHECK_FAIL() << "Unsupported operation " << getOperationName(opType); } NN_RET_CHECK_EQ(context->getNumOutputs(), kNumOutputs); auto inputType = context->getInputType(kInputTensor); auto scalarType = context->getInputType(kOutputHeightParamScalar); std::vector<OperandType> inExpectedTypes = {inputType, scalarType, scalarType}; - auto minSupportedVersion = Version::ANDROID_OC_MR1; NN_RET_CHECK(inputType == OperandType::TENSOR_FLOAT16 || inputType == OperandType::TENSOR_FLOAT32 || 
inputType == OperandType::TENSOR_QUANT8_ASYMM || inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) - << "Unsupported tensor type for operation " << opType; + << "Unsupported tensor type for operation " << getOperationName(opType); if (inputType == OperandType::TENSOR_FLOAT16 || inputType == OperandType::TENSOR_QUANT8_ASYMM) { - minSupportedVersion = combineVersions(minSupportedVersion, Version::ANDROID_Q); + NN_RET_CHECK(validateHalVersion(context, HalVersion::V1_2)); } if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - minSupportedVersion = combineVersions(minSupportedVersion, Version::ANDROID_R); + NN_RET_CHECK(validateHalVersion(context, HalVersion::V1_3)); } if (scalarType != OperandType::INT32) { - minSupportedVersion = combineVersions(minSupportedVersion, Version::ANDROID_Q); + NN_RET_CHECK(validateHalVersion(context, HalVersion::V1_2)); if (inputType == OperandType::TENSOR_FLOAT32) { NN_RET_CHECK(scalarType == OperandType::FLOAT32); } else if (inputType == OperandType::TENSOR_FLOAT16) { @@ -211,22 +207,20 @@ } } if (numInputs < kNumInputs) { - minSupportedVersion = combineVersions(minSupportedVersion, Version::ANDROID_OC_MR1); + NN_RET_CHECK(validateHalVersion(context, HalVersion::V1_0)); } else if (numInputs == kNumInputs) { inExpectedTypes.push_back(OperandType::BOOL); - minSupportedVersion = combineVersions(minSupportedVersion, Version::ANDROID_Q); + NN_RET_CHECK(validateHalVersion(context, HalVersion::V1_2)); } else { while (inExpectedTypes.size() < numInputs) { inExpectedTypes.push_back(OperandType::BOOL); } - minSupportedVersion = combineVersions(minSupportedVersion, Version::ANDROID_R); + NN_RET_CHECK(validateHalVersion(context, HalVersion::V1_3)); } - NN_RET_CHECK(validateInputTypes(context, inExpectedTypes)); - NN_RET_CHECK(validateOutputTypes(context, {inputType})); - return minSupportedVersion; + return validateInputTypes(context, inExpectedTypes) && + validateOutputTypes(context, {inputType}); } -#ifdef 
NN_INCLUDE_CPU_IMPLEMENTATION bool prepare(OperationType opType, IOperationExecutionContext* context) { Shape input = context->getInputShape(kInputTensor); NN_RET_CHECK_EQ(getNumberOfDimensions(input), 4); @@ -264,7 +258,7 @@ static_cast<float>(inWidth) * static_cast<float>(context->getInputValue<_Float16>(kOutputWidthParamScalar))); } else { - NN_RET_CHECK_FAIL() << "Unsupported scalar type for operation " << opType; + NN_RET_CHECK_FAIL() << "Unsupported scalar type for operation " << getOperationName(opType); } NN_RET_CHECK_GT(height, 0); NN_RET_CHECK_GT(width, 0); @@ -310,10 +304,10 @@ context->getOutputShape(kOutputTensor)); default: - NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation " << opType; + NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation " + << getOperationName(opType); } } -#endif // NN_INCLUDE_CPU_IMPLEMENTATION } // namespace resize_image
diff --git a/common/operations/RoiAlign.cpp b/common/operations/RoiAlign.cpp index 2341d70..b9daf45 100644 --- a/common/operations/RoiAlign.cpp +++ b/common/operations/RoiAlign.cpp
@@ -16,20 +16,17 @@ #define LOG_TAG "Operations" -#include <algorithm> -#include <cfloat> -#include <cmath> -#include <vector> - +#include "CpuOperationUtils.h" +#include "HalInterfaces.h" #include "OperationResolver.h" #include "OperationsUtils.h" #include "Tracing.h" -#ifdef NN_INCLUDE_CPU_IMPLEMENTATION #include <tensorflow/lite/kernels/internal/common.h> - -#include "CpuOperationUtils.h" -#endif // NN_INCLUDE_CPU_IMPLEMENTATION +#include <algorithm> +#include <cfloat> +#include <cmath> +#include <vector> namespace android { namespace nn { @@ -52,9 +49,10 @@ constexpr uint32_t kNumOutputs = 1; constexpr uint32_t kOutputTensor = 0; -#ifdef NN_INCLUDE_CPU_IMPLEMENTATION namespace { +using namespace hal; + template <typename T_Input, typename T_Roi> inline bool roiAlignNhwc(const T_Input* inputData, const Shape& inputShape, const T_Roi* roiData, const Shape& roiShape, const int32_t* batchSplitData, @@ -340,9 +338,8 @@ } } // namespace -#endif // NN_INCLUDE_CPU_IMPLEMENTATION -Result<Version> validate(const IOperationValidationContext* context) { +bool validate(const IOperationValidationContext* context) { NN_RET_CHECK_EQ(context->getNumInputs(), kNumInputs); NN_RET_CHECK_EQ(context->getNumOutputs(), kNumOutputs); std::vector<OperandType> inExpectedTypes; @@ -372,18 +369,18 @@ OperandType::INT32, OperandType::BOOL}; } else { - return NN_ERROR() << "Unsupported input tensor type for operation " << kOperationName; + LOG(ERROR) << "Unsupported input tensor type for operation " << kOperationName; + return false; } NN_RET_CHECK(validateInputTypes(context, inExpectedTypes)); NN_RET_CHECK(validateOutputTypes(context, {inputType})); if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - return Version::ANDROID_R; + return validateHalVersion(context, HalVersion::V1_3); } else { - return Version::ANDROID_Q; + return validateHalVersion(context, HalVersion::V1_2); } } -#ifdef NN_INCLUDE_CPU_IMPLEMENTATION bool prepare(IOperationExecutionContext* context) { bool useNchw = 
context->getInputValue<bool>(kLayoutScalar); Shape input = context->getInputShape(kInputTensor); @@ -506,7 +503,6 @@ NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation " << kOperationName; } } -#endif // NN_INCLUDE_CPU_IMPLEMENTATION } // namespace roi_align
diff --git a/common/operations/RoiPooling.cpp b/common/operations/RoiPooling.cpp index a2a003a..a4f8214 100644 --- a/common/operations/RoiPooling.cpp +++ b/common/operations/RoiPooling.cpp
@@ -21,14 +21,12 @@ #include <cmath> #include <vector> +#include "CpuOperationUtils.h" +#include "HalInterfaces.h" #include "OperationResolver.h" #include "OperationsUtils.h" #include "Tracing.h" -#ifdef NN_INCLUDE_CPU_IMPLEMENTATION -#include "CpuOperationUtils.h" -#endif // NN_INCLUDE_CPU_IMPLEMENTATION - namespace android { namespace nn { namespace roi_pooling { @@ -48,9 +46,10 @@ constexpr uint32_t kNumOutputs = 1; constexpr uint32_t kOutputTensor = 0; -#ifdef NN_INCLUDE_CPU_IMPLEMENTATION namespace { +using namespace hal; + template <typename T_Input, typename T_Roi> inline bool roiPoolingNhwc(const T_Input* inputData, const Shape& inputShape, const T_Roi* roiData, const Shape& roiShape, const int32_t* batchSplitData, @@ -187,9 +186,8 @@ } } // namespace -#endif // NN_INCLUDE_CPU_IMPLEMENTATION -Result<Version> validate(const IOperationValidationContext* context) { +bool validate(const IOperationValidationContext* context) { NN_RET_CHECK_EQ(context->getNumInputs(), kNumInputs); NN_RET_CHECK_EQ(context->getNumOutputs(), kNumOutputs); std::vector<OperandType> inExpectedTypes; @@ -215,18 +213,19 @@ OperandType::FLOAT32, OperandType::BOOL}; } else { - return NN_ERROR() << "Unsupported input tensor type for operation " << kOperationName; + LOG(ERROR) << "Unsupported input tensor type for operation " << kOperationName; + return false; } NN_RET_CHECK(validateInputTypes(context, inExpectedTypes)); NN_RET_CHECK(validateOutputTypes(context, {inputType})); if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - return Version::ANDROID_R; + return validateHalVersion(context, HalVersion::V1_3); + ; } else { - return Version::ANDROID_Q; + return validateHalVersion(context, HalVersion::V1_2); } } -#ifdef NN_INCLUDE_CPU_IMPLEMENTATION bool prepare(IOperationExecutionContext* context) { bool useNchw = context->getInputValue<bool>(kLayoutScalar); Shape input = context->getInputShape(kInputTensor); @@ -328,7 +327,6 @@ NN_RET_CHECK_FAIL() << "Unsupported tensor type for 
operation " << kOperationName; } } -#endif // NN_INCLUDE_CPU_IMPLEMENTATION } // namespace roi_pooling
diff --git a/common/operations/SVDF.cpp b/common/operations/SVDF.cpp index 630dbb4..12b91f4 100644 --- a/common/operations/SVDF.cpp +++ b/common/operations/SVDF.cpp
@@ -18,16 +18,18 @@ #include "SVDF.h" -#include <algorithm> -#include <vector> - #include "CpuExecutor.h" #include "CpuOperationUtils.h" +#include "HalInterfaces.h" + +#include <vector> #include "Tracing.h" namespace android { namespace nn { +using namespace hal; + SVDF::SVDF(const Operation& operation, RunTimeOperandInfo* operands) { NNTRACE_TRANS("SVDF::SVDF"); input_ = GetInput(operation, operands, kInputTensor); @@ -180,48 +182,54 @@ state_ptr[memory_size - 1] = 0.0; } } - - // Clear scratch (the matmul is accumulative). - float scratch[batch_size * num_filters]; - std::fill_n(scratch, batch_size * num_filters, 0.0f); + // The state left most column is used to save current cycle activation. This + // is achieved by starting at state->data.f[memory_size - 1] and having the + // stride equal to memory_size. tflite::tensor_utils::MatrixBatchVectorMultiplyAccumulate( - weightsFeatureData, num_filters, input_size, inputData, batch_size, scratch); + weightsFeatureData, num_filters, input_size, inputData, batch_size, + &outputStateData[memory_size - 1], memory_size); - // Copy the latest activation from scratch into activation_state: - // The last, i.e. (memory_size-1)th entry for each batch, and filter. - for (int i = 0; i < batch_size * num_filters; ++i) { - outputStateData[i * memory_size + memory_size - 1] = scratch[i]; - } - - // Begin ApplyTimeWeightsBiasAndActivation // Compute matmul(state, weights_time). + // The right most column is used to save temporary output (with the size of + // num_filters). This is achieved by starting at state->data.f and having the + // stride equal to memory_size. 
+ float scratch[batch_size * num_filters]; for (int b = 0; b < batch_size; b++) { float* state_out_ptr_batch = outputStateData + b * memory_size * num_filters; float* scratch_ptr_batch = scratch + b * num_filters; tflite::tensor_utils::BatchVectorBatchVectorDotProduct( - weightsTimeData, state_out_ptr_batch, memory_size, num_filters, scratch_ptr_batch); + weightsTimeData, state_out_ptr_batch, memory_size, num_filters, scratch_ptr_batch, + /*result_stride=*/1); + } + + // Initialize output with bias if provided. + if (!IsNullInput(bias_)) { + tflite::tensor_utils::VectorBatchVectorAssign(biasData, num_units, batch_size, outputData); + } else { + std::fill_n(outputData, batch_size * num_units, 0.0f); } // Reduction sum - tflite::tensor_utils::ReductionSumVector(scratch, outputData, batch_size * num_units, rank); - - // Add bias if provided. - if (!IsNullInput(bias_)) { - tflite::tensor_utils::VectorBatchVectorAdd(biasData, num_units, batch_size, outputData); + for (int b = 0; b < batch_size; b++) { + float* output_ptr_batch = outputData + b * num_units; + float* scratch_ptr_batch = scratch + b * num_filters; + tflite::tensor_utils::ReductionSumVector(scratch_ptr_batch, output_ptr_batch, num_units, + rank); } // Apply activation. - tflite::tensor_utils::ApplyActivationToVector(outputData, batch_size * num_units, - params_.activation_, outputData); - // Finished ApplyTimeWeightsBiasAndActivation + for (int b = 0; b < batch_size; b++) { + float* output_ptr_batch = outputData + b * num_units; + tflite::tensor_utils::ApplyActivationToVector(output_ptr_batch, num_units, + params_.activation_, output_ptr_batch); + } // Right shift the state. 
for (int b = 0; b < batch_size; b++) { float* state_out_ptr_batch = outputStateData + b * memory_size * num_filters; for (int f = 0; f < num_filters; f++) { - std::copy(state_out_ptr_batch + 1, state_out_ptr_batch + memory_size, - state_out_ptr_batch); - state_out_ptr_batch[memory_size - 1] = 0.0; + tflite::tensor_utils::VectorShiftLeft<float>(state_out_ptr_batch, memory_size, + /*shift_value=*/0.0); state_out_ptr_batch += memory_size; } }
diff --git a/common/operations/SVDF.h b/common/operations/SVDF.h index da18568..ca9b54e 100644 --- a/common/operations/SVDF.h +++ b/common/operations/SVDF.h
@@ -23,7 +23,7 @@ #include <cmath> #include <vector> -#include "nnapi/Types.h" +#include "HalInterfaces.h" namespace android { namespace nn { @@ -38,10 +38,10 @@ class SVDF { public: - SVDF(const Operation& operation, RunTimeOperandInfo* operands); + SVDF(const hal::Operation& operation, RunTimeOperandInfo* operands); - static bool Prepare(const Operation& operation, RunTimeOperandInfo* operands, Shape* stateShape, - Shape* outputShape); + static bool Prepare(const hal::Operation& operation, RunTimeOperandInfo* operands, + Shape* stateShape, Shape* outputShape); bool Eval(); static constexpr int kInputTensor = 0;
diff --git a/common/operations/SVDFTest.cpp b/common/operations/SVDFTest.cpp index 4132352..21f769f 100644 --- a/common/operations/SVDFTest.cpp +++ b/common/operations/SVDFTest.cpp
@@ -14,14 +14,12 @@ * limitations under the License. */ -#include <gmock/gmock.h> -#include <gtest/gtest.h> - -#include <vector> - -#include "NeuralNetworksWrapper.h" #include "SVDF.h" +#include <gmock/gmock-matchers.h> +#include <gtest/gtest.h> +#include "NeuralNetworksWrapper.h" + using ::testing::FloatNear; using ::testing::Matcher;
diff --git a/common/operations/Select.cpp b/common/operations/Select.cpp index f037b48..2026595 100644 --- a/common/operations/Select.cpp +++ b/common/operations/Select.cpp
@@ -16,6 +16,7 @@ #define LOG_TAG "Operations" +#include "HalInterfaces.h" #include "IndexedShapeWrapper.h" #include "OperationResolver.h" #include "OperationsUtils.h" @@ -34,6 +35,8 @@ namespace { +using namespace hal; + template <typename T> bool compute(const bool8* conditionData, const Shape& conditionShape, const T* aData, const Shape& aShape, const T* bData, const Shape& bShape, T* outputData, @@ -66,7 +69,7 @@ } // namespace -Result<Version> validate(const IOperationValidationContext* context) { +bool validate(const IOperationValidationContext* context) { NN_RET_CHECK_EQ(context->getNumInputs(), kNumInputs); NN_RET_CHECK_EQ(context->getNumOutputs(), kNumOutputs); OperandType inputType = context->getInputType(kInputTensor1); @@ -75,10 +78,10 @@ inputType == OperandType::TENSOR_INT32 || inputType == OperandType::TENSOR_QUANT8_ASYMM || inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) - << "Unsupported input operand type for select op: " << inputType; + << "Unsupported input operand type for select op: " << toString(inputType); NN_RET_CHECK(validateInputTypes(context, {OperandType::TENSOR_BOOL8, inputType, inputType})); NN_RET_CHECK(validateOutputTypes(context, {inputType})); - return Version::ANDROID_Q; + return validateHalVersion(context, HalVersion::V1_2); } bool prepare(IOperationExecutionContext* context) {
diff --git a/common/operations/Slice.cpp b/common/operations/Slice.cpp index 66480cb..3c4f2fa 100644 --- a/common/operations/Slice.cpp +++ b/common/operations/Slice.cpp
@@ -16,14 +16,12 @@ #define LOG_TAG "Operations" -#include <vector> - +#include "CpuOperationUtils.h" +#include "HalInterfaces.h" #include "IndexedShapeWrapper.h" #include "OperationResolver.h" -#ifdef NN_INCLUDE_CPU_IMPLEMENTATION -#include "CpuOperationUtils.h" -#endif // NN_INCLUDE_CPU_IMPLEMENTATION +#include <vector> namespace android { namespace nn { @@ -39,7 +37,8 @@ constexpr uint32_t kNumOutputs = 1; constexpr uint32_t kOutputTensor = 0; -#ifdef NN_INCLUDE_CPU_IMPLEMENTATION +using namespace hal; + namespace { template <typename T> @@ -81,9 +80,8 @@ } } // namespace -#endif // NN_INCLUDE_CPU_IMPLEMENTATION -Result<Version> validate(const IOperationValidationContext* context) { +bool validate(const IOperationValidationContext* context) { NN_RET_CHECK_EQ(context->getNumInputs(), kNumInputs); NN_RET_CHECK_EQ(context->getNumOutputs(), kNumOutputs); @@ -94,19 +92,16 @@ inputType == OperandType::TENSOR_QUANT8_ASYMM || inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) << "Unsupported tensor type for operation " << kOperationName; - auto minSupportedVersion = Version::ANDROID_OC_MR1; if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - minSupportedVersion = Version::ANDROID_R; + NN_RET_CHECK(validateHalVersion(context, HalVersion::V1_3)); } else { - minSupportedVersion = Version::ANDROID_Q; + NN_RET_CHECK(validateHalVersion(context, HalVersion::V1_2)); } - NN_RET_CHECK(validateInputTypes( - context, {inputType, OperandType::TENSOR_INT32, OperandType::TENSOR_INT32})); - NN_RET_CHECK(validateOutputTypes(context, {inputType})); - return minSupportedVersion; + return validateInputTypes(context, + {inputType, OperandType::TENSOR_INT32, OperandType::TENSOR_INT32}) && + validateOutputTypes(context, {inputType}); } -#ifdef NN_INCLUDE_CPU_IMPLEMENTATION bool prepare(IOperationExecutionContext* context) { const Shape& inputShape = context->getInputShape(kInputTensor); const int32_t n_dims = getNumberOfDimensions(inputShape); @@ -192,7 +187,6 @@ 
NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation " << kOperationName; } } -#endif // NN_INCLUDE_CPU_IMPLEMENTATION } // namespace slice
diff --git a/common/operations/Softmax.cpp b/common/operations/Softmax.cpp index 2a3f775..8c05628 100644 --- a/common/operations/Softmax.cpp +++ b/common/operations/Softmax.cpp
@@ -16,21 +16,17 @@ #define LOG_TAG "Operations" -#include <algorithm> -#include <cfloat> -#include <limits> -#include <vector> - -#include "OperationResolver.h" -#include "Tracing.h" -#include "nnapi/Validation.h" - -#ifdef NN_INCLUDE_CPU_IMPLEMENTATION #include <tensorflow/lite/kernels/internal/optimized/legacy_optimized_ops.h> #include <tensorflow/lite/kernels/internal/optimized/optimized_ops.h> +#include <algorithm> +#include <limits> +#include <vector> + #include "CpuOperationUtils.h" -#endif // NN_INCLUDE_CPU_IMPLEMENTATION +#include "HalInterfaces.h" +#include "OperationResolver.h" +#include "Tracing.h" namespace android { namespace nn { @@ -47,9 +43,10 @@ constexpr uint32_t kNumOutputs = 1; constexpr uint32_t kOutputTensor = 0; -#ifdef NN_INCLUDE_CPU_IMPLEMENTATION namespace { +using namespace hal; + inline bool softmaxSlowFloat32(const float* inputData, const Shape& inputShape, const float beta, int32_t axis, float* outputData, const Shape& outputShape) { NNTRACE_TRANS("softmaxFloatSlow32"); @@ -230,23 +227,21 @@ } } // namespace -#endif // NN_INCLUDE_CPU_IMPLEMENTATION -Result<Version> validate(const IOperationValidationContext* context) { +bool validate(const IOperationValidationContext* context) { NN_RET_CHECK(context->getNumInputs() == kNumInputs || context->getNumInputs() == kNumInputs - 1); NN_RET_CHECK_EQ(context->getNumOutputs(), kNumOutputs); auto inputType = context->getInputType(kInputTensor); std::vector<OperandType> inExpectedTypes; - auto minSupportedVersion = Version::ANDROID_OC_MR1; if (inputType == OperandType::TENSOR_FLOAT32 || inputType == OperandType::TENSOR_QUANT8_ASYMM) { - minSupportedVersion = Version::ANDROID_OC_MR1; + NN_RET_CHECK(validateHalVersion(context, HalVersion::V1_0)); inExpectedTypes = {inputType, OperandType::FLOAT32}; } else if (inputType == OperandType::TENSOR_FLOAT16) { - minSupportedVersion = Version::ANDROID_Q; + NN_RET_CHECK(validateHalVersion(context, HalVersion::V1_2)); inExpectedTypes = {inputType, 
OperandType::FLOAT16}; } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - minSupportedVersion = Version::ANDROID_R; + NN_RET_CHECK(validateHalVersion(context, HalVersion::V1_3)); inExpectedTypes = {inputType, OperandType::FLOAT32}; } else { NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation " << kOperationName; @@ -256,19 +251,17 @@ NN_RET_CHECK_LE(inputRank, 4); } if (context->getNumInputs() == kNumInputs) { - minSupportedVersion = combineVersions(minSupportedVersion, Version::ANDROID_Q); + NN_RET_CHECK(validateHalVersion(context, HalVersion::V1_2)); inExpectedTypes.push_back(OperandType::INT32); } else { if (inputRank != 2 && inputRank != 4 && inputRank != 0) { - minSupportedVersion = combineVersions(minSupportedVersion, Version::ANDROID_Q); + NN_RET_CHECK(validateHalVersion(context, HalVersion::V1_2)); } } - NN_RET_CHECK(validateInputTypes(context, inExpectedTypes)); - NN_RET_CHECK(validateOutputTypes(context, {inputType})); - return minSupportedVersion; + return validateInputTypes(context, inExpectedTypes) && + validateOutputTypes(context, {inputType}); } -#ifdef NN_INCLUDE_CPU_IMPLEMENTATION bool prepare(IOperationExecutionContext* context) { Shape input = context->getInputShape(kInputTensor); float beta = (input.type == OperandType::TENSOR_FLOAT16) @@ -316,7 +309,6 @@ NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation " << kOperationName; } } -#endif // NN_INCLUDE_CPU_IMPLEMENTATION } // namespace softmax
diff --git a/common/operations/Split.cpp b/common/operations/Split.cpp index 441b5a2..1b39162 100644 --- a/common/operations/Split.cpp +++ b/common/operations/Split.cpp
@@ -20,6 +20,7 @@ #include "Operations.h" #include "OperationsUtils.h" + #include "Tracing.h" namespace android {
diff --git a/common/operations/Squeeze.cpp b/common/operations/Squeeze.cpp index 1509012..276461d 100644 --- a/common/operations/Squeeze.cpp +++ b/common/operations/Squeeze.cpp
@@ -20,6 +20,7 @@ #include <vector> +#include "HalInterfaces.h" #include "OperationResolver.h" #include "Operations.h" #include "Tracing.h" @@ -35,7 +36,9 @@ constexpr uint32_t kNumOutputs = 1; constexpr uint32_t kOutputTensor = 0; -Result<Version> validate(const IOperationValidationContext* context) { +using namespace hal; + +bool validate(const IOperationValidationContext* context) { NN_RET_CHECK_EQ(context->getNumInputs(), kNumInputs); NN_RET_CHECK_EQ(context->getNumOutputs(), kNumOutputs); OperandType inputType = context->getInputType(kInputTensor); @@ -43,15 +46,15 @@ inputType == OperandType::TENSOR_FLOAT32 || inputType == OperandType::TENSOR_QUANT8_ASYMM || inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) - << "Unsupported input operand type for SQUEEZE op: " << inputType; + << "Unsupported input operand type for SQUEEZE op: " << toString(inputType); - Version minSupportedVersion; + HalVersion minSupportedHalVersion; if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - minSupportedVersion = Version::ANDROID_R; + minSupportedHalVersion = HalVersion::V1_3; } else if (inputType == OperandType::TENSOR_FLOAT16) { - minSupportedVersion = Version::ANDROID_Q; + minSupportedHalVersion = HalVersion::V1_2; } else { - minSupportedVersion = Version::ANDROID_P; + minSupportedHalVersion = HalVersion::V1_1; } NN_RET_CHECK(validateInputTypes(context, { @@ -63,10 +66,9 @@ if (hasKnownRank(input)) { NN_RET_CHECK_LE(getNumberOfDimensions(input), 4); } - return minSupportedVersion; + return validateHalVersion(context, minSupportedHalVersion); } -#ifdef NN_INCLUDE_CPU_IMPLEMENTATION bool prepare(IOperationExecutionContext* context) { // Only the squeeze dims tensor can be omitted. 
NN_RET_CHECK(!context->isOmittedInput(kInputTensor)); @@ -138,8 +140,6 @@ NN_RET_CHECK_FAIL() << "Unsupported tensor type for SQUEEZE op."; } } -#endif // NN_INCLUDE_CPU_IMPLEMENTATION - } // namespace squeeze NN_REGISTER_OPERATION(SQUEEZE, "SQUEEZE", squeeze::validate, squeeze::prepare, squeeze::execute,
diff --git a/common/operations/StridedSlice.cpp b/common/operations/StridedSlice.cpp index ee69b04..5ff5aec 100644 --- a/common/operations/StridedSlice.cpp +++ b/common/operations/StridedSlice.cpp
@@ -18,18 +18,16 @@ #define LOG_TAG "Operations" +#include <tensorflow/lite/kernels/internal/reference/legacy_reference_ops.h> + #include <vector> +#include "CpuOperationUtils.h" +#include "HalInterfaces.h" #include "OperationResolver.h" #include "Operations.h" #include "Tracing.h" -#ifdef NN_INCLUDE_CPU_IMPLEMENTATION -#include <tensorflow/lite/kernels/internal/reference/legacy_reference_ops.h> - -#include "CpuOperationUtils.h" -#endif // NN_INCLUDE_CPU_IMPLEMENTATION - namespace android { namespace nn { namespace strided_slice { @@ -46,9 +44,10 @@ constexpr uint32_t kNumOutputs = 1; constexpr uint32_t kOutputTensor = 0; -#ifdef NN_INCLUDE_CPU_IMPLEMENTATION namespace { +using namespace hal; + template <typename T> bool compute(const T* inputData, const Shape& inputShape, const int32_t* beginData, const int32_t* endData, const int32_t* stridesData, int32_t beginMask, int32_t endMask, @@ -99,9 +98,8 @@ } } // namespace -#endif // NN_INCLUDE_CPU_IMPLEMENTATION -Result<Version> validate(const IOperationValidationContext* context) { +bool validate(const IOperationValidationContext* context) { NN_RET_CHECK_EQ(context->getNumInputs(), kNumInputs); NN_RET_CHECK_EQ(context->getNumOutputs(), kNumOutputs); OperandType inputType = context->getInputType(kInputTensor); @@ -109,15 +107,15 @@ inputType == OperandType::TENSOR_FLOAT32 || inputType == OperandType::TENSOR_QUANT8_ASYMM || inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) - << "Unsupported input operand type for STRIDED_SLICE op: " << inputType; + << "Unsupported input operand type for STRIDED_SLICE op: " << toString(inputType); - Version minSupportedVersion; + HalVersion minSupportedHalVersion; if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - minSupportedVersion = Version::ANDROID_R; + minSupportedHalVersion = HalVersion::V1_3; } else if (inputType == OperandType::TENSOR_FLOAT16) { - minSupportedVersion = Version::ANDROID_Q; + minSupportedHalVersion = HalVersion::V1_2; } else { - minSupportedVersion 
= Version::ANDROID_P; + minSupportedHalVersion = HalVersion::V1_1; } NN_RET_CHECK(validateInputTypes(context, { @@ -134,10 +132,9 @@ if (hasKnownRank(input)) { NN_RET_CHECK_LE(getNumberOfDimensions(input), 4); } - return minSupportedVersion; + return validateHalVersion(context, minSupportedHalVersion); } -#ifdef NN_INCLUDE_CPU_IMPLEMENTATION bool prepare(IOperationExecutionContext* context) { // StridedSlice op only supports 1D-4D input arrays. const Shape& inputShape = context->getInputShape(kInputTensor); @@ -219,8 +216,6 @@ NN_RET_CHECK_FAIL() << "Unsupported tensor type for STRIDED_SLICE op."; } } -#endif // NN_INCLUDE_CPU_IMPLEMENTATION - } // namespace strided_slice NN_REGISTER_OPERATION(STRIDED_SLICE, "STRIDED_SLICE", strided_slice::validate,
diff --git a/common/operations/Tile.cpp b/common/operations/Tile.cpp index da794a5..517d75e 100644 --- a/common/operations/Tile.cpp +++ b/common/operations/Tile.cpp
@@ -16,11 +16,11 @@ #define LOG_TAG "Operations" -#include "Tile.h" - #include <algorithm> #include <utility> +#include "Tile.h" +#include "HalInterfaces.h" #include "Tracing.h" namespace android { @@ -29,6 +29,8 @@ namespace { +using namespace hal; + template <typename T> void CopyMultipleTimes(const T* in_data, int32_t in_size, int32_t multiplier, T* out_data) { for (int i = 0; i < multiplier; ++i) {
diff --git a/common/operations/TopK_V2.cpp b/common/operations/TopK_V2.cpp index d19a309..e005b9a 100644 --- a/common/operations/TopK_V2.cpp +++ b/common/operations/TopK_V2.cpp
@@ -20,6 +20,7 @@ #include <utility> #include <vector> +#include "HalInterfaces.h" #include "OperationResolver.h" #include "OperationsUtils.h" @@ -37,6 +38,8 @@ namespace { +using namespace hal; + template <typename T> bool evalGeneric(const T* inputData, const Shape& inputShape, const int32_t k, T* valuesData, int32_t* indicesData) { @@ -73,7 +76,7 @@ } // namespace -Result<Version> validate(const IOperationValidationContext* context) { +bool validate(const IOperationValidationContext* context) { NN_RET_CHECK_EQ(context->getNumInputs(), kNumInputs); NN_RET_CHECK_EQ(context->getNumOutputs(), kNumOutputs); OperandType inputType = context->getInputType(kInputTensor); @@ -82,14 +85,14 @@ inputType == OperandType::TENSOR_INT32 || inputType == OperandType::TENSOR_QUANT8_ASYMM || inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) - << "Unsupported input operand type for select op: " << inputType; + << "Unsupported input operand type for select op: " << toString(inputType); NN_RET_CHECK(validateInputTypes(context, {inputType, OperandType::INT32})); NN_RET_CHECK(validateOutputTypes(context, {inputType, OperandType::TENSOR_INT32})); - Version minSupportedVersion = Version::ANDROID_Q; + HalVersion minSupportedHalVersion = HalVersion::V1_2; if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - minSupportedVersion = Version::ANDROID_R; + minSupportedHalVersion = HalVersion::V1_3; } - return minSupportedVersion; + return validateHalVersion(context, minSupportedHalVersion); } bool prepare(IOperationExecutionContext* context) { @@ -129,7 +132,7 @@ return executeTyped<int8_t>(context); } break; default: { - LOG(ERROR) << "Unsupported data type: " << inputShape.type; + LOG(ERROR) << "Unsupported data type: " << toString(inputShape.type); return false; } }
diff --git a/common/operations/Transpose.cpp b/common/operations/Transpose.cpp index 4b4ef7f..ff70f9e 100644 --- a/common/operations/Transpose.cpp +++ b/common/operations/Transpose.cpp
@@ -18,15 +18,14 @@ #include <vector> +#include "CpuOperationUtils.h" +#include "HalInterfaces.h" #include "OperationResolver.h" -#include "Tracing.h" -#ifdef NN_INCLUDE_CPU_IMPLEMENTATION #include <tensorflow/lite/kernels/internal/optimized/legacy_optimized_ops.h> #include <tensorflow/lite/kernels/internal/reference/reference_ops.h> -#include "CpuOperationUtils.h" -#endif // NN_INCLUDE_CPU_IMPLEMENTATION +#include "Tracing.h" namespace android { namespace nn { @@ -41,9 +40,10 @@ constexpr uint32_t kNumOutputs = 1; constexpr uint32_t kOutputTensor = 0; -#ifdef NN_INCLUDE_CPU_IMPLEMENTATION namespace { +using namespace hal; + template <typename T> bool transposeGeneric(const T* inputData, const Shape& inputShape, const int32_t* perm, const Shape& permShape, T* outputData, const Shape& outputShape) { @@ -72,20 +72,18 @@ } } // namespace -#endif // NN_INCLUDE_CPU_IMPLEMENTATION -Result<Version> validate(const IOperationValidationContext* context) { +bool validate(const IOperationValidationContext* context) { NN_RET_CHECK_EQ(context->getNumInputs(), kNumInputs); NN_RET_CHECK_EQ(context->getNumOutputs(), kNumOutputs); const OperandType inputType = context->getInputType(kInputTensor); - auto minSupportedVersion = Version::ANDROID_OC_MR1; if (inputType == OperandType::TENSOR_FLOAT32 || inputType == OperandType::TENSOR_QUANT8_ASYMM) { - minSupportedVersion = Version::ANDROID_P; + NN_RET_CHECK(validateHalVersion(context, HalVersion::V1_1)); } else if (inputType == OperandType::TENSOR_FLOAT16) { - minSupportedVersion = Version::ANDROID_Q; + NN_RET_CHECK(validateHalVersion(context, HalVersion::V1_2)); } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - minSupportedVersion = Version::ANDROID_R; + NN_RET_CHECK(validateHalVersion(context, HalVersion::V1_3)); } else { NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation " << kOperationName; } @@ -93,12 +91,10 @@ if (hasKnownRank(input)) { NN_RET_CHECK_LE(getNumberOfDimensions(input), 4); } - 
NN_RET_CHECK(validateInputTypes(context, {inputType, OperandType::TENSOR_INT32})); - NN_RET_CHECK(validateOutputTypes(context, {inputType})); - return minSupportedVersion; + return validateInputTypes(context, {inputType, OperandType::TENSOR_INT32}) && + validateOutputTypes(context, {inputType}); } -#ifdef NN_INCLUDE_CPU_IMPLEMENTATION bool prepare(IOperationExecutionContext* context) { // Only the permutation tensor can be omitted. NN_RET_CHECK(!context->isOmittedInput(kInputTensor)); @@ -174,7 +170,6 @@ NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation " << kOperationName; } } -#endif // NN_INCLUDE_CPU_IMPLEMENTATION } // namespace transpose
diff --git a/common/operations/TransposeConv2D.cpp b/common/operations/TransposeConv2D.cpp index 7c16884..d67a473 100644 --- a/common/operations/TransposeConv2D.cpp +++ b/common/operations/TransposeConv2D.cpp
@@ -16,21 +16,19 @@ #define LOG_TAG "Operations" +#include <tensorflow/lite/kernels/internal/common.h> + #include <algorithm> #include <cfloat> #include <cmath> #include <memory> #include <vector> +#include "CpuOperationUtils.h" +#include "HalInterfaces.h" #include "OperationResolver.h" #include "Tracing.h" -#ifdef NN_INCLUDE_CPU_IMPLEMENTATION -#include <tensorflow/lite/kernels/internal/common.h> - -#include "CpuOperationUtils.h" -#endif // NN_INCLUDE_CPU_IMPLEMENTATION - namespace android { namespace nn { namespace transpose_conv_2d { @@ -48,6 +46,8 @@ namespace { +using namespace hal; + // If possible we will use this static buffer for the tensor. constexpr size_t kStaticBufferSize = 1605632; char static_scratch_buffer[kStaticBufferSize]; @@ -107,7 +107,6 @@ } }; -#ifdef NN_INCLUDE_CPU_IMPLEMENTATION #define ANDROID_NN_TRANSPOSE_CONV_PARAMETERS \ uint32_t numBatches = getSizeOfDimension(inputShape, 0); \ uint32_t inputHeight = getSizeOfDimension(inputShape, 1); \ @@ -434,18 +433,17 @@ } #undef ANDROID_NN_TRANSPOSE_CONV_PARAMETERS -#endif // NN_INCLUDE_CPU_IMPLEMENTATION } // namespace -Result<Version> validate(const IOperationValidationContext* context) { +bool validate(const IOperationValidationContext* context) { const uint32_t inputCount = context->getNumInputs(); NN_RET_CHECK(inputCount == kNumInputs1 || inputCount == kNumInputs2); NN_RET_CHECK_EQ(context->getNumOutputs(), kNumOutputs); const auto inputType = context->getInputType(kInputTensor); const auto filterType = context->getInputType(kFilterTensor); std::vector<OperandType> inExpectedTypes; - Version minSupportedVersion = Version::ANDROID_Q; + HalVersion minSupportedHalVersion = HalVersion::V1_2; if (inputType == OperandType::TENSOR_FLOAT32 || inputType == OperandType::TENSOR_FLOAT16) { inExpectedTypes = {inputType, inputType, inputType}; } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM || @@ -454,16 +452,14 @@ filterType == inputType) << "Unsupported filter tensor type for operation " << 
kOperationName; if (filterType == OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL) { - NN_RET_CHECK_EQ(std::get<Operand::SymmPerChannelQuantParams>( - context->getInputExtraParams(kFilterTensor)) - .channelDim, + NN_RET_CHECK_EQ(context->getInputExtraParams(kFilterTensor).channelQuant().channelDim, 0) << "Unsupported filter tensor channel dimension for operation " << kOperationName; } inExpectedTypes = {inputType, filterType, OperandType::TENSOR_INT32}; if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - minSupportedVersion = Version::ANDROID_R; + minSupportedHalVersion = HalVersion::V1_3; } } else { NN_RET_CHECK_FAIL() << "Unsupported input tensor type for operation " << kOperationName; @@ -479,12 +475,11 @@ OperandType::INT32, OperandType::INT32, OperandType::BOOL}; } inExpectedTypes.insert(inExpectedTypes.end(), argExpectedTypes.begin(), argExpectedTypes.end()); - NN_RET_CHECK(validateInputTypes(context, inExpectedTypes)); - NN_RET_CHECK(validateOutputTypes(context, {inputType})); - return minSupportedVersion; + NN_RET_CHECK(validateHalVersion(context, minSupportedHalVersion)); + return validateInputTypes(context, inExpectedTypes) && + validateOutputTypes(context, {inputType}); } -#ifdef NN_INCLUDE_CPU_IMPLEMENTATION bool prepare(IOperationExecutionContext* context) { Shape input = context->getInputShape(kInputTensor); Shape filter = context->getInputShape(kFilterTensor); @@ -575,9 +570,7 @@ context->getInputShape(kInputTensor), context->getInputBuffer<int8_t>(kFilterTensor), context->getInputShape(kFilterTensor), - std::get<Operand::SymmPerChannelQuantParams>( - context->getInputExtraParams(kFilterTensor)) - .scales.data(), + context->getInputExtraParams(kFilterTensor).channelQuant().scales.data(), context->getInputBuffer<int32_t>(kBiasTensor), context->getInputShape(kBiasTensor), param, context->getOutputBuffer<uint8_t>(kOutputTensor), @@ -602,9 +595,7 @@ context->getInputShape(kInputTensor), context->getInputBuffer<int8_t>(kFilterTensor), 
context->getInputShape(kFilterTensor), - std::get<Operand::SymmPerChannelQuantParams>( - context->getInputExtraParams(kFilterTensor)) - .scales.data(), + context->getInputExtraParams(kFilterTensor).channelQuant().scales.data(), context->getInputBuffer<int32_t>(kBiasTensor), context->getInputShape(kBiasTensor), param, context->getOutputBuffer<int8_t>(kOutputTensor), @@ -626,7 +617,6 @@ NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation " << kOperationName; } } -#endif // NN_INCLUDE_CPU_IMPLEMENTATION } // namespace transpose_conv_2d
diff --git a/common/operations/UnidirectionalSequenceLSTM.cpp b/common/operations/UnidirectionalSequenceLSTM.cpp index 00edb30..03854f6 100644 --- a/common/operations/UnidirectionalSequenceLSTM.cpp +++ b/common/operations/UnidirectionalSequenceLSTM.cpp
@@ -18,14 +18,12 @@ #include <vector> +#include "HalInterfaces.h" #include "IndexedShapeWrapper.h" +#include "LSTM.h" #include "OperationResolver.h" #include "OperationsUtils.h" -#ifdef NN_INCLUDE_CPU_IMPLEMENTATION -#include "LSTM.h" -#endif // NN_INCLUDE_CPU_IMPLEMENTATION - namespace android { namespace nn { namespace unidirectional_sequence_lstm { @@ -88,9 +86,10 @@ constexpr uint32_t kOutputStateOutTensor = 1; constexpr uint32_t kCellStateOutTensor = 2; -#ifdef NN_INCLUDE_CPU_IMPLEMENTATION namespace { +using namespace hal; + inline bool hasTensor(IOperationExecutionContext* context, const uint32_t tensor) { return context->getInputBuffer(tensor) != nullptr; } @@ -115,9 +114,8 @@ } } // namespace -#endif // NN_INCLUDE_CPU_IMPLEMENTATION -Result<Version> validate(const IOperationValidationContext* context) { +bool validate(const IOperationValidationContext* context) { NN_RET_CHECK_EQ(context->getNumInputs(), kNumInputs); const uint32_t numOutputs = context->getNumOutputs(); NN_RET_CHECK(numOutputs == kNumOutputs || numOutputs == kNumOutputsWithState); @@ -159,19 +157,18 @@ } else { NN_RET_CHECK_FAIL() << "Unsupported input operand type for UNIDIRECTIONAL_SEQUENCE_LSTM op: " - << inputType; + << toString(inputType); } - Version minVersionSupported = Version::ANDROID_Q; + HalVersion minHalVersionSupported = HalVersion::V1_2; if (context->getNumOutputs() == kNumOutputsWithState) { - minVersionSupported = Version::ANDROID_R; + minHalVersionSupported = HalVersion::V1_3; outExpectedTypes.insert(outExpectedTypes.end(), {inputType, inputType}); } NN_RET_CHECK(validateInputTypes(context, inExpectedTypes)); NN_RET_CHECK(validateOutputTypes(context, outExpectedTypes)); - return minVersionSupported; + return validateHalVersion(context, minHalVersionSupported); } -#ifdef NN_INCLUDE_CPU_IMPLEMENTATION bool prepare(IOperationExecutionContext* context) { // Check that none of the required inputs are omitted const std::vector<int> requiredInputs = { @@ -518,7 +515,6 @@ } return 
true; } -#endif // NN_INCLUDE_CPU_IMPLEMENTATION } // namespace unidirectional_sequence_lstm
diff --git a/common/operations/UnidirectionalSequenceRNN.cpp b/common/operations/UnidirectionalSequenceRNN.cpp index a4dcff8..273b701 100644 --- a/common/operations/UnidirectionalSequenceRNN.cpp +++ b/common/operations/UnidirectionalSequenceRNN.cpp
@@ -20,9 +20,9 @@ #include <utility> #include <vector> +#include "HalInterfaces.h" #include "OperationResolver.h" #include "RNN.h" -#include "nnapi/TypeUtils.h" namespace android { namespace nn { @@ -42,9 +42,10 @@ constexpr uint32_t kOutputTensor = 0; constexpr uint32_t kStateOutputTensor = 1; -#ifdef NN_INCLUDE_CPU_IMPLEMENTATION namespace { +using namespace hal; + template <typename T> void transposeFirstTwoDims(const T* input, const Shape& inputShape, T* output) { const uint32_t firstDimSize = getSizeOfDimension(inputShape, 0); @@ -126,30 +127,29 @@ } } // namespace -#endif // NN_INCLUDE_CPU_IMPLEMENTATION -Result<Version> validate(const IOperationValidationContext* context) { +bool validate(const IOperationValidationContext* context) { NN_RET_CHECK_EQ(context->getNumInputs(), kNumInputs); const int numOutputs = context->getNumOutputs(); NN_RET_CHECK(numOutputs == kNumOutputs || numOutputs == kNumOutputsWithState); OperandType inputType = context->getInputType(kInputTensor); if (inputType != OperandType::TENSOR_FLOAT16 && inputType != OperandType::TENSOR_FLOAT32) { - return NN_ERROR() << "Unsupported input operand type for UNIDIRECTIONAL_SEQUENCE_RNN op: " - << inputType; + LOG(ERROR) << "Unsupported input operand type for UNIDIRECTIONAL_SEQUENCE_RNN op: " + << toString(inputType); + return false; } NN_RET_CHECK(validateInputTypes(context, {inputType, inputType, inputType, inputType, inputType, OperandType::INT32, OperandType::INT32})); std::vector<OperandType> outputTypes = {inputType}; - Version minVersionSupported = Version::ANDROID_Q; + HalVersion minHalVersionSupported = HalVersion::V1_2; if (numOutputs == kNumOutputsWithState) { - minVersionSupported = Version::ANDROID_R; + minHalVersionSupported = HalVersion::V1_3; outputTypes.push_back(inputType); } NN_RET_CHECK(validateOutputTypes(context, outputTypes)); - return minVersionSupported; + return validateHalVersion(context, minHalVersionSupported); } -#ifdef NN_INCLUDE_CPU_IMPLEMENTATION bool 
prepare(IOperationExecutionContext* context) { Shape input = context->getInputShape(kInputTensor); Shape weights = context->getInputShape(kWeightsTensor); @@ -205,7 +205,6 @@ } return true; } -#endif // NN_INCLUDE_CPU_IMPLEMENTATION } // namespace unidirectional_sequence_rnn
diff --git a/common/random/Android.bp b/common/random/Android.bp index b3b5bb8..8e3f93c 100644 --- a/common/random/Android.bp +++ b/common/random/Android.bp
@@ -14,10 +14,6 @@ * limitations under the License. */ -package { - default_applicable_licenses: ["Android-Apache-2.0"], -} - cc_library_headers { name: "philox_random_headers", host_supported: false,
diff --git a/common/random/philox_random.h b/common/random/philox_random.h index 8fef6e4..9d077f7 100644 --- a/common/random/philox_random.h +++ b/common/random/philox_random.h
@@ -20,7 +20,7 @@ #ifndef TENSORFLOW_CORE_LIB_RANDOM_PHILOX_RANDOM_H_ #define TENSORFLOW_CORE_LIB_RANDOM_PHILOX_RANDOM_H_ -#include <stdint.h> +#include <stdlib.h> // Function qualifiers that need to work on both CPU and GPU. #if defined(__CUDACC__)
diff --git a/common/random/random_distributions.h b/common/random/random_distributions.h index 69271ce..feb5c64 100644 --- a/common/random/random_distributions.h +++ b/common/random/random_distributions.h
@@ -18,15 +18,14 @@ #define _USE_MATH_DEFINES #include <math.h> - #include <cmath> #undef _USE_MATH_DEFINES #include <string.h> -#include <tensorflow/core/lib/bfloat16/bfloat16.h> - #include <algorithm> #include <type_traits> + +#include <tensorflow/core/lib/bfloat16/bfloat16.h> #include <unsupported/Eigen/CXX11/Tensor> #include "philox_random.h"
diff --git a/common/random/simple_philox.h b/common/random/simple_philox.h index c2ca53a..38bc744 100644 --- a/common/random/simple_philox.h +++ b/common/random/simple_philox.h
@@ -18,7 +18,6 @@ #include <math.h> #include <string.h> - #include <algorithm> #include "philox_random.h"
diff --git a/driver/cache/Android.bp b/driver/cache/Android.bp index cd3e89e..2fd388e 100644 --- a/driver/cache/Android.bp +++ b/driver/cache/Android.bp
@@ -16,10 +16,6 @@ // cc_defaults adapted in part from frameworks/native/opengl/libs/Android.bp -package { - default_applicable_licenses: ["Android-Apache-2.0"], -} - cc_defaults { name: "ml_nn_cache_libs_defaults", defaults: ["neuralnetworks_defaults"],
diff --git a/driver/cache/BlobCache/Android.bp b/driver/cache/BlobCache/Android.bp index cf2a067..40fd2d5 100644 --- a/driver/cache/BlobCache/Android.bp +++ b/driver/cache/BlobCache/Android.bp
@@ -16,10 +16,6 @@ // Adapted in part from frameworks/native/opengl/libs/Android.bp -package { - default_applicable_licenses: ["Android-Apache-2.0"], -} - cc_test { name: "BlobCache_test", host_supported: true,
diff --git a/driver/cache/BlobCache/BlobCache.cpp b/driver/cache/BlobCache/BlobCache.cpp index a783bc8..e3274da 100644 --- a/driver/cache/BlobCache/BlobCache.cpp +++ b/driver/cache/BlobCache/BlobCache.cpp
@@ -25,7 +25,6 @@ #include <cutils/properties.h> #else #include <string.h> - #include <algorithm> static const char property_value[] = "[HOST]"; #define PROPERTY_VALUE_MAX (sizeof(property_value) - 1) @@ -46,7 +45,6 @@ #include <algorithm> #include <chrono> -#include <memory> namespace android { @@ -215,17 +213,16 @@ return valueBlobSize; } -static inline size_t align_sizet(size_t size) { - constexpr size_t alignment = alignof(size_t) - 1; - return (size + alignment) & ~alignment; +static inline size_t align4(size_t size) { + return (size + 3) & ~3; } size_t BlobCache::getFlattenedSize() const { - size_t size = align_sizet(sizeof(Header) + PROPERTY_VALUE_MAX); + size_t size = align4(sizeof(Header) + PROPERTY_VALUE_MAX); for (const CacheEntry& e : mCacheEntries) { std::shared_ptr<Blob> const& keyBlob = e.getKey(); std::shared_ptr<Blob> const& valueBlob = e.getValue(); - size += align_sizet(sizeof(EntryHeader) + keyBlob->getSize() + valueBlob->getSize()); + size += align4(sizeof(EntryHeader) + keyBlob->getSize() + valueBlob->getSize()); } return size; } @@ -247,7 +244,7 @@ // Write cache entries uint8_t* byteBuffer = reinterpret_cast<uint8_t*>(buffer); - off_t byteOffset = align_sizet(sizeof(Header) + header->mBuildIdLength); + off_t byteOffset = align4(sizeof(Header) + header->mBuildIdLength); for (const CacheEntry& e : mCacheEntries) { std::shared_ptr<Blob> const& keyBlob = e.getKey(); std::shared_ptr<Blob> const& valueBlob = e.getValue(); @@ -255,7 +252,7 @@ size_t valueSize = valueBlob->getSize(); size_t entrySize = sizeof(EntryHeader) + keySize + valueSize; - size_t totalSize = align_sizet(entrySize); + size_t totalSize = align4(entrySize); if (byteOffset + totalSize > size) { ALOGE("flatten: not enough room for cache entries"); return -EINVAL; @@ -305,7 +302,7 @@ // Read cache entries const uint8_t* byteBuffer = reinterpret_cast<const uint8_t*>(buffer); - off_t byteOffset = align_sizet(sizeof(Header) + header->mBuildIdLength); + off_t byteOffset = 
align4(sizeof(Header) + header->mBuildIdLength); size_t numEntries = header->mNumEntries; for (size_t i = 0; i < numEntries; i++) { if (byteOffset + sizeof(EntryHeader) > size) { @@ -319,7 +316,7 @@ size_t valueSize = eheader->mValueSize; size_t entrySize = sizeof(EntryHeader) + keySize + valueSize; - size_t totalSize = align_sizet(entrySize); + size_t totalSize = align4(entrySize); if (byteOffset + totalSize > size) { mCacheEntries.clear(); ALOGE("unflatten: not enough room for cache entry");
diff --git a/driver/cache/BlobCache/BlobCache_test.cpp b/driver/cache/BlobCache/BlobCache_test.cpp index ad1b0bf..2635fcc 100644 --- a/driver/cache/BlobCache/BlobCache_test.cpp +++ b/driver/cache/BlobCache/BlobCache_test.cpp
@@ -14,10 +14,7 @@ ** limitations under the License. */ -#include "BlobCache.h" - #include <fcntl.h> -#include <gtest/gtest.h> #include <stdio.h> #include <stdlib.h> @@ -26,6 +23,10 @@ #include <numeric> #include <random> +#include <gtest/gtest.h> + +#include "BlobCache.h" + namespace android { template <typename T> @@ -50,7 +51,7 @@ std::unique_ptr<BlobCache> mBC; }; -INSTANTIATE_TEST_SUITE_P( +INSTANTIATE_TEST_CASE_P( Policy, BlobCacheTest, ::testing::Values( BlobCache::Policy(BlobCache::Select::RANDOM, BlobCache::Capacity::HALVE), @@ -512,7 +513,7 @@ sp<BlobCache> mBC2; }; -INSTANTIATE_TEST_SUITE_P( +INSTANTIATE_TEST_CASE_P( Policy, BlobCacheFlattenTest, ::testing::Values( BlobCache::Policy(BlobCache::Select::RANDOM, BlobCache::Capacity::HALVE),
diff --git a/driver/cache/nnCache/Android.bp b/driver/cache/nnCache/Android.bp index cf50e94..d196da9 100644 --- a/driver/cache/nnCache/Android.bp +++ b/driver/cache/nnCache/Android.bp
@@ -18,10 +18,6 @@ // - frameworks/native/opengl/libs/Android.bp // - frameworks/native/opengl/tests/EGLTest/Android.bp -package { - default_applicable_licenses: ["Android-Apache-2.0"], -} - cc_test { name: "nnCache_test",
diff --git a/driver/cache/nnCache/nnCache.cpp b/driver/cache/nnCache/nnCache.cpp index c1b0a9f..9f9e9be 100644 --- a/driver/cache/nnCache/nnCache.cpp +++ b/driver/cache/nnCache/nnCache.cpp
@@ -16,15 +16,15 @@ #include "nnCache.h" -#include <fcntl.h> #include <inttypes.h> -#include <log/log.h> #include <sys/mman.h> #include <sys/stat.h> #include <unistd.h> #include <thread> +#include <log/log.h> + // Cache file header static const char* cacheFileMagic = "nn$$"; static const size_t cacheFileHeaderSize = 8; @@ -243,7 +243,7 @@ return; } - // Validity check the size before trying to mmap it. + // Sanity check the size before trying to mmap it. size_t fileSize = statBuf.st_size; if (fileSize > mMaxTotalSize * 2) { ALOGE("cache file is too large: %#" PRIx64, static_cast<off64_t>(statBuf.st_size));
diff --git a/driver/cache/nnCache/nnCache.h b/driver/cache/nnCache/nnCache.h index 6c5531a..a0ec6ee 100644 --- a/driver/cache/nnCache/nnCache.h +++ b/driver/cache/nnCache/nnCache.h
@@ -17,13 +17,13 @@ #ifndef ANDROID_FRAMEWORKS_ML_NN_DRIVER_CACHE_NN_CACHE_NN_CACHE_H #define ANDROID_FRAMEWORKS_ML_NN_DRIVER_CACHE_NN_CACHE_NN_CACHE_H +#include "BlobCache.h" + #include <functional> #include <memory> #include <mutex> #include <string> -#include "BlobCache.h" - // ---------------------------------------------------------------------------- namespace android { // ----------------------------------------------------------------------------
diff --git a/driver/cache/nnCache/nnCache_test.cpp b/driver/cache/nnCache/nnCache_test.cpp index 9d57f53..7ef2ccc 100644 --- a/driver/cache/nnCache/nnCache_test.cpp +++ b/driver/cache/nnCache/nnCache_test.cpp
@@ -17,16 +17,16 @@ #define LOG_TAG "nnCache_test" //#define LOG_NDEBUG 0 -#include "nnCache.h" +#include <stdlib.h> +#include <string.h> + +#include <memory> #include <android-base/file.h> #include <gtest/gtest.h> #include <log/log.h> -#include <stdlib.h> -#include <string.h> -#include <algorithm> -#include <memory> +#include "nnCache.h" // Cache size limits. static const size_t maxKeySize = 12 * 1024; @@ -47,7 +47,7 @@ NNCache* mCache; }; -INSTANTIATE_TEST_SUITE_P( +INSTANTIATE_TEST_CASE_P( Policy, NNCacheTest, ::testing::Values(NNCache::Policy(NNCache::Select::RANDOM, NNCache::Capacity::HALVE), NNCache::Policy(NNCache::Select::LRU, NNCache::Capacity::HALVE),
diff --git a/driver/sample/Android.bp b/driver/sample/Android.bp index 9294b68..cec4d65 100644 --- a/driver/sample/Android.bp +++ b/driver/sample/Android.bp
@@ -14,10 +14,6 @@ * limitations under the License. */ -package { - default_applicable_licenses: ["Android-Apache-2.0"], -} - cc_defaults { name: "NeuralNetworksSampleDriver_defaults", defaults: ["neuralnetworks_defaults"], @@ -33,7 +29,6 @@ "libneuralnetworks_headers", ], shared_libs: [ - "android.hardware.neuralnetworks-V1-ndk_platform", "[email protected]", "[email protected]", "[email protected]", @@ -49,6 +44,7 @@ "libhidlmemory", "liblog", "libnativewindow", + "libsync", "libtextclassifier_hash", "libutils", ], @@ -81,23 +77,6 @@ } cc_binary { - name: "[email protected]", - srcs: ["SampleDriverFloatXNNPACK.cpp"], - defaults: ["NeuralNetworksSampleDriver_server_defaults"], - init_rc: ["config/android.hardware.neuralnetworks@1.3-service-sample-float-xnnpack.rc"], - vintf_fragments: [ - "config/android.hardware.neuralnetworks@1.3-service-sample-float-xnnpack.xml", - ], - static_libs: [ - "libXNNPACK", - "libpthreadpool", - ], - cflags: [ - "-Wno-unused-parameter", - ], -} - -cc_binary { name: "[email protected]", srcs: ["SampleDriverFloatSlow.cpp"], defaults: ["NeuralNetworksSampleDriver_server_defaults"], @@ -126,50 +105,3 @@ defaults: ["NeuralNetworksSampleDriver_defaults"], export_include_dirs: ["."], } - -cc_library_static { - name: "neuralnetworks_canonical_sample_driver", - defaults: ["neuralnetworks_defaults"], - vendor_available: true, - // b/109953668, disable OpenMP - // openmp: true, - srcs: [ - "Canonical*.cpp", - "LimitedSupportDevice.cpp", - ], - header_libs: ["libneuralnetworks_headers"], - export_include_dirs: ["."], - static_libs: ["libneuralnetworks_common"], - shared_libs: [ - "libbase", - "libnativewindow", - ], -} - -cc_library_shared { - name: "neuralnetworks_sample_sl_driver", - defaults: ["neuralnetworks_defaults"], - srcs: ["SampleDriverSL.cpp"], - shared_libs: [ - "libbase", - "libcutils", - "liblog", - "libnativewindow", - ], - // Change the soname, this library will be added as cc_prebuilt_library_shared - // with different name to 
android.hardware.neuralnetworks-shim-service-sample - ldflags: [ - "-Wl,-soname,neuralnetworks_sample_sl_driver_prebuilt.so", - ], - static_libs: [ - "libbase_ndk", - "libcrypto_static", - "libneuralnetworks_common_cl_cpu", - "libtflite_static", - "neuralnetworks_canonical_sample_driver", - "neuralnetworks_types_cl", - ], - whole_static_libs: [ - "libneuralnetworks_cl", - ], -}
diff --git a/driver/sample/CanonicalBuffer.cpp b/driver/sample/CanonicalBuffer.cpp deleted file mode 100644 index 58b12ba..0000000 --- a/driver/sample/CanonicalBuffer.cpp +++ /dev/null
@@ -1,100 +0,0 @@ -/* - * Copyright (C) 2021 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "CanonicalBuffer.h" - -#include <android-base/logging.h> -#include <nnapi/IPreparedModel.h> -#include <nnapi/Result.h> -#include <nnapi/Types.h> - -#include <algorithm> -#include <memory> -#include <utility> - -namespace android::nn::sample { -namespace { - -void copyRunTimePoolInfos(const RunTimePoolInfo& srcPool, const RunTimePoolInfo& dstPool) { - CHECK(srcPool.getBuffer() != nullptr); - CHECK(dstPool.getBuffer() != nullptr); - CHECK(srcPool.getSize() == dstPool.getSize()); - std::copy(srcPool.getBuffer(), srcPool.getBuffer() + srcPool.getSize(), dstPool.getBuffer()); - dstPool.flush(); -} - -GeneralResult<void> copyFromInternal(const SharedMemory& src, const Dimensions& dimensions, - const std::shared_ptr<ManagedBuffer>& bufferWrapper) { - CHECK(bufferWrapper != nullptr); - const auto srcPool = RunTimePoolInfo::createFromMemory(src); - if (!srcPool.has_value()) { - return NN_ERROR(ErrorStatus::GENERAL_FAILURE) - << "SampleBuffer::copyFrom -- unable to map src memory."; - } - const ErrorStatus validationStatus = - bufferWrapper->validateCopyFrom(dimensions, srcPool->getSize()); - if (validationStatus != ErrorStatus::NONE) { - return NN_ERROR(validationStatus); - } - const auto dstPool = bufferWrapper->createRunTimePoolInfo(); - copyRunTimePoolInfos(srcPool.value(), dstPool); - - return {}; -} - -} // namespace - 
-Buffer::Buffer(std::shared_ptr<ManagedBuffer> buffer, std::unique_ptr<BufferTracker::Token> token) - : kBuffer(std::move(buffer)), kToken(std::move(token)) { - CHECK(kBuffer != nullptr); - CHECK(kToken != nullptr); -} - -Request::MemoryDomainToken Buffer::getToken() const { - return Request::MemoryDomainToken{kToken->get()}; -} - -GeneralResult<void> Buffer::copyTo(const SharedMemory& dst) const { - const auto dstPool = RunTimePoolInfo::createFromMemory(dst); - if (!dstPool.has_value()) { - return NN_ERROR(ErrorStatus::GENERAL_FAILURE) - << "SampleBuffer::copyTo -- unable to map dst memory."; - } - - const ErrorStatus validationStatus = kBuffer->validateCopyTo(dstPool->getSize()); - if (validationStatus != ErrorStatus::NONE) { - return NN_ERROR(validationStatus); - } - - const auto srcPool = kBuffer->createRunTimePoolInfo(); - copyRunTimePoolInfos(srcPool, dstPool.value()); - - return {}; -} - -GeneralResult<void> Buffer::copyFrom(const SharedMemory& src, const Dimensions& dimensions) const { - if (const auto result = copyFromInternal(src, dimensions, kBuffer); !result.ok()) { - kBuffer->setInitialized(false); - NN_TRY(result); - } - - kBuffer->updateDimensions(dimensions); - kBuffer->setInitialized(true); - - return {}; -} - -} // namespace android::nn::sample
diff --git a/driver/sample/CanonicalBuffer.h b/driver/sample/CanonicalBuffer.h deleted file mode 100644 index b5fc2c1..0000000 --- a/driver/sample/CanonicalBuffer.h +++ /dev/null
@@ -1,47 +0,0 @@ -/* - * Copyright (C) 2021 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef ANDROID_PACKAGES_MODULES_NEURALNETWORKS_DRIVER_SAMPLE_CANONICAL_BUFFER_H -#define ANDROID_PACKAGES_MODULES_NEURALNETWORKS_DRIVER_SAMPLE_CANONICAL_BUFFER_H - -#include <BufferTracker.h> -#include <CpuExecutor.h> -#include <nnapi/IBuffer.h> -#include <nnapi/Result.h> -#include <nnapi/Types.h> - -#include <memory> - -namespace android::nn::sample { - -class Buffer final : public IBuffer { - public: - Buffer(std::shared_ptr<ManagedBuffer> buffer, std::unique_ptr<BufferTracker::Token> token); - - Request::MemoryDomainToken getToken() const override; - - GeneralResult<void> copyTo(const SharedMemory& dst) const override; - GeneralResult<void> copyFrom(const SharedMemory& src, - const Dimensions& dimensions) const override; - - private: - const std::shared_ptr<ManagedBuffer> kBuffer; - const std::unique_ptr<BufferTracker::Token> kToken; -}; - -} // namespace android::nn::sample - -#endif // ANDROID_PACKAGES_MODULES_NEURALNETWORKS_DRIVER_SAMPLE_CANONICAL_BUFFER_H
diff --git a/driver/sample/CanonicalBurst.cpp b/driver/sample/CanonicalBurst.cpp deleted file mode 100644 index 828216a..0000000 --- a/driver/sample/CanonicalBurst.cpp +++ /dev/null
@@ -1,55 +0,0 @@ -/* - * Copyright (C) 2021 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "CanonicalBurst.h" - -#include <DefaultExecution.h> -#include <android-base/logging.h> -#include <nnapi/IBurst.h> -#include <nnapi/IPreparedModel.h> -#include <nnapi/Result.h> -#include <nnapi/Types.h> - -#include <memory> -#include <optional> -#include <utility> -#include <vector> - -namespace android::nn::sample { - -Burst::Burst(std::shared_ptr<const PreparedModel> preparedModel) - : kPreparedModel(std::move(preparedModel)) { - CHECK(kPreparedModel != nullptr); -} - -Burst::OptionalCacheHold Burst::cacheMemory(const SharedMemory& /*memory*/) const { - return nullptr; -} - -ExecutionResult<std::pair<std::vector<OutputShape>, Timing>> Burst::execute( - const Request& request, MeasureTiming measure, const nn::OptionalTimePoint& deadline, - const nn::OptionalDuration& loopTimeoutDuration) const { - return kPreparedModel->execute(request, measure, deadline, loopTimeoutDuration); -} - -GeneralResult<SharedExecution> Burst::createReusableExecution( - const Request& request, MeasureTiming measure, - const nn::OptionalDuration& loopTimeoutDuration) const { - return std::make_shared<DefaultExecution>(kPreparedModel, request, measure, - loopTimeoutDuration); -} - -} // namespace android::nn::sample
diff --git a/driver/sample/CanonicalBurst.h b/driver/sample/CanonicalBurst.h deleted file mode 100644 index de3689d..0000000 --- a/driver/sample/CanonicalBurst.h +++ /dev/null
@@ -1,55 +0,0 @@ -/* - * Copyright (C) 2021 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef ANDROID_PACKAGES_MODULES_NEURALNETWORKS_DRIVER_SAMPLE_CANONICAL_BURST_H -#define ANDROID_PACKAGES_MODULES_NEURALNETWORKS_DRIVER_SAMPLE_CANONICAL_BURST_H - -#include <nnapi/IBurst.h> -#include <nnapi/IPreparedModel.h> -#include <nnapi/Result.h> -#include <nnapi/Types.h> - -#include <memory> -#include <optional> -#include <utility> -#include <vector> - -#include "CanonicalPreparedModel.h" - -namespace android::nn::sample { - -// Class that adapts nn::sample::PreparedModel to nn::sample::IBurst. -class Burst final : public IBurst { - public: - explicit Burst(std::shared_ptr<const PreparedModel> preparedModel); - - OptionalCacheHold cacheMemory(const SharedMemory& memory) const override; - - ExecutionResult<std::pair<std::vector<OutputShape>, Timing>> execute( - const Request& request, MeasureTiming measure, const nn::OptionalTimePoint& deadline, - const nn::OptionalDuration& loopTimeoutDuration) const override; - - GeneralResult<SharedExecution> createReusableExecution( - const Request& request, MeasureTiming measure, - const nn::OptionalDuration& loopTimeoutDuration) const override; - - private: - const std::shared_ptr<const PreparedModel> kPreparedModel; -}; - -} // namespace android::nn::sample - -#endif // ANDROID_PACKAGES_MODULES_NEURALNETWORKS_DRIVER_SAMPLE_CANONICAL_BURST_H
diff --git a/driver/sample/CanonicalDevice.cpp b/driver/sample/CanonicalDevice.cpp deleted file mode 100644 index 6478579..0000000 --- a/driver/sample/CanonicalDevice.cpp +++ /dev/null
@@ -1,258 +0,0 @@ -/* - * Copyright (C) 2021 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "CanonicalDevice.h" - -#include <Tracing.h> -#include <android-base/logging.h> -#include <nnapi/IBuffer.h> -#include <nnapi/IDevice.h> -#include <nnapi/IPreparedModel.h> -#include <nnapi/OperandTypes.h> -#include <nnapi/Result.h> -#include <nnapi/Types.h> -#include <nnapi/Validation.h> - -#include <algorithm> -#include <any> -#include <functional> -#include <iterator> -#include <memory> -#include <optional> -#include <set> -#include <string> -#include <utility> -#include <vector> - -#include "CanonicalBuffer.h" -#include "CanonicalPreparedModel.h" - -namespace android::nn::sample { -namespace { - -Capabilities makeCapabilities() { - constexpr float kPerf = 1.0f; - const Capabilities::PerformanceInfo kPerfInfo = {.execTime = kPerf, .powerUsage = kPerf}; - - constexpr OperandType kOperandsTypes[] = { - OperandType::FLOAT32, - OperandType::INT32, - OperandType::UINT32, - OperandType::TENSOR_FLOAT32, - OperandType::TENSOR_INT32, - OperandType::TENSOR_QUANT8_ASYMM, - OperandType::BOOL, - OperandType::TENSOR_QUANT16_SYMM, - OperandType::TENSOR_FLOAT16, - OperandType::TENSOR_BOOL8, - OperandType::FLOAT16, - OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL, - OperandType::TENSOR_QUANT16_ASYMM, - OperandType::TENSOR_QUANT8_SYMM, - OperandType::TENSOR_QUANT8_ASYMM_SIGNED, - }; - - std::vector<Capabilities::OperandPerformance> 
operandPerformance; - operandPerformance.reserve(std::size(kOperandsTypes)); - std::transform(std::begin(kOperandsTypes), std::end(kOperandsTypes), - std::back_inserter(operandPerformance), [kPerfInfo](OperandType op) { - return Capabilities::OperandPerformance{.type = op, .info = kPerfInfo}; - }); - auto table = - Capabilities::OperandPerformanceTable::create(std::move(operandPerformance)).value(); - - return {.relaxedFloat32toFloat16PerformanceScalar = kPerfInfo, - .relaxedFloat32toFloat16PerformanceTensor = kPerfInfo, - .operandPerformance = std::move(table), - .ifPerformance = kPerfInfo, - .whilePerformance = kPerfInfo}; -} - -std::string toString(const Dimensions& dimensions) { - std::ostringstream oss; - oss << "["; - for (size_t i = 0; i < dimensions.size(); ++i) { - if (i != 0) oss << ", "; - oss << dimensions[i]; - } - oss << "]"; - return oss.str(); -} - -} // namespace - -Device::Device(std::string name, const IOperationResolver* operationResolver) - : kName(std::move(name)), kOperationResolver(*operationResolver) { - CHECK(operationResolver != nullptr); - initVLogMask(); -} - -const std::string& Device::getName() const { - return kName; -} - -const std::string& Device::getVersionString() const { - static const std::string kVersionString = "JUST_AN_EXAMPLE"; - return kVersionString; -} - -Version Device::getFeatureLevel() const { - return Version::ANDROID_S; -} - -DeviceType Device::getType() const { - return DeviceType::CPU; -} - -const std::vector<Extension>& Device::getSupportedExtensions() const { - static const std::vector<Extension> kExtensions = {/* No extensions. 
*/}; - return kExtensions; -} - -const Capabilities& Device::getCapabilities() const { - static const Capabilities kCapabilities = makeCapabilities(); - return kCapabilities; -} - -std::pair<uint32_t, uint32_t> Device::getNumberOfCacheFilesNeeded() const { - return std::make_pair(/*numModelCache=*/0, /*numDataCache=*/0); -} - -GeneralResult<void> Device::wait() const { - return {}; -} - -GeneralResult<std::vector<bool>> Device::getSupportedOperations(const Model& model) const { - VLOG(DRIVER) << "sample::Device::getSupportedOperations"; - - // Validate arguments. - if (const auto result = validate(model); !result.ok()) { - return NN_ERROR(ErrorStatus::INVALID_ARGUMENT) << result.error(); - } - - // Mark all operations except extension operations as supported. - std::vector<bool> supported; - supported.reserve(model.main.operations.size()); - std::transform(model.main.operations.begin(), model.main.operations.end(), - std::back_inserter(supported), [](const Operation& operation) { - return !isExtensionOperationType(operation.type) && - operation.type != OperationType::OEM_OPERATION; - }); - - return supported; -} - -GeneralResult<SharedPreparedModel> Device::prepareModel( - const Model& model, ExecutionPreference preference, Priority priority, - OptionalTimePoint deadline, const std::vector<SharedHandle>& /*modelCache*/, - const std::vector<SharedHandle>& /*dataCache*/, const CacheToken& /*token*/) const { - if (VLOG_IS_ON(DRIVER)) { - VLOG(DRIVER) << "sample::Device::prepareModel"; - logModelToInfo(model); - } - - // Validate arguments. 
- if (const auto result = validate(model); !result.ok()) { - return NN_ERROR(ErrorStatus::INVALID_ARGUMENT) << "Invalid Model: " << result.error(); - } - if (const auto result = validate(preference); !result.ok()) { - return NN_ERROR(ErrorStatus::INVALID_ARGUMENT) - << "Invalid ExecutionPreference: " << result.error(); - } - if (const auto result = validate(priority); !result.ok()) { - return NN_ERROR(ErrorStatus::INVALID_ARGUMENT) << "Invalid Priority: " << result.error(); - } - - // Check if deadline has passed. - if (hasDeadlinePassed(deadline)) { - return NN_ERROR(ErrorStatus::MISSED_DEADLINE_PERSISTENT); - } - - std::vector<RunTimePoolInfo> poolInfos; - if (!setRunTimePoolInfosFromCanonicalMemories(&poolInfos, model.pools)) { - return NN_ERROR() << "setRunTimePoolInfosFromCanonicalMemories failed"; - } - - // Create the prepared model. - return std::make_shared<const PreparedModel>(model, preference, priority, &kOperationResolver, - kBufferTracker, std::move(poolInfos)); -} - -GeneralResult<SharedPreparedModel> Device::prepareModelFromCache( - OptionalTimePoint /*deadline*/, const std::vector<SharedHandle>& /*modelCache*/, - const std::vector<SharedHandle>& /*dataCache*/, const CacheToken& /*token*/) const { - NNTRACE_FULL(NNTRACE_LAYER_DRIVER, NNTRACE_PHASE_COMPILATION, - "sample::Device::prepareModelFromCache"); - return NN_ERROR(ErrorStatus::GENERAL_FAILURE) - << "prepareModelFromCache not supported on sample::Device::prepareModelFromCache(" - << kName << ")"; -} - -GeneralResult<SharedBuffer> Device::allocate(const BufferDesc& desc, - const std::vector<SharedPreparedModel>& preparedModels, - const std::vector<BufferRole>& inputRoles, - const std::vector<BufferRole>& outputRoles) const { - VLOG(DRIVER) << "sample::Device::allocate"; - std::set<PreparedModelRole> roles; - Operand operand; - auto getModel = [](const SharedPreparedModel& preparedModel) -> const Model* { - std::any resource = preparedModel->getUnderlyingResource(); - const Model** maybeModel = 
std::any_cast<const Model*>(&resource); - if (maybeModel == nullptr) { - LOG(ERROR) << "sample::Device::allocate -- unknown remote IPreparedModel."; - return nullptr; - } - return *maybeModel; - }; - if (const auto result = validateMemoryDesc(desc, preparedModels, inputRoles, outputRoles, - getModel, &roles, &operand); - !result.ok()) { - return NN_ERROR(ErrorStatus::INVALID_ARGUMENT) - << "sample::Device::allocate -- validation failed: " << result.error(); - } - - if (isExtensionOperandType(operand.type)) { - return NN_ERROR(ErrorStatus::GENERAL_FAILURE) - << "sample::Device::allocate -- does not support extension type."; - } - - // TODO(xusongw): Support allocating buffers with unknown dimensions or rank. - uint32_t size = nonExtensionOperandSizeOfData(operand.type, operand.dimensions); - VLOG(DRIVER) << "sample::Device::allocate -- type = " << operand.type - << ", dimensions = " << toString(operand.dimensions) << ", size = " << size; - if (size == 0) { - return NN_ERROR(ErrorStatus::GENERAL_FAILURE) - << "sample::Device::allocate -- does not support dynamic output shape."; - } - - auto bufferWrapper = ManagedBuffer::create(size, std::move(roles), operand); - if (bufferWrapper == nullptr) { - return NN_ERROR(ErrorStatus::GENERAL_FAILURE) - << "sample::Device::allocate -- not enough memory."; - } - - auto token = kBufferTracker->add(bufferWrapper); - if (token == nullptr) { - return NN_ERROR(ErrorStatus::GENERAL_FAILURE) - << "sample::Device::allocate -- BufferTracker returned invalid token."; - } - - auto sampleBuffer = std::make_shared<const Buffer>(std::move(bufferWrapper), std::move(token)); - VLOG(DRIVER) << "sample::Device::allocate -- successfully allocates the requested memory"; - return sampleBuffer; -} - -} // namespace android::nn::sample
diff --git a/driver/sample/CanonicalDevice.h b/driver/sample/CanonicalDevice.h deleted file mode 100644 index eeae902..0000000 --- a/driver/sample/CanonicalDevice.h +++ /dev/null
@@ -1,78 +0,0 @@ -/* - * Copyright (C) 2021 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef ANDROID_PACKAGES_MODULES_NEURALNETWORKS_DRIVER_SAMPLE_CANONICAL_DEVICE_H -#define ANDROID_PACKAGES_MODULES_NEURALNETWORKS_DRIVER_SAMPLE_CANONICAL_DEVICE_H - -#include <BufferTracker.h> -#include <CpuExecutor.h> -#include <nnapi/IBuffer.h> -#include <nnapi/IDevice.h> -#include <nnapi/OperandTypes.h> -#include <nnapi/Result.h> -#include <nnapi/Types.h> - -#include <functional> -#include <memory> -#include <optional> -#include <string> -#include <utility> -#include <vector> - -namespace android::nn::sample { - -class Device final : public IDevice { - public: - explicit Device(std::string name, - const IOperationResolver* operationResolver = BuiltinOperationResolver::get()); - - const std::string& getName() const override; - const std::string& getVersionString() const override; - Version getFeatureLevel() const override; - DeviceType getType() const override; - const std::vector<Extension>& getSupportedExtensions() const override; - const Capabilities& getCapabilities() const override; - std::pair<uint32_t, uint32_t> getNumberOfCacheFilesNeeded() const override; - - GeneralResult<void> wait() const override; - - GeneralResult<std::vector<bool>> getSupportedOperations(const Model& model) const override; - - GeneralResult<SharedPreparedModel> prepareModel(const Model& model, - ExecutionPreference preference, - Priority 
priority, OptionalTimePoint deadline, - const std::vector<SharedHandle>& modelCache, - const std::vector<SharedHandle>& dataCache, - const CacheToken& token) const override; - - GeneralResult<SharedPreparedModel> prepareModelFromCache( - OptionalTimePoint deadline, const std::vector<SharedHandle>& modelCache, - const std::vector<SharedHandle>& dataCache, const CacheToken& token) const override; - - GeneralResult<SharedBuffer> allocate(const BufferDesc& desc, - const std::vector<SharedPreparedModel>& preparedModels, - const std::vector<BufferRole>& inputRoles, - const std::vector<BufferRole>& outputRoles) const override; - - private: - const std::string kName; - const IOperationResolver& kOperationResolver; - const std::shared_ptr<BufferTracker> kBufferTracker = BufferTracker::create(); -}; - -} // namespace android::nn::sample - -#endif // ANDROID_PACKAGES_MODULES_NEURALNETWORKS_DRIVER_SAMPLE_CANONICAL_DEVICE_H
diff --git a/driver/sample/CanonicalPreparedModel.cpp b/driver/sample/CanonicalPreparedModel.cpp deleted file mode 100644 index 1546115..0000000 --- a/driver/sample/CanonicalPreparedModel.cpp +++ /dev/null
@@ -1,301 +0,0 @@ -/* - * Copyright (C) 2021 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "CanonicalPreparedModel.h" - -#include <DefaultExecution.h> -#include <Tracing.h> -#include <nnapi/IPreparedModel.h> -#include <nnapi/Result.h> -#include <nnapi/TypeUtils.h> -#include <nnapi/Types.h> -#include <nnapi/Validation.h> - -#include <memory> -#include <tuple> -#include <utility> -#include <vector> - -#include "CanonicalBurst.h" -#include "CanonicalDevice.h" - -namespace android::nn::sample { -namespace { - -GeneralResult<std::pair<std::vector<RunTimePoolInfo>, std::vector<std::shared_ptr<ManagedBuffer>>>> -createRunTimePoolInfos(const Request& request, const BufferTracker& bufferTracker, - const PreparedModel& preparedModel) { - std::vector<RunTimePoolInfo> requestPoolInfos; - std::vector<std::shared_ptr<ManagedBuffer>> bufferWrappers; - requestPoolInfos.reserve(request.pools.size()); - bufferWrappers.reserve(request.pools.size()); - for (uint32_t i = 0; i < request.pools.size(); ++i) { - auto& pool = request.pools[i]; - if (const auto* maybeMemory = std::get_if<SharedMemory>(&pool)) { - auto buffer = RunTimePoolInfo::createFromMemory(*maybeMemory); - if (!buffer.has_value()) { - return NN_ERROR(ErrorStatus::GENERAL_FAILURE) - << "createRuntimeMemoriesFromMemoryPools -- could not map pools"; - } - requestPoolInfos.push_back(std::move(*buffer)); - bufferWrappers.push_back(nullptr); - } else if (const auto* 
maybeToken = std::get_if<Request::MemoryDomainToken>(&pool)) { - auto bufferWrapper = bufferTracker.get(*maybeToken); - if (bufferWrapper == nullptr) { - return NN_ERROR(ErrorStatus::INVALID_ARGUMENT); - } - const auto validationStatus = - bufferWrapper->validateRequest(i, request, &preparedModel); - if (validationStatus != ErrorStatus::NONE) { - return NN_ERROR(validationStatus); - } - requestPoolInfos.push_back(bufferWrapper->createRunTimePoolInfo()); - bufferWrappers.push_back(std::move(bufferWrapper)); - } - } - return std::make_pair(std::move(requestPoolInfos), std::move(bufferWrappers)); -} - -template <typename T> -ExecutionResult<T> makeExecutionResult(GeneralResult<T> result) { - if (!result.has_value()) { - const auto& [message, code] = std::move(result).error(); - return error(code) << message; - } - return std::move(result).value(); -} - -ErrorStatus updateDeviceMemories(ErrorStatus status, const Request& request, - const std::vector<std::shared_ptr<ManagedBuffer>>& bufferWrappers, - const std::vector<OutputShape>& outputShapes) { - if (status == ErrorStatus::NONE) { - for (uint32_t i = 0; i < request.outputs.size(); i++) { - const uint32_t poolIndex = request.outputs[i].location.poolIndex; - const auto& pool = request.pools[poolIndex]; - if (std::holds_alternative<Request::MemoryDomainToken>(pool)) { - if (!bufferWrappers[poolIndex]->updateDimensions(outputShapes[i].dimensions)) { - return ErrorStatus::GENERAL_FAILURE; - } - } - } - for (uint32_t i = 0; i < request.outputs.size(); i++) { - const uint32_t poolIndex = request.outputs[i].location.poolIndex; - const auto& pool = request.pools[poolIndex]; - if (std::holds_alternative<Request::MemoryDomainToken>(pool)) { - bufferWrappers[poolIndex]->setInitialized(true); - } - } - } else if (status == ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) { - // If CpuExecutor reports OUTPUT_INSUFFCIENT_SIZE on a device memory, this is because the - // dimensions of the device memory are incorrectly specified. 
The driver should return - // GENERAL_FAILURE instead in this case. - for (uint32_t i = 0; i < request.outputs.size(); i++) { - const uint32_t poolIndex = request.outputs[i].location.poolIndex; - const auto& pool = request.pools[poolIndex]; - if (std::holds_alternative<Request::MemoryDomainToken>(pool)) { - if (!outputShapes[i].isSufficient) { - LOG(ERROR) << "Invalid dimensions for output " << i - << ": actual shape = " << toString(outputShapes[i].dimensions); - return ErrorStatus::GENERAL_FAILURE; - } - } - } - } - return ErrorStatus::NONE; -} - -} // namespace - -PreparedModel::PreparedModel(Model model, ExecutionPreference preference, Priority priority, - const IOperationResolver* operationResolver, - std::shared_ptr<BufferTracker> bufferTracker, - std::vector<RunTimePoolInfo> poolInfos) - : kModel(std::move(model)), - kExecutionPreference(preference), - kExecutionPriority(priority), - kOperationResolver(*operationResolver), - kBufferTracker(std::move(bufferTracker)), - kPoolInfos(std::move(poolInfos)) { - CHECK(operationResolver != nullptr); - CHECK(kBufferTracker != nullptr); -} - -ExecutionResult<std::pair<std::vector<OutputShape>, Timing>> PreparedModel::execute( - const Request& request, MeasureTiming measure, const OptionalTimePoint& deadline, - const OptionalDuration& loopTimeoutDuration) const { - NNTRACE_FULL(NNTRACE_LAYER_DRIVER, NNTRACE_PHASE_EXECUTION, "sample::PreparedModel::execute"); - VLOG(DRIVER) << "sample::PreparedModel::execute(" << SHOW_IF_DEBUG(request) << ")"; - - TimePoint driverStart, driverEnd, deviceStart, deviceEnd; - if (measure == MeasureTiming::YES) driverStart = Clock::now(); - - if (const auto result = validateRequestForModel(request, kModel); !result.ok()) { - return NN_ERROR(ErrorStatus::INVALID_ARGUMENT) << result.error(); - } - if (hasDeadlinePassed(deadline)) { - return NN_ERROR(ErrorStatus::MISSED_DEADLINE_PERSISTENT); - } - - NNTRACE_FULL_SWITCH(NNTRACE_LAYER_DRIVER, NNTRACE_PHASE_INPUTS_AND_OUTPUTS, - 
"sample::Device::execute"); - const auto [requestPoolInfos, bufferWrappers] = - NN_TRY(makeExecutionResult(createRunTimePoolInfos(request, *kBufferTracker, *this))); - - NNTRACE_FULL_SWITCH(NNTRACE_LAYER_DRIVER, NNTRACE_PHASE_EXECUTION, "sample::Device::execute"); - auto executor = CpuExecutor(&kOperationResolver); - if (loopTimeoutDuration.has_value()) { - executor.setLoopTimeout(loopTimeoutDuration->count()); - } - if (deadline.has_value()) { - executor.setDeadline(*deadline); - } - - // Perform execution. - if (measure == MeasureTiming::YES) deviceStart = Clock::now(); - int n = executor.run(kModel, request, kPoolInfos, requestPoolInfos); - if (measure == MeasureTiming::YES) deviceEnd = Clock::now(); - VLOG(DRIVER) << "executor.run returned " << n; - ErrorStatus executionStatus = convertResultCodeToErrorStatus(n); - const auto& outputShapes = executor.getOutputShapes(); - - // Update device memory metadata. - const ErrorStatus updateStatus = - updateDeviceMemories(executionStatus, request, bufferWrappers, outputShapes); - if (updateStatus != ErrorStatus::NONE) { - return NN_ERROR(updateStatus); - } - if (executionStatus != ErrorStatus::NONE) { - return NN_ERROR(executionStatus, outputShapes); - } - - Timing timing = {}; - if (measure == MeasureTiming::YES) { - driverEnd = Clock::now(); - timing = {.timeOnDevice = deviceEnd - deviceStart, .timeInDriver = driverEnd - driverStart}; - VLOG(DRIVER) << "sample::PreparedModel::execute timing = " << timing; - } - - return std::make_pair(outputShapes, timing); -} - -GeneralResult<std::pair<SyncFence, ExecuteFencedInfoCallback>> PreparedModel::executeFenced( - const Request& request, const std::vector<SyncFence>& waitFor, MeasureTiming measure, - const OptionalTimePoint& deadline, const OptionalDuration& loopTimeoutDuration, - const OptionalDuration& timeoutDurationAfterFence) const { - NNTRACE_FULL(NNTRACE_LAYER_DRIVER, NNTRACE_PHASE_EXECUTION, - "sample::PreparedModel::executeFenced"); - VLOG(DRIVER) << "executeFenced(" 
<< SHOW_IF_DEBUG(request) << ")"; - - TimePoint driverStart, driverEnd, deviceStart, deviceEnd; - if (measure == MeasureTiming::YES) driverStart = Clock::now(); - - if (const auto result = validateRequestForModel(request, kModel); !result.ok()) { - return NN_ERROR(ErrorStatus::INVALID_ARGUMENT) << result.error(); - } - if (hasDeadlinePassed(deadline)) { - return NN_ERROR(ErrorStatus::MISSED_DEADLINE_PERSISTENT); - } - - // Wait for the dependent events to signal - for (const auto& syncFence : waitFor) { - if (!syncFence.getSharedHandle()) { - return NN_ERROR(ErrorStatus::INVALID_ARGUMENT); - } - if (syncFence.syncWait({}) != SyncFence::FenceState::SIGNALED) { - return NN_ERROR(ErrorStatus::GENERAL_FAILURE) << "syncWait failed"; - } - } - - // Update deadline if the timeout duration is closer than the deadline. - auto closestDeadline = deadline; - if (timeoutDurationAfterFence.has_value()) { - const auto timeoutDurationDeadline = makeDeadline(*timeoutDurationAfterFence); - if (!closestDeadline.has_value() || *closestDeadline > timeoutDurationDeadline) { - closestDeadline = timeoutDurationDeadline; - } - } - - TimePoint driverStartAfterFence; - if (measure == MeasureTiming::YES) driverStartAfterFence = Clock::now(); - - NNTRACE_FULL_SWITCH(NNTRACE_LAYER_DRIVER, NNTRACE_PHASE_INPUTS_AND_OUTPUTS, - "sample::PreparedModel::executeFenced"); - const auto [requestPoolInfos, bufferWrappers] = - NN_TRY(createRunTimePoolInfos(request, *kBufferTracker, *this)); - - NNTRACE_FULL_SWITCH(NNTRACE_LAYER_DRIVER, NNTRACE_PHASE_EXECUTION, - "sample::PreparedModel::executeFenced"); - auto executor = CpuExecutor(&kOperationResolver); - if (loopTimeoutDuration.has_value()) { - executor.setLoopTimeout(loopTimeoutDuration->count()); - } - if (closestDeadline.has_value()) { - executor.setDeadline(*closestDeadline); - } - if (measure == MeasureTiming::YES) deviceStart = Clock::now(); - int n = executor.run(kModel, request, kPoolInfos, requestPoolInfos); - if (measure == MeasureTiming::YES) 
deviceEnd = Clock::now(); - VLOG(DRIVER) << "executor.run returned " << n; - ErrorStatus executionStatus = convertResultCodeToErrorStatus(n); - if (executionStatus != ErrorStatus::NONE) { - return NN_ERROR(executionStatus); - } - - // Set output memories to the initialized state. - for (const auto& output : request.outputs) { - const uint32_t poolIndex = output.location.poolIndex; - const auto& pool = request.pools[poolIndex]; - if (std::holds_alternative<Request::MemoryDomainToken>(pool)) { - bufferWrappers[poolIndex]->setInitialized(true); - } - } - - Timing timingSinceLaunch = {}; - Timing timingAfterFence = {}; - if (measure == MeasureTiming::YES) { - driverEnd = Clock::now(); - timingSinceLaunch = {.timeOnDevice = deviceEnd - deviceStart, - .timeInDriver = driverEnd - driverStart}; - timingAfterFence = {.timeOnDevice = deviceEnd - deviceStart, - .timeInDriver = driverEnd - driverStartAfterFence}; - VLOG(DRIVER) << "executeFenced timingSinceLaunch = " << timingSinceLaunch; - VLOG(DRIVER) << "executeFenced timingAfterFence = " << timingAfterFence; - } - - ExecuteFencedInfoCallback fencedExecutionCallback = [timingSinceLaunch, timingAfterFence]() { - return std::make_pair(timingSinceLaunch, timingAfterFence); - }; - return std::make_pair(SyncFence::createAsSignaled(), std::move(fencedExecutionCallback)); -} - -GeneralResult<SharedExecution> PreparedModel::createReusableExecution( - const Request& request, MeasureTiming measure, - const OptionalDuration& loopTimeoutDuration) const { - NNTRACE_FULL(NNTRACE_LAYER_DRIVER, NNTRACE_PHASE_EXECUTION, - "sample::PreparedModel::createReusableExecution"); - return std::make_shared<DefaultExecution>(shared_from_this(), request, measure, - loopTimeoutDuration); -} - -GeneralResult<SharedBurst> PreparedModel::configureExecutionBurst() const { - return std::make_shared<const Burst>(shared_from_this()); -} - -std::any PreparedModel::getUnderlyingResource() const { - return &kModel; -} - -} // namespace android::nn::sample
diff --git a/driver/sample/CanonicalPreparedModel.h b/driver/sample/CanonicalPreparedModel.h deleted file mode 100644 index 51258b3..0000000 --- a/driver/sample/CanonicalPreparedModel.h +++ /dev/null
@@ -1,70 +0,0 @@ -/* - * Copyright (C) 2021 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef ANDROID_PACKAGES_MODULES_NEURALNETWORKS_DRIVER_SAMPLE_CANONICAL_PREPARED_MODEL_H -#define ANDROID_PACKAGES_MODULES_NEURALNETWORKS_DRIVER_SAMPLE_CANONICAL_PREPARED_MODEL_H - -#include <BufferTracker.h> -#include <CpuExecutor.h> -#include <nnapi/IExecution.h> -#include <nnapi/IPreparedModel.h> -#include <nnapi/Result.h> -#include <nnapi/Types.h> - -#include <memory> -#include <tuple> -#include <utility> -#include <vector> - -namespace android::nn::sample { - -class PreparedModel final : public IPreparedModel, - public std::enable_shared_from_this<PreparedModel> { - public: - PreparedModel(Model model, ExecutionPreference preference, Priority priority, - const IOperationResolver* operationResolver, - std::shared_ptr<BufferTracker> bufferTracker, - std::vector<RunTimePoolInfo> poolInfos); - - ExecutionResult<std::pair<std::vector<OutputShape>, Timing>> execute( - const Request& request, MeasureTiming measure, const OptionalTimePoint& deadline, - const OptionalDuration& loopTimeoutDuration) const override; - - GeneralResult<std::pair<SyncFence, ExecuteFencedInfoCallback>> executeFenced( - const Request& request, const std::vector<SyncFence>& waitFor, MeasureTiming measure, - const OptionalTimePoint& deadline, const OptionalDuration& loopTimeoutDuration, - const OptionalDuration& timeoutDurationAfterFence) const override; - - 
GeneralResult<nn::SharedExecution> createReusableExecution( - const Request& request, MeasureTiming measure, - const OptionalDuration& loopTimeoutDuration) const override; - - GeneralResult<SharedBurst> configureExecutionBurst() const override; - - std::any getUnderlyingResource() const override; - - private: - const Model kModel; - [[maybe_unused]] const ExecutionPreference kExecutionPreference; - [[maybe_unused]] const Priority kExecutionPriority; - const IOperationResolver& kOperationResolver; - const std::shared_ptr<BufferTracker> kBufferTracker; - const std::vector<RunTimePoolInfo> kPoolInfos; -}; - -} // namespace android::nn::sample - -#endif // ANDROID_PACKAGES_MODULES_NEURALNETWORKS_DRIVER_SAMPLE_CANONICAL_PREPARED_MODEL_H
diff --git a/driver/sample/LimitedSupportDevice.cpp b/driver/sample/LimitedSupportDevice.cpp deleted file mode 100644 index da0f73c..0000000 --- a/driver/sample/LimitedSupportDevice.cpp +++ /dev/null
@@ -1,116 +0,0 @@ -/* - * Copyright (C) 2021 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "LimitedSupportDevice.h" - -#include <android-base/logging.h> -#include <nnapi/IBuffer.h> -#include <nnapi/IDevice.h> -#include <nnapi/IPreparedModel.h> -#include <nnapi/OperandTypes.h> -#include <nnapi/Result.h> -#include <nnapi/Types.h> -#include <nnapi/Validation.h> - -#include <algorithm> -#include <any> -#include <chrono> -#include <functional> -#include <iterator> -#include <memory> -#include <optional> -#include <set> -#include <string> -#include <utility> -#include <vector> - -namespace android::nn::sample { - -LimitedSupportDevice::LimitedSupportDevice(SharedDevice device, Capabilities capabilities, - SupportedOperationsFunction supportedOperationsFunction) - : kDevice(std::move(device)), - kCapabilities(std::move(capabilities)), - kSupportedOperationsFunction(std::move(supportedOperationsFunction)) { - CHECK(kDevice != nullptr); - CHECK(kSupportedOperationsFunction != nullptr); - const auto result = validate(kCapabilities); - CHECK(result.has_value()) << result.error(); -} - -const std::string& LimitedSupportDevice::getName() const { - return kDevice->getName(); -} - -const std::string& LimitedSupportDevice::getVersionString() const { - return kDevice->getVersionString(); -} - -Version LimitedSupportDevice::getFeatureLevel() const { - return kDevice->getFeatureLevel(); -} - -DeviceType 
LimitedSupportDevice::getType() const { - return kDevice->getType(); -} - -const std::vector<Extension>& LimitedSupportDevice::getSupportedExtensions() const { - return kDevice->getSupportedExtensions(); -} - -const Capabilities& LimitedSupportDevice::getCapabilities() const { - return kCapabilities; -} - -std::pair<uint32_t, uint32_t> LimitedSupportDevice::getNumberOfCacheFilesNeeded() const { - return kDevice->getNumberOfCacheFilesNeeded(); -} - -GeneralResult<void> LimitedSupportDevice::wait() const { - return kDevice->wait(); -} - -GeneralResult<std::vector<bool>> LimitedSupportDevice::getSupportedOperations( - const Model& model) const { - return kSupportedOperationsFunction(model); -} - -GeneralResult<SharedPreparedModel> LimitedSupportDevice::prepareModel( - const Model& model, ExecutionPreference preference, Priority priority, - OptionalTimePoint deadline, const std::vector<SharedHandle>& modelCache, - const std::vector<SharedHandle>& dataCache, const CacheToken& token) const { - const auto supportedOperations = NN_TRY(kSupportedOperationsFunction(model)); - constexpr auto id = [](auto v) { return v; }; - if (!std::all_of(supportedOperations.begin(), supportedOperations.end(), id)) { - return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) << "Not all operations are supported"; - } - return kDevice->prepareModel(model, preference, priority, deadline, modelCache, dataCache, - token); -} - -GeneralResult<SharedPreparedModel> LimitedSupportDevice::prepareModelFromCache( - OptionalTimePoint deadline, const std::vector<SharedHandle>& modelCache, - const std::vector<SharedHandle>& dataCache, const CacheToken& token) const { - return kDevice->prepareModelFromCache(deadline, modelCache, dataCache, token); -} - -GeneralResult<SharedBuffer> LimitedSupportDevice::allocate( - const BufferDesc& desc, const std::vector<SharedPreparedModel>& preparedModels, - const std::vector<BufferRole>& inputRoles, - const std::vector<BufferRole>& outputRoles) const { - return 
kDevice->allocate(desc, preparedModels, inputRoles, outputRoles); -} - -} // namespace android::nn::sample
diff --git a/driver/sample/LimitedSupportDevice.h b/driver/sample/LimitedSupportDevice.h deleted file mode 100644 index 126dd20..0000000 --- a/driver/sample/LimitedSupportDevice.h +++ /dev/null
@@ -1,85 +0,0 @@ -/* - * Copyright (C) 2021 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef ANDROID_PACKAGES_MODULES_NEURALNETWORKS_DRIVER_SAMPLE_LIMITED_SUPPORT_DEVICE_H -#define ANDROID_PACKAGES_MODULES_NEURALNETWORKS_DRIVER_SAMPLE_LIMITED_SUPPORT_DEVICE_H - -#include <nnapi/IBuffer.h> -#include <nnapi/IDevice.h> -#include <nnapi/OperandTypes.h> -#include <nnapi/Result.h> -#include <nnapi/Types.h> - -#include <functional> -#include <memory> -#include <optional> -#include <string> -#include <utility> -#include <vector> - -namespace android::nn::sample { - -// Class that adapts an IDevice object to allow the caller to: -// (1) provide custom Capabilities -// (2) customize the behavior of getSupportedOperations -class LimitedSupportDevice final : public IDevice { - public: - using SupportedOperationsFunction = - std::function<GeneralResult<std::vector<bool>>(const Model&)>; - - // Precondition: device != nullptr - // Precondition: validate(capabilities).ok() - // Precondition: supportedOperationsFunction != nullptr - LimitedSupportDevice(SharedDevice device, Capabilities capabilities, - SupportedOperationsFunction supportedOperationsFunction); - - const std::string& getName() const override; - const std::string& getVersionString() const override; - Version getFeatureLevel() const override; - DeviceType getType() const override; - const std::vector<Extension>& getSupportedExtensions() const override; - const 
Capabilities& getCapabilities() const override; - std::pair<uint32_t, uint32_t> getNumberOfCacheFilesNeeded() const override; - - GeneralResult<void> wait() const override; - - GeneralResult<std::vector<bool>> getSupportedOperations(const Model& model) const override; - - GeneralResult<SharedPreparedModel> prepareModel(const Model& model, - ExecutionPreference preference, - Priority priority, OptionalTimePoint deadline, - const std::vector<SharedHandle>& modelCache, - const std::vector<SharedHandle>& dataCache, - const CacheToken& token) const override; - - GeneralResult<SharedPreparedModel> prepareModelFromCache( - OptionalTimePoint deadline, const std::vector<SharedHandle>& modelCache, - const std::vector<SharedHandle>& dataCache, const CacheToken& token) const override; - - GeneralResult<SharedBuffer> allocate(const BufferDesc& desc, - const std::vector<SharedPreparedModel>& preparedModels, - const std::vector<BufferRole>& inputRoles, - const std::vector<BufferRole>& outputRoles) const override; - - private: - const nn::SharedDevice kDevice; - const Capabilities kCapabilities; - const SupportedOperationsFunction kSupportedOperationsFunction; -}; - -} // namespace android::nn::sample - -#endif // ANDROID_PACKAGES_MODULES_NEURALNETWORKS_DRIVER_SAMPLE_LIMITED_SUPPORT_DEVICE_H
diff --git a/driver/sample/SampleDriver.cpp b/driver/sample/SampleDriver.cpp index f7be5d4..e0c9ad9 100644 --- a/driver/sample/SampleDriver.cpp +++ b/driver/sample/SampleDriver.cpp
@@ -18,17 +18,10 @@ #include "SampleDriver.h" -#include <CpuExecutor.h> -#include <ExecutionBurstServer.h> -#include <HalBufferTracker.h> -#include <HalInterfaces.h> -#include <Tracing.h> -#include <ValidateHal.h> #include <android-base/logging.h> #include <android-base/properties.h> +#include <android/sync.h> #include <hidl/LegacySupport.h> -#include <nnapi/Types.h> -#include <nnapi/hal/1.3/Conversions.h> #include <algorithm> #include <chrono> @@ -41,7 +34,13 @@ #include <utility> #include <vector> +#include "BufferTracker.h" +#include "CpuExecutor.h" +#include "ExecutionBurstServer.h" +#include "HalInterfaces.h" #include "SampleDriverUtils.h" +#include "Tracing.h" +#include "ValidateHal.h" namespace android { namespace nn { @@ -49,192 +48,188 @@ namespace { -uint64_t microsecondsDuration(TimePoint end, TimePoint start) { - using Microseconds = std::chrono::duration<uint64_t, std::micro>; - return std::chrono::duration_cast<Microseconds>(end - start).count(); +using namespace hal; + +using time_point = std::chrono::steady_clock::time_point; + +auto now() { + return std::chrono::steady_clock::now(); +}; + +auto microsecondsDuration(decltype(now()) end, decltype(now()) start) { + return std::chrono::duration_cast<std::chrono::microseconds>(end - start).count(); }; } // namespace -static const V1_2::Timing kNoTiming = {.timeOnDevice = UINT64_MAX, .timeInDriver = UINT64_MAX}; +static const Timing kNoTiming = {.timeOnDevice = UINT64_MAX, .timeInDriver = UINT64_MAX}; -hardware::Return<void> SampleDriver::getCapabilities(getCapabilities_cb cb) { +Return<void> SampleDriver::getCapabilities(getCapabilities_cb cb) { NNTRACE_FULL(NNTRACE_LAYER_DRIVER, NNTRACE_PHASE_INITIALIZATION, "SampleDriver::getCapabilities"); - return getCapabilities_1_3( - [&](V1_3::ErrorStatus error, const V1_3::Capabilities& capabilities) { - // TODO(dgross): Do we need to check compliantWithV1_0(capabilities)? 
- cb(convertToV1_0(error), convertToV1_0(capabilities)); - }); + return getCapabilities_1_3([&](ErrorStatus error, const V1_3::Capabilities& capabilities) { + // TODO(dgross): Do we need to check compliantWithV1_0(capabilities)? + cb(convertToV1_0(error), convertToV1_0(capabilities)); + }); } -hardware::Return<void> SampleDriver::getCapabilities_1_1(getCapabilities_1_1_cb cb) { +Return<void> SampleDriver::getCapabilities_1_1(getCapabilities_1_1_cb cb) { NNTRACE_FULL(NNTRACE_LAYER_DRIVER, NNTRACE_PHASE_INITIALIZATION, "SampleDriver::getCapabilities_1_1"); - return getCapabilities_1_3( - [&](V1_3::ErrorStatus error, const V1_3::Capabilities& capabilities) { - // TODO(dgross): Do we need to check compliantWithV1_1(capabilities)? - cb(convertToV1_0(error), convertToV1_1(capabilities)); - }); + return getCapabilities_1_3([&](ErrorStatus error, const V1_3::Capabilities& capabilities) { + // TODO(dgross): Do we need to check compliantWithV1_1(capabilities)? + cb(convertToV1_0(error), convertToV1_1(capabilities)); + }); } -hardware::Return<void> SampleDriver::getCapabilities_1_2(getCapabilities_1_2_cb cb) { +Return<void> SampleDriver::getCapabilities_1_2(getCapabilities_1_2_cb cb) { NNTRACE_FULL(NNTRACE_LAYER_DRIVER, NNTRACE_PHASE_INITIALIZATION, "SampleDriver::getCapabilities_1_2"); - return getCapabilities_1_3( - [&](V1_3::ErrorStatus error, const V1_3::Capabilities& capabilities) { - // TODO(dgross): Do we need to check compliantWithV1_2(capabilities)? - cb(convertToV1_0(error), convertToV1_2(capabilities)); - }); + return getCapabilities_1_3([&](ErrorStatus error, const V1_3::Capabilities& capabilities) { + // TODO(dgross): Do we need to check compliantWithV1_2(capabilities)? 
+ cb(convertToV1_0(error), convertToV1_2(capabilities)); + }); } -hardware::Return<void> SampleDriver::getVersionString(getVersionString_cb cb) { +Return<void> SampleDriver::getVersionString(getVersionString_cb cb) { NNTRACE_FULL(NNTRACE_LAYER_DRIVER, NNTRACE_PHASE_INITIALIZATION, "SampleDriver::getVersionString"); cb(V1_0::ErrorStatus::NONE, "JUST_AN_EXAMPLE"); - return hardware::Void(); + return Void(); } -hardware::Return<void> SampleDriver::getType(getType_cb cb) { +Return<void> SampleDriver::getType(getType_cb cb) { NNTRACE_FULL(NNTRACE_LAYER_DRIVER, NNTRACE_PHASE_INITIALIZATION, "SampleDriver::getType"); cb(V1_0::ErrorStatus::NONE, V1_2::DeviceType::CPU); - return hardware::Void(); + return Void(); } -hardware::Return<void> SampleDriver::getSupportedExtensions(getSupportedExtensions_cb cb) { +Return<void> SampleDriver::getSupportedExtensions(getSupportedExtensions_cb cb) { NNTRACE_FULL(NNTRACE_LAYER_DRIVER, NNTRACE_PHASE_INITIALIZATION, "SampleDriver::getSupportedExtensions"); cb(V1_0::ErrorStatus::NONE, {/* No extensions. 
*/}); - return hardware::Void(); + return Void(); } -hardware::Return<void> SampleDriver::getSupportedOperations(const V1_0::Model& model, - getSupportedOperations_cb cb) { +Return<void> SampleDriver::getSupportedOperations(const V1_0::Model& model, + getSupportedOperations_cb cb) { NNTRACE_FULL(NNTRACE_LAYER_DRIVER, NNTRACE_PHASE_COMPILATION, "SampleDriver::getSupportedOperations"); if (!validateModel(model)) { VLOG(DRIVER) << "getSupportedOperations"; cb(V1_0::ErrorStatus::INVALID_ARGUMENT, {}); - return hardware::Void(); + return Void(); } - return getSupportedOperations_1_3( - convertToV1_3(model), - [&](V1_3::ErrorStatus status, const hardware::hidl_vec<bool>& supported) { - cb(convertToV1_0(status), supported); - }); + return getSupportedOperations_1_3(convertToV1_3(model), + [&](ErrorStatus status, const hidl_vec<bool>& supported) { + cb(convertToV1_0(status), supported); + }); } -hardware::Return<void> SampleDriver::getSupportedOperations_1_1(const V1_1::Model& model, - getSupportedOperations_1_1_cb cb) { +Return<void> SampleDriver::getSupportedOperations_1_1(const V1_1::Model& model, + getSupportedOperations_1_1_cb cb) { NNTRACE_FULL(NNTRACE_LAYER_DRIVER, NNTRACE_PHASE_COMPILATION, "SampleDriver::getSupportedOperations_1_1"); if (!validateModel(model)) { VLOG(DRIVER) << "getSupportedOperations_1_1"; cb(V1_0::ErrorStatus::INVALID_ARGUMENT, {}); - return hardware::Void(); + return Void(); } - return getSupportedOperations_1_3( - convertToV1_3(model), - [&](V1_3::ErrorStatus status, const hardware::hidl_vec<bool>& supported) { - cb(convertToV1_0(status), supported); - }); + return getSupportedOperations_1_3(convertToV1_3(model), + [&](ErrorStatus status, const hidl_vec<bool>& supported) { + cb(convertToV1_0(status), supported); + }); } -hardware::Return<void> SampleDriver::getSupportedOperations_1_2(const V1_2::Model& model, - getSupportedOperations_1_2_cb cb) { +Return<void> SampleDriver::getSupportedOperations_1_2(const V1_2::Model& model, + 
getSupportedOperations_1_2_cb cb) { NNTRACE_FULL(NNTRACE_LAYER_DRIVER, NNTRACE_PHASE_COMPILATION, "SampleDriver::getSupportedOperations_1_2"); if (!validateModel(model)) { VLOG(DRIVER) << "getSupportedOperations_1_2"; cb(V1_0::ErrorStatus::INVALID_ARGUMENT, {}); - return hardware::Void(); + return Void(); } - return getSupportedOperations_1_3( - convertToV1_3(model), - [&](V1_3::ErrorStatus status, const hardware::hidl_vec<bool>& supported) { - cb(convertToV1_0(status), supported); - }); + return getSupportedOperations_1_3(convertToV1_3(model), + [&](ErrorStatus status, const hidl_vec<bool>& supported) { + cb(convertToV1_0(status), supported); + }); } -hardware::Return<void> SampleDriver::getNumberOfCacheFilesNeeded( - getNumberOfCacheFilesNeeded_cb cb) { +Return<void> SampleDriver::getNumberOfCacheFilesNeeded(getNumberOfCacheFilesNeeded_cb cb) { NNTRACE_FULL(NNTRACE_LAYER_DRIVER, NNTRACE_PHASE_INITIALIZATION, "SampleDriver::getNumberOfCacheFilesNeeded"); // Set both numbers to be 0 for cache not supported. 
cb(V1_0::ErrorStatus::NONE, /*numModelCache=*/0, /*numDataCache=*/0); - return hardware::Void(); + return Void(); } -hardware::Return<V1_0::ErrorStatus> SampleDriver::prepareModel( +Return<V1_0::ErrorStatus> SampleDriver::prepareModel( const V1_0::Model& model, const sp<V1_0::IPreparedModelCallback>& callback) { NNTRACE_FULL(NNTRACE_LAYER_DRIVER, NNTRACE_PHASE_COMPILATION, "SampleDriver::prepareModel"); - const V1_3::ErrorStatus status = - prepareModelBase(model, this, V1_1::ExecutionPreference::FAST_SINGLE_ANSWER, - kDefaultPriority13, {}, callback); + const ErrorStatus status = prepareModelBase( + model, this, ExecutionPreference::FAST_SINGLE_ANSWER, kDefaultPriority, {}, callback); return convertToV1_0(status); } -hardware::Return<V1_0::ErrorStatus> SampleDriver::prepareModel_1_1( - const V1_1::Model& model, V1_1::ExecutionPreference preference, +Return<V1_0::ErrorStatus> SampleDriver::prepareModel_1_1( + const V1_1::Model& model, ExecutionPreference preference, const sp<V1_0::IPreparedModelCallback>& callback) { NNTRACE_FULL(NNTRACE_LAYER_DRIVER, NNTRACE_PHASE_COMPILATION, "SampleDriver::prepareModel_1_1"); - const V1_3::ErrorStatus status = - prepareModelBase(model, this, preference, kDefaultPriority13, {}, callback); + const ErrorStatus status = + prepareModelBase(model, this, preference, kDefaultPriority, {}, callback); return convertToV1_0(status); } -hardware::Return<V1_0::ErrorStatus> SampleDriver::prepareModel_1_2( - const V1_2::Model& model, V1_1::ExecutionPreference preference, - const hardware::hidl_vec<hardware::hidl_handle>&, - const hardware::hidl_vec<hardware::hidl_handle>&, const HalCacheToken&, +Return<V1_0::ErrorStatus> SampleDriver::prepareModel_1_2( + const V1_2::Model& model, ExecutionPreference preference, const hidl_vec<hidl_handle>&, + const hidl_vec<hidl_handle>&, const CacheToken&, const sp<V1_2::IPreparedModelCallback>& callback) { NNTRACE_FULL(NNTRACE_LAYER_DRIVER, NNTRACE_PHASE_COMPILATION, "SampleDriver::prepareModel_1_2"); - const 
V1_3::ErrorStatus status = - prepareModelBase(model, this, preference, kDefaultPriority13, {}, callback); + const ErrorStatus status = + prepareModelBase(model, this, preference, kDefaultPriority, {}, callback); return convertToV1_0(status); } -hardware::Return<V1_3::ErrorStatus> SampleDriver::prepareModel_1_3( - const V1_3::Model& model, V1_1::ExecutionPreference preference, V1_3::Priority priority, - const V1_3::OptionalTimePoint& deadline, const hardware::hidl_vec<hardware::hidl_handle>&, - const hardware::hidl_vec<hardware::hidl_handle>&, const HalCacheToken&, +Return<V1_3::ErrorStatus> SampleDriver::prepareModel_1_3( + const V1_3::Model& model, ExecutionPreference preference, Priority priority, + const OptionalTimePoint& deadline, const hidl_vec<hidl_handle>&, + const hidl_vec<hidl_handle>&, const CacheToken&, const sp<V1_3::IPreparedModelCallback>& callback) { NNTRACE_FULL(NNTRACE_LAYER_DRIVER, NNTRACE_PHASE_COMPILATION, "SampleDriver::prepareModel_1_3"); return prepareModelBase(model, this, preference, priority, deadline, callback); } -hardware::Return<V1_0::ErrorStatus> SampleDriver::prepareModelFromCache( - const hardware::hidl_vec<hardware::hidl_handle>&, - const hardware::hidl_vec<hardware::hidl_handle>&, const HalCacheToken&, +Return<V1_0::ErrorStatus> SampleDriver::prepareModelFromCache( + const hidl_vec<hidl_handle>&, const hidl_vec<hidl_handle>&, const CacheToken&, const sp<V1_2::IPreparedModelCallback>& callback) { NNTRACE_FULL(NNTRACE_LAYER_DRIVER, NNTRACE_PHASE_COMPILATION, "SampleDriver::prepareModelFromCache"); - notify(callback, V1_3::ErrorStatus::GENERAL_FAILURE, nullptr); + notify(callback, ErrorStatus::GENERAL_FAILURE, nullptr); return V1_0::ErrorStatus::GENERAL_FAILURE; } -hardware::Return<V1_3::ErrorStatus> SampleDriver::prepareModelFromCache_1_3( - const V1_3::OptionalTimePoint& /*deadline*/, - const hardware::hidl_vec<hardware::hidl_handle>&, - const hardware::hidl_vec<hardware::hidl_handle>&, const HalCacheToken&, +Return<ErrorStatus> 
SampleDriver::prepareModelFromCache_1_3( + const OptionalTimePoint& /*deadline*/, const hidl_vec<hidl_handle>&, + const hidl_vec<hidl_handle>&, const CacheToken&, const sp<V1_3::IPreparedModelCallback>& callback) { NNTRACE_FULL(NNTRACE_LAYER_DRIVER, NNTRACE_PHASE_COMPILATION, "SampleDriver::prepareModelFromCache_1_3"); - notify(callback, V1_3::ErrorStatus::GENERAL_FAILURE, nullptr); - return V1_3::ErrorStatus::GENERAL_FAILURE; + notify(callback, ErrorStatus::GENERAL_FAILURE, nullptr); + return ErrorStatus::GENERAL_FAILURE; } -hardware::Return<V1_0::DeviceStatus> SampleDriver::getStatus() { +Return<DeviceStatus> SampleDriver::getStatus() { NNTRACE_FULL(NNTRACE_LAYER_DRIVER, NNTRACE_PHASE_UNSPECIFIED, "SampleDriver::getStatus"); VLOG(DRIVER) << "getStatus()"; - return V1_0::DeviceStatus::AVAILABLE; + return DeviceStatus::AVAILABLE; } // Safely downcast an IPreparedModel object to SamplePreparedModel. // This function will return nullptr if the IPreparedModel object is not originated from the sample // driver process. 
static const SamplePreparedModel* castToSamplePreparedModel( - const sp<V1_3::IPreparedModel>& preparedModel) { + const sp<IPreparedModel>& preparedModel) { if (preparedModel->isRemote()) { return nullptr; } else { @@ -244,15 +239,14 @@ } } -hardware::Return<void> SampleDriver::allocate( - const V1_3::BufferDesc& desc, - const hardware::hidl_vec<sp<V1_3::IPreparedModel>>& preparedModels, - const hardware::hidl_vec<V1_3::BufferRole>& inputRoles, - const hardware::hidl_vec<V1_3::BufferRole>& outputRoles, allocate_cb cb) { +Return<void> SampleDriver::allocate(const V1_3::BufferDesc& desc, + const hidl_vec<sp<V1_3::IPreparedModel>>& preparedModels, + const hidl_vec<V1_3::BufferRole>& inputRoles, + const hidl_vec<V1_3::BufferRole>& outputRoles, allocate_cb cb) { constexpr uint32_t kInvalidBufferToken = 0; VLOG(DRIVER) << "SampleDriver::allocate"; - std::set<HalPreparedModelRole> roles; + std::set<PreparedModelRole> roles; V1_3::Operand operand; auto getModel = [](const sp<V1_3::IPreparedModel>& preparedModel) -> const V1_3::Model* { const auto* samplePreparedModel = castToSamplePreparedModel(preparedModel); @@ -265,14 +259,14 @@ if (!validateMemoryDesc(desc, preparedModels, inputRoles, outputRoles, getModel, &roles, &operand)) { LOG(ERROR) << "SampleDriver::allocate -- validation failed."; - cb(V1_3::ErrorStatus::INVALID_ARGUMENT, nullptr, kInvalidBufferToken); - return hardware::Void(); + cb(ErrorStatus::INVALID_ARGUMENT, nullptr, kInvalidBufferToken); + return Void(); } if (isExtensionOperandType(operand.type)) { LOG(ERROR) << "SampleDriver::allocate -- does not support extension type."; - cb(V1_3::ErrorStatus::GENERAL_FAILURE, nullptr, kInvalidBufferToken); - return hardware::Void(); + cb(ErrorStatus::GENERAL_FAILURE, nullptr, kInvalidBufferToken); + return Void(); } // TODO(xusongw): Support allocating buffers with unknown dimensions or rank. 
@@ -281,30 +275,29 @@ << ", dimensions = " << toString(operand.dimensions) << ", size = " << size; if (size == 0) { LOG(ERROR) << "SampleDriver::allocate -- does not support dynamic output shape."; - cb(V1_3::ErrorStatus::GENERAL_FAILURE, nullptr, kInvalidBufferToken); - return hardware::Void(); + cb(ErrorStatus::GENERAL_FAILURE, nullptr, kInvalidBufferToken); + return Void(); } - auto bufferWrapper = - HalManagedBuffer::create(size, std::move(roles), uncheckedConvert(operand)); + auto bufferWrapper = ManagedBuffer::create(size, std::move(roles), std::move(operand)); if (bufferWrapper == nullptr) { LOG(ERROR) << "SampleDriver::allocate -- not enough memory."; - cb(V1_3::ErrorStatus::GENERAL_FAILURE, nullptr, kInvalidBufferToken); - return hardware::Void(); + cb(ErrorStatus::GENERAL_FAILURE, nullptr, kInvalidBufferToken); + return Void(); } - auto token = mHalBufferTracker->add(bufferWrapper); + auto token = mBufferTracker->add(bufferWrapper); if (token == nullptr) { - LOG(ERROR) << "SampleDriver::allocate -- HalBufferTracker returned invalid token."; - cb(V1_3::ErrorStatus::GENERAL_FAILURE, nullptr, kInvalidBufferToken); - return hardware::Void(); + LOG(ERROR) << "SampleDriver::allocate -- BufferTracker returned invalid token."; + cb(ErrorStatus::GENERAL_FAILURE, nullptr, kInvalidBufferToken); + return Void(); } const uint32_t tokenValue = token->get(); sp<SampleBuffer> sampleBuffer = new SampleBuffer(std::move(bufferWrapper), std::move(token)); VLOG(DRIVER) << "SampleDriver::allocate -- successfully allocates the requested memory"; - cb(V1_3::ErrorStatus::NONE, std::move(sampleBuffer), tokenValue); - return hardware::Void(); + cb(ErrorStatus::NONE, std::move(sampleBuffer), tokenValue); + return Void(); } int SampleDriver::run() { @@ -326,45 +319,43 @@ dstPool.flush(); } -hardware::Return<V1_3::ErrorStatus> SampleBuffer::copyTo(const hardware::hidl_memory& dst) { - const auto dstPool = RunTimePoolInfo::createFromMemory(uncheckedConvert(dst)); +Return<ErrorStatus> 
SampleBuffer::copyTo(const hidl_memory& dst) { + const auto dstPool = RunTimePoolInfo::createFromHidlMemory(dst); if (!dstPool.has_value()) { LOG(ERROR) << "SampleBuffer::copyTo -- unable to map dst memory."; - return V1_3::ErrorStatus::GENERAL_FAILURE; + return ErrorStatus::GENERAL_FAILURE; } - const V1_3::ErrorStatus validationStatus = - convertToV1_3(kBuffer->validateCopyTo(dstPool->getSize())); - if (validationStatus != V1_3::ErrorStatus::NONE) { + const ErrorStatus validationStatus = kBuffer->validateCopyTo(dstPool->getSize()); + if (validationStatus != ErrorStatus::NONE) { return validationStatus; } const auto srcPool = kBuffer->createRunTimePoolInfo(); copyRunTimePoolInfos(srcPool, dstPool.value()); - return V1_3::ErrorStatus::NONE; + return ErrorStatus::NONE; } -static V1_3::ErrorStatus copyFromInternal(const hardware::hidl_memory& src, - const hardware::hidl_vec<uint32_t>& dimensions, - const std::shared_ptr<HalManagedBuffer>& bufferWrapper) { +static ErrorStatus copyFromInternal(const hidl_memory& src, const hidl_vec<uint32_t>& dimensions, + const std::shared_ptr<ManagedBuffer>& bufferWrapper) { CHECK(bufferWrapper != nullptr); - const auto srcPool = RunTimePoolInfo::createFromMemory(uncheckedConvert(src)); + const auto srcPool = RunTimePoolInfo::createFromHidlMemory(src); if (!srcPool.has_value()) { LOG(ERROR) << "SampleBuffer::copyFrom -- unable to map src memory."; - return V1_3::ErrorStatus::GENERAL_FAILURE; + return ErrorStatus::GENERAL_FAILURE; } - const V1_3::ErrorStatus validationStatus = - convertToV1_3(bufferWrapper->validateCopyFrom(dimensions, srcPool->getSize())); - if (validationStatus != V1_3::ErrorStatus::NONE) { + const ErrorStatus validationStatus = + bufferWrapper->validateCopyFrom(dimensions, srcPool->getSize()); + if (validationStatus != ErrorStatus::NONE) { return validationStatus; } const auto dstPool = bufferWrapper->createRunTimePoolInfo(); copyRunTimePoolInfos(srcPool.value(), dstPool); - return V1_3::ErrorStatus::NONE; + return 
ErrorStatus::NONE; } -hardware::Return<V1_3::ErrorStatus> SampleBuffer::copyFrom( - const hardware::hidl_memory& src, const hardware::hidl_vec<uint32_t>& dimensions) { +Return<ErrorStatus> SampleBuffer::copyFrom(const hidl_memory& src, + const hidl_vec<uint32_t>& dimensions) { const auto status = copyFromInternal(src, dimensions, kBuffer); - if (status == V1_3::ErrorStatus::NONE) { + if (status == ErrorStatus::NONE) { kBuffer->updateDimensions(dimensions); kBuffer->setInitialized(true); } else { @@ -374,38 +365,37 @@ } bool SamplePreparedModel::initialize() { - return setRunTimePoolInfosFromCanonicalMemories(&mPoolInfos, uncheckedConvert(mModel.pools)); + return setRunTimePoolInfosFromHidlMemories(&mPoolInfos, mModel.pools); } -static std::tuple<V1_3::ErrorStatus, std::vector<RunTimePoolInfo>, - std::vector<std::shared_ptr<HalManagedBuffer>>> -createRunTimePoolInfos(const V1_3::Request& request, const SampleDriver& driver, +static std::tuple<ErrorStatus, std::vector<RunTimePoolInfo>, + std::vector<std::shared_ptr<ManagedBuffer>>> +createRunTimePoolInfos(const Request& request, const SampleDriver& driver, const SamplePreparedModel* preparedModel) { std::vector<RunTimePoolInfo> requestPoolInfos; - std::vector<std::shared_ptr<HalManagedBuffer>> bufferWrappers; + std::vector<std::shared_ptr<ManagedBuffer>> bufferWrappers; requestPoolInfos.reserve(request.pools.size()); bufferWrappers.reserve(request.pools.size()); for (uint32_t i = 0; i < request.pools.size(); i++) { auto& pool = request.pools[i]; switch (pool.getDiscriminator()) { - case V1_3::Request::MemoryPool::hidl_discriminator::hidlMemory: { - auto buffer = - RunTimePoolInfo::createFromMemory(uncheckedConvert(pool.hidlMemory())); + case Request::MemoryPool::hidl_discriminator::hidlMemory: { + auto buffer = RunTimePoolInfo::createFromHidlMemory(pool.hidlMemory()); if (!buffer.has_value()) { LOG(ERROR) << "createRuntimeMemoriesFromMemoryPools -- could not map pools"; - return {V1_3::ErrorStatus::GENERAL_FAILURE, 
{}, {}}; + return {ErrorStatus::GENERAL_FAILURE, {}, {}}; } requestPoolInfos.push_back(std::move(*buffer)); bufferWrappers.push_back(nullptr); } break; - case V1_3::Request::MemoryPool::hidl_discriminator::token: { - auto bufferWrapper = driver.getHalBufferTracker()->get(pool.token()); + case Request::MemoryPool::hidl_discriminator::token: { + auto bufferWrapper = driver.getBufferTracker()->get(pool.token()); if (bufferWrapper == nullptr) { - return {V1_3::ErrorStatus::INVALID_ARGUMENT, {}, {}}; + return {ErrorStatus::INVALID_ARGUMENT, {}, {}}; } - const auto validationStatus = convertToV1_3(bufferWrapper->validateRequest( - i, uncheckedConvert(request), preparedModel)); - if (validationStatus != V1_3::ErrorStatus::NONE) { + const auto validationStatus = + bufferWrapper->validateRequest(i, request, preparedModel); + if (validationStatus != ErrorStatus::NONE) { return {validationStatus, {}, {}}; } requestPoolInfos.push_back(bufferWrapper->createRunTimePoolInfo()); @@ -413,62 +403,63 @@ } break; } } - return {V1_3::ErrorStatus::NONE, std::move(requestPoolInfos), std::move(bufferWrappers)}; + return {ErrorStatus::NONE, std::move(requestPoolInfos), std::move(bufferWrappers)}; } -static V1_3::ErrorStatus updateDeviceMemories( - V1_3::ErrorStatus status, const V1_3::Request& request, - const std::vector<std::shared_ptr<HalManagedBuffer>>& bufferWrappers, - const hardware::hidl_vec<V1_2::OutputShape>& outputShapes) { - if (status == V1_3::ErrorStatus::NONE) { +static ErrorStatus updateDeviceMemories( + ErrorStatus status, const Request& request, + const std::vector<std::shared_ptr<ManagedBuffer>>& bufferWrappers, + const hidl_vec<OutputShape>& outputShapes) { + if (status == ErrorStatus::NONE) { for (uint32_t i = 0; i < request.outputs.size(); i++) { const uint32_t poolIndex = request.outputs[i].location.poolIndex; const auto& pool = request.pools[poolIndex]; - if (pool.getDiscriminator() == V1_3::Request::MemoryPool::hidl_discriminator::token) { + if 
(pool.getDiscriminator() == Request::MemoryPool::hidl_discriminator::token) { if (!bufferWrappers[poolIndex]->updateDimensions(outputShapes[i].dimensions)) { - return V1_3::ErrorStatus::GENERAL_FAILURE; + return ErrorStatus::GENERAL_FAILURE; } } } for (uint32_t i = 0; i < request.outputs.size(); i++) { const uint32_t poolIndex = request.outputs[i].location.poolIndex; const auto& pool = request.pools[poolIndex]; - if (pool.getDiscriminator() == V1_3::Request::MemoryPool::hidl_discriminator::token) { + if (pool.getDiscriminator() == Request::MemoryPool::hidl_discriminator::token) { bufferWrappers[poolIndex]->setInitialized(true); } } - } else if (status == V1_3::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) { + } else if (status == ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) { // If CpuExecutor reports OUTPUT_INSUFFCIENT_SIZE on a device memory, this is because the // dimensions of the device memory are incorrectly specified. The driver should return // GENERAL_FAILURE instead in this case. for (uint32_t i = 0; i < request.outputs.size(); i++) { const uint32_t poolIndex = request.outputs[i].location.poolIndex; const auto& pool = request.pools[poolIndex]; - if (pool.getDiscriminator() == V1_3::Request::MemoryPool::hidl_discriminator::token) { + if (pool.getDiscriminator() == Request::MemoryPool::hidl_discriminator::token) { if (!outputShapes[i].isSufficient) { LOG(ERROR) << "Invalid dimensions for output " << i << ": actual shape = " << toString(outputShapes[i].dimensions); - return V1_3::ErrorStatus::GENERAL_FAILURE; + return ErrorStatus::GENERAL_FAILURE; } } } } - return V1_3::ErrorStatus::NONE; + return ErrorStatus::NONE; } template <typename T_IExecutionCallback> -void asyncExecute(const V1_3::Request& request, V1_2::MeasureTiming measure, TimePoint driverStart, - const V1_3::Model& model, const SampleDriver& driver, +void asyncExecute(const Request& request, MeasureTiming measure, time_point driverStart, + const Model& model, const SampleDriver& driver, const 
SamplePreparedModel* preparedModel, - const std::vector<RunTimePoolInfo>& poolInfos, const OptionalTimePoint& deadline, - const V1_3::OptionalTimeoutDuration& loopTimeoutDuration, + const std::vector<RunTimePoolInfo>& poolInfos, + const std::optional<Deadline>& deadline, + const OptionalTimeoutDuration& loopTimeoutDuration, const sp<T_IExecutionCallback>& callback) { NNTRACE_FULL(NNTRACE_LAYER_DRIVER, NNTRACE_PHASE_INPUTS_AND_OUTPUTS, "SampleDriver::asyncExecute"); const auto [poolStatus, requestPoolInfos, bufferWrappers] = createRunTimePoolInfos(request, driver, preparedModel); - if (poolStatus != V1_3::ErrorStatus::NONE) { + if (poolStatus != ErrorStatus::NONE) { notify(callback, poolStatus, {}, kNoTiming); return; } @@ -477,34 +468,32 @@ "SampleDriver::asyncExecute"); CpuExecutor executor = driver.getExecutor(); if (loopTimeoutDuration.getDiscriminator() != - V1_3::OptionalTimeoutDuration::hidl_discriminator::none) { + OptionalTimeoutDuration::hidl_discriminator::none) { executor.setLoopTimeout(loopTimeoutDuration.nanoseconds()); } if (deadline.has_value()) { executor.setDeadline(*deadline); } - TimePoint driverEnd, deviceStart, deviceEnd; - if (measure == V1_2::MeasureTiming::YES) deviceStart = Clock::now(); - int n = executor.run(uncheckedConvert(model), uncheckedConvert(request), poolInfos, - requestPoolInfos); - if (measure == V1_2::MeasureTiming::YES) deviceEnd = Clock::now(); + time_point driverEnd, deviceStart, deviceEnd; + if (measure == MeasureTiming::YES) deviceStart = now(); + int n = executor.run(model, request, poolInfos, requestPoolInfos); + if (measure == MeasureTiming::YES) deviceEnd = now(); VLOG(DRIVER) << "executor.run returned " << n; - V1_3::ErrorStatus executionStatus = convertResultCodeToHalErrorStatus(n); - hardware::hidl_vec<V1_2::OutputShape> outputShapes = convertToV1_2(executor.getOutputShapes()); + ErrorStatus executionStatus = convertResultCodeToErrorStatus(n); + hidl_vec<OutputShape> outputShapes = executor.getOutputShapes(); // 
Update device memory metadata. - const V1_3::ErrorStatus updateStatus = + const ErrorStatus updateStatus = updateDeviceMemories(executionStatus, request, bufferWrappers, outputShapes); - if (updateStatus != V1_3::ErrorStatus::NONE) { + if (updateStatus != ErrorStatus::NONE) { notify(callback, updateStatus, {}, kNoTiming); return; } - if (measure == V1_2::MeasureTiming::YES && executionStatus == V1_3::ErrorStatus::NONE) { - driverEnd = Clock::now(); - V1_2::Timing timing = { - .timeOnDevice = uint64_t(microsecondsDuration(deviceEnd, deviceStart)), - .timeInDriver = uint64_t(microsecondsDuration(driverEnd, driverStart))}; + if (measure == MeasureTiming::YES && executionStatus == ErrorStatus::NONE) { + driverEnd = now(); + Timing timing = {.timeOnDevice = uint64_t(microsecondsDuration(deviceEnd, deviceStart)), + .timeInDriver = uint64_t(microsecondsDuration(driverEnd, driverStart))}; VLOG(DRIVER) << "SampleDriver::asyncExecute timing = " << toString(timing); notify(callback, executionStatus, outputShapes, timing); } else { @@ -513,31 +502,30 @@ } template <typename T_IExecutionCallback> -V1_3::ErrorStatus executeBase(const V1_3::Request& request, V1_2::MeasureTiming measure, - const V1_3::Model& model, const SampleDriver& driver, - const SamplePreparedModel* preparedModel, - const std::vector<RunTimePoolInfo>& poolInfos, - const V1_3::OptionalTimePoint& halDeadline, - const V1_3::OptionalTimeoutDuration& loopTimeoutDuration, - const sp<T_IExecutionCallback>& callback) { +ErrorStatus executeBase(const Request& request, MeasureTiming measure, const Model& model, + const SampleDriver& driver, const SamplePreparedModel* preparedModel, + const std::vector<RunTimePoolInfo>& poolInfos, + const OptionalTimePoint& halDeadline, + const OptionalTimeoutDuration& loopTimeoutDuration, + const sp<T_IExecutionCallback>& callback) { NNTRACE_FULL(NNTRACE_LAYER_DRIVER, NNTRACE_PHASE_EXECUTION, "SampleDriver::executeBase"); VLOG(DRIVER) << "executeBase(" << 
SHOW_IF_DEBUG(toString(request)) << ")"; - TimePoint driverStart; - if (measure == V1_2::MeasureTiming::YES) driverStart = Clock::now(); + time_point driverStart; + if (measure == MeasureTiming::YES) driverStart = now(); if (callback.get() == nullptr) { LOG(ERROR) << "invalid callback passed to executeBase"; - return V1_3::ErrorStatus::INVALID_ARGUMENT; + return ErrorStatus::INVALID_ARGUMENT; } if (!validateRequest(request, model)) { - notify(callback, V1_3::ErrorStatus::INVALID_ARGUMENT, {}, kNoTiming); - return V1_3::ErrorStatus::INVALID_ARGUMENT; + notify(callback, ErrorStatus::INVALID_ARGUMENT, {}, kNoTiming); + return ErrorStatus::INVALID_ARGUMENT; } - const auto deadline = convert(halDeadline).value(); + const auto deadline = makeDeadline(halDeadline); if (hasDeadlinePassed(deadline)) { - notify(callback, V1_3::ErrorStatus::MISSED_DEADLINE_PERSISTENT, {}, kNoTiming); - return V1_3::ErrorStatus::NONE; + notify(callback, ErrorStatus::MISSED_DEADLINE_PERSISTENT, {}, kNoTiming); + return ErrorStatus::NONE; } // This thread is intentionally detached because the sample driver service @@ -548,61 +536,57 @@ deadline, loopTimeoutDuration, callback); }).detach(); - return V1_3::ErrorStatus::NONE; + return ErrorStatus::NONE; } -hardware::Return<V1_0::ErrorStatus> SamplePreparedModel::execute( +Return<V1_0::ErrorStatus> SamplePreparedModel::execute( const V1_0::Request& request, const sp<V1_0::IExecutionCallback>& callback) { - const V1_3::ErrorStatus status = - executeBase(convertToV1_3(request), V1_2::MeasureTiming::NO, mModel, *mDriver, this, - mPoolInfos, {}, {}, callback); + const ErrorStatus status = executeBase(convertToV1_3(request), MeasureTiming::NO, mModel, + *mDriver, this, mPoolInfos, {}, {}, callback); return convertToV1_0(status); } -hardware::Return<V1_0::ErrorStatus> SamplePreparedModel::execute_1_2( - const V1_0::Request& request, V1_2::MeasureTiming measure, +Return<V1_0::ErrorStatus> SamplePreparedModel::execute_1_2( + const V1_0::Request& request, 
MeasureTiming measure, const sp<V1_2::IExecutionCallback>& callback) { - const V1_3::ErrorStatus status = executeBase(convertToV1_3(request), measure, mModel, *mDriver, - this, mPoolInfos, {}, {}, callback); + const ErrorStatus status = executeBase(convertToV1_3(request), measure, mModel, *mDriver, this, + mPoolInfos, {}, {}, callback); return convertToV1_0(status); } -hardware::Return<V1_3::ErrorStatus> SamplePreparedModel::execute_1_3( - const V1_3::Request& request, V1_2::MeasureTiming measure, - const V1_3::OptionalTimePoint& deadline, - const V1_3::OptionalTimeoutDuration& loopTimeoutDuration, +Return<V1_3::ErrorStatus> SamplePreparedModel::execute_1_3( + const V1_3::Request& request, MeasureTiming measure, const OptionalTimePoint& deadline, + const OptionalTimeoutDuration& loopTimeoutDuration, const sp<V1_3::IExecutionCallback>& callback) { return executeBase(request, measure, mModel, *mDriver, this, mPoolInfos, deadline, loopTimeoutDuration, callback); } -static std::tuple<V1_3::ErrorStatus, hardware::hidl_vec<V1_2::OutputShape>, V1_2::Timing> -executeSynchronouslyBase(const V1_3::Request& request, V1_2::MeasureTiming measure, - const V1_3::Model& model, const SampleDriver& driver, - const SamplePreparedModel* preparedModel, - const std::vector<RunTimePoolInfo>& poolInfos, - const V1_3::OptionalTimePoint& halDeadline, - const V1_3::OptionalTimeoutDuration& loopTimeoutDuration) { +static std::tuple<ErrorStatus, hidl_vec<OutputShape>, Timing> executeSynchronouslyBase( + const Request& request, MeasureTiming measure, const Model& model, + const SampleDriver& driver, const SamplePreparedModel* preparedModel, + const std::vector<RunTimePoolInfo>& poolInfos, const OptionalTimePoint& halDeadline, + const OptionalTimeoutDuration& loopTimeoutDuration) { NNTRACE_FULL(NNTRACE_LAYER_DRIVER, NNTRACE_PHASE_EXECUTION, "SampleDriver::executeSynchronouslyBase"); VLOG(DRIVER) << "executeSynchronouslyBase(" << SHOW_IF_DEBUG(toString(request)) << ")"; - TimePoint driverStart, 
driverEnd, deviceStart, deviceEnd; - if (measure == V1_2::MeasureTiming::YES) driverStart = Clock::now(); + time_point driverStart, driverEnd, deviceStart, deviceEnd; + if (measure == MeasureTiming::YES) driverStart = now(); if (!validateRequest(request, model)) { - return {V1_3::ErrorStatus::INVALID_ARGUMENT, {}, kNoTiming}; + return {ErrorStatus::INVALID_ARGUMENT, {}, kNoTiming}; } - const auto deadline = convert(halDeadline).value(); + const auto deadline = makeDeadline(halDeadline); if (hasDeadlinePassed(deadline)) { - return {V1_3::ErrorStatus::MISSED_DEADLINE_PERSISTENT, {}, kNoTiming}; + return {ErrorStatus::MISSED_DEADLINE_PERSISTENT, {}, kNoTiming}; } NNTRACE_FULL_SWITCH(NNTRACE_LAYER_DRIVER, NNTRACE_PHASE_INPUTS_AND_OUTPUTS, "SampleDriver::executeSynchronouslyBase"); const auto [poolStatus, requestPoolInfos, bufferWrappers] = createRunTimePoolInfos(request, driver, preparedModel); - if (poolStatus != V1_3::ErrorStatus::NONE) { + if (poolStatus != ErrorStatus::NONE) { return {poolStatus, {}, kNoTiming}; } @@ -610,151 +594,146 @@ "SampleDriver::executeSynchronouslyBase"); CpuExecutor executor = driver.getExecutor(); if (loopTimeoutDuration.getDiscriminator() != - V1_3::OptionalTimeoutDuration::hidl_discriminator::none) { + OptionalTimeoutDuration::hidl_discriminator::none) { executor.setLoopTimeout(loopTimeoutDuration.nanoseconds()); } if (deadline.has_value()) { executor.setDeadline(*deadline); } - if (measure == V1_2::MeasureTiming::YES) deviceStart = Clock::now(); - int n = executor.run(uncheckedConvert(model), uncheckedConvert(request), poolInfos, - requestPoolInfos); - if (measure == V1_2::MeasureTiming::YES) deviceEnd = Clock::now(); + if (measure == MeasureTiming::YES) deviceStart = now(); + int n = executor.run(model, request, poolInfos, requestPoolInfos); + if (measure == MeasureTiming::YES) deviceEnd = now(); VLOG(DRIVER) << "executor.run returned " << n; - V1_3::ErrorStatus executionStatus = convertResultCodeToHalErrorStatus(n); - 
hardware::hidl_vec<V1_2::OutputShape> outputShapes = convertToV1_2(executor.getOutputShapes()); + ErrorStatus executionStatus = convertResultCodeToErrorStatus(n); + hidl_vec<OutputShape> outputShapes = executor.getOutputShapes(); // Update device memory metadata. - const V1_3::ErrorStatus updateStatus = + const ErrorStatus updateStatus = updateDeviceMemories(executionStatus, request, bufferWrappers, outputShapes); - if (updateStatus != V1_3::ErrorStatus::NONE) { + if (updateStatus != ErrorStatus::NONE) { return {updateStatus, {}, kNoTiming}; } - if (measure == V1_2::MeasureTiming::YES && executionStatus == V1_3::ErrorStatus::NONE) { - driverEnd = Clock::now(); - V1_2::Timing timing = { - .timeOnDevice = uint64_t(microsecondsDuration(deviceEnd, deviceStart)), - .timeInDriver = uint64_t(microsecondsDuration(driverEnd, driverStart))}; + if (measure == MeasureTiming::YES && executionStatus == ErrorStatus::NONE) { + driverEnd = now(); + Timing timing = {.timeOnDevice = uint64_t(microsecondsDuration(deviceEnd, deviceStart)), + .timeInDriver = uint64_t(microsecondsDuration(driverEnd, driverStart))}; VLOG(DRIVER) << "executeSynchronouslyBase timing = " << toString(timing); return {executionStatus, std::move(outputShapes), timing}; } return {executionStatus, std::move(outputShapes), kNoTiming}; } -hardware::Return<void> SamplePreparedModel::executeSynchronously(const V1_0::Request& request, - V1_2::MeasureTiming measure, - executeSynchronously_cb cb) { +Return<void> SamplePreparedModel::executeSynchronously(const V1_0::Request& request, + MeasureTiming measure, + executeSynchronously_cb cb) { auto [status, outputShapes, timing] = executeSynchronouslyBase( convertToV1_3(request), measure, mModel, *mDriver, this, mPoolInfos, {}, {}); cb(convertToV1_0(status), std::move(outputShapes), timing); - return hardware::Void(); + return Void(); } -hardware::Return<void> SamplePreparedModel::executeSynchronously_1_3( - const V1_3::Request& request, V1_2::MeasureTiming measure, - const 
V1_3::OptionalTimePoint& deadline, - const V1_3::OptionalTimeoutDuration& loopTimeoutDuration, executeSynchronously_1_3_cb cb) { +Return<void> SamplePreparedModel::executeSynchronously_1_3( + const V1_3::Request& request, MeasureTiming measure, const OptionalTimePoint& deadline, + const OptionalTimeoutDuration& loopTimeoutDuration, executeSynchronously_1_3_cb cb) { auto [status, outputShapes, timing] = executeSynchronouslyBase( request, measure, mModel, *mDriver, this, mPoolInfos, deadline, loopTimeoutDuration); cb(status, std::move(outputShapes), timing); - return hardware::Void(); + return Void(); } // The sample driver will finish the execution and then return. -hardware::Return<void> SamplePreparedModel::executeFenced( - const V1_3::Request& request, const hardware::hidl_vec<hardware::hidl_handle>& waitFor, - V1_2::MeasureTiming measure, const V1_3::OptionalTimePoint& halDeadline, - const V1_3::OptionalTimeoutDuration& loopTimeoutDuration, - const V1_3::OptionalTimeoutDuration& duration, executeFenced_cb cb) { +Return<void> SamplePreparedModel::executeFenced( + const hal::Request& request, const hidl_vec<hidl_handle>& waitFor, MeasureTiming measure, + const OptionalTimePoint& halDeadline, const OptionalTimeoutDuration& loopTimeoutDuration, + const OptionalTimeoutDuration& duration, executeFenced_cb cb) { NNTRACE_FULL(NNTRACE_LAYER_DRIVER, NNTRACE_PHASE_EXECUTION, "SamplePreparedModel::executeFenced"); VLOG(DRIVER) << "executeFenced(" << SHOW_IF_DEBUG(toString(request)) << ")"; - TimePoint driverStart, driverEnd, deviceStart, deviceEnd; - if (measure == V1_2::MeasureTiming::YES) driverStart = Clock::now(); + time_point driverStart, driverEnd, deviceStart, deviceEnd; + if (measure == MeasureTiming::YES) driverStart = now(); if (!validateRequest(request, mModel, /*allowUnspecifiedOutput=*/false)) { - cb(V1_3::ErrorStatus::INVALID_ARGUMENT, hardware::hidl_handle(nullptr), nullptr); - return hardware::Void(); + cb(ErrorStatus::INVALID_ARGUMENT, hidl_handle(nullptr), 
nullptr); + return Void(); } - const auto deadline = convert(halDeadline).value(); + const auto deadline = makeDeadline(halDeadline); if (hasDeadlinePassed(deadline)) { - cb(V1_3::ErrorStatus::MISSED_DEADLINE_PERSISTENT, hardware::hidl_handle(nullptr), nullptr); - return hardware::Void(); + cb(ErrorStatus::MISSED_DEADLINE_PERSISTENT, hidl_handle(nullptr), nullptr); + return Void(); } // Wait for the dependent events to signal for (const auto& fenceHandle : waitFor) { if (!fenceHandle.getNativeHandle()) { - cb(V1_3::ErrorStatus::INVALID_ARGUMENT, hardware::hidl_handle(nullptr), nullptr); - return hardware::Void(); + cb(ErrorStatus::INVALID_ARGUMENT, hidl_handle(nullptr), nullptr); + return Void(); } int syncFenceFd = fenceHandle.getNativeHandle()->data[0]; if (syncWait(syncFenceFd, -1) != FenceState::SIGNALED) { LOG(ERROR) << "syncWait failed"; - cb(V1_3::ErrorStatus::GENERAL_FAILURE, hardware::hidl_handle(nullptr), nullptr); - return hardware::Void(); + cb(ErrorStatus::GENERAL_FAILURE, hidl_handle(nullptr), nullptr); + return Void(); } } // Update deadline if the timeout duration is closer than the deadline. 
auto closestDeadline = deadline; - if (duration.getDiscriminator() != V1_3::OptionalTimeoutDuration::hidl_discriminator::none) { + if (duration.getDiscriminator() != OptionalTimeoutDuration::hidl_discriminator::none) { const auto timeoutDurationDeadline = makeDeadline(duration.nanoseconds()); if (!closestDeadline.has_value() || *closestDeadline > timeoutDurationDeadline) { closestDeadline = timeoutDurationDeadline; } } - TimePoint driverStartAfterFence; - if (measure == V1_2::MeasureTiming::YES) driverStartAfterFence = Clock::now(); + time_point driverStartAfterFence; + if (measure == MeasureTiming::YES) driverStartAfterFence = now(); NNTRACE_FULL_SWITCH(NNTRACE_LAYER_DRIVER, NNTRACE_PHASE_INPUTS_AND_OUTPUTS, "SamplePreparedModel::executeFenced"); const auto [poolStatus, requestPoolInfos, bufferWrappers] = createRunTimePoolInfos(request, *mDriver, this); - if (poolStatus != V1_3::ErrorStatus::NONE) { - cb(poolStatus, hardware::hidl_handle(nullptr), nullptr); - return hardware::Void(); + if (poolStatus != ErrorStatus::NONE) { + cb(poolStatus, hidl_handle(nullptr), nullptr); + return Void(); } NNTRACE_FULL_SWITCH(NNTRACE_LAYER_DRIVER, NNTRACE_PHASE_EXECUTION, "SamplePreparedModel::executeFenced"); CpuExecutor executor = mDriver->getExecutor(); if (loopTimeoutDuration.getDiscriminator() != - V1_3::OptionalTimeoutDuration::hidl_discriminator::none) { + OptionalTimeoutDuration::hidl_discriminator::none) { executor.setLoopTimeout(loopTimeoutDuration.nanoseconds()); } if (closestDeadline.has_value()) { executor.setDeadline(*closestDeadline); } - if (measure == V1_2::MeasureTiming::YES) deviceStart = Clock::now(); - int n = executor.run(uncheckedConvert(mModel), uncheckedConvert(request), mPoolInfos, - requestPoolInfos); - if (measure == V1_2::MeasureTiming::YES) deviceEnd = Clock::now(); + if (measure == MeasureTiming::YES) deviceStart = now(); + int n = executor.run(mModel, request, mPoolInfos, requestPoolInfos); + if (measure == MeasureTiming::YES) deviceEnd = now(); 
VLOG(DRIVER) << "executor.run returned " << n; - V1_3::ErrorStatus executionStatus = convertResultCodeToHalErrorStatus(n); - if (executionStatus != V1_3::ErrorStatus::NONE) { - cb(executionStatus, hardware::hidl_handle(nullptr), nullptr); - return hardware::Void(); + ErrorStatus executionStatus = convertResultCodeToErrorStatus(n); + if (executionStatus != ErrorStatus::NONE) { + cb(executionStatus, hidl_handle(nullptr), nullptr); + return Void(); } // Set output memories to the initialized state. - if (executionStatus == V1_3::ErrorStatus::NONE) { + if (executionStatus == ErrorStatus::NONE) { for (const auto& output : request.outputs) { const uint32_t poolIndex = output.location.poolIndex; const auto& pool = request.pools[poolIndex]; - if (pool.getDiscriminator() == V1_3::Request::MemoryPool::hidl_discriminator::token) { + if (pool.getDiscriminator() == Request::MemoryPool::hidl_discriminator::token) { bufferWrappers[poolIndex]->setInitialized(true); } } } - V1_2::Timing timingSinceLaunch = {.timeOnDevice = UINT64_MAX, .timeInDriver = UINT64_MAX}; - V1_2::Timing timingAfterFence = {.timeOnDevice = UINT64_MAX, .timeInDriver = UINT64_MAX}; - if (measure == V1_2::MeasureTiming::YES) { - driverEnd = Clock::now(); + Timing timingSinceLaunch = {.timeOnDevice = UINT64_MAX, .timeInDriver = UINT64_MAX}; + Timing timingAfterFence = {.timeOnDevice = UINT64_MAX, .timeInDriver = UINT64_MAX}; + if (measure == MeasureTiming::YES) { + driverEnd = now(); timingSinceLaunch = { .timeOnDevice = uint64_t(microsecondsDuration(deviceEnd, deviceStart)), .timeInDriver = uint64_t(microsecondsDuration(driverEnd, driverStart))}; @@ -766,8 +745,8 @@ } sp<SampleFencedExecutionCallback> fencedExecutionCallback = new SampleFencedExecutionCallback(timingSinceLaunch, timingAfterFence, executionStatus); - cb(executionStatus, hardware::hidl_handle(nullptr), fencedExecutionCallback); - return hardware::Void(); + cb(executionStatus, hidl_handle(nullptr), fencedExecutionCallback); + return Void(); } // 
BurstExecutorWithCache maps hidl_memory when it is first seen, and preserves @@ -777,7 +756,7 @@ // unmapping the memory on each execution. class BurstExecutorWithCache : public ExecutionBurstServer::IBurstExecutorWithCache { public: - BurstExecutorWithCache(const V1_3::Model& model, const SampleDriver* driver, + BurstExecutorWithCache(const Model& model, const SampleDriver* driver, const std::vector<RunTimePoolInfo>& poolInfos) : mModel(model), mDriver(driver), mModelPoolInfos(poolInfos) {} @@ -786,20 +765,20 @@ return (it != mMemoryCache.end()) && it->second.has_value(); } - void addCacheEntry(const hardware::hidl_memory& memory, int32_t slot) override { - mMemoryCache[slot] = RunTimePoolInfo::createFromMemory(uncheckedConvert(memory)); + void addCacheEntry(const hidl_memory& memory, int32_t slot) override { + mMemoryCache[slot] = RunTimePoolInfo::createFromHidlMemory(memory); } void removeCacheEntry(int32_t slot) override { mMemoryCache.erase(slot); } - std::tuple<V1_0::ErrorStatus, hardware::hidl_vec<V1_2::OutputShape>, V1_2::Timing> execute( + std::tuple<V1_0::ErrorStatus, hidl_vec<OutputShape>, Timing> execute( const V1_0::Request& request, const std::vector<int32_t>& slots, - V1_2::MeasureTiming measure) override { + MeasureTiming measure) override { NNTRACE_FULL(NNTRACE_LAYER_DRIVER, NNTRACE_PHASE_EXECUTION, "BurstExecutorWithCache::execute"); - TimePoint driverStart, driverEnd, deviceStart, deviceEnd; - if (measure == V1_2::MeasureTiming::YES) driverStart = Clock::now(); + time_point driverStart, driverEnd, deviceStart, deviceEnd; + if (measure == MeasureTiming::YES) driverStart = now(); // ensure all relevant pools are valid if (!std::all_of(slots.begin(), slots.end(), @@ -808,13 +787,13 @@ } // finish the request object (for validation) - hardware::hidl_vec<V1_3::Request::MemoryPool> pools(slots.size()); + hidl_vec<Request::MemoryPool> pools(slots.size()); std::transform(slots.begin(), slots.end(), pools.begin(), [this](int32_t slot) { - 
V1_3::Request::MemoryPool pool; - pool.hidlMemory(convertToV1_0(mMemoryCache[slot]->getMemory())); + Request::MemoryPool pool; + pool.hidlMemory(mMemoryCache[slot]->getHidlMemory()); return pool; }); - V1_3::Request fullRequest = {.inputs = request.inputs, .outputs = request.outputs}; + Request fullRequest = {.inputs = request.inputs, .outputs = request.outputs}; fullRequest.pools = std::move(pools); // validate request object against the model @@ -833,17 +812,15 @@ // because burst does not support HAL 1.3 and hence does not support // WHILE loops. CpuExecutor executor = mDriver->getExecutor(); - if (measure == V1_2::MeasureTiming::YES) deviceStart = Clock::now(); - int n = executor.run(uncheckedConvert(mModel), uncheckedConvert(fullRequest), - mModelPoolInfos, requestPoolInfos); - if (measure == V1_2::MeasureTiming::YES) deviceEnd = Clock::now(); + if (measure == MeasureTiming::YES) deviceStart = now(); + int n = executor.run(mModel, fullRequest, mModelPoolInfos, requestPoolInfos); + if (measure == MeasureTiming::YES) deviceEnd = now(); VLOG(DRIVER) << "executor.run returned " << n; - V1_0::ErrorStatus executionStatus = convertToV1_0(convertResultCodeToHalErrorStatus(n)); - hardware::hidl_vec<V1_2::OutputShape> outputShapes = - convertToV1_2(executor.getOutputShapes()); - if (measure == V1_2::MeasureTiming::YES && executionStatus == V1_0::ErrorStatus::NONE) { - driverEnd = Clock::now(); - V1_2::Timing timing = { + V1_0::ErrorStatus executionStatus = convertToV1_0(convertResultCodeToErrorStatus(n)); + hidl_vec<OutputShape> outputShapes = executor.getOutputShapes(); + if (measure == MeasureTiming::YES && executionStatus == V1_0::ErrorStatus::NONE) { + driverEnd = now(); + Timing timing = { .timeOnDevice = uint64_t(microsecondsDuration(deviceEnd, deviceStart)), .timeInDriver = uint64_t(microsecondsDuration(driverEnd, driverStart))}; VLOG(DRIVER) << "BurstExecutorWithCache::execute timing = " << toString(timing); @@ -854,7 +831,7 @@ } private: - const V1_3::Model 
mModel; + const Model mModel; const SampleDriver* const mDriver; const std::vector<RunTimePoolInfo> mModelPoolInfos; std::map<int32_t, std::optional<RunTimePoolInfo>> mMemoryCache; // cached requestPoolInfos @@ -876,7 +853,7 @@ #endif // NN_DEBUGGABLE } -hardware::Return<void> SamplePreparedModel::configureExecutionBurst( +Return<void> SamplePreparedModel::configureExecutionBurst( const sp<V1_2::IBurstCallback>& callback, const MQDescriptorSync<V1_2::FmqRequestDatum>& requestChannel, const MQDescriptorSync<V1_2::FmqResultDatum>& resultChannel, @@ -884,7 +861,7 @@ NNTRACE_FULL(NNTRACE_LAYER_DRIVER, NNTRACE_PHASE_EXECUTION, "SampleDriver::configureExecutionBurst"); - const bool preferPowerOverLatency = (kPreference == V1_1::ExecutionPreference::LOW_POWER); + const bool preferPowerOverLatency = (kPreference == ExecutionPreference::LOW_POWER); const auto pollingTimeWindow = (preferPowerOverLatency ? std::chrono::microseconds{0} : getPollingTimeWindow()); @@ -907,7 +884,7 @@ cb(V1_0::ErrorStatus::NONE, burst); } - return hardware::Void(); + return Void(); } } // namespace sample_driver
diff --git a/driver/sample/SampleDriver.h b/driver/sample/SampleDriver.h index ca7bc9e..007e8a6 100644 --- a/driver/sample/SampleDriver.h +++ b/driver/sample/SampleDriver.h
@@ -17,9 +17,6 @@ #ifndef ANDROID_FRAMEWORKS_ML_NN_DRIVER_SAMPLE_SAMPLE_DRIVER_H #define ANDROID_FRAMEWORKS_ML_NN_DRIVER_SAMPLE_SAMPLE_DRIVER_H -#include <CpuExecutor.h> -#include <HalBufferTracker.h> -#include <HalInterfaces.h> #include <hwbinder/IPCThreadState.h> #include <memory> @@ -27,6 +24,9 @@ #include <utility> #include <vector> +#include "BufferTracker.h" +#include "CpuExecutor.h" +#include "HalInterfaces.h" #include "NeuralNetworks.h" namespace android { @@ -36,22 +36,20 @@ using hardware::MQDescriptorSync; // Manages the data buffer for an operand. -class SampleBuffer : public V1_3::IBuffer { +class SampleBuffer : public hal::IBuffer { public: - SampleBuffer(std::shared_ptr<HalManagedBuffer> buffer, - std::unique_ptr<HalBufferTracker::Token> token) + SampleBuffer(std::shared_ptr<ManagedBuffer> buffer, std::unique_ptr<BufferTracker::Token> token) : kBuffer(std::move(buffer)), kToken(std::move(token)) { CHECK(kBuffer != nullptr); CHECK(kToken != nullptr); } - hardware::Return<V1_3::ErrorStatus> copyTo(const hardware::hidl_memory& dst) override; - hardware::Return<V1_3::ErrorStatus> copyFrom( - const hardware::hidl_memory& src, - const hardware::hidl_vec<uint32_t>& dimensions) override; + hal::Return<hal::ErrorStatus> copyTo(const hal::hidl_memory& dst) override; + hal::Return<hal::ErrorStatus> copyFrom(const hal::hidl_memory& src, + const hal::hidl_vec<uint32_t>& dimensions) override; private: - const std::shared_ptr<HalManagedBuffer> kBuffer; - const std::unique_ptr<HalBufferTracker::Token> kToken; + const std::shared_ptr<ManagedBuffer> kBuffer; + const std::unique_ptr<BufferTracker::Token> kToken; }; // Base class used to create sample drivers for the NN HAL. This class @@ -59,79 +57,78 @@ // // Since these drivers simulate hardware, they must run the computations // on the CPU. An actual driver would not do that. 
-class SampleDriver : public V1_3::IDevice { +class SampleDriver : public hal::IDevice { public: SampleDriver(const char* name, const IOperationResolver* operationResolver = BuiltinOperationResolver::get()) : mName(name), mOperationResolver(operationResolver), - mHalBufferTracker(HalBufferTracker::create()) { + mBufferTracker(BufferTracker::create()) { android::nn::initVLogMask(); } - hardware::Return<void> getCapabilities(getCapabilities_cb cb) override; - hardware::Return<void> getCapabilities_1_1(getCapabilities_1_1_cb cb) override; - hardware::Return<void> getCapabilities_1_2(getCapabilities_1_2_cb cb) override; - hardware::Return<void> getVersionString(getVersionString_cb cb) override; - hardware::Return<void> getType(getType_cb cb) override; - hardware::Return<void> getSupportedExtensions(getSupportedExtensions_cb) override; - hardware::Return<void> getSupportedOperations(const V1_0::Model& model, - getSupportedOperations_cb cb) override; - hardware::Return<void> getSupportedOperations_1_1(const V1_1::Model& model, - getSupportedOperations_1_1_cb cb) override; - hardware::Return<void> getSupportedOperations_1_2(const V1_2::Model& model, - getSupportedOperations_1_2_cb cb) override; - hardware::Return<void> getNumberOfCacheFilesNeeded(getNumberOfCacheFilesNeeded_cb cb) override; - hardware::Return<V1_0::ErrorStatus> prepareModel( - const V1_0::Model& model, const sp<V1_0::IPreparedModelCallback>& callback) override; - hardware::Return<V1_0::ErrorStatus> prepareModel_1_1( - const V1_1::Model& model, V1_1::ExecutionPreference preference, - const sp<V1_0::IPreparedModelCallback>& callback) override; - hardware::Return<V1_0::ErrorStatus> prepareModel_1_2( - const V1_2::Model& model, V1_1::ExecutionPreference preference, - const hardware::hidl_vec<hardware::hidl_handle>& modelCache, - const hardware::hidl_vec<hardware::hidl_handle>& dataCache, const HalCacheToken& token, - const sp<V1_2::IPreparedModelCallback>& callback) override; - 
hardware::Return<V1_3::ErrorStatus> prepareModel_1_3( - const V1_3::Model& model, V1_1::ExecutionPreference preference, V1_3::Priority priority, - const V1_3::OptionalTimePoint& deadline, - const hardware::hidl_vec<hardware::hidl_handle>& modelCache, - const hardware::hidl_vec<hardware::hidl_handle>& dataCache, const HalCacheToken& token, - const sp<V1_3::IPreparedModelCallback>& callback) override; - hardware::Return<V1_0::ErrorStatus> prepareModelFromCache( - const hardware::hidl_vec<hardware::hidl_handle>& modelCache, - const hardware::hidl_vec<hardware::hidl_handle>& dataCache, const HalCacheToken& token, - const sp<V1_2::IPreparedModelCallback>& callback) override; - hardware::Return<V1_3::ErrorStatus> prepareModelFromCache_1_3( - const V1_3::OptionalTimePoint& deadline, - const hardware::hidl_vec<hardware::hidl_handle>& modelCache, - const hardware::hidl_vec<hardware::hidl_handle>& dataCache, const HalCacheToken& token, - const sp<V1_3::IPreparedModelCallback>& callback) override; - hardware::Return<V1_0::DeviceStatus> getStatus() override; - hardware::Return<void> allocate( - const V1_3::BufferDesc& desc, - const hardware::hidl_vec<sp<V1_3::IPreparedModel>>& preparedModels, - const hardware::hidl_vec<V1_3::BufferRole>& inputRoles, - const hardware::hidl_vec<V1_3::BufferRole>& outputRoles, allocate_cb cb) override; + hal::Return<void> getCapabilities(getCapabilities_cb cb) override; + hal::Return<void> getCapabilities_1_1(getCapabilities_1_1_cb cb) override; + hal::Return<void> getCapabilities_1_2(getCapabilities_1_2_cb cb) override; + hal::Return<void> getVersionString(getVersionString_cb cb) override; + hal::Return<void> getType(getType_cb cb) override; + hal::Return<void> getSupportedExtensions(getSupportedExtensions_cb) override; + hal::Return<void> getSupportedOperations(const hal::V1_0::Model& model, + getSupportedOperations_cb cb) override; + hal::Return<void> getSupportedOperations_1_1(const hal::V1_1::Model& model, + getSupportedOperations_1_1_cb cb) 
override; + hal::Return<void> getSupportedOperations_1_2(const hal::V1_2::Model& model, + getSupportedOperations_1_2_cb cb) override; + hal::Return<void> getNumberOfCacheFilesNeeded(getNumberOfCacheFilesNeeded_cb cb) override; + hal::Return<hal::V1_0::ErrorStatus> prepareModel( + const hal::V1_0::Model& model, + const sp<hal::V1_0::IPreparedModelCallback>& callback) override; + hal::Return<hal::V1_0::ErrorStatus> prepareModel_1_1( + const hal::V1_1::Model& model, hal::ExecutionPreference preference, + const sp<hal::V1_0::IPreparedModelCallback>& callback) override; + hal::Return<hal::V1_0::ErrorStatus> prepareModel_1_2( + const hal::V1_2::Model& model, hal::ExecutionPreference preference, + const hal::hidl_vec<hal::hidl_handle>& modelCache, + const hal::hidl_vec<hal::hidl_handle>& dataCache, const hal::CacheToken& token, + const sp<hal::V1_2::IPreparedModelCallback>& callback) override; + hal::Return<hal::V1_3::ErrorStatus> prepareModel_1_3( + const hal::V1_3::Model& model, hal::ExecutionPreference preference, + hal::Priority priority, const hal::OptionalTimePoint& deadline, + const hal::hidl_vec<hal::hidl_handle>& modelCache, + const hal::hidl_vec<hal::hidl_handle>& dataCache, const hal::CacheToken& token, + const sp<hal::V1_3::IPreparedModelCallback>& callback) override; + hal::Return<hal::V1_0::ErrorStatus> prepareModelFromCache( + const hal::hidl_vec<hal::hidl_handle>& modelCache, + const hal::hidl_vec<hal::hidl_handle>& dataCache, const hal::CacheToken& token, + const sp<hal::V1_2::IPreparedModelCallback>& callback) override; + hal::Return<hal::V1_3::ErrorStatus> prepareModelFromCache_1_3( + const hal::OptionalTimePoint& deadline, + const hal::hidl_vec<hal::hidl_handle>& modelCache, + const hal::hidl_vec<hal::hidl_handle>& dataCache, const hal::CacheToken& token, + const sp<hal::V1_3::IPreparedModelCallback>& callback) override; + hal::Return<hal::DeviceStatus> getStatus() override; + hal::Return<void> allocate(const hal::V1_3::BufferDesc& desc, + const 
hal::hidl_vec<sp<hal::V1_3::IPreparedModel>>& preparedModels, + const hal::hidl_vec<hal::V1_3::BufferRole>& inputRoles, + const hal::hidl_vec<hal::V1_3::BufferRole>& outputRoles, + allocate_cb cb) override; // Starts and runs the driver service. Typically called from main(). // This will return only once the service shuts down. int run(); CpuExecutor getExecutor() const { return CpuExecutor(mOperationResolver); } - const std::shared_ptr<HalBufferTracker>& getHalBufferTracker() const { - return mHalBufferTracker; - } + const std::shared_ptr<BufferTracker>& getBufferTracker() const { return mBufferTracker; } protected: std::string mName; const IOperationResolver* mOperationResolver; - const std::shared_ptr<HalBufferTracker> mHalBufferTracker; + const std::shared_ptr<BufferTracker> mBufferTracker; }; -class SamplePreparedModel : public V1_3::IPreparedModel { +class SamplePreparedModel : public hal::IPreparedModel { public: - SamplePreparedModel(const V1_3::Model& model, const SampleDriver* driver, - V1_1::ExecutionPreference preference, uid_t userId, V1_3::Priority priority) + SamplePreparedModel(const hal::Model& model, const SampleDriver* driver, + hal::ExecutionPreference preference, uid_t userId, hal::Priority priority) : mModel(model), mDriver(driver), kPreference(preference), @@ -141,63 +138,64 @@ (void)kPriority; } bool initialize(); - hardware::Return<V1_0::ErrorStatus> execute( - const V1_0::Request& request, const sp<V1_0::IExecutionCallback>& callback) override; - hardware::Return<V1_0::ErrorStatus> execute_1_2( - const V1_0::Request& request, V1_2::MeasureTiming measure, - const sp<V1_2::IExecutionCallback>& callback) override; - hardware::Return<V1_3::ErrorStatus> execute_1_3( - const V1_3::Request& request, V1_2::MeasureTiming measure, - const V1_3::OptionalTimePoint& deadline, - const V1_3::OptionalTimeoutDuration& loopTimeoutDuration, - const sp<V1_3::IExecutionCallback>& callback) override; - hardware::Return<void> executeSynchronously(const 
V1_0::Request& request, - V1_2::MeasureTiming measure, - executeSynchronously_cb cb) override; - hardware::Return<void> executeSynchronously_1_3( - const V1_3::Request& request, V1_2::MeasureTiming measure, - const V1_3::OptionalTimePoint& deadline, - const V1_3::OptionalTimeoutDuration& loopTimeoutDuration, + hal::Return<hal::V1_0::ErrorStatus> execute( + const hal::V1_0::Request& request, + const sp<hal::V1_0::IExecutionCallback>& callback) override; + hal::Return<hal::V1_0::ErrorStatus> execute_1_2( + const hal::V1_0::Request& request, hal::MeasureTiming measure, + const sp<hal::V1_2::IExecutionCallback>& callback) override; + hal::Return<hal::V1_3::ErrorStatus> execute_1_3( + const hal::V1_3::Request& request, hal::MeasureTiming measure, + const hal::OptionalTimePoint& deadline, + const hal::OptionalTimeoutDuration& loopTimeoutDuration, + const sp<hal::V1_3::IExecutionCallback>& callback) override; + hal::Return<void> executeSynchronously(const hal::V1_0::Request& request, + hal::MeasureTiming measure, + executeSynchronously_cb cb) override; + hal::Return<void> executeSynchronously_1_3( + const hal::V1_3::Request& request, hal::MeasureTiming measure, + const hal::OptionalTimePoint& deadline, + const hal::OptionalTimeoutDuration& loopTimeoutDuration, executeSynchronously_1_3_cb cb) override; - hardware::Return<void> configureExecutionBurst( - const sp<V1_2::IBurstCallback>& callback, - const MQDescriptorSync<V1_2::FmqRequestDatum>& requestChannel, - const MQDescriptorSync<V1_2::FmqResultDatum>& resultChannel, + hal::Return<void> configureExecutionBurst( + const sp<hal::V1_2::IBurstCallback>& callback, + const MQDescriptorSync<hal::V1_2::FmqRequestDatum>& requestChannel, + const MQDescriptorSync<hal::V1_2::FmqResultDatum>& resultChannel, configureExecutionBurst_cb cb) override; - hardware::Return<void> executeFenced(const V1_3::Request& request, - const hardware::hidl_vec<hardware::hidl_handle>& wait_for, - V1_2::MeasureTiming measure, - const 
V1_3::OptionalTimePoint& deadline, - const V1_3::OptionalTimeoutDuration& loopTimeoutDuration, - const V1_3::OptionalTimeoutDuration& duration, - executeFenced_cb callback) override; - const V1_3::Model* getModel() const { return &mModel; } + hal::Return<void> executeFenced(const hal::Request& request, + const hal::hidl_vec<hal::hidl_handle>& wait_for, + hal::MeasureTiming measure, + const hal::OptionalTimePoint& deadline, + const hal::OptionalTimeoutDuration& loopTimeoutDuration, + const hal::OptionalTimeoutDuration& duration, + executeFenced_cb callback) override; + const hal::Model* getModel() const { return &mModel; } - protected: - V1_3::Model mModel; + private: + hal::Model mModel; const SampleDriver* mDriver; std::vector<RunTimePoolInfo> mPoolInfos; - const V1_1::ExecutionPreference kPreference; + const hal::ExecutionPreference kPreference; const uid_t kUserId; - const V1_3::Priority kPriority; + const hal::Priority kPriority; }; -class SampleFencedExecutionCallback : public V1_3::IFencedExecutionCallback { +class SampleFencedExecutionCallback : public hal::IFencedExecutionCallback { public: - SampleFencedExecutionCallback(V1_2::Timing timingSinceLaunch, V1_2::Timing timingAfterFence, - V1_3::ErrorStatus error) + SampleFencedExecutionCallback(hal::Timing timingSinceLaunch, hal::Timing timingAfterFence, + hal::ErrorStatus error) : kTimingSinceLaunch(timingSinceLaunch), kTimingAfterFence(timingAfterFence), kErrorStatus(error) {} - hardware::Return<void> getExecutionInfo(getExecutionInfo_cb callback) override { + hal::Return<void> getExecutionInfo(getExecutionInfo_cb callback) override { callback(kErrorStatus, kTimingSinceLaunch, kTimingAfterFence); - return hardware::Void(); + return hal::Void(); } private: - const V1_2::Timing kTimingSinceLaunch; - const V1_2::Timing kTimingAfterFence; - const V1_3::ErrorStatus kErrorStatus; + const hal::Timing kTimingSinceLaunch; + const hal::Timing kTimingAfterFence; + const hal::ErrorStatus kErrorStatus; }; } // namespace 
sample_driver
diff --git a/driver/sample/SampleDriverAll.cpp b/driver/sample/SampleDriverAll.cpp index a693b0d..23a36f8 100644 --- a/driver/sample/SampleDriverAll.cpp +++ b/driver/sample/SampleDriverAll.cpp
@@ -16,10 +16,10 @@ #define LOG_TAG "SampleDriverAll" -#include <hidl/LegacySupport.h> - #include "SampleDriverFull.h" +#include <hidl/LegacySupport.h> + using android::sp; using android::nn::sample_driver::SampleDriverFull;
diff --git a/driver/sample/SampleDriverFloatFast.cpp b/driver/sample/SampleDriverFloatFast.cpp index 2123df4..bb4b815 100644 --- a/driver/sample/SampleDriverFloatFast.cpp +++ b/driver/sample/SampleDriverFloatFast.cpp
@@ -16,46 +16,49 @@ #define LOG_TAG "SampleDriverFloatFast" -#include <HalInterfaces.h> -#include <Utils.h> #include <android-base/logging.h> #include <hidl/LegacySupport.h> #include <thread> #include <vector> +#include "HalInterfaces.h" #include "SampleDriverPartial.h" +#include "Utils.h" +#include "ValidateHal.h" namespace android { namespace nn { namespace sample_driver { +using namespace hal; + class SampleDriverFloatFast : public SampleDriverPartial { public: SampleDriverFloatFast() : SampleDriverPartial("nnapi-sample_float_fast") {} - hardware::Return<void> getCapabilities_1_3(getCapabilities_1_3_cb cb) override; + Return<void> getCapabilities_1_3(getCapabilities_1_3_cb cb) override; private: std::vector<bool> getSupportedOperationsImpl(const V1_3::Model& model) const override; }; -hardware::Return<void> SampleDriverFloatFast::getCapabilities_1_3(getCapabilities_1_3_cb cb) { +Return<void> SampleDriverFloatFast::getCapabilities_1_3(getCapabilities_1_3_cb cb) { android::nn::initVLogMask(); VLOG(DRIVER) << "getCapabilities()"; - V1_3::Capabilities capabilities = { + Capabilities capabilities = { .relaxedFloat32toFloat16PerformanceScalar = {.execTime = 0.7f, .powerUsage = 1.1f}, .relaxedFloat32toFloat16PerformanceTensor = {.execTime = 0.7f, .powerUsage = 1.1f}, .operandPerformance = nonExtensionOperandPerformance<HalVersion::V1_3>({1.0f, 1.0f}), .ifPerformance = {.execTime = 1.0f, .powerUsage = 1.0f}, .whilePerformance = {.execTime = 1.0f, .powerUsage = 1.0f}}; - update(&capabilities.operandPerformance, V1_3::OperandType::TENSOR_FLOAT32, + update(&capabilities.operandPerformance, OperandType::TENSOR_FLOAT32, {.execTime = 0.8f, .powerUsage = 1.2f}); - update(&capabilities.operandPerformance, V1_3::OperandType::FLOAT32, + update(&capabilities.operandPerformance, OperandType::FLOAT32, {.execTime = 0.8f, .powerUsage = 1.2f}); - cb(V1_3::ErrorStatus::NONE, capabilities); - return hardware::Void(); + cb(ErrorStatus::NONE, capabilities); + return Void(); } 
std::vector<bool> SampleDriverFloatFast::getSupportedOperationsImpl( @@ -63,10 +66,10 @@ const size_t count = model.main.operations.size(); std::vector<bool> supported(count); for (size_t i = 0; i < count; i++) { - const V1_3::Operation& operation = model.main.operations[i]; - if (!isExtensionOperationType(operation.type) && operation.inputs.size() > 0) { - const V1_3::Operand& firstOperand = model.main.operands[operation.inputs[0]]; - supported[i] = firstOperand.type == V1_3::OperandType::TENSOR_FLOAT32; + const Operation& operation = model.main.operations[i]; + if (operation.inputs.size() > 0) { + const Operand& firstOperand = model.main.operands[operation.inputs[0]]; + supported[i] = firstOperand.type == OperandType::TENSOR_FLOAT32; } } return supported;
diff --git a/driver/sample/SampleDriverFloatSlow.cpp b/driver/sample/SampleDriverFloatSlow.cpp index ad9dda9..12e972c 100644 --- a/driver/sample/SampleDriverFloatSlow.cpp +++ b/driver/sample/SampleDriverFloatSlow.cpp
@@ -16,46 +16,49 @@ #define LOG_TAG "SampleDriverFloatSlow" -#include <HalInterfaces.h> -#include <Utils.h> #include <android-base/logging.h> #include <hidl/LegacySupport.h> #include <thread> #include <vector> +#include "HalInterfaces.h" #include "SampleDriverPartial.h" +#include "Utils.h" +#include "ValidateHal.h" namespace android { namespace nn { namespace sample_driver { +using namespace hal; + class SampleDriverFloatSlow : public SampleDriverPartial { public: SampleDriverFloatSlow() : SampleDriverPartial("nnapi-sample_float_slow") {} - hardware::Return<void> getCapabilities_1_3(getCapabilities_1_3_cb cb) override; + Return<void> getCapabilities_1_3(getCapabilities_1_3_cb cb) override; private: std::vector<bool> getSupportedOperationsImpl(const V1_3::Model& model) const override; }; -hardware::Return<void> SampleDriverFloatSlow::getCapabilities_1_3(getCapabilities_1_3_cb cb) { +Return<void> SampleDriverFloatSlow::getCapabilities_1_3(getCapabilities_1_3_cb cb) { android::nn::initVLogMask(); VLOG(DRIVER) << "getCapabilities()"; - V1_3::Capabilities capabilities = { + Capabilities capabilities = { .relaxedFloat32toFloat16PerformanceScalar = {.execTime = 1.2f, .powerUsage = 0.6f}, .relaxedFloat32toFloat16PerformanceTensor = {.execTime = 1.2f, .powerUsage = 0.6f}, .operandPerformance = nonExtensionOperandPerformance<HalVersion::V1_3>({1.0f, 1.0f}), .ifPerformance = {.execTime = 1.0f, .powerUsage = 1.0f}, .whilePerformance = {.execTime = 1.0f, .powerUsage = 1.0f}}; - update(&capabilities.operandPerformance, V1_3::OperandType::TENSOR_FLOAT32, + update(&capabilities.operandPerformance, OperandType::TENSOR_FLOAT32, {.execTime = 1.3f, .powerUsage = 0.7f}); - update(&capabilities.operandPerformance, V1_3::OperandType::FLOAT32, + update(&capabilities.operandPerformance, OperandType::FLOAT32, {.execTime = 1.3f, .powerUsage = 0.7f}); - cb(V1_3::ErrorStatus::NONE, capabilities); - return hardware::Void(); + cb(ErrorStatus::NONE, capabilities); + return Void(); } 
std::vector<bool> SampleDriverFloatSlow::getSupportedOperationsImpl( @@ -63,10 +66,10 @@ const size_t count = model.main.operations.size(); std::vector<bool> supported(count); for (size_t i = 0; i < count; i++) { - const V1_3::Operation& operation = model.main.operations[i]; - if (!isExtensionOperationType(operation.type) && operation.inputs.size() > 0) { - const V1_3::Operand& firstOperand = model.main.operands[operation.inputs[0]]; - supported[i] = firstOperand.type == V1_3::OperandType::TENSOR_FLOAT32; + const Operation& operation = model.main.operations[i]; + if (operation.inputs.size() > 0) { + const Operand& firstOperand = model.main.operands[operation.inputs[0]]; + supported[i] = firstOperand.type == OperandType::TENSOR_FLOAT32; } } return supported;
diff --git a/driver/sample/SampleDriverFloatXNNPACK.cpp b/driver/sample/SampleDriverFloatXNNPACK.cpp deleted file mode 100644 index cd80620..0000000 --- a/driver/sample/SampleDriverFloatXNNPACK.cpp +++ /dev/null
@@ -1,1998 +0,0 @@ -/* - * Copyright (C) 2020 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#define LOG_TAG "SampleDriverFloatXNNPACK" - -#include <CpuExecutor.h> -#include <HalInterfaces.h> -#include <Utils.h> -#include <ValidateHal.h> -#include <android-base/logging.h> -#include <hidl/LegacySupport.h> -#include <hwbinder/IPCThreadState.h> -#include <xnnpack.h> - -#include <algorithm> -#include <cstdint> -#include <limits> -#include <memory> -#include <string> -#include <thread> -#include <tuple> -#include <unordered_set> -#include <utility> -#include <vector> - -#include "SampleDriverPartial.h" -#include "SampleDriverUtils.h" - -namespace android { -namespace nn { -namespace sample_driver { - -namespace { - -#define NN_DRIVER_RETURN_IF_ERROR(expr) \ - do { \ - V1_3::ErrorStatus _errorCode = (expr); \ - if (_errorCode != V1_3::ErrorStatus::NONE) { \ - return _errorCode; \ - } \ - } while (0) - -const size_t kNumOfWorkerThreads = 1; -static const V1_2::Timing kNoTiming = {.timeOnDevice = UINT64_MAX, .timeInDriver = UINT64_MAX}; - -bool isScalarType(OperandType type) { - switch (type) { - case OperandType::FLOAT16: - case OperandType::FLOAT32: - case OperandType::INT32: - case OperandType::UINT32: - case OperandType::BOOL: - return true; - default: - return false; - } -} - -void updateForArguments(const std::vector<uint32_t>& indexes, - const hardware::hidl_vec<V1_0::RequestArgument>& arguments, - const 
std::vector<RunTimePoolInfo>& requestPoolInfos, - RunTimeOperandInfo* operands) { - CHECK_EQ(indexes.size(), arguments.size()); - for (size_t i = 0; i < indexes.size(); i++) { - const uint32_t operandIndex = indexes[i]; - const V1_0::RequestArgument& from = arguments[i]; - RunTimeOperandInfo& to = operands[operandIndex]; - if (from.dimensions.size() > 0) { - // It's the responsibility of the caller to validate that - // from.dimensions only modifies the dimensions that were - // unspecified in the model. That's the case in SampleDriver.cpp - // with the call to validateRequest(). - // TODO make sure that's the case for the default CPU path. - to.dimensions = from.dimensions; - } - if (from.hasNoValue) { - to.lifetime = Operand::LifeTime::NO_VALUE; - CHECK(to.buffer == nullptr); - to.length = 0; - } else { - auto poolIndex = from.location.poolIndex; - CHECK_LT(poolIndex, requestPoolInfos.size()); - auto& r = requestPoolInfos[poolIndex]; - to.buffer = r.getBuffer() + from.location.offset; - if (from.location.offset == 0 && from.location.length == 0) { - // Use the entire memory region. 
- to.length = r.getSize(); - } else { - to.length = from.location.length; - } - } - } -} - -std::vector<RunTimeOperandInfo> initializeRunTimeInfo( - const V1_3::Subgraph& subgraph, const std::vector<RunTimePoolInfo>& modelPoolInfos, - const hardware::hidl_vec<uint8_t>* mModelOperandValues) { - const size_t count = subgraph.operands.size(); - std::vector<RunTimeOperandInfo> operands(count); - for (size_t i = 0; i < count; i++) { - const V1_3::Operand& from = subgraph.operands[i]; - RunTimeOperandInfo& to = operands[i]; - to.type = uncheckedConvert(from.type); - to.dimensions = from.dimensions; - to.scale = from.scale; - to.zeroPoint = from.zeroPoint; - to.length = from.location.length; - to.lifetime = uncheckedConvert(from.lifetime); - to.extraParams = uncheckedConvert(from.extraParams); - switch (from.lifetime) { - case V1_3::OperandLifeTime::TEMPORARY_VARIABLE: - to.buffer = nullptr; - to.numberOfUsesLeft = from.numberOfConsumers; - break; - case V1_3::OperandLifeTime::CONSTANT_COPY: - to.buffer = const_cast<uint8_t*>(&(*mModelOperandValues)[from.location.offset]); - to.numberOfUsesLeft = 0; - break; - case V1_3::OperandLifeTime::CONSTANT_REFERENCE: { - auto poolIndex = from.location.poolIndex; - CHECK_LT(poolIndex, modelPoolInfos.size()); - auto& r = modelPoolInfos[poolIndex]; - to.buffer = r.getBuffer() + from.location.offset; - to.numberOfUsesLeft = 0; - break; - } - case V1_3::OperandLifeTime::SUBGRAPH: - case V1_3::OperandLifeTime::SUBGRAPH_INPUT: - case V1_3::OperandLifeTime::SUBGRAPH_OUTPUT: - case V1_3::OperandLifeTime::NO_VALUE: - to.buffer = nullptr; - to.numberOfUsesLeft = 0; - break; - } - } - return operands; -} - -} // namespace - -class Subgraph { - public: - static Subgraph* Create(const hardware::hidl_vec<V1_3::Operation>& operations, - std::vector<RunTimeOperandInfo>& operands, - const std::vector<uint32_t>& inputIndexes, - const std::vector<uint32_t>& outputIndexes, pthreadpool_t threadpool, - bool useStaticBuffer = false) { - // Convert 
subgraph inputs and outputs to hash sets for faster lookup. - const std::unordered_set<uint32_t> inputs(inputIndexes.begin(), inputIndexes.end()); - const std::unordered_set<uint32_t> outputs(outputIndexes.begin(), outputIndexes.end()); - std::unordered_set<uint32_t> externals(outputs); - - xnn_subgraph_t subgraphPtr = nullptr; - xnn_status status = xnn_create_subgraph( - /*external_value_ids=*/operands.size(), /*flags=*/0, &subgraphPtr); - if (status != xnn_status_success) { - LOG(ERROR) << "XNNPACK xnn_create_subgraph FAILED"; - return nullptr; - } - - // Smart pointer to automatically release subgraph on exit. - std::unique_ptr<xnn_subgraph, decltype(&xnn_delete_subgraph)> subgraph( - subgraphPtr, &xnn_delete_subgraph); - - // Detect which tensors are used as inputs or outputs of any subgraph nodes. - // -1 denotes tensor not used in the subgraph. - std::vector<int> tensors(operands.size(), -1); - - for (const auto& operation : operations) { - const std::vector<uint32_t>& ins = operation.inputs; - const std::vector<uint32_t>& outs = operation.outputs; - switch (operation.type) { - case V1_3::OperationType::MEAN: - case V1_3::OperationType::PAD: - case V1_3::OperationType::RESHAPE: - case V1_3::OperationType::RESIZE_BILINEAR: - // Ignore the second input (axes, static padding, or new shape), - // because it is represented as parameters of the XNNPACK operator - // rather than extra input. 
- { - const int t = ins[0]; - tensors[t] = t; - } - break; - default: - // All other operators: process all inputs - for (size_t k = 0; k < ins.size(); k++) { - if (isScalarType(operands[ins[k]].type)) continue; - const int t = ins[k]; - tensors[t] = t; - } - } - for (size_t k = 0; k < outs.size(); k++) { - if (isScalarType(operands[outs[k]].type)) continue; - const int t = outs[k]; - tensors[t] = t; - } - } - - // XNNPACK Value IDs for NNAPI Operands - std::vector<uint32_t> xnnpackTensors(operands.size()); - for (int t : tensors) { - if (t < 0) continue; - if (operands[tensors[t]].type != OperandType::TENSOR_FLOAT32) { - LOG(ERROR) << "XNNPACK only support FLOAT32 tensors"; - return nullptr; - } - - uint32_t flags = 0; - const void* data = nullptr; - if (operands[tensors[t]].lifetime == Operand::LifeTime::CONSTANT_COPY || - operands[tensors[t]].lifetime == Operand::LifeTime::CONSTANT_REFERENCE || - operands[tensors[t]].lifetime == Operand::LifeTime::POINTER) { - data = operands[tensors[t]].buffer; - } - if (inputs.count(t) != 0) { - flags |= XNN_VALUE_FLAG_EXTERNAL_INPUT; - CHECK(data == nullptr); - VLOG(DRIVER) << "found input tensor, add to external"; - externals.insert(static_cast<uint32_t>(t)); - } - if (outputs.count(t) != 0) { - flags |= XNN_VALUE_FLAG_EXTERNAL_OUTPUT; - } - - std::vector<size_t> dims(operands[tensors[t]].dimensions.size()); - for (size_t i = 0; i < dims.size(); i++) { - dims[i] = operands[tensors[t]].dimensions[i]; - } - - const xnn_status status = xnn_define_tensor_value( - subgraph.get(), xnn_datatype_fp32, dims.size(), dims.data(), data, - static_cast<uint32_t>(t), flags, &xnnpackTensors[t]); - if (status != xnn_status_success) { - LOG(ERROR) << "XNNPACK xnn_define_tensor_value failed"; - return nullptr; - } - } - - // Create XNNPACK nodes for NNAPI Operations - for (const auto& operation : operations) { - if (VisitNode(subgraph.get(), operation, operands.data(), xnnpackTensors) != - V1_3::ErrorStatus::NONE) { - LOG(ERROR) << "XNNPACK 
add op failed"; - return nullptr; - } - } - - xnn_runtime_t runtimePtr = nullptr; - status = xnn_create_runtime_v2(subgraph.get(), threadpool, /*flags=*/0, &runtimePtr); - if (status != xnn_status_success) { - LOG(ERROR) << "XNNPACK xnn_create_runtime_v2 FAILED"; - return nullptr; - } - return new Subgraph(runtimePtr, std::move(externals), useStaticBuffer); - } - - V1_3::ErrorStatus Prepare() { return V1_3::ErrorStatus::NONE; } - - V1_3::ErrorStatus Invoke(RunTimeOperandInfo* operands) { - VLOG(DRIVER) << "Subgraph::Invoke() start"; - if (!mUseStaticBuffer || mFirstRun) { - VLOG(DRIVER) << "Setup buffer for Subgraph"; - std::vector<xnn_external_value> externalValues; - - for (uint32_t t : mExternals) { - xnn_external_value value = {.id = 0, .data = nullptr}; - value.id = t; - value.data = operands[t].buffer; - externalValues.push_back(value); - } - - const xnn_status status = - xnn_setup_runtime(mRuntime.get(), externalValues.size(), externalValues.data()); - if (status != xnn_status_success) { - LOG(ERROR) << "XNNPACK xnn_setup_runtime FAILED"; - return V1_3::ErrorStatus::GENERAL_FAILURE; - } - mFirstRun = false; - } - VLOG(DRIVER) << "Subgraph::Invoke() finished xnn_setup_runtime"; - const xnn_status status = xnn_invoke_runtime(mRuntime.get()); - if (status != xnn_status_success) { - LOG(ERROR) << "XNNPACK xnn_invoke_runtime FAILED"; - return V1_3::ErrorStatus::GENERAL_FAILURE; - } - - return V1_3::ErrorStatus::NONE; - } - - static V1_3::ErrorStatus CalculatePadding(int padding, uint32_t* flags) { - switch (padding) { - case ANEURALNETWORKS_PADDING_SAME: - *flags = XNN_FLAG_TENSORFLOW_SAME_PADDING; - return V1_3::ErrorStatus::NONE; - case ANEURALNETWORKS_PADDING_VALID: - *flags = 0; - return V1_3::ErrorStatus::NONE; - default: - LOG(ERROR) << "invalid padding mode"; - return V1_3::ErrorStatus::INVALID_ARGUMENT; - } - } - - static V1_3::ErrorStatus ConvertActivationToOutputRange(int activation, float* outputMin, - float* outputMax) { - switch (activation) { - case 
ANEURALNETWORKS_FUSED_NONE: - *outputMin = -std::numeric_limits<float>::infinity(); - *outputMax = +std::numeric_limits<float>::infinity(); - return V1_3::ErrorStatus::NONE; - case ANEURALNETWORKS_FUSED_RELU: - *outputMin = 0.0f; - *outputMax = +std::numeric_limits<float>::infinity(); - return V1_3::ErrorStatus::NONE; - case ANEURALNETWORKS_FUSED_RELU1: - *outputMin = -1.0f; - *outputMax = +1.0f; - return V1_3::ErrorStatus::NONE; - case ANEURALNETWORKS_FUSED_RELU6: - *outputMin = 0.0f; - *outputMax = 6.0f; - return V1_3::ErrorStatus::NONE; - default: - return V1_3::ErrorStatus::INVALID_ARGUMENT; - } - } - - static V1_3::ErrorStatus CheckConvolutionParams(int32_t stride_width, int32_t stride_height, - int32_t dilation_width_factor, - int32_t dilation_height_factor) { - if (stride_width <= 0) { - return V1_3::ErrorStatus::INVALID_ARGUMENT; - } - if (stride_height <= 0) { - return V1_3::ErrorStatus::INVALID_ARGUMENT; - } - - if (dilation_width_factor <= 0) { - return V1_3::ErrorStatus::INVALID_ARGUMENT; - } - if (dilation_height_factor <= 0) { - return V1_3::ErrorStatus::INVALID_ARGUMENT; - } - return V1_3::ErrorStatus::NONE; - } - - static V1_3::ErrorStatus CheckDepthwiseConvolutionParams( - int32_t stride_width, int32_t stride_height, int32_t dilation_width_factor, - int32_t dilation_height_factor, int32_t depth_multiplier, uint32_t output_channels) { - if (stride_width <= 0) { - return V1_3::ErrorStatus::INVALID_ARGUMENT; - } - if (stride_height <= 0) { - return V1_3::ErrorStatus::INVALID_ARGUMENT; - } - - if (depth_multiplier <= 0) { - return V1_3::ErrorStatus::INVALID_ARGUMENT; - } - if (output_channels % depth_multiplier != 0) { - return V1_3::ErrorStatus::INVALID_ARGUMENT; - } - - if (dilation_width_factor <= 0) { - return V1_3::ErrorStatus::INVALID_ARGUMENT; - } - if (dilation_height_factor <= 0) { - return V1_3::ErrorStatus::INVALID_ARGUMENT; - } - - return V1_3::ErrorStatus::NONE; - } - - static V1_3::ErrorStatus CheckPoolingParams(int32_t stride_width, 
int32_t stride_height, - int32_t filter_width, int32_t filter_height) { - if (stride_width <= 0) { - return V1_3::ErrorStatus::INVALID_ARGUMENT; - } - if (stride_height <= 0) { - return V1_3::ErrorStatus::INVALID_ARGUMENT; - } - - if (filter_width <= 0) { - return V1_3::ErrorStatus::INVALID_ARGUMENT; - } - if (filter_height <= 0) { - return V1_3::ErrorStatus::INVALID_ARGUMENT; - } - if (filter_width == 1 && filter_height == 1 && std::max(stride_width, stride_height) > 1) { - return V1_3::ErrorStatus::INVALID_ARGUMENT; - } - return V1_3::ErrorStatus::NONE; - } - - static V1_3::ErrorStatus CheckNumInputsAndOutputs(const V1_3::Operation& operation, - uint32_t expected_num_inputs, - uint32_t expected_num_outputs) { - if (operation.inputs.size() != expected_num_inputs) { - return V1_3::ErrorStatus::INVALID_ARGUMENT; - } - if (operation.outputs.size() != expected_num_outputs) { - return V1_3::ErrorStatus::INVALID_ARGUMENT; - } - return V1_3::ErrorStatus::NONE; - } - - static V1_3::ErrorStatus CheckTensorType(OperandType tensor_type, OperandType expected_type) { - if (tensor_type != expected_type) { - return V1_3::ErrorStatus::INVALID_ARGUMENT; - } - return V1_3::ErrorStatus::NONE; - } - - static V1_3::ErrorStatus CheckTensorFloatType(OperandType tensor_type) { - if (tensor_type != OperandType::TENSOR_FLOAT32) { - return V1_3::ErrorStatus::INVALID_ARGUMENT; - } - return V1_3::ErrorStatus::NONE; - } - - static V1_3::ErrorStatus CheckTensorShape(std::vector<uint32_t>& dimensions, - uint32_t min_num_dims, uint32_t max_num_dims) { - if (min_num_dims == max_num_dims) { - if (dimensions.size() != min_num_dims) { - return V1_3::ErrorStatus::INVALID_ARGUMENT; - } - } else { - if (dimensions.size() < min_num_dims || dimensions.size() > max_num_dims) { - return V1_3::ErrorStatus::INVALID_ARGUMENT; - } - } - for (size_t i = 0; i < dimensions.size(); i++) { - if (dimensions[i] <= 0) { - return V1_3::ErrorStatus::INVALID_ARGUMENT; - } - } - return V1_3::ErrorStatus::NONE; - } - - 
static V1_3::ErrorStatus CheckTensorShape(std::vector<uint32_t>& dimensions, - int expected_num_dims) { - return CheckTensorShape(dimensions, expected_num_dims, expected_num_dims); - } - - static V1_3::ErrorStatus CheckSlopeTensorShape(std::vector<uint32_t>& dimensions) { - if (dimensions.size() < 1) { - return V1_3::ErrorStatus::INVALID_ARGUMENT; - } - // Validate that all non-channel dimensions (if any) are exactly 1. - for (size_t i = 0; i < dimensions.size() - 1; i++) { - if (dimensions[i] != 1) { - return V1_3::ErrorStatus::INVALID_ARGUMENT; - } - } - return V1_3::ErrorStatus::NONE; - } - - static V1_3::ErrorStatus CheckAxesTensorShape(std::vector<uint32_t>& dimensions) { - if (dimensions.size() != 1) { - return V1_3::ErrorStatus::INVALID_ARGUMENT; - } - return V1_3::ErrorStatus::NONE; - } - - static V1_3::ErrorStatus CheckShapeTensorShape(std::vector<uint32_t>& dimensions) { - if (dimensions.size() != 1) { - return V1_3::ErrorStatus::INVALID_ARGUMENT; - } - return V1_3::ErrorStatus::NONE; - } - - static V1_3::ErrorStatus CheckTensorStaticAllocation(Operand::LifeTime lifetime) { - if (lifetime != Operand::LifeTime::CONSTANT_COPY && - lifetime != Operand::LifeTime::CONSTANT_REFERENCE && - lifetime != Operand::LifeTime::POINTER) { - VLOG(DRIVER) << "CheckTensorStaticAllocation: " << toString(convertToV1_3(lifetime)); - return V1_3::ErrorStatus::INVALID_ARGUMENT; - } - return V1_3::ErrorStatus::NONE; - } - - static V1_3::ErrorStatus VisitNode(xnn_subgraph_t subgraph, const V1_3::Operation& operation, - RunTimeOperandInfo* operands, - const std::vector<uint32_t>& xnnpackTensors) { - switch (operation.type) { - case V1_3::OperationType::ABS: - return VisitAbsNode(subgraph, operation, operands, xnnpackTensors); - case V1_3::OperationType::ADD: - return VisitAddNode(subgraph, operation, operands, xnnpackTensors); - case V1_3::OperationType::AVERAGE_POOL_2D: - return VisitAveragePool2DNode(subgraph, operation, operands, xnnpackTensors); - case 
V1_3::OperationType::CONV_2D: - return VisitConv2DNode(subgraph, operation, operands, xnnpackTensors); - case V1_3::OperationType::DEPTHWISE_CONV_2D: - return VisitDepthwiseConv2DNode(subgraph, operation, operands, xnnpackTensors); - case V1_3::OperationType::DIV: - return VisitDivNode(subgraph, operation, operands, xnnpackTensors); - case V1_3::OperationType::FLOOR: - return VisitFloorNode(subgraph, operation, operands, xnnpackTensors); - case V1_3::OperationType::FULLY_CONNECTED: - return VisitFullyConnectedNode(subgraph, operation, operands, xnnpackTensors); - case V1_3::OperationType::HARD_SWISH: - return VisitHardSwishNode(subgraph, operation, operands, xnnpackTensors); - case V1_3::OperationType::LOGISTIC: - return VisitLogisticNode(subgraph, operation, operands, xnnpackTensors); - case V1_3::OperationType::MAX_POOL_2D: - return VisitMaxPool2DNode(subgraph, operation, operands, xnnpackTensors); - case V1_3::OperationType::MAXIMUM: - return VisitMaximumNode(subgraph, operation, operands, xnnpackTensors); - case V1_3::OperationType::MEAN: - return VisitMeanNode(subgraph, operation, operands, xnnpackTensors); - case V1_3::OperationType::MINIMUM: - return VisitMinimumNode(subgraph, operation, operands, xnnpackTensors); - case V1_3::OperationType::MUL: - return VisitMulNode(subgraph, operation, operands, xnnpackTensors); - case V1_3::OperationType::NEG: - return VisitNegNode(subgraph, operation, operands, xnnpackTensors); - case V1_3::OperationType::PAD: - return VisitPadNode(subgraph, operation, operands, 0.0f, xnnpackTensors); - case V1_3::OperationType::PAD_V2: - return VisitPadV2Node(subgraph, operation, operands, xnnpackTensors); - case V1_3::OperationType::RESHAPE: - return VisitReshapeNode(subgraph, operation, operands, xnnpackTensors); - case V1_3::OperationType::RESIZE_BILINEAR: - return VisitResizeBilinearNode(subgraph, operation, operands, xnnpackTensors); - case V1_3::OperationType::PRELU: - return VisitPreluNode(subgraph, operation, operands, 
xnnpackTensors); - case V1_3::OperationType::RELU: - return VisitReluNode(subgraph, operation, operands, 0.0f, - std::numeric_limits<float>::infinity(), xnnpackTensors); - case V1_3::OperationType::RELU1: - return VisitReluNode(subgraph, operation, operands, -1.0f, 1.0f, xnnpackTensors); - case V1_3::OperationType::RELU6: - return VisitReluNode(subgraph, operation, operands, 0.0f, 6.0f, xnnpackTensors); - case V1_3::OperationType::SQRT: - return VisitSqrtNode(subgraph, operation, operands, xnnpackTensors); - case V1_3::OperationType::SUB: - return VisitSubNode(subgraph, operation, operands, xnnpackTensors); - case V1_3::OperationType::SOFTMAX: - return VisitSoftmaxNode(subgraph, operation, operands, xnnpackTensors); - default: - return V1_3::ErrorStatus::INVALID_ARGUMENT; - } - } - - static V1_3::ErrorStatus VisitAbsNode(xnn_subgraph_t subgraph, const V1_3::Operation& operation, - RunTimeOperandInfo* operands, - const std::vector<uint32_t>& xnnpackTensors) { - const hardware::hidl_vec<uint32_t>& ins = operation.inputs; - const hardware::hidl_vec<uint32_t>& outs = operation.outputs; - NN_DRIVER_RETURN_IF_ERROR(CheckTensorFloatType(operands[ins[0]].type)); - NN_DRIVER_RETURN_IF_ERROR(CheckTensorFloatType(operands[outs[0]].type)); - - if (subgraph != nullptr) { - const xnn_status status = - xnn_define_abs(subgraph, /*input_id=*/xnnpackTensors[ins[0]], - /*output_id=*/xnnpackTensors[outs[0]], /*flags=*/0); - if (status != xnn_status_success) { - LOG(ERROR) << "XNNPACK xnn_define_abs FAILED"; - return V1_3::ErrorStatus::GENERAL_FAILURE; - } - } - return V1_3::ErrorStatus::NONE; - } - - static V1_3::ErrorStatus VisitAddNode(xnn_subgraph_t subgraph, const V1_3::Operation& operation, - RunTimeOperandInfo* operands, - const std::vector<uint32_t>& xnnpackTensors) { - const hardware::hidl_vec<uint32_t>& ins = operation.inputs; - const hardware::hidl_vec<uint32_t>& outs = operation.outputs; - NN_DRIVER_RETURN_IF_ERROR(CheckTensorFloatType(operands[ins[0]].type)); - 
NN_DRIVER_RETURN_IF_ERROR(CheckTensorFloatType(operands[ins[1]].type)); - NN_DRIVER_RETURN_IF_ERROR(CheckTensorStaticAllocation(operands[ins[2]].lifetime)); - NN_DRIVER_RETURN_IF_ERROR(CheckTensorFloatType(operands[outs[0]].type)); - - float outputMin = -std::numeric_limits<float>::infinity(); - float outputMax = +std::numeric_limits<float>::infinity(); - int activation = getScalarData<int32_t>(operands[ins[2]]); - NN_DRIVER_RETURN_IF_ERROR( - ConvertActivationToOutputRange(activation, &outputMin, &outputMax)); - - if (subgraph != nullptr) { - const xnn_status status = - xnn_define_add2(subgraph, outputMin, outputMax, - /*input1_id=*/xnnpackTensors[ins[0]], - /*input2_id=*/xnnpackTensors[ins[1]], - /*output_id=*/xnnpackTensors[outs[0]], /*flags=*/0); - if (status != xnn_status_success) { - LOG(ERROR) << "XNNPACK xnn_define_add2 FAILED"; - return V1_3::ErrorStatus::GENERAL_FAILURE; - } - } - return V1_3::ErrorStatus::NONE; - } - - static V1_3::ErrorStatus VisitAveragePool2DNode(xnn_subgraph_t subgraph, - const V1_3::Operation& operation, - RunTimeOperandInfo* operands, - const std::vector<uint32_t>& xnnpackTensors) { - const hardware::hidl_vec<uint32_t>& ins = operation.inputs; - const hardware::hidl_vec<uint32_t>& outs = operation.outputs; - NN_DRIVER_RETURN_IF_ERROR(CheckTensorFloatType(operands[ins[0]].type)); - NN_DRIVER_RETURN_IF_ERROR(CheckTensorFloatType(operands[outs[0]].type)); - // Make sure all scalar params are constant. 
- for (uint32_t i = 1; i < ins.size(); i++) { - NN_DRIVER_RETURN_IF_ERROR(CheckTensorStaticAllocation(operands[ins[i]].lifetime)); - } - - bool use_nchw = false; - if (ins.size() == 8) { - use_nchw = getScalarData<bool>(operands[ins[7]]); - } - if (ins.size() == 11) { - use_nchw = getScalarData<bool>(operands[ins[10]]); - } - if (use_nchw) { - VLOG(DRIVER) << "XNNPACK VisitAveragePool2DNode FAILED: only NHWC layout is supported"; - return V1_3::ErrorStatus::INVALID_ARGUMENT; - } - - int32_t stride_width, stride_height, filter_width, filter_height, activation; - uint32_t input_padding_top = 0; - uint32_t input_padding_right = 0; - uint32_t input_padding_bottom = 0; - uint32_t input_padding_left = 0; - uint32_t flags = 0; - if (ins.size() >= 10) { - // Explicit padding - input_padding_left = static_cast<uint32_t>(getScalarData<int32_t>(operands[ins[1]])); - input_padding_right = static_cast<uint32_t>(getScalarData<int32_t>(operands[ins[2]])); - input_padding_top = static_cast<uint32_t>(getScalarData<int32_t>(operands[ins[3]])); - input_padding_bottom = static_cast<uint32_t>(getScalarData<int32_t>(operands[ins[4]])); - stride_width = getScalarData<int32_t>(operands[ins[5]]); - stride_height = getScalarData<int32_t>(operands[ins[6]]); - filter_width = getScalarData<int32_t>(operands[ins[7]]); - filter_height = getScalarData<int32_t>(operands[ins[8]]); - activation = getScalarData<int32_t>(operands[ins[9]]); - } else { - // Implicit padding - int padding_implicit = getScalarData<int32_t>(operands[ins[1]]); - NN_DRIVER_RETURN_IF_ERROR(CalculatePadding(padding_implicit, &flags)); - stride_width = getScalarData<int32_t>(operands[ins[2]]); - stride_height = getScalarData<int32_t>(operands[ins[3]]); - filter_width = getScalarData<int32_t>(operands[ins[4]]); - filter_height = getScalarData<int32_t>(operands[ins[5]]); - activation = getScalarData<int32_t>(operands[ins[6]]); - } - NN_DRIVER_RETURN_IF_ERROR( - CheckPoolingParams(stride_width, stride_height, filter_width, 
filter_height)); - - float outputMin = -std::numeric_limits<float>::infinity(); - float outputMax = +std::numeric_limits<float>::infinity(); - NN_DRIVER_RETURN_IF_ERROR( - ConvertActivationToOutputRange(activation, &outputMin, &outputMax)); - - if (subgraph != nullptr) { - xnn_status status = xnn_status_success; - if (filter_width == 1 && filter_height == 1) { - status = xnn_define_clamp(subgraph, outputMin, outputMax, - /*input_id=*/xnnpackTensors[ins[0]], - /*output_id=*/xnnpackTensors[outs[0]], /*flags=*/0); - } else { - status = xnn_define_average_pooling_2d( - subgraph, input_padding_top, input_padding_right, input_padding_bottom, - input_padding_left, static_cast<uint32_t>(filter_height), - static_cast<uint32_t>(filter_width), static_cast<uint32_t>(stride_height), - static_cast<uint32_t>(stride_width), outputMin, outputMax, - /*input_id=*/xnnpackTensors[ins[0]], - /*output_id=*/xnnpackTensors[outs[0]], flags); - } - if (status != xnn_status_success) { - LOG(ERROR) << "XNNPACK xnn_define_average_pooling_2d FAILED"; - return V1_3::ErrorStatus::GENERAL_FAILURE; - } - } - return V1_3::ErrorStatus::NONE; - } - - static V1_3::ErrorStatus VisitConv2DNode(xnn_subgraph_t subgraph, - const V1_3::Operation& operation, - RunTimeOperandInfo* operands, - const std::vector<uint32_t>& xnnpackTensors) { - const hardware::hidl_vec<uint32_t>& ins = operation.inputs; - const hardware::hidl_vec<uint32_t>& outs = operation.outputs; - NN_DRIVER_RETURN_IF_ERROR(CheckTensorFloatType(operands[ins[0]].type)); - NN_DRIVER_RETURN_IF_ERROR(CheckTensorFloatType(operands[ins[1]].type)); - NN_DRIVER_RETURN_IF_ERROR(CheckTensorStaticAllocation(operands[ins[1]].lifetime)); - NN_DRIVER_RETURN_IF_ERROR(CheckTensorFloatType(operands[ins[2]].type)); - NN_DRIVER_RETURN_IF_ERROR(CheckTensorStaticAllocation(operands[ins[2]].lifetime)); - NN_DRIVER_RETURN_IF_ERROR(CheckTensorFloatType(operands[outs[0]].type)); - // Make sure all scalar params are constant. 
- for (uint32_t i = 3; i < ins.size(); i++) { - NN_DRIVER_RETURN_IF_ERROR(CheckTensorStaticAllocation(operands[ins[i]].lifetime)); - } - - bool use_nchw = false; - if (ins.size() >= 8 && operands[ins[7]].type == OperandType::BOOL) { - use_nchw = getScalarData<bool>(operands[ins[7]]); - } - if (ins.size() >= 11) { - use_nchw = getScalarData<bool>(operands[ins[10]]); - } - if (use_nchw) { - VLOG(DRIVER) << "XNNPACK VisitConv2DNode FAILED: only NHWC layout is supported"; - return V1_3::ErrorStatus::INVALID_ARGUMENT; - } - - int32_t stride_width, stride_height, activation; - int32_t dilation_width_factor = 1; - int32_t dilation_height_factor = 1; - uint32_t input_padding_top = 0; - uint32_t input_padding_right = 0; - uint32_t input_padding_bottom = 0; - uint32_t input_padding_left = 0; - uint32_t flags = 0; - if (ins.size() >= 10 && operands[ins[7]].type != OperandType::BOOL) { - // Explicit padding - input_padding_left = static_cast<uint32_t>(getScalarData<int32_t>(operands[ins[3]])); - input_padding_right = static_cast<uint32_t>(getScalarData<int32_t>(operands[ins[4]])); - input_padding_top = static_cast<uint32_t>(getScalarData<int32_t>(operands[ins[5]])); - input_padding_bottom = static_cast<uint32_t>(getScalarData<int32_t>(operands[ins[6]])); - stride_width = getScalarData<int32_t>(operands[ins[7]]); - stride_height = getScalarData<int32_t>(operands[ins[8]]); - activation = getScalarData<int32_t>(operands[ins[9]]); - if (ins.size() == 13) { - dilation_width_factor = getScalarData<int32_t>(operands[ins[11]]); - dilation_height_factor = getScalarData<int32_t>(operands[ins[12]]); - } - } else { - // Implicit padding - int padding_implicit = getScalarData<int32_t>(operands[ins[3]]); - NN_DRIVER_RETURN_IF_ERROR(CalculatePadding(padding_implicit, &flags)); - stride_width = getScalarData<int32_t>(operands[ins[4]]); - stride_height = getScalarData<int32_t>(operands[ins[5]]); - activation = getScalarData<int32_t>(operands[ins[6]]); - if (ins.size() == 10) { - 
dilation_width_factor = getScalarData<int32_t>(operands[ins[8]]); - dilation_height_factor = getScalarData<int32_t>(operands[ins[9]]); - } - } - NN_DRIVER_RETURN_IF_ERROR(CheckConvolutionParams( - stride_width, stride_height, dilation_width_factor, dilation_height_factor)); - - float outputMin = -std::numeric_limits<float>::infinity(); - float outputMax = +std::numeric_limits<float>::infinity(); - NN_DRIVER_RETURN_IF_ERROR( - ConvertActivationToOutputRange(activation, &outputMin, &outputMax)); - - const RunTimeOperandInfo& filter = operands[ins[1]]; - const uint32_t output_channels = filter.dimensions[0]; - const uint32_t kernel_height = filter.dimensions[1]; - const uint32_t kernel_width = filter.dimensions[2]; - const uint32_t input_channels = filter.dimensions[3]; - - if (subgraph != nullptr) { - const xnn_status status = xnn_define_convolution_2d( - subgraph, input_padding_top, input_padding_right, input_padding_bottom, - input_padding_left, static_cast<uint32_t>(kernel_height), - static_cast<uint32_t>(kernel_width), static_cast<uint32_t>(stride_height), - static_cast<uint32_t>(stride_width), - static_cast<uint32_t>(dilation_height_factor), - static_cast<uint32_t>(dilation_width_factor), - /*groups=*/1, static_cast<size_t>(input_channels), - static_cast<size_t>(output_channels), outputMin, outputMax, - /*input_id=*/xnnpackTensors[ins[0]], - /*filter_id=*/xnnpackTensors[ins[1]], - /*bias_id=*/xnnpackTensors[ins[2]], - /*output_id=*/xnnpackTensors[outs[0]], flags); - if (status != xnn_status_success) { - LOG(ERROR) << "XNNPACK xnn_define_convolution_2d FAILED"; - return V1_3::ErrorStatus::GENERAL_FAILURE; - } - } - - return V1_3::ErrorStatus::NONE; - } - - static V1_3::ErrorStatus VisitDepthwiseConv2DNode(xnn_subgraph_t subgraph, - const V1_3::Operation& operation, - RunTimeOperandInfo* operands, - const std::vector<uint32_t>& xnnpackTensors) { - const hardware::hidl_vec<uint32_t>& ins = operation.inputs; - const hardware::hidl_vec<uint32_t>& outs = 
operation.outputs; - NN_DRIVER_RETURN_IF_ERROR(CheckTensorFloatType(operands[ins[0]].type)); - NN_DRIVER_RETURN_IF_ERROR(CheckTensorFloatType(operands[ins[1]].type)); - NN_DRIVER_RETURN_IF_ERROR(CheckTensorStaticAllocation(operands[ins[1]].lifetime)); - NN_DRIVER_RETURN_IF_ERROR(CheckTensorFloatType(operands[ins[2]].type)); - NN_DRIVER_RETURN_IF_ERROR(CheckTensorStaticAllocation(operands[ins[2]].lifetime)); - NN_DRIVER_RETURN_IF_ERROR(CheckTensorFloatType(operands[outs[0]].type)); - // Make sure all scalar params are constant. - for (uint32_t i = 3; i < ins.size(); i++) { - NN_DRIVER_RETURN_IF_ERROR(CheckTensorStaticAllocation(operands[ins[i]].lifetime)); - } - - bool use_nchw = false; - if (ins.size() >= 9 && operands[ins[8]].type == OperandType::BOOL) { - use_nchw = getScalarData<bool>(operands[ins[8]]); - } - if (ins.size() >= 12) { - use_nchw = getScalarData<bool>(operands[ins[11]]); - } - if (use_nchw) { - VLOG(DRIVER) - << "XNNPACK VisitDepthwiseConv2DNode FAILED: only NHWC layout is supported"; - return V1_3::ErrorStatus::INVALID_ARGUMENT; - } - - int32_t stride_width, stride_height, depth_multiplier, activation; - int32_t dilation_width_factor = 1; - int32_t dilation_height_factor = 1; - uint32_t input_padding_top = 0; - uint32_t input_padding_right = 0; - uint32_t input_padding_bottom = 0; - uint32_t input_padding_left = 0; - uint32_t flags = 0; - if (ins.size() >= 11 && operands[ins[8]].type != OperandType::BOOL) { - // Explicit padding - input_padding_left = static_cast<uint32_t>(getScalarData<int32_t>(operands[ins[3]])); - input_padding_right = static_cast<uint32_t>(getScalarData<int32_t>(operands[ins[4]])); - input_padding_top = static_cast<uint32_t>(getScalarData<int32_t>(operands[ins[5]])); - input_padding_bottom = static_cast<uint32_t>(getScalarData<int32_t>(operands[ins[6]])); - stride_width = getScalarData<int32_t>(operands[ins[7]]); - stride_height = getScalarData<int32_t>(operands[ins[8]]); - depth_multiplier = 
getScalarData<int32_t>(operands[ins[9]]); - activation = getScalarData<int32_t>(operands[ins[10]]); - if (ins.size() == 14) { - dilation_width_factor = getScalarData<int32_t>(operands[ins[12]]); - dilation_height_factor = getScalarData<int32_t>(operands[ins[13]]); - } - } else { - // Implicit padding - int padding_implicit = getScalarData<int32_t>(operands[ins[3]]); - NN_DRIVER_RETURN_IF_ERROR(CalculatePadding(padding_implicit, &flags)); - stride_width = getScalarData<int32_t>(operands[ins[4]]); - stride_height = getScalarData<int32_t>(operands[ins[5]]); - depth_multiplier = getScalarData<int32_t>(operands[ins[6]]); - activation = getScalarData<int32_t>(operands[ins[7]]); - if (ins.size() == 11) { - dilation_width_factor = getScalarData<int32_t>(operands[ins[9]]); - dilation_height_factor = getScalarData<int32_t>(operands[ins[10]]); - } - } - float outputMin = -std::numeric_limits<float>::infinity(); - float outputMax = +std::numeric_limits<float>::infinity(); - NN_DRIVER_RETURN_IF_ERROR( - ConvertActivationToOutputRange(activation, &outputMin, &outputMax)); - - const RunTimeOperandInfo& filter = operands[ins[1]]; - const uint32_t output_channels = filter.dimensions[3]; - const uint32_t kernel_height = filter.dimensions[1]; - const uint32_t kernel_width = filter.dimensions[2]; - NN_DRIVER_RETURN_IF_ERROR(CheckDepthwiseConvolutionParams( - stride_width, stride_height, dilation_width_factor, dilation_height_factor, - depth_multiplier, output_channels)); - - if (subgraph != nullptr) { - const xnn_status status = xnn_define_depthwise_convolution_2d( - subgraph, input_padding_top, input_padding_right, input_padding_bottom, - input_padding_left, static_cast<uint32_t>(kernel_height), - static_cast<uint32_t>(kernel_width), static_cast<uint32_t>(stride_height), - static_cast<uint32_t>(stride_width), - static_cast<uint32_t>(dilation_height_factor), - static_cast<uint32_t>(dilation_width_factor), - static_cast<uint32_t>(depth_multiplier), - /*input_channels=*/ - 
static_cast<uint32_t>(output_channels / depth_multiplier), outputMin, outputMax, - /*input_id=*/xnnpackTensors[ins[0]], - /*filter_id=*/xnnpackTensors[ins[1]], - /*bias_id=*/xnnpackTensors[ins[2]], - /*output_id=*/xnnpackTensors[outs[0]], flags); - if (status != xnn_status_success) { - LOG(ERROR) << "XNNPACK xnn_define_depthwise_convolution_2d FAILED"; - return V1_3::ErrorStatus::GENERAL_FAILURE; - } - } - return V1_3::ErrorStatus::NONE; - } - - static V1_3::ErrorStatus VisitDivNode(xnn_subgraph_t subgraph, const V1_3::Operation& operation, - RunTimeOperandInfo* operands, - const std::vector<uint32_t>& xnnpackTensors) { - const hardware::hidl_vec<uint32_t>& ins = operation.inputs; - const hardware::hidl_vec<uint32_t>& outs = operation.outputs; - NN_DRIVER_RETURN_IF_ERROR(CheckTensorFloatType(operands[ins[0]].type)); - NN_DRIVER_RETURN_IF_ERROR(CheckTensorFloatType(operands[ins[1]].type)); - NN_DRIVER_RETURN_IF_ERROR(CheckTensorStaticAllocation(operands[ins[2]].lifetime)); - NN_DRIVER_RETURN_IF_ERROR(CheckTensorFloatType(operands[outs[0]].type)); - - float outputMin = -std::numeric_limits<float>::infinity(); - float outputMax = +std::numeric_limits<float>::infinity(); - int activation = getScalarData<int32_t>(operands[ins[2]]); - NN_DRIVER_RETURN_IF_ERROR( - ConvertActivationToOutputRange(activation, &outputMin, &outputMax)); - - if (subgraph != nullptr) { - const xnn_status status = - xnn_define_divide(subgraph, outputMin, outputMax, - /*input1_id=*/xnnpackTensors[ins[0]], - /*input2_id=*/xnnpackTensors[ins[1]], - /*output_id=*/xnnpackTensors[outs[0]], /*flags=*/0); - if (status != xnn_status_success) { - LOG(ERROR) << "XNNPACK xnn_define_divide FAILED"; - return V1_3::ErrorStatus::GENERAL_FAILURE; - } - } - return V1_3::ErrorStatus::NONE; - } - - static V1_3::ErrorStatus VisitFullyConnectedNode(xnn_subgraph_t subgraph, - const V1_3::Operation& operation, - RunTimeOperandInfo* operands, - const std::vector<uint32_t>& xnnpackTensors) { - const 
hardware::hidl_vec<uint32_t>& ins = operation.inputs; - const hardware::hidl_vec<uint32_t>& outs = operation.outputs; - NN_DRIVER_RETURN_IF_ERROR(CheckTensorFloatType(operands[ins[0]].type)); - NN_DRIVER_RETURN_IF_ERROR(CheckTensorFloatType(operands[ins[1]].type)); - NN_DRIVER_RETURN_IF_ERROR(CheckTensorStaticAllocation(operands[ins[1]].lifetime)); - NN_DRIVER_RETURN_IF_ERROR(CheckTensorFloatType(operands[ins[2]].type)); - NN_DRIVER_RETURN_IF_ERROR(CheckTensorStaticAllocation(operands[ins[2]].lifetime)); - NN_DRIVER_RETURN_IF_ERROR(CheckTensorStaticAllocation(operands[ins[3]].lifetime)); - NN_DRIVER_RETURN_IF_ERROR(CheckTensorFloatType(operands[outs[0]].type)); - - float outputMin = -std::numeric_limits<float>::infinity(); - float outputMax = +std::numeric_limits<float>::infinity(); - int activation = getScalarData<int32_t>(operands[ins[3]]); - NN_DRIVER_RETURN_IF_ERROR( - ConvertActivationToOutputRange(activation, &outputMin, &outputMax)); - - if (subgraph != nullptr) { - const xnn_status status = - xnn_define_fully_connected(subgraph, outputMin, outputMax, - /*input_id=*/xnnpackTensors[ins[0]], - /*filter_id=*/xnnpackTensors[ins[1]], - /*bias_id=*/xnnpackTensors[ins[2]], - /*output_id=*/xnnpackTensors[outs[0]], - /*flags=*/XNN_FLAG_TENSORFLOW_RESHAPE_2D); - if (status != xnn_status_success) { - LOG(ERROR) << "XNNPACK xnn_define_fully_connected FAILED"; - return V1_3::ErrorStatus::GENERAL_FAILURE; - } - } - return V1_3::ErrorStatus::NONE; - } - - static V1_3::ErrorStatus VisitFloorNode(xnn_subgraph_t subgraph, - const V1_3::Operation& operation, - RunTimeOperandInfo* operands, - const std::vector<uint32_t>& xnnpackTensors) { - const hardware::hidl_vec<uint32_t>& ins = operation.inputs; - const hardware::hidl_vec<uint32_t>& outs = operation.outputs; - NN_DRIVER_RETURN_IF_ERROR(CheckTensorFloatType(operands[ins[0]].type)); - NN_DRIVER_RETURN_IF_ERROR(CheckTensorFloatType(operands[outs[0]].type)); - - if (subgraph != nullptr) { - const xnn_status status = - 
xnn_define_floor(subgraph, - /*input1_id=*/xnnpackTensors[ins[0]], - /*output_id=*/xnnpackTensors[outs[0]], /*flags=*/0); - if (status != xnn_status_success) { - LOG(ERROR) << "XNNPACK xnn_define_floor FAILED"; - return V1_3::ErrorStatus::GENERAL_FAILURE; - } - } - return V1_3::ErrorStatus::NONE; - } - - static V1_3::ErrorStatus VisitHardSwishNode(xnn_subgraph_t subgraph, - const V1_3::Operation& operation, - RunTimeOperandInfo* operands, - const std::vector<uint32_t>& xnnpackTensors) { - const hardware::hidl_vec<uint32_t>& ins = operation.inputs; - const hardware::hidl_vec<uint32_t>& outs = operation.outputs; - NN_DRIVER_RETURN_IF_ERROR(CheckTensorFloatType(operands[ins[0]].type)); - NN_DRIVER_RETURN_IF_ERROR(CheckTensorFloatType(operands[outs[0]].type)); - - if (subgraph != nullptr) { - const xnn_status status = - xnn_define_hardswish(subgraph, /*input_id=*/xnnpackTensors[ins[0]], - /*output_id=*/xnnpackTensors[outs[0]], /*flags=*/0); - if (status != xnn_status_success) { - LOG(ERROR) << "XNNPACK xnn_define_hardswish FAILED"; - return V1_3::ErrorStatus::GENERAL_FAILURE; - } - } - return V1_3::ErrorStatus::NONE; - } - - static V1_3::ErrorStatus VisitLogisticNode(xnn_subgraph_t subgraph, - const V1_3::Operation& operation, - RunTimeOperandInfo* operands, - const std::vector<uint32_t>& xnnpackTensors) { - const hardware::hidl_vec<uint32_t>& ins = operation.inputs; - const hardware::hidl_vec<uint32_t>& outs = operation.outputs; - NN_DRIVER_RETURN_IF_ERROR(CheckTensorFloatType(operands[ins[0]].type)); - NN_DRIVER_RETURN_IF_ERROR(CheckTensorFloatType(operands[outs[0]].type)); - - if (subgraph != nullptr) { - const xnn_status status = - xnn_define_sigmoid(subgraph, /*input_id=*/xnnpackTensors[ins[0]], - /*output_id=*/xnnpackTensors[outs[0]], /*flags=*/0); - if (status != xnn_status_success) { - LOG(ERROR) << "XNNPACK xnn_define_sigmoid FAILED"; - return V1_3::ErrorStatus::GENERAL_FAILURE; - } - } - return V1_3::ErrorStatus::NONE; - } - - static V1_3::ErrorStatus 
VisitMaxPool2DNode(xnn_subgraph_t subgraph, - const V1_3::Operation& operation, - RunTimeOperandInfo* operands, - const std::vector<uint32_t>& xnnpackTensors) { - const hardware::hidl_vec<uint32_t>& ins = operation.inputs; - const hardware::hidl_vec<uint32_t>& outs = operation.outputs; - NN_DRIVER_RETURN_IF_ERROR(CheckTensorFloatType(operands[ins[0]].type)); - NN_DRIVER_RETURN_IF_ERROR(CheckTensorFloatType(operands[outs[0]].type)); - // Make sure all scalar params are constant. - for (uint32_t i = 1; i < ins.size(); i++) { - NN_DRIVER_RETURN_IF_ERROR(CheckTensorStaticAllocation(operands[ins[i]].lifetime)); - } - - bool use_nchw = false; - if (ins.size() == 8) { - use_nchw = getScalarData<bool>(operands[ins[7]]); - } - if (ins.size() == 11) { - use_nchw = getScalarData<bool>(operands[ins[10]]); - } - if (use_nchw) { - VLOG(DRIVER) << "XNNPACK VisitMaxPool2DNode FAILED: only NHWC layout is supported"; - return V1_3::ErrorStatus::INVALID_ARGUMENT; - } - - int32_t stride_width, stride_height, filter_width, filter_height, activation; - uint32_t input_padding_top = 0; - uint32_t input_padding_right = 0; - uint32_t input_padding_bottom = 0; - uint32_t input_padding_left = 0; - uint32_t flags = 0; - if (ins.size() >= 10) { - // Explicit padding - input_padding_left = static_cast<uint32_t>(getScalarData<int32_t>(operands[ins[1]])); - input_padding_right = static_cast<uint32_t>(getScalarData<int32_t>(operands[ins[2]])); - input_padding_top = static_cast<uint32_t>(getScalarData<int32_t>(operands[ins[3]])); - input_padding_bottom = static_cast<uint32_t>(getScalarData<int32_t>(operands[ins[4]])); - stride_width = getScalarData<int32_t>(operands[ins[5]]); - stride_height = getScalarData<int32_t>(operands[ins[6]]); - filter_width = getScalarData<int32_t>(operands[ins[7]]); - filter_height = getScalarData<int32_t>(operands[ins[8]]); - activation = getScalarData<int32_t>(operands[ins[9]]); - } else { - // Implicit padding - int padding_implicit = 
getScalarData<int32_t>(operands[ins[1]]); - NN_DRIVER_RETURN_IF_ERROR(CalculatePadding(padding_implicit, &flags)); - stride_width = getScalarData<int32_t>(operands[ins[2]]); - stride_height = getScalarData<int32_t>(operands[ins[3]]); - filter_width = getScalarData<int32_t>(operands[ins[4]]); - filter_height = getScalarData<int32_t>(operands[ins[5]]); - activation = getScalarData<int32_t>(operands[ins[6]]); - } - NN_DRIVER_RETURN_IF_ERROR( - CheckPoolingParams(stride_width, stride_height, filter_width, filter_height)); - - float outputMin = -std::numeric_limits<float>::infinity(); - float outputMax = +std::numeric_limits<float>::infinity(); - NN_DRIVER_RETURN_IF_ERROR( - ConvertActivationToOutputRange(activation, &outputMin, &outputMax)); - - if (subgraph != nullptr) { - xnn_status status = xnn_status_success; - if (filter_width == 1 && filter_height == 1) { - status = xnn_define_clamp(subgraph, outputMin, outputMax, - /*input_id=*/xnnpackTensors[ins[0]], - /*output_id=*/xnnpackTensors[outs[0]], /*flags=*/0); - } else { - status = xnn_define_max_pooling_2d( - subgraph, input_padding_top, input_padding_right, input_padding_bottom, - input_padding_left, static_cast<uint32_t>(filter_height), - static_cast<uint32_t>(filter_width), static_cast<uint32_t>(stride_height), - static_cast<uint32_t>(stride_width), /*dilation_height=*/1, - /*dilation_width=*/1, outputMin, outputMax, - /*input_id=*/xnnpackTensors[ins[0]], - /*output_id=*/xnnpackTensors[outs[0]], flags); - } - if (status != xnn_status_success) { - LOG(ERROR) << "XNNPACK xnn_define_max_pooling_2d FAILED"; - return V1_3::ErrorStatus::GENERAL_FAILURE; - } - } - return V1_3::ErrorStatus::NONE; - } - - static V1_3::ErrorStatus VisitMaximumNode(xnn_subgraph_t subgraph, - const V1_3::Operation& operation, - RunTimeOperandInfo* operands, - const std::vector<uint32_t>& xnnpackTensors) { - const hardware::hidl_vec<uint32_t>& ins = operation.inputs; - const hardware::hidl_vec<uint32_t>& outs = operation.outputs; - 
NN_DRIVER_RETURN_IF_ERROR(CheckTensorFloatType(operands[ins[0]].type)); - NN_DRIVER_RETURN_IF_ERROR(CheckTensorFloatType(operands[ins[1]].type)); - NN_DRIVER_RETURN_IF_ERROR(CheckTensorStaticAllocation(operands[ins[2]].lifetime)); - NN_DRIVER_RETURN_IF_ERROR(CheckTensorFloatType(operands[outs[0]].type)); - - float outputMin = -std::numeric_limits<float>::infinity(); - float outputMax = +std::numeric_limits<float>::infinity(); - int activation = getScalarData<int32_t>(operands[ins[2]]); - NN_DRIVER_RETURN_IF_ERROR( - ConvertActivationToOutputRange(activation, &outputMin, &outputMax)); - - if (subgraph != nullptr) { - const xnn_status status = - xnn_define_maximum2(subgraph, - /*input1_id=*/xnnpackTensors[ins[0]], - /*input2_id=*/xnnpackTensors[ins[1]], - /*output_id=*/xnnpackTensors[outs[0]], /*flags=*/0); - if (status != xnn_status_success) { - LOG(ERROR) << "XNNPACK xnn_define_maximum2 FAILED"; - return V1_3::ErrorStatus::GENERAL_FAILURE; - } - } - return V1_3::ErrorStatus::NONE; - } - - static V1_3::ErrorStatus VisitMeanNode(xnn_subgraph_t subgraph, - const V1_3::Operation& operation, - RunTimeOperandInfo* operands, - const std::vector<uint32_t>& xnnpackTensors) { - const hardware::hidl_vec<uint32_t>& ins = operation.inputs; - const hardware::hidl_vec<uint32_t>& outs = operation.outputs; - NN_DRIVER_RETURN_IF_ERROR(CheckTensorFloatType(operands[ins[0]].type)); - NN_DRIVER_RETURN_IF_ERROR(CheckTensorShape(operands[ins[0]].dimensions, 4)); - NN_DRIVER_RETURN_IF_ERROR(CheckAxesTensorShape(operands[ins[1]].dimensions)); - NN_DRIVER_RETURN_IF_ERROR(CheckTensorStaticAllocation(operands[ins[1]].lifetime)); - NN_DRIVER_RETURN_IF_ERROR(CheckTensorStaticAllocation(operands[ins[2]].lifetime)); - NN_DRIVER_RETURN_IF_ERROR(CheckTensorFloatType(operands[outs[0]].type)); - NN_DRIVER_RETURN_IF_ERROR(CheckTensorShape(operands[outs[0]].dimensions, 4)); - - int keep_dims = getScalarData<int32_t>(operands[ins[2]]); - if (keep_dims <= 0) { - LOG(ERROR) << "XNNPACK VisitMeanNode 
FAILED: only support keep_dims"; - return V1_3::ErrorStatus::INVALID_ARGUMENT; - } - const int32_t* axes_buffer = reinterpret_cast<const int32_t*>(operands[ins[1]].buffer); - if (operands[ins[1]].dimensions[0] != 2) { - LOG(ERROR) << "XNNPACK VisitMeanNode FAILED: unsupported axes"; - return V1_3::ErrorStatus::INVALID_ARGUMENT; - } - if (std::min(axes_buffer[0], axes_buffer[1]) != 1 || - std::max(axes_buffer[0], axes_buffer[1]) != 2) { - LOG(ERROR) << "XNNPACK VisitMeanNode FAILED: unsupported axes"; - return V1_3::ErrorStatus::INVALID_ARGUMENT; - } - if (subgraph != nullptr) { - const xnn_status status = xnn_define_global_average_pooling_2d( - subgraph, - /*outputMin=*/-std::numeric_limits<float>::infinity(), - /*outputMax=*/+std::numeric_limits<float>::infinity(), - /*input_id=*/xnnpackTensors[ins[0]], - /*output_id=*/xnnpackTensors[outs[0]], /*flags=*/0); - if (status != xnn_status_success) { - LOG(ERROR) << "XNNPACK xnn_define_global_average_pooling_2d FAILED"; - return V1_3::ErrorStatus::GENERAL_FAILURE; - } - } - return V1_3::ErrorStatus::NONE; - } - - static V1_3::ErrorStatus VisitMinimumNode(xnn_subgraph_t subgraph, - const V1_3::Operation& operation, - RunTimeOperandInfo* operands, - const std::vector<uint32_t>& xnnpackTensors) { - const hardware::hidl_vec<uint32_t>& ins = operation.inputs; - const hardware::hidl_vec<uint32_t>& outs = operation.outputs; - NN_DRIVER_RETURN_IF_ERROR(CheckTensorFloatType(operands[ins[0]].type)); - NN_DRIVER_RETURN_IF_ERROR(CheckTensorFloatType(operands[ins[1]].type)); - NN_DRIVER_RETURN_IF_ERROR(CheckTensorStaticAllocation(operands[ins[2]].lifetime)); - NN_DRIVER_RETURN_IF_ERROR(CheckTensorFloatType(operands[outs[0]].type)); - - float outputMin = -std::numeric_limits<float>::infinity(); - float outputMax = +std::numeric_limits<float>::infinity(); - int activation = getScalarData<int32_t>(operands[ins[2]]); - NN_DRIVER_RETURN_IF_ERROR( - ConvertActivationToOutputRange(activation, &outputMin, &outputMax)); - - if (subgraph != 
nullptr) { - const xnn_status status = - xnn_define_minimum2(subgraph, - /*input1_id=*/xnnpackTensors[ins[0]], - /*input2_id=*/xnnpackTensors[ins[1]], - /*output_id=*/xnnpackTensors[outs[0]], /*flags=*/0); - if (status != xnn_status_success) { - LOG(ERROR) << "XNNPACK xnn_define_minimum2 FAILED"; - return V1_3::ErrorStatus::GENERAL_FAILURE; - } - } - return V1_3::ErrorStatus::NONE; - } - - static V1_3::ErrorStatus VisitMulNode(xnn_subgraph_t subgraph, const V1_3::Operation& operation, - RunTimeOperandInfo* operands, - const std::vector<uint32_t>& xnnpackTensors) { - const hardware::hidl_vec<uint32_t>& ins = operation.inputs; - const hardware::hidl_vec<uint32_t>& outs = operation.outputs; - NN_DRIVER_RETURN_IF_ERROR(CheckTensorFloatType(operands[ins[0]].type)); - NN_DRIVER_RETURN_IF_ERROR(CheckTensorFloatType(operands[ins[1]].type)); - NN_DRIVER_RETURN_IF_ERROR(CheckTensorStaticAllocation(operands[ins[2]].lifetime)); - NN_DRIVER_RETURN_IF_ERROR(CheckTensorFloatType(operands[outs[0]].type)); - - int activation = getScalarData<int32_t>(operands[ins[2]]); - float outputMin = -std::numeric_limits<float>::infinity(); - float outputMax = +std::numeric_limits<float>::infinity(); - NN_DRIVER_RETURN_IF_ERROR( - ConvertActivationToOutputRange(activation, &outputMin, &outputMax)); - - if (subgraph != nullptr) { - const xnn_status status = - xnn_define_multiply2(subgraph, outputMin, outputMax, - /*input1_id=*/xnnpackTensors[ins[0]], - /*input2_id=*/xnnpackTensors[ins[1]], - /*output_id=*/xnnpackTensors[outs[0]], /*flags=*/0); - if (status != xnn_status_success) { - LOG(ERROR) << "XNNPACK xnn_define_multiply2 FAILED"; - return V1_3::ErrorStatus::GENERAL_FAILURE; - } - } - return V1_3::ErrorStatus::NONE; - } - - static V1_3::ErrorStatus VisitNegNode(xnn_subgraph_t subgraph, const V1_3::Operation& operation, - RunTimeOperandInfo* operands, - const std::vector<uint32_t>& xnnpackTensors) { - const hardware::hidl_vec<uint32_t>& ins = operation.inputs; - const 
hardware::hidl_vec<uint32_t>& outs = operation.outputs; - NN_DRIVER_RETURN_IF_ERROR(CheckTensorFloatType(operands[ins[0]].type)); - NN_DRIVER_RETURN_IF_ERROR(CheckTensorFloatType(operands[outs[0]].type)); - - if (subgraph != nullptr) { - const xnn_status status = - xnn_define_negate(subgraph, - /*input1_id=*/xnnpackTensors[ins[0]], - /*output_id=*/xnnpackTensors[outs[0]], /*flags=*/0); - if (status != xnn_status_success) { - LOG(ERROR) << "XNNPACK xnn_define_negate FAILED"; - return V1_3::ErrorStatus::GENERAL_FAILURE; - } - } - return V1_3::ErrorStatus::NONE; - } - - static V1_3::ErrorStatus VisitPreluNode(xnn_subgraph_t subgraph, - const V1_3::Operation& operation, - RunTimeOperandInfo* operands, - const std::vector<uint32_t>& xnnpackTensors) { - const hardware::hidl_vec<uint32_t>& ins = operation.inputs; - const hardware::hidl_vec<uint32_t>& outs = operation.outputs; - NN_DRIVER_RETURN_IF_ERROR(CheckTensorFloatType(operands[ins[0]].type)); - NN_DRIVER_RETURN_IF_ERROR( - CheckTensorShape(operands[ins[0]].dimensions, 1, XNN_MAX_TENSOR_DIMS)); - NN_DRIVER_RETURN_IF_ERROR(CheckTensorFloatType(operands[ins[1]].type)); - NN_DRIVER_RETURN_IF_ERROR(CheckSlopeTensorShape(operands[ins[1]].dimensions)); - NN_DRIVER_RETURN_IF_ERROR(CheckTensorFloatType(operands[outs[0]].type)); - NN_DRIVER_RETURN_IF_ERROR( - CheckTensorShape(operands[outs[0]].dimensions, 1, XNN_MAX_TENSOR_DIMS)); - - if (subgraph != nullptr) { - const xnn_status status = - xnn_define_prelu(subgraph, /*input_id=*/xnnpackTensors[ins[0]], - /*slope_id=*/xnnpackTensors[ins[1]], - /*output_id=*/xnnpackTensors[outs[0]], /*flags=*/0); - if (status != xnn_status_success) { - LOG(ERROR) << "XNNPACK xnn_define_prelu FAILED"; - return V1_3::ErrorStatus::GENERAL_FAILURE; - } - } - return V1_3::ErrorStatus::NONE; - } - - static V1_3::ErrorStatus VisitPadNode(xnn_subgraph_t subgraph, const V1_3::Operation& operation, - RunTimeOperandInfo* operands, float padding_value, - const std::vector<uint32_t>& xnnpackTensors) { - 
const hardware::hidl_vec<uint32_t>& ins = operation.inputs; - const hardware::hidl_vec<uint32_t>& outs = operation.outputs; - NN_DRIVER_RETURN_IF_ERROR(CheckTensorFloatType(operands[ins[0]].type)); - NN_DRIVER_RETURN_IF_ERROR( - CheckTensorShape(operands[ins[0]].dimensions, 1, XNN_MAX_TENSOR_DIMS)); - NN_DRIVER_RETURN_IF_ERROR(CheckTensorStaticAllocation(operands[ins[1]].lifetime)); - NN_DRIVER_RETURN_IF_ERROR(CheckTensorFloatType(operands[outs[0]].type)); - NN_DRIVER_RETURN_IF_ERROR( - CheckTensorShape(operands[outs[0]].dimensions, 1, XNN_MAX_TENSOR_DIMS)); - - const int32_t* paddings_data = reinterpret_cast<const int32_t*>(operands[ins[1]].buffer); - for (size_t i = 0; i < operands[ins[1]].dimensions.size() * 2; i++) { - if (paddings_data[i] < 0) return V1_3::ErrorStatus::INVALID_ARGUMENT; - } - if (subgraph != nullptr) { - std::array<size_t, XNN_MAX_TENSOR_DIMS> pre_paddings{}; - std::array<size_t, XNN_MAX_TENSOR_DIMS> post_paddings{}; - for (size_t i = 0; i < operands[ins[1]].dimensions.size(); i++) { - pre_paddings[i] = static_cast<size_t>(paddings_data[i * 2 + 0]); - post_paddings[i] = static_cast<size_t>(paddings_data[i * 2 + 1]); - } - const xnn_status status = xnn_define_static_constant_pad( - subgraph, pre_paddings.data(), post_paddings.data(), padding_value, - /*input_id=*/xnnpackTensors[ins[0]], - /*output_id=*/xnnpackTensors[outs[0]], /*flags=*/0); - if (status != xnn_status_success) { - LOG(ERROR) << "XNNPACK xnn_define_static_constant_pad FAILED"; - return V1_3::ErrorStatus::GENERAL_FAILURE; - } - } - return V1_3::ErrorStatus::NONE; - } - - static V1_3::ErrorStatus VisitPadV2Node(xnn_subgraph_t subgraph, - const V1_3::Operation& operation, - RunTimeOperandInfo* operands, - const std::vector<uint32_t>& xnnpackTensors) { - const hardware::hidl_vec<uint32_t>& ins = operation.inputs; - if (operands[ins[2]].type != OperandType::FLOAT32) { - return V1_3::ErrorStatus::INVALID_ARGUMENT; - } - float padding_value = getScalarData<float>(operands[ins[2]]); - 
return VisitPadNode(subgraph, operation, operands, padding_value, xnnpackTensors); - } - - static V1_3::ErrorStatus VisitReshapeNode(xnn_subgraph_t subgraph, - const V1_3::Operation& operation, - RunTimeOperandInfo* operands, - const std::vector<uint32_t>& xnnpackTensors) { - const hardware::hidl_vec<uint32_t>& ins = operation.inputs; - const hardware::hidl_vec<uint32_t>& outs = operation.outputs; - NN_DRIVER_RETURN_IF_ERROR(CheckTensorFloatType(operands[ins[0]].type)); - NN_DRIVER_RETURN_IF_ERROR( - CheckTensorShape(operands[ins[0]].dimensions, 0, XNN_MAX_TENSOR_DIMS)); - NN_DRIVER_RETURN_IF_ERROR(CheckTensorStaticAllocation(operands[ins[1]].lifetime)); - NN_DRIVER_RETURN_IF_ERROR(CheckTensorFloatType(operands[outs[0]].type)); - NN_DRIVER_RETURN_IF_ERROR( - CheckTensorShape(operands[outs[0]].dimensions, 0, XNN_MAX_TENSOR_DIMS)); - - if (subgraph != nullptr) { - std::array<size_t, XNN_MAX_TENSOR_DIMS> new_shape; - for (uint32_t i = 0; i < operands[outs[0]].dimensions.size(); i++) { - new_shape[i] = static_cast<size_t>(operands[outs[0]].dimensions[i]); - } - const xnn_status status = xnn_define_static_reshape( - subgraph, static_cast<size_t>(operands[outs[0]].dimensions.size()), - new_shape.data(), - /*input_id=*/xnnpackTensors[ins[0]], - /*output_id=*/xnnpackTensors[outs[0]], /*flags=*/0); - if (status != xnn_status_success) { - LOG(ERROR) << "XNNPACK xnn_define_static_reshape FAILED"; - return V1_3::ErrorStatus::GENERAL_FAILURE; - } - } - return V1_3::ErrorStatus::NONE; - } - - static V1_3::ErrorStatus VisitResizeBilinearNode(xnn_subgraph_t subgraph, - const V1_3::Operation& operation, - RunTimeOperandInfo* operands, - const std::vector<uint32_t>& xnnpackTensors) { - const hardware::hidl_vec<uint32_t>& ins = operation.inputs; - const hardware::hidl_vec<uint32_t>& outs = operation.outputs; - NN_DRIVER_RETURN_IF_ERROR(CheckTensorFloatType(operands[ins[0]].type)); - NN_DRIVER_RETURN_IF_ERROR(CheckTensorShape(operands[ins[0]].dimensions, 4)); - 
NN_DRIVER_RETURN_IF_ERROR(CheckTensorFloatType(operands[outs[0]].type)); - NN_DRIVER_RETURN_IF_ERROR(CheckTensorShape(operands[outs[0]].dimensions, 4)); - // Make sure all scalar params are constant. - for (uint32_t i = 1; i < ins.size(); i++) { - NN_DRIVER_RETURN_IF_ERROR(CheckTensorStaticAllocation(operands[ins[i]].lifetime)); - } - - if (ins.size() >= 4) { - bool use_nchw = getScalarData<bool>(operands[ins[3]]); - if (use_nchw) { - VLOG(DRIVER) - << "XNNPACK VisitResizeBilinearNode FAILED: only NHWC layout is supported"; - return V1_3::ErrorStatus::INVALID_ARGUMENT; - } - } - - size_t new_height, new_width; - if (operands[ins[1]].type == OperandType::INT32) { - // explicitly specify the output dimension. - new_width = static_cast<size_t>(getScalarData<int32_t>(operands[ins[1]])); - new_height = static_cast<size_t>(getScalarData<int32_t>(operands[ins[2]])); - } else if (operands[ins[1]].type == OperandType::FLOAT32) { - // specify the output dimension scaling factor. - float width_scale = getScalarData<float>(operands[ins[1]]); - float height_scale = getScalarData<float>(operands[ins[2]]); - if (width_scale <= 0 || height_scale <= 0) { - return V1_3::ErrorStatus::INVALID_ARGUMENT; - } - new_height = static_cast<size_t>(operands[ins[0]].dimensions[1] * height_scale); - new_width = static_cast<size_t>(operands[ins[0]].dimensions[2] * width_scale); - } else { - return V1_3::ErrorStatus::INVALID_ARGUMENT; - } - - bool align_corners = false; - bool half_pixel_centers = false; - if (ins.size() == 6) { - align_corners = getScalarData<bool>(operands[ins[4]]); - half_pixel_centers = getScalarData<bool>(operands[ins[5]]); - } - if (align_corners && !half_pixel_centers) { - return V1_3::ErrorStatus::INVALID_ARGUMENT; - } - if (subgraph != nullptr) { - uint32_t flags = 0; - if (align_corners) { - flags |= XNN_FLAG_ALIGN_CORNERS; - } else if (!half_pixel_centers) { - flags |= XNN_FLAG_TENSORFLOW_LEGACY_MODE; - } - const xnn_status status = 
xnn_define_static_resize_bilinear_2d( - subgraph, new_height, new_width, - /*input_id=*/xnnpackTensors[ins[0]], - /*output_id=*/xnnpackTensors[outs[0]], flags); - if (status != xnn_status_success) { - LOG(ERROR) << "XNNPACK xnn_define_static_resize_bilinear_2d FAILED"; - return V1_3::ErrorStatus::GENERAL_FAILURE; - } - } - return V1_3::ErrorStatus::NONE; - } - - static V1_3::ErrorStatus VisitReluNode(xnn_subgraph_t subgraph, - const V1_3::Operation& operation, - RunTimeOperandInfo* operands, float outputMin, - float outputMax, - const std::vector<uint32_t>& xnnpackTensors) { - const hardware::hidl_vec<uint32_t>& ins = operation.inputs; - const hardware::hidl_vec<uint32_t>& outs = operation.outputs; - NN_DRIVER_RETURN_IF_ERROR(CheckTensorFloatType(operands[ins[0]].type)); - NN_DRIVER_RETURN_IF_ERROR(CheckTensorFloatType(operands[outs[0]].type)); - - if (subgraph != nullptr) { - const xnn_status status = - xnn_define_clamp(subgraph, outputMin, outputMax, - /*input_id=*/xnnpackTensors[ins[0]], - /*output_id=*/xnnpackTensors[outs[0]], /*flags=*/0); - if (status != xnn_status_success) { - LOG(ERROR) << "XNNPACK xnn_define_clamp FAILED"; - return V1_3::ErrorStatus::GENERAL_FAILURE; - } - } - return V1_3::ErrorStatus::NONE; - } - - static V1_3::ErrorStatus VisitSqrtNode(xnn_subgraph_t subgraph, - const V1_3::Operation& operation, - RunTimeOperandInfo* operands, - const std::vector<uint32_t>& xnnpackTensors) { - const hardware::hidl_vec<uint32_t>& ins = operation.inputs; - const hardware::hidl_vec<uint32_t>& outs = operation.outputs; - NN_DRIVER_RETURN_IF_ERROR(CheckTensorFloatType(operands[ins[0]].type)); - NN_DRIVER_RETURN_IF_ERROR(CheckTensorFloatType(operands[outs[0]].type)); - - if (subgraph != nullptr) { - const xnn_status status = - xnn_define_square_root(subgraph, - /*input1_id=*/xnnpackTensors[ins[0]], - /*output_id=*/xnnpackTensors[outs[0]], /*flags=*/0); - if (status != xnn_status_success) { - LOG(ERROR) << "XNNPACK xnn_define_bankers_rounding FAILED"; - return 
V1_3::ErrorStatus::GENERAL_FAILURE; - } - } - return V1_3::ErrorStatus::NONE; - } - - static V1_3::ErrorStatus VisitSubNode(xnn_subgraph_t subgraph, const V1_3::Operation& operation, - RunTimeOperandInfo* operands, - const std::vector<uint32_t>& xnnpackTensors) { - const hardware::hidl_vec<uint32_t>& ins = operation.inputs; - const hardware::hidl_vec<uint32_t>& outs = operation.outputs; - NN_DRIVER_RETURN_IF_ERROR(CheckTensorFloatType(operands[ins[0]].type)); - NN_DRIVER_RETURN_IF_ERROR(CheckTensorFloatType(operands[ins[1]].type)); - NN_DRIVER_RETURN_IF_ERROR(CheckTensorStaticAllocation(operands[ins[2]].lifetime)); - NN_DRIVER_RETURN_IF_ERROR(CheckTensorFloatType(operands[outs[0]].type)); - - float outputMin = -std::numeric_limits<float>::infinity(); - float outputMax = +std::numeric_limits<float>::infinity(); - int activation = getScalarData<int32_t>(operands[ins[2]]); - NN_DRIVER_RETURN_IF_ERROR( - ConvertActivationToOutputRange(activation, &outputMin, &outputMax)); - - if (subgraph != nullptr) { - const xnn_status status = - xnn_define_subtract(subgraph, outputMin, outputMax, - /*input1_id=*/xnnpackTensors[ins[0]], - /*input2_id=*/xnnpackTensors[ins[1]], - /*output_id=*/xnnpackTensors[outs[0]], /*flags=*/0); - if (status != xnn_status_success) { - LOG(ERROR) << "XNNPACK xnn_define_subtract FAILED"; - return V1_3::ErrorStatus::GENERAL_FAILURE; - } - } - return V1_3::ErrorStatus::NONE; - } - - static V1_3::ErrorStatus VisitSoftmaxNode(xnn_subgraph_t subgraph, - const V1_3::Operation& operation, - RunTimeOperandInfo* operands, - const std::vector<uint32_t>& xnnpackTensors) { - const hardware::hidl_vec<uint32_t>& ins = operation.inputs; - const hardware::hidl_vec<uint32_t>& outs = operation.outputs; - NN_DRIVER_RETURN_IF_ERROR(CheckTensorFloatType(operands[ins[0]].type)); - NN_DRIVER_RETURN_IF_ERROR(CheckTensorStaticAllocation(operands[ins[1]].lifetime)); - NN_DRIVER_RETURN_IF_ERROR(CheckTensorFloatType(operands[outs[0]].type)); - - float beta = 
getScalarData<float>(operands[ins[1]]); - if (beta != 1.0f) { - LOG(ERROR) << "XNNPACK VisitSoftmaxNode FAILED, unsupported beta value: " << beta; - return V1_3::ErrorStatus::INVALID_ARGUMENT; - } - if (ins.size() >= 3) { - NN_DRIVER_RETURN_IF_ERROR(CheckTensorStaticAllocation(operands[ins[2]].lifetime)); - int axis = getScalarData<int32_t>(operands[ins[2]]); - if (axis != -1) { - LOG(ERROR) << "XNNPACK VisitSoftmaxNode FAILED, unsupported axis value: " << axis; - return V1_3::ErrorStatus::INVALID_ARGUMENT; - } - } - if (subgraph != nullptr) { - const xnn_status status = - xnn_define_softmax(subgraph, /*input_id=*/xnnpackTensors[ins[0]], - /*output_id=*/xnnpackTensors[outs[0]], /*flags=*/0); - if (status != xnn_status_success) { - LOG(ERROR) << "XNNPACK xnn_define_softmax FAILED"; - return V1_3::ErrorStatus::GENERAL_FAILURE; - } - } - - return V1_3::ErrorStatus::NONE; - } - - private: - Subgraph(xnn_runtime_t runtime, std::unordered_set<uint32_t>&& externals, - bool useStaticBuffer = false) - : mRuntime(runtime, &xnn_delete_runtime), - mExternals(externals), - mUseStaticBuffer(useStaticBuffer) {} - - // XNNPACK Runtime (subgraph + workspace) with smart-pointer for lifetime - // management. 
- std::unique_ptr<xnn_runtime, decltype(&xnn_delete_runtime)> mRuntime{nullptr, - &xnn_delete_runtime}; - std::unordered_set<uint32_t> mExternals; - bool mFirstRun = true; - bool mUseStaticBuffer; -}; - -class SamplePreparedModelXNNPACK : public SamplePreparedModel { - public: - SamplePreparedModelXNNPACK(const V1_3::Model& model, const SampleDriver* driver, - V1_1::ExecutionPreference preference, uid_t userId, - V1_3::Priority priority) - : SamplePreparedModel(model, driver, preference, userId, priority), - mSubgraph(nullptr), - mThreadpool(nullptr) {} - ~SamplePreparedModelXNNPACK() { - delete mSubgraph; - pthreadpool_destroy(mThreadpool); - }; - bool initialize(); - hardware::Return<V1_0::ErrorStatus> execute( - const V1_0::Request& request, const sp<V1_0::IExecutionCallback>& callback) override; - hardware::Return<V1_0::ErrorStatus> execute_1_2( - const V1_0::Request& request, V1_2::MeasureTiming measure, - const sp<V1_2::IExecutionCallback>& callback) override; - hardware::Return<V1_3::ErrorStatus> execute_1_3( - const V1_3::Request& request, V1_2::MeasureTiming measure, - const V1_3::OptionalTimePoint& deadline, - const V1_3::OptionalTimeoutDuration& loopTimeoutDuration, - const sp<V1_3::IExecutionCallback>& callback) override; - hardware::Return<void> executeSynchronously(const V1_0::Request& request, - V1_2::MeasureTiming measure, - executeSynchronously_cb cb) override; - hardware::Return<void> executeSynchronously_1_3( - const V1_3::Request& request, V1_2::MeasureTiming measure, - const V1_3::OptionalTimePoint& deadline, - const V1_3::OptionalTimeoutDuration& loopTimeoutDuration, - executeSynchronously_1_3_cb cb) override; - hardware::Return<void> configureExecutionBurst( - const sp<V1_2::IBurstCallback>& callback, - const MQDescriptorSync<V1_2::FmqRequestDatum>& requestChannel, - const MQDescriptorSync<V1_2::FmqResultDatum>& resultChannel, - configureExecutionBurst_cb cb) override; - hardware::Return<void> executeFenced(const V1_3::Request& request, - 
const hardware::hidl_vec<hardware::hidl_handle>& wait_for, - V1_2::MeasureTiming measure, - const V1_3::OptionalTimePoint& deadline, - const V1_3::OptionalTimeoutDuration& loopTimeoutDuration, - const V1_3::OptionalTimeoutDuration& duration, - executeFenced_cb callback) override; - - private: - Subgraph* mSubgraph; - std::vector<RunTimeOperandInfo> mOperands; - pthreadpool* mThreadpool; -}; - -hardware::Return<void> SamplePreparedModelXNNPACK::configureExecutionBurst( - const sp<V1_2::IBurstCallback>& callback, - const MQDescriptorSync<V1_2::FmqRequestDatum>& requestChannel, - const MQDescriptorSync<V1_2::FmqResultDatum>& resultChannel, - configureExecutionBurst_cb cb) { - VLOG(DRIVER) << "SamplePreparedModelXNNPACK::configureExecutionBurst not supported"; - cb(V1_0::ErrorStatus::GENERAL_FAILURE, {}); - return hardware::Void(); -} - -bool SamplePreparedModelXNNPACK::initialize() { - auto status = SamplePreparedModel::initialize(); - mThreadpool = pthreadpool_create(kNumOfWorkerThreads); - if (mThreadpool == nullptr) { - VLOG(DRIVER) << "SamplePreparedModelXNNPACK::initialize failed to create pthreadpool, " - "fallback to single threaded execution"; - } - const V1_3::Model* model = getModel(); - mOperands = initializeRunTimeInfo(model->main, mPoolInfos, &model->operandValues); - mSubgraph = Subgraph::Create(model->main.operations, mOperands, model->main.inputIndexes, - model->main.outputIndexes, mThreadpool); - return status; -} - -template <typename T_IExecutionCallback> -void asyncExecuteXNNPACK(Subgraph* subgraph, RunTimeOperandInfo* operands, - const V1_3::Request& request, V1_2::MeasureTiming measure, - const V1_3::Model& model, const LegacyOptionalTimePoint& deadline, - const V1_3::OptionalTimeoutDuration& loopTimeoutDuration, - const sp<T_IExecutionCallback>& callback) { - std::vector<RunTimePoolInfo> requestPoolInfos; - if (!setRunTimePoolInfosFromMemoryPools(&requestPoolInfos, uncheckedConvert(request.pools))) { - notify(callback, 
V1_3::ErrorStatus::GENERAL_FAILURE, {}, kNoTiming); - } - updateForArguments(model.main.inputIndexes, request.inputs, requestPoolInfos, operands); - updateForArguments(model.main.outputIndexes, request.outputs, requestPoolInfos, operands); - auto status = subgraph->Invoke(operands); - VLOG(DRIVER) << "XNNPACK subgraph invoke returned " << toString(status); - if (status == V1_3::ErrorStatus::NONE) { - VLOG(DRIVER) << "Completed run normally"; - for (auto& runtimeInfo : requestPoolInfos) { - runtimeInfo.flush(); - } - } - notify(callback, status, {}, kNoTiming); -} - -template <typename T_IExecutionCallback> -V1_3::ErrorStatus executeXNNPACKBase(Subgraph* subgraph, RunTimeOperandInfo* operands, - const V1_3::Request& request, V1_2::MeasureTiming measure, - const V1_3::Model& model, - const V1_3::OptionalTimePoint& halDeadline, - const V1_3::OptionalTimeoutDuration& loopTimeoutDuration, - const sp<T_IExecutionCallback>& callback) { - VLOG(DRIVER) << "executeXNNPACKBase(" << SHOW_IF_DEBUG(toString(request)) << ")"; - - if (callback.get() == nullptr) { - LOG(ERROR) << "invalid callback passed to executeXNNPACKBase"; - return V1_3::ErrorStatus::INVALID_ARGUMENT; - } - if (!validateRequest(request, model, /*allowUnspecifiedOutput=*/false)) { - notify(callback, V1_3::ErrorStatus::INVALID_ARGUMENT, {}, kNoTiming); - return V1_3::ErrorStatus::INVALID_ARGUMENT; - } - const auto deadline = makeDeadline(halDeadline); - if (hasDeadlinePassed(deadline)) { - notify(callback, V1_3::ErrorStatus::MISSED_DEADLINE_PERSISTENT, {}, kNoTiming); - return V1_3::ErrorStatus::NONE; - } - - // This thread is intentionally detached because the sample driver service - // is expected to live forever. 
- std::thread([&subgraph, &operands, &model, request, measure, deadline, loopTimeoutDuration, - callback] { - asyncExecuteXNNPACK(subgraph, operands, request, measure, model, deadline, - loopTimeoutDuration, callback); - }).detach(); - - return V1_3::ErrorStatus::NONE; -} - -hardware::Return<V1_0::ErrorStatus> SamplePreparedModelXNNPACK::execute( - const V1_0::Request& request, const sp<V1_0::IExecutionCallback>& callback) { - const V1_3::Model* model = getModel(); - const V1_3::ErrorStatus status = - executeXNNPACKBase(mSubgraph, mOperands.data(), convertToV1_3(request), - V1_2::MeasureTiming::NO, *model, {}, {}, callback); - return convertToV1_0(status); -} - -hardware::Return<V1_0::ErrorStatus> SamplePreparedModelXNNPACK::execute_1_2( - const V1_0::Request& request, V1_2::MeasureTiming measure, - const sp<V1_2::IExecutionCallback>& callback) { - const V1_3::Model* model = getModel(); - const V1_3::ErrorStatus status = executeXNNPACKBase( - mSubgraph, mOperands.data(), convertToV1_3(request), measure, *model, {}, {}, callback); - return convertToV1_0(status); -} - -hardware::Return<V1_3::ErrorStatus> SamplePreparedModelXNNPACK::execute_1_3( - const V1_3::Request& request, V1_2::MeasureTiming measure, - const V1_3::OptionalTimePoint& deadline, - const V1_3::OptionalTimeoutDuration& loopTimeoutDuration, - const sp<V1_3::IExecutionCallback>& callback) { - const V1_3::Model* model = getModel(); - return executeXNNPACKBase(mSubgraph, mOperands.data(), request, measure, *model, deadline, - loopTimeoutDuration, callback); -} - -static std::tuple<V1_3::ErrorStatus, hardware::hidl_vec<V1_2::OutputShape>, V1_2::Timing> -executeSynchronouslyXNNPACKBase(Subgraph* subgraph, RunTimeOperandInfo* operands, - const V1_3::Request& request, V1_2::MeasureTiming measure, - const V1_3::Model& model, - const V1_3::OptionalTimePoint& halDeadline, - const V1_3::OptionalTimeoutDuration& loopTimeoutDuration) { - VLOG(DRIVER) << "executeSynchronouslyXNNPACKBase(" << 
SHOW_IF_DEBUG(toString(request)) << ")"; - - if (!validateRequest(request, model, /*allowUnspecifiedOutput=*/false)) { - return {V1_3::ErrorStatus::INVALID_ARGUMENT, {}, kNoTiming}; - } - const auto deadline = makeDeadline(halDeadline); - if (hasDeadlinePassed(deadline)) { - return {V1_3::ErrorStatus::MISSED_DEADLINE_PERSISTENT, {}, kNoTiming}; - } - - std::vector<RunTimePoolInfo> requestPoolInfos; - if (!setRunTimePoolInfosFromMemoryPools(&requestPoolInfos, uncheckedConvert(request.pools))) { - return {V1_3::ErrorStatus::GENERAL_FAILURE, {}, kNoTiming}; - } - updateForArguments(model.main.inputIndexes, request.inputs, requestPoolInfos, operands); - updateForArguments(model.main.outputIndexes, request.outputs, requestPoolInfos, operands); - VLOG(DRIVER) << "XNNPACK subgraph invoke started"; - auto status = subgraph->Invoke(operands); - VLOG(DRIVER) << "XNNPACK subgraph invoke returned " << toString(status); - if (status == V1_3::ErrorStatus::NONE) { - VLOG(DRIVER) << "Completed run normally"; - for (auto& runtimeInfo : requestPoolInfos) { - runtimeInfo.flush(); - } - } - return {status, {}, kNoTiming}; -} - -hardware::Return<void> SamplePreparedModelXNNPACK::executeSynchronously( - const V1_0::Request& request, V1_2::MeasureTiming measure, executeSynchronously_cb cb) { - const V1_3::Model* model = getModel(); - auto [status, outputShapes, timing] = executeSynchronouslyXNNPACKBase( - mSubgraph, mOperands.data(), convertToV1_3(request), measure, *model, {}, {}); - cb(convertToV1_0(status), std::move(outputShapes), timing); - return hardware::Void(); -} - -hardware::Return<void> SamplePreparedModelXNNPACK::executeSynchronously_1_3( - const V1_3::Request& request, V1_2::MeasureTiming measure, - const V1_3::OptionalTimePoint& deadline, - const V1_3::OptionalTimeoutDuration& loopTimeoutDuration, executeSynchronously_1_3_cb cb) { - const V1_3::Model* model = getModel(); - auto [status, outputShapes, timing] = executeSynchronouslyXNNPACKBase( - mSubgraph, mOperands.data(), 
request, measure, *model, deadline, loopTimeoutDuration); - cb(status, std::move(outputShapes), timing); - return hardware::Void(); -} - -// The sample driver will finish the execution and then return. -hardware::Return<void> SamplePreparedModelXNNPACK::executeFenced( - const V1_3::Request& request, const hardware::hidl_vec<hardware::hidl_handle>& waitFor, - V1_2::MeasureTiming measure, const V1_3::OptionalTimePoint& halDeadline, - const V1_3::OptionalTimeoutDuration& loopTimeoutDuration, - const V1_3::OptionalTimeoutDuration& duration, executeFenced_cb cb) { - VLOG(DRIVER) << "executeFenced(" << SHOW_IF_DEBUG(toString(request)) << ")"; - const V1_3::Model* model = getModel(); - if (!validateRequest(request, *model, /*allowUnspecifiedOutput=*/false)) { - cb(V1_3::ErrorStatus::INVALID_ARGUMENT, hardware::hidl_handle(nullptr), nullptr); - return hardware::Void(); - } - const auto deadline = makeDeadline(halDeadline); - if (hasDeadlinePassed(deadline)) { - cb(V1_3::ErrorStatus::MISSED_DEADLINE_PERSISTENT, hardware::hidl_handle(nullptr), nullptr); - return hardware::Void(); - } - - // Wait for the dependent events to signal - for (const auto& fenceHandle : waitFor) { - if (!fenceHandle.getNativeHandle()) { - cb(V1_3::ErrorStatus::INVALID_ARGUMENT, hardware::hidl_handle(nullptr), nullptr); - return hardware::Void(); - } - int syncFenceFd = fenceHandle.getNativeHandle()->data[0]; - if (syncWait(syncFenceFd, -1) != FenceState::SIGNALED) { - LOG(ERROR) << "syncWait failed"; - cb(V1_3::ErrorStatus::GENERAL_FAILURE, hardware::hidl_handle(nullptr), nullptr); - return hardware::Void(); - } - } - std::vector<RunTimePoolInfo> requestPoolInfos; - if (!setRunTimePoolInfosFromMemoryPools(&requestPoolInfos, uncheckedConvert(request.pools))) { - cb(V1_3::ErrorStatus::GENERAL_FAILURE, hardware::hidl_handle(nullptr), nullptr); - } - updateForArguments(model->main.inputIndexes, request.inputs, requestPoolInfos, - mOperands.data()); - updateForArguments(model->main.outputIndexes, 
request.outputs, requestPoolInfos, - mOperands.data()); - auto status = mSubgraph->Invoke(mOperands.data()); - VLOG(DRIVER) << "XNNPACK subgraph invoke returned " << toString(status); - if (status == V1_3::ErrorStatus::NONE) { - VLOG(DRIVER) << "Completed run normally"; - for (auto& runtimeInfo : requestPoolInfos) { - runtimeInfo.flush(); - } - } - - sp<SampleFencedExecutionCallback> fencedExecutionCallback = - new SampleFencedExecutionCallback(kNoTiming, kNoTiming, status); - cb(status, hardware::hidl_handle(nullptr), fencedExecutionCallback); - return hardware::Void(); -} - -class SampleDriverFloatXNNPACK : public SampleDriverPartial { - public: - SampleDriverFloatXNNPACK() : SampleDriverPartial("nnapi-sample_float_xnnpack") {} - hardware::Return<void> getCapabilities_1_3(getCapabilities_1_3_cb cb) override; - hardware::Return<V1_0::ErrorStatus> prepareModel( - const V1_0::Model& model, const sp<V1_0::IPreparedModelCallback>& callback) override; - hardware::Return<V1_0::ErrorStatus> prepareModel_1_1( - const V1_1::Model& model, V1_1::ExecutionPreference preference, - const sp<V1_0::IPreparedModelCallback>& callback) override; - hardware::Return<V1_0::ErrorStatus> prepareModel_1_2( - const V1_2::Model& model, V1_1::ExecutionPreference preference, - const hardware::hidl_vec<hardware::hidl_handle>& modelCache, - const hardware::hidl_vec<hardware::hidl_handle>& dataCache, const HalCacheToken& token, - const sp<V1_2::IPreparedModelCallback>& callback) override; - hardware::Return<V1_3::ErrorStatus> prepareModel_1_3( - const V1_3::Model& model, V1_1::ExecutionPreference preference, V1_3::Priority priority, - const V1_3::OptionalTimePoint& deadline, - const hardware::hidl_vec<hardware::hidl_handle>& modelCache, - const hardware::hidl_vec<hardware::hidl_handle>& dataCache, const HalCacheToken& token, - const sp<V1_3::IPreparedModelCallback>& callback) override; - hardware::Return<void> allocate( - const V1_3::BufferDesc& desc, - const 
hardware::hidl_vec<sp<V1_3::IPreparedModel>>& preparedModels, - const hardware::hidl_vec<V1_3::BufferRole>& inputRoles, - const hardware::hidl_vec<V1_3::BufferRole>& outputRoles, allocate_cb cb) override; - - private: - std::vector<bool> getSupportedOperationsImpl(const V1_3::Model& model) const override; -}; - -template <typename T_Model, typename T_IPreparedModelCallback> -V1_3::ErrorStatus prepareModelXNNPACK(const T_Model& model, const SampleDriver* driver, - V1_1::ExecutionPreference preference, V1_3::Priority priority, - const V1_3::OptionalTimePoint& deadline, - const sp<T_IPreparedModelCallback>& callback) { - const uid_t userId = hardware::IPCThreadState::self()->getCallingUid(); - if (callback.get() == nullptr) { - LOG(ERROR) << "invalid callback passed to prepareModelBase"; - return V1_3::ErrorStatus::INVALID_ARGUMENT; - } - if (VLOG_IS_ON(DRIVER)) { - VLOG(DRIVER) << "prepareModelBase"; - logModelToInfo(model); - } - if (!validateModel(model) || !validateExecutionPreference(preference) || - !validatePriority(priority)) { - notify(callback, V1_3::ErrorStatus::INVALID_ARGUMENT, nullptr); - return V1_3::ErrorStatus::INVALID_ARGUMENT; - } - - // asynchronously prepare the model from a new, detached thread - std::thread([model, driver, preference, userId, priority, callback] { - sp<SamplePreparedModelXNNPACK> preparedModel = new SamplePreparedModelXNNPACK( - convertToV1_3(model), driver, preference, userId, priority); - if (!preparedModel->initialize()) { - notify(callback, V1_3::ErrorStatus::INVALID_ARGUMENT, nullptr); - return; - } - notify(callback, V1_3::ErrorStatus::NONE, preparedModel); - }).detach(); - - return V1_3::ErrorStatus::NONE; -} - -hardware::Return<V1_0::ErrorStatus> SampleDriverFloatXNNPACK::prepareModel( - const V1_0::Model& model, const sp<V1_0::IPreparedModelCallback>& callback) { - const V1_3::ErrorStatus status = - prepareModelXNNPACK(model, this, V1_1::ExecutionPreference::FAST_SINGLE_ANSWER, - kDefaultPriority13, {}, callback); - 
return convertToV1_0(status); -} - -hardware::Return<V1_0::ErrorStatus> SampleDriverFloatXNNPACK::prepareModel_1_1( - const V1_1::Model& model, V1_1::ExecutionPreference preference, - const sp<V1_0::IPreparedModelCallback>& callback) { - const V1_3::ErrorStatus status = - prepareModelXNNPACK(model, this, preference, kDefaultPriority13, {}, callback); - return convertToV1_0(status); -} - -hardware::Return<V1_0::ErrorStatus> SampleDriverFloatXNNPACK::prepareModel_1_2( - const V1_2::Model& model, V1_1::ExecutionPreference preference, - const hardware::hidl_vec<hardware::hidl_handle>&, - const hardware::hidl_vec<hardware::hidl_handle>&, const HalCacheToken&, - const sp<V1_2::IPreparedModelCallback>& callback) { - const V1_3::ErrorStatus status = - prepareModelXNNPACK(model, this, preference, kDefaultPriority13, {}, callback); - return convertToV1_0(status); -} - -hardware::Return<V1_3::ErrorStatus> SampleDriverFloatXNNPACK::prepareModel_1_3( - const V1_3::Model& model, V1_1::ExecutionPreference preference, V1_3::Priority priority, - const V1_3::OptionalTimePoint& deadline, - const hardware::hidl_vec<hardware::hidl_handle>& modelCache, - const hardware::hidl_vec<hardware::hidl_handle>& dataCache, const HalCacheToken& token, - const sp<V1_3::IPreparedModelCallback>& callback) { - return prepareModelXNNPACK(model, this, preference, priority, deadline, callback); -} - -hardware::Return<void> SampleDriverFloatXNNPACK::getCapabilities_1_3(getCapabilities_1_3_cb cb) { - android::nn::initVLogMask(); - VLOG(DRIVER) << "SampleDriverFloatXNNPACK::getCapabilities()"; - - V1_3::Capabilities capabilities = { - .relaxedFloat32toFloat16PerformanceScalar = {.execTime = 0.7f, .powerUsage = 1.1f}, - .relaxedFloat32toFloat16PerformanceTensor = {.execTime = 0.7f, .powerUsage = 1.1f}, - .operandPerformance = nonExtensionOperandPerformance<HalVersion::V1_3>({1.0f, 1.0f}), - .ifPerformance = {.execTime = 1.0f, .powerUsage = 1.0f}, - .whilePerformance = {.execTime = 1.0f, .powerUsage = 1.0f}}; 
- update(&capabilities.operandPerformance, V1_3::OperandType::TENSOR_FLOAT32, - {.execTime = 0.8f, .powerUsage = 1.2f}); - update(&capabilities.operandPerformance, V1_3::OperandType::FLOAT32, - {.execTime = 0.8f, .powerUsage = 1.2f}); - - cb(V1_3::ErrorStatus::NONE, capabilities); - return hardware::Void(); -} - -std::vector<bool> SampleDriverFloatXNNPACK::getSupportedOperationsImpl( - const V1_3::Model& model) const { - std::vector<RunTimePoolInfo> poolInfos; - setRunTimePoolInfosFromCanonicalMemories(&poolInfos, uncheckedConvert(model.pools)); - auto operands = initializeRunTimeInfo(model.main, poolInfos, &model.operandValues); - const size_t count = model.main.operations.size(); - std::vector<bool> supported(count); - for (size_t i = 0; i < count; i++) { - bool isSupportedOp = false; - const V1_3::Operation& operation = model.main.operations[i]; - if (Subgraph::VisitNode(/*subgraph=*/nullptr, operation, operands.data(), {}) == - V1_3::ErrorStatus::NONE) { - isSupportedOp = true; - } - supported[i] = isSupportedOp; - } - return supported; -} - -hardware::Return<void> SampleDriverFloatXNNPACK::allocate( - const V1_3::BufferDesc& desc, - const hardware::hidl_vec<sp<V1_3::IPreparedModel>>& preparedModels, - const hardware::hidl_vec<V1_3::BufferRole>& inputRoles, - const hardware::hidl_vec<V1_3::BufferRole>& outputRoles, allocate_cb cb) { - VLOG(DRIVER) << "SampleDriverFloatXNNPACK::allocate not supported"; - constexpr uint32_t kInvalidBufferToken = 0; - cb(V1_3::ErrorStatus::INVALID_ARGUMENT, nullptr, kInvalidBufferToken); - return hardware::Void(); -} - -} // namespace sample_driver -} // namespace nn -} // namespace android - -using android::sp; -using android::nn::sample_driver::SampleDriverFloatXNNPACK; - -int main() { - sp<SampleDriverFloatXNNPACK> driver(new SampleDriverFloatXNNPACK()); - xnn_status status = xnn_initialize(/*allocator=*/nullptr); - if (status != xnn_status_success) { - return 0; - } - return driver->run(); -}
diff --git a/driver/sample/SampleDriverFull.cpp b/driver/sample/SampleDriverFull.cpp index c8fcde6..5635517 100644 --- a/driver/sample/SampleDriverFull.cpp +++ b/driver/sample/SampleDriverFull.cpp
@@ -18,44 +18,42 @@ #include "SampleDriverFull.h" -#include <Utils.h> -#include <ValidateHal.h> - #include <vector> +#include "Utils.h" +#include "ValidateHal.h" + namespace android { namespace nn { namespace sample_driver { -hardware::Return<void> SampleDriverFull::getCapabilities_1_3(getCapabilities_1_3_cb cb) { +using namespace hal; + +Return<void> SampleDriverFull::getCapabilities_1_3(getCapabilities_1_3_cb cb) { android::nn::initVLogMask(); VLOG(DRIVER) << "getCapabilities_1_3()"; - V1_3::Capabilities capabilities = { + Capabilities capabilities = { .relaxedFloat32toFloat16PerformanceScalar = mPerf, .relaxedFloat32toFloat16PerformanceTensor = mPerf, .operandPerformance = nonExtensionOperandPerformance<HalVersion::V1_3>(mPerf), .ifPerformance = mPerf, .whilePerformance = mPerf}; - cb(V1_3::ErrorStatus::NONE, capabilities); - return hardware::Void(); + cb(ErrorStatus::NONE, capabilities); + return Void(); } -hardware::Return<void> SampleDriverFull::getSupportedOperations_1_3( - const V1_3::Model& model, getSupportedOperations_1_3_cb cb) { +Return<void> SampleDriverFull::getSupportedOperations_1_3(const V1_3::Model& model, + getSupportedOperations_1_3_cb cb) { VLOG(DRIVER) << "getSupportedOperations_1_3()"; if (validateModel(model)) { const size_t count = model.main.operations.size(); std::vector<bool> supported(count, true); - for (size_t i = 0; i < count; i++) { - const V1_3::Operation& operation = model.main.operations[i]; - supported[i] = !isExtensionOperationType(operation.type); - } - cb(V1_3::ErrorStatus::NONE, supported); + cb(ErrorStatus::NONE, supported); } else { std::vector<bool> supported; - cb(V1_3::ErrorStatus::INVALID_ARGUMENT, supported); + cb(ErrorStatus::INVALID_ARGUMENT, supported); } - return hardware::Void(); + return Void(); } } // namespace sample_driver
diff --git a/driver/sample/SampleDriverFull.h b/driver/sample/SampleDriverFull.h index ed4910d..155463a 100644 --- a/driver/sample/SampleDriverFull.h +++ b/driver/sample/SampleDriverFull.h
@@ -17,8 +17,7 @@ #ifndef ANDROID_FRAMEWORKS_ML_NN_DRIVER_SAMPLE_SAMPLE_DRIVER_FULL_H #define ANDROID_FRAMEWORKS_ML_NN_DRIVER_SAMPLE_SAMPLE_DRIVER_FULL_H -#include <HalInterfaces.h> - +#include "HalInterfaces.h" #include "SampleDriver.h" namespace android { @@ -27,14 +26,14 @@ class SampleDriverFull : public SampleDriver { public: - SampleDriverFull(const char* name, V1_0::PerformanceInfo perf) + SampleDriverFull(const char* name, hal::PerformanceInfo perf) : SampleDriver(name), mPerf(perf) {} - hardware::Return<void> getCapabilities_1_3(getCapabilities_1_3_cb cb) override; - hardware::Return<void> getSupportedOperations_1_3(const V1_3::Model& model, - getSupportedOperations_1_3_cb cb) override; + hal::Return<void> getCapabilities_1_3(getCapabilities_1_3_cb cb) override; + hal::Return<void> getSupportedOperations_1_3(const hal::V1_3::Model& model, + getSupportedOperations_1_3_cb cb) override; private: - V1_0::PerformanceInfo mPerf; + hal::PerformanceInfo mPerf; }; } // namespace sample_driver
diff --git a/driver/sample/SampleDriverMinimal.cpp b/driver/sample/SampleDriverMinimal.cpp index 6456b57..15b59dc 100644 --- a/driver/sample/SampleDriverMinimal.cpp +++ b/driver/sample/SampleDriverMinimal.cpp
@@ -16,46 +16,50 @@ #define LOG_TAG "SampleDriverMinimal" -#include <HalInterfaces.h> -#include <Utils.h> #include <android-base/logging.h> +#include <hidl/LegacySupport.h> #include <thread> #include <vector> +#include "HalInterfaces.h" #include "NeuralNetworksOEM.h" #include "SampleDriverPartial.h" +#include "Utils.h" +#include "ValidateHal.h" namespace android { namespace nn { namespace sample_driver { +using namespace hal; + class SampleDriverMinimal : public SampleDriverPartial { public: SampleDriverMinimal() : SampleDriverPartial("nnapi-sample_minimal") {} - hardware::Return<void> getCapabilities_1_3(getCapabilities_1_3_cb cb) override; + Return<void> getCapabilities_1_3(getCapabilities_1_3_cb cb) override; private: std::vector<bool> getSupportedOperationsImpl(const V1_3::Model& model) const override; }; -hardware::Return<void> SampleDriverMinimal::getCapabilities_1_3(getCapabilities_1_3_cb cb) { +Return<void> SampleDriverMinimal::getCapabilities_1_3(getCapabilities_1_3_cb cb) { android::nn::initVLogMask(); VLOG(DRIVER) << "getCapabilities()"; - V1_3::Capabilities capabilities = { + Capabilities capabilities = { .relaxedFloat32toFloat16PerformanceScalar = {.execTime = 0.4f, .powerUsage = 0.5f}, .relaxedFloat32toFloat16PerformanceTensor = {.execTime = 0.4f, .powerUsage = 0.5f}, .operandPerformance = nonExtensionOperandPerformance<HalVersion::V1_3>({1.0f, 1.0f}), .ifPerformance = {.execTime = 1.0f, .powerUsage = 1.0f}, .whilePerformance = {.execTime = 1.0f, .powerUsage = 1.0f}}; - update(&capabilities.operandPerformance, V1_3::OperandType::TENSOR_FLOAT32, + update(&capabilities.operandPerformance, OperandType::TENSOR_FLOAT32, {.execTime = 0.4f, .powerUsage = 0.5f}); - update(&capabilities.operandPerformance, V1_3::OperandType::FLOAT32, + update(&capabilities.operandPerformance, OperandType::FLOAT32, {.execTime = 0.4f, .powerUsage = 0.5f}); - cb(V1_3::ErrorStatus::NONE, capabilities); - return hardware::Void(); + cb(ErrorStatus::NONE, capabilities); + return 
Void(); } std::vector<bool> SampleDriverMinimal::getSupportedOperationsImpl(const V1_3::Model& model) const { @@ -64,13 +68,13 @@ // Simulate supporting just a few ops for (size_t i = 0; i < count; i++) { supported[i] = false; - const V1_3::Operation& operation = model.main.operations[i]; + const Operation& operation = model.main.operations[i]; switch (operation.type) { - case V1_3::OperationType::ADD: - case V1_3::OperationType::CONCATENATION: - case V1_3::OperationType::CONV_2D: { - const V1_3::Operand& firstOperand = model.main.operands[operation.inputs[0]]; - if (firstOperand.type == V1_3::OperandType::TENSOR_FLOAT32) { + case OperationType::ADD: + case OperationType::CONCATENATION: + case OperationType::CONV_2D: { + const Operand& firstOperand = model.main.operands[operation.inputs[0]]; + if (firstOperand.type == OperandType::TENSOR_FLOAT32) { supported[i] = true; } break;
diff --git a/driver/sample/SampleDriverPartial.cpp b/driver/sample/SampleDriverPartial.cpp index 71d5a3a..2ba3d9a 100644 --- a/driver/sample/SampleDriverPartial.cpp +++ b/driver/sample/SampleDriverPartial.cpp
@@ -18,37 +18,40 @@ #include "SampleDriverPartial.h" -#include <HalInterfaces.h> -#include <Utils.h> -#include <ValidateHal.h> #include <android-base/logging.h> +#include <hidl/LegacySupport.h> #include <thread> #include <vector> +#include "HalInterfaces.h" #include "SampleDriverUtils.h" +#include "Utils.h" +#include "ValidateHal.h" namespace android { namespace nn { namespace sample_driver { -hardware::Return<void> SampleDriverPartial::getSupportedOperations_1_3( - const V1_3::Model& model, getSupportedOperations_1_3_cb cb) { +using namespace hal; + +Return<void> SampleDriverPartial::getSupportedOperations_1_3(const V1_3::Model& model, + getSupportedOperations_1_3_cb cb) { VLOG(DRIVER) << "getSupportedOperations()"; if (validateModel(model)) { std::vector<bool> supported = getSupportedOperationsImpl(model); - cb(V1_3::ErrorStatus::NONE, supported); + cb(ErrorStatus::NONE, supported); } else { std::vector<bool> supported; - cb(V1_3::ErrorStatus::INVALID_ARGUMENT, supported); + cb(ErrorStatus::INVALID_ARGUMENT, supported); } - return hardware::Void(); + return Void(); } -hardware::Return<V1_3::ErrorStatus> SampleDriverPartial::prepareModel_1_3( - const V1_3::Model& model, V1_1::ExecutionPreference preference, V1_3::Priority priority, - const V1_3::OptionalTimePoint& deadline, const hardware::hidl_vec<hardware::hidl_handle>&, - const hardware::hidl_vec<hardware::hidl_handle>&, const HalCacheToken&, +Return<ErrorStatus> SampleDriverPartial::prepareModel_1_3( + const V1_3::Model& model, ExecutionPreference preference, Priority priority, + const OptionalTimePoint& deadline, const hidl_vec<hidl_handle>&, + const hidl_vec<hidl_handle>&, const CacheToken&, const sp<V1_3::IPreparedModelCallback>& callback) { std::vector<bool> supported = getSupportedOperationsImpl(model); bool isModelFullySupported =
diff --git a/driver/sample/SampleDriverPartial.h b/driver/sample/SampleDriverPartial.h index 6ef1b08..17a86a3 100644 --- a/driver/sample/SampleDriverPartial.h +++ b/driver/sample/SampleDriverPartial.h
@@ -14,17 +14,16 @@ * limitations under the License. */ -#ifndef ANDROID_FRAMEWORKS_ML_NN_DRIVER_SAMPLE_SAMPLE_DRIVER_PARTIAL_H -#define ANDROID_FRAMEWORKS_ML_NN_DRIVER_SAMPLE_SAMPLE_DRIVER_PARTIAL_H - -#include <HalInterfaces.h> -#include <Utils.h> #include <android-base/logging.h> +#include <hidl/LegacySupport.h> #include <thread> #include <vector> +#include "HalInterfaces.h" #include "SampleDriver.h" +#include "Utils.h" +#include "ValidateHal.h" namespace android { namespace nn { @@ -39,23 +38,21 @@ SampleDriverPartial(const char* name, const IOperationResolver* operationResolver = BuiltinOperationResolver::get()) : SampleDriver(name, operationResolver) {} - hardware::Return<void> getSupportedOperations_1_3(const V1_3::Model& model, - getSupportedOperations_1_3_cb cb) override; - hardware::Return<V1_3::ErrorStatus> prepareModel_1_3( - const V1_3::Model& model, V1_1::ExecutionPreference preference, V1_3::Priority priority, - const V1_3::OptionalTimePoint& deadline, - const hardware::hidl_vec<hardware::hidl_handle>& modelCache, - const hardware::hidl_vec<hardware::hidl_handle>& dataCache, const HalCacheToken& token, - const sp<V1_3::IPreparedModelCallback>& callback) override; + hal::Return<void> getSupportedOperations_1_3(const hal::V1_3::Model& model, + getSupportedOperations_1_3_cb cb) override; + hal::Return<hal::ErrorStatus> prepareModel_1_3( + const hal::V1_3::Model& model, hal::ExecutionPreference preference, + hal::Priority priority, const hal::OptionalTimePoint& deadline, + const hal::hidl_vec<hal::hidl_handle>& modelCache, + const hal::hidl_vec<hal::hidl_handle>& dataCache, const hal::CacheToken& token, + const sp<hal::V1_3::IPreparedModelCallback>& callback) override; protected: // Given a valid NNAPI Model returns a boolean vector that indicates which // ops in the model are supported by a driver. 
- virtual std::vector<bool> getSupportedOperationsImpl(const V1_3::Model& model) const = 0; + virtual std::vector<bool> getSupportedOperationsImpl(const hal::V1_3::Model& model) const = 0; }; } // namespace sample_driver } // namespace nn } // namespace android - -#endif // ANDROID_FRAMEWORKS_ML_NN_DRIVER_SAMPLE_SAMPLE_DRIVER_PARTIAL_H
diff --git a/driver/sample/SampleDriverQuant.cpp b/driver/sample/SampleDriverQuant.cpp index 89ac7aa..39d02a6 100644 --- a/driver/sample/SampleDriverQuant.cpp +++ b/driver/sample/SampleDriverQuant.cpp
@@ -16,59 +16,62 @@ #define LOG_TAG "SampleDriverQuant" -#include <HalInterfaces.h> -#include <Utils.h> #include <android-base/logging.h> #include <hidl/LegacySupport.h> #include <thread> #include <vector> +#include "HalInterfaces.h" #include "SampleDriverPartial.h" +#include "Utils.h" +#include "ValidateHal.h" namespace android { namespace nn { namespace sample_driver { +using namespace hal; + class SampleDriverQuant : public SampleDriverPartial { public: SampleDriverQuant() : SampleDriverPartial("nnapi-sample_quant") {} - hardware::Return<void> getCapabilities_1_3(getCapabilities_1_3_cb cb) override; + Return<void> getCapabilities_1_3(getCapabilities_1_3_cb cb) override; private: std::vector<bool> getSupportedOperationsImpl(const V1_3::Model& model) const override; }; -hardware::Return<void> SampleDriverQuant::getCapabilities_1_3(getCapabilities_1_3_cb cb) { +Return<void> SampleDriverQuant::getCapabilities_1_3(getCapabilities_1_3_cb cb) { android::nn::initVLogMask(); VLOG(DRIVER) << "getCapabilities()"; - V1_3::Capabilities capabilities = { + Capabilities capabilities = { .relaxedFloat32toFloat16PerformanceScalar = {.execTime = 50.0f, .powerUsage = 1.0f}, .relaxedFloat32toFloat16PerformanceTensor = {.execTime = 50.0f, .powerUsage = 1.0f}, .operandPerformance = nonExtensionOperandPerformance<HalVersion::V1_3>({50.0f, 1.0f}), .ifPerformance = {.execTime = 50.0f, .powerUsage = 1.0f}, .whilePerformance = {.execTime = 50.0f, .powerUsage = 1.0f}}; - cb(V1_3::ErrorStatus::NONE, capabilities); - return hardware::Void(); + cb(ErrorStatus::NONE, capabilities); + return Void(); } -static bool isQuantized(V1_3::OperandType opType) { - return opType == V1_3::OperandType::TENSOR_QUANT8_ASYMM || - opType == V1_3::OperandType::TENSOR_QUANT8_ASYMM_SIGNED; +static bool isQuantized(OperandType opType) { + return opType == OperandType::TENSOR_QUANT8_ASYMM || + opType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED; } std::vector<bool> SampleDriverQuant::getSupportedOperationsImpl(const 
V1_3::Model& model) const { const size_t count = model.main.operations.size(); std::vector<bool> supported(count); for (size_t i = 0; i < count; i++) { - const V1_3::Operation& operation = model.main.operations[i]; - if (!isExtensionOperationType(operation.type) && operation.inputs.size() > 0) { - const V1_3::Operand& firstOperand = model.main.operands[operation.inputs[0]]; + const Operation& operation = model.main.operations[i]; + if (operation.inputs.size() > 0) { + const Operand& firstOperand = model.main.operands[operation.inputs[0]]; supported[i] = isQuantized(firstOperand.type); - if (operation.type == V1_3::OperationType::SELECT) { - const V1_3::Operand& secondOperand = model.main.operands[operation.inputs[1]]; + if (operation.type == OperationType::SELECT) { + const Operand& secondOperand = model.main.operands[operation.inputs[1]]; supported[i] = isQuantized(secondOperand.type); } }
diff --git a/driver/sample/SampleDriverSL.cpp b/driver/sample/SampleDriverSL.cpp deleted file mode 100644 index bccef3e..0000000 --- a/driver/sample/SampleDriverSL.cpp +++ /dev/null
@@ -1,30 +0,0 @@ -/* - * Copyright (C) 2021 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include <CanonicalDevice.h> -#include <nnapi/IDevice.h> - -#include <memory> -#include <vector> - -namespace android::nn { - -std::vector<SharedDevice> getDevices() { - auto device = std::make_shared<sample::Device>("nnapi-sample_sl"); - return {device}; -} - -} // namespace android::nn
diff --git a/driver/sample/SampleDriverUtils.cpp b/driver/sample/SampleDriverUtils.cpp index 08fd47d..7cccf92 100644 --- a/driver/sample/SampleDriverUtils.cpp +++ b/driver/sample/SampleDriverUtils.cpp
@@ -15,16 +15,17 @@ */ #include "SampleDriverUtils.h" +#include "SampleDriver.h" #include <android-base/logging.h> -#include "SampleDriver.h" - namespace android { namespace nn { namespace sample_driver { -void notify(const sp<V1_0::IPreparedModelCallback>& callback, const V1_3::ErrorStatus& status, +using namespace hal; + +void notify(const sp<V1_0::IPreparedModelCallback>& callback, const ErrorStatus& status, const sp<SamplePreparedModel>& preparedModel) { const auto ret = callback->notify(convertToV1_0(status), preparedModel); if (!ret.isOk()) { @@ -32,7 +33,7 @@ } } -void notify(const sp<V1_2::IPreparedModelCallback>& callback, const V1_3::ErrorStatus& status, +void notify(const sp<V1_2::IPreparedModelCallback>& callback, const ErrorStatus& status, const sp<SamplePreparedModel>& preparedModel) { const auto ret = callback->notify_1_2(convertToV1_0(status), preparedModel); if (!ret.isOk()) { @@ -41,7 +42,7 @@ } } -void notify(const sp<V1_3::IPreparedModelCallback>& callback, const V1_3::ErrorStatus& status, +void notify(const sp<V1_3::IPreparedModelCallback>& callback, const ErrorStatus& status, const sp<SamplePreparedModel>& preparedModel) { const auto ret = callback->notify_1_3(status, preparedModel); if (!ret.isOk()) { @@ -50,24 +51,24 @@ } } -void notify(const sp<V1_0::IExecutionCallback>& callback, const V1_3::ErrorStatus& status, - const hardware::hidl_vec<V1_2::OutputShape>&, V1_2::Timing) { +void notify(const sp<V1_0::IExecutionCallback>& callback, const ErrorStatus& status, + const hidl_vec<OutputShape>&, Timing) { const auto ret = callback->notify(convertToV1_0(status)); if (!ret.isOk()) { LOG(ERROR) << "Error when calling IExecutionCallback::notify: " << ret.description(); } } -void notify(const sp<V1_2::IExecutionCallback>& callback, const V1_3::ErrorStatus& status, - const hardware::hidl_vec<V1_2::OutputShape>& outputShapes, V1_2::Timing timing) { +void notify(const sp<V1_2::IExecutionCallback>& callback, const ErrorStatus& status, + const 
hidl_vec<OutputShape>& outputShapes, Timing timing) { const auto ret = callback->notify_1_2(convertToV1_0(status), outputShapes, timing); if (!ret.isOk()) { LOG(ERROR) << "Error when calling IExecutionCallback::notify_1_2: " << ret.description(); } } -void notify(const sp<V1_3::IExecutionCallback>& callback, const V1_3::ErrorStatus& status, - const hardware::hidl_vec<V1_2::OutputShape>& outputShapes, V1_2::Timing timing) { +void notify(const sp<V1_3::IExecutionCallback>& callback, const ErrorStatus& status, + const hidl_vec<OutputShape>& outputShapes, Timing timing) { const auto ret = callback->notify_1_3(status, outputShapes, timing); if (!ret.isOk()) { LOG(ERROR) << "Error when calling IExecutionCallback::notify_1_3" << ret.description();
diff --git a/driver/sample/SampleDriverUtils.h b/driver/sample/SampleDriverUtils.h index 6e30ac8..d5a87a1 100644 --- a/driver/sample/SampleDriverUtils.h +++ b/driver/sample/SampleDriverUtils.h
@@ -14,48 +14,45 @@ * limitations under the License. */ -#ifndef ANDROID_FRAMEWORKS_ML_NN_DRIVER_SAMPLE_SAMPLE_DRIVER_UTILS_H -#define ANDROID_FRAMEWORKS_ML_NN_DRIVER_SAMPLE_SAMPLE_DRIVER_UTILS_H - -#include <HalInterfaces.h> #include <hwbinder/IPCThreadState.h> #include <thread> +#include "HalInterfaces.h" #include "SampleDriver.h" namespace android { namespace nn { namespace sample_driver { -void notify(const sp<V1_0::IPreparedModelCallback>& callback, const V1_3::ErrorStatus& status, +void notify(const sp<hal::V1_0::IPreparedModelCallback>& callback, const hal::ErrorStatus& status, const sp<SamplePreparedModel>& preparedModel); -void notify(const sp<V1_2::IPreparedModelCallback>& callback, const V1_3::ErrorStatus& status, +void notify(const sp<hal::V1_2::IPreparedModelCallback>& callback, const hal::ErrorStatus& status, const sp<SamplePreparedModel>& preparedModel); -void notify(const sp<V1_3::IPreparedModelCallback>& callback, const V1_3::ErrorStatus& status, +void notify(const sp<hal::V1_3::IPreparedModelCallback>& callback, const hal::ErrorStatus& status, const sp<SamplePreparedModel>& preparedModel); -void notify(const sp<V1_0::IExecutionCallback>& callback, const V1_3::ErrorStatus& status, - const hardware::hidl_vec<V1_2::OutputShape>&, V1_2::Timing); +void notify(const sp<hal::V1_0::IExecutionCallback>& callback, const hal::ErrorStatus& status, + const hal::hidl_vec<hal::OutputShape>&, hal::Timing); -void notify(const sp<V1_2::IExecutionCallback>& callback, const V1_3::ErrorStatus& status, - const hardware::hidl_vec<V1_2::OutputShape>& outputShapes, V1_2::Timing timing); +void notify(const sp<hal::V1_2::IExecutionCallback>& callback, const hal::ErrorStatus& status, + const hal::hidl_vec<hal::OutputShape>& outputShapes, hal::Timing timing); -void notify(const sp<V1_3::IExecutionCallback>& callback, const V1_3::ErrorStatus& status, - const hardware::hidl_vec<V1_2::OutputShape>& outputShapes, V1_2::Timing timing); +void notify(const 
sp<hal::V1_3::IExecutionCallback>& callback, const hal::ErrorStatus& status, + const hal::hidl_vec<hal::OutputShape>& outputShapes, hal::Timing timing); template <typename T_Model, typename T_IPreparedModelCallback> -V1_3::ErrorStatus prepareModelBase(const T_Model& model, const SampleDriver* driver, - V1_1::ExecutionPreference preference, V1_3::Priority priority, - const V1_3::OptionalTimePoint& halDeadline, - const sp<T_IPreparedModelCallback>& callback, - bool isFullModelSupported = true) { +hal::ErrorStatus prepareModelBase(const T_Model& model, const SampleDriver* driver, + hal::ExecutionPreference preference, hal::Priority priority, + const hal::OptionalTimePoint& halDeadline, + const sp<T_IPreparedModelCallback>& callback, + bool isFullModelSupported = true) { const uid_t userId = hardware::IPCThreadState::self()->getCallingUid(); if (callback.get() == nullptr) { LOG(ERROR) << "invalid callback passed to prepareModelBase"; - return V1_3::ErrorStatus::INVALID_ARGUMENT; + return hal::ErrorStatus::INVALID_ARGUMENT; } if (VLOG_IS_ON(DRIVER)) { VLOG(DRIVER) << "prepareModelBase"; @@ -63,17 +60,17 @@ } if (!validateModel(model) || !validateExecutionPreference(preference) || !validatePriority(priority)) { - notify(callback, V1_3::ErrorStatus::INVALID_ARGUMENT, nullptr); - return V1_3::ErrorStatus::INVALID_ARGUMENT; + notify(callback, hal::ErrorStatus::INVALID_ARGUMENT, nullptr); + return hal::ErrorStatus::INVALID_ARGUMENT; } if (!isFullModelSupported) { - notify(callback, V1_3::ErrorStatus::INVALID_ARGUMENT, nullptr); - return V1_3::ErrorStatus::NONE; + notify(callback, hal::ErrorStatus::INVALID_ARGUMENT, nullptr); + return hal::ErrorStatus::NONE; } const auto deadline = makeDeadline(halDeadline); if (hasDeadlinePassed(deadline)) { - notify(callback, V1_3::ErrorStatus::MISSED_DEADLINE_PERSISTENT, nullptr); - return V1_3::ErrorStatus::NONE; + notify(callback, hal::ErrorStatus::MISSED_DEADLINE_PERSISTENT, nullptr); + return hal::ErrorStatus::NONE; } // asynchronously 
prepare the model from a new, detached thread @@ -81,17 +78,15 @@ sp<SamplePreparedModel> preparedModel = new SamplePreparedModel(convertToV1_3(model), driver, preference, userId, priority); if (!preparedModel->initialize()) { - notify(callback, V1_3::ErrorStatus::INVALID_ARGUMENT, nullptr); + notify(callback, hal::ErrorStatus::INVALID_ARGUMENT, nullptr); return; } - notify(callback, V1_3::ErrorStatus::NONE, preparedModel); + notify(callback, hal::ErrorStatus::NONE, preparedModel); }).detach(); - return V1_3::ErrorStatus::NONE; + return hal::ErrorStatus::NONE; } } // namespace sample_driver } // namespace nn } // namespace android - -#endif // ANDROID_FRAMEWORKS_ML_NN_DRIVER_SAMPLE_SAMPLE_DRIVER_UTILS_H
diff --git a/driver/sample/config/android.hardware.neuralnetworks@1.3-service-sample-float-xnnpack.rc b/driver/sample/config/android.hardware.neuralnetworks@1.3-service-sample-float-xnnpack.rc deleted file mode 100644 index 39d22ca..0000000 --- a/driver/sample/config/android.hardware.neuralnetworks@1.3-service-sample-float-xnnpack.rc +++ /dev/null
@@ -1,5 +0,0 @@ -service neuralnetworks_hal_service_sample_float_xnnpack /vendor/bin/hw/[email protected] - class hal - user system - group system - task_profiles NNApiHALPerformance
diff --git a/driver/sample/config/android.hardware.neuralnetworks@1.3-service-sample-float-xnnpack.xml b/driver/sample/config/android.hardware.neuralnetworks@1.3-service-sample-float-xnnpack.xml deleted file mode 100644 index 71b5a08..0000000 --- a/driver/sample/config/android.hardware.neuralnetworks@1.3-service-sample-float-xnnpack.xml +++ /dev/null
@@ -1,7 +0,0 @@ -<manifest version="1.0" type="device"> - <hal format="hidl"> - <name>android.hardware.neuralnetworks</name> - <transport>hwbinder</transport> - <fqname>@1.3::IDevice/nnapi-sample_float_xnnpack</fqname> - </hal> -</manifest>
diff --git a/driver/sample_aidl/Android.bp b/driver/sample_aidl/Android.bp deleted file mode 100644 index 21b9905..0000000 --- a/driver/sample_aidl/Android.bp +++ /dev/null
@@ -1,109 +0,0 @@ -/* - * Copyright 2021 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package { - default_applicable_licenses: ["Android-Apache-2.0"], -} - -cc_defaults { - name: "NeuralNetworksSampleDriverAidl_defaults", - defaults: ["neuralnetworks_defaults"], - // b/109953668, disable OpenMP - // openmp: true, - srcs: [ - "SampleDriver.cpp", - "SampleDriverFull.cpp", - "SampleDriverPartial.cpp", - "SampleDriverUtils.cpp", - ], - header_libs: [ - "libneuralnetworks_headers", - ], - shared_libs: [ - "android.hardware.neuralnetworks-V1-ndk_platform", - "[email protected]", - "[email protected]", - "[email protected]", - "[email protected]", - "[email protected]", - "[email protected]", - "libbase", - "libbinder_ndk", - "libcutils", - "libdl", - "libfmq", - "libhardware", - "libhidlbase", - "libhidlmemory", - "liblog", - "libnativewindow", - "libtextclassifier_hash", - "libutils", - ], - static_libs: [ - "libaidlcommonsupport", - "libneuralnetworks_common", - "neuralnetworks_utils_hal_aidl", - "neuralnetworks_utils_hal_common", - ], -} - -cc_defaults { - name: "NeuralNetworksSampleDriverAidl_server_defaults", - defaults: ["NeuralNetworksSampleDriverAidl_defaults"], - relative_install_path: "hw", - proprietary: true, -} - -cc_binary { - name: "android.hardware.neuralnetworks-service-sample-all", - srcs: ["SampleDriverAll.cpp"], - defaults: ["NeuralNetworksSampleDriverAidl_server_defaults"], - init_rc: 
["config/android.hardware.neuralnetworks-service-sample-all.rc"], - vintf_fragments: ["config/android.hardware.neuralnetworks-service-sample-all.xml"], -} - -cc_binary { - name: "android.hardware.neuralnetworks-service-sample-float-fast", - srcs: ["SampleDriverFloatFast.cpp"], - defaults: ["NeuralNetworksSampleDriverAidl_server_defaults"], - init_rc: ["config/android.hardware.neuralnetworks-service-sample-float-fast.rc"], - vintf_fragments: ["config/android.hardware.neuralnetworks-service-sample-float-fast.xml"], -} - -cc_binary { - name: "android.hardware.neuralnetworks-service-sample-float-slow", - srcs: ["SampleDriverFloatSlow.cpp"], - defaults: ["NeuralNetworksSampleDriverAidl_server_defaults"], - init_rc: ["config/android.hardware.neuralnetworks-service-sample-float-slow.rc"], - vintf_fragments: ["config/android.hardware.neuralnetworks-service-sample-float-slow.xml"], -} - -cc_binary { - name: "android.hardware.neuralnetworks-service-sample-quant", - srcs: ["SampleDriverQuant.cpp"], - defaults: ["NeuralNetworksSampleDriverAidl_server_defaults"], - init_rc: ["config/android.hardware.neuralnetworks-service-sample-quant.rc"], - vintf_fragments: ["config/android.hardware.neuralnetworks-service-sample-quant.xml"], -} - -cc_binary { - name: "android.hardware.neuralnetworks-service-sample-minimal", - srcs: ["SampleDriverMinimal.cpp"], - defaults: ["NeuralNetworksSampleDriverAidl_server_defaults"], - init_rc: ["config/android.hardware.neuralnetworks-service-sample-minimal.rc"], - vintf_fragments: ["config/android.hardware.neuralnetworks-service-sample-minimal.xml"], -}
diff --git a/driver/sample_aidl/SampleDriver.cpp b/driver/sample_aidl/SampleDriver.cpp deleted file mode 100644 index d1bf85c..0000000 --- a/driver/sample_aidl/SampleDriver.cpp +++ /dev/null
@@ -1,641 +0,0 @@ -/* - * Copyright (C) 2017 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#define LOG_TAG "SampleDriver" - -#include "SampleDriver.h" - -#include <android-base/logging.h> -#include <android-base/properties.h> -#include <android-base/scopeguard.h> -#include <android/binder_auto_utils.h> -#include <android/binder_interface_utils.h> -#include <android/binder_manager.h> -#include <android/binder_process.h> -#include <nnapi/Result.h> -#include <nnapi/Types.h> -#include <nnapi/Validation.h> -#include <nnapi/hal/aidl/Conversions.h> -#include <nnapi/hal/aidl/Utils.h> - -#include <algorithm> -#include <chrono> -#include <map> -#include <memory> -#include <optional> -#include <set> -#include <string> -#include <thread> -#include <tuple> -#include <utility> -#include <variant> -#include <vector> - -#include "AidlBufferTracker.h" -#include "AidlHalUtils.h" -#include "CpuExecutor.h" -#include "SampleDriverUtils.h" -#include "Tracing.h" -#include "Utils.h" - -namespace android { -namespace nn { -namespace sample_driver { - -namespace { - -int64_t nanosecondsDuration(TimePoint end, TimePoint start) { - return std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count(); -}; - -constexpr aidl_hal::Timing kNoTiming = {.timeOnDeviceNs = -1, .timeInDriverNs = -1}; - -} // namespace - -ndk::ScopedAStatus SampleDriver::getVersionString(std::string* versionString) { - NNTRACE_FULL(NNTRACE_LAYER_DRIVER, 
NNTRACE_PHASE_INITIALIZATION, - "SampleDriver::getVersionString"); - *versionString = "JUST_AN_EXAMPLE"; - return ndk::ScopedAStatus::ok(); -} - -ndk::ScopedAStatus SampleDriver::getType(aidl_hal::DeviceType* deviceType) { - NNTRACE_FULL(NNTRACE_LAYER_DRIVER, NNTRACE_PHASE_INITIALIZATION, "SampleDriver::getType"); - *deviceType = aidl_hal::DeviceType::CPU; - return ndk::ScopedAStatus::ok(); -} - -ndk::ScopedAStatus SampleDriver::getSupportedExtensions( - std::vector<aidl_hal::Extension>* supportedExtensions) { - NNTRACE_FULL(NNTRACE_LAYER_DRIVER, NNTRACE_PHASE_INITIALIZATION, - "SampleDriver::getSupportedExtensions"); - *supportedExtensions = {/* No extensions. */}; - return ndk::ScopedAStatus::ok(); -} - -ndk::ScopedAStatus SampleDriver::getNumberOfCacheFilesNeeded( - aidl_hal::NumberOfCacheFiles* numberOfCacheFiles) { - NNTRACE_FULL(NNTRACE_LAYER_DRIVER, NNTRACE_PHASE_INITIALIZATION, - "SampleDriver::getNumberOfCacheFilesNeeded"); - // Set both numbers to be 0 for cache not supported. 
- numberOfCacheFiles->numDataCache = 0; - numberOfCacheFiles->numModelCache = 0; - return ndk::ScopedAStatus::ok(); -} - -ndk::ScopedAStatus SampleDriver::prepareModel( - const aidl_hal::Model& model, aidl_hal::ExecutionPreference preference, - aidl_hal::Priority priority, int64_t deadlineNs, - const std::vector<ndk::ScopedFileDescriptor>& /*modelCache*/, - const std::vector<ndk::ScopedFileDescriptor>& /*dataCache*/, - const std::vector<uint8_t>& /*token*/, - const std::shared_ptr<aidl_hal::IPreparedModelCallback>& callback) { - NNTRACE_FULL(NNTRACE_LAYER_DRIVER, NNTRACE_PHASE_COMPILATION, "SampleDriver::prepareModel"); - auto copiedModel = aidl_hal::utils::clone(model); - if (!copiedModel.has_value()) { - return toAStatus(aidl_hal::ErrorStatus::GENERAL_FAILURE, copiedModel.error().message); - } - return prepareModelBase(std::move(copiedModel).value(), this, preference, priority, deadlineNs, - callback); -} - -ndk::ScopedAStatus SampleDriver::prepareModelFromCache( - int64_t /*deadlineNs*/, const std::vector<ndk::ScopedFileDescriptor>& /*modelCache*/, - const std::vector<ndk::ScopedFileDescriptor>& /*dataCache*/, - const std::vector<uint8_t>& /*token*/, - const std::shared_ptr<aidl_hal::IPreparedModelCallback>& callback) { - NNTRACE_FULL(NNTRACE_LAYER_DRIVER, NNTRACE_PHASE_COMPILATION, - "SampleDriver::prepareModelFromCache"); - notify(callback, aidl_hal::ErrorStatus::GENERAL_FAILURE, nullptr); - return toAStatus(aidl_hal::ErrorStatus::GENERAL_FAILURE, - "Caching is not supported in the sample driver."); -} - -// Safely downcast an IPreparedModel object to SamplePreparedModel. -// This function will return nullptr if the IPreparedModel object is not originated from the sample -// driver process. 
-static const SamplePreparedModel* castToSamplePreparedModel( - const std::shared_ptr<aidl_hal::IPreparedModel>& preparedModel) { - if (preparedModel->isRemote()) { - return nullptr; - } else { - // This static_cast is safe because SamplePreparedModel is the only class that implements - // the IPreparedModel interface in the sample driver process. - return static_cast<const SamplePreparedModel*>(preparedModel.get()); - } -} - -ndk::ScopedAStatus SampleDriver::allocate( - const aidl_hal::BufferDesc& desc, - const std::vector<aidl_hal::IPreparedModelParcel>& halPreparedModels, - const std::vector<aidl_hal::BufferRole>& inputRoles, - const std::vector<aidl_hal::BufferRole>& outputRoles, aidl_hal::DeviceBuffer* buffer) { - VLOG(DRIVER) << "SampleDriver::allocate"; - constexpr auto getModel = [](const std::shared_ptr<aidl_hal::IPreparedModel>& preparedModel) - -> const aidl_hal::Model* { - const auto* samplePreparedModel = castToSamplePreparedModel(preparedModel); - if (samplePreparedModel == nullptr) { - LOG(ERROR) << "SampleDriver::allocate -- unknown remote IPreparedModel."; - return nullptr; - } - return samplePreparedModel->getModel(); - }; - - std::vector<std::shared_ptr<aidl_hal::IPreparedModel>> preparedModels; - preparedModels.reserve(halPreparedModels.size()); - for (const auto& halPreparedModelParcel : halPreparedModels) { - preparedModels.push_back(halPreparedModelParcel.preparedModel); - } - std::set<AidlHalPreparedModelRole> roles; - aidl_hal::Operand operand; - if (!validateMemoryDesc(desc, preparedModels, inputRoles, outputRoles, getModel, &roles, - &operand)) { - LOG(ERROR) << "SampleDriver::allocate -- validation failed."; - return toAStatus(aidl_hal::ErrorStatus::INVALID_ARGUMENT, - "SampleDriver::allocate -- validation failed."); - } - - if (isExtensionOperandType(operand.type)) { - LOG(ERROR) << "SampleDriver::allocate -- does not support extension type."; - return toAStatus(aidl_hal::ErrorStatus::GENERAL_FAILURE, - "SampleDriver::allocate -- does 
not support extension type."); - } - - // TODO(xusongw): Support allocating buffers with unknown dimensions or rank. - - // An operand obtained from validateMemoryDesc is guaranteed to be representable in canonical - // types. - uint32_t size = nonExtensionOperandSizeOfData(convert(operand.type).value(), - toUnsigned(operand.dimensions).value()); - VLOG(DRIVER) << "SampleDriver::allocate -- type = " << toString(operand.type) - << ", dimensions = " << toString(operand.dimensions) << ", size = " << size; - if (size == 0) { - LOG(ERROR) << "SampleDriver::allocate -- does not support dynamic output shape."; - return toAStatus(aidl_hal::ErrorStatus::GENERAL_FAILURE, - "SampleDriver::allocate -- does not support dynamic output shape."); - } - - // An operand obtained from validateMemoryDesc is guaranteed to be representable in canonical - // types, so it safe to do an unvalidated conversion here. - auto bufferWrapper = - AidlManagedBuffer::create(size, std::move(roles), unvalidatedConvert(operand).value()); - if (bufferWrapper == nullptr) { - LOG(ERROR) << "SampleDriver::allocate -- not enough memory."; - return toAStatus(aidl_hal::ErrorStatus::GENERAL_FAILURE, - "SampleDriver::allocate -- not enough memory."); - } - - auto token = mBufferTracker->add(bufferWrapper); - if (token == nullptr) { - LOG(ERROR) << "SampleDriver::allocate -- AidlBufferTracker returned invalid token."; - return toAStatus(aidl_hal::ErrorStatus::GENERAL_FAILURE, - "SampleDriver::allocate -- AidlBufferTracker returned invalid token."); - } - - const uint32_t tokenValue = token->get(); - std::shared_ptr<SampleBuffer> sampleBuffer = - ndk::SharedRefBase::make<SampleBuffer>(std::move(bufferWrapper), std::move(token)); - VLOG(DRIVER) << "SampleDriver::allocate -- successfully allocates the requested memory"; - buffer->buffer = std::move(sampleBuffer); - buffer->token = tokenValue; - return ndk::ScopedAStatus::ok(); -} - -int SampleDriver::run() { - ABinderProcess_setThreadPoolMaxThreadCount(4); - const 
std::string name = std::string(SampleDriver::descriptor) + "/" + mName; - const binder_status_t status = AServiceManager_addService(this->asBinder().get(), name.c_str()); - if (status != STATUS_OK) { - return 1; - } - ABinderProcess_joinThreadPool(); - return 1; -} - -static void copyRunTimePoolInfos(const RunTimePoolInfo& srcPool, const RunTimePoolInfo& dstPool) { - CHECK(srcPool.getBuffer() != nullptr); - CHECK(dstPool.getBuffer() != nullptr); - CHECK(srcPool.getSize() == dstPool.getSize()); - std::copy(srcPool.getBuffer(), srcPool.getBuffer() + srcPool.getSize(), dstPool.getBuffer()); - dstPool.flush(); -} - -ndk::ScopedAStatus SampleBuffer::copyTo(const aidl_hal::Memory& dst) { - const auto canonicalMemory = convert(dst); - if (!canonicalMemory.has_value()) { - return toAStatus(aidl_hal::ErrorStatus::INVALID_ARGUMENT, canonicalMemory.error().message); - } - const auto dstPool = RunTimePoolInfo::createFromMemory(canonicalMemory.value()); - if (!dstPool.has_value()) { - LOG(ERROR) << "SampleBuffer::copyTo -- unable to map dst memory."; - return toAStatus(aidl_hal::ErrorStatus::GENERAL_FAILURE, - "SampleBuffer::copyTo -- unable to map dst memory."); - } - const auto validationStatus = - aidl_hal::utils::convert(kBuffer->validateCopyTo(dstPool->getSize())).value(); - if (validationStatus != aidl_hal::ErrorStatus::NONE) { - return toAStatus(validationStatus); - } - const auto srcPool = kBuffer->createRunTimePoolInfo(); - copyRunTimePoolInfos(srcPool, dstPool.value()); - return ndk::ScopedAStatus::ok(); -} - -static aidl_hal::ErrorStatus copyFromInternal( - const aidl_hal::Memory& src, const std::vector<uint32_t>& dimensions, - const std::shared_ptr<AidlManagedBuffer>& bufferWrapper) { - CHECK(bufferWrapper != nullptr); - const auto canonicalMemory = convert(src); - if (!canonicalMemory.has_value()) { - return aidl_hal::ErrorStatus::INVALID_ARGUMENT; - } - const auto srcPool = RunTimePoolInfo::createFromMemory(canonicalMemory.value()); - if (!srcPool.has_value()) { - 
LOG(ERROR) << "SampleBuffer::copyFrom -- unable to map src memory."; - return aidl_hal::ErrorStatus::GENERAL_FAILURE; - } - const auto validationStatus = aidl_hal::utils::convert(bufferWrapper->validateCopyFrom( - dimensions, srcPool->getSize())) - .value(); - if (validationStatus != aidl_hal::ErrorStatus::NONE) { - return validationStatus; - } - const auto dstPool = bufferWrapper->createRunTimePoolInfo(); - copyRunTimePoolInfos(srcPool.value(), dstPool); - return aidl_hal::ErrorStatus::NONE; -} - -ndk::ScopedAStatus SampleBuffer::copyFrom(const aidl_hal::Memory& src, - const std::vector<int32_t>& dimensions) { - const auto unsignedDimensions = toUnsigned(dimensions); - if (!unsignedDimensions.has_value()) { - return toAStatus(aidl_hal::ErrorStatus::INVALID_ARGUMENT, - unsignedDimensions.error().message); - } - const auto status = copyFromInternal(src, unsignedDimensions.value(), kBuffer); - if (status != aidl_hal::ErrorStatus::NONE) { - kBuffer->setInitialized(false); - return toAStatus(status); - } - kBuffer->updateDimensions(unsignedDimensions.value()); - kBuffer->setInitialized(true); - return ndk::ScopedAStatus::ok(); -} - -bool SamplePreparedModel::initialize() { - const auto canonicalPools = convert(mModel.pools); - if (!canonicalPools.has_value()) { - return false; - } - return setRunTimePoolInfosFromCanonicalMemories(&mPoolInfos, canonicalPools.value()); -} - -static std::tuple<aidl_hal::ErrorStatus, std::vector<RunTimePoolInfo>, - std::vector<std::shared_ptr<AidlManagedBuffer>>> -createRunTimePoolInfos(const Request& request, const SampleDriver& driver, - const SamplePreparedModel* preparedModel) { - std::vector<RunTimePoolInfo> requestPoolInfos; - std::vector<std::shared_ptr<AidlManagedBuffer>> bufferWrappers; - requestPoolInfos.reserve(request.pools.size()); - bufferWrappers.reserve(request.pools.size()); - for (uint32_t i = 0; i < request.pools.size(); i++) { - const auto& pool = request.pools[i]; - if (const auto* memory = 
std::get_if<SharedMemory>(&pool)) { - auto buffer = RunTimePoolInfo::createFromMemory(*memory); - if (!buffer.has_value()) { - LOG(ERROR) << "createRuntimeMemoriesFromMemoryPools -- could not map pools"; - return {aidl_hal::ErrorStatus::GENERAL_FAILURE, {}, {}}; - } - requestPoolInfos.push_back(std::move(*buffer)); - bufferWrappers.push_back(nullptr); - } else if (const auto* token = std::get_if<Request::MemoryDomainToken>(&pool)) { - auto bufferWrapper = driver.getBufferTracker()->get(static_cast<uint32_t>(*token)); - if (bufferWrapper == nullptr) { - return {aidl_hal::ErrorStatus::INVALID_ARGUMENT, {}, {}}; - } - const auto validationStatus = - aidl_hal::utils::convert( - bufferWrapper->validateRequest(i, request, preparedModel)) - .value(); - if (validationStatus != aidl_hal::ErrorStatus::NONE) { - return {validationStatus, {}, {}}; - } - requestPoolInfos.push_back(bufferWrapper->createRunTimePoolInfo()); - bufferWrappers.push_back(std::move(bufferWrapper)); - } else { - // If the pool is not a Memory or a token, the input is invalid. 
- return {aidl_hal::ErrorStatus::INVALID_ARGUMENT, {}, {}}; - } - } - return {aidl_hal::ErrorStatus::NONE, std::move(requestPoolInfos), std::move(bufferWrappers)}; -} - -static aidl_hal::ErrorStatus updateDeviceMemories( - aidl_hal::ErrorStatus status, const Request& request, - const std::vector<std::shared_ptr<AidlManagedBuffer>>& bufferWrappers, - const std::vector<aidl_hal::OutputShape>& outputShapes) { - if (status == aidl_hal::ErrorStatus::NONE) { - for (uint32_t i = 0; i < request.outputs.size(); i++) { - const uint32_t poolIndex = request.outputs[i].location.poolIndex; - const auto& pool = request.pools[poolIndex]; - if (std::holds_alternative<Request::MemoryDomainToken>(pool)) { - const auto unsignedDimensions = toUnsigned(outputShapes[i].dimensions).value(); - if (!bufferWrappers[poolIndex]->updateDimensions(unsignedDimensions)) { - return aidl_hal::ErrorStatus::GENERAL_FAILURE; - } - } - } - for (uint32_t i = 0; i < request.outputs.size(); i++) { - const uint32_t poolIndex = request.outputs[i].location.poolIndex; - const auto& pool = request.pools[poolIndex]; - if (std::holds_alternative<Request::MemoryDomainToken>(pool)) { - bufferWrappers[poolIndex]->setInitialized(true); - } - } - } else if (status == aidl_hal::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) { - // If CpuExecutor reports OUTPUT_INSUFFCIENT_SIZE on a device memory, this is because the - // dimensions of the device memory are incorrectly specified. The driver should return - // GENERAL_FAILURE instead in this case. 
- for (uint32_t i = 0; i < request.outputs.size(); i++) { - const uint32_t poolIndex = request.outputs[i].location.poolIndex; - const auto& pool = request.pools[poolIndex]; - if (std::holds_alternative<Request::MemoryDomainToken>(pool)) { - if (!outputShapes[i].isSufficient) { - LOG(ERROR) << "Invalid dimensions for output " << i - << ": actual shape = " << toString(outputShapes[i].dimensions); - return aidl_hal::ErrorStatus::GENERAL_FAILURE; - } - } - } - } - return aidl_hal::ErrorStatus::NONE; -} - -ndk::ScopedAStatus SamplePreparedModel::executeSynchronously( - const aidl_hal::Request& halRequest, bool measureTiming, int64_t halDeadlineNs, - int64_t loopTimeoutDurationNs, aidl_hal::ExecutionResult* executionResult) { - NNTRACE_FULL(NNTRACE_LAYER_DRIVER, NNTRACE_PHASE_EXECUTION, - "SampleDriver::executeSynchronously"); - VLOG(DRIVER) << "executeSynchronously(" << SHOW_IF_DEBUG(halRequest.toString()) << ")"; - - TimePoint driverStart, driverEnd, deviceStart, deviceEnd; - if (measureTiming) driverStart = Clock::now(); - - const auto model = convert(mModel).value(); - - auto maybeRequest = convert(halRequest); - if (!maybeRequest.has_value()) { - return toAStatus(aidl_hal::ErrorStatus::INVALID_ARGUMENT, maybeRequest.error().message); - } - const auto request = std::move(maybeRequest).value(); - - const auto validationResult = validateRequestForModel(request, model); - if (!validationResult.ok()) { - return toAStatus(aidl_hal::ErrorStatus::INVALID_ARGUMENT, validationResult.error()); - } - - if (halDeadlineNs < -1) { - return toAStatus(aidl_hal::ErrorStatus::INVALID_ARGUMENT, - "Invalid deadline: " + toString(halDeadlineNs)); - } - if (loopTimeoutDurationNs < -1) { - return toAStatus(aidl_hal::ErrorStatus::INVALID_ARGUMENT, - "Invalid loop timeout duration: " + toString(loopTimeoutDurationNs)); - } - - const auto deadline = makeDeadline(halDeadlineNs); - if (hasDeadlinePassed(deadline)) { - return toAStatus(aidl_hal::ErrorStatus::MISSED_DEADLINE_PERSISTENT); - } - - 
NNTRACE_FULL_SWITCH(NNTRACE_LAYER_DRIVER, NNTRACE_PHASE_INPUTS_AND_OUTPUTS, - "SampleDriver::executeSynchronouslyBase"); - const auto [poolStatus, requestPoolInfos, bufferWrappers] = - createRunTimePoolInfos(request, *mDriver, this); - if (poolStatus != aidl_hal::ErrorStatus::NONE) { - return toAStatus(poolStatus); - } - - NNTRACE_FULL_SWITCH(NNTRACE_LAYER_DRIVER, NNTRACE_PHASE_EXECUTION, - "SampleDriver::executeSynchronouslyBase"); - CpuExecutor executor = mDriver->getExecutor(); - if (loopTimeoutDurationNs >= 0) { - executor.setLoopTimeout(loopTimeoutDurationNs); - } - if (deadline.has_value()) { - executor.setDeadline(*deadline); - } - if (measureTiming) deviceStart = Clock::now(); - int n = executor.run(model, request, mPoolInfos, requestPoolInfos); - if (measureTiming) deviceEnd = Clock::now(); - VLOG(DRIVER) << "executor.run returned " << n; - aidl_hal::ErrorStatus executionStatus = convertResultCodeToAidlErrorStatus(n); - if (executionStatus != aidl_hal::ErrorStatus::NONE && - executionStatus != aidl_hal::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) { - return toAStatus(executionStatus); - } - auto outputShapes = aidl_hal::utils::convert(executor.getOutputShapes()).value(); - - // Update device memory metadata. 
- const aidl_hal::ErrorStatus updateStatus = - updateDeviceMemories(executionStatus, request, bufferWrappers, outputShapes); - if (updateStatus != aidl_hal::ErrorStatus::NONE) { - return toAStatus(updateStatus); - } - - executionResult->outputSufficientSize = - executionStatus != aidl_hal::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE; - executionResult->outputShapes = std::move(outputShapes); - executionResult->timing = kNoTiming; - if (measureTiming && executionStatus == aidl_hal::ErrorStatus::NONE) { - driverEnd = Clock::now(); - aidl_hal::Timing timing = {.timeOnDeviceNs = nanosecondsDuration(deviceEnd, deviceStart), - .timeInDriverNs = nanosecondsDuration(driverEnd, driverStart)}; - VLOG(DRIVER) << "executeSynchronously timing = " << timing.toString(); - - executionResult->timing = timing; - } - return ndk::ScopedAStatus::ok(); -} - -// The sample driver will finish the execution and then return. -ndk::ScopedAStatus SamplePreparedModel::executeFenced( - const aidl_hal::Request& halRequest, const std::vector<ndk::ScopedFileDescriptor>& waitFor, - bool measureTiming, int64_t halDeadlineNs, int64_t loopTimeoutDurationNs, - int64_t durationNs, aidl_hal::FencedExecutionResult* executionResult) { - NNTRACE_FULL(NNTRACE_LAYER_DRIVER, NNTRACE_PHASE_EXECUTION, - "SamplePreparedModel::executeFenced"); - VLOG(DRIVER) << "executeFenced(" << SHOW_IF_DEBUG(halRequest.toString()) << ")"; - - TimePoint driverStart, driverEnd, deviceStart, deviceEnd; - if (measureTiming) driverStart = Clock::now(); - - const auto model = convert(mModel).value(); - - auto maybeRequest = convert(halRequest); - if (!maybeRequest.has_value()) { - return toAStatus(aidl_hal::ErrorStatus::INVALID_ARGUMENT, maybeRequest.error().message); - } - const auto request = std::move(maybeRequest).value(); - - const auto validationResult = - validateRequestForModel(request, model, /*allowUnspecifiedOutput=*/false); - if (!validationResult.ok()) { - return toAStatus(aidl_hal::ErrorStatus::INVALID_ARGUMENT, 
validationResult.error()); - } - - if (halDeadlineNs < -1) { - return toAStatus(aidl_hal::ErrorStatus::INVALID_ARGUMENT, - "Invalid deadline: " + toString(halDeadlineNs)); - } - if (loopTimeoutDurationNs < -1) { - return toAStatus(aidl_hal::ErrorStatus::INVALID_ARGUMENT, - "Invalid loop timeout duration: " + toString(loopTimeoutDurationNs)); - } - if (durationNs < -1) { - return toAStatus(aidl_hal::ErrorStatus::INVALID_ARGUMENT, - "Invalid fenced execution duration: " + toString(durationNs)); - } - - const auto deadline = makeDeadline(halDeadlineNs); - if (hasDeadlinePassed(deadline)) { - return toAStatus(aidl_hal::ErrorStatus::MISSED_DEADLINE_PERSISTENT); - } - - // Wait for the dependent events to signal - for (const auto& fenceHandle : waitFor) { - int syncFenceFd = fenceHandle.get(); - if (syncWait(syncFenceFd, -1) != FenceState::SIGNALED) { - LOG(ERROR) << "syncWait failed"; - return toAStatus(aidl_hal::ErrorStatus::GENERAL_FAILURE, "syncWait failed"); - } - } - - // Update deadline if the timeout duration is closer than the deadline. 
- auto closestDeadline = deadline; - if (durationNs >= 0) { - const auto timeoutDurationDeadline = makeDeadline(durationNs); - if (!closestDeadline.has_value() || *closestDeadline > timeoutDurationDeadline) { - closestDeadline = timeoutDurationDeadline; - } - } - - TimePoint driverStartAfterFence; - if (measureTiming) driverStartAfterFence = Clock::now(); - - NNTRACE_FULL_SWITCH(NNTRACE_LAYER_DRIVER, NNTRACE_PHASE_INPUTS_AND_OUTPUTS, - "SamplePreparedModel::executeFenced"); - const auto [poolStatus, requestPoolInfos, bufferWrappers] = - createRunTimePoolInfos(request, *mDriver, this); - if (poolStatus != aidl_hal::ErrorStatus::NONE) { - return toAStatus(poolStatus); - } - - NNTRACE_FULL_SWITCH(NNTRACE_LAYER_DRIVER, NNTRACE_PHASE_EXECUTION, - "SamplePreparedModel::executeFenced"); - CpuExecutor executor = mDriver->getExecutor(); - if (loopTimeoutDurationNs >= 0) { - executor.setLoopTimeout(loopTimeoutDurationNs); - } - if (closestDeadline.has_value()) { - executor.setDeadline(*closestDeadline); - } - if (measureTiming) deviceStart = Clock::now(); - int n = executor.run(model, request, mPoolInfos, requestPoolInfos); - if (measureTiming) deviceEnd = Clock::now(); - VLOG(DRIVER) << "executor.run returned " << n; - aidl_hal::ErrorStatus executionStatus = convertResultCodeToAidlErrorStatus(n); - if (executionStatus != aidl_hal::ErrorStatus::NONE) { - return toAStatus(executionStatus); - } - - // Set output memories to the initialized state. 
- if (executionStatus == aidl_hal::ErrorStatus::NONE) { - for (const auto& output : request.outputs) { - const uint32_t poolIndex = output.location.poolIndex; - const auto& pool = request.pools[poolIndex]; - if (std::holds_alternative<Request::MemoryDomainToken>(pool)) { - bufferWrappers[poolIndex]->setInitialized(true); - } - } - } - - aidl_hal::Timing timingSinceLaunch = kNoTiming; - aidl_hal::Timing timingAfterFence = kNoTiming; - if (measureTiming) { - driverEnd = Clock::now(); - timingSinceLaunch = {.timeOnDeviceNs = nanosecondsDuration(deviceEnd, deviceStart), - .timeInDriverNs = nanosecondsDuration(driverEnd, driverStart)}; - timingAfterFence = { - .timeOnDeviceNs = nanosecondsDuration(deviceEnd, deviceStart), - .timeInDriverNs = nanosecondsDuration(driverEnd, driverStartAfterFence)}; - VLOG(DRIVER) << "executeFenced timingSinceLaunch = " << timingSinceLaunch.toString(); - VLOG(DRIVER) << "executeFenced timingAfterFence = " << timingAfterFence.toString(); - } - - executionResult->callback = ndk::SharedRefBase::make<SampleFencedExecutionCallback>( - timingSinceLaunch, timingAfterFence, executionStatus); - executionResult->syncFence = ndk::ScopedFileDescriptor(); - return ndk::ScopedAStatus::ok(); -} - -ndk::ScopedAStatus SamplePreparedModel::configureExecutionBurst( - std::shared_ptr<aidl_hal::IBurst>* burst) { - std::shared_ptr<SamplePreparedModel> self = this->template ref<SamplePreparedModel>(); - *burst = ndk::SharedRefBase::make<SampleBurst>(std::move(self)); - return ndk::ScopedAStatus::ok(); -} - -SampleBurst::SampleBurst(std::shared_ptr<SamplePreparedModel> preparedModel) - : kPreparedModel(std::move(preparedModel)) { - CHECK(kPreparedModel != nullptr); -} - -ndk::ScopedAStatus SampleBurst::executeSynchronously( - const aidl_hal::Request& request, const std::vector<int64_t>& memoryIdentifierTokens, - bool measureTiming, int64_t deadlineNs, int64_t loopTimeoutDurationNs, - aidl_hal::ExecutionResult* executionResult) { - if (request.pools.size() != 
memoryIdentifierTokens.size()) { - return toAStatus(aidl_hal::ErrorStatus::INVALID_ARGUMENT, - "request.pools.size() != memoryIdentifierTokens.size()"); - } - if (!std::all_of(memoryIdentifierTokens.begin(), memoryIdentifierTokens.end(), - [](int64_t token) { return token >= -1; })) { - return toAStatus(aidl_hal::ErrorStatus::INVALID_ARGUMENT, "Invalid memoryIdentifierTokens"); - } - - // Ensure at most one execution is in flight at a time. - const bool executionAlreadyInFlight = mExecutionInFlight.test_and_set(); - if (executionAlreadyInFlight) { - return toAStatus(aidl_hal::ErrorStatus::GENERAL_FAILURE, - "Burst object supports at most one execution at a time"); - } - const auto guard = base::make_scope_guard([this] { mExecutionInFlight.clear(); }); - - return kPreparedModel->executeSynchronously(request, measureTiming, deadlineNs, - loopTimeoutDurationNs, executionResult); -} - -ndk::ScopedAStatus SampleBurst::releaseMemoryResource(int64_t memoryIdentifierToken) { - if (memoryIdentifierToken < -1) { - return toAStatus(aidl_hal::ErrorStatus::INVALID_ARGUMENT, "Invalid memoryIdentifierToken"); - } - return ndk::ScopedAStatus::ok(); -} - -} // namespace sample_driver -} // namespace nn -} // namespace android
diff --git a/driver/sample_aidl/SampleDriver.h b/driver/sample_aidl/SampleDriver.h deleted file mode 100644 index e63f164..0000000 --- a/driver/sample_aidl/SampleDriver.h +++ /dev/null
@@ -1,182 +0,0 @@ -/* - * Copyright (C) 2021 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef ANDROID_FRAMEWORKS_ML_NN_DRIVER_SAMPLE_AIDL_SAMPLE_DRIVER_H -#define ANDROID_FRAMEWORKS_ML_NN_DRIVER_SAMPLE_AIDL_SAMPLE_DRIVER_H - -#include <android/binder_auto_utils.h> - -#include <memory> -#include <string> -#include <utility> -#include <vector> - -#include "AidlBufferTracker.h" -#include "AidlHalInterfaces.h" -#include "CpuExecutor.h" -#include "NeuralNetworks.h" - -namespace android { -namespace nn { -namespace sample_driver { - -// Manages the data buffer for an operand. -class SampleBuffer : public aidl_hal::BnBuffer { - public: - SampleBuffer(std::shared_ptr<AidlManagedBuffer> buffer, - std::unique_ptr<AidlBufferTracker::Token> token) - : kBuffer(std::move(buffer)), kToken(std::move(token)) { - CHECK(kBuffer != nullptr); - CHECK(kToken != nullptr); - } - ndk::ScopedAStatus copyFrom(const aidl_hal::Memory& src, - const std::vector<int32_t>& dimensions) override; - ndk::ScopedAStatus copyTo(const aidl_hal::Memory& dst) override; - - private: - const std::shared_ptr<AidlManagedBuffer> kBuffer; - const std::unique_ptr<AidlBufferTracker::Token> kToken; -}; - -// Base class used to create sample drivers for the NN HAL. This class -// provides some implementation of the more common functions. -// -// Since these drivers simulate hardware, they must run the computations -// on the CPU. 
An actual driver would not do that. -class SampleDriver : public aidl_hal::BnDevice { - public: - SampleDriver(const char* name, - const IOperationResolver* operationResolver = BuiltinOperationResolver::get()) - : mName(name), - mOperationResolver(operationResolver), - mBufferTracker(AidlBufferTracker::create()) { - android::nn::initVLogMask(); - } - ndk::ScopedAStatus allocate(const aidl_hal::BufferDesc& desc, - const std::vector<aidl_hal::IPreparedModelParcel>& preparedModels, - const std::vector<aidl_hal::BufferRole>& inputRoles, - const std::vector<aidl_hal::BufferRole>& outputRoles, - aidl_hal::DeviceBuffer* buffer) override; - ndk::ScopedAStatus getNumberOfCacheFilesNeeded( - aidl_hal::NumberOfCacheFiles* numberOfCacheFiles) override; - ndk::ScopedAStatus getSupportedExtensions( - std::vector<aidl_hal::Extension>* extensions) override; - ndk::ScopedAStatus getType(aidl_hal::DeviceType* deviceType) override; - ndk::ScopedAStatus getVersionString(std::string* version) override; - ndk::ScopedAStatus prepareModel( - const aidl_hal::Model& model, aidl_hal::ExecutionPreference preference, - aidl_hal::Priority priority, int64_t deadlineNs, - const std::vector<ndk::ScopedFileDescriptor>& modelCache, - const std::vector<ndk::ScopedFileDescriptor>& dataCache, - const std::vector<uint8_t>& token, - const std::shared_ptr<aidl_hal::IPreparedModelCallback>& callback) override; - ndk::ScopedAStatus prepareModelFromCache( - int64_t deadlineNs, const std::vector<ndk::ScopedFileDescriptor>& modelCache, - const std::vector<ndk::ScopedFileDescriptor>& dataCache, - const std::vector<uint8_t>& token, - const std::shared_ptr<aidl_hal::IPreparedModelCallback>& callback) override; - - // Starts and runs the driver service. Typically called from main(). - // This will return only once the service shuts down. 
- int run(); - - CpuExecutor getExecutor() const { return CpuExecutor(mOperationResolver); } - const std::shared_ptr<AidlBufferTracker>& getBufferTracker() const { return mBufferTracker; } - - protected: - std::string mName; - const IOperationResolver* mOperationResolver; - const std::shared_ptr<AidlBufferTracker> mBufferTracker; -}; - -class SamplePreparedModel : public aidl_hal::BnPreparedModel { - public: - SamplePreparedModel(aidl_hal::Model&& model, const SampleDriver* driver, - aidl_hal::ExecutionPreference preference, uid_t userId, - aidl_hal::Priority priority) - : mModel(std::move(model)), - mDriver(driver), - kPreference(preference), - kUserId(userId), - kPriority(priority) { - (void)kUserId; - (void)kPriority; - } - bool initialize(); - ndk::ScopedAStatus executeSynchronously(const aidl_hal::Request& request, bool measureTiming, - int64_t deadlineNs, int64_t loopTimeoutDurationNs, - aidl_hal::ExecutionResult* executionResult) override; - ndk::ScopedAStatus executeFenced(const aidl_hal::Request& request, - const std::vector<ndk::ScopedFileDescriptor>& waitFor, - bool measureTiming, int64_t deadlineNs, - int64_t loopTimeoutDurationNs, int64_t durationNs, - aidl_hal::FencedExecutionResult* executionResult) override; - ndk::ScopedAStatus configureExecutionBurst(std::shared_ptr<aidl_hal::IBurst>* burst) override; - const aidl_hal::Model* getModel() const { return &mModel; } - - protected: - aidl_hal::Model mModel; - const SampleDriver* mDriver; - std::vector<RunTimePoolInfo> mPoolInfos; - const aidl_hal::ExecutionPreference kPreference; - const uid_t kUserId; - const aidl_hal::Priority kPriority; -}; - -class SampleFencedExecutionCallback : public aidl_hal::BnFencedExecutionCallback { - public: - SampleFencedExecutionCallback(aidl_hal::Timing timingSinceLaunch, - aidl_hal::Timing timingAfterFence, aidl_hal::ErrorStatus error) - : kTimingSinceLaunch(timingSinceLaunch), - kTimingAfterFence(timingAfterFence), - kErrorStatus(error) {} - ndk::ScopedAStatus 
getExecutionInfo(aidl_hal::Timing* timingLaunched, - aidl_hal::Timing* timingFenced, - aidl_hal::ErrorStatus* errorStatus) override { - *timingLaunched = kTimingSinceLaunch; - *timingFenced = kTimingAfterFence; - *errorStatus = kErrorStatus; - return ndk::ScopedAStatus::ok(); - } - - private: - const aidl_hal::Timing kTimingSinceLaunch; - const aidl_hal::Timing kTimingAfterFence; - const aidl_hal::ErrorStatus kErrorStatus; -}; - -class SampleBurst : public aidl_hal::BnBurst { - public: - // Precondition: preparedModel != nullptr - explicit SampleBurst(std::shared_ptr<SamplePreparedModel> preparedModel); - - ndk::ScopedAStatus executeSynchronously(const aidl_hal::Request& request, - const std::vector<int64_t>& memoryIdentifierTokens, - bool measureTiming, int64_t deadlineNs, - int64_t loopTimeoutDurationNs, - aidl_hal::ExecutionResult* executionResult) override; - ndk::ScopedAStatus releaseMemoryResource(int64_t memoryIdentifierToken) override; - - protected: - std::atomic_flag mExecutionInFlight = ATOMIC_FLAG_INIT; - const std::shared_ptr<SamplePreparedModel> kPreparedModel; -}; - -} // namespace sample_driver -} // namespace nn -} // namespace android - -#endif // ANDROID_FRAMEWORKS_ML_NN_DRIVER_SAMPLE_AIDL_SAMPLE_DRIVER_H
diff --git a/driver/sample_aidl/SampleDriverAll.cpp b/driver/sample_aidl/SampleDriverAll.cpp deleted file mode 100644 index 1efa818..0000000 --- a/driver/sample_aidl/SampleDriverAll.cpp +++ /dev/null
@@ -1,33 +0,0 @@ -/* - * Copyright (C) 2021 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#define LOG_TAG "SampleDriverAll" - -#include <android/binder_interface_utils.h> - -#include <memory> - -#include "SampleDriverFull.h" - -using aidl::android::hardware::neuralnetworks::PerformanceInfo; -using android::nn::sample_driver::SampleDriverFull; - -int main() { - const PerformanceInfo performance{.execTime = 1.1f, .powerUsage = 1.1f}; - std::shared_ptr<SampleDriverFull> driver = - ndk::SharedRefBase::make<SampleDriverFull>("nnapi-sample_all", performance); - return driver->run(); -}
diff --git a/driver/sample_aidl/SampleDriverFloatFast.cpp b/driver/sample_aidl/SampleDriverFloatFast.cpp deleted file mode 100644 index 19b2d7e..0000000 --- a/driver/sample_aidl/SampleDriverFloatFast.cpp +++ /dev/null
@@ -1,85 +0,0 @@ -/* - * Copyright (C) 2021 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#define LOG_TAG "SampleDriverFloatFast" - -#include <android-base/logging.h> -#include <android/binder_auto_utils.h> -#include <hidl/LegacySupport.h> -#include <nnapi/hal/aidl/Conversions.h> - -#include <memory> -#include <thread> -#include <vector> - -#include "AidlHalUtils.h" -#include "SampleDriverPartial.h" - -namespace android { -namespace nn { -namespace sample_driver { - -class SampleDriverFloatFast : public SampleDriverPartial { - public: - SampleDriverFloatFast() : SampleDriverPartial("nnapi-sample_float_fast") {} - ndk::ScopedAStatus getCapabilities(aidl_hal::Capabilities* capabilities) override; - - private: - std::vector<bool> getSupportedOperationsImpl(const Model& model) const override; -}; - -ndk::ScopedAStatus SampleDriverFloatFast::getCapabilities(aidl_hal::Capabilities* capabilities) { - android::nn::initVLogMask(); - VLOG(DRIVER) << "getCapabilities()"; - - *capabilities = { - .relaxedFloat32toFloat16PerformanceScalar = {.execTime = 0.7f, .powerUsage = 1.1f}, - .relaxedFloat32toFloat16PerformanceTensor = {.execTime = 0.7f, .powerUsage = 1.1f}, - .operandPerformance = nonExtensionOperandPerformance({1.0f, 1.0f}), - .ifPerformance = {.execTime = 1.0f, .powerUsage = 1.0f}, - .whilePerformance = {.execTime = 1.0f, .powerUsage = 1.0f}}; - update(&capabilities->operandPerformance, 
aidl_hal::OperandType::TENSOR_FLOAT32, - {.execTime = 0.8f, .powerUsage = 1.2f}); - update(&capabilities->operandPerformance, aidl_hal::OperandType::FLOAT32, - {.execTime = 0.8f, .powerUsage = 1.2f}); - - return ndk::ScopedAStatus::ok(); -} - -std::vector<bool> SampleDriverFloatFast::getSupportedOperationsImpl(const Model& model) const { - const size_t count = model.main.operations.size(); - std::vector<bool> supported(count); - for (size_t i = 0; i < count; i++) { - const Operation& operation = model.main.operations[i]; - if (!isExtensionOperationType(operation.type) && operation.inputs.size() > 0) { - const Operand& firstOperand = model.main.operands[operation.inputs[0]]; - supported[i] = firstOperand.type == OperandType::TENSOR_FLOAT32; - } - } - return supported; -} - -} // namespace sample_driver -} // namespace nn -} // namespace android - -using android::nn::sample_driver::SampleDriverFloatFast; - -int main() { - std::shared_ptr<SampleDriverFloatFast> driver = - ndk::SharedRefBase::make<SampleDriverFloatFast>(); - return driver->run(); -}
diff --git a/driver/sample_aidl/SampleDriverFloatSlow.cpp b/driver/sample_aidl/SampleDriverFloatSlow.cpp deleted file mode 100644 index f149608..0000000 --- a/driver/sample_aidl/SampleDriverFloatSlow.cpp +++ /dev/null
@@ -1,84 +0,0 @@ -/* - * Copyright (C) 2021 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#define LOG_TAG "SampleDriverFloatSlow" - -#include <android-base/logging.h> -#include <hidl/LegacySupport.h> -#include <nnapi/hal/aidl/Conversions.h> - -#include <memory> -#include <thread> -#include <vector> - -#include "AidlHalUtils.h" -#include "SampleDriverPartial.h" - -namespace android { -namespace nn { -namespace sample_driver { - -class SampleDriverFloatSlow : public SampleDriverPartial { - public: - SampleDriverFloatSlow() : SampleDriverPartial("nnapi-sample_float_slow") {} - ndk::ScopedAStatus getCapabilities(aidl_hal::Capabilities* capabilities) override; - - private: - std::vector<bool> getSupportedOperationsImpl(const Model& model) const override; -}; - -ndk::ScopedAStatus SampleDriverFloatSlow::getCapabilities(aidl_hal::Capabilities* capabilities) { - android::nn::initVLogMask(); - VLOG(DRIVER) << "getCapabilities()"; - - *capabilities = { - .relaxedFloat32toFloat16PerformanceScalar = {.execTime = 1.2f, .powerUsage = 0.6f}, - .relaxedFloat32toFloat16PerformanceTensor = {.execTime = 1.2f, .powerUsage = 0.6f}, - .operandPerformance = nonExtensionOperandPerformance({1.0f, 1.0f}), - .ifPerformance = {.execTime = 1.0f, .powerUsage = 1.0f}, - .whilePerformance = {.execTime = 1.0f, .powerUsage = 1.0f}}; - update(&capabilities->operandPerformance, aidl_hal::OperandType::TENSOR_FLOAT32, - {.execTime = 1.3f, .powerUsage = 
0.7f}); - update(&capabilities->operandPerformance, aidl_hal::OperandType::FLOAT32, - {.execTime = 1.3f, .powerUsage = 0.7f}); - - return ndk::ScopedAStatus::ok(); -} - -std::vector<bool> SampleDriverFloatSlow::getSupportedOperationsImpl(const Model& model) const { - const size_t count = model.main.operations.size(); - std::vector<bool> supported(count); - for (size_t i = 0; i < count; i++) { - const Operation& operation = model.main.operations[i]; - if (!isExtensionOperationType(operation.type) && operation.inputs.size() > 0) { - const Operand& firstOperand = model.main.operands[operation.inputs[0]]; - supported[i] = firstOperand.type == OperandType::TENSOR_FLOAT32; - } - } - return supported; -} - -} // namespace sample_driver -} // namespace nn -} // namespace android - -using android::nn::sample_driver::SampleDriverFloatSlow; - -int main() { - std::shared_ptr<SampleDriverFloatSlow> driver = - ndk::SharedRefBase::make<SampleDriverFloatSlow>(); - return driver->run(); -}
diff --git a/driver/sample_aidl/SampleDriverFull.cpp b/driver/sample_aidl/SampleDriverFull.cpp deleted file mode 100644 index f984815..0000000 --- a/driver/sample_aidl/SampleDriverFull.cpp +++ /dev/null
@@ -1,64 +0,0 @@ -/* - * Copyright (C) 2021 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#define LOG_TAG "SampleDriverFull" - -#include "SampleDriverFull.h" - -#include <nnapi/Validation.h> -#include <nnapi/hal/aidl/Conversions.h> - -#include <string> -#include <vector> - -#include "AidlHalUtils.h" -#include "LegacyUtils.h" -#include "SampleDriverUtils.h" - -namespace android { -namespace nn { -namespace sample_driver { - -ndk::ScopedAStatus SampleDriverFull::getCapabilities(aidl_hal::Capabilities* capabilities) { - android::nn::initVLogMask(); - VLOG(DRIVER) << "getCapabilities()"; - *capabilities = {.relaxedFloat32toFloat16PerformanceScalar = mPerf, - .relaxedFloat32toFloat16PerformanceTensor = mPerf, - .operandPerformance = nonExtensionOperandPerformance(mPerf), - .ifPerformance = mPerf, - .whilePerformance = mPerf}; - return ndk::ScopedAStatus::ok(); -} - -ndk::ScopedAStatus SampleDriverFull::getSupportedOperations( - const aidl_hal::Model& model, std::vector<bool>* supportedOperations) { - VLOG(DRIVER) << "getSupportedOperations()"; - const auto canonicalModel = convert(model); - if (!canonicalModel.has_value()) { - return toAStatus(aidl_hal::ErrorStatus::INVALID_ARGUMENT, canonicalModel.error().message); - } - const size_t count = canonicalModel.value().main.operations.size(); - *supportedOperations = std::vector<bool>(count, true); - for (size_t i = 0; i < count; i++) { - const Operation& operation = 
canonicalModel.value().main.operations[i]; - supportedOperations->at(i) = !isExtensionOperationType(operation.type); - } - return ndk::ScopedAStatus::ok(); -} - -} // namespace sample_driver -} // namespace nn -} // namespace android
diff --git a/driver/sample_aidl/SampleDriverFull.h b/driver/sample_aidl/SampleDriverFull.h deleted file mode 100644 index 144af37..0000000 --- a/driver/sample_aidl/SampleDriverFull.h +++ /dev/null
@@ -1,44 +0,0 @@ -/* - * Copyright (C) 2021 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef ANDROID_FRAMEWORKS_ML_NN_DRIVER_SAMPLE_AIDL_SAMPLE_DRIVER_FULL_H -#define ANDROID_FRAMEWORKS_ML_NN_DRIVER_SAMPLE_AIDL_SAMPLE_DRIVER_FULL_H - -#include <vector> - -#include "SampleDriver.h" - -namespace android { -namespace nn { -namespace sample_driver { - -class SampleDriverFull : public SampleDriver { - public: - SampleDriverFull(const char* name, aidl_hal::PerformanceInfo perf) - : SampleDriver(name), mPerf(perf) {} - ndk::ScopedAStatus getCapabilities(aidl_hal::Capabilities* capabilities) override; - ndk::ScopedAStatus getSupportedOperations(const aidl_hal::Model& model, - std::vector<bool>* supportedOperations) override; - - private: - aidl_hal::PerformanceInfo mPerf; -}; - -} // namespace sample_driver -} // namespace nn -} // namespace android - -#endif // ANDROID_FRAMEWORKS_ML_NN_DRIVER_SAMPLE_AIDL_SAMPLE_DRIVER_FULL_H
diff --git a/driver/sample_aidl/SampleDriverMinimal.cpp b/driver/sample_aidl/SampleDriverMinimal.cpp deleted file mode 100644 index d17e780..0000000 --- a/driver/sample_aidl/SampleDriverMinimal.cpp +++ /dev/null
@@ -1,93 +0,0 @@ -/* - * Copyright (C) 2021 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#define LOG_TAG "SampleDriverMinimal" - -#include <android-base/logging.h> -#include <android/binder_auto_utils.h> - -#include <memory> -#include <thread> -#include <vector> - -#include "AidlHalUtils.h" -#include "SampleDriverPartial.h" - -namespace android { -namespace nn { -namespace sample_driver { - -class SampleDriverMinimal : public SampleDriverPartial { - public: - SampleDriverMinimal() : SampleDriverPartial("nnapi-sample_minimal") {} - ndk::ScopedAStatus getCapabilities(aidl_hal::Capabilities* capabilities) override; - - private: - std::vector<bool> getSupportedOperationsImpl(const Model& model) const override; -}; - -ndk::ScopedAStatus SampleDriverMinimal::getCapabilities(aidl_hal::Capabilities* capabilities) { - android::nn::initVLogMask(); - VLOG(DRIVER) << "getCapabilities()"; - - *capabilities = { - .relaxedFloat32toFloat16PerformanceScalar = {.execTime = 0.4f, .powerUsage = 0.5f}, - .relaxedFloat32toFloat16PerformanceTensor = {.execTime = 0.4f, .powerUsage = 0.5f}, - .operandPerformance = nonExtensionOperandPerformance({1.0f, 1.0f}), - .ifPerformance = {.execTime = 1.0f, .powerUsage = 1.0f}, - .whilePerformance = {.execTime = 1.0f, .powerUsage = 1.0f}}; - update(&capabilities->operandPerformance, aidl_hal::OperandType::TENSOR_FLOAT32, - {.execTime = 0.4f, .powerUsage = 0.5f}); - 
update(&capabilities->operandPerformance, aidl_hal::OperandType::FLOAT32, - {.execTime = 0.4f, .powerUsage = 0.5f}); - - return ndk::ScopedAStatus::ok(); -} - -std::vector<bool> SampleDriverMinimal::getSupportedOperationsImpl(const Model& model) const { - const size_t count = model.main.operations.size(); - std::vector<bool> supported(count); - // Simulate supporting just a few ops - for (size_t i = 0; i < count; i++) { - supported[i] = false; - const Operation& operation = model.main.operations[i]; - switch (operation.type) { - case OperationType::ADD: - case OperationType::CONCATENATION: - case OperationType::CONV_2D: { - const Operand& firstOperand = model.main.operands[operation.inputs[0]]; - if (firstOperand.type == OperandType::TENSOR_FLOAT32) { - supported[i] = true; - } - break; - } - default: - break; - } - } - return supported; -} - -} // namespace sample_driver -} // namespace nn -} // namespace android - -using android::nn::sample_driver::SampleDriverMinimal; - -int main() { - std::shared_ptr<SampleDriverMinimal> driver = ndk::SharedRefBase::make<SampleDriverMinimal>(); - return driver->run(); -}
diff --git a/driver/sample_aidl/SampleDriverPartial.cpp b/driver/sample_aidl/SampleDriverPartial.cpp deleted file mode 100644 index 45bf8de..0000000 --- a/driver/sample_aidl/SampleDriverPartial.cpp +++ /dev/null
@@ -1,73 +0,0 @@ -/* - * Copyright (C) 2021 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#define LOG_TAG "SampleDriverPartial" - -#include "SampleDriverPartial.h" - -#include <android-base/logging.h> -#include <android/binder_auto_utils.h> -#include <nnapi/Validation.h> -#include <nnapi/hal/aidl/Conversions.h> -#include <nnapi/hal/aidl/Utils.h> - -#include <memory> -#include <thread> -#include <utility> -#include <vector> - -#include "SampleDriverUtils.h" - -namespace android { -namespace nn { -namespace sample_driver { - -ndk::ScopedAStatus SampleDriverPartial::getSupportedOperations( - const aidl_hal::Model& model, std::vector<bool>* supportedOperations) { - VLOG(DRIVER) << "getSupportedOperations()"; - const auto canonicalModel = convert(model); - if (!canonicalModel.has_value()) { - return toAStatus(aidl_hal::ErrorStatus::INVALID_ARGUMENT, canonicalModel.error().message); - } - *supportedOperations = getSupportedOperationsImpl(canonicalModel.value()); - return ndk::ScopedAStatus::ok(); -} - -ndk::ScopedAStatus SampleDriverPartial::prepareModel( - const aidl_hal::Model& model, aidl_hal::ExecutionPreference preference, - aidl_hal::Priority priority, int64_t deadline, - const std::vector<ndk::ScopedFileDescriptor>&, - const std::vector<ndk::ScopedFileDescriptor>&, const std::vector<uint8_t>&, - const std::shared_ptr<aidl_hal::IPreparedModelCallback>& callback) { - const auto canonicalModel = convert(model); - if 
(!canonicalModel.has_value()) { - notify(callback, aidl_hal::ErrorStatus::INVALID_ARGUMENT, nullptr); - return toAStatus(aidl_hal::ErrorStatus::INVALID_ARGUMENT, canonicalModel.error().message); - } - std::vector<bool> supported = getSupportedOperationsImpl(canonicalModel.value()); - bool isModelFullySupported = - std::all_of(supported.begin(), supported.end(), [](bool v) { return v; }); - auto copiedModel = aidl_hal::utils::clone(model); - if (!copiedModel.has_value()) { - return toAStatus(aidl_hal::ErrorStatus::GENERAL_FAILURE, copiedModel.error().message); - } - return prepareModelBase(std::move(copiedModel).value(), this, preference, priority, deadline, - callback, isModelFullySupported); -} - -} // namespace sample_driver -} // namespace nn -} // namespace android
diff --git a/driver/sample_aidl/SampleDriverPartial.h b/driver/sample_aidl/SampleDriverPartial.h deleted file mode 100644 index 33de485..0000000 --- a/driver/sample_aidl/SampleDriverPartial.h +++ /dev/null
@@ -1,61 +0,0 @@ -/* - * Copyright (C) 2021 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef ANDROID_FRAMEWORKS_ML_NN_DRIVER_SAMPLE_AIDL_SAMPLE_DRIVER_PARTIAL_H -#define ANDROID_FRAMEWORKS_ML_NN_DRIVER_SAMPLE_AIDL_SAMPLE_DRIVER_PARTIAL_H - -#include <android-base/logging.h> - -#include <memory> -#include <thread> -#include <vector> - -#include "SampleDriver.h" - -namespace android { -namespace nn { -namespace sample_driver { - -// A base class for sample drivers that support only a subset of NNAPI -// operations. Classes of such drivers should inherit from this class and -// implement getSupportedOperationsImpl function which is used for filtering out -// unsupported ops. 
-class SampleDriverPartial : public SampleDriver { - public: - SampleDriverPartial(const char* name, const IOperationResolver* operationResolver = - BuiltinOperationResolver::get()) - : SampleDriver(name, operationResolver) {} - ndk::ScopedAStatus getSupportedOperations(const aidl_hal::Model& model, - std::vector<bool>* supportedOperations) override; - ndk::ScopedAStatus prepareModel( - const aidl_hal::Model& model, aidl_hal::ExecutionPreference preference, - aidl_hal::Priority priority, int64_t deadline, - const std::vector<ndk::ScopedFileDescriptor>& modelCache, - const std::vector<ndk::ScopedFileDescriptor>& dataCache, - const std::vector<uint8_t>& token, - const std::shared_ptr<aidl_hal::IPreparedModelCallback>& callback) override; - - protected: - // Given a valid NNAPI Model returns a boolean vector that indicates which - // ops in the model are supported by a driver. - virtual std::vector<bool> getSupportedOperationsImpl(const Model& model) const = 0; -}; - -} // namespace sample_driver -} // namespace nn -} // namespace android - -#endif // ANDROID_FRAMEWORKS_ML_NN_DRIVER_SAMPLE_AIDL_SAMPLE_DRIVER_PARTIAL_H
diff --git a/driver/sample_aidl/SampleDriverQuant.cpp b/driver/sample_aidl/SampleDriverQuant.cpp deleted file mode 100644 index 10350ed..0000000 --- a/driver/sample_aidl/SampleDriverQuant.cpp +++ /dev/null
@@ -1,87 +0,0 @@ -/* - * Copyright (C) 2021 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#define LOG_TAG "SampleDriverQuant" - -#include <android-base/logging.h> -#include <nnapi/hal/aidl/Conversions.h> - -#include <memory> -#include <thread> -#include <vector> - -#include "AidlHalUtils.h" -#include "SampleDriverPartial.h" - -namespace android { -namespace nn { -namespace sample_driver { - -class SampleDriverQuant : public SampleDriverPartial { - public: - SampleDriverQuant() : SampleDriverPartial("nnapi-sample_quant") {} - ndk::ScopedAStatus getCapabilities(aidl_hal::Capabilities* capabilities) override; - - private: - std::vector<bool> getSupportedOperationsImpl(const Model& model) const override; -}; - -ndk::ScopedAStatus SampleDriverQuant::getCapabilities(aidl_hal::Capabilities* capabilities) { - android::nn::initVLogMask(); - VLOG(DRIVER) << "getCapabilities()"; - - *capabilities = { - .relaxedFloat32toFloat16PerformanceScalar = {.execTime = 50.0f, .powerUsage = 1.0f}, - .relaxedFloat32toFloat16PerformanceTensor = {.execTime = 50.0f, .powerUsage = 1.0f}, - .operandPerformance = nonExtensionOperandPerformance({50.0f, 1.0f}), - .ifPerformance = {.execTime = 50.0f, .powerUsage = 1.0f}, - .whilePerformance = {.execTime = 50.0f, .powerUsage = 1.0f}}; - - return ndk::ScopedAStatus::ok(); -} - -static bool isQuantized(OperandType opType) { - return opType == OperandType::TENSOR_QUANT8_ASYMM || - opType == 
OperandType::TENSOR_QUANT8_ASYMM_SIGNED; -} - -std::vector<bool> SampleDriverQuant::getSupportedOperationsImpl(const Model& model) const { - const size_t count = model.main.operations.size(); - std::vector<bool> supported(count); - for (size_t i = 0; i < count; i++) { - const Operation& operation = model.main.operations[i]; - if (!isExtensionOperationType(operation.type) && operation.inputs.size() > 0) { - const Operand& firstOperand = model.main.operands[operation.inputs[0]]; - supported[i] = isQuantized(firstOperand.type); - if (operation.type == OperationType::SELECT) { - const Operand& secondOperand = model.main.operands[operation.inputs[1]]; - supported[i] = isQuantized(secondOperand.type); - } - } - } - return supported; -} - -} // namespace sample_driver -} // namespace nn -} // namespace android - -using android::nn::sample_driver::SampleDriverQuant; - -int main() { - std::shared_ptr<SampleDriverQuant> driver = ndk::SharedRefBase::make<SampleDriverQuant>(); - return driver->run(); -}
diff --git a/driver/sample_aidl/SampleDriverUtils.cpp b/driver/sample_aidl/SampleDriverUtils.cpp deleted file mode 100644 index d7d4364..0000000 --- a/driver/sample_aidl/SampleDriverUtils.cpp +++ /dev/null
@@ -1,137 +0,0 @@ -/* - * Copyright (C) 2021 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "SampleDriverUtils.h" - -#include <aidl/android/hardware/common/NativeHandle.h> -#include <android/binder_auto_utils.h> -#include <android/binder_ibinder.h> -#include <nnapi/Validation.h> -#include <nnapi/hal/aidl/Conversions.h> -#include <nnapi/hal/aidl/Utils.h> -#include <utils/NativeHandle.h> - -#include <memory> -#include <string> -#include <thread> -#include <utility> - -#include "SampleDriver.h" - -namespace android { -namespace nn { -namespace sample_driver { - -void notify(const std::shared_ptr<aidl_hal::IPreparedModelCallback>& callback, - const aidl_hal::ErrorStatus& status, - const std::shared_ptr<aidl_hal::IPreparedModel>& preparedModel) { - const auto ret = callback->notify(status, preparedModel); - if (!ret.isOk()) { - LOG(ERROR) << "Error when calling IPreparedModelCallback::notify: " << ret.getDescription() - << " " << ret.getMessage(); - } -} - -ndk::ScopedAStatus toAStatus(aidl_hal::ErrorStatus errorStatus) { - if (errorStatus == aidl_hal::ErrorStatus::NONE) { - return ndk::ScopedAStatus::ok(); - } - return ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(errorStatus)); -} - -ndk::ScopedAStatus toAStatus(aidl_hal::ErrorStatus errorStatus, const std::string& errorMessage) { - if (errorStatus == aidl_hal::ErrorStatus::NONE) { - return ndk::ScopedAStatus::ok(); - } - return 
ndk::ScopedAStatus::fromServiceSpecificErrorWithMessage( - static_cast<int32_t>(errorStatus), errorMessage.c_str()); -} - -ndk::ScopedAStatus prepareModelBase( - aidl_hal::Model&& model, const SampleDriver* driver, - aidl_hal::ExecutionPreference preference, aidl_hal::Priority priority, int64_t halDeadline, - const std::shared_ptr<aidl_hal::IPreparedModelCallback>& callback, - bool isFullModelSupported) { - const uid_t userId = AIBinder_getCallingUid(); - if (callback.get() == nullptr) { - LOG(ERROR) << "invalid callback passed to prepareModelBase"; - return toAStatus(aidl_hal::ErrorStatus::INVALID_ARGUMENT, - "invalid callback passed to prepareModelBase"); - } - const auto canonicalModel = convert(model); - if (!canonicalModel.has_value()) { - VLOG(DRIVER) << "invalid model passed to prepareModelBase"; - notify(callback, aidl_hal::ErrorStatus::INVALID_ARGUMENT, nullptr); - return toAStatus(aidl_hal::ErrorStatus::INVALID_ARGUMENT, - "invalid model passed to prepareModelBase"); - } - if (VLOG_IS_ON(DRIVER)) { - VLOG(DRIVER) << "prepareModelBase"; - logModelToInfo(canonicalModel.value()); - } - if (!aidl_hal::utils::valid(preference)) { - const std::string log_message = - "invalid execution preference passed to prepareModelBase: " + toString(preference); - VLOG(DRIVER) << log_message; - notify(callback, aidl_hal::ErrorStatus::INVALID_ARGUMENT, nullptr); - return toAStatus(aidl_hal::ErrorStatus::INVALID_ARGUMENT, log_message); - } - if (!aidl_hal::utils::valid(priority)) { - const std::string log_message = - "invalid priority passed to prepareModelBase: " + toString(priority); - VLOG(DRIVER) << log_message; - notify(callback, aidl_hal::ErrorStatus::INVALID_ARGUMENT, nullptr); - return toAStatus(aidl_hal::ErrorStatus::INVALID_ARGUMENT, log_message); - } - - if (!isFullModelSupported) { - VLOG(DRIVER) << "model is not fully supported"; - notify(callback, aidl_hal::ErrorStatus::INVALID_ARGUMENT, nullptr); - return ndk::ScopedAStatus::ok(); - } - - if (halDeadline < -1) { 
- notify(callback, aidl_hal::ErrorStatus::INVALID_ARGUMENT, nullptr); - return toAStatus(aidl_hal::ErrorStatus::INVALID_ARGUMENT, - "Invalid deadline: " + toString(halDeadline)); - } - const auto deadline = makeDeadline(halDeadline); - if (hasDeadlinePassed(deadline)) { - notify(callback, aidl_hal::ErrorStatus::MISSED_DEADLINE_PERSISTENT, nullptr); - return ndk::ScopedAStatus::ok(); - } - - // asynchronously prepare the model from a new, detached thread - std::thread( - [driver, preference, userId, priority, callback](aidl_hal::Model&& model) { - std::shared_ptr<SamplePreparedModel> preparedModel = - ndk::SharedRefBase::make<SamplePreparedModel>(std::move(model), driver, - preference, userId, priority); - if (!preparedModel->initialize()) { - notify(callback, aidl_hal::ErrorStatus::INVALID_ARGUMENT, nullptr); - return; - } - notify(callback, aidl_hal::ErrorStatus::NONE, preparedModel); - }, - std::move(model)) - .detach(); - - return ndk::ScopedAStatus::ok(); -} - -} // namespace sample_driver -} // namespace nn -} // namespace android
diff --git a/driver/sample_aidl/SampleDriverUtils.h b/driver/sample_aidl/SampleDriverUtils.h deleted file mode 100644 index 7205318..0000000 --- a/driver/sample_aidl/SampleDriverUtils.h +++ /dev/null
@@ -1,48 +0,0 @@ -/* - * Copyright (C) 2021 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef ANDROID_FRAMEWORKS_ML_NN_DRIVER_SAMPLE_AIDL_SAMPLE_DRIVER_UTILS_H -#define ANDROID_FRAMEWORKS_ML_NN_DRIVER_SAMPLE_AIDL_SAMPLE_DRIVER_UTILS_H - -#include <android/binder_auto_utils.h> - -#include <memory> -#include <string> - -#include "SampleDriver.h" - -namespace android { -namespace nn { -namespace sample_driver { - -void notify(const std::shared_ptr<aidl_hal::IPreparedModelCallback>& callback, - const aidl_hal::ErrorStatus& status, - const std::shared_ptr<aidl_hal::IPreparedModel>& preparedModel); - -ndk::ScopedAStatus prepareModelBase( - aidl_hal::Model&& model, const SampleDriver* driver, - aidl_hal::ExecutionPreference preference, aidl_hal::Priority priority, int64_t halDeadline, - const std::shared_ptr<aidl_hal::IPreparedModelCallback>& callback, - bool isFullModelSupported = true); - -ndk::ScopedAStatus toAStatus(aidl_hal::ErrorStatus errorStatus); -ndk::ScopedAStatus toAStatus(aidl_hal::ErrorStatus errorStatus, const std::string& errorMessage); - -} // namespace sample_driver -} // namespace nn -} // namespace android - -#endif // ANDROID_FRAMEWORKS_ML_NN_DRIVER_SAMPLE_AIDL_SAMPLE_DRIVER_UTILS_H
diff --git a/driver/sample_aidl/config/android.hardware.neuralnetworks-service-sample-all.rc b/driver/sample_aidl/config/android.hardware.neuralnetworks-service-sample-all.rc deleted file mode 100644 index 4923289..0000000 --- a/driver/sample_aidl/config/android.hardware.neuralnetworks-service-sample-all.rc +++ /dev/null
@@ -1,4 +0,0 @@ -service neuralnetworks_hal_service_aidl_sample_all /vendor/bin/hw/android.hardware.neuralnetworks-service-sample-all - class hal - user system - group system
diff --git a/driver/sample_aidl/config/android.hardware.neuralnetworks-service-sample-all.xml b/driver/sample_aidl/config/android.hardware.neuralnetworks-service-sample-all.xml deleted file mode 100644 index fea5fcc..0000000 --- a/driver/sample_aidl/config/android.hardware.neuralnetworks-service-sample-all.xml +++ /dev/null
@@ -1,6 +0,0 @@ -<manifest version="1.0" type="device"> - <hal format="aidl"> - <name>android.hardware.neuralnetworks</name> - <fqname>IDevice/nnapi-sample_all</fqname> - </hal> -</manifest>
diff --git a/driver/sample_aidl/config/android.hardware.neuralnetworks-service-sample-float-fast.rc b/driver/sample_aidl/config/android.hardware.neuralnetworks-service-sample-float-fast.rc deleted file mode 100644 index de6b807..0000000 --- a/driver/sample_aidl/config/android.hardware.neuralnetworks-service-sample-float-fast.rc +++ /dev/null
@@ -1,4 +0,0 @@ -service neuralnetworks_hal_service_aidl_sample_float_fast /vendor/bin/hw/android.hardware.neuralnetworks-service-sample-float-fast - class hal - user system - group system
diff --git a/driver/sample_aidl/config/android.hardware.neuralnetworks-service-sample-float-fast.xml b/driver/sample_aidl/config/android.hardware.neuralnetworks-service-sample-float-fast.xml deleted file mode 100644 index a245114..0000000 --- a/driver/sample_aidl/config/android.hardware.neuralnetworks-service-sample-float-fast.xml +++ /dev/null
@@ -1,6 +0,0 @@ -<manifest version="1.0" type="device"> - <hal format="aidl"> - <name>android.hardware.neuralnetworks</name> - <fqname>IDevice/nnapi-sample_float_fast</fqname> - </hal> -</manifest>
diff --git a/driver/sample_aidl/config/android.hardware.neuralnetworks-service-sample-float-slow.rc b/driver/sample_aidl/config/android.hardware.neuralnetworks-service-sample-float-slow.rc deleted file mode 100644 index e99171a..0000000 --- a/driver/sample_aidl/config/android.hardware.neuralnetworks-service-sample-float-slow.rc +++ /dev/null
@@ -1,4 +0,0 @@ -service neuralnetworks_hal_service_aidl_sample_float_slow /vendor/bin/hw/android.hardware.neuralnetworks-service-sample-float-slow - class hal - user system - group system
diff --git a/driver/sample_aidl/config/android.hardware.neuralnetworks-service-sample-float-slow.xml b/driver/sample_aidl/config/android.hardware.neuralnetworks-service-sample-float-slow.xml deleted file mode 100644 index e1126a4..0000000 --- a/driver/sample_aidl/config/android.hardware.neuralnetworks-service-sample-float-slow.xml +++ /dev/null
@@ -1,6 +0,0 @@ -<manifest version="1.0" type="device"> - <hal format="aidl"> - <name>android.hardware.neuralnetworks</name> - <fqname>IDevice/nnapi-sample_float_slow</fqname> - </hal> -</manifest>
diff --git a/driver/sample_aidl/config/android.hardware.neuralnetworks-service-sample-minimal.rc b/driver/sample_aidl/config/android.hardware.neuralnetworks-service-sample-minimal.rc deleted file mode 100644 index 58ad570..0000000 --- a/driver/sample_aidl/config/android.hardware.neuralnetworks-service-sample-minimal.rc +++ /dev/null
@@ -1,4 +0,0 @@ -service neuralnetworks_hal_service_aidl_sample_minimal /vendor/bin/hw/android.hardware.neuralnetworks-service-sample-minimal - class hal - user system - group system
diff --git a/driver/sample_aidl/config/android.hardware.neuralnetworks-service-sample-minimal.xml b/driver/sample_aidl/config/android.hardware.neuralnetworks-service-sample-minimal.xml deleted file mode 100644 index dcd2b1d..0000000 --- a/driver/sample_aidl/config/android.hardware.neuralnetworks-service-sample-minimal.xml +++ /dev/null
@@ -1,6 +0,0 @@ -<manifest version="1.0" type="device"> - <hal format="aidl"> - <name>android.hardware.neuralnetworks</name> - <fqname>IDevice/nnapi-sample_minimal</fqname> - </hal> -</manifest>
diff --git a/driver/sample_aidl/config/android.hardware.neuralnetworks-service-sample-quant.rc b/driver/sample_aidl/config/android.hardware.neuralnetworks-service-sample-quant.rc deleted file mode 100644 index 3151db5..0000000 --- a/driver/sample_aidl/config/android.hardware.neuralnetworks-service-sample-quant.rc +++ /dev/null
@@ -1,4 +0,0 @@ -service neuralnetworks_hal_service_aidl_sample_quant /vendor/bin/hw/android.hardware.neuralnetworks-service-sample-quant - class hal - user system - group system
diff --git a/driver/sample_aidl/config/android.hardware.neuralnetworks-service-sample-quant.xml b/driver/sample_aidl/config/android.hardware.neuralnetworks-service-sample-quant.xml deleted file mode 100644 index 30dc2ee..0000000 --- a/driver/sample_aidl/config/android.hardware.neuralnetworks-service-sample-quant.xml +++ /dev/null
@@ -1,6 +0,0 @@ -<manifest version="1.0" type="device"> - <hal format="aidl"> - <name>android.hardware.neuralnetworks</name> - <fqname>IDevice/nnapi-sample_quant</fqname> - </hal> -</manifest>
diff --git a/driver/sample_shim/Android.bp b/driver/sample_shim/Android.bp deleted file mode 100644 index a4e4d76..0000000 --- a/driver/sample_shim/Android.bp +++ /dev/null
@@ -1,99 +0,0 @@ -/* - * Copyright 2021 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -// Prebuilt generated using 'generated_prebuilts.sh' -package { - default_applicable_licenses: ["Android-Apache-2.0"], -} - -cc_prebuilt_library_shared { - name: "neuralnetworks_sample_sl_driver_prebuilt", - - shared_libs: [ - "libbase", - "libcutils", - "liblog", - "libnativewindow", - ], - proprietary: true, - vendor: true, - // libnativewindow versioning trips this check. 
b/181227567 for fixing - allow_undefined_symbols: true, - target: { - android_x86_64: { - srcs: ["android_x86_64/neuralnetworks_sample_sl_driver_prebuilt.so"], - }, - android_x86: { - srcs: ["android_x86/neuralnetworks_sample_sl_driver_prebuilt.so"], - }, - android_arm64: { - srcs: ["android_arm64/neuralnetworks_sample_sl_driver_prebuilt.so"], - }, - android_arm: { - srcs: ["android_arm/neuralnetworks_sample_sl_driver_prebuilt.so"], - }, - }, - apex_available: ["//apex_available:platform"], -} - -cc_defaults { - name: "NeuralNetworksShimDriverAidl_defaults", - defaults: ["neuralnetworks_defaults"], - header_libs: [ - "libneuralnetworks_headers", - ], - cflags: [ - "-DNN_COMPATIBILITY_LIBRARY_BUILD", - ], - static_libs: [ - "android.hardware.common-V2-ndk_platform", - "android.hardware.graphics.common-V2-ndk_platform", - "android.hardware.neuralnetworks-V1-ndk_platform", - "libaidlcommonsupport", - "libarect", - "libcutils", - "libneuralnetworks_shim_static", - "neuralnetworks_supportlibrary_loader", - "neuralnetworks_utils_hal_aidl", - "neuralnetworks_utils_hal_common", - ], - shared_libs: [ - "libbase", - "libbinder_ndk", - "libhidlbase", - "libhidlmemory", - "liblog", - "libnativewindow", - "libutils", - "neuralnetworks_sample_sl_driver_prebuilt", - ], - -} - -cc_defaults { - name: "NeuralNetworksShimDriverAidl_server_defaults", - defaults: ["NeuralNetworksShimDriverAidl_defaults"], - relative_install_path: "hw", - proprietary: true, -} - -cc_binary { - name: "android.hardware.neuralnetworks-shim-service-sample", - srcs: ["ShimServiceSample.cpp"], - defaults: ["NeuralNetworksShimDriverAidl_server_defaults"], - init_rc: ["config/android.hardware.neuralnetworks-shim-service-sample.rc"], - vintf_fragments: ["config/android.hardware.neuralnetworks-shim-service-sample.xml"], -}
diff --git a/driver/sample_shim/ShellServiceSample.cpp b/driver/sample_shim/ShellServiceSample.cpp deleted file mode 100644 index 6c3eda5..0000000 --- a/driver/sample_shim/ShellServiceSample.cpp +++ /dev/null
@@ -1,133 +0,0 @@ -/* - * Copyright (C) 2021 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#define LOG_TAG "ShellServiceSample" - -#include <NeuralNetworksShim.h> -#include <aidl/android/hardware/neuralnetworks/BnDevice.h> -#include <android-base/logging.h> -#include <android-base/scopeguard.h> -#include <android/binder_enums.h> -#include <android/binder_manager.h> -#include <android/binder_process.h> -#include <dlfcn.h> -#include <nnapi/hal/aidl/InvalidDevice.h> - -#include <algorithm> -#include <limits> -#include <string> -#include <type_traits> -#include <unordered_map> -#include <utility> -#include <vector> - -typedef struct NnApiSLDriverImpl NnApiSLDriverImpl; - -namespace aidl::android::hardware::neuralnetworks { -namespace { - -struct Names { - std::string driverName; - std::string serviceName; -}; - -void registerInvalidDevices(const std::vector<Names>& names) { - for (const auto& [_, name] : names) { - const auto invalidDevice = InvalidDevice::create(); - const std::string instance = std::string(IDevice::descriptor) + "/" + name; - LOG(INFO) << "Attempting service registration for " << instance; - const auto status = AServiceManager_registerLazyService(invalidDevice->asBinder().get(), - instance.c_str()); - if (status != STATUS_OK) { - LOG(ERROR) << "AServiceManager_registerLazyService failed for " << name - << ", error code " << status; - return; - } - } - ABinderProcess_setThreadPoolMaxThreadCount(15); - 
ABinderProcess_joinThreadPool(); -} - -int registerDevices(const std::string& driverPath, const std::vector<Names>& devices) { - // Load support library. - void* libHandle = dlopen(driverPath.c_str(), RTLD_LAZY | RTLD_LOCAL); - if (libHandle == nullptr) { - LOG(ERROR) << "Failed to load sample SL driver: " << driverPath; - registerInvalidDevices(devices); - return EXIT_FAILURE; - } - - // Load support library entry point. - using GetSlDriverImplFn = std::add_pointer_t<NnApiSLDriverImpl*()>; - GetSlDriverImplFn getSlDriverImpl = reinterpret_cast<GetSlDriverImplFn>( - dlsym(libHandle, "ANeuralNetworks_getSLDriverImpl")); - if (getSlDriverImpl == nullptr) { - LOG(ERROR) << "Failed to find ANeuralNetworks_getSLDriverImpl symbol in: " << driverPath; - registerInvalidDevices(devices); - return EXIT_FAILURE; - } - - // Call support library entry point to obtain functionality. - NnApiSLDriverImpl* impl = getSlDriverImpl(); - if (impl == nullptr) { - LOG(ERROR) << "ANeuralNetworks_getSLDriverImpl returned nullptr: " << driverPath; - registerInvalidDevices(devices); - return EXIT_FAILURE; - } - - ANeuralNetworksShimRegistrationParams* params; - ANeuralNetworksShimRegistrationParams_create(impl, ¶ms); - const auto guardParams = ::android::base::make_scope_guard( - [params] { ANeuralNetworksShimRegistrationParams_free(params); }); - - // The default is 15, use more only if there's more devices exposed. 
- ANeuralNetworksShimRegistrationParams_setNumberOfListenerThreads(params, 15); - ANeuralNetworksShimRegistrationParams_registerAsLazyService(params, /*asLazy=*/true); - ANeuralNetworksShimRegistrationParams_fallbackToMinimumSupportDevice(params, /*fallback=*/true); - - for (const auto& device : devices) { - ANeuralNetworksShimDeviceInfo* deviceInfo; - ANeuralNetworksShimDeviceInfo_create(&deviceInfo, device.driverName.c_str(), - device.serviceName.c_str()); - const auto guardDeviceInfo = ::android::base::make_scope_guard( - [deviceInfo] { ANeuralNetworksShimDeviceInfo_free(deviceInfo); }); - - ANeuralNetworksShimRegistrationParams_addDeviceInfo(params, deviceInfo); - } - - // Register the support library as a binderized AIDL service. - auto result = ANeuralNetworksShim_registerSupportLibraryService(params); - LOG(ERROR) << "ANeuralNetworksShim_registerSupportLibraryService returned with error status: " - << result; - - return EXIT_FAILURE; -} - -} // namespace -} // namespace aidl::android::hardware::neuralnetworks - -using aidl::android::hardware::neuralnetworks::Names; -using aidl::android::hardware::neuralnetworks::registerDevices; - -int main() { - const std::string driverPath = "/vendor/lib64/neuralnetworks_sample_sl_driver_prebuilt.so"; - - const std::vector<Names> devicesToRegister = { - {.driverName = "nnapi-sample_sl", .serviceName = "nnapi-sample_sl_updatable"}, - }; - - return registerDevices(driverPath, devicesToRegister); -}
diff --git a/driver/sample_shim/ShimServiceSample.cpp b/driver/sample_shim/ShimServiceSample.cpp deleted file mode 100644 index a99b09f..0000000 --- a/driver/sample_shim/ShimServiceSample.cpp +++ /dev/null
@@ -1,57 +0,0 @@ -/* - * Copyright (C) 2021 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#define LOG_TAG "ShimServiceSample" - -#include <android-base/logging.h> -#include <android-base/scopeguard.h> -#include <dlfcn.h> - -#include "NeuralNetworksShim.h" -#include "SupportLibrarySymbols.h" - -int main() { - NnApiSLDriverImpl* impl = ANeuralNetworks_getSLDriverImpl(); - if (impl == nullptr) { - LOG(ERROR) << "ANeuralNetworks_getSLDriverImpl returned nullptr"; - return EXIT_FAILURE; - } - - ANeuralNetworksShimDeviceInfo* deviceInfo; - ANeuralNetworksShimDeviceInfo_create(&deviceInfo, - /*deviceName=*/"nnapi-sample_sl", - /*serviceName=*/"nnapi-sample_sl_shim"); - const auto guardDeviceInfo = android::base::make_scope_guard( - [deviceInfo] { ANeuralNetworksShimDeviceInfo_free(deviceInfo); }); - - ANeuralNetworksShimRegistrationParams* params; - ANeuralNetworksShimRegistrationParams_create(impl, ¶ms); - const auto guardParams = android::base::make_scope_guard( - [params] { ANeuralNetworksShimRegistrationParams_free(params); }); - ANeuralNetworksShimRegistrationParams_addDeviceInfo(params, deviceInfo); - // The default is 15, use more only if there's more devices exposed. 
- ANeuralNetworksShimRegistrationParams_setNumberOfListenerThreads(params, 15); - ANeuralNetworksShimRegistrationParams_registerAsLazyService(params, /*asLazy=*/false); - ANeuralNetworksShimRegistrationParams_fallbackToMinimumSupportDevice(params, - /*fallback=*/false); - - auto result = ANeuralNetworksShim_registerSupportLibraryService(params); - - LOG(ERROR) << "ANeuralNetworksShim_registerSupportLibraryService returned with error status: " - << result; - - return EXIT_FAILURE; -}
diff --git a/driver/sample_shim/android_arm/neuralnetworks_sample_sl_driver_prebuilt.so b/driver/sample_shim/android_arm/neuralnetworks_sample_sl_driver_prebuilt.so deleted file mode 100755 index 7c597b6..0000000 --- a/driver/sample_shim/android_arm/neuralnetworks_sample_sl_driver_prebuilt.so +++ /dev/null Binary files differ
diff --git a/driver/sample_shim/android_arm64/neuralnetworks_sample_sl_driver_prebuilt.so b/driver/sample_shim/android_arm64/neuralnetworks_sample_sl_driver_prebuilt.so deleted file mode 100755 index 779817a..0000000 --- a/driver/sample_shim/android_arm64/neuralnetworks_sample_sl_driver_prebuilt.so +++ /dev/null Binary files differ
diff --git a/driver/sample_shim/android_x86/neuralnetworks_sample_sl_driver_prebuilt.so b/driver/sample_shim/android_x86/neuralnetworks_sample_sl_driver_prebuilt.so deleted file mode 100755 index f87707e..0000000 --- a/driver/sample_shim/android_x86/neuralnetworks_sample_sl_driver_prebuilt.so +++ /dev/null Binary files differ
diff --git a/driver/sample_shim/android_x86_64/neuralnetworks_sample_sl_driver_prebuilt.so b/driver/sample_shim/android_x86_64/neuralnetworks_sample_sl_driver_prebuilt.so deleted file mode 100755 index d56ddfd..0000000 --- a/driver/sample_shim/android_x86_64/neuralnetworks_sample_sl_driver_prebuilt.so +++ /dev/null Binary files differ
diff --git a/driver/sample_shim/config/android.hardware.neuralnetworks-shell-service-sample.rc b/driver/sample_shim/config/android.hardware.neuralnetworks-shell-service-sample.rc deleted file mode 100644 index 5458ea5..0000000 --- a/driver/sample_shim/config/android.hardware.neuralnetworks-shell-service-sample.rc +++ /dev/null
@@ -1,7 +0,0 @@ -service neuralnetworks_hal_service_shell_sample /vendor/bin/hw/android.hardware.neuralnetworks-shell-service-sample - interface aidl android.hardware.neuralnetworks.IDevice/nnapi-sample_sl_updatable - disabled - oneshot - class hal - user system - group system
diff --git a/driver/sample_shim/config/android.hardware.neuralnetworks-shell-service-sample.xml b/driver/sample_shim/config/android.hardware.neuralnetworks-shell-service-sample.xml deleted file mode 100644 index cd32298..0000000 --- a/driver/sample_shim/config/android.hardware.neuralnetworks-shell-service-sample.xml +++ /dev/null
@@ -1,6 +0,0 @@ -<manifest version="1.0" type="device"> - <hal format="aidl" updatable-via-apex="com.android.neuralnetworks"> - <name>android.hardware.neuralnetworks</name> - <fqname>IDevice/nnapi-sample_sl_updatable</fqname> - </hal> -</manifest>
diff --git a/driver/sample_shim/config/android.hardware.neuralnetworks-shim-service-sample.rc b/driver/sample_shim/config/android.hardware.neuralnetworks-shim-service-sample.rc deleted file mode 100644 index ef0a669..0000000 --- a/driver/sample_shim/config/android.hardware.neuralnetworks-shim-service-sample.rc +++ /dev/null
@@ -1,4 +0,0 @@ -service neuralnetworks_hal_service_shim_sample /vendor/bin/hw/android.hardware.neuralnetworks-shim-service-sample - class hal - user system - group system
diff --git a/driver/sample_shim/config/android.hardware.neuralnetworks-shim-service-sample.xml b/driver/sample_shim/config/android.hardware.neuralnetworks-shim-service-sample.xml deleted file mode 100644 index 9fd6eb8..0000000 --- a/driver/sample_shim/config/android.hardware.neuralnetworks-shim-service-sample.xml +++ /dev/null
@@ -1,6 +0,0 @@ -<manifest version="1.0" type="device"> - <hal format="aidl"> - <name>android.hardware.neuralnetworks</name> - <fqname>IDevice/nnapi-sample_sl_shim</fqname> - </hal> -</manifest>
diff --git a/driver/sample_shim/generate_prebuilts.sh b/driver/sample_shim/generate_prebuilts.sh deleted file mode 100755 index 812a5a9..0000000 --- a/driver/sample_shim/generate_prebuilts.sh +++ /dev/null
@@ -1,34 +0,0 @@ -#!/bin/bash -# -# Generate sample SL driver prebuilts - -eval set -- "$OPTS" -if [[ -z "$ANDROID_BUILD_TOP" ]]; then - echo ANDROID_BUILD_TOP not set, bailing out - echo you must run lunch before running this script - exit 1 -fi - -set -e -cd $ANDROID_BUILD_TOP - -source build/envsetup.sh -ARCHS="x86,arm,arm64,x86_64" -SAMPLE_SL_DRIVER="neuralnetworks_sample_sl_driver" - -for arch in ${ARCHS//,/ } -do - lunch "aosp_${arch}-userdebug" - - LIB=lib - if [[ $arch =~ "64" ]]; then - LIB=lib64 - fi - - TMPFILE=$(mktemp) - build/soong/soong_ui.bash --make-mode ${SAMPLE_SL_DRIVER} 2>&1 | tee ${TMPFILE} - TARGETDIR=packages/modules/NeuralNetworks/driver/sample_shim/android_${arch}/neuralnetworks_sample_sl_driver_prebuilt.so - mkdir -p ${TARGETDIR%/*} - cp $OUT/system/${LIB}/neuralnetworks_sample_sl_driver.so ${TARGETDIR} -done -
diff --git a/extensions/example/fibonacci/Android.bp b/extensions/example/fibonacci/Android.bp index 878868f..ebcbdb4 100644 --- a/extensions/example/fibonacci/Android.bp +++ b/extensions/example/fibonacci/Android.bp
@@ -1,7 +1,3 @@ -package { - default_applicable_licenses: ["Android-Apache-2.0"], -} - cc_library_headers { name: "neuralnetworks_example_fibonacci_extension", host_supported: false,
diff --git a/runtime/Android.bp b/runtime/Android.bp index 52b3e92..d5f7787 100644 --- a/runtime/Android.bp +++ b/runtime/Android.bp
@@ -14,41 +14,6 @@ * limitations under the License. */ -package { - default_applicable_licenses: [ - "packages_modules_NeuralNetworks_runtime_license", - ], -} - -// Added automatically by a large-scale-change that took the approach of -// 'apply every license found to every target'. While this makes sure we respect -// every license restriction, it may not be entirely correct. -// -// e.g. GPL in an MIT project might only apply to the contrib/ directory. -// -// Please consider splitting the single license below into multiple licenses, -// taking care not to lose any license_kind information, and overriding the -// default license using the 'licenses: [...]' property on targets as needed. -// -// For unused files, consider creating a 'fileGroup' with "//visibility:private" -// to attach the license to, and including a comment whether the files may be -// used in the current project. -// See: http://go/android-license-faq -license { - name: "packages_modules_NeuralNetworks_runtime_license", - visibility: [":__subpackages__"], - license_kinds: [ - "SPDX-license-identifier-Apache-2.0", - "SPDX-license-identifier-Artistic", - "SPDX-license-identifier-BSD", - "SPDX-license-identifier-MPL", - "SPDX-license-identifier-MPL-2.0", - ], - license_text: [ - "NOTICE", - ], -} - cc_library_headers { name: "libneuralnetworks_headers", host_supported: false, @@ -56,20 +21,14 @@ export_include_dirs: ["include"], apex_available: [ "com.android.neuralnetworks", - "test_com.android.neuralnetworks", // Due to the dependency from libneuralnetworks_common + "test_com.android.neuralnetworks", + // Due to the dependency from libneuralnetworks_common // that is available to the platform - "//apex_available:platform", ], } cc_library_headers { - name: "libneuralnetworks_headers_ndk", - export_include_dirs: ["include"], - sdk_version: "current", -} - -cc_library_headers { name: "libneuralnetworks_private_headers", host_supported: false, export_include_dirs: ["."], @@ -82,11 +41,10 @@ // 
b/109953668, disable OpenMP // openmp: true, srcs: [ - "AppInfoFetcher.cpp", "BurstBuilder.cpp", + "Callbacks.cpp", "CompilationBuilder.cpp", "ExecutionBuilder.cpp", - "ExecutionCallback.cpp", "ExecutionPlan.cpp", "Manager.cpp", "Memory.cpp", @@ -94,6 +52,7 @@ "ModelBuilder.cpp", "NeuralNetworks.cpp", "TypeManager.cpp", + "VersionedInterfaces.cpp", ], target: { @@ -114,16 +73,12 @@ // TODO(pszczepaniak, b/144488395): Use system libnativewindow, // this would remove half of dependencies here. static_libs: [ - "android.hardware.common-V2-ndk_platform", - "android.hardware.graphics.common-V2-ndk_platform", - "android.hardware.neuralnetworks-V1-ndk_platform", "[email protected]", "[email protected]", "[email protected]", "[email protected]", "[email protected]", "[email protected]", - "libaidlcommonsupport", "libbase", "libcrypto_static", "libcutils", @@ -134,9 +89,9 @@ "libmath", "libneuralnetworks_common", "libprocessgroup", + "libsync", "libtextclassifier_hash_static", "libutils", - "neuralnetworks_types", ], stl: "libc++_static", @@ -146,7 +101,6 @@ ], shared_libs: [ - "libbinder_ndk", "libcgrouprc", "libvndksupport", ], @@ -162,10 +116,6 @@ cc_library_shared { name: "libneuralnetworks", - llndk: { - symbol_file: "libneuralnetworks.map.txt", - override_export_include_dirs: ["include"], - }, defaults: [ "libneuralnetworks_defaults", "neuralnetworks_defaults", @@ -178,7 +128,6 @@ stubs: { versions: [ "30", - "31", ], symbol_file: "libneuralnetworks.map.txt", }, @@ -194,54 +143,11 @@ apex_available: ["//apex_available:platform"], } -cc_library_static { - name: "libneuralnetworks_cl", - defaults: [ - "neuralnetworks_cl_defaults", - "neuralnetworks_defaults", - ], - apex_available: ["//apex_available:platform"], - // b/109953668, disable OpenMP - // openmp: true, - srcs: [ - "BurstBuilder.cpp", - "CompilationBuilder.cpp", - "ExecutionBuilder.cpp", - "ExecutionCallback.cpp", - "ExecutionPlan.cpp", - "Manager.cpp", - "Memory.cpp", - "ModelArgumentInfo.cpp", - 
"ModelBuilder.cpp", - "NeuralNetworks.cpp", - "TypeManager.cpp", - ], - static_libs: [ - "libbase_ndk", - "libcrypto_static", - "libneuralnetworks_common_cl", - "neuralnetworks_types_cl", - ], - shared_libs: [ - "libnativewindow", - ], - header_libs: [ - "libneuralnetworks_headers_ndk", - "neuralnetworks_supportlibrary_types_ndk", - ], - export_header_lib_headers: [ - "libneuralnetworks_headers_ndk", - ], -} - ndk_headers { name: "libneuralnetworks_ndk_headers", from: "include", to: "android", - srcs: [ - "include/NeuralNetworks.h", - "include/NeuralNetworksTypes.h", - ], + srcs: ["include/NeuralNetworks.h"], license: "NOTICE", } @@ -251,3 +157,9 @@ // Android O-MR1 first_version: "27", } + +llndk_library { + name: "libneuralnetworks", + symbol_file: "libneuralnetworks.map.txt", + export_include_dirs: ["include"], +}
diff --git a/runtime/AppInfoFetcher.cpp b/runtime/AppInfoFetcher.cpp deleted file mode 100644 index 7b90fc6..0000000 --- a/runtime/AppInfoFetcher.cpp +++ /dev/null
@@ -1,74 +0,0 @@ -/* - * Copyright (C) 2021 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#define LOG_TAG "AppInfoFetcher" - -#include "AppInfoFetcher.h" - -#include <PackageInfo.h> -#include <Utils.h> -#include <android-base/file.h> -#include <android-base/properties.h> -#include <binder/IServiceManager.h> -#include <procpartition/procpartition.h> - -#include <algorithm> -#include <limits> -#include <map> -#include <memory> -#include <string> -#include <string_view> -#include <vector> - -namespace android { -namespace nn { - -namespace { - -// Query PackageManagerNative service about Android app properties. -// On success, it will populate appInfo->app* fields. 
-bool fetchAppPackageLocationInfo(uid_t uid, AppInfoFetcher::AppInfo* appInfo) { - ANeuralNetworks_PackageInfo packageInfo; - if (!ANeuralNetworks_fetch_PackageInfo(uid, &packageInfo)) { - return false; - } - appInfo->appPackageName = packageInfo.appPackageName; - appInfo->appIsSystemApp = packageInfo.appIsSystemApp; - appInfo->appIsOnVendorImage = packageInfo.appIsOnVendorImage; - appInfo->appIsOnProductImage = packageInfo.appIsOnProductImage; - - ANeuralNetworks_free_PackageInfo(&packageInfo); - return true; -} - -} // namespace - -AppInfoFetcher::AppInfoFetcher() - : appInfo({.binaryPath = ::android::procpartition::getExe(getpid()), - .appPackageName = "", - .appIsSystemApp = false, - .appIsOnVendorImage = false, - .appIsOnProductImage = false}) { - if (appInfo.binaryPath == "/system/bin/app_process64" || - appInfo.binaryPath == "/system/bin/app_process32") { - if (!fetchAppPackageLocationInfo(getuid(), &appInfo)) { - LOG(ERROR) << "Failed to get app information from package_manager_native"; - } - } -} - -} // namespace nn -} // namespace android
diff --git a/runtime/AppInfoFetcher.h b/runtime/AppInfoFetcher.h deleted file mode 100644 index 8d7618f..0000000 --- a/runtime/AppInfoFetcher.h +++ /dev/null
@@ -1,62 +0,0 @@ -/* - * Copyright (C) 2020 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef ANDROID_FRAMEWORKS_ML_NN_RUNTIME_APP_INFO_FETCHER_H -#define ANDROID_FRAMEWORKS_ML_NN_RUNTIME_APP_INFO_FETCHER_H - -#include <string> - -namespace android { -namespace nn { - -// Manages client app information. -// -// This class gathers information about client application -// and provides a unified way to access it. -class AppInfoFetcher { - public: - static AppInfoFetcher* get() { - static AppInfoFetcher info; - return &info; - } - - // Collection of NNAPI client app-related information - struct AppInfo { - // Path of the binary (/proc/$PID/exe) - std::string binaryPath; - // Package name of the Android app (empty string if not Android app). - std::string appPackageName; - // Is the app a system app? (false if not an Android app) - bool appIsSystemApp; - // Is the app preinstalled on vendor image? (false if not an Android app) - bool appIsOnVendorImage; - // Is the app preinstalled on product image? (false if not an Android app) - bool appIsOnProductImage; - }; - - // Get App-replated information - const AppInfo& getAppInfo() const { return appInfo; } - - private: - AppInfoFetcher(); - - AppInfo appInfo; -}; - -} // namespace nn -} // namespace android - -#endif // ANDROID_FRAMEWORKS_ML_NN_RUNTIME_APP_PACKAGE_INFO_H
diff --git a/runtime/BurstBuilder.cpp b/runtime/BurstBuilder.cpp index 8383fa4..f8aa6be 100644 --- a/runtime/BurstBuilder.cpp +++ b/runtime/BurstBuilder.cpp
@@ -18,19 +18,14 @@ #include "BurstBuilder.h" -#include <nnapi/IBurst.h> - -#include <memory> -#include <utility> -#include <vector> - #include "CompilationBuilder.h" +#include "ExecutionBurstController.h" namespace android { namespace nn { BurstBuilder::BurstBuilder(const CompilationBuilder* compilation, - std::vector<SharedBurst> burstControllers) + std::vector<std::shared_ptr<ExecutionBurstController>> burstControllers) : mCompilation(compilation), mBurstControllers(std::move(burstControllers)) {} bool BurstBuilder::tryLock() { @@ -46,7 +41,7 @@ return mCompilation; } -SharedBurst BurstBuilder::getControllerAt(size_t index) const { +std::shared_ptr<ExecutionBurstController> BurstBuilder::getControllerAt(size_t index) const { return index < mBurstControllers.size() ? mBurstControllers[index] : nullptr; }
diff --git a/runtime/BurstBuilder.h b/runtime/BurstBuilder.h index c719ba6..6a3ba78 100644 --- a/runtime/BurstBuilder.h +++ b/runtime/BurstBuilder.h
@@ -17,11 +17,10 @@ #ifndef ANDROID_FRAMEWORKS_ML_NN_RUNTIME_BURST_BUILDER_H #define ANDROID_FRAMEWORKS_ML_NN_RUNTIME_BURST_BUILDER_H -#include <nnapi/IBurst.h> - #include <atomic> #include <memory> #include <vector> +#include "ExecutionBurstController.h" namespace android { namespace nn { @@ -40,18 +39,19 @@ class BurstBuilder { public: - BurstBuilder(const CompilationBuilder* compilation, std::vector<SharedBurst> burstControllers); + BurstBuilder(const CompilationBuilder* compilation, + std::vector<std::shared_ptr<ExecutionBurstController>> burstControllers); bool tryLock(); void unlock(); const CompilationBuilder* getCompilation() const; - SharedBurst getControllerAt(size_t index) const; + std::shared_ptr<ExecutionBurstController> getControllerAt(size_t index) const; private: std::atomic_flag mCurrentlyRunning = ATOMIC_FLAG_INIT; const CompilationBuilder* mCompilation; - std::vector<SharedBurst> mBurstControllers; + std::vector<std::shared_ptr<ExecutionBurstController>> mBurstControllers; }; } // namespace nn
diff --git a/runtime/Callbacks.cpp b/runtime/Callbacks.cpp new file mode 100644 index 0000000..6a81b9c --- /dev/null +++ b/runtime/Callbacks.cpp
@@ -0,0 +1,256 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#define LOG_TAG "Callbacks" + +#include "Callbacks.h" + +#include <android-base/logging.h> +#include <limits> +#include <utility> +#include <vector> + +namespace android::nn { + +using namespace hal; + +constexpr Timing kNoTiming = {.timeOnDevice = std::numeric_limits<uint64_t>::max(), + .timeInDriver = std::numeric_limits<uint64_t>::max()}; + +// PreparedModelCallback methods begin here + +Return<void> PreparedModelCallback::notifyInternal(bool deadObject, ErrorStatus errorStatus, + const sp<V1_0::IPreparedModel>& preparedModel) { + { + std::lock_guard<std::mutex> hold(mMutex); + + // quick-return if object has already been notified + if (mNotified) { + return Void(); + } + + // store results and mark as notified + mDeadObject = deadObject; + mErrorStatus = errorStatus; + mPreparedModel = preparedModel; + mNotified = true; + } + + mCondition.notify_all(); + return Void(); +} + +Return<void> PreparedModelCallback::notify(V1_0::ErrorStatus errorStatus, + const sp<V1_0::IPreparedModel>& preparedModel) { + return notifyInternal(false, static_cast<ErrorStatus>(errorStatus), preparedModel); +} + +Return<void> PreparedModelCallback::notify_1_2(V1_0::ErrorStatus errorStatus, + const sp<V1_2::IPreparedModel>& preparedModel) { + return notifyInternal(false, static_cast<ErrorStatus>(errorStatus), preparedModel); +} + +Return<void> 
PreparedModelCallback::notify_1_3(ErrorStatus errorStatus, + const sp<V1_3::IPreparedModel>& preparedModel) { + return notifyInternal(false, errorStatus, preparedModel); +} + +void PreparedModelCallback::notifyAsDeadObject() { + notifyInternal(true, ErrorStatus::GENERAL_FAILURE, nullptr); +} + +void PreparedModelCallback::wait() const { + std::unique_lock<std::mutex> lock(mMutex); + mCondition.wait(lock, [this] { return mNotified; }); +} + +ErrorStatus PreparedModelCallback::getStatus() const { + wait(); + return mErrorStatus; +} + +sp<V1_0::IPreparedModel> PreparedModelCallback::getPreparedModel() const { + wait(); + return mPreparedModel; +} + +bool PreparedModelCallback::isDeadObject() const { + wait(); + return mDeadObject; +} + +// ExecutionCallback methods begin here + +Return<void> ExecutionCallback::notify(V1_0::ErrorStatus errorStatus) { + return notifyInternal(false, static_cast<ErrorStatus>(errorStatus), {}, kNoTiming); +} + +Return<void> ExecutionCallback::notify_1_2(V1_0::ErrorStatus errorStatus, + const hidl_vec<OutputShape>& outputShapes, + const Timing& timing) { + return notifyInternal(false, static_cast<ErrorStatus>(errorStatus), outputShapes, timing); +} + +Return<void> ExecutionCallback::notify_1_3(V1_3::ErrorStatus errorStatus, + const hidl_vec<OutputShape>& outputShapes, + const Timing& timing) { + return notifyInternal(false, errorStatus, outputShapes, timing); +} + +void ExecutionCallback::notifyAsDeadObject() { + notifyInternal(true, ErrorStatus::GENERAL_FAILURE, {}, kNoTiming); +} + +void ExecutionCallback::wait() const { + std::unique_lock<std::mutex> lock(mMutex); + mCondition.wait(lock, [this] { return mNotified; }); + + /* + * Note that we cannot call std::thread::join from ExecutionCallback's + * destructor: ExecutionCallback is intended to be reference counted, and it + * is possible that the reference count drops to zero in the bound thread, + * causing the bound thread to call this destructor. 
If a thread tries to + * join itself, it throws an exception, producing a message like the + * following: + * + * terminating with uncaught exception of type std::__1::system_error: + * thread::join failed: Resource deadlock would occur + */ + if (mThread.joinable()) { + mThread.join(); + } +} + +ErrorStatus ExecutionCallback::getStatus() const { + wait(); + return mErrorStatus; +} + +const std::vector<OutputShape>& ExecutionCallback::getOutputShapes() const { + wait(); + return mOutputShapes; +} + +Timing ExecutionCallback::getTiming() const { + wait(); + return mTiming; +} + +bool ExecutionCallback::isDeadObject() const { + wait(); + return mDeadObject; +} + +bool ExecutionCallback::bindThread(std::thread asyncThread) { + std::lock_guard<std::mutex> lock(mMutex); + + // Ensure ExecutionCallback object does not already have a thread bound + if (mThread.joinable()) { + LOG(ERROR) << "ExecutionCallback::bindThread -- a thread has already been bound to this " + "callback object"; + return false; + } + + // Ensure the new thread is valid + if (!asyncThread.joinable()) { + LOG(ERROR) << "ExecutionCallback::bindThread -- the new thread is not joinable"; + return false; + } + + mThread = std::move(asyncThread); + return true; +} + +void ExecutionCallback::setOnFinish(const ExecutionFinish& finish) { + std::lock_guard<std::mutex> hold(mMutex); + + // Ensure ExecutionCallback object does not already have a "finish" callback + if (mOnFinish != nullptr) { + LOG(ERROR) << "ExecutionCallback::setOnFinish -- object already has a \"finish\" callback"; + return; + } + + // Ensure new "finish" callback is valid + if (finish == nullptr) { + LOG(ERROR) << "ExecutionCallback::setOnFinish -- \"finish\" callback is invalid"; + return; + } + + // Essure ExecutionCallback object has not already been notified + if (mNotified) { + LOG(ERROR) << "ExecutionCallback::setOnFinish -- ExecutionCallback has already been " + "notified with results"; + return; + } + + mOnFinish = finish; +} + 
+Return<void> ExecutionCallback::notifyInternal(bool deadObject, ErrorStatus errorStatus, + std::vector<OutputShape> outputShapes, + Timing timing) { + // check results + if (!deadObject) { + if (errorStatus == ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) { + // outputShapes must not be empty if OUTPUT_INSUFFICIENT_SIZE. + if (outputShapes.size() == 0) { + LOG(ERROR) + << "Notified with empty output shape vector when OUTPUT_INSUFFICIENT_SIZE"; + errorStatus = ErrorStatus::GENERAL_FAILURE; + outputShapes = {}; + timing = kNoTiming; + } + } else if (errorStatus != ErrorStatus::NONE) { + // outputShapes must be empty if errorStatus is neither NONE nor + // OUTPUT_INSUFFICIENT_SIZE. + if (outputShapes.size() != 0) { + LOG(ERROR) << "Notified with non-empty output shape vector when error status is " + "neither NONE nor OUTPUT_INSUFFICIENT_SIZE"; + errorStatus = ErrorStatus::GENERAL_FAILURE; + outputShapes = {}; + timing = kNoTiming; + } + } + } + + // store results + { + std::lock_guard<std::mutex> hold(mMutex); + + // quick-return if object has already been notified + if (mNotified) { + return Void(); + } + + mDeadObject = deadObject; + mErrorStatus = errorStatus; + mOutputShapes = std::move(outputShapes); + mTiming = timing; + mNotified = true; + + if (mOnFinish != nullptr) { + ErrorStatus status = mOnFinish(mErrorStatus, mOutputShapes); + mOnFinish = nullptr; + if (status != ErrorStatus::NONE) { + mErrorStatus = status; + } + } + } + mCondition.notify_all(); + return Void(); +} + +} // namespace android::nn
diff --git a/runtime/Callbacks.h b/runtime/Callbacks.h new file mode 100644 index 0000000..7537025 --- /dev/null +++ b/runtime/Callbacks.h
@@ -0,0 +1,482 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ANDROID_FRAMEWORKS_ML_NN_RUNTIME_CALLBACKS_H +#define ANDROID_FRAMEWORKS_ML_NN_RUNTIME_CALLBACKS_H + +#include "HalInterfaces.h" + +#include <android-base/thread_annotations.h> +#include <condition_variable> +#include <functional> +#include <mutex> +#include <thread> +#include <vector> + +/* + * The Callback classes are used internally by the NeuralNetworks runtime to + * synchronize between different threads. An asynchronous task is launched + * paired with a callback object. When a client thread requires the output being + * generated by the asynchronous task, the client thread can wait for the result + * and be blocked until it has completed. Any wait may safely be called + * concurrently, even on the same callback object. When the asynchronous task + * has finished its workload, it must immediately call "notify*". If the + * asynchronous task has failed to launch, the function that tried to launch the + * asynchronous task must immediately call "notify*". This "notify*" call + * awakens any client threads waiting on the callback object. + * + * These classes exist to enable synchronization across HIDL. When + * synchronization is only required in the same process, consider using + * std::future, std::mutex, std::condition_variable, or std::experimental::latch + * instead. 
+ */ + +namespace android::nn { + +/** + * The PreparedModelCallback class is used to receive the error status of + * preparing a model as well as the prepared model from a task executing + * asynchronously with respect to the runtime. If a calling thread calls wait + * or get* on a PreparedModelCallback object and the corresponding asynchronous + * task has not finished preparing the model, the calling thread will block + * until the asynchronous task has called notify*. + * + * If the callback object is notified more than once, only the results of the + * first call to notify* are used, and the results from subsequent calls are + * discarded. + * + * This callback object is passed as an argument to IDevice::prepareModel*. + */ +class PreparedModelCallback : public hal::IPreparedModelCallback { + public: + /** + * IPreparedModelCallback::notify marks the callback object with the return + * status of the asynchronous model preparation along with the prepared + * model, and allows all prior and future wait calls on the + * PreparedModelCallback object to proceed. + * + * One of IPreparedModelCallback::notify, IPreparedModelCallback::notify_1_2, + * or IPreparedModelCallback::notify_1_3 must be called on a given + * PreparedModelCallback object. + * + * If the callback object is notified more than once, only the results of + * the first call to notify* are used, and the results from subsequent calls + * are discarded. + * + * @param status Error status returned from asynchronously preparing the + * model; will be: + * - NONE if the asynchronous preparation was successful + * - DEVICE_UNAVAILABLE if driver is offline or busy + * - GENERAL_FAILURE if there is an unspecified error + * - INVALID_ARGUMENT if the input model is invalid + * @param preparedModel Returned model that has been prepared for execution, + * nullptr if the model was unable to be prepared. 
+ */ + hal::Return<void> notify(hal::V1_0::ErrorStatus status, + const sp<hal::V1_0::IPreparedModel>& preparedModel) override; + + /** + * IPreparedModelCallback::notify_1_2 marks the callback object with the + * return status of the asynchronous model preparation along with the + * prepared model, and allows all prior and future wait calls on the + * PreparedModelCallback object to proceed. + * + * One of IPreparedModelCallback::notify, IPreparedModelCallback::notify_1_2, + * or IPreparedModelCallback::notify_1_3 must be called on a given + * PreparedModelCallback object. + * + * If the callback object is notified more than once, only the results of + * the first call to notify* are used, and the results from subsequent calls + * are discarded. + * + * @param status Error status returned from asynchronously preparing the + * model; will be: + * - NONE if the asynchronous preparation was successful + * - DEVICE_UNAVAILABLE if driver is offline or busy + * - GENERAL_FAILURE if there is an unspecified error + * - INVALID_ARGUMENT if the input model is invalid + * @param preparedModel Returned model that has been prepared for execution, + * nullptr if the model was unable to be prepared. + */ + hal::Return<void> notify_1_2(hal::V1_0::ErrorStatus status, + const sp<hal::V1_2::IPreparedModel>& preparedModel) override; + + /** + * IPreparedModelCallback::notify_1_3 marks the callback object with the + * return status of the asynchronous model preparation along with the + * prepared model, and allows all prior and future wait calls on the + * PreparedModelCallback object to proceed. + * + * One of IPreparedModelCallback::notify, IPreparedModelCallback::notify_1_2, + * or IPreparedModelCallback::notify_1_3 must be called on a given + * PreparedModelCallback object. + * + * If the callback object is notified more than once, only the results of + * the first call to notify* are used, and the results from subsequent calls + * are discarded. 
+ * + * @param status Error status returned from asynchronously preparing the + * model; will be: + * - NONE if the asynchronous preparation was successful + * - DEVICE_UNAVAILABLE if driver is offline or busy + * - GENERAL_FAILURE if there is an unspecified error + * - INVALID_ARGUMENT if the input model is invalid + * - MISSED_DEADLINE_* if the deadline could not be met + * - RESOURCE_EXHAUSTED_* if the task was aborted by the driver + * @param preparedModel Returned model that has been prepared for execution, + * nullptr if the model was unable to be prepared. + */ + hal::Return<void> notify_1_3(hal::V1_3::ErrorStatus status, + const sp<hal::V1_3::IPreparedModel>& preparedModel) override; + + /** + * Mark the callback object as a dead object. This acts as a call to notify. + */ + void notifyAsDeadObject(); + + /** + * PreparedModelCallback::wait blocks until notify* has been called on the + * callback object. + */ + void wait() const; + + /** + * Retrieves the error status returned from the asynchronous task launched + * by IDevice::prepareModel*. If IDevice::prepareModel* has not finished + * asynchronously preparing the model, this call will block until the + * asynchronous task notifies the object. + * + * @return status Error status returned from asynchronously preparing the + * model; will be: + * - NONE if the asynchronous preparation was successful + * - DEVICE_UNAVAILABLE if driver is offline or busy + * - GENERAL_FAILURE if there is an unspecified error + * - INVALID_ARGUMENT if the input model is invalid + * - MISSED_DEADLINE_* if the deadline could not be met + * - RESOURCE_EXHAUSTED_* if the task was aborted by the driver + * - DEAD_OBJECT if the driver crashed without returning a result + */ + hal::V1_3::ErrorStatus getStatus() const; + + /** + * Retrieves the model that has been prepared for execution from the + * asynchronous task launched by IDevice::prepareModel*. 
If + * IDevice::prepareModel* has not finished asynchronously preparing the + * model, this call will block until the asynchronous task notifies the + * object. + * + * @return preparedModel Returned model that has been prepared for + * execution, nullptr if the model was unable to be prepared. + */ + sp<hal::V1_0::IPreparedModel> getPreparedModel() const; + + /** + * Queries whether the object is dead. + * + * @return 'true' if dead, 'false' otherwise. + */ + bool isDeadObject() const; + + private: + hal::Return<void> notifyInternal(bool deadObject, hal::ErrorStatus errorStatus, + const sp<hal::V1_0::IPreparedModel>& preparedModel); + + mutable std::mutex mMutex; + mutable std::condition_variable mCondition; + bool mNotified GUARDED_BY(mMutex) = false; + bool mDeadObject = false; + hal::ErrorStatus mErrorStatus = hal::ErrorStatus::GENERAL_FAILURE; + sp<hal::V1_0::IPreparedModel> mPreparedModel; +}; + +/** + * The ExecutionCallback class is used to receive the results of the execution + * from a task executing asynchronously with respect to the runtime. If a + * calling thread calls wait or get* on a ExecutionCallback object and the + * corresponding asynchronous task has not finished the execution, the calling + * thread will block until the asynchronous task has called one of the notify* + * methods. + * + * If the callback object is notified more than once, only the results of the + * first call to notify* are used, and the results from subsequent calls are + * discarded. + * + * This callback object is passed as an argument to IPreparedModel::execute*. 
+ */ +class ExecutionCallback : public hal::IExecutionCallback { + using ExecutionFinish = + std::function<hal::ErrorStatus(hal::ErrorStatus, const std::vector<hal::OutputShape>&)>; + + public: + /** + * IExecutionCallback::notify marks the callback object with the return + * status of the asynchronous execution that held this callback and enables + * all prior and future wait calls on the ExecutionCallback object to + * proceed. + * + * One of the IExecutionCallback::notify* methods must be called on a given + * ExecutionCallback object. + * + * If the callback object is notified more than once, only the results of + * the first call to notify* are used, and the results from subsequent calls + * are discarded. + * + * @param status Error status returned from launching the asynchronous task + * (if the launch fails) or from the asynchronous task itself (if the + * launch succeeds). Must be: + * - NONE if the asynchronous execution was successful + * - DEVICE_UNAVAILABLE if driver is offline or busy + * - GENERAL_FAILURE if there is an unspecified error + * - OUTPUT_INSUFFICIENT_SIZE if provided output buffer is not large + * enough to store the resultant values + * - INVALID_ARGUMENT if the input request is invalid + */ + hal::Return<void> notify(hal::V1_0::ErrorStatus status) override; + + /** + * IExecutionCallback::notify_1_2 marks the callback object with the results + * (error status, dynamic output shapes, and timing information) of the + * asynchronous execution that held this callback and enables all prior and + * future wait calls on the ExecutionCallback object to proceed. + * + * One of the IExecutionCallback::notify* methods must be called on a given + * ExecutionCallback object. + * + * If the callback object is notified more than once, only the results of + * the first call to notify* are used, and the results from subsequent calls + * are discarded. 
+ * + * @param status Error status returned from launching the asynchronous task + * (if the launch fails) or from the asynchronous task itself (if the + * launch succeeds). Must be: + * - NONE if the asynchronous execution was successful + * - DEVICE_UNAVAILABLE if driver is offline or busy + * - GENERAL_FAILURE if the asynchronous task resulted in an unspecified + * error + * - OUTPUT_INSUFFICIENT_SIZE if at least one output operand buffer is + * not large enough to store the corresponding output + * - INVALID_ARGUMENT if one of the input arguments to prepareModel is + * invalid + * @param outputShapes A list of shape information of model output operands. + * The index into "outputShapes" corresponds to the index of the output + * operand in the Request outputs vector. outputShapes must be empty + * unless the status is either NONE or OUTPUT_INSUFFICIENT_SIZE. + * @param Timing Duration of execution. Unless MeasureTiming::YES was passed + * when launching the execution and status is NONE, all times must be + * reported as UINT64_MAX. A driver may choose to report any time as + * UINT64_MAX, indicating that particular measurement is not available. + */ + hal::Return<void> notify_1_2(hal::V1_0::ErrorStatus status, + const hal::hidl_vec<hal::OutputShape>& outputShapes, + const hal::Timing& timing) override; + + /** + * IExecutionCallback::notify_1_3 marks the callback object with the results + * (error status, dynamic output shapes, and timing information) of the + * asynchronous execution that held this callback and enables all prior and + * future wait calls on the ExecutionCallback object to proceed. + * + * One of the IExecutionCallback::notify* methods must be called on a given + * ExecutionCallback object. + * + * If the callback object is notified more than once, only the results of + * the first call to notify* are used, and the results from subsequent calls + * are discarded. 
+ * + * @param status Error status returned from launching the asynchronous task + * (if the launch fails) or from the asynchronous task itself (if the + * launch succeeds). Must be: + * - NONE if the asynchronous execution was successful + * - DEVICE_UNAVAILABLE if driver is offline or busy + * - GENERAL_FAILURE if the asynchronous task resulted in an unspecified + * error + * - OUTPUT_INSUFFICIENT_SIZE if at least one output operand buffer is + * not large enough to store the corresponding output + * - INVALID_ARGUMENT if one of the input arguments to prepareModel is + * invalid + * - MISSED_DEADLINE_* if the deadline could not be met + * - RESOURCE_EXHAUSTED_* if the execution was aborted by the driver + * @param outputShapes A list of shape information of model output operands. + * The index into "outputShapes" corresponds to the index of the output + * operand in the Request outputs vector. outputShapes must be empty + * unless the status is either NONE or OUTPUT_INSUFFICIENT_SIZE. + * @param Timing Duration of execution. Unless MeasureTiming::YES was passed + * when launching the execution and status is NONE, all times must be + * reported as UINT64_MAX. A driver may choose to report any time as + * UINT64_MAX, indicating that particular measurement is not available. + */ + hal::Return<void> notify_1_3(hal::V1_3::ErrorStatus status, + const hal::hidl_vec<hal::OutputShape>& outputShapes, + const hal::Timing& timing) override; + + // An overload of the latest notify interface to hide the version from ExecutionBuilder. + hal::Return<void> notify(hal::V1_3::ErrorStatus status, + const hal::hidl_vec<hal::OutputShape>& outputShapes, + const hal::Timing& timing) { + return notify_1_3(status, outputShapes, timing); + } + + /** + * Mark the callback object as a dead object. This acts as a call to notify. + */ + void notifyAsDeadObject(); + + /** + * ExecutionCallback::wait blocks until notify* has been called on the + * callback object. 
+ */ + void wait() const; + + /** + * Retrieves the error status returned from the asynchronous task launched + * by IPreparedModel::execute* (but not by + * IPreparedModel::executeSynchronously*). If IPreparedModel::execute* has + * not finished asynchronously executing, this call will block until the + * asynchronous task notifies the object. + * + * @return status Error status returned from launching the asynchronous task + * (if the launch fails) or from the asynchronous task itself (if the + * launch succeeds). Must be: + * - NONE if the asynchronous execution was successful + * - DEVICE_UNAVAILABLE if driver is offline or busy + * - GENERAL_FAILURE if the asynchronous task resulted in an unspecified + * error + * - OUTPUT_INSUFFICIENT_SIZE if at least one output operand buffer is + * not large enough to store the corresponding output + * - INVALID_ARGUMENT if one of the input arguments to prepareModel is + * invalid + * - MISSED_DEADLINE_* if the deadline could not be met + * - RESOURCE_EXHAUSTED_* if the task was aborted by the driver + * - DEAD_OBJECT if the driver crashed without returning a result + */ + hal::V1_3::ErrorStatus getStatus() const; + + /** + * Retrieves the output shapes returned from the asynchronous task launched + * by either IPreparedModel::execute_1_2 or IPreparedModel::execute_1_3. If + * IPreparedModel::execute_1_2 or IPreparedModel::execute_1_3 has not + * finished asynchronously executing, this call will block until the + * asynchronous task notifies the object. + * + * If the asynchronous task was launched by IPreparedModel::execute, an + * empty vector will be returned. + * + * @return outputShapes A list of shape information of model output + * operands. The index into "outputShapes" corresponds to the index of + * the output operand in the Request outputs vector. outputShapes must + * be empty unless the status is either NONE or + * OUTPUT_INSUFFICIENT_SIZE. 
outputShaps may be empty if the status is + * NONE and all model output operands are fully-specified at execution + * time. outputShapes must have the same number of elements as the + * number of model output operands if the status is + * OUTPUT_INSUFFICIENT_SIZE, or if the status is NONE and the model has + * at least one output operand that is not fully-specified. + */ + const std::vector<hal::OutputShape>& getOutputShapes() const; + + /** + * Retrieves the duration of execution of the asynchronous task launched by + * by either IPreparedModel::execute_1_2 or IPreparedModel::execute_1_3. If + * IPreparedModel::execute_1_2 or IPreparedModel::execute_1_3 has not + * finished asynchronously executing, this call will block until the + * asynchronous task notifies the object. + * + * If the asynchronous task was launched by IPreparedModel::execute, every + * time must be UINT64_MAX. + * + * @return timing Duration of the execution. Every time must be UINT64_MAX + * unless the status is NONE. + */ + hal::Timing getTiming() const; + + /** + * ExecutionCallback::bindThread binds a thread to the ExecutionCallback + * object. The bound thread is later joined by ExecutionCallback::wait or + * ExecutionCallback::get*. + * + * Once a thread is bound with ExecutionCallback::bindThread, the client + * code must ensure that ExecutionCallback::wait or ExecutionCallback::get* + * has been called before the ExecutionCallback object is destroyed. + * + * The bound thread must not call any ExecutionCallback method with the + * exception of ExecutionCallback::notify*, which it must call when the + * thread has finished its computation. + * + * ExecutionCallback::bindThread can be called at most once on a given + * callback object. + * + * @param asyncThread Thread to be bound to the callback object. The thread + * object must represent a thread of execution -- i.e., + * std::thread::joinable() must be true. + * @return bool True if successful, false if thread was not properly bound. 
+ */ + bool bindThread(std::thread asyncThread); + + /** + * ExecutionCallback::setOnFinish binds a callback to the ExecutionCallback + * object that will be executed during one of the ExecutionCallback::notify* + * calls but before any calls to wait or get* return. This provided callback + * is provided with both the ErrorStatus and the output shapes from + * ExecutionCallback::notify*. + * + * The bound function must not synchronize with or otherwise access the + * callback object it is bound to, as this could cause a deadlock. + * + * This call will not bind the provided callback if any of the following + * occur: + * (1) the provided callback is invalid (i.e., "(bool) finish" is false) + * (2) ExecutionCallback already contains a bound callback + * (3) ExecutionCallback has already been notified with results + * + * @param finish Callback to be executed when ExecutionCallback is notified + * with results. + */ + void setOnFinish(const ExecutionFinish& finish); + + /** + * Queries whether the object is dead. + * + * @return 'true' if dead, 'false' otherwise. + */ + bool isDeadObject() const; + + private: + /* + * ExecutionCallback::notifyInternal stores the results of the execution + * (status, output shapes, and timing information) in the ExecutionCallback + * object and invokes the bound callback function "mOnFinish" (if present) + * before any call to wait or get* return. It then enables all prior and + * future wait calls on the ExecutionCallback object to proceed. 
+ */ + hal::Return<void> notifyInternal(bool deadObject, hal::ErrorStatus errorStatus, + std::vector<hal::OutputShape> outputShapes, + hal::Timing timing); + + // members + mutable std::mutex mMutex; + mutable std::condition_variable mCondition; + mutable std::thread mThread GUARDED_BY(mMutex); + ExecutionFinish mOnFinish GUARDED_BY(mMutex); + bool mNotified GUARDED_BY(mMutex) = false; + bool mDeadObject = false; + hal::ErrorStatus mErrorStatus = hal::ErrorStatus::GENERAL_FAILURE; + std::vector<hal::OutputShape> mOutputShapes; + hal::Timing mTiming = {}; +}; + +} // namespace android::nn + +#endif // ANDROID_FRAMEWORKS_ML_NN_RUNTIME_CALLBACKS_H
diff --git a/runtime/CompilationBuilder.cpp b/runtime/CompilationBuilder.cpp index f58df05..8b2a269 100644 --- a/runtime/CompilationBuilder.cpp +++ b/runtime/CompilationBuilder.cpp
@@ -18,11 +18,6 @@ #include "CompilationBuilder.h" -#include <LegacyUtils.h> -#include <nnapi/IBurst.h> -#include <nnapi/SharedMemory.h> -#include <nnapi/Types.h> - #include <algorithm> #include <limits> #include <memory> @@ -32,13 +27,17 @@ #include "BurstBuilder.h" #include "ExecutionBuilder.h" +#include "ExecutionBurstController.h" #include "ExecutionPlan.h" #include "Manager.h" #include "ModelBuilder.h" +#include "Utils.h" namespace android { namespace nn { +using namespace hal; + CompilationBuilder::CompilationBuilder(const ModelBuilder* model, const std::vector<std::shared_ptr<Device>>& devices, bool explicitDeviceList) @@ -61,11 +60,10 @@ mFinished = true; if (mIsCacheInfoProvided) { - mPlan.setCaching(&mCacheInfo, mToken); + mPlan.setCaching(&mCacheDir, mToken); } if (mPartitioning) { - int n = mModel->partitionTheWork(mDevices, mPreference, mPriority, deadline, &mPlan, - mFailPartitioning); + int n = mModel->partitionTheWork(mDevices, mPreference, mPriority, deadline, &mPlan); switch (n) { case ANEURALNETWORKS_NO_ERROR: return n; @@ -98,7 +96,7 @@ VLOG(COMPILATION) << "CompilationBuilder::finish with CPU fallback"; mPlan.reset(); mPlan.becomeSingleStep(DeviceManager::getCpuDevice(), mModel); - return mPlan.finish(mPreference, mPriority, deadline, ANEURALNETWORKS_NO_ERROR); + return mPlan.finish(mPreference, mPriority, deadline); } int CompilationBuilder::setPreference(int32_t preference) { @@ -122,63 +120,11 @@ << "ANeuralNetworksCompilation_setCaching can't modify after compilation finished"; return ANEURALNETWORKS_BAD_STATE; } - std::string path = cacheDir; + mCacheDir = cacheDir; // Make sure the cache dir can concat with the filename. 
- if (!path.empty() && path.back() != '/') { - path.push_back('/'); + if (!mCacheDir.empty() && mCacheDir.back() != '/') { + mCacheDir.push_back('/'); } - mCacheInfo.variant = std::move(path); - std::copy(token, token + ANEURALNETWORKS_BYTE_SIZE_OF_CACHE_TOKEN, mToken); - mIsCacheInfoProvided = true; - return ANEURALNETWORKS_NO_ERROR; -} - -static GeneralResult<SharedHandle> createCacheHandle(int fd) { - std::vector<base::unique_fd> fds; - fds.push_back(NN_TRY(dupFd(fd))); - return std::make_shared<const Handle>(Handle{ - .fds = std::move(fds), - .ints = {}, - }); -} - -static GeneralResult<std::vector<SharedHandle>> createCacheHandleVec(const int* fds, - uint32_t numFds) { - std::vector<SharedHandle> handles; - handles.reserve(numFds); - for (uint32_t i = 0; i < numFds; i++) { - handles.push_back(NN_TRY(createCacheHandle(fds[i]))); - } - return handles; -} - -int CompilationBuilder::setCachingFromFds(const int* modelCacheFds, - const uint32_t numModelCacheFiles, - const int* dataCacheFds, const uint32_t numDataCacheFiles, - const uint8_t* token) { - if (mFinished) { - LOG(ERROR) << "SL_ANeuralNetworksCompilation_setCachingFromFds can't modify after " - "compilation finished"; - return ANEURALNETWORKS_BAD_STATE; - } - auto modelCache = createCacheHandleVec(modelCacheFds, numModelCacheFiles); - if (!modelCache.has_value()) { - LOG(ERROR) << "SL_ANeuralNetworksCompilation_setCachingFromFds can't duplicate model cache " - "fds: " - << modelCache.error().message; - return ANEURALNETWORKS_BAD_DATA; - } - auto dataCache = createCacheHandleVec(dataCacheFds, numDataCacheFiles); - if (!dataCache.has_value()) { - LOG(ERROR) << "SL_ANeuralNetworksCompilation_setCachingFromFds can't duplicate data cache " - "fds: " - << dataCache.error().message; - return ANEURALNETWORKS_BAD_DATA; - } - mCacheInfo.variant = CacheHandles{ - .modelCache = std::move(modelCache).value(), - .dataCache = std::move(dataCache).value(), - }; std::copy(token, token + 
ANEURALNETWORKS_BYTE_SIZE_OF_CACHE_TOKEN, mToken); mIsCacheInfoProvided = true; return ANEURALNETWORKS_NO_ERROR; @@ -220,9 +166,9 @@ return ANEURALNETWORKS_NO_ERROR; } -int CompilationBuilder::forTest_setPartitioning(uint32_t partitioning) { +int CompilationBuilder::setPartitioning(uint32_t partitioning) { if (mFinished) { - LOG(ERROR) << "CompilationBuilder::forTest_setPartitioning can't modify after compilation " + LOG(ERROR) << "ANeuralNetworksCompilation_setPartitioning can't modify after compilation " "finished"; return ANEURALNETWORKS_BAD_STATE; } @@ -231,108 +177,6 @@ return ANEURALNETWORKS_NO_ERROR; } -int CompilationBuilder::forTest_failPartitioning(int fail) { - if (mFinished) { - LOG(ERROR) << "CompilationBuilder::forTest_failPartitioning can't modify after compilation " - "finished"; - return ANEURALNETWORKS_BAD_STATE; - } - - mFailPartitioning = fail; - return ANEURALNETWORKS_NO_ERROR; -} - -int CompilationBuilder::getPreferredMemoryAlignmentForInput(uint32_t index, - uint32_t* alignment) const { - CHECK(alignment != nullptr); - if (!mFinished) { - LOG(ERROR) << "ANeuralNetworksCompilation_getPreferredMemoryAlignmentForInput passed an " - "unfinished compilation"; - return ANEURALNETWORKS_BAD_STATE; - } - if (!mPlan.isValid()) { - LOG(ERROR) << "ANeuralNetworksCompilation_getPreferredMemoryAlignmentForInput passed an " - "invalid compilation"; - return ANEURALNETWORKS_BAD_STATE; - } - if (index >= mModel->inputCount()) { - LOG(ERROR) << "ANeuralNetworksCompilation_getPreferredMemoryAlignmentForInput passed an " - "invalid input index " - << index; - return ANEURALNETWORKS_BAD_DATA; - } - *alignment = mPlan.getMemoryPreference(IOType::INPUT, index).alignment; - return ANEURALNETWORKS_NO_ERROR; -} - -int CompilationBuilder::getPreferredMemoryPaddingForInput(uint32_t index, uint32_t* padding) const { - CHECK(padding != nullptr); - if (!mFinished) { - LOG(ERROR) << "ANeuralNetworksCompilation_getPreferredMemoryPaddingForInput passed an " - "unfinished 
compilation"; - return ANEURALNETWORKS_BAD_STATE; - } - if (!mPlan.isValid()) { - LOG(ERROR) << "ANeuralNetworksCompilation_getPreferredMemoryPaddingForInput passed an " - "invalid compilation"; - return ANEURALNETWORKS_BAD_STATE; - } - if (index >= mModel->inputCount()) { - LOG(ERROR) << "ANeuralNetworksCompilation_getPreferredMemoryPaddingForInput passed an " - "invalid input index " - << index; - return ANEURALNETWORKS_BAD_DATA; - } - *padding = mPlan.getMemoryPreference(IOType::INPUT, index).padding; - return ANEURALNETWORKS_NO_ERROR; -} - -int CompilationBuilder::getPreferredMemoryAlignmentForOutput(uint32_t index, - uint32_t* alignment) const { - CHECK(alignment != nullptr); - if (!mFinished) { - LOG(ERROR) << "ANeuralNetworksCompilation_getPreferredMemoryAlignmentForOutput passed an " - "unfinished compilation"; - return ANEURALNETWORKS_BAD_STATE; - } - if (!mPlan.isValid()) { - LOG(ERROR) << "ANeuralNetworksCompilation_getPreferredMemoryAlignmentForOutput passed an " - "invalid compilation"; - return ANEURALNETWORKS_BAD_STATE; - } - if (index >= mModel->outputCount()) { - LOG(ERROR) << "ANeuralNetworksCompilation_getPreferredMemoryAlignmentForOutput passed an " - "invalid output index " - << index; - return ANEURALNETWORKS_BAD_DATA; - } - *alignment = mPlan.getMemoryPreference(IOType::OUTPUT, index).alignment; - return ANEURALNETWORKS_NO_ERROR; -} - -int CompilationBuilder::getPreferredMemoryPaddingForOutput(uint32_t index, - uint32_t* padding) const { - CHECK(padding != nullptr); - if (!mFinished) { - LOG(ERROR) << "ANeuralNetworksCompilation_getPreferredMemoryPaddingForOutput passed an " - "unfinished compilation"; - return ANEURALNETWORKS_BAD_STATE; - } - if (!mPlan.isValid()) { - LOG(ERROR) << "ANeuralNetworksCompilation_getPreferredMemoryPaddingForOutput passed an " - "invalid compilation"; - return ANEURALNETWORKS_BAD_STATE; - } - if (index >= mModel->outputCount()) { - LOG(ERROR) << "ANeuralNetworksCompilation_getPreferredMemoryPaddingForOutput 
passed an " - "invalid output index " - << index; - return ANEURALNETWORKS_BAD_DATA; - } - *padding = mPlan.getMemoryPreference(IOType::OUTPUT, index).padding; - return ANEURALNETWORKS_NO_ERROR; -} - int CompilationBuilder::createExecution(ExecutionBuilder** execution) { if (!mFinished) { LOG(ERROR) << "ANeuralNetworksExecution_create passed an unfinished compilation"; @@ -344,11 +188,7 @@ *execution = nullptr; return ANEURALNETWORKS_BAD_STATE; } - if (mPlan.isSimple()) { - *execution = new (std::nothrow) SimpleExecutionBuilder(this); - } else { - *execution = new (std::nothrow) CompoundExecutionBuilder(this); - } + *execution = new (std::nothrow) ExecutionBuilder(this); return (*execution ? ANEURALNETWORKS_NO_ERROR : ANEURALNETWORKS_OUT_OF_MEMORY); } @@ -363,7 +203,8 @@ *burst = nullptr; return ANEURALNETWORKS_BAD_STATE; } - std::vector<SharedBurst> burstControllers = mPlan.makeBursts(); + std::vector<std::shared_ptr<ExecutionBurstController>> burstControllers = + mPlan.makeBursts(mPreference); *burst = new (std::nothrow) BurstBuilder(this, std::move(burstControllers)); return (*burst ? ANEURALNETWORKS_NO_ERROR : ANEURALNETWORKS_OUT_OF_MEMORY); }
diff --git a/runtime/CompilationBuilder.h b/runtime/CompilationBuilder.h index 93542cc..d94fb18 100644 --- a/runtime/CompilationBuilder.h +++ b/runtime/CompilationBuilder.h
@@ -24,7 +24,6 @@ #include <vector> #include "ExecutionPlan.h" -#include "Manager.h" #include "NeuralNetworks.h" namespace android { @@ -48,11 +47,9 @@ int setPreference(int32_t preference); + int setPartitioning(uint32_t partitioning); + int setCaching(const std::string& cacheDir, const uint8_t* token); - // Dups the fds - int setCachingFromFds(const int* modelCacheFds, const uint32_t numModelCacheFiles, - const int* dataCacheFds, const uint32_t numDataCacheFiles, - const uint8_t* token); int setPriority(int32_t priority); @@ -60,11 +57,6 @@ int finish(); - int getPreferredMemoryAlignmentForInput(uint32_t index, uint32_t* alignment) const; - int getPreferredMemoryPaddingForInput(uint32_t index, uint32_t* padding) const; - int getPreferredMemoryAlignmentForOutput(uint32_t index, uint32_t* alignment) const; - int getPreferredMemoryPaddingForOutput(uint32_t index, uint32_t* padding) const; - int createExecution(ExecutionBuilder** execution); int createBurst(BurstBuilder** burst); @@ -74,16 +66,9 @@ int forEachStepRoleOfInput(uint32_t index, const StepRoleCallback& callback) const; int forEachStepRoleOfOutput(uint32_t index, const StepRoleCallback& callback) const; - bool createdWithExplicitDeviceList() const { return mExplicitDeviceList; } - - bool hasDynamicTemporaries() const { return mPlan.hasDynamicTemporaries(); } - - // These functions are solely intended for use by unit tests of the - // partitioning algorithm. const ExecutionPlan& forTest_getExecutionPlan() const { return mPlan; } - int forTest_setPartitioning(uint32_t partitioning); - int forTest_failPartitioning( - int resultCode); // If not ANEURALNETWORKS_NO_ERROR, then simulate partitioning failure + + bool createdWithExplicitDeviceList() const { return mExplicitDeviceList; } private: const ModelBuilder* mModel; @@ -98,9 +83,6 @@ // we can override this later. uint32_t mPartitioning; - // For testing purposes, simulate partitioning failure. 
- int mFailPartitioning = ANEURALNETWORKS_NO_ERROR; - // Once the compilation has been finished, we should not allow further // modifications to the compilation. bool mFinished = false; @@ -115,7 +97,7 @@ bool mExplicitDeviceList; // Compilation caching information. - CacheInfo mCacheInfo; + std::string mCacheDir; uint8_t mToken[ANEURALNETWORKS_BYTE_SIZE_OF_CACHE_TOKEN]; bool mIsCacheInfoProvided = false;
diff --git a/runtime/Event.h b/runtime/Event.h index 8157aa1..87d7709 100644 --- a/runtime/Event.h +++ b/runtime/Event.h
@@ -17,52 +17,43 @@ #ifndef ANDROID_FRAMEWORKS_ML_NN_RUNTIME_EVENT_H #define ANDROID_FRAMEWORKS_ML_NN_RUNTIME_EVENT_H -#include <android-base/logging.h> -#include <nnapi/Types.h> - -#include <memory> -#include <mutex> +#include <android/sync.h> #include <utility> -#include "ExecutionCallback.h" +#include "Callbacks.h" +#include "HalInterfaces.h" namespace android::nn { class IEvent { public: virtual ~IEvent() = default; - virtual ErrorStatus wait() const = 0; + virtual void wait() const = 0; + virtual hal::ErrorStatus getStatus() const = 0; virtual int getSyncFenceFd(bool shouldDup) const = 0; }; // The CallbackEvent wraps ExecutionCallback class CallbackEvent : public IEvent { public: - CallbackEvent(std::shared_ptr<ExecutionCallback> callback) - : kExecutionCallback(std::move(callback)) { + CallbackEvent(sp<ExecutionCallback> callback) : kExecutionCallback(std::move(callback)) { CHECK(kExecutionCallback != nullptr); } - ErrorStatus wait() const override { - kExecutionCallback->wait(); - return kExecutionCallback->getStatus(); - } - + void wait() const override { kExecutionCallback->wait(); } + hal::ErrorStatus getStatus() const override { return kExecutionCallback->getStatus(); } // Always return -1 as this is not backed by a sync fence. 
int getSyncFenceFd(bool /*should_dup*/) const override { return -1; } private: - const std::shared_ptr<ExecutionCallback> kExecutionCallback; + const sp<ExecutionCallback> kExecutionCallback; }; -// The SyncFenceEvent wraps sync fence and ExecuteFencedInfoCallback +// The SyncFenceEvent wraps sync fence and IFencedExecutionCallback class SyncFenceEvent : public IEvent { - using ExecutionFinishCallback = std::function<ErrorStatus(ErrorStatus)>; - public: - SyncFenceEvent(int sync_fence_fd, const ExecuteFencedInfoCallback& callback, - const ExecutionFinishCallback& finish) - : kFencedExecutionCallback(callback), kFinishCallback(finish) { + SyncFenceEvent(int sync_fence_fd, const sp<hal::IFencedExecutionCallback>& callback) + : kFencedExecutionCallback(callback) { if (sync_fence_fd > 0) { // Dup the provided file descriptor mSyncFenceFd = dup(sync_fence_fd); @@ -74,29 +65,27 @@ ~SyncFenceEvent() { close(mSyncFenceFd); } // Use syncWait to wait for the sync fence until the status change. - // In case of syncWait error, query the dispatch callback for detailed error status. - // This method maps to the NDK ANeuralNetworksEvent_wait, which must be thread-safe. - ErrorStatus wait() const override { - std::lock_guard<std::mutex> lock(mMutex); - if (mFinished) return mError; + void wait() const override { syncWait(mSyncFenceFd, -1); } + // Get the status of the event. + // In case of syncWait error, query the dispatch callback for detailed + // error status. + hal::ErrorStatus getStatus() const override { + auto error = hal::ErrorStatus::NONE; if (mSyncFenceFd > 0 && syncWait(mSyncFenceFd, -1) != FenceState::SIGNALED) { - mError = ErrorStatus::GENERAL_FAILURE; + error = hal::ErrorStatus::GENERAL_FAILURE; // If there is a callback available, use the callback to get the error code. 
if (kFencedExecutionCallback != nullptr) { - auto result = kFencedExecutionCallback(); - if (!result.has_value()) { - LOG(ERROR) << "Fenced execution callback failed: " << result.error().message; - mError = result.error().code; - CHECK_NE(mError, ErrorStatus::NONE); + const hal::Return<void> ret = kFencedExecutionCallback->getExecutionInfo( + [&error](hal::ErrorStatus status, hal::Timing, hal::Timing) { + error = status; + }); + if (!ret.isOk()) { + error = hal::ErrorStatus::GENERAL_FAILURE; } } } - if (kFinishCallback != nullptr) { - mError = kFinishCallback(mError); - } - mFinished = true; - return mError; + return error; } // Return the sync fence fd. @@ -114,12 +103,7 @@ private: // TODO(b/148423931): used android::base::unique_fd instead. int mSyncFenceFd = -1; - const ExecuteFencedInfoCallback kFencedExecutionCallback; - const ExecutionFinishCallback kFinishCallback; - - mutable std::mutex mMutex; - mutable bool mFinished GUARDED_BY(mMutex) = false; - mutable ErrorStatus mError GUARDED_BY(mMutex) = ErrorStatus::NONE; + const sp<hal::IFencedExecutionCallback> kFencedExecutionCallback; }; } // namespace android::nn
diff --git a/runtime/ExecutionBuilder.cpp b/runtime/ExecutionBuilder.cpp index f5b74b6..d65d964 100644 --- a/runtime/ExecutionBuilder.cpp +++ b/runtime/ExecutionBuilder.cpp
@@ -18,18 +18,10 @@ #include "ExecutionBuilder.h" -#include <ControlFlow.h> -#include <CpuExecutor.h> -#include <LegacyUtils.h> -#include <Tracing.h> -#include <android-base/logging.h> -#include <nnapi/IBurst.h> -#include <nnapi/IPreparedModel.h> -#include <nnapi/Types.h> +#include <android/sync.h> #include <algorithm> #include <limits> -#include <map> #include <memory> #include <mutex> #include <optional> @@ -39,75 +31,24 @@ #include <utility> #include <vector> -#include "BurstBuilder.h" #include "CompilationBuilder.h" +#include "ControlFlow.h" +#include "CpuExecutor.h" +#include "ExecutionBurstController.h" +#include "HalInterfaces.h" #include "Manager.h" #include "ModelArgumentInfo.h" #include "ModelBuilder.h" +#include "Tracing.h" #include "TypeManager.h" +#include "Utils.h" namespace android { namespace nn { -// Partial validation of output shapes returned from driver, to ensure they -// conform to a very specific set of rules. -static bool validateOutputShapesFromDriver(ErrorStatus executionStatus, const ModelBuilder* model, - const std::vector<OutputShape>& shapes) { - // Enforces the following rules (some of which are from b/154054474): - // - shapes vector is empty except in the case of NONE or OUTPUT_INSUFFICIENT_SIZE. - // If the vector is not empty, it must have as many entries as the step model has outputs. - // - If NONE, then either shapes vector is empty, or every shape is - // marked isSufficient and, if a tensor, has known rank. - // - If OUTPUT_INSUFFICIENT_SIZE, then the vector is not empty. At least one entry - // is marked !isSufficient. 
- switch (executionStatus) { - case ErrorStatus::NONE: { - NN_RET_CHECK(shapes.size() == 0 || shapes.size() == model->outputCount()) - << "With execution ErrorStatus " << executionStatus - << " output shapes vector must be empty or of length " << model->outputCount() - << " but has length " << shapes.size(); - NN_RET_CHECK(std::all_of(shapes.begin(), shapes.end(), - [](const OutputShape& shape) { return shape.isSufficient; })) - << "With execution ErrorStatus " << executionStatus - << " at least one output shape is unexpectedly marked !isSufficient"; +using namespace hal; - const TypeManager* tm = TypeManager::get(); - for (uint32_t outputIndex = 0, outputCount = shapes.size(); outputIndex < outputCount; - ++outputIndex) { - const Operand& outputOperand = model->getOutputOperand(outputIndex); - NN_RET_CHECK(!tm->isTensorType(outputOperand.type) || - (shapes[outputIndex].dimensions.size() != 0)) - << "With execution ErrorStatus " << executionStatus << " output#" - << outputIndex << " shape unexpectedly has zero rank"; - } - - break; - } - case ErrorStatus::OUTPUT_INSUFFICIENT_SIZE: { - NN_RET_CHECK(shapes.size() == model->outputCount()) - << "With execution ErrorStatus " << executionStatus - << " output shapes vector must be of length " << model->outputCount() - << " but has length " << shapes.size(); - NN_RET_CHECK(std::any_of(shapes.begin(), shapes.end(), - [](const OutputShape& shape) { return !shape.isSufficient; })) - << "With execution ErrorStatus " << executionStatus - << " at least one output shape must have been marked !isSufficient"; - break; - } - default: { - NN_RET_CHECK(shapes.size() == 0) - << "With execution ErrorStatus " << executionStatus - << " output shapes vector must be empty but has length " << shapes.size(); - break; - } - } - return true; -} -static bool validateOutputShapesFromDriver(int executionResultCode, const ModelBuilder* model, - const std::vector<OutputShape>& shapes) { - return 
validateOutputShapesFromDriver(convertResultCodeToErrorStatus(executionResultCode), - model, shapes); -} +const Timing kNoTiming = {.timeOnDevice = UINT64_MAX, .timeInDriver = UINT64_MAX}; static MeasureTiming measureTiming(const ExecutionBuilder* execution) { return execution->measureTiming() ? MeasureTiming::YES : MeasureTiming::NO; @@ -117,7 +58,7 @@ const char* tag, bool allowUnspecified) { if (newType != nullptr) { const Extension::OperandTypeInformation* info = nullptr; - if (isExtension(operand.type)) { + if (isExtensionOperandType(operand.type)) { NN_RET_CHECK(TypeManager::get()->getExtensionOperandTypeInfo(operand.type, &info)); } if (validateOperandType(*newType, info, tag, allowUnspecified) != @@ -129,8 +70,7 @@ return true; } if (operand.dimensions.size() != newType->dimensionCount) { - LOG(ERROR) << tag << ": Setting with incompatible dimension count (existing = " - << operand.dimensions.size() << ", new = " << newType->dimensionCount << ")"; + LOG(ERROR) << tag << ": Setting with incompatible dimension count"; return false; } for (uint32_t i = 0; i < newType->dimensionCount; i++) { @@ -153,30 +93,44 @@ : mCompilation(compilation), mModel(compilation->mModel), mPlan(&compilation->mPlan), - mAllowCpuFallback(DeviceManager::partitioningAllowsFallback(compilation->mPartitioning)), + mPartitioning(compilation->mPartitioning), mInputs(mModel->inputCount()), mOutputs(mModel->outputCount()) { VLOG(EXECUTION) << "ExecutionBuilder::ExecutionBuilder with " << mInputs.size() << " inputs and " << mOutputs.size() << " outputs"; } -SimpleExecutionBuilder::SimpleExecutionBuilder(const CompilationBuilder* compilation) - : ExecutionBuilder(compilation) { - CHECK(mPlan->isSimple()); -} - -CompoundExecutionBuilder::CompoundExecutionBuilder(const CompilationBuilder* compilation) - : ExecutionBuilder(compilation) { - CHECK(mPlan->isCompound()); -} - const ModelBuilder* ExecutionBuilder::getSourceModel(uint32_t index) const { return mPlan->getSourceModels().getModel(index); 
} +bool ExecutionBuilder::isFinished() const { + CHECK(!(mFinishedWithoutSyncFence && hasSyncFence())); + if (mFinishedWithoutSyncFence) { + return true; + } + if (hasSyncFence()) { + auto r = syncWait(mSyncFenceFd, 0); + CHECK(r != FenceState::UNKNOWN); + return r != FenceState::ACTIVE; + } + return false; +} + +ExecutionBuilder::Completion ExecutionBuilder::completedWith() const { + CHECK(isFinished()); + if (hasSyncFence()) { + auto r = syncWait(mSyncFenceFd, 0); + CHECK(r == FenceState::SIGNALED || r == FenceState::ERROR); + return (r == FenceState::SIGNALED) ? Completion::NO_ERROR : Completion::OTHER_ERROR; + } else { + return mCompletionWithoutSyncFence; + } +} + int ExecutionBuilder::setInput(uint32_t index, const ANeuralNetworksOperandType* type, const void* buffer, size_t length) { - if (computationStarted()) { + if (mStarted) { LOG(ERROR) << "ANeuralNetworksExecution_setInput called after the " "execution has started."; return ANEURALNETWORKS_BAD_STATE; @@ -202,18 +156,15 @@ } int n; std::tie(n, mInputs[index]) = ModelArgumentInfo::createFromPointer( - mModel->getInputOperand(index), type, const_cast<void*>(buffer), l, - mInputAndOutputPaddingEnabled); - mHasCalledSetInputOutput = true; + mModel->getInputOperand(index), type, const_cast<void*>(buffer), l); return n; } int ExecutionBuilder::setInputFromMemory(uint32_t index, const ANeuralNetworksOperandType* type, - const RuntimeMemory* memory, size_t offset, - size_t length) { + const Memory* memory, size_t offset, size_t length) { // Should be similar to StepExecutor::setInputOrOutputFromMemory() - if (computationStarted()) { + if (mStarted) { LOG(ERROR) << "ANeuralNetworksExecution_setInputFromMemory called after the " "execution has started."; return ANEURALNETWORKS_BAD_STATE; @@ -237,8 +188,8 @@ // region is used. We update the length here because the drivers are still expecting a real // length. 
For other memories that do not allow this semantic, it is checked in // MemoryValidatorBase::validate before reaching here. - if (validate(memory->getMemory()).ok() && offset == 0 && length == 0) { - length = memory->getSize(); + if (memory->getHidlMemory().valid() && offset == 0 && length == 0) { + length = memory->getHidlMemory().size(); } // TODO validate the rest uint32_t poolIndex = mMemories.add(memory); @@ -249,16 +200,14 @@ return ANEURALNETWORKS_BAD_STATE; } int n; - std::tie(n, mInputs[index]) = - ModelArgumentInfo::createFromMemory(mModel->getInputOperand(index), type, poolIndex, - offset, length, mInputAndOutputPaddingEnabled); - mHasCalledSetInputOutput = true; + std::tie(n, mInputs[index]) = ModelArgumentInfo::createFromMemory( + mModel->getInputOperand(index), type, poolIndex, offset, length); return n; } int ExecutionBuilder::setOutput(uint32_t index, const ANeuralNetworksOperandType* type, void* buffer, size_t length) { - if (computationStarted()) { + if (mStarted) { LOG(ERROR) << "ANeuralNetworksExecution_setOutput called after the " "execution has started."; return ANEURALNETWORKS_BAD_STATE; @@ -283,18 +232,16 @@ return ANEURALNETWORKS_BAD_STATE; } int n; - std::tie(n, mOutputs[index]) = ModelArgumentInfo::createFromPointer( - mModel->getOutputOperand(index), type, buffer, l, mInputAndOutputPaddingEnabled); - mHasCalledSetInputOutput = true; + std::tie(n, mOutputs[index]) = + ModelArgumentInfo::createFromPointer(mModel->getOutputOperand(index), type, buffer, l); return n; } int ExecutionBuilder::setOutputFromMemory(uint32_t index, const ANeuralNetworksOperandType* type, - const RuntimeMemory* memory, size_t offset, - size_t length) { + const Memory* memory, size_t offset, size_t length) { // Should be similar to StepExecutor::setInputOrOutputFromMemory() - if (computationStarted()) { + if (mStarted) { LOG(ERROR) << "ANeuralNetworksExecution_setOutputFromMemory called after the " "execution has started."; return ANEURALNETWORKS_BAD_STATE; @@ 
-318,8 +265,8 @@ // region is used. We update the length here because the drivers are still expecting a real // length. For other memories that do not allow this semantic, it is checked in // MemoryValidatorBase::validate before reaching here. - if (validate(memory->getMemory()).ok() && offset == 0 && length == 0) { - length = memory->getSize(); + if (memory->getHidlMemory().valid() && offset == 0 && length == 0) { + length = memory->getHidlMemory().size(); } // TODO validate the rest uint32_t poolIndex = mMemories.add(memory); @@ -329,10 +276,8 @@ return ANEURALNETWORKS_BAD_STATE; } int n; - std::tie(n, mOutputs[index]) = - ModelArgumentInfo::createFromMemory(mModel->getOutputOperand(index), type, poolIndex, - offset, length, mInputAndOutputPaddingEnabled); - mHasCalledSetInputOutput = true; + std::tie(n, mOutputs[index]) = ModelArgumentInfo::createFromMemory( + mModel->getOutputOperand(index), type, poolIndex, offset, length); return n; } @@ -344,7 +289,7 @@ << "with numDevices = 1"; return ANEURALNETWORKS_BAD_DATA; } - if (computationStarted()) { + if (mStarted) { LOG(ERROR) << "ANeuralNetworksExecution_setMeasureTiming called after the " "execution has started."; return ANEURALNETWORKS_BAD_STATE; @@ -354,7 +299,7 @@ } int ExecutionBuilder::getDuration(int32_t durationCode, uint64_t* duration) const { - if (!completed()) { + if (!isFinished()) { LOG(ERROR) << "ANeuralNetworksExecution_getDuration called before the " "execution has finished."; *duration = UINT64_MAX; @@ -367,6 +312,9 @@ return ANEURALNETWORKS_BAD_STATE; } + // NOTE: At the HAL level, timing is in microseconds. At the NDK level, nanoseconds. 
+ const uint64_t kNanoPerMicro = 1000; + if (!mMeasureTiming) { *duration = UINT64_MAX; return ANEURALNETWORKS_BAD_STATE; @@ -375,39 +323,41 @@ Timing timingLaunched = mTimingWithoutFencedExecutionCallback; Timing timingFenced = timingLaunched; if (mFencedExecutionCallback != nullptr) { - auto result = mFencedExecutionCallback(); - if (!result.has_value()) { - LOG(ERROR) << "Fenced execution callback failed: " << result.error().message; + ErrorStatus status; + const Return<void> ret = mFencedExecutionCallback->getExecutionInfo( + [&status, &timingLaunched, &timingFenced](ErrorStatus error, Timing tLaunched, + Timing tFenced) { + status = error; + timingLaunched = tLaunched; + timingFenced = tFenced; + }); + if (!ret.isOk()) { + *duration = UINT64_MAX; + return ANEURALNETWORKS_OP_FAILED; + } + if (status != ErrorStatus::NONE) { *duration = UINT64_MAX; return ANEURALNETWORKS_BAD_STATE; } - std::tie(timingLaunched, timingFenced) = std::move(result).value(); } - const OptionalDuration selectedDuration = [durationCode, &timingLaunched, - &timingFenced]() -> OptionalDuration { - switch (durationCode) { - case ANEURALNETWORKS_DURATION_ON_HARDWARE: - return timingLaunched.timeOnDevice; - case ANEURALNETWORKS_DURATION_IN_DRIVER: - return timingLaunched.timeInDriver; - case ANEURALNETWORKS_FENCED_DURATION_ON_HARDWARE: - return timingFenced.timeOnDevice; - case ANEURALNETWORKS_FENCED_DURATION_IN_DRIVER: - return timingFenced.timeInDriver; - default: - LOG(FATAL) << "unexpected"; - return std::nullopt; - } - }(); - if (selectedDuration.has_value()) { - constexpr uint64_t kMaxTiming = std::numeric_limits<uint64_t>::max() - 1; - using CommonType = std::common_type_t<Duration::rep, uint64_t>; - const auto count = std::min<CommonType>(selectedDuration.value().count(), kMaxTiming); - *duration = static_cast<uint64_t>(count); - } else { - constexpr uint64_t kNoTiming = std::numeric_limits<uint64_t>::max(); - *duration = kNoTiming; + uint64_t microDuration = UINT64_MAX; + switch 
(durationCode) { + case ANEURALNETWORKS_DURATION_ON_HARDWARE: + microDuration = timingLaunched.timeOnDevice; + break; + case ANEURALNETWORKS_DURATION_IN_DRIVER: + microDuration = timingLaunched.timeInDriver; + break; + case ANEURALNETWORKS_FENCED_DURATION_ON_HARDWARE: + microDuration = timingFenced.timeOnDevice; + break; + case ANEURALNETWORKS_FENCED_DURATION_IN_DRIVER: + microDuration = timingFenced.timeInDriver; + break; + default: + CHECK(!"unexpected"); } + *duration = (microDuration == UINT64_MAX) ? UINT64_MAX : kNanoPerMicro * microDuration; VLOG(EXECUTION) << "getDuration(" << durationCode << "): " << *duration; return ANEURALNETWORKS_NO_ERROR; @@ -420,7 +370,7 @@ "ANeuralNetworksCompilation_createForDevices with numDevices = 1"; return ANEURALNETWORKS_BAD_DATA; } - if (computationStarted()) { + if (mStarted) { LOG(ERROR) << "ANeuralNetworksExecution_setTimeout called after the execution has started."; return ANEURALNETWORKS_BAD_STATE; } @@ -437,7 +387,7 @@ } int ExecutionBuilder::setLoopTimeout(uint64_t duration) { - if (computationStarted()) { + if (mStarted) { LOG(ERROR) << "ANeuralNetworksExecution_setLoopTimeout called after the " "execution has started."; return ANEURALNETWORKS_BAD_STATE; @@ -451,33 +401,8 @@ return ANEURALNETWORKS_NO_ERROR; } -int ExecutionBuilder::enableInputAndOutputPadding(bool enable) { - if (computationStarted()) { - LOG(ERROR) << "ANeuralNetworksExecution_enableInputAndOutputPadding called after the " - "execution has started."; - return ANEURALNETWORKS_BAD_STATE; - } - if (mHasCalledSetInputOutput) { - LOG(ERROR) << "ANeuralNetworksExecution_enableInputAndOutputPadding called after an input " - "or output is set."; - return ANEURALNETWORKS_BAD_STATE; - } - mInputAndOutputPaddingEnabled = enable; - return ANEURALNETWORKS_NO_ERROR; -} - -int ExecutionBuilder::setReusable(bool reusable) { - if (computationStarted()) { - LOG(ERROR) << "ANeuralNetworksExecution_setReusable called after the " - "execution has started."; - return 
ANEURALNETWORKS_BAD_STATE; - } - mReusable = reusable; - return ANEURALNETWORKS_NO_ERROR; -} - int ExecutionBuilder::getOutputOperandDimensions(uint32_t index, uint32_t* dimensions) { - if (!completed()) { + if (!isFinished()) { LOG(ERROR) << "ANeuralNetworksExecution_getOutputOperandDimensions called before the " "execution has finished."; return ANEURALNETWORKS_BAD_STATE; @@ -506,7 +431,7 @@ } int ExecutionBuilder::getOutputOperandRank(uint32_t index, uint32_t* rank) { - if (!completed()) { + if (!isFinished()) { LOG(ERROR) << "ANeuralNetworksExecution_getOutputOperandRank called before the " "execution has finished."; return ANEURALNETWORKS_BAD_STATE; @@ -527,74 +452,6 @@ : ANEURALNETWORKS_OUTPUT_INSUFFICIENT_SIZE; } -bool ExecutionBuilder::checkAndSetComputationState(const char* name) { - std::lock_guard<std::mutex> lock(mStateMutex); - if (!mReusable && mState == State::COMPLETED) { - LOG(ERROR) << "ANeuralNetworksExecution_" << name - << " called on a non-reusable execution that has already completed"; - return false; - } - if (mState == State::COMPUTATION) { - LOG(ERROR) << "ANeuralNetworksExecution_" << name - << " called on an execution that has already started"; - return false; - } - mState = State::COMPUTATION; - return true; -} - -// TODO(b/132321855): validate that we have full types for all inputs and outputs, -// that the graph is not cyclic, -static int validateRequest(const std::vector<ModelArgumentInfo>& inputs, - const std::vector<ModelArgumentInfo>& outputs) { - for (auto& p : inputs) { - if (p.state() == ModelArgumentInfo::UNSPECIFIED) { - LOG(ERROR) << "ANeuralNetworksExecution starts compute when not all inputs specified"; - return ANEURALNETWORKS_BAD_DATA; - } - } - for (auto& p : outputs) { - if (p.state() == ModelArgumentInfo::UNSPECIFIED) { - LOG(ERROR) << "ANeuralNetworksExecution starts compute when not all outputs specified"; - return ANEURALNETWORKS_BAD_DATA; - } - } - return ANEURALNETWORKS_NO_ERROR; -} - -int 
ExecutionBuilder::getValidationResultCode() { - if (!mValidationResultCode.has_value()) { - mValidationResultCode = validateRequest(mInputs, mOutputs); - } - return mValidationResultCode.value(); -} - -bool ExecutionBuilder::areOutputsFullySpecified() { - if (!mOutputsFullySpecified.has_value()) { - mOutputsFullySpecified = true; - for (uint32_t i = 0; i < mOutputs.size(); i++) { - if (mOutputs[i].state() != ModelArgumentInfo::HAS_NO_VALUE && - TypeManager::get()->isTensorType(mModel->getOutputOperand(i).type) && - tensorHasUnspecifiedDimensions(mModel->getOutputOperand(i).type, - mOutputs[i].initialDimensions())) { - mOutputsFullySpecified = false; - break; - } - } - } - return mOutputsFullySpecified.value(); -} - -int ExecutionBuilder::prepareForCompute(const char* name) { - if (!checkAndSetComputationState(name)) { - return ANEURALNETWORKS_BAD_STATE; - } - if (int n = getValidationResultCode(); n != ANEURALNETWORKS_NO_ERROR) { - return finishComputation(n, {}); - } - return ANEURALNETWORKS_NO_ERROR; -} - // Attempt synchronous execution of full model on CPU. // TODO: How should we handle timing in this case? // For Q this is irrelevant: We only support timing in conjunction @@ -608,8 +465,7 @@ // Get fallback executor. StepExecutor executor(executionBuilder, executionBuilder->getModel(), - DeviceManager::getCpuDevice(), /*preparedModel=*/nullptr, - /*reusable=*/false); + DeviceManager::getCpuDevice(), /*preparedModel=*/nullptr); executor.mapInputsAndOutputsTrivially(); // Attempt fallback execution. @@ -629,9 +485,9 @@ // Get fallback executor. 
std::shared_ptr<StepExecutor> executor; - int n1 = plan.fallback(controller, &executor, nullptr, nullptr); + int n1 = plan.fallback(controller, &executor); if (n1 != ANEURALNETWORKS_NO_ERROR) { - return {n1, {}, {}, nullptr}; + return {n1, {}, kNoTiming, nullptr}; } CHECK(executor != nullptr); @@ -640,307 +496,191 @@ return {n2, std::move(outputShapes), timing, executor}; } -std::tuple<int, std::vector<OutputShape>, Timing> SimpleExecutionBuilder::computeInternal( - const OptionalTimePoint& deadline, BurstBuilder* burstBuilder) { - NNTRACE_RT(NNTRACE_PHASE_EXECUTION, "SimpleExecutionBuilder::computeInternal"); - VLOG(EXECUTION) << "SimpleExecutionBuilder::computeInternal"; +static void asyncStartComputePartitioned(ExecutionBuilder* executionBuilder, + const ExecutionPlan& plan, + std::shared_ptr<ExecutionPlan::Controller> controller, + bool allowFallback, + const std::optional<Deadline>& deadline, + const sp<ExecutionCallback>& executionCallback) { + CHECK(executionBuilder != nullptr); + VLOG(EXECUTION) << "ExecutionBuilder::compute (from plan, iteratively)"; - if (mExecutor == nullptr) { - mExecutor = mPlan->makeStepExecutor(mReusable, this); - } - - auto burstController = burstBuilder ? burstBuilder->getControllerAt(0) : nullptr; - auto [n, outputShapes, timing] = mExecutor->compute(deadline, burstController); - - if (n == ANEURALNETWORKS_NO_ERROR) { - return {n, std::move(outputShapes), timing}; - } - - // ANEURALNETWORKS_OUTPUT_INSUFFICIENT_SIZE is not recoverable. - if (n == ANEURALNETWORKS_OUTPUT_INSUFFICIENT_SIZE) { - return {n, std::move(outputShapes), {}}; - } - - // If CPU fallback is not allowed and there was an error, end execution. - if (!mAllowCpuFallback) { - return {n, {}, {}}; - } - - // If CPU execution was already attempted, do not perform CPU fallback. - if (mExecutor->isCpu()) { - return {n, {}, {}}; - } - - // If the code has reached this point, a potentially recoverable error - // occurred during the execution. 
Do an execution fallback on the CPU. - return cpuFallbackFull(this); -} - -std::tuple<int, std::vector<OutputShape>, Timing> CompoundExecutionBuilder::computeInternal( - const OptionalTimePoint& deadline, BurstBuilder* burstBuilder) { - NNTRACE_RT(NNTRACE_PHASE_EXECUTION, "CompoundExecutionBuilder::computeInternal"); - VLOG(EXECUTION) << "CompoundExecutionBuilder::computeInternal (from plan, iteratively)"; - - auto controller = mPlan->makeController(this, burstBuilder); - std::vector<OutputShape> outputShapes = getInitialOutputShapes(); - - // On this iteration, do I need to repeat the previous step because it - // reported insufficient size? - bool doInsufficientSizeFallback = false; + std::vector<OutputShape> outputShapes = executionBuilder->getInitialOutputShapes(); + Timing timing = kNoTiming; + // Disallow fallback when the ExecutionPlan is simple on CPU. + allowFallback &= !plan.isSimpleCpu(); while (true) { VLOG(EXECUTION) << "looking for next StepExecutor"; // Get the current step of the execution. std::shared_ptr<StepExecutor> executor; - SharedBurst burstController; - int n = doInsufficientSizeFallback - ? mPlan->fallback(controller, &executor, &burstController, &outputShapes) - : mPlan->next(controller, &executor, &burstController, &outputShapes); - doInsufficientSizeFallback = false; + std::shared_ptr<ExecutionBurstController> burstController; + int n = plan.next(controller, &executor, &burstController); if (n != ANEURALNETWORKS_NO_ERROR) { // During the interpreted execution of control flow, a loop timeout // might occur in ExecutionPlan::next(). 
bool missedDeadline = n == ANEURALNETWORKS_MISSED_DEADLINE_TRANSIENT || n == ANEURALNETWORKS_MISSED_DEADLINE_PERSISTENT; - if (mAllowCpuFallback && !missedDeadline) break; - return {n, {}, {}}; + if (allowFallback && !missedDeadline) break; + executionCallback->notify(convertResultCodeToErrorStatus(n), {}, kNoTiming); + return; } // If the code reached the end of the plan without error, then return // with no error. if (executor == nullptr) { - return {ANEURALNETWORKS_NO_ERROR, outputShapes, {}}; + executionCallback->notify(ErrorStatus::NONE, outputShapes, timing); + return; } const bool executorIsCpu = executor->isCpu(); // Attempt to execute a single step of the execution. - auto [stepN, stepOutputShapes, _] = executor->compute(deadline, burstController); + auto [stepN, stepOutputShapes, stepTiming] = executor->compute(deadline, burstController); - // Update global outputs and dynamic temporaries. - StepExecutor::UpdateOutputShapes updateOutputShapes = {}; - if (!executor->updateOutputShapes(stepN, stepOutputShapes, &outputShapes, - &updateOutputShapes)) { + // Update global outputs. + if (!executor->updateOutputShapes(stepOutputShapes, &outputShapes)) { stepN = ANEURALNETWORKS_OP_FAILED; } // If execution was successful, continue to next step. if (stepN == ANEURALNETWORKS_NO_ERROR) { - if (updateOutputShapes.zeroSizedInput) { - // We'll need to do full model CPU fallback - VLOG(EXECUTION) << "updateOutputShapes.zeroSizedInput"; - stepN = ANEURALNETWORKS_OP_FAILED; - } else { - CHECK(executor->areDynamicTemporariesAllocated()); - continue; - } - } - - if (stepN == ANEURALNETWORKS_OUTPUT_INSUFFICIENT_SIZE) { - VLOG(EXECUTION) << "OUTPUT_INSUFFICIENT_SIZE: " << toString(updateOutputShapes); - if (updateOutputShapes.mainOutputInsufficient || - !updateOutputShapes.updatedDynamicTemporary) { - // Either: - // - At least one main model output is not of sufficient size; or - // - we didn't learn anything new about dynamic temporaries. 
- // Neither of these is recoverable, so end execution. - return {stepN, outputShapes, {}}; - } - // Every main model output is of sufficient size. This implies that - // at least one dynamic temporary is not of sufficient size. This - // is recoverable. - doInsufficientSizeFallback = true; + // We only support collection of timing information in the case of a + // single step, so it's safe to just keep track of the last step's + // timing information. + timing = stepTiming; continue; } - // If CPU fallback is not allowed and there was an error, end execution. - if (!mAllowCpuFallback) { - return {stepN, {}, {}}; + // OUTPUT_INSUFFICIENT_SIZE is not recoverable, so end execution. + if (stepN == ANEURALNETWORKS_OUTPUT_INSUFFICIENT_SIZE) { + const ErrorStatus stepStatus = convertResultCodeToErrorStatus(stepN); + executionCallback->notify(stepStatus, outputShapes, kNoTiming); + return; } - // If CPU execution was already attempted, perform a full CPU fallback. + // If fallback is not allowed and there was an error, end execution. + if (!allowFallback) { + const ErrorStatus stepStatus = convertResultCodeToErrorStatus(stepN); + executionCallback->notify(stepStatus, {}, kNoTiming); + return; + } + + // If CPU execution was already attempted, either: + // (1) perform a full fallback if the plan is not simple, or + // (2) return from the function with an error if (executorIsCpu) { - break; + if (!plan.isSimple()) break; + executionCallback->notify(convertResultCodeToErrorStatus(stepN), {}, kNoTiming); + return; } // If the code reaches this point, attempt a partial fallback to CPU. - CHECK(mAllowCpuFallback); - if (updateOutputShapes.zeroSizedInput) { - // Do not attempt a partial fallback. - break; + CHECK(allowFallback); + auto [fallbackN, fallbackOutputShapes, fallbackTiming, fallbackExecutor] = + cpuFallbackPartial(plan, controller); + + // Update global outputs. 
+ if (fallbackExecutor != nullptr && + !fallbackExecutor->updateOutputShapes(fallbackOutputShapes, &outputShapes)) { + fallbackN = ANEURALNETWORKS_OP_FAILED; } - while (true) { - auto [fallbackN, fallbackOutputShapes, _, fallbackExecutor] = - cpuFallbackPartial(*mPlan, controller); - // Update global outputs and dynamic temporaries. - StepExecutor::UpdateOutputShapes fallbackUpdateOutputShapes = {}; - if (fallbackExecutor != nullptr && - !fallbackExecutor->updateOutputShapes(fallbackN, fallbackOutputShapes, - &outputShapes, &fallbackUpdateOutputShapes)) { - fallbackN = ANEURALNETWORKS_OP_FAILED; - } + // If execution was successful, continue to next step. + if (fallbackN == ANEURALNETWORKS_NO_ERROR) { + // We only support collection of timing information in the case of a + // single step, so it's safe to just keep track of the last step's + // timing information. + timing = fallbackTiming; + continue; + } - // If execution was successful, continue to next step. - if (fallbackN == ANEURALNETWORKS_NO_ERROR) { - if (fallbackUpdateOutputShapes.zeroSizedInput) { - // We'll need to do full model CPU fallback - VLOG(EXECUTION) << "fallbackUpdateOutputShapes.zeroSizedInput"; - fallbackN = ANEURALNETWORKS_OP_FAILED; - break; - } - CHECK(fallbackExecutor->areDynamicTemporariesAllocated()); - goto nextStep; - } + // OUTPUT_INSUFFICIENT_SIZE is not recoverable, so end execution. 
+ if (fallbackN == ANEURALNETWORKS_OUTPUT_INSUFFICIENT_SIZE) { + const ErrorStatus fallbackStatus = convertResultCodeToErrorStatus(fallbackN); + executionCallback->notify(fallbackStatus, outputShapes, kNoTiming); + return; + } - if (fallbackN == ANEURALNETWORKS_OUTPUT_INSUFFICIENT_SIZE) { - VLOG(EXECUTION) << "OUTPUT_INSUFFICIENT_SIZE: " - << toString(fallbackUpdateOutputShapes); - if (fallbackUpdateOutputShapes.mainOutputInsufficient || - !fallbackUpdateOutputShapes.updatedDynamicTemporary) { - // Either: - // - At least one main model output is not of sufficient size; or - // - we didn't learn anything new about dynamic temporaries. - // Neither of these is recoverable, so end execution. - return {fallbackN, outputShapes, {}}; - } - // Every main model output is of sufficient size. This implies - // that at least one dynamic temporary is not of sufficient - // size. This is recoverable. - continue; - } - - // If the code reaches this point, then there was an error with the - // fallback. In this case, attempt full fallback. - break; + // Do not fallback twice if the ExecutionPlan is simple. + if (plan.isSimple()) { + const ErrorStatus fallbackStatus = convertResultCodeToErrorStatus(fallbackN); + executionCallback->notify(fallbackStatus, {}, kNoTiming); + return; } // If the code reaches this point, then there was an error with the // fallback. In this case, attempt full fallback. break; - - nextStep: - // Bottom of the outer loop - continue; } // If the code has reached this point, a potentially recoverable error // occurred during the step executions. Instead, do a full execution // fallback on the CPU. 
- return cpuFallbackFull(this); + auto [fullN, fullOutputShapes, fullTiming] = cpuFallbackFull(executionBuilder); + const ErrorStatus fullStatus = convertResultCodeToErrorStatus(fullN); + executionCallback->notify(fullStatus, fullOutputShapes, fullTiming); } -static bool waitForSyncFences(const std::vector<int>& waitFor) { - for (int syncFd : waitFor) { - if (syncFd > 0) { - auto r = syncWait(syncFd, -1); - if (r != FenceState::SIGNALED) { - VLOG(EXECUTION) << "syncWait failed, fd: " << syncFd; - return false; - } - } - } - return true; -} - -std::tuple<int, int, ExecuteFencedInfoCallback> SimpleExecutionBuilder::computeFencedInternal( - const std::vector<int>& waitFor, uint64_t timeoutDurationAfterFence, - const OptionalTimePoint& deadline) { - NNTRACE_RT(NNTRACE_PHASE_EXECUTION, "SimpleExecutionBuilder::computeFencedInternal"); - VLOG(EXECUTION) << "SimpleExecutionBuilder::computeFencedInternal"; - - if (mExecutor == nullptr) { - mExecutor = mPlan->makeStepExecutor(mReusable, this); - } - - auto [n, syncFd, callback] = - mExecutor->computeFenced(waitFor, timeoutDurationAfterFence, deadline); - - if (n == ANEURALNETWORKS_NO_ERROR) { - return {ANEURALNETWORKS_NO_ERROR, syncFd, callback}; - } - - // If CPU fallback is not allowed and there was an error, end execution. - if (!mAllowCpuFallback) { - return {n, -1, nullptr}; - } - - // If CPU execution was already attempted, return from the function with an error. - if (mExecutor->isCpu()) { - return {n, -1, nullptr}; - } - - // If the code has reached this point, a potentially recoverable error - // occurred during the step executions. Instead, do a full execution - // fallback on the CPU. 
- VLOG(EXECUTION) << "Performing full fallback on the CPU."; - if (!waitForSyncFences(waitFor)) { - return {ANEURALNETWORKS_OP_FAILED, -1, nullptr}; - } - auto [fallbackN, fallbackOutputShapes, fallbackTiming] = cpuFallbackFull(this); - reportTimingWithoutFencedExecutionCallback(fallbackTiming); - return {fallbackN, -1, nullptr}; -} - -// In case of partitioned execution, computeFencedInternal call will return the sync +// In case of partitioned execution, startComputeFenced call will return the sync // fence and the fenced compute callback returned from the last partition. -// Any failed partition will result in whole execution fallback to CPU if -// mAllowCpuFallback is set to true. -std::tuple<int, int, ExecuteFencedInfoCallback> CompoundExecutionBuilder::computeFencedInternal( - const std::vector<int>& waitFor, uint64_t timeoutDurationAfterFence, - const OptionalTimePoint& deadline) { - NNTRACE_RT(NNTRACE_PHASE_EXECUTION, "CompoundExecutionBuilder::computeFencedInternal"); - VLOG(EXECUTION) << "CompoundExecutionBuilder::computeFencedInternal (from plan, iteratively)"; - - // We should have detected this earlier in the call chain and fallen back to - // non-fenced execution. This is an implementation limitation: In order to - // support dynamic temporarires in this code, we'd need to implement - // something like the following: - // - If a partition has outputs of unknown size, compute that partition in a - // non fenced fashion, just as if it were scheduled on a driver that does - // not support fenced execution. - // - Implement something similar to the code in CompoundExecutionBuilder::computeInternal() - // that handles a step execution that fails with - // ANEURALNETWORKS_OUTPUT_INSUFFICIENT_SIZE. - CHECK(!mCompilation->hasDynamicTemporaries()); +// Any failed partition will result in the whole execution fallback to CPU if +// allowFallback is set to true. 
+static std::tuple<int, int, sp<hal::IFencedExecutionCallback>> startComputeFenced( + ExecutionBuilder* executionBuilder, const ExecutionPlan& plan, + std::shared_ptr<ExecutionPlan::Controller> controller, const std::vector<int>& waitFor, + uint64_t timeoutDurationAfterFence, const std::optional<Deadline>& deadline, + bool allowFallback) { + CHECK(executionBuilder != nullptr); + VLOG(EXECUTION) << "ExecutionBuilder::computeFenced (from plan, iteratively)"; + // Disallow fallback when the ExecutionPlan is simple on CPU. + allowFallback &= !plan.isSimpleCpu(); // Initiate waitForFds, syncFence for the first step. std::vector<int> waitForFds = waitFor; - base::unique_fd syncFence; - ExecuteFencedInfoCallback executeFencedInfoCallback; + int syncFence = -1; + sp<hal::IFencedExecutionCallback> computeFencedCallback; - std::shared_ptr<ExecutionPlan::Controller> controller = mPlan->makeController(this, nullptr); while (true) { VLOG(EXECUTION) << "looking for next StepExecutor"; // Get the current step of the execution. std::shared_ptr<StepExecutor> executor; - int n = mPlan->next(controller, &executor, nullptr, nullptr, syncFence.get()); + int n = plan.next(controller, &executor, nullptr, syncFence); if (n != ANEURALNETWORKS_NO_ERROR) { // During the interpreted execution of control flow, a loop timeout // might occur in ExecutionPlan::next(). bool missedDeadline = n == ANEURALNETWORKS_MISSED_DEADLINE_TRANSIENT || n == ANEURALNETWORKS_MISSED_DEADLINE_PERSISTENT; - if (mAllowCpuFallback && !missedDeadline) break; + if (allowFallback && !missedDeadline) break; // Return -1 for the sync fence fd, and nullptr for the callback. - return {n, -1, nullptr}; + return std::make_tuple(n, -1, nullptr); } // If the code reached the end of the plan without error, then return // with no error. if (executor == nullptr) { - return {ANEURALNETWORKS_NO_ERROR, syncFence.release(), executeFencedInfoCallback}; + // If the final step returns a -1 for sync fence, the execution is finished. 
+ // Update the output shapes. + if (syncFence == -1) { + // TODO(miaowang): support dynamic output shape only with memory domain. + // For now just return the initial output shapes. + executionBuilder->finishWithoutSyncFence( + ErrorStatus::NONE, executionBuilder->getInitialOutputShapes()); + } + return std::make_tuple(ANEURALNETWORKS_NO_ERROR, syncFence, computeFencedCallback); } + const bool executorIsCpu = executor->isCpu(); - // Attempt to compute a single step of the execution. + // Attempt to execute a single step of the execution. auto [stepN, syncFd, callback] = executor->computeFenced(waitForFds, timeoutDurationAfterFence, deadline); // Update waitForFds, syncFence for the next step. - syncFence.reset(syncFd); - executeFencedInfoCallback = callback; + syncFence = syncFd; + computeFencedCallback = callback; waitForFds.clear(); - if (syncFd >= 0) { + if (syncFd > 0) { waitForFds = {syncFd}; } @@ -948,11 +688,18 @@ if (stepN == ANEURALNETWORKS_NO_ERROR) { continue; } - // If CPU fallback is not allowed and there was an error, end execution. - if (!mAllowCpuFallback) { - return {stepN, -1, nullptr}; + // If fallback is not allowed and there was an error, end execution. + if (!allowFallback) { + return std::make_tuple(stepN, -1, nullptr); } + // If CPU execution was already attempted, either: + // (1) perform a full fallback if the plan is not simple, or + // (2) return from the function with an error + if (executorIsCpu) { + if (!plan.isSimple()) break; + return std::make_tuple(stepN, -1, nullptr); + } // If the code reaches this point, then there was an error with the // fallback. In this case, attempt full fallback. break; @@ -962,17 +709,31 @@ // occurred during the step executions. Instead, do a full execution // fallback on the CPU. 
VLOG(EXECUTION) << "Performing full fallback on the CPU."; - if (!waitForSyncFences(waitFor)) { - return {ANEURALNETWORKS_OP_FAILED, -1, nullptr}; + for (int syncFd : waitFor) { + if (syncFd > 0) { + auto r = syncWait(syncFd, -1); + if (r != FenceState::SIGNALED) { + VLOG(EXECUTION) << "syncWait failed, fd: " << syncFd; + return std::make_tuple(ANEURALNETWORKS_OP_FAILED, -1, nullptr); + } + } } - auto [fullN, fullOutputShapes, _] = cpuFallbackFull(this); - return {fullN, -1, nullptr}; + auto [fullN, fullOutputShapes, fullTiming] = cpuFallbackFull(executionBuilder); + const ErrorStatus fullStatus = convertResultCodeToErrorStatus(fullN); + syncFence = -1; + executionBuilder->finishWithoutSyncFence(fullStatus, fullOutputShapes); + executionBuilder->reportTimingWithoutFencedExecutionCallback(fullTiming); + return std::make_tuple(fullN, syncFence, nullptr); } int ExecutionBuilder::computeFenced(const std::vector<int>& waitFor, uint64_t timeoutDurationAfterFence, int* syncFence) { CHECK(syncFence != nullptr); - NN_RETURN_IF_ERROR(prepareForCompute("startComputeWithDependencies")); + if (mStarted) { + LOG(ERROR) << "ANeuralNetworksExecution_startComputeWithDependencies" + " called on an execution that has already started"; + return ANEURALNETWORKS_BAD_STATE; + } if (timeoutDurationAfterFence > 0) { if (!mCompilation->mExplicitDeviceList || (mCompilation->mDevices.size() != 1)) { LOG(ERROR) @@ -980,34 +741,45 @@ "duration on an ANeuralNetworksExecution " "created from an ANeuralNetworksCompilation that was not created by " "ANeuralNetworksCompilation_createForDevices with numDevices = 1"; - return finishComputation(ANEURALNETWORKS_BAD_DATA, {}); + return ANEURALNETWORKS_BAD_DATA; } } - if (!areOutputsFullySpecified()) { - LOG(ERROR) << "ANeuralNetworksExecution_startComputeWithDependencies" - " not all outputs have fully specified dimensions"; - return finishComputation(ANEURALNETWORKS_BAD_DATA, {}); + const auto deadline = makeDeadline(mTimeoutDuration); + for (auto& p : 
mInputs) { + if (p.state() == ModelArgumentInfo::UNSPECIFIED) { + LOG(ERROR) << "ANeuralNetworksExecution_startComputeWithDependencies" + " not all inputs specified"; + return ANEURALNETWORKS_BAD_DATA; + } } - - // Unlike ExecutionBuilder::compute, we do not need to reset output dimensions here because - // fenced executions do not support dynamic output shape. - + for (auto& p : mOutputs) { + if (p.state() == ModelArgumentInfo::UNSPECIFIED) { + LOG(ERROR) << "ANeuralNetworksExecution_startComputeWithDependencies" + " not all outputs specified"; + return ANEURALNETWORKS_BAD_DATA; + } + } + for (uint32_t i = 0; i < mOutputs.size(); i++) { + if (mOutputs[i].state() != ModelArgumentInfo::HAS_NO_VALUE && + !checkDimensionInfo(mModel->getOutputOperand(i), nullptr, + "ANeuralNetworksExecution_startComputeWithDependencies", false)) { + LOG(ERROR) << "ANeuralNetworksExecution_startComputeWithDependencies" + " not all outputs have fully specified dimensions"; + return ANEURALNETWORKS_BAD_DATA; + } + } + mStarted = true; + const bool allowFallback = DeviceManager::partitioningAllowsFallback(mPartitioning); + std::shared_ptr<ExecutionPlan::Controller> controller = mPlan->makeController(this, nullptr); VLOG(EXECUTION) << "ExecutionBuilder::computeFenced"; int result; - const auto deadline = makeDeadline(mTimeoutDuration); - std::tie(result, *syncFence, mFencedExecutionCallback) = - computeFencedInternal(waitFor, timeoutDurationAfterFence, deadline); - // If there is an error, call finishComputation to mark the computation as completed. - // Otherwise, we will call finishComputation in SyncFenceEvent::wait(). - if (result != ANEURALNETWORKS_NO_ERROR) { - // TODO(miaowang): support dynamic output shape only with memory domain. - // For now just return empty output shapes. 
- result = finishComputation(result, {}); - } + std::tie(result, mSyncFenceFd, mFencedExecutionCallback) = startComputeFenced( + this, *mPlan, controller, waitFor, timeoutDurationAfterFence, deadline, allowFallback); + *syncFence = mSyncFenceFd; return result; } -int ExecutionBuilder::compute(std::shared_ptr<ExecutionCallback>* synchronizationCallback, +int ExecutionBuilder::compute(sp<ExecutionCallback>* synchronizationCallback, BurstBuilder* burstBuilder) { CHECK(synchronizationCallback == nullptr || burstBuilder == nullptr) << "synchronizationCallback and burstBuilder cannot simultaneously be used"; @@ -1017,69 +789,82 @@ *synchronizationCallback = nullptr; } - const char* name = burstBuilder ? "burstCompute" : synchronous ? "compute" : "startCompute"; - NN_RETURN_IF_ERROR(prepareForCompute(name)); + const auto deadline = makeDeadline(mTimeoutDuration); - // Validate input memory dimensions. We need to do the validation in every computation because - // the memory dimensions may change between computations. + // TODO validate that we have full types for all inputs and outputs, + // that the graph is not cyclic, + + auto name = [synchronous, burstBuilder] { + return burstBuilder ? "burstCompute" : synchronous ? 
"compute" : "startCompute"; + }; + if (mStarted) { + LOG(ERROR) << "ANeuralNetworksExecution_" << name() + << " called on an execution that has already started"; + return ANEURALNETWORKS_BAD_STATE; + } for (auto& p : mInputs) { - if (p.state() == ModelArgumentInfo::MEMORY) { - const RuntimeMemory* memory = mMemories[p.locationAndLength().poolIndex]; + if (p.state() == ModelArgumentInfo::UNSPECIFIED) { + LOG(ERROR) << "ANeuralNetworksExecution_" << name() << " not all inputs specified"; + return ANEURALNETWORKS_BAD_DATA; + } else if (p.state() == ModelArgumentInfo::MEMORY) { + const Memory* memory = mMemories[p.locationAndLength().poolIndex]; if (!memory->getValidator().validateInputDimensions(p.dimensions())) { - return finishComputation(ANEURALNETWORKS_OP_FAILED, {}); + return ANEURALNETWORKS_OP_FAILED; } } } - - // Reset output dimensions. - if (!areOutputsFullySpecified()) { - for (auto& output : mOutputs) { - output.reset(); + for (auto& p : mOutputs) { + if (p.state() == ModelArgumentInfo::UNSPECIFIED) { + LOG(ERROR) << "ANeuralNetworksExecution_" << name() << " not all outputs specified"; + return ANEURALNETWORKS_BAD_DATA; } } - const auto deadline = makeDeadline(mTimeoutDuration); - if (synchronous) { - if (burstBuilder) { - VLOG(EXECUTION) << "ExecutionBuilder::compute (synchronous API, burst)"; - } else { - VLOG(EXECUTION) << "ExecutionBuilder::compute (synchronous API)"; - } - const auto [n, outputShapes, timing] = computeInternal(deadline, burstBuilder); - if (mMeasureTiming) { - mTimingWithoutFencedExecutionCallback = timing; - } - return finishComputation(n, outputShapes); - } else /* asynchronous */ { - // TODO: For asynchronous execution, entire plan-based-path should run in an - // asynchronous thread -- take the asynchronous thread logic out of - // CpuExecution::compute() and use it to wrap the plan-based-path. 
+ auto wrappedFinish = [this](ErrorStatus error, const std::vector<OutputShape>& outputShapes) { + return finishWithoutSyncFence(error, outputShapes); + }; + // TODO: For asynchronous execution, entire plan-based-path should run in an + // asynchronous thread -- take the asynchronous thread logic out of + // CpuPreparedModel::execute() and use it to wrap the plan-based-path. + mStarted = true; + const bool allowFallback = DeviceManager::partitioningAllowsFallback(mPartitioning); + std::shared_ptr<ExecutionPlan::Controller> controller = + mPlan->makeController(this, burstBuilder); + if (synchronous) { + VLOG(EXECUTION) << "ExecutionBuilder::compute (synchronous API)"; + sp<ExecutionCallback> localSynchronizationCallback = new ExecutionCallback(); + localSynchronizationCallback->setOnFinish(wrappedFinish); + asyncStartComputePartitioned(this, *mPlan, controller, allowFallback, deadline, + localSynchronizationCallback); + localSynchronizationCallback->wait(); + if (mMeasureTiming) { + mTimingWithoutFencedExecutionCallback = localSynchronizationCallback->getTiming(); + } + return convertErrorStatusToResultCode(localSynchronizationCallback->getStatus()); + } else /* asynchronous */ { // TODO: use a thread pool // TODO(mikie): this could have NNTRACE so we could measure the overhead // of spinning up a new thread. // Prepare the callback for asynchronous execution. - // std::shared_ptr<ExecutionCallback> object is returned when the + // sp<ExecutionCallback> object is returned when the // execution has been successfully launched, otherwise a // nullptr is returned. The executionCallback is // abstracted in the NN API as an "event". 
- auto executionCallback = std::make_shared<ExecutionCallback>(); - executionCallback->setOnFinish( - [this](ErrorStatus error, const std::vector<OutputShape>& outputShapes) { - return finishComputation(error, outputShapes); - }); - const auto asyncStartCompute = [this, deadline, executionCallback] { - const auto [n, outputShapes, timing] = computeInternal(deadline, nullptr); - const auto status = convertResultCodeToErrorStatus(n); - executionCallback->notify(status, outputShapes, timing); - }; + sp<ExecutionCallback> executionCallback = new ExecutionCallback(); + executionCallback->setOnFinish(wrappedFinish); if (DeviceManager::get()->syncExecRuntime()) { VLOG(EXECUTION) << "ExecutionBuilder::compute (asynchronous API, non-threaded)"; - asyncStartCompute(); + asyncStartComputePartitioned(this, *mPlan, controller, allowFallback, deadline, + executionCallback); } else { VLOG(EXECUTION) << "ExecutionBuilder::compute (asynchronous API)"; - std::thread asyncExecution(asyncStartCompute); + std::thread asyncExecution( + [this, controller, allowFallback, deadline, executionCallback] { + asyncStartComputePartitioned(this, *mPlan, controller, allowFallback, + deadline, executionCallback); + }); executionCallback->bindThread(std::move(asyncExecution)); } *synchronizationCallback = executionCallback; @@ -1091,7 +876,7 @@ std::vector<OutputShape> outputShapes(mOutputs.size()); std::transform(mOutputs.begin(), mOutputs.end(), outputShapes.begin(), [](const auto& x) -> OutputShape { - std::vector<uint32_t> dimensions; + hidl_vec<uint32_t> dimensions; if (x.state() != ModelArgumentInfo::HAS_NO_VALUE) { dimensions = x.dimensions(); } @@ -1101,7 +886,7 @@ } // Check if the dimensions "to" is updatable by dimensions "from", where "from" must -// have no lower a specification level. +// have a higher specification level. 
static bool isUpdatable(const std::vector<uint32_t>& to, const std::vector<uint32_t>& from) { if (to.size() == 0) return true; NN_RET_CHECK_EQ(to.size(), from.size()); @@ -1111,17 +896,7 @@ return true; } -static bool isZeroSizedTensor(int executionResultCode, const OutputShape& outputShape) { - return (executionResultCode == ANEURALNETWORKS_NO_ERROR) && outputShape.isSufficient && - outputShape.dimensions.size() && - (std::find(outputShape.dimensions.begin(), outputShape.dimensions.end(), uint32_t(0)) != - outputShape.dimensions.end()); -} - -bool ExecutionBuilder::updateOutputShapes(ErrorStatus status, - const std::vector<OutputShape>& outputShapes) { - NN_RET_CHECK(validateOutputShapesFromDriver(status, mModel, outputShapes)); - +bool ExecutionBuilder::updateOutputShapes(const std::vector<OutputShape>& outputShapes) { if (outputShapes.size() == 0) { return true; } @@ -1143,162 +918,54 @@ bool ExecutionBuilder::updateMemories() { for (const auto& output : mOutputs) { if (output.state() != ModelArgumentInfo::MEMORY) continue; - const RuntimeMemory* memory = mMemories[output.locationAndLength().poolIndex]; + const Memory* memory = mMemories[output.locationAndLength().poolIndex]; NN_RET_CHECK(memory->getValidator().updateMetadata({.dimensions = output.dimensions()})); } return true; } -int ExecutionBuilder::finishComputation(int result, const std::vector<OutputShape>& outputShapes) { - const auto status = convertResultCodeToErrorStatus(result); - if (!updateOutputShapes(status, outputShapes) || !updateMemories()) { - result = ANEURALNETWORKS_OP_FAILED; +ErrorStatus ExecutionBuilder::finishWithoutSyncFence(ErrorStatus status, + const std::vector<OutputShape>& outputShapes) { + CHECK(!mFinishedWithoutSyncFence) << "ExecutionBuilder::finishWithoutSyncFence is called twice"; + CHECK(!hasSyncFence()) + << "ExecutionBuilder::finishWithoutSyncFence is called when hasSyncFence()"; + if (!updateOutputShapes(outputShapes) || !updateMemories()) { + status = 
ErrorStatus::GENERAL_FAILURE; } - bool success = result == ANEURALNETWORKS_NO_ERROR; + bool success = status == ErrorStatus::NONE; for (const auto& output : mOutputs) { if (output.state() != ModelArgumentInfo::MEMORY) continue; - const RuntimeMemory* memory = mMemories[output.locationAndLength().poolIndex]; + const Memory* memory = mMemories[output.locationAndLength().poolIndex]; memory->getValidator().setInitialized(success); } - switch (result) { + switch (convertErrorStatusToResultCode(status)) { case ANEURALNETWORKS_NO_ERROR: - mCompletion = Completion::NO_ERROR; + mCompletionWithoutSyncFence = Completion::NO_ERROR; break; case ANEURALNETWORKS_OUTPUT_INSUFFICIENT_SIZE: - mCompletion = Completion::OUTPUT_INSUFFICIENT_SIZE; + mCompletionWithoutSyncFence = Completion::OUTPUT_INSUFFICIENT_SIZE; break; default: - mCompletion = Completion::OTHER_ERROR; + mCompletionWithoutSyncFence = Completion::OTHER_ERROR; break; } - { - std::lock_guard<std::mutex> lock(mStateMutex); - CHECK(mState != State::PREPARATION) - << "ExecutionBuilder::finishComputation is called in the preparation state"; - CHECK(mState != State::COMPLETED) << "ExecutionBuilder::finishComputation is called twice"; - mState = State::COMPLETED; - } - return result; + mFinishedWithoutSyncFence = true; + return status; } -std::string toString(StepExecutor::UpdateOutputShapes updateOutputShapes) { - return "{ .updatedDynamicTemporary = " + - std::to_string(updateOutputShapes.updatedDynamicTemporary) + - ", .mainOutputInsufficient = " + - std::to_string(updateOutputShapes.mainOutputInsufficient) + "}"; -} - -bool StepExecutor::updateOutputShapes(int executionResultCode, const std::vector<OutputShape>& from, - std::vector<OutputShape>* to, UpdateOutputShapes* update) { - CHECK(update != nullptr); - *update = {.updatedDynamicTemporary = false, - .mainOutputInsufficient = false, - .zeroSizedInput = false}; - - NN_RET_CHECK(validateOutputShapesFromDriver(executionResultCode, mModel, from)); - +bool 
StepExecutor::updateOutputShapes(const std::vector<OutputShape>& from, + std::vector<OutputShape>* to) { if (from.size() == 0) { return true; } - - if (VLOG_IS_ON(EXECUTION)) { - for (const auto& shape : from) { - VLOG(EXECUTION) << "updateOutputShapes: " << shape; - } - } - if (mExecutionStep != nullptr) { const auto& indexMapping = mExecutionStep->getOutputIndexStepModelToMainModel(); NN_RET_CHECK_LE(indexMapping.size(), from.size()); for (uint32_t i = 0, e = indexMapping.size(); i < e; i++) { - const uint32_t toIndex = indexMapping[i]; + uint32_t toIndex = indexMapping[i]; NN_RET_CHECK_GT(to->size(), toIndex); NN_RET_CHECK(isUpdatable(to->at(toIndex).dimensions, from[i].dimensions)); (*to)[toIndex] = from[i]; - update->mainOutputInsufficient |= !(*to)[toIndex].isSufficient; - if (mExecutionStep->getModelOutputsThatAreDownstreamInputs().count(toIndex) && - isZeroSizedTensor(executionResultCode, from[i])) { - update->zeroSizedInput = true; - } - } - - if (!mDynamicTemporaries->empty()) { - // TODO(b/157236079): Instead of computing this here, precompute it in ExecutionStep? 
- std::map<uint32_t, uint32_t> operandIndexStepModelOutputToSourceModelTemp; - for (const auto& entry : mExecutionStep->getTempsAsStepModelOutputs()) { - operandIndexStepModelOutputToSourceModelTemp.emplace(entry.second, entry.first); - } - - const uint32_t sourceModelIndex = mExecutionStep->getSourceModelIndex(); - for (uint32_t i = 0, e = mModel->outputCount(); i < e; i++) { - const uint32_t stepModelOperandIndex = mModel->getOutputOperandIndex(i); - const auto it = - operandIndexStepModelOutputToSourceModelTemp.find(stepModelOperandIndex); - if (it == operandIndexStepModelOutputToSourceModelTemp.end()) { - continue; - } - const auto sourceOperandIndex = SourceOperandIndex(sourceModelIndex, it->second); - VLOG(EXECUTION) << "updateOutputShapes checking to see if output#" << i - << " sourceOperandIndex = (" << sourceOperandIndex.first << ", " - << sourceOperandIndex.second << ") is a dynamic temporary"; - // This is a temporary, but it might not be a dynamic temporary. - const auto loc = mDynamicTemporaries->lookup(sourceOperandIndex, false); - if (loc == std::nullopt) { - continue; - } - NN_RET_CHECK(isUpdatable(*loc->dimensions, from[i].dimensions)); - bool changedShape = false; - const uint32_t actualSize = TypeManager::get()->getSizeOfData( - mModel->getOperand(stepModelOperandIndex).type, from[i].dimensions); - if (actualSize > 0) { - changedShape = mDynamicTemporaries->redeclare(sourceOperandIndex, - from[i].dimensions, actualSize); - } else if (!from[i].isSufficient) { - NN_RET_CHECK(loc->paddedLength < UINT32_MAX / 2) - << "output#" << i << " paddedLength overflow"; - changedShape = mDynamicTemporaries->redeclare( - sourceOperandIndex, from[i].dimensions, 2 * loc->paddedLength); - } else { - // The combination of not-fully-specified dimensions - // and isSufficient means that we have no - // information about whether the size of the dynamic - // temporary is adequate. 
- VLOG(EXECUTION) << "updateOutputShapes skipping redeclaration for output#" << i; - if (executionResultCode == ANEURALNETWORKS_NO_ERROR) { - NN_RET_CHECK(isZeroSizedTensor(executionResultCode, from[i])); - // This is a zero-sized tensor, and by - // definition, any dynamic temporary is an input - // to an execution step. - update->zeroSizedInput = true; - } - } - if (changedShape) { - // TODO: find a better place for this comment. - // - // isUpdatable(a, b) imposes a partial ordering a <= - // b. Every fully specified dimensions vector is an - // upper bound of that ordering. Therefore, any - // change in dimensions moves towards an upper - // bound, and hence there are a finite number of - // such changes possible. - // - // actualSize can only be computed from dimensions - // that are an upper bound. Therefore, once - // actualSize is computed, it will not change. - // - // If dimensions are not fully specified, and - // estimated size changes, it increases. There is - // an upper bound on estimated size to avoid - // overflow. - // - // Therefore, if we retry only when dimensions or - // size chage, and we stop retrying if we would - // otherwise overflow, we should only retry a finite - // number of times. 
- update->updatedDynamicTemporary = true; - } - } - mDynamicTemporaries->vlogDump("finished updateOutputShapes"); } } else { NN_RET_CHECK_EQ(from.size(), to->size()); @@ -1312,28 +979,19 @@ StepExecutor::StepExecutor(ExecutionBuilder* executionBuilder, const ModelBuilder* model, std::shared_ptr<Device> device, - std::shared_ptr<RuntimePreparedModel> preparedModel, bool reusable, - const ExecutionStep* step, DynamicTemporaries* dynamicTemporaries) + std::shared_ptr<PreparedModel> preparedModel, const ExecutionStep* step) : mExecutionBuilder(executionBuilder), mExecutionStep(step), - mDynamicTemporaries(dynamicTemporaries), mModel(model), mDevice(device), mPreparedModel(preparedModel), mInputs(model->inputCount()), - mOutputs(model->outputCount()), - mReusable(reusable) { + mOutputs(model->outputCount()) { CHECK(mDevice != nullptr); - CHECK_EQ(step == nullptr, dynamicTemporaries == nullptr); - CHECK(!(reusable && dynamicTemporaries != nullptr)); VLOG(EXECUTION) << "StepExecutor::StepExecutor with " << mInputs.size() << " inputs and " << mOutputs.size() << " outputs"; } -bool StepExecutor::areDynamicTemporariesAllocated() const { - return !mDynamicTemporaries || mDynamicTemporaries->allocated(mExecutionStep->getIndex()); -} - void StepExecutor::mapInputsAndOutputsTrivially() { mInputs = mExecutionBuilder->mInputs; mOutputs = mExecutionBuilder->mOutputs; @@ -1341,30 +999,19 @@ } void StepExecutor::mapInputOrOutput(const ModelArgumentInfo& builderInputOrOutput, - ModelArgumentInfo* executorInputOrOutput, - const Dimensions* builderDimensions) { - auto updateDimensions = [executorInputOrOutput, builderDimensions] { - if (!builderDimensions) { - return; - } - executorInputOrOutput->dimensions() = *builderDimensions; - }; - + ModelArgumentInfo* executorInputOrOutput) { *executorInputOrOutput = builderInputOrOutput; switch (executorInputOrOutput->state()) { default: CHECK(false) << "unexpected ModelArgumentInfo::state"; break; case ModelArgumentInfo::HAS_NO_VALUE: + case 
ModelArgumentInfo::POINTER: case ModelArgumentInfo::UNSPECIFIED: break; - case ModelArgumentInfo::POINTER: - updateDimensions(); - break; case ModelArgumentInfo::MEMORY: { - updateDimensions(); const uint32_t builderPoolIndex = builderInputOrOutput.locationAndLength().poolIndex; - const RuntimeMemory* memory = mExecutionBuilder->mMemories[builderPoolIndex]; + const Memory* memory = mExecutionBuilder->mMemories[builderPoolIndex]; const uint32_t executorPoolIndex = mMemories.add(memory); executorInputOrOutput->locationAndLength().poolIndex = executorPoolIndex; break; @@ -1373,55 +1020,34 @@ } int StepExecutor::setInputOrOutputFromMemory(const Operand& inputOrOutputOperand, - const RuntimeMemory* memory, uint32_t offset, - uint32_t length, const Dimensions& dimensions, + const Memory* memory, uint32_t offset, ModelArgumentInfo* inputOrOutputInfo) { // Should be similar to // ExecutionBuilder::setInputFromMemory() // ExecutionBuilder::setOutputFromMemory() uint32_t poolIndex = mMemories.add(memory); + uint32_t length = TypeManager::get()->getSizeOfData(inputOrOutputOperand); CHECK(inputOrOutputInfo->unspecified()); int n; std::tie(n, *inputOrOutputInfo) = ModelArgumentInfo::createFromMemory(inputOrOutputOperand, /*type=*/nullptr, poolIndex, offset, length); - if (n == ANEURALNETWORKS_NO_ERROR && dimensions.size()) { - CHECK(isUpdatable(inputOrOutputInfo->dimensions(), dimensions)); - inputOrOutputInfo->dimensions() = dimensions; - } return n; } -static std::string toString(std::vector<uint32_t> dimensions) { - std::string ret = "("; - bool wroteOne = false; - for (uint32_t dimension : dimensions) { - if (wroteOne) { - ret += ", "; - } else { - wroteOne = true; - } - ret += std::to_string(dimension); - } - ret += ")"; - return ret; -}; - static void logArguments(const char* kind, const std::vector<ModelArgumentInfo>& args) { for (unsigned i = 0; i < args.size(); i++) { const auto& arg = args[i]; std::string prefix = kind + std::string("[") + std::to_string(i) + "] = "; 
switch (arg.state()) { case ModelArgumentInfo::POINTER: - VLOG(EXECUTION) << prefix << "POINTER(" << SHOW_IF_DEBUG(arg.buffer()) << ") dim" - << toString(arg.dimensions()); + VLOG(EXECUTION) << prefix << "POINTER(" << SHOW_IF_DEBUG(arg.buffer()) << ")"; break; case ModelArgumentInfo::MEMORY: VLOG(EXECUTION) << prefix << "MEMORY(" << "pool=" << arg.locationAndLength().poolIndex << ", " - << "off=" << arg.locationAndLength().offset << ") dim" - << toString(arg.dimensions()); + << "off=" << arg.locationAndLength().offset << ")"; break; case ModelArgumentInfo::HAS_NO_VALUE: VLOG(EXECUTION) << prefix << "HAS_NO_VALUE"; @@ -1440,113 +1066,88 @@ return mDevice == DeviceManager::getCpuDevice(); } -std::pair<int, std::shared_ptr<RuntimeExecution>> StepExecutor::getReusableExecution() { - CHECK(mReusable); - if (mExecution == nullptr) { - CHECK(mPreparedModel != nullptr); - const MeasureTiming measure = measureTiming(mExecutionBuilder); - const OptionalDuration loopTimeoutDuration = - makeTimeoutDuration(mExecutionBuilder->getLoopTimeoutDuration()); - auto [n, execution] = mPreparedModel->createReusableExecution( - mInputs, mOutputs, mMemories.getObjects(), measure, loopTimeoutDuration); - if (n != ANEURALNETWORKS_NO_ERROR) { - return {n, nullptr}; - } - mExecution = std::move(execution); - } - return {ANEURALNETWORKS_NO_ERROR, mExecution}; +static OptionalTimeoutDuration makeTimeoutDuration(uint64_t nanoseconds) { + OptionalTimeoutDuration otd; + otd.nanoseconds(nanoseconds); + return otd; } std::tuple<int, std::vector<OutputShape>, Timing> StepExecutor::compute( - const OptionalTimePoint& deadline, const SharedBurst& burstController) { - if (VLOG_IS_ON(EXECUTION)) { - logArguments("input", mInputs); - logArguments("output", mOutputs); - } - - int n; - std::vector<OutputShape> outputShapes; - Timing timing; - if (mReusable) { - auto [nCreate, execution] = getReusableExecution(); - if (nCreate != ANEURALNETWORKS_NO_ERROR) { - return {nCreate, {}, {}}; - } - std::tie(n, 
outputShapes, timing) = execution->compute(burstController, deadline); - } else { - CHECK(mPreparedModel != nullptr); - const MeasureTiming measure = measureTiming(mExecutionBuilder); - const OptionalDuration loopTimeoutDuration = - makeTimeoutDuration(mExecutionBuilder->getLoopTimeoutDuration()); - std::tie(n, outputShapes, timing) = - mPreparedModel->execute(mInputs, mOutputs, mMemories.getObjects(), burstController, - measure, deadline, loopTimeoutDuration); - } - mExecutionBuilder->reportTimingWithoutFencedExecutionCallback(timing); - return {n, std::move(outputShapes), std::move(timing)}; + const std::optional<Deadline>& deadline, + const std::shared_ptr<ExecutionBurstController>& burstController) { + return computeWithMemories(deadline, mMemories.getObjects(), burstController); } -std::tuple<int, int, ExecuteFencedInfoCallback> StepExecutor::computeFenced( - const std::vector<int>& waitFor, uint64_t timeoutDurationAfterFence, - const OptionalTimePoint& deadline) { +std::tuple<int, std::vector<OutputShape>, Timing> StepExecutor::computeWithMemories( + const std::optional<Deadline>& deadline, const std::vector<const Memory*>& memories, + const std::shared_ptr<ExecutionBurstController>& burstController) { + CHECK(mPreparedModel != nullptr); + if (VLOG_IS_ON(EXECUTION)) { logArguments("input", mInputs); logArguments("output", mOutputs); } - OptionalDuration optionalTimeoutDurationAfterFence; - if (timeoutDurationAfterFence > 0) { - optionalTimeoutDurationAfterFence = makeTimeoutDuration(timeoutDurationAfterFence); + const MeasureTiming measure = measureTiming(mExecutionBuilder); + const OptionalTimeoutDuration loopTimeoutDuration = + makeTimeoutDuration(mExecutionBuilder->getLoopTimeoutDuration()); + const auto [n, outputShapes, timing] = mPreparedModel->execute( + mInputs, mOutputs, memories, burstController, measure, deadline, loopTimeoutDuration); + mExecutionBuilder->reportTimingWithoutFencedExecutionCallback(timing); + + return {n, std::move(outputShapes), 
timing}; +} + +std::tuple<int, int, sp<hal::IFencedExecutionCallback>> StepExecutor::computeFenced( + const std::vector<int>& waitFor, uint64_t timeoutDurationAfterFence, + const std::optional<Deadline>& deadline) { + CHECK(mPreparedModel != nullptr); + + if (VLOG_IS_ON(EXECUTION)) { + logArguments("input", mInputs); + logArguments("output", mOutputs); } - int n; - int syncFenceFd; - ExecuteFencedInfoCallback executeFencedInfoCallback; - Timing timing; - if (mReusable) { - auto [nCreate, execution] = getReusableExecution(); - if (nCreate != ANEURALNETWORKS_NO_ERROR) { - return {nCreate, -1, nullptr}; - } - std::tie(n, syncFenceFd, executeFencedInfoCallback, timing) = - execution->computeFenced(waitFor, deadline, optionalTimeoutDurationAfterFence); - } else { - CHECK(mPreparedModel != nullptr); - const MeasureTiming measure = measureTiming(mExecutionBuilder); - const OptionalDuration loopTimeoutDuration = - makeTimeoutDuration(mExecutionBuilder->getLoopTimeoutDuration()); - std::tie(n, syncFenceFd, executeFencedInfoCallback, timing) = mPreparedModel->executeFenced( - mInputs, mOutputs, mMemories.getObjects(), waitFor, measure, deadline, - loopTimeoutDuration, optionalTimeoutDurationAfterFence); + const MeasureTiming measure = measureTiming(mExecutionBuilder); + const OptionalTimeoutDuration loopTimeoutDuration = + makeTimeoutDuration(mExecutionBuilder->getLoopTimeoutDuration()); + OptionalTimeoutDuration optionalTimeoutDurationAfterFence; + if (timeoutDurationAfterFence > 0) { + optionalTimeoutDurationAfterFence.nanoseconds(timeoutDurationAfterFence); } - if (syncFenceFd < 0 && executeFencedInfoCallback == nullptr) { + const auto [n, syncFence, computeFencedCallback, timing] = mPreparedModel->executeFenced( + mInputs, mOutputs, mMemories.getObjects(), waitFor, measure, deadline, + loopTimeoutDuration, optionalTimeoutDurationAfterFence); + if (syncFence < 0 && computeFencedCallback == nullptr) { mExecutionBuilder->reportTimingWithoutFencedExecutionCallback(timing); } 
- return {n, syncFenceFd, executeFencedInfoCallback}; + return {n, syncFence, computeFencedCallback}; } // For cpuFallback{Partial,Full}, recompile the model on CPU and then start compute. std::tuple<int, std::vector<OutputShape>, Timing> StepExecutor::computeOnCpuFallback() { NNTRACE_RT(NNTRACE_PHASE_EXECUTION, "StepExecutor::computeOnCpuFallback"); VLOG(EXECUTION) << "Re-compile the model on CPU"; - const ModelFactory makeModel = [this] { return mModel->makeModel(); }; + mDevice = DeviceManager::getCpuDevice(); + mPreparedModel = nullptr; + const ModelFactory makeModel = [this] { return mModel->makeHidlModel(); }; // TODO: Propagate user preference and compilation priority to this point instead of using // default values of ANEURALNETWORKS_PREFER_FAST_SINGLE_ANSWER and // ANEURALNETWORKS_PRIORITY_MEDIUM const ExecutionPreference preference = static_cast<ExecutionPreference>(ANEURALNETWORKS_PREFER_FAST_SINGLE_ANSWER); - const Priority priority = convertToCanonicalPriority(ANEURALNETWORKS_PRIORITY_DEFAULT); - auto [n, preparedModel] = DeviceManager::getCpuDevice()->prepareModel(makeModel, preference, - priority, {}, {}, {}); + const Priority priority = convertToHalPriority(ANEURALNETWORKS_PRIORITY_DEFAULT); + auto [n, preparedModel] = mDevice->prepareModel(makeModel, preference, priority, {}, {}, {}); + mPreparedModel = std::move(preparedModel); if (n != ANEURALNETWORKS_NO_ERROR) { - return {n, {}, {}}; + return {n, {}, kNoTiming}; } // Prepare device memories for CPU fallback. - std::vector<const RuntimeMemory*> memories = mMemories.getObjects(); + std::vector<const Memory*> memories = mMemories.getObjects(); std::vector<bool> isUsedAsInput(memories.size(), false); std::vector<bool> isUsedAsOutput(memories.size(), false); - std::vector<std::unique_ptr<RuntimeMemory>> blobAhwbs; + std::vector<std::unique_ptr<Memory>> blobAhwbs; // Mark the input and output usages. 
for (auto& input : mInputs) { @@ -1562,7 +1163,7 @@ if (mMemories[poolIndex]->getValidator().createdWithUnknownShape()) { LOG(ERROR) << "Cannot fallback to CPU because at least one of the output operands " "has unknown shape."; - return {ANEURALNETWORKS_OP_FAILED, {}, {}}; + return {ANEURALNETWORKS_OP_FAILED, {}, kNoTiming}; } isUsedAsOutput[poolIndex] = true; } @@ -1570,17 +1171,17 @@ // Allocate BLOB mode AHardwareBuffers and read the data from input device memories. for (uint32_t i = 0; i < memories.size(); i++) { - const RuntimeMemory* memory = mMemories[i]; + const Memory* memory = mMemories[i]; if (memory->getIBuffer() != nullptr) { const uint32_t size = memory->getValidator().getMetadata().logicalSize; auto [nAhwb, blobAhwb] = MemoryRuntimeAHWB::create(size); if (nAhwb != ANEURALNETWORKS_NO_ERROR) { - return {nAhwb, {}, {}}; + return {nAhwb, {}, kNoTiming}; } if (isUsedAsInput[i]) { - n = copyIBufferToMemory(memory->getIBuffer(), blobAhwb->getMemory()); + n = copyIBufferToHidlMemory(memory->getIBuffer(), blobAhwb->getHidlMemory()); if (n != ANEURALNETWORKS_NO_ERROR) { - return {n, {}, {}}; + return {n, {}, kNoTiming}; } } memories[i] = blobAhwb.get(); @@ -1588,23 +1189,18 @@ } } - const MeasureTiming measure = measureTiming(mExecutionBuilder); - const OptionalDuration loopTimeoutDuration = - makeTimeoutDuration(mExecutionBuilder->getLoopTimeoutDuration()); - auto [nExecute, outputShapes, timing] = preparedModel->execute( - mInputs, mOutputs, memories, nullptr, measure, {}, loopTimeoutDuration); - mExecutionBuilder->reportTimingWithoutFencedExecutionCallback(timing); - if (nExecute != ANEURALNETWORKS_NO_ERROR) { - return {nExecute, std::move(outputShapes), timing}; + auto [nCompute, outputShapes, timing] = computeWithMemories({}, memories); + if (nCompute != ANEURALNETWORKS_NO_ERROR) { + return {nCompute, std::move(outputShapes), timing}; } // Write back to output device memories. 
for (uint32_t i = 0; i < memories.size(); i++) { - const RuntimeMemory* memory = mMemories[i]; + const Memory* memory = mMemories[i]; if (memory->getIBuffer() != nullptr && isUsedAsOutput[i]) { - n = copyMemoryToIBuffer(memories[i]->getMemory(), memory->getIBuffer(), {}); + n = copyHidlMemoryToIBuffer(memories[i]->getHidlMemory(), memory->getIBuffer(), {}); if (n != ANEURALNETWORKS_NO_ERROR) { - return {n, {}, {}}; + return {n, {}, kNoTiming}; } } }
diff --git a/runtime/ExecutionBuilder.h b/runtime/ExecutionBuilder.h index 4b3b190..f61df4c 100644 --- a/runtime/ExecutionBuilder.h +++ b/runtime/ExecutionBuilder.h
@@ -17,21 +17,16 @@ #ifndef ANDROID_FRAMEWORKS_ML_NN_RUNTIME_EXECUTION_BUILDER_H #define ANDROID_FRAMEWORKS_ML_NN_RUNTIME_EXECUTION_BUILDER_H -#include <ControlFlow.h> -#include <CpuExecutor.h> -#include <android-base/thread_annotations.h> -#include <nnapi/IBurst.h> -#include <nnapi/IPreparedModel.h> -#include <nnapi/Types.h> -#include <nnapi/Validation.h> - +#include <atomic> #include <memory> -#include <string> #include <tuple> #include <utility> #include <vector> -#include "ExecutionCallback.h" +#include "Callbacks.h" +#include "ControlFlow.h" +#include "CpuExecutor.h" +#include "HalInterfaces.h" #include "Memory.h" #include "ModelArgumentInfo.h" #include "ModelBuilder.h" @@ -43,30 +38,28 @@ class BurstBuilder; class CompilationBuilder; class Device; -class DynamicTemporaries; +class ExecutionBurstController; class ExecutionPlan; class ExecutionStep; +class Memory; class ModelBuilder; -class RuntimeMemory; -class RuntimePreparedModel; -class RuntimeExecution; +class PreparedModel; class StepExecutor; class ExecutionBuilder { friend class StepExecutor; public: - explicit ExecutionBuilder(const CompilationBuilder* compilation); - virtual ~ExecutionBuilder() = default; + ExecutionBuilder(const CompilationBuilder* compilation); int setInput(uint32_t index, const ANeuralNetworksOperandType* type, const void* buffer, size_t length); int setInputFromMemory(uint32_t index, const ANeuralNetworksOperandType* type, - const RuntimeMemory* memory, size_t offset, size_t length); + const Memory* memory, size_t offset, size_t length); int setOutput(uint32_t index, const ANeuralNetworksOperandType* type, void* buffer, size_t length); int setOutputFromMemory(uint32_t index, const ANeuralNetworksOperandType* type, - const RuntimeMemory* memory, size_t offset, size_t length); + const Memory* memory, size_t offset, size_t length); int setMeasureTiming(bool measure); @@ -80,14 +73,10 @@ uint64_t getLoopTimeoutDuration() const { return mLoopTimeoutDuration; } - int 
enableInputAndOutputPadding(bool enable); - - int setReusable(bool reusable); - int computeFenced(const std::vector<int>& wait_for, uint64_t timeoutDurationAfterFence, int* sync_fence); - int computeAsynchronously(std::shared_ptr<ExecutionCallback>* synchronizationCallback) { + int computeAsynchronously(sp<ExecutionCallback>* synchronizationCallback) { CHECK(synchronizationCallback != nullptr); return compute(synchronizationCallback); } @@ -95,40 +84,34 @@ int burstCompute(BurstBuilder* burst) { return compute(nullptr, burst); } // Initialize output dimensional information from ModelArgumentInfo. - std::vector<OutputShape> getInitialOutputShapes() const; + std::vector<hal::OutputShape> getInitialOutputShapes() const; int getOutputOperandDimensions(uint32_t index, uint32_t* dimensions); int getOutputOperandRank(uint32_t index, uint32_t* rank); // Handshake with lower-level execution support bool measureTiming() const { return mMeasureTiming; } - void reportTimingWithoutFencedExecutionCallback(Timing timing) { + void reportTimingWithoutFencedExecutionCallback(hal::Timing timing) { mTimingWithoutFencedExecutionCallback = timing; } const CompilationBuilder* getCompilation() const { return mCompilation; } const ModelBuilder* getModel() const { return mModel; } const ModelBuilder* getSourceModel(uint32_t index) const; - const Operand& getSourceOperand(const std::pair<uint32_t, uint32_t>& sourceOperandIndex) const { + const hal::Operand& getSourceOperand( + const std::pair<uint32_t, uint32_t>& sourceOperandIndex) const { return getSourceModel(sourceOperandIndex.first)->getOperand(sourceOperandIndex.second); } - // This method will be called at the end of all computation paths to change the state - // of the execution object and update output shapes / memories. 
- int finishComputation(int result, const std::vector<OutputShape>& outputShapes); - ErrorStatus finishComputation(ErrorStatus error, const std::vector<OutputShape>& outputShapes) { - const int result = finishComputation(convertErrorStatusToResultCode(error), outputShapes); - return convertResultCodeToErrorStatus(result); - } + hal::ErrorStatus finishWithoutSyncFence(hal::ErrorStatus error, + const std::vector<hal::OutputShape>& outputShapes); - const ExecuteFencedInfoCallback& getExecuteFencedInfoCallback() { + // Retrieve a reference to the IFencedExecutionCallback callback. + const sp<hal::IFencedExecutionCallback>& getFencedExecutionCallback() { return mFencedExecutionCallback; } - bool inFlight() const { - std::lock_guard<std::mutex> lock(mStateMutex); - return mState == State::COMPUTATION; - } + bool inFlight() const { return mStarted && !isFinished(); } const ModelArgumentInfo& getInputInfo(uint32_t index) const { return mInputs[index]; } const ModelArgumentInfo& getOutputInfo(uint32_t index) const { return mOutputs[index]; } @@ -137,7 +120,7 @@ return mMemories[poolIndex]->getRunTimePoolInfo(); } - protected: + private: // If a callback is provided, then this is asynchronous. If a callback is // not provided (i.e., is nullptr), then this is synchronous. // @@ -145,38 +128,29 @@ // provided (i.e., is nullptr), then a synchronous execution will occur. // // Providing both synchronizationCallback and burstBuilder is an error. 
- int compute(std::shared_ptr<ExecutionCallback>* synchronizationCallback, + int compute(sp<ExecutionCallback>* synchronizationCallback, BurstBuilder* burstBuilder = nullptr); - virtual std::tuple<int, std::vector<OutputShape>, Timing> computeInternal( - const OptionalTimePoint& deadline, BurstBuilder* burstBuilder) = 0; - - virtual std::tuple<int, int, ExecuteFencedInfoCallback> computeFencedInternal( - const std::vector<int>& waitFor, uint64_t timeoutDurationAfterFence, - const OptionalTimePoint& deadline) = 0; - - // This method handles the common preparation and validation logic of compute and computeFenced. - // It will be called at the start of every computation. - int prepareForCompute(const char* name); - const CompilationBuilder* mCompilation; // Update output dimensional information from OutputShape to ModelArgumentInfo. - bool updateOutputShapes(ErrorStatus status, const std::vector<OutputShape>& outputShapes); + bool updateOutputShapes(const std::vector<hal::OutputShape>& outputShapes); bool updateMemories(); + bool hasSyncFence() const { return mSyncFenceFd > 0; } + const ModelBuilder* mModel; const ExecutionPlan* mPlan; - // Whether CPU fallback is allowed based on the value of DeviceManager::kPartitioning* captured - // from CompilationBuilder when the ExecutionBuilder is constructed. - bool mAllowCpuFallback; + // This is a DeviceManager::kPartitioning* value captured from + // CompilationBuilder when the ExecutionBuilder is constructed. + uint32_t mPartitioning; // The information we'll send to the driver about the inputs and outputs. // Note that we build this in two steps: // 1. As the arguments are specified, set the corresponding mInputs or mOutputs element. - // If set from a pointer, don't set the location in the Request::Argument but store it + // If set from a pointer, don't set the location in the RequestArgument but store it // instead in mInputBuffers or mOutputBuffers. // 2. 
Once we have all the inputs and outputs, if needed, allocate shared memory for // the m*Buffers entries. Copy the input values into the shared memory. @@ -192,7 +166,7 @@ // Timing reported from the driver. This field is only used if // mFencedExecutionCallback is nullptr. - Timing mTimingWithoutFencedExecutionCallback = {}; + hal::Timing mTimingWithoutFencedExecutionCallback = {}; // Amount of time to complete or abort the execution. std::optional<uint64_t> mTimeoutDuration; @@ -200,46 +174,29 @@ // Amount of time to complete or abort a loop. uint64_t mLoopTimeoutDuration = operation_while::kTimeoutNsDefault; - // The state of the execution. - // Properties can only been set when the execution is in the state State::PREPARATION. - // Timing and output shapes can only be queried when the execution is in the state - // State::COMPLETED. - enum class State { PREPARATION, COMPUTATION, COMPLETED }; - State mState GUARDED_BY(mStateMutex) = State::PREPARATION; - bool computationStarted() const { - std::lock_guard<std::mutex> lock(mStateMutex); - return mState != State::PREPARATION; - } - bool completed() const { - std::lock_guard<std::mutex> lock(mStateMutex); - return mState == State::COMPLETED; - } + // Properties cannot be set once the execution has started. + std::atomic_bool mStarted = false; - // Mutex to guard mState. Note that this not strictly needed because we provide - // no thread-safety guarantee to the ANeuralNetworksExecution object. - mutable std::mutex mStateMutex; + // Timing and output shapes can only be queried after the execution is + // finished. This field only becomes true if !hasSyncFence(). + // See isFinished(). + std::atomic_bool mFinishedWithoutSyncFence = false; - // Return false if the execution is in a bad state for starting computation. - // Otherwise, return true and set the state to State::COMPUTATION. - bool checkAndSetComputationState(const char* name); + bool isFinished() const; - // With what error status has execution completed? 
+ // With what error status has execution completed? This field only takes on + // a meaningful value if !hasSyncFence(). + // See completedWith(). enum class Completion { NO_ERROR, OUTPUT_INSUFFICIENT_SIZE, OTHER_ERROR }; - Completion mCompletion = Completion::OTHER_ERROR; - Completion completedWith() const { - CHECK(completed()); - return mCompletion; - } + Completion mCompletionWithoutSyncFence = Completion::OTHER_ERROR; - // The result code of request validation. - // It is only evaluated once at the first time it's needed. - std::optional<int> mValidationResultCode; - int getValidationResultCode(); + // With what error status has execution completed? Must only be called if + // isFinished(). + Completion completedWith() const; - // Does every tensor output operand of the model have a fully specified shape? - // It is only evaluated once at the first time it's needed. - std::optional<bool> mOutputsFullySpecified; - bool areOutputsFullySpecified(); + // The sync fence fd that is created in the computeFenced call, if any. + // (Sometimes no sync fence fd will be created.) + int mSyncFenceFd = -1; // The callback used to query execution related info in the case of fenced // execution; otherwise, nullptr. If the execution plan has multiple steps, @@ -247,46 +204,7 @@ // doesn't support fenced execution (e.g., the driver is too old), or if the // launch of execution on the driver fails, then this callback will be // nullptr. - ExecuteFencedInfoCallback mFencedExecutionCallback; - - // Whether set{Input,Output}[FromMemory] can accept padded length or not. - bool mInputAndOutputPaddingEnabled = false; - - // enableInputAndOutputPadding may only be called before any call of - // set{Input,Output}[FromMemory] - bool mHasCalledSetInputOutput = false; - - // Can compute APIs be invoked multiple times on the execution object? - bool mReusable = false; -}; - -// For execution plan with a SIMPLE body, i.e. the whole model will be executed on a single device. 
-class SimpleExecutionBuilder : public ExecutionBuilder { - public: - SimpleExecutionBuilder(const CompilationBuilder* compilation); - - std::tuple<int, std::vector<OutputShape>, Timing> computeInternal( - const OptionalTimePoint& deadline, BurstBuilder* burstBuilder) override; - - std::tuple<int, int, ExecuteFencedInfoCallback> computeFencedInternal( - const std::vector<int>& waitFor, uint64_t timeoutDurationAfterFence, - const OptionalTimePoint& deadline) override; - - private: - std::shared_ptr<StepExecutor> mExecutor; -}; - -// For execution plan with a COMPOUND body, i.e. partitioned execution with multiple steps. -class CompoundExecutionBuilder : public ExecutionBuilder { - public: - CompoundExecutionBuilder(const CompilationBuilder* compilation); - - std::tuple<int, std::vector<OutputShape>, Timing> computeInternal( - const OptionalTimePoint& deadline, BurstBuilder* burstBuilder) override; - - std::tuple<int, int, ExecuteFencedInfoCallback> computeFencedInternal( - const std::vector<int>& waitFor, uint64_t timeoutDurationAfterFence, - const OptionalTimePoint& deadline) override; + sp<hal::IFencedExecutionCallback> mFencedExecutionCallback; }; // class StepExecutor is used to execute a single "step" in a @@ -302,28 +220,15 @@ // "step" model of a multiple-"step" executionBuilder. // driver, preparedModel // The device on which to execute the "step", and the prepared - // model to execute on that device. For non-fallback StepExecutor, - // neither is nullptr; for fallback StepExecutor, both are ignored in - // StepExecutor::computeOnCpuFallback and may be nullptr. - // reusable - // If true, multiple StepExecutor::compute/computeFenced may be called on this - // object; otherwise, only one StepExecutor::compute/computeFenced may be called. - // reusable must be false if mDynamicTemporaries != nullptr. + // model to execute on that device. (Both are nullptr in the + // case of CPU.) 
// step // Contains the output index mapping from the excerpted "step" model to // main model if the execution has multiple "steps". Must be nullptr // otherwise. - // (step == nullptr) == (dynamicTemporaries == nullptr) - // dynamicTemporaries - // If the execution has multiple "steps", describes the temporaries - // of source models that do not have fully specified types and are outputs - // of "step" models. Must be nullptr otherwise. - // (step == nullptr) == (dynamicTemporaries == nullptr) StepExecutor(ExecutionBuilder* executionBuilder, const ModelBuilder* model, - std::shared_ptr<Device> device, - std::shared_ptr<RuntimePreparedModel> preparedModel, bool reusable, - const ExecutionStep* step = nullptr, - DynamicTemporaries* dynamicTemporaries = nullptr); + std::shared_ptr<Device> device, std::shared_ptr<PreparedModel> preparedModel, + const ExecutionStep* step = nullptr); // Map inputs and outputs from ExecutionBuilder to StepExecutor, // in the case where we have a single-"step" execution (i.e., the executor @@ -331,113 +236,77 @@ void mapInputsAndOutputsTrivially(); // Update output shapes with shapes returned from execution. - struct UpdateOutputShapes { - // These fields are meaningless unless updateOutputShapes() returns true - bool updatedDynamicTemporary; // did shape (dimensions, size) information change for at - // least one dynamic temporary? - bool mainOutputInsufficient; // is at least one main model output written by this execution - // marked !isSufficient? - bool zeroSizedInput; // is at least one output of this execution step a zero-sized tensor - // that needs to be read by some other step of the same execution? 
- }; - bool updateOutputShapes(int executionResultCode, const std::vector<OutputShape>& from, - std::vector<OutputShape>* to, UpdateOutputShapes* update); + bool updateOutputShapes(const std::vector<hal::OutputShape>& from, + std::vector<hal::OutputShape>* to); // Map inputs and outputs from ExecutionBuilder to StepExecutor, // one at a time. Note that these are input/output indexes, not // operand indexes. - // - // For mapOutputToInput(), outputDimensions may be nullptr if the input - // operand has fully specified dimensions. void mapInput(uint32_t builderIndex, uint32_t executorIndex) { mapInputOrOutput(mExecutionBuilder->mInputs[builderIndex], &mInputs[executorIndex]); } void mapOutput(uint32_t builderIndex, uint32_t executorIndex) { mapInputOrOutput(mExecutionBuilder->mOutputs[builderIndex], &mOutputs[executorIndex]); } - void mapOutputToInput(uint32_t builderIndex, uint32_t executorIndex, - const Dimensions* outputDimensions) { - mapInputOrOutput(mExecutionBuilder->mOutputs[builderIndex], &mInputs[executorIndex], - outputDimensions); + void mapOutputToInput(uint32_t builderIndex, uint32_t executorIndex) { + mapInputOrOutput(mExecutionBuilder->mOutputs[builderIndex], &mInputs[executorIndex]); } - // dimensions must either have zero rank or must be - // consistent with and at least as well specified as operand dimensions - // (i.e., either rank must match, or operand rank must be zero; and for each - // individual dimension, either dimension must match, or operand dimension - // must be zero). - int setInputFromMemory(uint32_t inputIndex, const RuntimeMemory* memory, uint32_t offset, - uint32_t length, const Dimensions& dimensions = {}) { + // The input or output is assumed to have the size of the + // corresponding operand. 
+ int setInputFromMemory(uint32_t inputIndex, const Memory* memory, uint32_t offset) { return setInputOrOutputFromMemory(mModel->getInputOperand(inputIndex), memory, offset, - length, dimensions, &mInputs.at(inputIndex)); + &mInputs.at(inputIndex)); } - int setOutputFromMemory(uint32_t outputIndex, const RuntimeMemory* memory, uint32_t offset, - uint32_t length, const Dimensions& dimensions = {}) { + int setOutputFromMemory(uint32_t outputIndex, const Memory* memory, uint32_t offset) { return setInputOrOutputFromMemory(mModel->getOutputOperand(outputIndex), memory, offset, - length, dimensions, &mOutputs.at(outputIndex)); + &mOutputs.at(outputIndex)); } // Executes using the (driver, preparedModel) specified at construction time. - std::tuple<int, std::vector<OutputShape>, Timing> compute( - const OptionalTimePoint& deadline, const SharedBurst& burstController = nullptr); + std::tuple<int, std::vector<hal::OutputShape>, hal::Timing> compute( + const std::optional<Deadline>& deadline, + const std::shared_ptr<ExecutionBurstController>& burstController = nullptr); // Re-compiles and executes using the CPU, regardless of the (driver, // preparedModel) specified at construction time. - std::tuple<int, std::vector<OutputShape>, Timing> computeOnCpuFallback(); + std::tuple<int, std::vector<hal::OutputShape>, hal::Timing> computeOnCpuFallback(); bool isCpu() const; // Perform fenced execution and return error_code, sync_fence_fd and a // callback. - std::tuple<int, int, ExecuteFencedInfoCallback> computeFenced( + std::tuple<int, int, sp<hal::IFencedExecutionCallback>> computeFenced( const std::vector<int>& wait_for, uint64_t timeoutDurationAfterFence, - const OptionalTimePoint& deadline); - - // Do the dynamic temporaries defined by this step have valid allocations? - // (true if there are no dynamic temporaries defined by this step.) 
- bool areDynamicTemporariesAllocated() const; + const std::optional<Deadline>& deadline); private: - // builderDimensions may be nullptr if executorInputOrOutput has fully - // specified dimensions. void mapInputOrOutput(const ModelArgumentInfo& builderInputOrOutput, - ModelArgumentInfo* executorInputOrOutput, - const Dimensions* builderDimensions = nullptr); + ModelArgumentInfo* executorInputOrOutput); - // dimensions must either have zero rank or - // must be consistent with and at least as well specified as operand - // dimensions (i.e., either rank must match, or operand rank must be zero; - // and for each individual dimension, either dimension must match, or - // operand dimension must be zero). - int setInputOrOutputFromMemory(const Operand& inputOrOutputOperand, const RuntimeMemory* memory, - uint32_t offset, uint32_t length, const Dimensions& dimensions, - ModelArgumentInfo* inputOrOutputInfo); + int setInputOrOutputFromMemory(const hal::Operand& inputOrOutputOperand, const Memory* memory, + uint32_t offset, ModelArgumentInfo* inputOrOutputInfo); + + std::tuple<int, std::vector<hal::OutputShape>, hal::Timing> computeWithMemories( + const std::optional<Deadline>& deadline, const std::vector<const Memory*>& memories, + const std::shared_ptr<ExecutionBurstController>& burstController = nullptr); // describes the full (possibly multiple-"step") execution ExecutionBuilder* mExecutionBuilder; // describes the single execution step - const ExecutionStep* mExecutionStep; - - // describes the dynamic temporaries - DynamicTemporaries* mDynamicTemporaries; + const ExecutionStep* mExecutionStep = nullptr; // model to be executed on the executor, in both original and // compiled forms; and device on which to execute it const ModelBuilder* mModel; std::shared_ptr<Device> mDevice; - std::shared_ptr<RuntimePreparedModel> mPreparedModel; - - // The reusable execution to launch multiple computations. - // It is only created once at the first time it's needed. 
- std::shared_ptr<RuntimeExecution> mExecution; - // Returns {NO_ERROR, execution} on success, or {result_code, nullptr} on failure. - std::pair<int, std::shared_ptr<RuntimeExecution>> getReusableExecution(); + std::shared_ptr<PreparedModel> mPreparedModel; // The information we'll send to the driver about the inputs and outputs. // Note that we build this in two steps: // 1. As the arguments are specified, set the corresponding mInputs or mOutputs element. - // If set from a pointer, don't set the location in the Request::Argument but store it + // If set from a pointer, don't set the location in the RequestArgument but store it // instead in mInputBuffers or mOutputBuffers. // 2. Once we have all the inputs and outputs, if needed, allocate shared memory for // the m*Buffers entries. Copy the input values into the shared memory. @@ -447,13 +316,8 @@ std::vector<ModelArgumentInfo> mInputs; std::vector<ModelArgumentInfo> mOutputs; MemoryTracker mMemories; - - // Whether compute/computeFenced may be invoked multiple times. - bool mReusable = false; }; -std::string toString(StepExecutor::UpdateOutputShapes updateOutputShapes); - } // namespace nn } // namespace android
diff --git a/runtime/ExecutionCallback.cpp b/runtime/ExecutionCallback.cpp deleted file mode 100644 index b2ac2ae..0000000 --- a/runtime/ExecutionCallback.cpp +++ /dev/null
@@ -1,165 +0,0 @@ -/* - * Copyright (C) 2017 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#define LOG_TAG "ExecutionCallback" - -#include "ExecutionCallback.h" - -#include <android-base/logging.h> - -#include <limits> -#include <utility> -#include <vector> - -namespace android::nn { - -void ExecutionCallback::notify(ErrorStatus status, const std::vector<OutputShape>& outputShapes, - const Timing& timing) { - notifyInternal(status, outputShapes, timing); -} - -void ExecutionCallback::wait() const { - std::unique_lock<std::mutex> lock(mMutex); - mCondition.wait(lock, [this] { return mNotified; }); - - /* - * Note that we cannot call std::thread::join from ExecutionCallback's - * destructor: ExecutionCallback is intended to be reference counted, and it - * is possible that the reference count drops to zero in the bound thread, - * causing the bound thread to call this destructor. 
If a thread tries to - * join itself, it throws an exception, producing a message like the - * following: - * - * terminating with uncaught exception of type std::__1::system_error: - * thread::join failed: Resource deadlock would occur - */ - if (mThread.joinable()) { - mThread.join(); - } -} - -ErrorStatus ExecutionCallback::getStatus() const { - wait(); - return mErrorStatus; -} - -const std::vector<OutputShape>& ExecutionCallback::getOutputShapes() const { - wait(); - return mOutputShapes; -} - -Timing ExecutionCallback::getTiming() const { - wait(); - return mTiming; -} - -bool ExecutionCallback::bindThread(std::thread asyncThread) { - std::lock_guard<std::mutex> lock(mMutex); - - // Ensure ExecutionCallback object does not already have a thread bound - if (mThread.joinable()) { - LOG(ERROR) << "ExecutionCallback::bindThread -- a thread has already been bound to this " - "callback object"; - return false; - } - - // Ensure the new thread is valid - if (!asyncThread.joinable()) { - LOG(ERROR) << "ExecutionCallback::bindThread -- the new thread is not joinable"; - return false; - } - - mThread = std::move(asyncThread); - return true; -} - -void ExecutionCallback::setOnFinish(const ExecutionFinish& finish) { - std::lock_guard<std::mutex> hold(mMutex); - - // Ensure ExecutionCallback object does not already have a "finish" callback - if (mOnFinish != nullptr) { - LOG(ERROR) << "ExecutionCallback::setOnFinish -- object already has a \"finish\" callback"; - return; - } - - // Ensure new "finish" callback is valid - if (finish == nullptr) { - LOG(ERROR) << "ExecutionCallback::setOnFinish -- \"finish\" callback is invalid"; - return; - } - - // Essure ExecutionCallback object has not already been notified - if (mNotified) { - LOG(ERROR) << "ExecutionCallback::setOnFinish -- ExecutionCallback has already been " - "notified with results"; - return; - } - - mOnFinish = finish; -} - -void ExecutionCallback::notifyInternal(ErrorStatus errorStatus, - 
std::vector<OutputShape> outputShapes, Timing timing) { - // check results - { - if (errorStatus == ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) { - // outputShapes must not be empty if OUTPUT_INSUFFICIENT_SIZE. - if (outputShapes.size() == 0) { - LOG(ERROR) - << "Notified with empty output shape vector when OUTPUT_INSUFFICIENT_SIZE"; - errorStatus = ErrorStatus::GENERAL_FAILURE; - outputShapes = {}; - timing = {}; - } - } else if (errorStatus != ErrorStatus::NONE) { - // outputShapes must be empty if errorStatus is neither NONE nor - // OUTPUT_INSUFFICIENT_SIZE. - if (outputShapes.size() != 0) { - LOG(ERROR) << "Notified with non-empty output shape vector when error status is " - "neither NONE nor OUTPUT_INSUFFICIENT_SIZE"; - errorStatus = ErrorStatus::GENERAL_FAILURE; - outputShapes = {}; - timing = {}; - } - } - } - - // store results - { - std::lock_guard<std::mutex> hold(mMutex); - - // quick-return if object has already been notified - if (mNotified) { - return; - } - - mErrorStatus = errorStatus; - mOutputShapes = std::move(outputShapes); - mTiming = timing; - mNotified = true; - - if (mOnFinish != nullptr) { - ErrorStatus status = mOnFinish(mErrorStatus, mOutputShapes); - mOnFinish = nullptr; - if (status != ErrorStatus::NONE) { - mErrorStatus = status; - } - } - } - mCondition.notify_all(); -} - -} // namespace android::nn
diff --git a/runtime/ExecutionCallback.h b/runtime/ExecutionCallback.h deleted file mode 100644 index 2f19dbd..0000000 --- a/runtime/ExecutionCallback.h +++ /dev/null
@@ -1,224 +0,0 @@ -/* - * Copyright (C) 2017 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef ANDROID_FRAMEWORKS_ML_NN_RUNTIME_EXECUTION_CALLBACK_H -#define ANDROID_FRAMEWORKS_ML_NN_RUNTIME_EXECUTION_CALLBACK_H - -#include <android-base/thread_annotations.h> -#include <nnapi/Types.h> - -#include <condition_variable> -#include <functional> -#include <mutex> -#include <thread> -#include <vector> - -namespace android::nn { - -// This class used to be a HIDL callback class to receive the results of -// IDevice::execute* asynchronously. It's not used for this anymore. -// -// TODO(b/122316159): Replace ExecutionCallback and CallbackEvent with a new -// class like AsyncTaskEvent. -/** - * The ExecutionCallback class is used to receive the results of the execution - * from a task executing asynchronously with respect to the runtime. If a - * calling thread calls wait or get* on a ExecutionCallback object and the - * corresponding asynchronous task has not finished the execution, the calling - * thread will block until the asynchronous task has called one of the notify* - * methods. - * - * If the callback object is notified more than once, only the results of the - * first call to notify* are used, and the results from subsequent calls are - * discarded. 
- */ -class ExecutionCallback { - using ExecutionFinish = - std::function<ErrorStatus(ErrorStatus, const std::vector<OutputShape>&)>; - - public: - /** - * ExecutionCallback::notify marks the callback object with the results - * (error status, dynamic output shapes, and timing information) of the - * asynchronous execution that held this callback and enables all prior and - * future wait calls on the ExecutionCallback object to proceed. - * - * If the callback object is notified more than once, only the results of - * the first call to notify* are used, and the results from subsequent calls - * are discarded. - * - * @param status Error status returned from launching the asynchronous task - * (if the launch fails) or from the asynchronous task itself (if the - * launch succeeds). Must be: - * - NONE if the asynchronous execution was successful - * - DEVICE_UNAVAILABLE if driver is offline or busy - * - GENERAL_FAILURE if the asynchronous task resulted in an unspecified - * error - * - OUTPUT_INSUFFICIENT_SIZE if at least one output operand buffer is - * not large enough to store the corresponding output - * - INVALID_ARGUMENT if one of the input arguments to prepareModel is - * invalid - * - MISSED_DEADLINE_* if the deadline could not be met - * - RESOURCE_EXHAUSTED_* if the execution was aborted by the driver - * @param outputShapes A list of shape information of model output operands. - * The index into "outputShapes" corresponds to the index of the output - * operand in the Request outputs vector. outputShapes must be empty - * unless the status is either NONE or OUTPUT_INSUFFICIENT_SIZE. - * @param Timing Duration of execution. Unless MeasureTiming::YES was passed - * when launching the execution and status is NONE, all times must be - * reported as UINT64_MAX. A driver may choose to report any time as - * UINT64_MAX, indicating that particular measurement is not available. 
- */ - void notify(ErrorStatus status, const std::vector<OutputShape>& outputShapes, - const Timing& timing); - - /** - * ExecutionCallback::wait blocks until notify* has been called on the - * callback object. - */ - void wait() const; - - /** - * Retrieves the error status returned from the asynchronous task launched - * by IPreparedModel::execute* (but not by - * IPreparedModel::executeSynchronously*). If IPreparedModel::execute* has - * not finished asynchronously executing, this call will block until the - * asynchronous task notifies the object. - * - * @return status Error status returned from launching the asynchronous task - * (if the launch fails) or from the asynchronous task itself (if the - * launch succeeds). Must be: - * - NONE if the asynchronous execution was successful - * - DEVICE_UNAVAILABLE if driver is offline or busy - * - GENERAL_FAILURE if the asynchronous task resulted in an unspecified - * error - * - OUTPUT_INSUFFICIENT_SIZE if at least one output operand buffer is - * not large enough to store the corresponding output - * - INVALID_ARGUMENT if one of the input arguments to prepareModel is - * invalid - * - MISSED_DEADLINE_* if the deadline could not be met - * - RESOURCE_EXHAUSTED_* if the task was aborted by the driver - * - DEAD_OBJECT if the driver crashed without returning a result - */ - ErrorStatus getStatus() const; - - /** - * Retrieves the output shapes returned from the asynchronous task launched - * by either IPreparedModel::execute_1_2 or IPreparedModel::execute_1_3. If - * IPreparedModel::execute_1_2 or IPreparedModel::execute_1_3 has not - * finished asynchronously executing, this call will block until the - * asynchronous task notifies the object. - * - * If the asynchronous task was launched by IPreparedModel::execute, an - * empty vector will be returned. - * - * @return outputShapes A list of shape information of model output - * operands. 
The index into "outputShapes" corresponds to the index of - * the output operand in the Request outputs vector. outputShapes must - * be empty unless the status is either NONE or - * OUTPUT_INSUFFICIENT_SIZE. outputShaps may be empty if the status is - * NONE and all model output operands are fully-specified at execution - * time. outputShapes must have the same number of elements as the - * number of model output operands if the status is - * OUTPUT_INSUFFICIENT_SIZE, or if the status is NONE and the model has - * at least one output operand that is not fully-specified. - */ - const std::vector<OutputShape>& getOutputShapes() const; - - /** - * Retrieves the duration of execution of the asynchronous task launched by - * by either IPreparedModel::execute_1_2 or IPreparedModel::execute_1_3. If - * IPreparedModel::execute_1_2 or IPreparedModel::execute_1_3 has not - * finished asynchronously executing, this call will block until the - * asynchronous task notifies the object. - * - * If the asynchronous task was launched by IPreparedModel::execute, every - * time must be UINT64_MAX. - * - * @return timing Duration of the execution. Every time must be UINT64_MAX - * unless the status is NONE. - */ - Timing getTiming() const; - - /** - * ExecutionCallback::bindThread binds a thread to the ExecutionCallback - * object. The bound thread is later joined by ExecutionCallback::wait or - * ExecutionCallback::get*. - * - * Once a thread is bound with ExecutionCallback::bindThread, the client - * code must ensure that ExecutionCallback::wait or ExecutionCallback::get* - * has been called before the ExecutionCallback object is destroyed. - * - * The bound thread must not call any ExecutionCallback method with the - * exception of ExecutionCallback::notify*, which it must call when the - * thread has finished its computation. - * - * ExecutionCallback::bindThread can be called at most once on a given - * callback object. 
- * - * @param asyncThread Thread to be bound to the callback object. The thread - * object must represent a thread of execution -- i.e., - * std::thread::joinable() must be true. - * @return bool True if successful, false if thread was not properly bound. - */ - bool bindThread(std::thread asyncThread); - - /** - * ExecutionCallback::setOnFinish binds a callback to the ExecutionCallback - * object that will be executed during one of the ExecutionCallback::notify* - * calls but before any calls to wait or get* return. This provided callback - * is provided with both the ErrorStatus and the output shapes from - * ExecutionCallback::notify*. - * - * The bound function must not synchronize with or otherwise access the - * callback object it is bound to, as this could cause a deadlock. - * - * This call will not bind the provided callback if any of the following - * occur: - * (1) the provided callback is invalid (i.e., "(bool) finish" is false) - * (2) ExecutionCallback already contains a bound callback - * (3) ExecutionCallback has already been notified with results - * - * @param finish Callback to be executed when ExecutionCallback is notified - * with results. - */ - void setOnFinish(const ExecutionFinish& finish); - - private: - /* - * ExecutionCallback::notifyInternal stores the results of the execution - * (status, output shapes, and timing information) in the ExecutionCallback - * object and invokes the bound callback function "mOnFinish" (if present) - * before any call to wait or get* return. It then enables all prior and - * future wait calls on the ExecutionCallback object to proceed. 
- */ - void notifyInternal(ErrorStatus errorStatus, std::vector<OutputShape> outputShapes, - Timing timing); - - // members - mutable std::mutex mMutex; - mutable std::condition_variable mCondition; - mutable std::thread mThread GUARDED_BY(mMutex); - ExecutionFinish mOnFinish GUARDED_BY(mMutex); - bool mNotified GUARDED_BY(mMutex) = false; - ErrorStatus mErrorStatus = ErrorStatus::GENERAL_FAILURE; - std::vector<OutputShape> mOutputShapes; - Timing mTiming = {}; -}; - -} // namespace android::nn - -#endif // ANDROID_FRAMEWORKS_ML_NN_RUNTIME_EXECUTION_CALLBACK_H
diff --git a/runtime/ExecutionPlan.cpp b/runtime/ExecutionPlan.cpp index afb39ae..1797475 100644 --- a/runtime/ExecutionPlan.cpp +++ b/runtime/ExecutionPlan.cpp
@@ -18,16 +18,9 @@ #include "ExecutionPlan.h" -#include <ControlFlow.h> -#include <CpuExecutor.h> -#include <GraphDump.h> -#include <LegacyUtils.h> -#include <MetaModel.h> -#include <OperationsUtils.h> -#include <TokenHasher.h> -#include <Tracing.h> +#include <android/sync.h> #include <fcntl.h> -#include <nnapi/IBurst.h> +#include <openssl/sha.h> #include <sys/stat.h> #include <sys/types.h> @@ -45,31 +38,41 @@ #include <vector> #include "BurstBuilder.h" +#include "Callbacks.h" #include "CompilationBuilder.h" +#include "ControlFlow.h" +#include "CpuExecutor.h" #include "ExecutionBuilder.h" -#include "ExecutionCallback.h" +#include "ExecutionBurstController.h" +#include "GraphDump.h" #include "Manager.h" +#include "MetaModel.h" #include "ModelBuilder.h" +#include "OperationsUtils.h" +#include "TokenHasher.h" +#include "Tracing.h" #include "TypeManager.h" +#include "Utils.h" namespace android { namespace nn { namespace { +using namespace hal; + // The index of the main model in SourceModels. constexpr uint32_t kMainModelInSourceModels = 0; -constexpr uint32_t kNoPadding = 1; - // Compiles the model on device. // If compilation caching is available, depending on ExecutionPlan::mState, the token may only have // been initialized by the user provided token (SIMPLE body), or is already re-hashed by the // operation indices to be executed (COMPOUND body). The token will be re-hashed further by the // device name, device version string, and the execution preference in this function. 
int compile(const Device& device, const ModelBuilder& model, int executionPreference, - int compilationPriority, const OptionalTimePoint& deadline, const CacheInfo& cacheInfo, - TokenHasher* token, std::shared_ptr<RuntimePreparedModel>* preparedModel) { + int compilationPriority, const std::optional<Deadline>& deadline, + const std::string& cacheDir, TokenHasher* token, + std::shared_ptr<PreparedModel>* preparedModel) { CHECK(token != nullptr); CHECK(preparedModel != nullptr); *preparedModel = nullptr; @@ -80,16 +83,14 @@ token->updateFromString(device.getVersionString().c_str()) && token->update(&executionPreference, sizeof(executionPreference)) && token->update(&compilationPriority, sizeof(compilationPriority)) && token->finish()) { - cacheToken = CacheToken{}; - const uint8_t* tokenPtr = token->getCacheToken(); - std::copy(tokenPtr, tokenPtr + cacheToken->size(), cacheToken->begin()); + cacheToken.emplace(token->getCacheToken()); } - const ModelFactory makeModel = [&model] { return model.makeModel(); }; + const ModelFactory makeModel = [&model] { return model.makeHidlModel(); }; const ExecutionPreference preference = static_cast<ExecutionPreference>(executionPreference); - const Priority priority = convertToCanonicalPriority(compilationPriority); + const Priority priority = convertToHalPriority(compilationPriority); const auto [n, returnedPreparedModel] = - device.prepareModel(makeModel, preference, priority, deadline, cacheInfo, cacheToken); + device.prepareModel(makeModel, preference, priority, deadline, cacheDir, cacheToken); *preparedModel = returnedPreparedModel; return n; } @@ -99,24 +100,27 @@ int copyOperandExtraParams(ModelBuilder& model, uint32_t toOperandIndex, const Operand& fromOperand) { if (fromOperand.type == OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL && - std::holds_alternative<Operand::SymmPerChannelQuantParams>(fromOperand.extraParams)) { - auto& fromChannelQuant = - std::get<Operand::SymmPerChannelQuantParams>(fromOperand.extraParams); + 
fromOperand.extraParams.getDiscriminator() == + OperandExtraParams::hidl_discriminator::channelQuant) { + auto& fromChannelQuant = fromOperand.extraParams.channelQuant(); ANeuralNetworksSymmPerChannelQuantParams toChannelQuant = { .channelDim = fromChannelQuant.channelDim, .scaleCount = static_cast<uint32_t>(fromChannelQuant.scales.size()), .scales = fromChannelQuant.scales.data(), }; return model.setOperandSymmPerChannelQuantParams(toOperandIndex, toChannelQuant); - } else if (isExtension(fromOperand.type) && - std::holds_alternative<Operand::ExtensionParams>(fromOperand.extraParams)) { - auto extensionData = std::get<Operand::ExtensionParams>(fromOperand.extraParams); + } else if (isExtensionOperandType(fromOperand.type) && + fromOperand.extraParams.getDiscriminator() == + OperandExtraParams::hidl_discriminator::extension) { + hidl_vec<uint8_t> extensionData = fromOperand.extraParams.extension(); return model.setOperandExtensionData(toOperandIndex, extensionData.data(), extensionData.size()); - } else if (!std::holds_alternative<Operand::NoParams>(fromOperand.extraParams) || + } else if (fromOperand.extraParams.getDiscriminator() != + OperandExtraParams::hidl_discriminator::none || fromOperand.type == OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL) { - LOG(ERROR) << "Type " << fromOperand.type - << " has an unexpected extraParams variant: " << fromOperand.extraParams.index(); + LOG(ERROR) << "Type " << toString(fromOperand.type) + << " has an unexpected extraParams discriminator: " + << static_cast<int>(fromOperand.extraParams.getDiscriminator()); return ANEURALNETWORKS_BAD_DATA; } else { return ANEURALNETWORKS_NO_ERROR; @@ -150,8 +154,8 @@ uint32_t count = 0; for (uint32_t operandIndex : operation.inputs) { auto lifetime = mModel->getOperand(operandIndex).lifetime; - if (lifetime == Operand::LifeTime::TEMPORARY_VARIABLE || - lifetime == Operand::LifeTime::SUBGRAPH_OUTPUT) { + if (lifetime == OperandLifeTime::TEMPORARY_VARIABLE || + lifetime == 
OperandLifeTime::SUBGRAPH_OUTPUT) { count++; mOperandToOperations.emplace(operandIndex, operationIndex); } @@ -177,230 +181,8 @@ } } -StaticTemporaryLocation addTemporary(uint32_t* totalSizeOfTemporaries, uint32_t size, - uint32_t alignment, uint32_t padding) { - // TODO: what about overflow? - *totalSizeOfTemporaries = roundUp(*totalSizeOfTemporaries, alignment); - const uint32_t offset = *totalSizeOfTemporaries; - size = roundUp(size, padding); - *totalSizeOfTemporaries += size; - return {.offset = offset, .paddedLength = size}; -}; - -std::string toString(SourceOperandIndex sourceOperandIndex) { - return "(" + std::to_string(sourceOperandIndex.first) + ", " + - std::to_string(sourceOperandIndex.second) + ")"; -}; - -// A helper class to analyze the step roles of all partition boundary operands. -// -// To use, call StepRoleAnalyzer::analyze and pass in a setup function that configures the analyzer -// with the following two methods: -// - addRole: Add a step role to a boundary operand -// - setUsedBy: Specify that the memory of the "source" operand may be directly used by the "dest" -// operand. All of the step roles of the "dest" operand are also possible step roles of the -// "source" operand. This is useful for interpreted control flow, e.g., the outer input operand -// of an interpreted IF operation may be directly used as all step roles of the corresponding -// input operand of the then and else models. Note that this relationship is directional -- -// (A->B && B->C) implies A->C, but (A->C && B->C) does not imply A->B or B->A (A->B is a -// shorthand for setUsedBy(A, B)). The setup function must guarantee that the final graph -// produced by the used-by relationship is acyclic. This is true for the partitioner algorithm -// because there must be a root operand of each step role for the memory to be allocated on -// behalf of. 
-// -class StepRoleAnalyzer { - public: - static std::map<SourceOperandIndex, std::set<StepRole>> analyze( - const std::function<void(StepRoleAnalyzer&)>& setup) { - StepRoleAnalyzer analyzer; - setup(analyzer); - return analyzer.finish(); - } - - void addRole(const ExecutionStep& step, uint32_t operandIndex, IOType type, - uint32_t stepIOIndex) { - SourceOperandIndex source = {step.getSourceModelIndex(), operandIndex}; - mRoles[source].emplace(step.getIndex(), type, stepIOIndex); - } - - void setUsedBy(const SourceOperandIndex& source, const SourceOperandIndex& dest) { - mUsedBy[source].emplace(dest); - } - - private: - StepRoleAnalyzer() = default; - - // Merges the step roles of the destination operands to the source operands - // and returns the final map. - std::map<SourceOperandIndex, std::set<StepRole>> finish() { - for (const auto& [source, _] : mUsedBy) { - finishHelper(source); - } - return std::move(mRoles); - } - - void finishHelper(SourceOperandIndex current) { - if (mProcessedOperands.count(current) > 0) return; - mProcessedOperands.insert(current); - const auto it = mUsedBy.find(current); - if (it != mUsedBy.end()) { - auto& roles = mRoles[current]; - // Merge the step roles of the destination operands. - for (const auto& dest : it->second) { - finishHelper(dest); - const auto& destRoles = mRoles[dest]; - roles.insert(destRoles.begin(), destRoles.end()); - } - } - } - - // A map from the source operand to its step roles. - std::map<SourceOperandIndex, std::set<StepRole>> mRoles; - // A map from the source operand to a set of destination operands that may directly - // use the memory of the source operand. - std::map<SourceOperandIndex, std::set<SourceOperandIndex>> mUsedBy; - // Used in finish to track which operand has been processed. 
- std::set<SourceOperandIndex> mProcessedOperands; -}; - } // namespace -void DynamicTemporaries::vlogDump(const char* context) const { - if (empty()) { - return; - } - if (context) { - VLOG(EXECUTION) << "DynamicTemporaries: \"" << context << "\""; - } - for (const auto& temp : mSourceOperandToTemporary) { - VLOG(EXECUTION) << "DynamicTemporaries: sourceOperandIndex = " << toString(temp.first) - << ", stepIndex = " << temp.second.stepIndex - << ", offset = " << temp.second.offset - << ", dimensions = " << toString(temp.second.dimensions) - << ", paddedLength = " << temp.second.paddedLength - << ", alignment = " << temp.second.alignment - << ", padding = " << temp.second.padding; - } -} - -void DynamicTemporaries::declare(SourceOperandIndex sourceOperandIndex, uint32_t stepIndex, - const Dimensions& initialDimensions, uint32_t initialLength, - uint32_t alignment, uint32_t padding) { - VLOG(EXECUTION) << "DynamicTemporaries::declare(sourceOperandIndex = " - << toString(sourceOperandIndex) << ", stepIndex = " << stepIndex - << ", initialDimensions = " << toString(initialDimensions) - << ", initialLength = " << initialLength << ", alignment = " << alignment - << ", padding = " << padding << ")"; - CHECK(!mDeclared); - CHECK_GT(initialLength, 0u); - const uint32_t paddedLength = roundUp(initialLength, padding); - auto [_, isNew] = mSourceOperandToTemporary.emplace( - sourceOperandIndex, InternalLocationAndShape{stepIndex, 0, initialDimensions, - paddedLength, alignment, padding}); - CHECK(isNew); - mStepIndexToSourceOperandIndexes[stepIndex].emplace_back(sourceOperandIndex); -} - -bool DynamicTemporaries::redeclare(SourceOperandIndex sourceOperandIndex, - const Dimensions& newDimensions, uint32_t newLength) { - auto createAndLogResult = [sourceOperandIndex, &newDimensions, newLength](bool changedShape) { - VLOG(EXECUTION) << "DynamicTemporaries::redeclare(sourceOperandIndex = " - << toString(sourceOperandIndex) - << ", newDimensions = " << toString(newDimensions) - << 
", newLength = " << newLength << ") -> " << toString(changedShape); - return changedShape; - }; - - CHECK(mDeclared); - CHECK_GT(newLength, 0u); - - InternalLocationAndShape& temp = mSourceOperandToTemporary.at(sourceOperandIndex); - const uint32_t paddedLength = roundUp(newLength, temp.padding); - if (temp.paddedLength == paddedLength && temp.dimensions == newDimensions) { - return createAndLogResult(false); - } - if (temp.paddedLength < paddedLength) { - // Otherwise allocation remains valid, even if it may be suboptimal - // (because it uses more space than needed). Use case: Don't force - // client to allocate again just because the client reported more - // accurate shape information. - mAllocatedStepIndexes.erase(temp.stepIndex); - } - temp.paddedLength = paddedLength; - temp.dimensions = newDimensions; - return createAndLogResult(true); -} - -int DynamicTemporaries::allocate(uint32_t stepIndex) { - VLOG(EXECUTION) << "DynamicTemporaries::allocate(stepIndex = " << stepIndex << ")"; - - CHECK(mDeclared); - - const auto sourceOperandIndexesI = mStepIndexToSourceOperandIndexes.find(stepIndex); - if (sourceOperandIndexesI == mStepIndexToSourceOperandIndexes.end()) { - return ANEURALNETWORKS_NO_ERROR; - } - - // perform layout - uint32_t newSize = 0; - for (const auto& sourceOperandIndex : sourceOperandIndexesI->second) { - InternalLocationAndShape& temp = mSourceOperandToTemporary.at(sourceOperandIndex); - // temp.paddedLength is already padded in declare and redeclare. - CHECK(temp.paddedLength % temp.padding == 0); - temp.offset = addTemporary(&newSize, temp.paddedLength, temp.alignment, kNoPadding).offset; - } - - // perform (re-)allocation - // TODO: Today we may shrink the allocation in order to avoid wasting memory. Is this important - // to conserve memory, or do we waste time reallocating? 
- const double kWaste = 0.2 /* arbitrary */; // Willing to waste space to avoid - // deallocation/reallocation overhead - auto& memory = mStepIndexToMemory[stepIndex]; - const uint32_t oldSize = (memory ? memory->getSize() : 0); - if ((oldSize >= newSize) && (oldSize <= newSize * (1 + kWaste))) { - // Suitable allocation already exists; nothing to do - } else { - int n; - std::tie(n, memory) = MemoryAshmem::create(newSize); - if (n != ANEURALNETWORKS_NO_ERROR) { - LOG(ERROR) << "Failed to allocate dynamic temporaries of size " << newSize - << " for step " << stepIndex; - mAllocatedStepIndexes.erase(stepIndex); - return n; - } - } - - mAllocatedStepIndexes.insert(stepIndex); - return ANEURALNETWORKS_NO_ERROR; -} - -bool DynamicTemporaries::allocated(uint32_t stepIndex) const { - return (mStepIndexToSourceOperandIndexes.find(stepIndex) == - mStepIndexToSourceOperandIndexes.end()) || - mAllocatedStepIndexes.count(stepIndex); -} - -std::optional<DynamicTemporaries::LocationAndShape> DynamicTemporaries::lookup( - SourceOperandIndex sourceOperandIndex, bool mustBeAllocated) const { - CHECK(mDeclared); - if (auto it = mSourceOperandToTemporary.find(sourceOperandIndex); - it != mSourceOperandToTemporary.end()) { - const InternalLocationAndShape& temp = it->second; - const bool isAllocated = allocated(temp.stepIndex); - if (mustBeAllocated) { - CHECK(isAllocated) << "Source operand " << toString(sourceOperandIndex) - << " must be allocated"; - } - if (isAllocated) { - return LocationAndShape{mStepIndexToMemory.at(temp.stepIndex).get(), temp.offset, - &temp.dimensions, temp.paddedLength}; - } else { - return LocationAndShape{nullptr, ~uint32_t(0), &temp.dimensions, temp.paddedLength}; - } - } - return std::nullopt; -} - ExecutionStep::ExecutionStep(ExecutionPlan* plan, uint32_t stepIndex, uint32_t sourceModelIndex, std::shared_ptr<Device> device) : mPlan(plan), @@ -451,19 +233,31 @@ // Sets its value. 
switch (operand.lifetime) { - case Operand::LifeTime::CONSTANT_COPY: { + case OperandLifeTime::CONSTANT_COPY: { const uint8_t* data = sourceModel.getPointerToOperandValue(operand.location.offset); n = mStepModel.setOperandValue(*stepOperandIndex, data, operand.location.length); + if (n != ANEURALNETWORKS_NO_ERROR) { + LOG(ERROR) << "Previous error occurred when partitioning the graph"; + return n; + } } break; - case Operand::LifeTime::CONSTANT_REFERENCE: { - const RuntimeMemory* memory = sourceModel.getMemories()[operand.location.poolIndex]; + case OperandLifeTime::CONSTANT_REFERENCE: { + const Memory* memory = sourceModel.getMemories()[operand.location.poolIndex]; n = mStepModel.setOperandValueFromMemory( *stepOperandIndex, memory, operand.location.offset, operand.location.length); + if (n != ANEURALNETWORKS_NO_ERROR) { + LOG(ERROR) << "Previous error occurred when partitioning the graph"; + return n; + } } break; - case Operand::LifeTime::NO_VALUE: { + case OperandLifeTime::NO_VALUE: { n = mStepModel.setOperandValue(*stepOperandIndex, nullptr, 0); + if (n != ANEURALNETWORKS_NO_ERROR) { + LOG(ERROR) << "Previous error occurred when partitioning the graph"; + return n; + } } break; - case Operand::LifeTime::TEMPORARY_VARIABLE: { // handled similarly to SUBGRAPH_OUTPUT + case OperandLifeTime::TEMPORARY_VARIABLE: { // handled similarly to SUBGRAPH_OUTPUT if (kind == INPUT) { // The first time we've seen this operand is as an // input. That means it must be defined by a @@ -477,10 +271,10 @@ mIndex); } } break; - case Operand::LifeTime::SUBGRAPH_INPUT: { + case OperandLifeTime::SUBGRAPH_INPUT: { mModelInputs.emplace_back(sourceOperandIndex, *stepOperandIndex); } break; - case Operand::LifeTime::SUBGRAPH_OUTPUT: { // handled similarly to TEMPORARY_VARIABLE + case OperandLifeTime::SUBGRAPH_OUTPUT: { // handled similarly to TEMPORARY_VARIABLE if (kind == INPUT) { // The first time we've seen this operand is as an // input. 
That means it must be defined by a @@ -490,26 +284,22 @@ // The first time we've seen this operand is as an // output. mModelOutputs.emplace_back(sourceOperandIndex, *stepOperandIndex); - // It may be an input to a different partition, so keep track of - // it. - mPlan->recordOutputDef(SourceOperandIndex(mSourceModelIndex, sourceOperandIndex), - mIndex); } } break; - case Operand::LifeTime::SUBGRAPH: { + case OperandLifeTime::SUBGRAPH: { const ModelBuilder* model = sourceModel.getReferencedModel(operand); n = mStepModel.setOperandValueFromModel(*stepOperandIndex, model); + if (n != ANEURALNETWORKS_NO_ERROR) { + LOG(ERROR) << "Previous error occurred when partitioning the graph"; + return n; + } } break; - case Operand::LifeTime::POINTER: { - const void* data = std::get<const void*>(operand.location.pointer); - n = mStepModel.setOperandValue(*stepOperandIndex, data, operand.location.length); + default: { + CHECK(!"unexpected"); } break; } - if (n != ANEURALNETWORKS_NO_ERROR) { - LOG(ERROR) << "Previous error occurred when partitioning the graph"; - } - return n; + return ANEURALNETWORKS_NO_ERROR; } int ExecutionStep::addOperation(int operationIndex) { @@ -527,7 +317,7 @@ // constant, or an operand written by a different partition. // // - We should not have seen any outputs. 
- auto addOperands = [this](const std::vector<uint32_t>& sourceModelOperands, + auto addOperands = [this](const hidl_vec<uint32_t>& sourceModelOperands, std::vector<uint32_t>* stepModelOperands, OperandKind kind) -> int { const uint32_t operandCount = static_cast<uint32_t>(sourceModelOperands.size()); for (uint32_t i = 0; i < operandCount; i++) { @@ -547,40 +337,28 @@ } void ExecutionStep::mapInputsAndOutputs( - std::shared_ptr<StepExecutor> executor, - const std::vector<OutputShape>* mainModelOutputShapes, const RuntimeMemory* temporaryMemory, - const std::map<SourceOperandIndex, StaticTemporaryLocation>& - sourceOperandToLocationOfTemporary, - const DynamicTemporaries& dynamicTemporaries, + std::shared_ptr<StepExecutor> executor, const Memory* temporaryMemory, + const std::map<SourceOperandIndex, uint32_t>& sourceOperandToOffsetOfTemporary, const std::map<SourceOperandIndex, uint32_t>& sourceOperandToInputIndex, const std::map<SourceOperandIndex, uint32_t>& sourceOperandToOutputIndex, const std::map<SourceOperandIndex, ConstantReferenceLocation>& sourceOperandToConstantReference) const { auto mapInput = [&](uint32_t stepModelOperandIndex, uint32_t stepInputIndex) { SourceOperandIndex sourceOperandIndex(mSourceModelIndex, stepModelOperandIndex); - if (auto it = sourceOperandToLocationOfTemporary.find(sourceOperandIndex); - it != sourceOperandToLocationOfTemporary.end()) { - const auto& loc = it->second; - executor->setInputFromMemory(stepInputIndex, temporaryMemory, loc.offset, - loc.paddedLength); - } else if (auto loc = dynamicTemporaries.lookup(sourceOperandIndex); loc != std::nullopt) { - executor->setInputFromMemory(stepInputIndex, loc->memory, loc->offset, - loc->paddedLength, *loc->dimensions); + if (auto it = sourceOperandToOffsetOfTemporary.find(sourceOperandIndex); + it != sourceOperandToOffsetOfTemporary.end()) { + executor->setInputFromMemory(stepInputIndex, temporaryMemory, it->second); } else if (auto it = 
sourceOperandToInputIndex.find(sourceOperandIndex); it != sourceOperandToInputIndex.end()) { executor->mapInput(it->second, stepInputIndex); } else if (auto it = sourceOperandToOutputIndex.find(sourceOperandIndex); it != sourceOperandToOutputIndex.end()) { - executor->mapOutputToInput(it->second, stepInputIndex, - mainModelOutputShapes - ? &mainModelOutputShapes->at(it->second).dimensions - : nullptr); + executor->mapOutputToInput(it->second, stepInputIndex); } else if (auto it = sourceOperandToConstantReference.find(sourceOperandIndex); it != sourceOperandToConstantReference.end()) { // Constant partition boundary operand. This could be an IF branch // model input or a WHILE variable initializer. - const auto& loc = it->second; - executor->setInputFromMemory(stepInputIndex, loc.memory, loc.offset, loc.length); + executor->setInputFromMemory(stepInputIndex, it->second.memory, it->second.offset); } else { CHECK(false) << "Cannot map step input " << stepInputIndex << " from operand " << toString(sourceOperandIndex); @@ -588,14 +366,9 @@ }; auto mapOutput = [&](uint32_t stepModelOperandIndex, uint32_t stepOutputIndex) { SourceOperandIndex sourceOperandIndex(mSourceModelIndex, stepModelOperandIndex); - if (auto it = sourceOperandToLocationOfTemporary.find(sourceOperandIndex); - it != sourceOperandToLocationOfTemporary.end()) { - const auto& loc = it->second; - executor->setOutputFromMemory(stepOutputIndex, temporaryMemory, loc.offset, - loc.paddedLength); - } else if (auto loc = dynamicTemporaries.lookup(sourceOperandIndex); loc != std::nullopt) { - executor->setOutputFromMemory(stepOutputIndex, loc->memory, loc->offset, - loc->paddedLength, *loc->dimensions); + if (auto it = sourceOperandToOffsetOfTemporary.find(sourceOperandIndex); + it != sourceOperandToOffsetOfTemporary.end()) { + executor->setOutputFromMemory(stepOutputIndex, temporaryMemory, it->second); } else if (auto it = sourceOperandToOutputIndex.find(sourceOperandIndex); it != 
sourceOperandToOutputIndex.end()) { executor->mapOutput(it->second, stepOutputIndex); @@ -612,32 +385,6 @@ } } -void ExecutionPlan::CompoundBody::findModelOutputsThatAreDownstreamInputs() { - auto declareModelOutputIsDownstreamInput = - [this](const SourceOperandIndex& sourceOperandIndex) { - const auto it = mOutputToDefiningExecutionStep.find(sourceOperandIndex); - CHECK(it != mOutputToDefiningExecutionStep.end()); - uint32_t stepIndex = it->second; - CHECK_LT(stepIndex, mSteps.size()); - VLOG(COMPILATION) - << "ExecutionStep(" << stepIndex - << ")->declareModelOutputIsDownstreamInput(mSourceOperandToOutputIndex.at" - << toString(sourceOperandIndex) << ")"; - CHECK(mSourceOperandToOutputIndex.find(sourceOperandIndex) != - mSourceOperandToOutputIndex.end()); - mSteps[stepIndex]->executionStep()->declareModelOutputIsDownstreamInput( - mSourceOperandToOutputIndex.at(sourceOperandIndex)); - }; - for (const auto& logicalStep : mSteps) { - if (const ExecutionStep* step = logicalStep->tryExecutionStep()) { - for (const auto& output : step->getOutputsAsStepModelInputs()) { - SourceOperandIndex sourceOperandIndex(step->getSourceModelIndex(), output.first); - declareModelOutputIsDownstreamInput(sourceOperandIndex); - } - } - } -} - void ExecutionPlan::CompoundBody::findTempsAsStepModelOutputs() { auto recordAsOutputIfTemporary = [this](const SourceOperandIndex& sourceOperandIndex) { const auto it = mTemporaryToDefiningExecutionStep.find(sourceOperandIndex); @@ -672,17 +419,6 @@ } } -void ExecutionStep::declareModelOutputIsDownstreamInput(uint32_t mainModelOutputIndex) { - VLOG(COMPILATION) << "ExecutionStep(" << mIndex << ")::declareModelOutputIsDownstreamInput(" - << mainModelOutputIndex << ")"; - const auto it = std::find(mOutputIndexStepModelToMainModel.begin(), - mOutputIndexStepModelToMainModel.end(), mainModelOutputIndex); - CHECK(it != mOutputIndexStepModelToMainModel.end()); - const uint32_t stepModelOutputIndex = it - mOutputIndexStepModelToMainModel.begin(); - 
CHECK(stepModelOutputIndex < mModelOutputs.size()); - mModelOutputsThatAreDownstreamInputs.insert(stepModelOutputIndex); -} - void ExecutionStep::recordTempAsStepModelOutput(uint32_t stepOperandIndex) { const auto it = mOperandMap.find(stepOperandIndex); CHECK(it != mOperandMap.end()); @@ -730,10 +466,10 @@ } static bool hasUnknownSize(const Operand& operand) { - if (operand.dimensions.empty()) { + if (operand.dimensions.size() == 0) { return TypeManager::get()->isTensorType(operand.type); } - for (const Dimension& dimension : operand.dimensions) { + for (uint32_t dimension : operand.dimensions) { if (dimension == 0) { return true; } @@ -749,8 +485,8 @@ const Operand& operand = mStepModel.getOperand(stepModelOutput.second); if (hasUnknownSize(operand)) { *hasOutputOfUnknownSize = true; - VLOG(COMPILATION) << "StepModelOutput (operand#" << stepModelOutput.first - << " of source graph) has unknown size: " << operand; + VLOG(COMPILATION) << "StepModelOutput (operand#" << toString(stepModelOutput.first) + << " of source graph) has unknown size: " << toString(operand); } } @@ -766,15 +502,6 @@ mStepModelOutputs.insert(mStepModelOutputs.end(), mTempsAsStepModelOutputs.begin(), mTempsAsStepModelOutputs.end()); - // A step model with no inputs or no outputs is an invalid model. Note that we would like to - // attempt full CPU fallback if allowed, so we return OP_FAILED here rather than BAD_DATA from - // model validation. 
- if (hasNoInputsOrNoOutputs()) { - VLOG(COMPILATION) << "ExecutionStep::finishStepModel: finishing step model with no inputs " - "or no outputs"; - return ANEURALNETWORKS_OP_FAILED; - } - if (mSourceModelIndex == kMainModelInSourceModels) { std::map<uint32_t, uint32_t> mainModelOperandToInputIndex; for (uint32_t i = 0, n = mainModel->inputCount(); i < n; ++i) { @@ -828,43 +555,54 @@ [](auto& e) { return e.second; }); NN_RETURN_IF_ERROR(mStepModel.identifyInputsAndOutputs(inputs.size(), inputs.data(), outputs.size(), outputs.data())); + // TODO: Model::finish() should use ValidationMode::RUNTIME when sending the + // step model to CpuDevice. Right now, this is harmless because the only + // difference in validation occurs with control flow operations and inputs + // or outputs of unknown size and we never send control flow operations to + // CpuDevice. We need to address this if this behavior changes (b/151634976). NN_RETURN_IF_ERROR(mStepModel.finish()); // TODO: Move compilation elsewhere? 
VLOG(COMPILATION) << "ExecutionStep::finishStepModel, compilation on " << mDevice->getName(); - return compile(*mDevice, mStepModel, executionPreference, priority, {}, *mPlan->getCacheInfo(), + return compile(*mDevice, mStepModel, executionPreference, priority, {}, *mPlan->getCacheDir(), &mToken, &mPreparedStepModel); } void ExecutionStep::dump() const { if (VLOG_IS_ON(COMPILATION)) { VLOG(COMPILATION) << "Step#" << mIndex << ": execute on " << mDevice->getName(); - logModelToInfo(mStepModel.makeModel()); + logModelToInfo(mStepModel.makeHidlModel()); } } -std::ostream& operator<<(std::ostream& os, const IfStep& step) { - return os << "Step#" << step.index << ": if " << toString(step.conditionOperandIndex) - << " then=" << step.thenStepIndex << " else=" << step.elseStepIndex; +std::string toString(const IfStep& step) { + std::ostringstream oss; + oss << "Step#" << step.index << ": if " << toString(step.conditionOperandIndex) + << " then=" << step.thenStepIndex << " else=" << step.elseStepIndex; + return oss.str(); } -std::ostream& operator<<(std::ostream& os, const WhileStep& step) { - return os << "Step#" << step.index << ": while cond=" << step.condStepIndex - << " body=" << step.bodyStepIndex << " exit=" << step.exitStepIndex; +std::string toString(const WhileStep& step) { + std::ostringstream oss; + oss << "Step#" << step.index << ": while cond=" << step.condStepIndex + << " body=" << step.bodyStepIndex << " exit=" << step.exitStepIndex; + return oss.str(); } -std::ostream& operator<<(std::ostream& os, const GotoStep& step) { - return os << "Step#" << step.index << ": goto " << step.gotoStepIndex; +std::string toString(const GotoStep& step) { + std::ostringstream oss; + oss << "Step#" << step.index << ": goto " << step.gotoStepIndex; + return oss.str(); } void LogicalStep::dump() const { if (VLOG_IS_ON(COMPILATION)) { if (const IfStep* step = tryIfStep()) { - VLOG(COMPILATION) << *step; + VLOG(COMPILATION) << toString(*step); } else if (const WhileStep* step = 
tryWhileStep()) { - VLOG(COMPILATION) << *step; + VLOG(COMPILATION) << toString(*step); } else if (const GotoStep* step = tryGotoStep()) { - VLOG(COMPILATION) << *step; + VLOG(COMPILATION) << toString(*step); } else { executionStep()->dump(); } @@ -873,8 +611,7 @@ int ExecutionPlan::CompoundBody::finish(const SourceModels* sourceModels, int32_t executionPreference, int32_t priority, - const OptionalTimePoint& deadline, - int simulateFailureResultCode) { + const std::optional<Deadline>& deadline) { CHECK(!mSuccessfulFinish); CHECK(!deadline.has_value()); const ModelBuilder* mainModel = sourceModels->getModel(kMainModelInSourceModels); @@ -893,26 +630,8 @@ findTempsAsStepModelOutputs(); for (const auto& logicalStep : mSteps) { if (ExecutionStep* step = logicalStep->tryExecutionStep()) { - bool stepHasDynamicTemporaries = false; - int n = step->finishStepModel(mainModel, &stepHasDynamicTemporaries, + int n = step->finishStepModel(mainModel, &mHasStepModelOutputOfUnknownSize, executionPreference, priority); - if (stepHasDynamicTemporaries) { - mHasDynamicTemporaries = true; - if (step->getDevice()->getFeatureLevel() < kHalVersionV1_2ToApi.featureLevel) { - // Until HAL 1.2, an Operand with lifetime SUBGRAPH_OUTPUT - // must have fully specified dimensions either in the - // Operand or in the RequestArgument. In the case of a - // dynamic temporary, we won't be able to supply fully - // specified dimensions in either. 
- VLOG(COMPILATION) - << "ExecutionPlan::CompoundBody::finish -- step#" << step->getIndex() - << " defines dynamic temporaries but is scheduled on pre-1.2 device " - << step->getDevice()->getName(); - if (n == ANEURALNETWORKS_NO_ERROR) { - n = ANEURALNETWORKS_OP_FAILED; - } - } - } if (n != ANEURALNETWORKS_NO_ERROR) { VLOG(COMPILATION) << "ExecutionPlan::CompoundBody::finish -- finishStepModel failed"; @@ -939,11 +658,10 @@ CHECK(logicalStep->isGoto()); } } - - if (simulateFailureResultCode != ANEURALNETWORKS_NO_ERROR) { - VLOG(COMPILATION) << "ExecutionPlan::CompoundeBody::finish: simulating failure, ResultCode " - << simulateFailureResultCode; - return simulateFailureResultCode; + if (mHasStepModelOutputOfUnknownSize) { + VLOG(COMPILATION) + << "ExecutionPlan::CompoundBody::finish -- mHasStepModelOutputOfUnknownSize"; + return ANEURALNETWORKS_OP_FAILED; } for (uint32_t i = 0, n = mainModel->inputCount(); i < n; ++i) { @@ -956,8 +674,6 @@ } findControlFlowBoundaryConstants(sourceModels); - findModelOutputsThatAreDownstreamInputs(); - findMemoryStepRoles(); mSuccessfulFinish = true; return ANEURALNETWORKS_NO_ERROR; @@ -970,17 +686,12 @@ const ModelBuilder* sourceModel = sourceModels->getModel(sourceOperandIndex.first); const Operand& operand = sourceModel->getOperand(sourceOperandIndex.second); const DataLocation& location = operand.location; - if (operand.lifetime == Operand::LifeTime::CONSTANT_COPY) { + if (operand.lifetime == OperandLifeTime::CONSTANT_COPY) { mSourceOperandToBoundaryConstantCopy[sourceOperandIndex] = { .buffer = sourceModel->getPointerToOperandValue(location.offset), .length = location.length, }; - } else if (operand.lifetime == Operand::LifeTime::POINTER) { - mSourceOperandToBoundaryConstantCopy[sourceOperandIndex] = { - .buffer = static_cast<const uint8_t*>(std::get<const void*>(location.pointer)), - .length = location.length, - }; - } else if (operand.lifetime == Operand::LifeTime::CONSTANT_REFERENCE) { + } else if (operand.lifetime == 
OperandLifeTime::CONSTANT_REFERENCE) { mSourceOperandToBoundaryConstantReference[sourceOperandIndex] = { .memory = sourceModel->getMemories()[location.poolIndex], .offset = location.offset, @@ -1002,137 +713,44 @@ } } -void ExecutionPlan::CompoundBody::findMemoryStepRoles() { - mSourceOperandToStepRoles = StepRoleAnalyzer::analyze([this](StepRoleAnalyzer& analyzer) { - for (const auto& logicalStep : mSteps) { - if (const ExecutionStep* step = logicalStep->tryExecutionStep()) { - const auto& stepModelInputs = step->getStepModelInputs(); - for (uint32_t i = 0; i < stepModelInputs.size(); i++) { - const auto& [sourceIndex, stepIndex] = stepModelInputs[i]; - analyzer.addRole(*step, sourceIndex, IOType::INPUT, i); - } - const auto& stepModelOutputs = step->getStepModelOutputs(); - for (uint32_t i = 0; i < stepModelOutputs.size(); i++) { - const auto& [sourceIndex, stepIndex] = stepModelOutputs[i]; - analyzer.addRole(*step, sourceIndex, IOType::OUTPUT, i); - } - } else if (const IfStep* step = logicalStep->tryIfStep()) { - // See ExecutionPlan::nextCompound(const IfStep*, ...). - // - // For interpreted IF operation, the outer input memories may be directly used by - // the SUBGRAPH_INPUTs of the then and else model. - CHECK_EQ(step->thenBranchInputOperands.size(), step->outerInputOperands.size()); - CHECK_EQ(step->elseBranchInputOperands.size(), step->outerInputOperands.size()); - for (uint32_t i = 0; i < step->outerInputOperands.size(); i++) { - analyzer.setUsedBy(step->outerInputOperands[i], - step->thenBranchInputOperands[i]); - analyzer.setUsedBy(step->outerInputOperands[i], - step->elseBranchInputOperands[i]); - } - // For interpreted IF operation, the outer output memories may be directly used by - // the SUBGRAPH_OUTPUTs of the then and else model. 
- CHECK_EQ(step->thenBranchOutputOperands.size(), step->outerOutputOperands.size()); - CHECK_EQ(step->elseBranchOutputOperands.size(), step->outerOutputOperands.size()); - for (uint32_t i = 0; i < step->outerOutputOperands.size(); i++) { - analyzer.setUsedBy(step->outerOutputOperands[i], - step->thenBranchOutputOperands[i]); - analyzer.setUsedBy(step->outerOutputOperands[i], - step->elseBranchOutputOperands[i]); - } - } else if (const WhileStep* step = logicalStep->tryWhileStep()) { - // See ExecutionPlan::nextCompound(const WhileStep*, ...). - // - // For interpreted WHILE operation, the following memories are involved: - // a. the outer input memories to the WHILE operation - // b. the outer output memories to the WHILE operation - // c. the output memory of the condition model - // d. one set of output memories of the body model - // e. another set of output memories of the body model - // - // The memories are used in the following ways: - // - // - Condition model: - // * In the first iteration: inputs use (a); output uses (c) - // * In the following iterations: inputs use (d) or (e) for input-output and - // state-only operands, and (a) for input-only operands; output uses (c) - // - // - Body model: - // * In all iterations: inputs are the same as the condition model; outputs use - // (d) or (e) - // - // Therefore, we configure the analyzer with the following used-by relationships: - // - The outer input memories (a) may be directly used by the SUBGRAPH_INPUTs of - // the condition model for all inputs in the first iteration, as well as the - // input-only operands in the following iterations. 
- CHECK_EQ(step->condInputOperands.size(), step->outerInputOperands.size()); - for (uint32_t i = 0; i < step->outerInputOperands.size(); i++) { - analyzer.setUsedBy(step->outerInputOperands[i], step->condInputOperands[i]); - } - // - The output memories of the body model (d) and (e) may be directly used by the - // SUBGRAPH_INPUTs of the condition model for input-output and state-only operands - // after the first iteration. - CHECK_GE(step->condInputOperands.size(), step->bodyOutputOperands.size()); - for (uint32_t i = 0; i < step->bodyOutputOperands.size(); i++) { - analyzer.setUsedBy(step->bodyOutputOperands[i], step->condInputOperands[i]); - } - // - The SUBGRAPH_INPUTs of the condition model are directly used by the - // SUBGRAPH_INPUTs of the body model for all inputs in all iterations. - CHECK_EQ(step->bodyInputOperands.size(), step->condInputOperands.size()); - for (uint32_t i = 0; i < step->bodyInputOperands.size(); i++) { - analyzer.setUsedBy(step->condInputOperands[i], step->bodyInputOperands[i]); - } - } else if (logicalStep->isGoto()) { - // Nothing to do. 
- } else { - CHECK(false) << "Unexpected LogicalStep kind"; - } - } - }); -} - int ExecutionPlan::SimpleBody::finish(const SourceModels*, int32_t executionPreference, - int32_t priority, const OptionalTimePoint& deadline, - int simulateFailureResultCode) { + int32_t priority, const std::optional<Deadline>& deadline) { CHECK(!mSuccessfulFinish); CHECK(mDevice != nullptr); VLOG(COMPILATION) << "ExecutionPlan::SimpleBody::finish, compilation"; - int n = compile(*mDevice, *mModel, executionPreference, priority, deadline, *mCacheInfo, - &mToken, &mPreparedModel); - if (n == ANEURALNETWORKS_NO_ERROR && simulateFailureResultCode != ANEURALNETWORKS_NO_ERROR) { - VLOG(COMPILATION) << "ExecutionPlan::SimpleBody::finish: simulating failure, ResultCode " - << simulateFailureResultCode; - n = simulateFailureResultCode; - } + const int n = compile(*mDevice, *mModel, executionPreference, priority, deadline, *mCacheDir, + &mToken, &mPreparedModel); mSuccessfulFinish = (n == ANEURALNETWORKS_NO_ERROR); return n; } int ExecutionPlan::finish(int32_t executionPreference, int32_t priority, - const OptionalTimePoint& deadline, int simulateFailureResultCode) { + const std::optional<Deadline>& deadline) { CHECK(mBody != nullptr); - return mBody->finish(&getSourceModels(), executionPreference, priority, deadline, - simulateFailureResultCode); + return mBody->finish(&getSourceModels(), executionPreference, priority, deadline); } +ExecutionPlan::Controller::Controller(const ExecutionPlan* plan, ExecutionBuilder* executionBuilder, + const BurstBuilder* burstBuilder) + : Controller(plan, executionBuilder, burstBuilder, 0, {}, {}, {}, {}, {}, {}) {} + ExecutionPlan::Controller::Controller( const ExecutionPlan* plan, ExecutionBuilder* executionBuilder, const BurstBuilder* burstBuilder, uint32_t totalSizeOfTemporaries, - std::map<SourceOperandIndex, StaticTemporaryLocation> sourceOperandToLocationOfTemporary, - std::map<SourceOperandIndex, StaticTemporaryLocation> 
sourceOperandToLocationOfTemporary2, + std::map<SourceOperandIndex, uint32_t> sourceOperandToOffsetOfTemporary, + std::map<SourceOperandIndex, uint32_t> sourceOperandToOffsetOfTemporary2, std::map<SourceOperandIndex, uint32_t> sourceOperandToInputIndex, std::map<SourceOperandIndex, uint32_t> sourceOperandToOutputIndex, const std::map<SourceOperandIndex, ConstantCopyLocation>& sourceOperandToConstantCopy, - std::map<SourceOperandIndex, ConstantReferenceLocation> sourceOperandToConstantReference, - DynamicTemporaries dynamicTemporaries) + std::map<SourceOperandIndex, ConstantReferenceLocation> sourceOperandToConstantReference) : mPlan(plan), mExecutionBuilder(executionBuilder), mBurstBuilder(burstBuilder), - mSourceOperandToLocationOfTemporary(std::move(sourceOperandToLocationOfTemporary)), - mSourceOperandToLocationOfTemporary2(std::move(sourceOperandToLocationOfTemporary2)), + mSourceOperandToOffsetOfTemporary(std::move(sourceOperandToOffsetOfTemporary)), + mSourceOperandToOffsetOfTemporary2(std::move(sourceOperandToOffsetOfTemporary2)), mSourceOperandToInputIndex(std::move(sourceOperandToInputIndex)), mSourceOperandToOutputIndex(std::move(sourceOperandToOutputIndex)), mSourceOperandToConstantReference(std::move(sourceOperandToConstantReference)), - mDynamicTemporaries(std::move(dynamicTemporaries)), mNextStepIndex(0), mFallbackNextStepIndex(kBadStepIndex), mLastStepSyncFd(-1) { @@ -1146,8 +764,7 @@ mNextStepIndex = kBadStepIndex; } for (const auto& [sourceOperandIndex, location] : sourceOperandToConstantCopy) { - memcpy(mTemporaries->getPointer() + - mSourceOperandToLocationOfTemporary[sourceOperandIndex].offset, + memcpy(mTemporaries->getPointer() + mSourceOperandToOffsetOfTemporary[sourceOperandIndex], location.buffer, location.length); } } @@ -1157,11 +774,12 @@ // indicate the regular execution path should be used. 
This can occur either // because PreparedModel was nullptr (cpu was best choice), or because the // IPreparedModel was of insufficient version or failed to configure the burst. -std::vector<SharedBurst> ExecutionPlan::makeBursts() const { +std::vector<std::shared_ptr<ExecutionBurstController>> ExecutionPlan::makeBursts( + int preference) const { switch (mState) { // burst object for each partition in the compound case case COMPOUND: { - std::vector<SharedBurst> bursts; + std::vector<std::shared_ptr<ExecutionBurstController>> bursts; bursts.reserve(compound()->mSteps.size()); for (const auto& logicalStep : compound()->mSteps) { if (!logicalStep->isExecution()) { @@ -1170,12 +788,10 @@ } if (const auto preparedModel = logicalStep->executionStep()->getPreparedStepModel()) { - const auto maybeBurst = preparedModel->configureExecutionBurst(); - if (!maybeBurst.has_value()) { - LOG(ERROR) << "preparedModel->configureExecutionBurst() failed with " - << maybeBurst.error().code << ": " << maybeBurst.error().message; - } - bursts.push_back(maybeBurst.value_or(nullptr)); + const bool preferPowerOverLatency = + (preference == ANEURALNETWORKS_PREFER_LOW_POWER); + bursts.push_back( + preparedModel->configureExecutionBurst(preferPowerOverLatency)); } else { bursts.push_back(nullptr); } @@ -1184,15 +800,12 @@ } // single burst object for the simple case case SIMPLE: { - std::vector<SharedBurst> burst; + std::vector<std::shared_ptr<ExecutionBurstController>> burst; auto simpleBody = simple(); if (const auto preparedModel = simpleBody->mPreparedModel) { - const auto maybeBurst = preparedModel->configureExecutionBurst(); - if (!maybeBurst.has_value()) { - LOG(ERROR) << "preparedModel->configureExecutionBurst() failed with " - << maybeBurst.error().code << ": " << maybeBurst.error().message; - } - burst.push_back(maybeBurst.value_or(nullptr)); + const bool preferPowerOverLatency = + (preference == ANEURALNETWORKS_PREFER_LOW_POWER); + 
burst.push_back(preparedModel->configureExecutionBurst(preferPowerOverLatency)); } else { burst.push_back(nullptr); } @@ -1207,10 +820,11 @@ std::shared_ptr<ExecutionPlan::Controller> ExecutionPlan::makeController( ExecutionBuilder* executionBuilder, const BurstBuilder* burstBuilder) const { CHECK(isValid()); - CHECK(mState != SIMPLE); - const auto* body = compound(); - // Create the layout for a RuntimeMemory object big enough to hold - // - every partition boundary TEMPORARY operand that is not a dynamic temporary, and + if (mState == SIMPLE) { + return std::shared_ptr<Controller>(new Controller(this, executionBuilder, burstBuilder)); + } + // Create the layout for a Memory object big enough to hold + // - every partition boundary TEMPORARY operand and // - buffers required by the control flow implementation. // // TODO: Rethink this approach for managing temporaries. Some @@ -1231,50 +845,46 @@ // what our Memory objects represent. // uint32_t totalSizeOfTemporaries = 0; + auto addTemporaryOfSize = [&totalSizeOfTemporaries](uint32_t size) { + totalSizeOfTemporaries += alignBytesNeeded(totalSizeOfTemporaries, size); + const uint32_t offset = totalSizeOfTemporaries; + totalSizeOfTemporaries += size; + return offset; + }; // This function has two modes of operation: // 1. When lifetime is TEMPORARY_VARIABLE, we allocate memory for - // TEMPORARY_VARIABLE source operands that are not dynamic temporaries, - // skip TEMPORARY_VARIABLE source operands that are dynamic temporaries, - // skip SUBGRAPH_OUTPUT source operands, and panic if we see a source - // operand of another lifetime. + // TEMPORARY_VARIABLE source operands, skip SUBGRAPH_OUTPUT source + // operands, and panic if we see a source operand of another lifetime. // 2. When lifetime is SUBGRAPH_OUTPUT, we allocate memory for // SUBGRAPH_OUTPUT source operands and panic if we see a source operand // of another lifetime. 
- auto mapTemporary = [body, executionBuilder, &totalSizeOfTemporaries]( - const SourceOperandIndex& sourceOperandIndex, - std::map<SourceOperandIndex, StaticTemporaryLocation>* - sourceOperandToLocationOfTemporary, - Operand::LifeTime lifetime = - Operand::LifeTime::TEMPORARY_VARIABLE) { - CHECK(lifetime == Operand::LifeTime::TEMPORARY_VARIABLE || - lifetime == Operand::LifeTime::SUBGRAPH_OUTPUT); - const Operand& sourceOperand = executionBuilder->getSourceOperand(sourceOperandIndex); - if (lifetime == Operand::LifeTime::TEMPORARY_VARIABLE && - sourceOperand.lifetime == Operand::LifeTime::SUBGRAPH_OUTPUT) { - // See the caller for explanation. - return; - } - CHECK_EQ(sourceOperand.lifetime, lifetime); - const uint32_t size = TypeManager::get()->getSizeOfData(sourceOperand); - if (size != 0u) { - const auto memoryPreference = - body->getMemoryPreferenceOfSourceOperand(sourceOperandIndex); - const auto loc = addTemporary(&totalSizeOfTemporaries, size, memoryPreference.alignment, - memoryPreference.padding); - auto [_, isNew] = sourceOperandToLocationOfTemporary->emplace(sourceOperandIndex, loc); - CHECK(isNew); - VLOG(EXECUTION) << "temp: operand " << toString(sourceOperandIndex) - << " offset = " << loc.offset << " paddedLength = " << loc.paddedLength; - } else { - // Unknown size, hence dynamic temporary. The mapping will - // be established elsewhere (DynamicTemporaries::allocate()). 
- CHECK_EQ(lifetime, Operand::LifeTime::TEMPORARY_VARIABLE); - CHECK_EQ(sourceOperand.lifetime, Operand::LifeTime::TEMPORARY_VARIABLE); - } - }; - std::map<SourceOperandIndex, StaticTemporaryLocation> sourceOperandToLocationOfTemporary; - std::map<SourceOperandIndex, StaticTemporaryLocation> sourceOperandToLocationOfTemporary2; - for (const auto& logicalStep : body->mSteps) { + auto mapTemporary = + [executionBuilder, addTemporaryOfSize]( + const SourceOperandIndex& sourceOperandIndex, + std::map<SourceOperandIndex, uint32_t>* sourceOperandToOffsetOfTemporary, + OperandLifeTime lifetime = OperandLifeTime::TEMPORARY_VARIABLE) { + CHECK(lifetime == OperandLifeTime::TEMPORARY_VARIABLE || + lifetime == OperandLifeTime::SUBGRAPH_OUTPUT); + const Operand& sourceOperand = + executionBuilder->getSourceOperand(sourceOperandIndex); + if (lifetime == OperandLifeTime::TEMPORARY_VARIABLE && + sourceOperand.lifetime == OperandLifeTime::SUBGRAPH_OUTPUT) { + // See the caller for explanation. + return; + } + CHECK(sourceOperand.lifetime == lifetime); + const uint32_t size = TypeManager::get()->getSizeOfData(sourceOperand); + CHECK_NE(size, 0u); + const uint32_t offset = addTemporaryOfSize(size); + auto [_, isNew] = + sourceOperandToOffsetOfTemporary->emplace(sourceOperandIndex, offset); + CHECK(isNew); + VLOG(EXECUTION) << "temp: operand " << toString(sourceOperandIndex) + << " offset = " << offset; + }; + std::map<SourceOperandIndex, uint32_t> sourceOperandToOffsetOfTemporary; + std::map<SourceOperandIndex, uint32_t> sourceOperandToOffsetOfTemporary2; + for (const auto& logicalStep : compound()->mSteps) { if (const ExecutionStep* step = logicalStep->tryExecutionStep()) { // Allocate memory for ExecutionStep temporary outputs that are // inputs to other steps, as determined by @@ -1292,7 +902,7 @@ // below). 
for (const auto& output : step->getTempsAsStepModelOutputs()) { mapTemporary(SourceOperandIndex(step->getSourceModelIndex(), output.first), - &sourceOperandToLocationOfTemporary); + &sourceOperandToOffsetOfTemporary); } } else if (const IfStep* step = logicalStep->tryIfStep()) { // Allocate memory for all temporary outputs of an IfStep because @@ -1314,7 +924,7 @@ // step->bodyOutputOperands and step->condOutputOperand handling // below). for (const auto& sourceOperandIndex : step->outerOutputOperands) { - mapTemporary(sourceOperandIndex, &sourceOperandToLocationOfTemporary); + mapTemporary(sourceOperandIndex, &sourceOperandToOffsetOfTemporary); } } else if (const WhileStep* step = logicalStep->tryWhileStep()) { // Allocate memory for all temporary outputs of an WhileStep because @@ -1331,69 +941,47 @@ // step->bodyOutputOperands and step->condOutputOperand handling // below). for (const auto& sourceOperandIndex : step->outerOutputOperands) { - mapTemporary(sourceOperandIndex, &sourceOperandToLocationOfTemporary); + mapTemporary(sourceOperandIndex, &sourceOperandToOffsetOfTemporary); } // Allocate memory for body model outputs. Note that we could use // the outer output operand memory instead but we currently don't do // so (b/148206073). for (const auto& sourceOperandIndex : step->bodyOutputOperands) { - mapTemporary(sourceOperandIndex, &sourceOperandToLocationOfTemporary, - Operand::LifeTime::SUBGRAPH_OUTPUT); + mapTemporary(sourceOperandIndex, &sourceOperandToOffsetOfTemporary, + OperandLifeTime::SUBGRAPH_OUTPUT); // Allocate another set of temporaries for double buffering. - mapTemporary(sourceOperandIndex, &sourceOperandToLocationOfTemporary2, - Operand::LifeTime::SUBGRAPH_OUTPUT); + mapTemporary(sourceOperandIndex, &sourceOperandToOffsetOfTemporary2, + OperandLifeTime::SUBGRAPH_OUTPUT); } // Allocate memory for condition model output. // TODO: Share one condition output memory region between all loops. 
- mapTemporary(step->condOutputOperand, &sourceOperandToLocationOfTemporary, - Operand::LifeTime::SUBGRAPH_OUTPUT); + mapTemporary(step->condOutputOperand, &sourceOperandToOffsetOfTemporary, + OperandLifeTime::SUBGRAPH_OUTPUT); } else { CHECK(logicalStep->isGoto()); } } // Allocate temporary memory for boundary CONSTANT_COPY operands. - for (const auto& [sourceOperandIndex, location] : body->mSourceOperandToBoundaryConstantCopy) { - const auto memoryPreference = body->getMemoryPreferenceOfSourceOperand(sourceOperandIndex); - const auto loc = addTemporary(&totalSizeOfTemporaries, location.length, - memoryPreference.alignment, memoryPreference.padding); - sourceOperandToLocationOfTemporary.emplace(sourceOperandIndex, loc); + for (const auto& [sourceOperandIndex, location] : + compound()->mSourceOperandToBoundaryConstantCopy) { + const uint32_t offset = addTemporaryOfSize(location.length); + sourceOperandToOffsetOfTemporary.emplace(sourceOperandIndex, offset); VLOG(EXECUTION) << "temp (boundary constant): operand " << toString(sourceOperandIndex) - << " offset = " << loc.offset << " paddedLength = " << loc.paddedLength; + << " offset = " << offset; } - // Collect dynamic temporaries. - // TODO(b/157236079): Move some or all of this work to compilation time? - DynamicTemporaries dynamicTemporaries; - const TypeManager* typeManager = TypeManager::get(); - forEachDynamicTemporary([body, typeManager, &dynamicTemporaries]( - SourceOperandIndex sourceOperandIndex, - const Operand& sourceOperand, uint32_t definingStepIndex) { - CHECK(typeManager->isTensorType(sourceOperand.type)); - const auto memoryPreference = body->getMemoryPreferenceOfSourceOperand(sourceOperandIndex); - // TODO: For now we guess an initial size equal to element - // size, which is overly conservative. 
- const uint32_t size = typeManager->getSizeOfData(sourceOperand.type, {1}); - dynamicTemporaries.declare(sourceOperandIndex, definingStepIndex, sourceOperand.dimensions, - size, memoryPreference.alignment, memoryPreference.padding); - }); - dynamicTemporaries.endDeclarations(); - dynamicTemporaries.vlogDump("finished declarations"); - return std::shared_ptr<Controller>(new Controller( this, executionBuilder, burstBuilder, totalSizeOfTemporaries, - std::move(sourceOperandToLocationOfTemporary), - std::move(sourceOperandToLocationOfTemporary2), body->mSourceOperandToInputIndex, - body->mSourceOperandToOutputIndex, body->mSourceOperandToBoundaryConstantCopy, - body->mSourceOperandToBoundaryConstantReference, std::move(dynamicTemporaries))); + std::move(sourceOperandToOffsetOfTemporary), + std::move(sourceOperandToOffsetOfTemporary2), compound()->mSourceOperandToInputIndex, + compound()->mSourceOperandToOutputIndex, + compound()->mSourceOperandToBoundaryConstantCopy, + compound()->mSourceOperandToBoundaryConstantReference)); } // TODO: Find a better way to provide this functionality. 
int ExecutionPlan::fallback(std::shared_ptr<Controller> controller, - std::shared_ptr<StepExecutor>* executor, SharedBurst* burstController, - const std::vector<OutputShape>* mainModelOutputShapes) const { + std::shared_ptr<StepExecutor>* executor) const { *executor = nullptr; - if (burstController != nullptr) { - *burstController = nullptr; - } VLOG(EXECUTION) << "ExecutionPlan::fallback(" << SHOW_IF_DEBUG(controller << ", " << executor) << "): mFallbackNextStepIndex = " << controller->mFallbackNextStepIndex; @@ -1409,11 +997,11 @@ } controller->mNextStepIndex = controller->mFallbackNextStepIndex; - return next(controller, executor, burstController, mainModelOutputShapes); + return next(controller, executor); } ExecutionPlan::Buffer::Buffer(void* pointer, uint32_t size) - : mInfo(RunTimePoolInfo::createFromExistingBuffer(static_cast<uint8_t*>(pointer), size)), + : mInfo(RunTimePoolInfo::createFromExistingBuffer(reinterpret_cast<uint8_t*>(pointer), size)), mOffset(0) {} ExecutionPlan::Buffer::Buffer(RunTimePoolInfo info, uint32_t offset) @@ -1459,14 +1047,13 @@ std::optional<ExecutionPlan::Buffer> ExecutionPlan::getBuffer( std::shared_ptr<Controller> controller, SourceOperandIndex operandIndex) const { - const auto& sourceOperandToLocationOfTemporary = - controller->mSourceOperandToLocationOfTemporary; + const auto& sourceOperandToOffsetOfTemporary = controller->mSourceOperandToOffsetOfTemporary; const auto& sourceOperandToInputIndex = controller->mSourceOperandToInputIndex; const auto& sourceOperandToOutputIndex = controller->mSourceOperandToOutputIndex; const auto& sourceOperandToConstantReference = controller->mSourceOperandToConstantReference; - if (auto it = sourceOperandToLocationOfTemporary.find(operandIndex); - it != sourceOperandToLocationOfTemporary.end()) { - const uint32_t offset = it->second.offset; + if (auto it = sourceOperandToOffsetOfTemporary.find(operandIndex); + it != sourceOperandToOffsetOfTemporary.end()) { + const uint32_t offset = 
it->second; const std::unique_ptr<MemoryAshmem>& memory = controller->mTemporaries; return Buffer(memory->getPointer() + offset, memory->getSize() - offset); } else if (auto it = sourceOperandToInputIndex.find(operandIndex); @@ -1504,11 +1091,9 @@ } int ExecutionPlan::next(std::shared_ptr<Controller> controller, - std::shared_ptr<StepExecutor>* executor, SharedBurst* burstController, - const std::vector<OutputShape>* mainModelOutputShapes, + std::shared_ptr<StepExecutor>* executor, + std::shared_ptr<ExecutionBurstController>* burstController, int syncFdOfLastStep) const { - CHECK(mState == COMPOUND); - controller->mLastStepSyncFd = syncFdOfLastStep; *executor = nullptr; if (burstController != nullptr) { @@ -1522,13 +1107,39 @@ return ANEURALNETWORKS_OP_FAILED; } - return nextCompound(controller, executor, burstController, mainModelOutputShapes); + if (mState == EMPTY) { + CHECK_EQ(controller->mNextStepIndex, 0u); // end + controller->mNextStepIndex = Controller::kBadStepIndex; + return ANEURALNETWORKS_NO_ERROR; + } + + if (mState == SIMPLE) { + if (controller->mNextStepIndex == 0) { + // First (and only) step. 
+ auto simpleBody = simple(); + *executor = std::make_shared<StepExecutor>(controller->mExecutionBuilder, + simpleBody->mModel, simpleBody->mDevice, + simpleBody->mPreparedModel); + (*executor)->mapInputsAndOutputsTrivially(); + if (burstController != nullptr && controller->mBurstBuilder != nullptr) { + *burstController = controller->mBurstBuilder->getControllerAt(0); + } + controller->mFallbackNextStepIndex = 0; + controller->mNextStepIndex = 1; + return ANEURALNETWORKS_NO_ERROR; + } + + CHECK_EQ(controller->mNextStepIndex, 1u); // end + controller->mNextStepIndex = Controller::kBadStepIndex; + return ANEURALNETWORKS_NO_ERROR; + } + + return nextCompound(controller, executor, burstController); } int ExecutionPlan::nextCompound(std::shared_ptr<Controller> controller, std::shared_ptr<StepExecutor>* executor, - SharedBurst* burstController, - const std::vector<OutputShape>* mainModelOutputShapes) const { + std::shared_ptr<ExecutionBurstController>* burstController) const { if (controller->mNextStepIndex == Controller::kBadStepIndex) { return ANEURALNETWORKS_OP_FAILED; } @@ -1541,13 +1152,13 @@ const auto& logicalStep = compoundBody->mSteps[controller->mNextStepIndex]; if (const IfStep* step = logicalStep->tryIfStep()) { - return nextCompound(step, controller, executor, burstController, mainModelOutputShapes); + return nextCompound(step, controller, executor, burstController); } else if (const WhileStep* step = logicalStep->tryWhileStep()) { - return nextCompound(step, controller, executor, burstController, mainModelOutputShapes); + return nextCompound(step, controller, executor, burstController); } else if (const GotoStep* step = logicalStep->tryGotoStep()) { - return nextCompound(step, controller, executor, burstController, mainModelOutputShapes); + return nextCompound(step, controller, executor, burstController); } else if (const ExecutionStep* step = logicalStep->tryExecutionStep()) { - return nextCompound(step, controller, executor, burstController, 
mainModelOutputShapes); + return nextCompound(step, controller, executor, burstController); } else { CHECK(false) << "Unknown step variant"; return ANEURALNETWORKS_BAD_STATE; @@ -1556,24 +1167,16 @@ int ExecutionPlan::nextCompound(const ExecutionStep* step, std::shared_ptr<Controller> controller, std::shared_ptr<StepExecutor>* executor, - SharedBurst* burstController, - const std::vector<OutputShape>* mainModelOutputShapes) const { + std::shared_ptr<ExecutionBurstController>* burstController) const { VLOG(EXECUTION) << "next: Step#" << controller->mNextStepIndex << ": execute on " << step->getDevice()->getName(); - - NN_RETURN_IF_ERROR(controller->mDynamicTemporaries.allocate(step->getIndex())); - controller->mDynamicTemporaries.vlogDump("finished allocating for a step"); - - *executor = std::make_shared<StepExecutor>(controller->mExecutionBuilder, step->getStepModel(), - step->getDevice(), step->getPreparedStepModel(), - /*reusable=*/false, step, - &controller->mDynamicTemporaries); - + *executor = + std::make_shared<StepExecutor>(controller->mExecutionBuilder, step->getStepModel(), + step->getDevice(), step->getPreparedStepModel(), step); step->mapInputsAndOutputs( - *executor, mainModelOutputShapes, controller->mTemporaries.get(), - controller->mSourceOperandToLocationOfTemporary, controller->mDynamicTemporaries, - controller->mSourceOperandToInputIndex, controller->mSourceOperandToOutputIndex, - controller->mSourceOperandToConstantReference); + *executor, controller->mTemporaries.get(), + controller->mSourceOperandToOffsetOfTemporary, controller->mSourceOperandToInputIndex, + controller->mSourceOperandToOutputIndex, controller->mSourceOperandToConstantReference); if (burstController != nullptr && controller->mBurstBuilder != nullptr) { *burstController = controller->mBurstBuilder->getControllerAt(controller->mNextStepIndex); } @@ -1589,19 +1192,19 @@ VLOG(EXECUTION) << "mapping input " << toString(innerOperand) << " from " << toString(outerOperand); #ifdef 
NN_DEBUGGABLE - CHECK_LE(mSourceOperandToLocationOfTemporary.count(innerOperand) + + CHECK_LE(mSourceOperandToOffsetOfTemporary.count(innerOperand) + mSourceOperandToInputIndex.count(innerOperand) + mSourceOperandToOutputIndex.count(innerOperand) + mSourceOperandToConstantReference.count(innerOperand), 1u); #endif - mSourceOperandToLocationOfTemporary.erase(innerOperand); + mSourceOperandToOffsetOfTemporary.erase(innerOperand); mSourceOperandToInputIndex.erase(innerOperand); mSourceOperandToOutputIndex.erase(innerOperand); mSourceOperandToConstantReference.erase(innerOperand); - if (auto it = mSourceOperandToLocationOfTemporary.find(outerOperand); - it != mSourceOperandToLocationOfTemporary.end()) { - mSourceOperandToLocationOfTemporary.emplace(innerOperand, it->second); + if (auto it = mSourceOperandToOffsetOfTemporary.find(outerOperand); + it != mSourceOperandToOffsetOfTemporary.end()) { + mSourceOperandToOffsetOfTemporary.emplace(innerOperand, it->second); } else if (auto it = mSourceOperandToInputIndex.find(outerOperand); it != mSourceOperandToInputIndex.end()) { mSourceOperandToInputIndex.emplace(innerOperand, it->second); @@ -1623,15 +1226,15 @@ VLOG(EXECUTION) << "mapping output " << toString(innerOperand) << " from " << toString(outerOperand); #ifdef NN_DEBUGGABLE - CHECK_LE(mSourceOperandToLocationOfTemporary.count(innerOperand) + + CHECK_LE(mSourceOperandToOffsetOfTemporary.count(innerOperand) + mSourceOperandToOutputIndex.count(innerOperand), 1u); #endif - mSourceOperandToLocationOfTemporary.erase(innerOperand); + mSourceOperandToOffsetOfTemporary.erase(innerOperand); mSourceOperandToOutputIndex.erase(innerOperand); - if (auto it = mSourceOperandToLocationOfTemporary.find(outerOperand); - it != mSourceOperandToLocationOfTemporary.end()) { - mSourceOperandToLocationOfTemporary.emplace(innerOperand, it->second); + if (auto it = mSourceOperandToOffsetOfTemporary.find(outerOperand); + it != mSourceOperandToOffsetOfTemporary.end()) { + 
mSourceOperandToOffsetOfTemporary.emplace(innerOperand, it->second); } else if (auto it = mSourceOperandToOutputIndex.find(outerOperand); it != mSourceOperandToOutputIndex.end()) { mSourceOperandToOutputIndex.emplace(innerOperand, it->second); @@ -1655,14 +1258,10 @@ return n; } -// Invocations of Controller::setInput/setOutput in this function must match with invocations of -// StepRoleAnalyzer::setUsedBy in the IfStep branch in -// ExecutionPlan::CompoundBody::findMemoryStepRoles. int ExecutionPlan::nextCompound(const IfStep* step, std::shared_ptr<Controller> controller, std::shared_ptr<StepExecutor>* executor, - SharedBurst* burstController, - const std::vector<OutputShape>* mainModelOutputShapes) const { - VLOG(EXECUTION) << "next: " << *step; + std::shared_ptr<ExecutionBurstController>* burstController) const { + VLOG(EXECUTION) << "next: " << toString(*step); // If the last step has a sync fence, wait for it to signal before reading the condition value. // This is safe because the steps are serialized when doing fenced compute. NN_RETURN_IF_ERROR(controller->waitForLastStepSyncFence()); @@ -1695,25 +1294,21 @@ // step->outerOutputOperands[i] to implement double buffering. controller->setOutput(step->outerOutputOperands[i], branchOutputOperands[i]); } - return nextCompound(controller, executor, burstController, mainModelOutputShapes); + return nextCompound(controller, executor, burstController); } -// Invocations of Controller::setInput in this function must match with invocations of -// StepRoleAnalyzer::setUsedBy in the WhileStep branch in -// ExecutionPlan::CompoundBody::findMemoryStepRoles. 
int ExecutionPlan::nextCompound(const WhileStep* step, std::shared_ptr<Controller> controller, std::shared_ptr<StepExecutor>* executor, - SharedBurst* burstController, - const std::vector<OutputShape>* mainModelOutputShapes) const { + std::shared_ptr<ExecutionBurstController>* burstController) const { WhileState& state = controller->mWhileState[controller->mNextStepIndex]; if (state.stage == WhileState::EVALUATE_CONDITION) { state.iteration = state.iteration == WhileState::kOutsideLoop ? 0 : state.iteration + 1; - VLOG(EXECUTION) << "next: " << *step << ": iteration " << state.iteration + VLOG(EXECUTION) << "next: " << toString(*step) << ": iteration " << state.iteration << ": evaluating condition"; controller->mNextStepIndex = step->condStepIndex; if (state.iteration == 0) { - state.startTime = Clock::now(); + state.startTime = std::chrono::steady_clock::now(); } // iteration = 0 cond inputs = outer inputs @@ -1732,13 +1327,13 @@ } state.stage = WhileState::EVALUATE_BODY; - return nextCompound(controller, executor, burstController, mainModelOutputShapes); + return nextCompound(controller, executor, burstController); } CHECK(state.stage == WhileState::EVALUATE_BODY); std::chrono::nanoseconds timeoutDuration( controller->mExecutionBuilder->getLoopTimeoutDuration()); - auto duration = Clock::now() - state.startTime; + auto duration = std::chrono::steady_clock::now() - state.startTime; if (duration > timeoutDuration) { LOG(ERROR) << "WHILE loop timed out after " << std::chrono::duration_cast<std::chrono::milliseconds>(duration).count() @@ -1752,7 +1347,7 @@ bool condValue; NN_RETURN_IF_ERROR(readConditionValue(controller, step->condOutputOperand, &condValue)); if (condValue) { - VLOG(EXECUTION) << "next: " << *step << ": iteration " << state.iteration + VLOG(EXECUTION) << "next: " << toString(*step) << ": iteration " << state.iteration << ": evaluating body"; controller->mNextStepIndex = step->bodyStepIndex; @@ -1774,15 +1369,15 @@ #ifdef NN_DEBUGGABLE 
CHECK_EQ(controller->mSourceOperandToInputIndex.count(outputOperand), 0u); CHECK_EQ(controller->mSourceOperandToOutputIndex.count(outputOperand), 0u); - CHECK_EQ(controller->mSourceOperandToLocationOfTemporary.count(outputOperand), 1u); - CHECK_EQ(controller->mSourceOperandToLocationOfTemporary2.count(outputOperand), 1u); + CHECK_EQ(controller->mSourceOperandToOffsetOfTemporary.count(outputOperand), 1u); + CHECK_EQ(controller->mSourceOperandToOffsetOfTemporary2.count(outputOperand), 1u); #endif - std::swap(controller->mSourceOperandToLocationOfTemporary[outputOperand], - controller->mSourceOperandToLocationOfTemporary2[outputOperand]); + std::swap(controller->mSourceOperandToOffsetOfTemporary[outputOperand], + controller->mSourceOperandToOffsetOfTemporary2[outputOperand]); } } } else { - VLOG(EXECUTION) << "next: " << *step << ": iteration " << state.iteration + VLOG(EXECUTION) << "next: " << toString(*step) << ": iteration " << state.iteration << ": exiting loop"; controller->mNextStepIndex = step->exitStepIndex; @@ -1820,32 +1415,21 @@ } state.stage = WhileState::EVALUATE_CONDITION; - return nextCompound(controller, executor, burstController, mainModelOutputShapes); + return nextCompound(controller, executor, burstController); } int ExecutionPlan::nextCompound(const GotoStep* step, std::shared_ptr<Controller> controller, std::shared_ptr<StepExecutor>* executor, - SharedBurst* burstController, - const std::vector<OutputShape>* mainModelOutputShapes) const { - VLOG(EXECUTION) << "next: " << *step; + std::shared_ptr<ExecutionBurstController>* burstController) const { + VLOG(EXECUTION) << "next: " << toString(*step); controller->mNextStepIndex = step->gotoStepIndex; - return nextCompound(controller, executor, burstController, mainModelOutputShapes); -} - -std::shared_ptr<StepExecutor> ExecutionPlan::makeStepExecutor( - bool reusable, ExecutionBuilder* executionBuilder) const { - auto simpleBody = simple(); - auto executor = 
std::make_shared<StepExecutor>(executionBuilder, simpleBody->mModel, - simpleBody->mDevice, simpleBody->mPreparedModel, - reusable); - executor->mapInputsAndOutputsTrivially(); - return executor; + return nextCompound(controller, executor, burstController); } void ExecutionPlan::becomeCompoundIfEmpty() { CHECK(mState != SIMPLE); if (mState == EMPTY) { - mBody = new CompoundBody(this); + mBody = new CompoundBody(); mState = COMPOUND; } } @@ -1886,17 +1470,10 @@ void ExecutionPlan::becomeSingleStep(const std::shared_ptr<Device> device, const ModelBuilder* model) { CHECK(mState == EMPTY); - mBody = new SimpleBody(device, model, mCacheInfo, mToken); + mBody = new SimpleBody(device, model, mCacheDir, mToken); mState = SIMPLE; } -void ExecutionPlan::recordOutputDef(SourceOperandIndex sourceOperandIndex, uint32_t stepIndex) { - auto [it, isNew] = - compound()->mOutputToDefiningExecutionStep.emplace(sourceOperandIndex, stepIndex); - CHECK(isNew) << "Step " << stepIndex << " redefines output operand " - << toString(sourceOperandIndex) << " already defined by step " << it->second; -} - void ExecutionPlan::recordTemporaryDef(SourceOperandIndex sourceOperandIndex, uint32_t stepIndex) { auto [it, isNew] = compound()->mTemporaryToDefiningExecutionStep.emplace(sourceOperandIndex, stepIndex); @@ -1948,28 +1525,8 @@ return compound()->mSteps; } -std::set<uint32_t> ExecutionPlan::forTest_flatGetDynamicTemporaries() const { - CHECK_EQ(getSourceModels().size(), size_t(1)); - std::set<uint32_t> ret; - forEachDynamicTemporary([&ret](SourceOperandIndex dynTemp, const Operand&, uint32_t) { - ret.insert(dynTemp.second); - }); - return ret; -} - -bool ExecutionPlan::hasDynamicTemporaries() const { - return mBody->hasDynamicTemporaries(); -} - -bool ExecutionPlan::forTest_hasStepModelWithNoInputsOrNoOutputs() const { - return mBody->hasStepModelWithNoInputsOrNoOutputs(); -} - -bool ExecutionPlan::CompoundBody::hasStepModelWithNoInputsOrNoOutputs() const { - return std::any_of(mSteps.begin(), 
mSteps.end(), [](const auto& logicalStep) { - const ExecutionStep* step = logicalStep->tryExecutionStep(); - return step != nullptr && step->hasNoInputsOrNoOutputs(); - }); +bool ExecutionPlan::forTest_hasStepModelOutputsOfUnknownSize() const { + return mBody->hasStepModelOutputsOfUnknownSize(); } const uint8_t* ExecutionPlan::forTest_simpleGetCacheToken() const { @@ -1986,20 +1543,6 @@ } } -SourceOperandIndex ExecutionPlan::getInputSourceOperand(uint32_t index) const { - const auto* mainModel = getSourceModels().getModel(kMainModelInSourceModels); - CHECK_LT(index, mainModel->inputCount()); - const auto operandIndex = mainModel->getInputOperandIndex(index); - return {kMainModelInSourceModels, operandIndex}; -} - -SourceOperandIndex ExecutionPlan::getOutputSourceOperand(uint32_t index) const { - const auto* mainModel = getSourceModels().getModel(kMainModelInSourceModels); - CHECK_LT(index, mainModel->outputCount()); - const auto operandIndex = mainModel->getOutputOperandIndex(index); - return {kMainModelInSourceModels, operandIndex}; -} - void ExecutionPlan::SimpleBody::forEachStepRoleOfInput(uint32_t index, const StepRoleCallback& callback) const { callback(mPreparedModel.get(), IOType::INPUT, index); @@ -2010,70 +1553,48 @@ callback(mPreparedModel.get(), IOType::OUTPUT, index); } -// Map an input role of the main model to the input/output roles in the step models. +// Map an input role of the main model to the input/output roles in the step models: +// - An input role of the main model may be used as an input of multiple step models. +// - An input role of the main model should not be used as an output of any step model. 
void ExecutionPlan::CompoundBody::forEachStepRoleOfInput(uint32_t index, const StepRoleCallback& callback) const { - const auto sourceOperandIndex = mPlan->getInputSourceOperand(index); - forEachStepRoleOfSourceOperand(sourceOperandIndex, callback); + for (const auto& logicalStep : mSteps) { + if (const ExecutionStep* step = logicalStep->tryExecutionStep()) { + // Model input as step model input. + const auto& inputMapping = step->getInputIndexStepModelToMainModel(); + for (uint32_t i = 0; i < inputMapping.size(); i++) { + if (inputMapping[i] == index) { + callback(step->getPreparedStepModel().get(), IOType::INPUT, i); + } + } + } + } } -// Map an output role of the main model to the input/output roles in the step models. +// Map an output role of the main model to the input/output roles in the step models: +// - An output role of the main model may only be used as one output of one single step model. +// - An output role of the main model may be used as an input of multiple step models. 
void ExecutionPlan::CompoundBody::forEachStepRoleOfOutput(uint32_t index, const StepRoleCallback& callback) const { - const auto sourceOperandIndex = mPlan->getOutputSourceOperand(index); - forEachStepRoleOfSourceOperand(sourceOperandIndex, callback); -} - -void ExecutionPlan::CompoundBody::forEachStepRoleOfSourceOperand( - const SourceOperandIndex& index, const StepRoleCallback& callback) const { - const auto it = mSourceOperandToStepRoles.find(index); - if (it == mSourceOperandToStepRoles.end()) return; - for (const auto& [stepIndex, type, ioIndex] : it->second) { - CHECK_LT(stepIndex, mSteps.size()); - const auto* step = mSteps[stepIndex]->executionStep(); - callback(step->getPreparedStepModel().get(), type, ioIndex); - } -} - -MemoryPreference ExecutionPlan::getMemoryPreference(IOType type, uint32_t index) const { - CHECK(mState == SIMPLE || mState == COMPOUND); - if (mState == SIMPLE) { - return simple()->mPreparedModel->getMemoryPreference(); - } else { - const auto sourceOperandIndex = type == IOType::INPUT ? 
getInputSourceOperand(index) - : getOutputSourceOperand(index); - return compound()->getMemoryPreferenceOfSourceOperand(sourceOperandIndex); - } -} - -MemoryPreference ExecutionPlan::CompoundBody::getMemoryPreferenceOfSourceOperand( - const SourceOperandIndex& index) const { - uint32_t alignment = kMinMemoryAlignment, padding = kMinMemoryPadding; - forEachStepRoleOfSourceOperand( - index, [&alignment, &padding](const auto* preparedModel, IOType, uint32_t) { - const auto preference = preparedModel->getMemoryPreference(); - alignment = std::max(alignment, preference.alignment); - padding = std::max(padding, preference.padding); - }); - return {alignment, padding}; -} - -void ExecutionPlan::forEachDynamicTemporary( - const std::function<void(SourceOperandIndex, const Operand&, uint32_t definingStepIndex)>& - fn) const { - if (mState != COMPOUND) { - return; - } - - for (const auto& logicalStep : compound()->mSteps) { + bool found = false; + for (const auto& logicalStep : mSteps) { if (const ExecutionStep* step = logicalStep->tryExecutionStep()) { - const uint32_t stepIndex = step->getIndex(); - const uint32_t sourceModelIndex = step->getSourceModelIndex(); - for (const auto& entry : step->getTempsAsStepModelOutputs()) { - const auto sourceOperandIndex = SourceOperandIndex(sourceModelIndex, entry.first); - const auto& sourceOperand = getSourceOperand(sourceOperandIndex); - if (hasUnknownSize(sourceOperand)) { - fn(sourceOperandIndex, sourceOperand, stepIndex); + // Model output as step model output. + if (!found) { + const auto& outputMapping = step->getOutputIndexStepModelToMainModel(); + for (uint32_t i = 0; i < outputMapping.size(); i++) { + if (outputMapping[i] == index) { + callback(step->getPreparedStepModel().get(), IOType::OUTPUT, i); + found = true; + break; + } + } + } + // Model output as step model input. 
+ const auto& inputToOutputMapping = step->getOutputsAsStepModelInputsIndexToMainModel(); + for (uint32_t i = 0; i < inputToOutputMapping.size(); i++) { + if (inputToOutputMapping[i] == index) { + callback(step->getPreparedStepModel().get(), IOType::INPUT, i); } } } @@ -2082,15 +1603,15 @@ int ModelBuilder::partitionTheWork(const std::vector<std::shared_ptr<Device>>& devices, uint32_t preference, uint32_t priority, - const OptionalTimePoint& deadline, ExecutionPlan* plan, - int simulateFailureResultCode) const { + const std::optional<Deadline>& deadline, + ExecutionPlan* plan) const { uint32_t sourceModelIndex = plan->getSourceModels().addModel(this); NN_RETURN_IF_ERROR(partitionTheWorkInternal(sourceModelIndex, devices, preference, priority, deadline, plan)); - int n = plan->finish(preference, priority, deadline, simulateFailureResultCode); + int n = plan->finish(preference, priority, deadline); if (VLOG_IS_ON(COMPILATION)) { VLOG(COMPILATION) << "ModelBuilder::partitionTheWork: source model: "; - logModelToInfo(makeModel()); + logModelToInfo(makeHidlModel()); plan->dump(); } return n; @@ -2099,7 +1620,7 @@ int ModelBuilder::partitionTheWorkInternal(uint32_t sourceModelIndex, const std::vector<std::shared_ptr<Device>>& devices, uint32_t preference, uint32_t priority, - const OptionalTimePoint& deadline, + const std::optional<Deadline>& deadline, ExecutionPlan* plan) const { // This function uses a heuristic approach to partitioning the graph. // It should be good enough for the first release. @@ -2148,24 +1669,12 @@ // (see LogicalStep). std::vector<std::queue<uint32_t>> perDeviceQueue(deviceCount + 1); - // This helper function produces a device name. 
- auto deviceName = [&devices, kControlFlowInterpreter, - deviceCount](int deviceIndex) -> std::string { - if (deviceIndex == kControlFlowInterpreter) { - return "NNAPI"; - } else if (deviceIndex < 0 || size_t(deviceIndex) >= deviceCount) { - return "{unknown}"; - } else { - return devices.at(deviceIndex)->getName(); - } - }; - // This helper function enqueues the operation on the appropriate queue. auto enqueueOnAppropriateDevice = [&](uint32_t operationIndex) { int deviceIndex = bestDeviceForOperation[operationIndex]; perDeviceQueue[deviceIndex].push(operationIndex); VLOG(COMPILATION) << "enqueueOnAppropriateDevice " << operationIndex << " onto " - << deviceIndex << " (" << deviceName(deviceIndex) << ")"; + << deviceIndex; }; // This helper function finds a device that has operations ready to process. @@ -2184,14 +1693,11 @@ }; OperandTracker tracker(this, enqueueOnAppropriateDevice); - // For each iteration of this loop, we'll create either an execution step or - // an interpreted control flow construct (including nested execution steps - // and interpreted control flow constructs). + // For each iteration of this loop, we'll create an execution step. while (true) { // Find the device we'll do this step for. 
int deviceIndex = findNextDeviceToProcess(); - VLOG(COMPILATION) << "findNextDeviceToProcess: " << deviceIndex << " (" - << deviceName(deviceIndex) << ")"; + VLOG(COMPILATION) << "findNextDeviceToProcess: " << deviceIndex; if (deviceIndex < 0) { break; } @@ -2333,7 +1839,7 @@ bodyModelIndex, bodyModel->getOutputOperandIndex(i)); } } else { - CHECK(false) << operation.type << " is not a control flow operation"; + CHECK(false) << toString(operation.type) << " is not a control flow operation"; } tracker.markProcessed(operationIndex, enqueueOnAppropriateDevice); } @@ -2361,7 +1867,7 @@ float ModelBuilder::getPerformance(uint32_t preference, const std::shared_ptr<Device> device, uint32_t operationIndex) const { - auto applyPreference = [preference](const Capabilities::PerformanceInfo& perf) { + auto applyPreference = [preference](const PerformanceInfo& perf) { return preference == ANEURALNETWORKS_PREFER_LOW_POWER ? perf.powerUsage : perf.execTime; }; @@ -2485,7 +1991,7 @@ int ModelBuilder::findBestDeviceForEachOperation( uint32_t preference, const std::vector<std::shared_ptr<Device>>& devices, std::vector<int>* bestDeviceForOperation) const { - const MetaModel metaModel(makeModel(), DeviceManager::get()->strictSlicing()); + const MetaModel metaModel(makeHidlModel(), DeviceManager::get()->strictSlicing()); const size_t deviceCount = devices.size(); std::vector<CanDo> canDo(deviceCount); @@ -2514,19 +2020,14 @@ } } else { float bestPerfVal = 0.0; // Do not check bestPerfVal if bestChoice < 0. 
- bool bestIsUpdatable = false; for (size_t deviceIndex = 0; deviceIndex < deviceCount; deviceIndex++) { const auto& device = devices[deviceIndex]; if (canDo[deviceIndex].check(operationIndex)) { const float perfVal = getPerformance(preference, device, operationIndex); - const bool isUpdatable = device->isUpdatable(); - const bool deviceIsPreferred = (device == DeviceManager::getCpuDevice() || - (isUpdatable && !bestIsUpdatable)); if (bestChoice < 0 || perfVal < bestPerfVal || - (perfVal == bestPerfVal && deviceIsPreferred)) { + (perfVal == bestPerfVal && device == DeviceManager::getCpuDevice())) { bestChoice = deviceIndex; bestPerfVal = perfVal; - bestIsUpdatable = isUpdatable; } } else { // Somewhat noisy logging, but only place where the user of NNAPI can get @@ -2535,13 +2036,13 @@ // Logs O(operationCount * deviceCount) times, but typically deviceCount is // very small. VLOG(COMPILATION) << "Device " << device->getName() << " can't do operation " - << operation.type << ":" << operationIndex; + << toString(operation.type); } } } if (bestChoice < 0) { - LOG(ERROR) << "No driver can do operation " << operation.type; + LOG(ERROR) << "No driver can do operation " << toString(operation.type); return ANEURALNETWORKS_BAD_DATA; } else if (devices[bestChoice] == DeviceManager::getCpuDevice() && supportedByControlFlowInterpreter(operationIndex)) { @@ -2549,12 +2050,13 @@ // to delegate referenced models. 
const int kControlFlowInterpreter = deviceCount; (*bestDeviceForOperation)[operationIndex] = kControlFlowInterpreter; - VLOG(COMPILATION) << "ModelBuilder::findBestDeviceForEachOperation(" << operation.type - << ":" << operationIndex << ") = -1 (NNAPI)"; + VLOG(COMPILATION) << "ModelBuilder::findBestDeviceForEachOperation(" + << toString(operation.type) << ") = -1" + << " (NNAPI)"; } else { (*bestDeviceForOperation)[operationIndex] = bestChoice; - VLOG(COMPILATION) << "ModelBuilder::findBestDeviceForEachOperation(" << operation.type - << ":" << operationIndex << ") = " << bestChoice << " (" + VLOG(COMPILATION) << "ModelBuilder::findBestDeviceForEachOperation(" + << toString(operation.type) << ") = " << bestChoice << " (" << devices[bestChoice]->getName() << ")"; } }
diff --git a/runtime/ExecutionPlan.h b/runtime/ExecutionPlan.h index f094ae3..d1e7d94 100644 --- a/runtime/ExecutionPlan.h +++ b/runtime/ExecutionPlan.h
@@ -19,30 +19,27 @@ #ifndef ANDROID_FRAMEWORKS_ML_NN_RUNTIME_EXECUTION_PLAN_H #define ANDROID_FRAMEWORKS_ML_NN_RUNTIME_EXECUTION_PLAN_H -#include <LegacyUtils.h> -#include <TokenHasher.h> #include <android-base/logging.h> -#include <nnapi/IBurst.h> -#include <nnapi/Types.h> +#include <openssl/sha.h> -#include <algorithm> #include <chrono> -#include <functional> #include <map> #include <memory> #include <ostream> #include <set> #include <string> -#include <tuple> #include <unordered_map> #include <utility> #include <variant> #include <vector> +#include "HalInterfaces.h" #include "Memory.h" #include "ModelArgumentInfo.h" #include "ModelBuilder.h" #include "NeuralNetworks.h" +#include "TokenHasher.h" +#include "Utils.h" namespace android { namespace nn { @@ -51,13 +48,13 @@ class CompilationBuilder; class Device; class ExecutionBuilder; +class ExecutionBurstController; class ExecutionPlan; -class RuntimeMemory; -class RuntimePreparedModel; +class Memory; +class PreparedModel; class StepExecutor; struct ConstantReferenceLocation; -struct CacheInfo; // NNAPI Control Flow allows referring to an NNAPI model inside another NNAPI // model using OperandType::SUBGRAPH. For example, an IF operation within a @@ -83,16 +80,9 @@ // output of a partition. For ExecutionStep, the inputs and outputs of the // step model are boundary operands; for IfStep and WhileStep, the inputs and // outputs of the corresponding operation are boundary operands. -// - A partition boundary static temporary is a partition boundary -// operand which is of lifetime TEMPORARY_VARIABLE in the source model and -// whose dimensions are fully specified. -// - A partition boundary dynamic temporary is a partition boundary -// operand which is of lifetime TEMPORARY_VARIABLE in the source model and -// whose dimensions are not fully specified. -// - A main execution is the execution of a main model. // -// Referenced models can be sources of partition boundary operands. 
For example, -// this happens when a referenced model is partitioned into one or more +// Referenced models can be sources of parition boundary operands. For example, +// this happens when a referenced model is paritioned into one or more // LogicalSteps. // // (model index, operand index within model) @@ -115,118 +105,6 @@ std::vector<const ModelBuilder*> mModels; }; -// Represents all partition boundary dynamic temporaries for a particular main -// execution. -// -// Usage pattern: -// - declare() every partition boundary dynamic temporary. -// - endDeclarations(). After this point, lookup() is permitted. -// - Before executing an ExecutionStep, call allocate(). -// - After executing an ExecutionStep, call redeclare() for every partition -// boundary dynamic temporary for which we've learned or guessed more about -// the dimensions or length. -// -// Each partition boundary temporary has a location assigned by allocate() for -// its defining step (see declare() and allocate()). That location remains -// valid until redeclare() increases the length of some temporary in its defining -// step or allocate() is called again for its defining step. -class DynamicTemporaries { - DISALLOW_COPY_AND_ASSIGN(DynamicTemporaries); - - public: - DynamicTemporaries() = default; - DynamicTemporaries(DynamicTemporaries&&) = default; - DynamicTemporaries& operator=(DynamicTemporaries&&) = default; - - // Declare a dynamic temporary. stepIndex is the step that defines the - // temporary (i.e., in which the temporary appears as an operation output - // operand). initialDimensions and initialLength indicate what we know or - // (in the case of length) guess about those properties. - void declare(SourceOperandIndex sourceOperandIndex, uint32_t stepIndex, - const Dimensions& initialDimensions, uint32_t initialLength, uint32_t alignment, - uint32_t padding); - - // Indicate that we've finished declaring all dynamic temporaries. 
- void endDeclarations() { - CHECK(!mDeclared); - mDeclared = true; - } - - // Redeclare a dynamic temporary, indicating what we've learned about it. - // This may invalidate the location of temporaries defined by its step. - // Returns true if dimensions or length changed, false otherwise. - bool redeclare(SourceOperandIndex sourceOperandIndex, const Dimensions& newDimensions, - uint32_t newLength); - - // Ensure that all dynamic temporaries defined by the specified step have - // locations. The return value is a ResultCode (e.g., - // ANEURALNETWORKS_NO_ERROR). - // - // Even if dynamic temporaries have already been allocated for this step, - // this call may reallocate them. A reallocation is not guaranteed to - // preserve location (LocationAndShape.memory, LocationAndShape.offset) or - // contents of temporaries. - int allocate(uint32_t stepIndex); - - // Do the dynamic temporaries defined by this step have valid allocations? - // (Will be true if there are no dynamic temporaries defined by this step.) - bool allocated(uint32_t stepIndex) const; - - // Dump information to VLOG(EXECUTION). - void vlogDump(const char* context = nullptr) const; - - // If the specified operand is a dynamic temporary, return location and - // shape information; otherwise, return std::nullopt. - // - // If temporary exists but does not have a valid allocation, then: - // - If mustBeAllocated == true, then trigger a failed CHECK(). - // - If mustBeAllocated == false, then memory == nullptr and offset == ~0. - struct LocationAndShape { - const RuntimeMemory* memory; - uint32_t offset; - const Dimensions* dimensions; - uint32_t paddedLength; - }; - std::optional<LocationAndShape> lookup(SourceOperandIndex sourceOperandIndex, - bool mustBeAllocated = true) const; - - // Have any dynamic temporaries been declared? 
- bool empty() const { return mSourceOperandToTemporary.empty(); } - - private: - // The same as LocationAndShape, except that: - // - the base of the location is represented not by memory but by defining stepIndex - // - it additionally contains information about the preferred alignment and padding - struct InternalLocationAndShape { - uint32_t stepIndex; - uint32_t offset; - Dimensions dimensions; - uint32_t paddedLength; - uint32_t alignment; - uint32_t padding; - }; - std::map<SourceOperandIndex, InternalLocationAndShape> mSourceOperandToTemporary; - - // Every dynamic temporary defined at a given stepIndex. - std::map<uint32_t, std::vector<SourceOperandIndex>> mStepIndexToSourceOperandIndexes; - - std::map<uint32_t, std::unique_ptr<MemoryAshmem>> mStepIndexToMemory; - - // For a given defining stepIndex, we consider either all its dynamic - // temporaries to be allocated (have valid locations) or none of them to be. - std::set<uint32_t> mAllocatedStepIndexes; - - // Has endDeclarations() been called? - bool mDeclared = false; -}; - -// The location of a static temporary. -struct StaticTemporaryLocation { - // The offset relative to ExecutionPlan::Controller::mTemporaries during execution. - uint32_t offset; - uint32_t paddedLength; -}; - // An excerpt of a source model to be run by a specific device. 
class ExecutionStep { public: @@ -242,8 +120,6 @@ int addOperand(uint32_t sourceOperandIndex, uint32_t* stepOperandIndex, OperandKind kind); // Each container entry is of the form (source model operand index, step model operand index) - const RemapVectorType& getStepModelInputs() const { return mStepModelInputs; } - const RemapVectorType& getStepModelOutputs() const { return mStepModelOutputs; } const RemapVectorType& getModelInputs() const { return mModelInputs; } const RemapVectorType& getModelOutputs() const { return mModelOutputs; } const RemapVectorType& getTempsAsStepModelInputs() const { return mTempsAsStepModelInputs; } @@ -261,14 +137,8 @@ return mOutputsAsStepModelInputsIndexToMainModel; } - const std::set<uint32_t>& getModelOutputsThatAreDownstreamInputs() const { - return mModelOutputsThatAreDownstreamInputs; - } - - uint32_t getIndex() const { return mIndex; } uint32_t getSourceModelIndex() const { return mSourceModelIndex; } - void declareModelOutputIsDownstreamInput(uint32_t mainModelOutputIndex); void recordTempAsStepModelOutput(uint32_t stepOperandIndex); // If this step has a step model output of unknown size, sets @@ -281,33 +151,20 @@ std::shared_ptr<Device> getDevice() const { return mDevice; } // only available after calling finishStepModel() - std::shared_ptr<RuntimePreparedModel> getPreparedStepModel() const { - return mPreparedStepModel; - } + std::shared_ptr<PreparedModel> getPreparedStepModel() const { return mPreparedStepModel; } // Map inputs and outputs from ExecutionBuilder to StepExecutor. // // This method only reads map entries for which the first element of // SourceOperandIndex is mSourceModelIndex. - // - // mainModelOutputShapes may be nullptr if the only main model outputs that are - // inputs of this step are of fully specified shape. 
void mapInputsAndOutputs( - std::shared_ptr<StepExecutor> stepExecutor, - const std::vector<OutputShape>* mainModelOutputShapes, - const RuntimeMemory* temporaryMemory, // for static temporaries - const std::map<SourceOperandIndex, StaticTemporaryLocation>& - sourceOperandToLocationOfTemporary, // for static temporaries - const DynamicTemporaries& dynamicTemporaries, + std::shared_ptr<StepExecutor> stepExecutor, const Memory* temporaryMemory, + const std::map<SourceOperandIndex, uint32_t>& sourceOperandToOffsetOfTemporary, const std::map<SourceOperandIndex, uint32_t>& sourceOperandToInputIndex, const std::map<SourceOperandIndex, uint32_t>& sourceOperandToOutputIndex, const std::map<SourceOperandIndex, ConstantReferenceLocation>& sourceOperandToConstantReference) const; - bool hasNoInputsOrNoOutputs() const { - return mStepModelInputs.empty() || mStepModelOutputs.empty(); - } - void dump() const; // For test only, get the transformed cache token. @@ -326,7 +183,7 @@ uint32_t mSourceModelIndex; ModelBuilder mStepModel; // An excerpt of a source model to be run by one device. std::shared_ptr<Device> mDevice; - std::shared_ptr<RuntimePreparedModel> mPreparedStepModel; + std::shared_ptr<PreparedModel> mPreparedStepModel; // All inputs of this step model: // (source model operand index, step model operand index) @@ -334,8 +191,7 @@ // Depending on whether the source operand is an input or output of the main // model, the memory should be mapped using // ExecutionPlan::CompoundBody::mSourceOperandToInputIndex, - // ExecutionPlan::Controller::mSourceOperandToLocationOfTemporary, or - // ExecutionPlan::Controller::mDynamicTemporaries, or + // ExecutionPlan::Controller::mSourceOperandToOffsetOfTemporary, or // ExecutionPlan::CompoundBody::mSourceOperandToOutputIndex. 
RemapVectorType mStepModelInputs; // All outputs of this step model: @@ -343,12 +199,11 @@ // // Depending on whether the source operand is an output of the main model, // the memory should be mapped using - // ExecutionPlan::CompoundBody::mSourceOperandToOutputIndex, - // ExecutionPlan::Controller::mSourceOperandToLocationOfTemporary, or - // ExecutionPlan::Controller::mDynamicTemporaries. + // ExecutionPlan::CompoundBody::mSourceOperandToOutputIndex or + // ExecutionPlan::Controller::mSourceOperandToOffsetOfTemporary. // - // mOutputIndexStepModelToMainModel and declareModelOutputIsDownstreamInput() - // rely on mModelOutputs being a prefix of mStepModelOutputs. + // mOutputIndexStepModelToMainModel relies on mModelOutputs being a prefix of + // mStepModelOutputs. RemapVectorType mStepModelOutputs; // Inputs of main model that are also inputs of this step model: // (main model operand index, step model operand index) @@ -392,10 +247,6 @@ // mOutputsAsStepModelInputs[i].first std::vector<uint32_t> mOutputsAsStepModelInputsIndexToMainModel; - // Step model output indexes (not operand indexes) that are outputs of the - // main model used as inputs to some other partition. - std::set<uint32_t> mModelOutputsThatAreDownstreamInputs; - // The compilation caching token. TokenHasher mToken; }; @@ -530,9 +381,9 @@ std::variant<ExecutionStep, IfStep, WhileStep, GotoStep> mStep; }; -std::ostream& operator<<(std::ostream& os, const IfStep& step); -std::ostream& operator<<(std::ostream& os, const WhileStep& step); -std::ostream& operator<<(std::ostream& os, const GotoStep& step); +std::string toString(const IfStep& step); +std::string toString(const WhileStep& step); +std::string toString(const GotoStep& step); // Describes the state of WhileStep. struct WhileState { @@ -544,7 +395,7 @@ // loop. uint64_t iteration = kOutsideLoop; // Time point when the loop started executing. 
- TimePoint startTime; + std::chrono::time_point<std::chrono::steady_clock> startTime; }; struct ConstantCopyLocation { @@ -553,18 +404,11 @@ }; struct ConstantReferenceLocation { - const RuntimeMemory* memory; + const Memory* memory; uint32_t offset; uint32_t length; }; -// A tuple of {execution_step_index, io_type, io_index} specifying an input/output role of an -// ExecutionStep. -using StepRole = std::tuple<uint32_t, IOType, uint32_t>; - -// A callback function that takes the prepared_model, io_type, and io_index of a step role. -using StepRoleCallback = std::function<void(const RuntimePreparedModel*, IOType, uint32_t)>; - class ExecutionPlan { public: ExecutionPlan(const ExecutionPlan&) = delete; @@ -573,8 +417,8 @@ ExecutionPlan() {} ~ExecutionPlan() { delete mBody; } - // Controller is part of the interface to a mechanism for performing a - // main execution in N steps. + // Controller is part of the interface to a mechanism for performing an + // execution in N steps. // // The value of N may not be known beforehand if the model contains WHILE // loops. See LogicalStep. @@ -596,24 +440,20 @@ static const size_t kBadStepIndex = ~size_t(0); + // A constructor for mState == SIMPLE. + Controller(const ExecutionPlan* plan, ExecutionBuilder* executionBuilder, + const BurstBuilder* burstBuilder); // A constructor for mState == COMPOUND. 
Controller(const ExecutionPlan* plan, ExecutionBuilder* executionBuilder, - const BurstBuilder* burstBuilder, - - // static temporaries - uint32_t totalSizeOfTemporaries, - std::map<SourceOperandIndex, StaticTemporaryLocation> - sourceOperandToLocationOfTemporary, - std::map<SourceOperandIndex, StaticTemporaryLocation> - sourceOperandToLocationOfTemporary2, - + const BurstBuilder* burstBuilder, uint32_t totalSizeOfTemporaries, + std::map<SourceOperandIndex, uint32_t> sourceOperandToOffsetOfTemporary, + std::map<SourceOperandIndex, uint32_t> sourceOperandToOffsetOfTemporary2, std::map<SourceOperandIndex, uint32_t> sourceOperandToInputIndex, std::map<SourceOperandIndex, uint32_t> sourceOperandToOutputIndex, const std::map<SourceOperandIndex, ConstantCopyLocation>& sourceOperandToConstantCopy, std::map<SourceOperandIndex, ConstantReferenceLocation> - sourceOperandToConstantReference, - DynamicTemporaries dynamicTemporaries); + sourceOperandToConstantReference); // Sets the location of innerOperand to be the same as the location of outerOperand. void setInput(const SourceOperandIndex& outerOperand, @@ -627,28 +467,28 @@ // does not generate a sync fence. int waitForLastStepSyncFence() const; - [[maybe_unused]] const ExecutionPlan* mPlan; + const ExecutionPlan* mPlan; ExecutionBuilder* mExecutionBuilder; const BurstBuilder* mBurstBuilder; // Map from source operand index to an offset into mTemporaries used // to represent that operand as an inter-partition input or output. // // The four maps - // - mSourceOperandToLocationOfTemporary + // - mSourceOperandToOffsetOfTemporary // - mSourceOperandToInputIndex // - mSourceOperandToOutputIndex // - mSourceOperandToConstantReference // are initialized from similarly named fields of ExecutionPlan::CompoundBody. // // A particular key appears in at most one map at any given time. This - // restriction does not apply to mSourceOperandToLocationOfTemporary2. + // restriction does not apply to mSourceOperandToOffsetOfTemporary2. 
// // The maps are modified during the execution of IfStep and WhileStep. // See ExecutionPlan::nextCompound(). - std::map<SourceOperandIndex, StaticTemporaryLocation> mSourceOperandToLocationOfTemporary; + std::map<SourceOperandIndex, uint32_t> mSourceOperandToOffsetOfTemporary; // Map from source operand index to an additional offset into // mTemporaries used for double buffering of WHILE loop output operands. - std::map<SourceOperandIndex, StaticTemporaryLocation> mSourceOperandToLocationOfTemporary2; + std::map<SourceOperandIndex, uint32_t> mSourceOperandToOffsetOfTemporary2; // Map from source operand index to an input index of the main model. std::map<SourceOperandIndex, uint32_t> mSourceOperandToInputIndex; // Map from source operand index to an output index of the main model. @@ -656,12 +496,7 @@ // Map from source operand index to a constant reference location. // Used for WHILE loop operand initializers that are constant references. std::map<SourceOperandIndex, ConstantReferenceLocation> mSourceOperandToConstantReference; - - // static temporaries std::unique_ptr<MemoryAshmem> mTemporaries; - - DynamicTemporaries mDynamicTemporaries; - // Index of the next step to be processed by ExecutionPlan::next(). size_t mNextStepIndex; // The value to reset mNextStepIndex to for partial CPU fallback. @@ -672,33 +507,22 @@ int mLastStepSyncFd; }; - std::vector<SharedBurst> makeBursts() const; + std::vector<std::shared_ptr<ExecutionBurstController>> makeBursts(int preference) const; - // Only legal to call when mState == COMPOUND. std::shared_ptr<Controller> makeController(ExecutionBuilder* executionBuilder, const BurstBuilder* burstBuilder) const; // Sets up a new StepExecutor and burstController (if applicable) if there // is a step to execute. See ExecutionPlan::Controller. // Handles control flow. See LogicalStep. - // burstController is nullptr if we are not to do burst execution. 
- // mainModelOutputShapes may be nullptr if the only main model outputs that are step model - // inputs are of fully specified shape. // syncFdOfLastStep is the sync fence fd generated by the most recently processed step. - // Only legal to call when mState == COMPOUND. int next(std::shared_ptr<Controller> controller, std::shared_ptr<StepExecutor>* executor, - SharedBurst* burstController, const std::vector<OutputShape>* mainModelOutputShapes, + std::shared_ptr<ExecutionBurstController>* burstController = nullptr, int syncFdOfLastStep = -1) const; // Create the same executor as the last one created by next(). - int fallback(std::shared_ptr<Controller> controller, std::shared_ptr<StepExecutor>* executor, - SharedBurst* burstController, - const std::vector<OutputShape>* mainModelOutputShapes) const; - - // Only legal to call when mState == SIMPLE. - // See the constructor of StepExecutor for the semantics of "reusable". - std::shared_ptr<StepExecutor> makeStepExecutor(bool reusable, - ExecutionBuilder* executionBuilder) const; + int fallback(std::shared_ptr<Controller> controller, + std::shared_ptr<StepExecutor>* executor) const; ExecutionStep* createNewExecutionStep(uint32_t sourceModelIndex, const std::shared_ptr<Device> device); @@ -711,11 +535,9 @@ void becomeSingleStep(const std::shared_ptr<Device> device, const ModelBuilder* model); - // simulateFailureResultCode == ANEURALNETWORKS_NO_ERROR means behave normally. 
- int finish(int32_t executionPreference, int32_t priority, const OptionalTimePoint& deadline, - int simulateFailureResultCode); + int finish(int32_t executionPreference, int32_t priority, + const std::optional<Deadline>& deadline); - void recordOutputDef(SourceOperandIndex sourceOperandIndex, uint32_t stepIndex); void recordTemporaryDef(SourceOperandIndex sourceOperandIndex, uint32_t stepIndex); void dump() const; @@ -724,17 +546,16 @@ bool isValid() const { return mState != EMPTY && mBody != nullptr && mBody->mSuccessfulFinish; } bool isSimple() const { return mState == SIMPLE; } - bool isCompound() const { return mState == COMPOUND; } bool isSimpleCpu() const; - void setCaching(const CacheInfo* cacheInfo, const uint8_t* token) { - mCacheInfo = cacheInfo; + void setCaching(const std::string* cacheDir, const uint8_t* token) { + mCacheDir = cacheDir; mToken = token; } - const CacheInfo* getCacheInfo() const { return mCacheInfo; } + const std::string* getCacheDir() const { return mCacheDir; } const uint8_t* getCacheToken() const { return mToken; } - // The caller is responsible for making sure the index is within range. + // The caller is responsible for making sure the index is not out of range. void forEachStepRoleOfInput(uint32_t index, const StepRoleCallback& callback) const { CHECK(mBody != nullptr); mBody->forEachStepRoleOfInput(index, callback); @@ -744,20 +565,9 @@ mBody->forEachStepRoleOfOutput(index, callback); } - // "type" specifies input or output, and "index" is the main model input or output index. - // The caller is responsible for making sure the index is within range. - MemoryPreference getMemoryPreference(IOType type, uint32_t index) const; - SourceModels& getSourceModels() { return mSourceModels; } const SourceModels& getSourceModels() const { return mSourceModels; } - // "index" is the main model input or output index. - // The caller is responsible for making sure the index is within range. 
- SourceOperandIndex getInputSourceOperand(uint32_t index) const; - SourceOperandIndex getOutputSourceOperand(uint32_t index) const; - - bool hasDynamicTemporaries() const; - // These functions are solely intended for use by unit tests of // the partitioning algorithm. enum class Kind { @@ -769,26 +579,14 @@ Kind forTest_getKind() const; std::shared_ptr<const Device> forTest_simpleGetDevice() const; const std::vector<std::shared_ptr<LogicalStep>>& forTest_compoundGetSteps() const; - void forTest_compoundForEachStepRoleOfSourceOperand(SourceOperandIndex index, - const StepRoleCallback& callback) const { - compound()->forEachStepRoleOfSourceOperand(index, callback); - } - // The "flat" in the name signifies that this method requires that the - // model not contain any control flow operations. - std::set<uint32_t> forTest_flatGetDynamicTemporaries() const; + bool forTest_hasStepModelOutputsOfUnknownSize() const; const uint8_t* forTest_simpleGetCacheToken() const; - bool forTest_hasStepModelWithNoInputsOrNoOutputs() const; private: // Becomes a new COMPOUND step if mState == EMPTY, otherwise does nothing. // Illegal to call for when mState == SIMPLE. void becomeCompoundIfEmpty(); - - const Operand& getSourceOperand(const std::pair<uint32_t, uint32_t>& sourceOperandIndex) const { - return getSourceModels() - .getModel(sourceOperandIndex.first) - ->getOperand(sourceOperandIndex.second); - } + void findTempsAsStepModelOutputs(); class Buffer { public: @@ -814,29 +612,27 @@ // Handles control flow. See LogicalStep. 
int nextCompound(std::shared_ptr<Controller> controller, - std::shared_ptr<StepExecutor>* executor, SharedBurst* burstController, - const std::vector<OutputShape>* mainModelOutputShapes) const; + std::shared_ptr<StepExecutor>* executor, + std::shared_ptr<ExecutionBurstController>* burstController) const; int nextCompound(const ExecutionStep* step, std::shared_ptr<Controller> controller, - std::shared_ptr<StepExecutor>* executor, SharedBurst* burstController, - const std::vector<OutputShape>* mainModelOutputShapes) const; + std::shared_ptr<StepExecutor>* executor, + std::shared_ptr<ExecutionBurstController>* burstController) const; int nextCompound(const IfStep* step, std::shared_ptr<Controller> controller, - std::shared_ptr<StepExecutor>* executor, SharedBurst* burstController, - const std::vector<OutputShape>* mainModelOutputShapes) const; + std::shared_ptr<StepExecutor>* executor, + std::shared_ptr<ExecutionBurstController>* burstController) const; int nextCompound(const WhileStep* step, std::shared_ptr<Controller> controller, - std::shared_ptr<StepExecutor>* executor, SharedBurst* burstController, - const std::vector<OutputShape>* mainModelOutputShapes) const; + std::shared_ptr<StepExecutor>* executor, + std::shared_ptr<ExecutionBurstController>* burstController) const; int nextCompound(const GotoStep* step, std::shared_ptr<Controller> controller, - std::shared_ptr<StepExecutor>* executor, SharedBurst* burstController, - const std::vector<OutputShape>* mainModelOutputShapes) const; + std::shared_ptr<StepExecutor>* executor, + std::shared_ptr<ExecutionBurstController>* burstController) const; struct Body { virtual ~Body() {} virtual void dump() const = 0; virtual int finish(const SourceModels* sourceModels, int32_t executionPreference, - int32_t priority, const OptionalTimePoint& deadline, - int simulateFailureResultCode) = 0; - virtual bool hasDynamicTemporaries() const = 0; - virtual bool hasStepModelWithNoInputsOrNoOutputs() const = 0; + int32_t priority, 
const std::optional<Deadline>& deadline) = 0; + virtual bool hasStepModelOutputsOfUnknownSize() const = 0; virtual void forEachStepRoleOfInput(uint32_t index, const StepRoleCallback& callback) const = 0; virtual void forEachStepRoleOfOutput(uint32_t index, @@ -846,14 +642,13 @@ struct SimpleBody : Body { SimpleBody(std::shared_ptr<Device> device, const ModelBuilder* model, - const CacheInfo* cacheInfo, const uint8_t* token) - : mDevice(device), mModel(model), mCacheInfo(cacheInfo), mToken(token) {} + const std::string* cacheDir, const uint8_t* token) + : mDevice(device), mModel(model), mCacheDir(cacheDir), mToken(token) {} void dump() const override; int finish(const SourceModels* sourceModels, int32_t executionPreference, int32_t priority, - const OptionalTimePoint& deadline, int simulateFailureResultCode) override; - bool hasDynamicTemporaries() const override { return false; } - bool hasStepModelWithNoInputsOrNoOutputs() const override { return false; } + const std::optional<Deadline>& deadline) override; + bool hasStepModelOutputsOfUnknownSize() const override { return false; } void forEachStepRoleOfInput(uint32_t index, const StepRoleCallback& callback) const override; void forEachStepRoleOfOutput(uint32_t index, @@ -861,30 +656,23 @@ std::shared_ptr<Device> mDevice; const ModelBuilder* mModel; - std::shared_ptr<RuntimePreparedModel> mPreparedModel; + std::shared_ptr<PreparedModel> mPreparedModel; - const CacheInfo* mCacheInfo; + const std::string* mCacheDir; TokenHasher mToken; }; struct CompoundBody : Body { - CompoundBody(const ExecutionPlan* plan) : mPlan(plan) { CHECK(plan != nullptr); } - void dump() const override; int finish(const SourceModels* sourceModels, int32_t executionPreference, int32_t priority, - const OptionalTimePoint& deadline, int simulateFailureResultCode) override; - bool hasDynamicTemporaries() const override { return mHasDynamicTemporaries; } - bool hasStepModelWithNoInputsOrNoOutputs() const override; + const std::optional<Deadline>& 
deadline) override; + bool hasStepModelOutputsOfUnknownSize() const override { + return mHasStepModelOutputOfUnknownSize; + } void forEachStepRoleOfInput(uint32_t index, const StepRoleCallback& callback) const override; void forEachStepRoleOfOutput(uint32_t index, const StepRoleCallback& callback) const override; - // Supported for any legal source operand index. For a source operand that doesn't have a - // step role, the callback will not be invoked at all. - void forEachStepRoleOfSourceOperand(const SourceOperandIndex& index, - const StepRoleCallback& callback) const; - // Supported for any legal source operand index. - MemoryPreference getMemoryPreferenceOfSourceOperand(const SourceOperandIndex& index) const; // TODO: Some of the data is working state information that // shouldn't be needed after we've constructed but not @@ -893,12 +681,6 @@ std::vector<std::shared_ptr<LogicalStep>> mSteps; // Map from source operand index to defining ExecutionStep index. - // Used for all (and only) SUBGRAPH_OUTPUTs that are defined by - // ExecutionSteps. Those defined by IfSteps and WhileSteps are not in - // the map. - std::map<SourceOperandIndex, uint32_t> mOutputToDefiningExecutionStep; - - // Map from source operand index to defining ExecutionStep index. // Used for all (and only) TEMPORARY_VARIABLEs that are defined by // ExecutionSteps. Those defined by IfSteps and WhileSteps are not in // the map. @@ -914,8 +696,7 @@ // to initialize ExecutionPlan::Controller::mSourceOperandToOutputIndex; std::map<SourceOperandIndex, uint32_t> mSourceOperandToOutputIndex; - // Map from source operand index to location of a CONSTANT_COPY or - // POINTER operand. + // Map from source operand index to location of a CONSTANT_COPY operand. // This map only contains constant partition boundary IF and WHILE // operands and is used to create a ExecutionPlan::Controller. 
std::map<SourceOperandIndex, ConstantCopyLocation> mSourceOperandToBoundaryConstantCopy; @@ -927,30 +708,20 @@ std::map<SourceOperandIndex, ConstantReferenceLocation> mSourceOperandToBoundaryConstantReference; - // Map from source operand index of a boundary operand to the step roles that its memory - // may be used for. - // This map only contains partition boundary operands that have ExecutionStep roles, that - // is, SUBGRAPH_INPUTs, SUBGRAPH_OUTPUTs, and partition boundary static and dynamic - // temporaries. If a partition boundary operand is not found in the map, then the operand - // does not have any ExecutionStep role (this may happen with interpreted control flow). - std::map<SourceOperandIndex, std::set<StepRole>> mSourceOperandToStepRoles; - - bool mHasDynamicTemporaries = false; + bool mHasStepModelOutputOfUnknownSize = false; private: void findTempsAsStepModelOutputs(); - void findModelOutputsThatAreDownstreamInputs(); - // Constant values that are inputs to IF and WHILE operations and lie on // a partition boundary ("control flow boundary constants") require // special treatment. We need to be able to dynamically associate those // values with the corresponding SUBGRAPH_INPUT operands in a referenced // model. // - // For CONSTANT_COPY and POINTER boundary operands, we copy those to - // temporary memory and treat them similarly to TEMPORARY_VARIABLE - // operands in Controller. + // For CONSTANT_COPY boundary operands, we copy those to temporary + // memory and treat them similarly to TEMPORARY_VARIABLE operands in + // Controller. // // For CONSTANT_REFERENCE boundary operands, we keep track of them in // ExecutionPlan::Controller::mSourceOperandToConstantReference. @@ -959,11 +730,6 @@ // constants, we could embed those inside the referenced model, but we // currently don't do so. See b/148216514. void findControlFlowBoundaryConstants(const SourceModels* sourceModels); - - // This method will set mSourceOperandToStepRoles. 
- void findMemoryStepRoles(); - - const ExecutionPlan* mPlan; }; enum { EMPTY, SIMPLE, COMPOUND } mState = EMPTY; @@ -989,13 +755,9 @@ return static_cast<const CompoundBody*>(mBody); } - void forEachDynamicTemporary(const std::function<void(SourceOperandIndex, const Operand&, - uint32_t definingStepIndex)>&) const; - // Pointers to compilation caching information in CompilationBuilder. - const CacheInfo* mCacheInfo = nullptr; + const std::string* mCacheDir = nullptr; const uint8_t* mToken = nullptr; - SourceModels mSourceModels; };
diff --git a/runtime/FeatureLevel.h b/runtime/FeatureLevel.h deleted file mode 100644 index c25ce8c..0000000 --- a/runtime/FeatureLevel.h +++ /dev/null
@@ -1,31 +0,0 @@ -/* - * Copyright (C) 2021 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef ANDROID_FRAMEWORKS_ML_NN_RUNTIME_FEATURE_LEVEL_H -#define ANDROID_FRAMEWORKS_ML_NN_RUNTIME_FEATURE_LEVEL_H - -#include "NeuralNetworks.h" - -namespace android { -namespace nn { - -// The current feature level of the NNAPI Runtime -constexpr FeatureLevelCode kCurrentNNAPIRuntimeFeatureLevel = ANEURALNETWORKS_FEATURE_LEVEL_5; - -} // namespace nn -} // namespace android - -#endif // ANDROID_FRAMEWORKS_ML_NN_RUNTIME_FEATURE_LEVEL_H
diff --git a/runtime/Manager.cpp b/runtime/Manager.cpp index 99ebf69..50d1180 100644 --- a/runtime/Manager.cpp +++ b/runtime/Manager.cpp
@@ -18,114 +18,97 @@ #include "Manager.h" -#include <CpuExecutor.h> -#include <LegacyUtils.h> -#include <MetaModel.h> -#include <Tracing.h> -#include <nnapi/IBurst.h> -#include <nnapi/IDevice.h> -#include <nnapi/IExecution.h> -#include <nnapi/IPreparedModel.h> -#include <nnapi/SharedMemory.h> -#include <nnapi/Types.h> -#include <nnapi/Validation.h> +#include <android/hidl/manager/1.2/IServiceManager.h> +#include <android/sync.h> +#include <build/version.h> +#include <cutils/native_handle.h> +#include <hidl/HidlTransportSupport.h> +#include <hidl/ServiceManagement.h> #include <algorithm> #include <functional> -#include <iterator> -#include <map> #include <memory> #include <string> #include <tuple> #include <utility> #include <vector> -#include "ExecutionCallback.h" -#include "FeatureLevel.h" +#include "Callbacks.h" +#include "CpuExecutor.h" +#include "ExecutionBurstController.h" +#include "HalInterfaces.h" #include "Memory.h" +#include "MetaModel.h" #include "ModelArgumentInfo.h" +#include "Tracing.h" #include "TypeManager.h" - -#ifndef NN_COMPATIBILITY_LIBRARY_BUILD -#include <build/version.h> -#include <cutils/native_handle.h> -#include <nnapi/hal/1.3/Buffer.h> -#include <nnapi/hal/Service.h> - -#include "AppInfoFetcher.h" -#endif // NN_COMPATIBILITY_LIBRARY_BUILD +#include "Utils.h" +#include "VersionedInterfaces.h" namespace android { namespace nn { +using namespace hal; + +const Timing kNoTiming = {.timeOnDevice = UINT64_MAX, .timeInDriver = UINT64_MAX}; + // A Device with actual underlying driver class DriverDevice : public Device { public: // Create a DriverDevice from a name and a DeviceFactory function. // Returns nullptr on failure. 
- static std::shared_ptr<DriverDevice> create(SharedDevice device, bool isUpdatable = false); + static std::shared_ptr<DriverDevice> create(const std::string& name, + const DeviceFactory& makeDevice); // Prefer using DriverDevice::create - explicit DriverDevice(SharedDevice device, bool isUpdatable); + DriverDevice(std::shared_ptr<VersionedIDevice> device); const std::string& getName() const override { return kInterface->getName(); } const std::string& getVersionString() const override { return kInterface->getVersionString(); } - int64_t getFeatureLevel() const override; - int32_t getType() const override { return static_cast<int32_t>(kInterface->getType()); } - bool isUpdatable() const override { return kIsUpdatable; } + int64_t getFeatureLevel() const override { return kInterface->getFeatureLevel(); } + int32_t getType() const override { return kInterface->getType(); } const std::vector<Extension>& getSupportedExtensions() const override { return kInterface->getSupportedExtensions(); } std::vector<bool> getSupportedOperations(const MetaModel& metaModel) const override; - const Capabilities& getCapabilities() const override { return kInterface->getCapabilities(); } - Capabilities::PerformanceInfo getPerformance(OperandType type) const override { - return getCapabilities().operandPerformance.lookup(type); + PerformanceInfo getPerformance(OperandType type) const override { + const auto& capabilities = kInterface->getCapabilities(); + return lookup(capabilities.operandPerformance, type); } - Capabilities::PerformanceInfo getRelaxedFloat32toFloat16PerformanceScalar() const override { - return getCapabilities().relaxedFloat32toFloat16PerformanceScalar; + PerformanceInfo getRelaxedFloat32toFloat16PerformanceScalar() const override { + const auto& capabilities = kInterface->getCapabilities(); + return capabilities.relaxedFloat32toFloat16PerformanceScalar; } - Capabilities::PerformanceInfo getRelaxedFloat32toFloat16PerformanceTensor() const override { - return 
getCapabilities().relaxedFloat32toFloat16PerformanceTensor; + PerformanceInfo getRelaxedFloat32toFloat16PerformanceTensor() const override { + const auto& capabilities = kInterface->getCapabilities(); + return capabilities.relaxedFloat32toFloat16PerformanceTensor; } - Capabilities::PerformanceInfo getIfPerformance() const override { - return getCapabilities().ifPerformance; + PerformanceInfo getIfPerformance() const override { + const auto& capabilities = kInterface->getCapabilities(); + return capabilities.ifPerformance; } - Capabilities::PerformanceInfo getWhilePerformance() const override { - return getCapabilities().whilePerformance; - } - std::pair<uint32_t, uint32_t> getNumberOfCacheFilesNeeded() const override { - return kInterface->getNumberOfCacheFilesNeeded(); + PerformanceInfo getWhilePerformance() const override { + const auto& capabilities = kInterface->getCapabilities(); + return capabilities.whilePerformance; } bool isCachingSupported() const override { // Caching is supported if either of numModelCache or numDataCache is greater than 0. 
- const auto [numModelCacheFiles, numDataCacheFiles] = getNumberOfCacheFilesNeeded(); + const auto [numModelCacheFiles, numDataCacheFiles] = + kInterface->getNumberOfCacheFilesNeeded(); return numModelCacheFiles > 0 || numDataCacheFiles > 0; } - int wait() const override { - auto result = kInterface->wait(); - if (!result.ok()) { - LOG(ERROR) << "DriverDevice::wait error: " << result.error().message; - return convertErrorStatusToResultCode(result.error().code); - } - return ANEURALNETWORKS_NO_ERROR; - } + int wait() const override { return kInterface->wait(); } - std::pair<int, std::shared_ptr<RuntimePreparedModel>> prepareModel( + std::pair<int, std::shared_ptr<PreparedModel>> prepareModel( const ModelFactory& makeModel, ExecutionPreference preference, Priority priority, - const OptionalTimePoint& deadline, const CacheInfo& cacheInfo, + const std::optional<Deadline>& deadline, const std::string& cacheDir, const std::optional<CacheToken>& maybeToken) const override; - std::pair<int, std::unique_ptr<RuntimeMemory>> allocate(const MemoryDescriptor& desc, - OperandType) const override; + std::pair<int, std::unique_ptr<Memory>> allocate(const MemoryDescriptor& desc, + hal::OperandType) const override; private: - const SharedDevice kInterface; - const bool kIsUpdatable; - - GeneralResult<std::vector<bool>> getSupportedOperationsImpl(const MetaModel& metaModel) const; - GeneralResult<SharedPreparedModel> prepareModelFromCacheInternal( - const OptionalTimePoint& deadline, const CacheInfo& cacheInfo, - const CacheToken& token) const; + const std::shared_ptr<VersionedIDevice> kInterface; #ifdef NN_DEBUGGABLE // For debugging: behavior of IDevice::getSupportedOperations for SampleDriver. @@ -135,95 +118,48 @@ #endif // NN_DEBUGGABLE }; -// A RuntimePreparedModel with underlying IPreparedModel instance return by actual driver. -class DriverPreparedModel : public RuntimePreparedModel { +// A PreparedModel with underlying IPreparedModel instance return by actual driver. 
+class DriverPreparedModel : public PreparedModel { public: - DriverPreparedModel(const Device* device, const SharedPreparedModel& preparedModel) + DriverPreparedModel(const Device* device, + const std::shared_ptr<VersionedIPreparedModel>& preparedModel) : mDevice(device), mPreparedModel(preparedModel) { CHECK(mDevice != nullptr); CHECK(mPreparedModel != nullptr); } const Device* getDevice() const override { return mDevice; } - SharedPreparedModel getInterface() const override { return mPreparedModel; } - + std::shared_ptr<VersionedIPreparedModel> getInterface() const override { + return mPreparedModel; + } std::tuple<int, std::vector<OutputShape>, Timing> execute( const std::vector<ModelArgumentInfo>& inputs, const std::vector<ModelArgumentInfo>& outputs, - const std::vector<const RuntimeMemory*>& memories, const SharedBurst& burstController, - MeasureTiming measure, const OptionalTimePoint& deadline, - const OptionalDuration& loopTimeoutDuration) const override; + const std::vector<const Memory*>& memories, + const std::shared_ptr<ExecutionBurstController>& burstController, MeasureTiming measure, + const std::optional<Deadline>& deadline, + const OptionalTimeoutDuration& loopTimeoutDuration) const override; - std::tuple<int, int, ExecuteFencedInfoCallback, Timing> executeFenced( + std::tuple<int, int, sp<hal::IFencedExecutionCallback>, hal::Timing> executeFenced( const std::vector<ModelArgumentInfo>& inputs, const std::vector<ModelArgumentInfo>& outputs, - const std::vector<const RuntimeMemory*>& memories, const std::vector<int>& waitFor, - MeasureTiming measure, const OptionalTimePoint& deadline, - const OptionalDuration& loopTimeoutDuration, - const OptionalDuration& timeoutDurationAfterFence) const override; + const std::vector<const Memory*>& memories, const std::vector<int>& waitFor, + MeasureTiming measure, const std::optional<Deadline>& deadline, + const OptionalTimeoutDuration& loopTimeoutDuration, + const hal::OptionalTimeoutDuration& 
timeoutDurationAfterFence) const override; - std::pair<int, std::shared_ptr<RuntimeExecution>> createReusableExecution( - const std::vector<ModelArgumentInfo>& inputs, - const std::vector<ModelArgumentInfo>& outputs, - const std::vector<const RuntimeMemory*>& memories, MeasureTiming measure, - const OptionalDuration& loopTimeoutDuration) const override; - - GeneralResult<SharedBurst> configureExecutionBurst() const override { - return mPreparedModel->configureExecutionBurst(); - } - - MemoryPreference getMemoryPreference() const override { - if (mDevice->getFeatureLevel() >= ANEURALNETWORKS_FEATURE_LEVEL_5) { - return {kDefaultRequestMemoryAlignment, kDefaultRequestMemoryPadding}; - } else { - // We are not able to pass memory padding information to HIDL drivers, so return the - // minimum padding. - return {kDefaultRequestMemoryAlignment, kMinMemoryPadding}; - } + std::shared_ptr<ExecutionBurstController> configureExecutionBurst( + bool preferPowerOverLatency) const override { + return mPreparedModel->configureExecutionBurst(preferPowerOverLatency); } private: const Device* mDevice; - const SharedPreparedModel mPreparedModel; + const std::shared_ptr<VersionedIPreparedModel> mPreparedModel; }; -class DriverExecution : public RuntimeExecution { - public: - DriverExecution(SharedExecution execution, Request request, - std::vector<const RuntimeMemory*> memories, MeasureTiming measure, - OptionalDuration loopTimeoutDuration, int64_t deviceFeatureLevel) - : kExecution(std::move(execution)), - kRequest(std::move(request)), - kMemories(std::move(memories)), - kMeasure(measure), - kLoopTimeoutDuration(std::move(loopTimeoutDuration)), - kDeviceFeatureLevel(deviceFeatureLevel) { - CHECK(kExecution != nullptr); - } - - std::tuple<int, std::vector<OutputShape>, Timing> compute( - const SharedBurst& burstController, const OptionalTimePoint& deadline) const override; - - std::tuple<int, int, ExecuteFencedInfoCallback, Timing> computeFenced( - const std::vector<int>& waitFor, 
const OptionalTimePoint& deadline, - const OptionalDuration& timeoutDurationAfterFence) const override; - - private: - const SharedExecution kExecution; - - // For burst execution. - const Request kRequest; - const std::vector<const RuntimeMemory*> kMemories; - const MeasureTiming kMeasure; - const OptionalDuration kLoopTimeoutDuration; - mutable std::map<const IBurst*, SharedExecution> mCachedBurstExecutions; - - // For fenced execution. - const int64_t kDeviceFeatureLevel; -}; - -DriverDevice::DriverDevice(SharedDevice device, bool isUpdatable) - : kInterface(std::move(device)), kIsUpdatable(isUpdatable) { +DriverDevice::DriverDevice(std::shared_ptr<VersionedIDevice> device) + : kInterface(std::move(device)) { CHECK(kInterface != nullptr); #ifdef NN_DEBUGGABLE static const char samplePrefix[] = "sample"; @@ -233,74 +169,39 @@ #endif // NN_DEBUGGABLE } -std::shared_ptr<DriverDevice> DriverDevice::create(SharedDevice device, bool isUpdatable) { +std::shared_ptr<DriverDevice> DriverDevice::create(const std::string& name, + const DeviceFactory& makeDevice) { + CHECK(makeDevice != nullptr); + std::shared_ptr<VersionedIDevice> device = VersionedIDevice::create(name, makeDevice); if (device == nullptr) { - LOG(ERROR) << "DriverDevice::create called with nullptr"; + LOG(ERROR) << "DriverDevice::create failed to create VersionedIDevice object for service " + << name; return nullptr; } - return std::make_shared<DriverDevice>(std::move(device), isUpdatable); -} - -int64_t DriverDevice::getFeatureLevel() const { - Version featureLevel = kInterface->getFeatureLevel(); - switch (featureLevel) { - case Version::ANDROID_OC_MR1: - return ANEURALNETWORKS_FEATURE_LEVEL_1; - case Version::ANDROID_P: - return ANEURALNETWORKS_FEATURE_LEVEL_2; - case Version::ANDROID_Q: - return ANEURALNETWORKS_FEATURE_LEVEL_3; - case Version::ANDROID_R: - return ANEURALNETWORKS_FEATURE_LEVEL_4; - case Version::ANDROID_S: - return ANEURALNETWORKS_FEATURE_LEVEL_5; - case Version::CURRENT_RUNTIME: - 
break; - } - LOG(FATAL) << "Unsupported driver feature level: " << featureLevel; - return -1; -} - -GeneralResult<std::vector<bool>> DriverDevice::getSupportedOperationsImpl( - const MetaModel& metaModel) const { - const auto featureLevel = kInterface->getFeatureLevel(); - const auto slice = metaModel.getSlice(featureLevel); - if (!slice.has_value()) { - return NN_ERROR() << "getSlice(" << featureLevel << ") failed"; - } - - const auto& [sliceModel, slicedModelOperationIndexToModelOperationIndex] = *slice; - const std::vector<bool> supported = NN_TRY(kInterface->getSupportedOperations(sliceModel)); - const uint32_t slicedOperationCount = sliceModel.main.operations.size(); - if (supported.size() != slicedOperationCount) { - return NN_ERROR() << "IDevice::getSupportedOperations returned a vector of length " - << supported.size() << " when expecting " << slicedOperationCount; - } - - const Model& model = metaModel.getModel(); - const uint32_t operationCount = model.main.operations.size(); - std::vector<bool> remappedSupported(operationCount, false); - for (size_t i = 0; i < supported.size(); ++i) { - if (supported[i]) { - remappedSupported[slicedModelOperationIndexToModelOperationIndex(i)] = true; - } - } - return remappedSupported; + return std::make_shared<DriverDevice>(std::move(device)); } std::vector<bool> DriverDevice::getSupportedOperations(const MetaModel& metaModel) const { - const Model& model = metaModel.getModel(); + // Query the driver for what it can do. 
+ ErrorStatus status = ErrorStatus::GENERAL_FAILURE; + std::vector<bool> supportedOperations; + std::tie(status, supportedOperations) = kInterface->getSupportedOperations(metaModel); - auto result = getSupportedOperationsImpl(metaModel); - if (!result.ok()) { - LOG(ERROR) << "getSupportedOperations failed with code " << result.error().code << ": " - << result.error().message; + const Model& hidlModel = metaModel.getModel(); + const uint32_t operationCount = hidlModel.main.operations.size(); + if (status != ErrorStatus::NONE) { + LOG(ERROR) << "IDevice::getSupportedOperations returned the error " << toString(status); // Set the supported operation vectors to all false, so we won't use this driver. - return std::vector<bool>(model.main.operations.size(), false); + return std::vector<bool>(operationCount, false); + } + if (supportedOperations.size() != operationCount) { + LOG(ERROR) << "IDevice::getSupportedOperations returned a vector of length " + << supportedOperations.size() << " when expecting " << operationCount; + // Set the supported operation vectors to all false, so we won't use this driver. 
+ return std::vector<bool>(operationCount, false); } - std::vector<bool>& supportedOperations = result.value(); #ifdef NN_DEBUGGABLE if (mSupported != 1) { return supportedOperations; @@ -313,18 +214,17 @@ } uint32_t accumulator = baseAccumulator; - const Operation& operation = model.main.operations[operationIndex]; + const Operation& operation = hidlModel.main.operations[operationIndex]; accumulator ^= static_cast<uint32_t>(operation.type); - auto accumulateOperands = [&model, &accumulator](const std::vector<uint32_t>& operands) { + auto accumulateOperands = [&hidlModel, &accumulator](const hidl_vec<uint32_t>& operands) { for (uint32_t operandIndex : operands) { - const Operand& operand = model.main.operands[operandIndex]; + const Operand& operand = hidlModel.main.operands[operandIndex]; accumulator ^= static_cast<uint32_t>(operand.type); accumulator ^= operand.dimensions.size(); - for (const Dimension& dimension : operand.dimensions) { + for (uint32_t dimension : operand.dimensions) { accumulator ^= dimension; - if (operand.lifetime == Operand::LifeTime::CONSTANT_COPY || - operand.lifetime == Operand::LifeTime::CONSTANT_REFERENCE || - operand.lifetime == Operand::LifeTime::POINTER) { + if (operand.lifetime == OperandLifeTime::CONSTANT_COPY || + operand.lifetime == OperandLifeTime::CONSTANT_REFERENCE) { accumulator ^= 1; } } @@ -341,486 +241,307 @@ return supportedOperations; } -// Opens a cache file for reading and writing and returns a shared handle. -static GeneralResult<SharedHandle> createCacheHandle(const std::string& filename, - bool createIfNotExist) { - auto fd = base::unique_fd(open(filename.c_str(), createIfNotExist ? (O_RDWR | O_CREAT) : O_RDWR, - S_IRUSR | S_IWUSR)); - if (fd.get() == -1) { - return NN_ERROR(ErrorStatus::GENERAL_FAILURE) - << "Failed to " << (createIfNotExist ? 
"open or create" : "open") << " cache file " - << filename; - } - std::vector<base::unique_fd> fds; - fds.push_back(std::move(fd)); - return std::make_shared<const Handle>(Handle{ - .fds = std::move(fds), - .ints = {}, - }); -} - -// Opens a list of cache files and returns a vector of shared handles. The files -// are always opened with both read and write permissions. -static GeneralResult<std::vector<SharedHandle>> createCacheHandleVec( - uint32_t numCacheFiles, const std::string& baseFilename, bool createIfNotExist) { - CHECK(numCacheFiles <= kMaxNumberOfCacheFiles); - std::vector<SharedHandle> handles; - handles.reserve(numCacheFiles); - for (uint32_t i = 0; i < numCacheFiles; i++) { - std::string filename = baseFilename + std::to_string(i); - VLOG(COMPILATION) << "Cache " << i << ": " << filename; - handles.push_back(NN_TRY(createCacheHandle(filename, createIfNotExist))); - } - return handles; -} - -// Maps a token to cache file names and returns a pair of vectors of shared -// handles to the opened files. -static GeneralResult<CacheHandles> getCacheHandles( - const CacheInfo& cacheInfo, const CacheToken& token, - const std::pair<uint32_t, uint32_t>& numCacheFiles, bool createIfNotExist) { - if (const auto* cacheHandles = std::get_if<CacheHandles>(&cacheInfo.variant)) { - if (cacheHandles->modelCache.size() != numCacheFiles.first) { - return NN_ERROR(ErrorStatus::GENERAL_FAILURE) - << "Expected " << numCacheFiles.first << " model cache handles, got " - << cacheHandles->modelCache.size(); - } - if (cacheHandles->dataCache.size() != numCacheFiles.second) { - return NN_ERROR(ErrorStatus::GENERAL_FAILURE) - << "Expected " << numCacheFiles.second << " data cache handles, got " - << cacheHandles->dataCache.size(); - } - return *cacheHandles; - } - - // The filename includes kByteSizeOfCacheToken * 2 characters for token, - // and 1 character for model/data cache identifier. 
- std::string filename(kByteSizeOfCacheToken * 2 + 1, '0'); - for (uint32_t i = 0; i < kByteSizeOfCacheToken; i++) { - filename[i * 2] = 'A' + (token[i] & 0x0F); - filename[i * 2 + 1] = 'A' + (token[i] >> 4); - } - - const auto& cacheDir = std::get<CacheDir>(cacheInfo.variant); - CHECK(cacheDir.empty() || cacheDir.back() == '/'); - std::string cacheFileName = cacheDir + filename; - const uint32_t cacheTypeIdentifierIndex = cacheDir.size() + kByteSizeOfCacheToken * 2; - - cacheFileName[cacheTypeIdentifierIndex] = '1'; - std::vector<SharedHandle> modelCache = - NN_TRY(createCacheHandleVec(numCacheFiles.first, cacheFileName, createIfNotExist)); - - cacheFileName[cacheTypeIdentifierIndex] = '2'; - std::vector<SharedHandle> dataCache = - NN_TRY(createCacheHandleVec(numCacheFiles.second, cacheFileName, createIfNotExist)); - - return CacheHandles{ - .modelCache = std::move(modelCache), - .dataCache = std::move(dataCache), - }; -} - -GeneralResult<SharedPreparedModel> DriverDevice::prepareModelFromCacheInternal( - const OptionalTimePoint& deadline, const CacheInfo& cacheInfo, - const CacheToken& token) const { - // Get cache files if they exist, otherwise return from the function early. - auto cache = NN_TRY(getCacheHandles(cacheInfo, token, kInterface->getNumberOfCacheFilesNeeded(), - /*createIfNotExist=*/false)); - return kInterface->prepareModelFromCache(deadline, cache.modelCache, cache.dataCache, token); -} - -std::pair<int, std::shared_ptr<RuntimePreparedModel>> DriverDevice::prepareModel( +std::pair<int, std::shared_ptr<PreparedModel>> DriverDevice::prepareModel( const ModelFactory& makeModel, ExecutionPreference preference, Priority priority, - const OptionalTimePoint& deadline, const CacheInfo& cacheInfo, + const std::optional<Deadline>& deadline, const std::string& cacheDir, const std::optional<CacheToken>& maybeToken) const { - // Attempt to compile from cache if token is present. 
- if (maybeToken.has_value()) { - auto result = prepareModelFromCacheInternal(deadline, cacheInfo, *maybeToken); - if (result.has_value()) { - return {ANEURALNETWORKS_NO_ERROR, - std::make_shared<DriverPreparedModel>(this, std::move(result).value())}; - } else { - LOG(ERROR) << "prepareModelFromCache failure (" << result.error().code - << "): " << result.error().message; - } + const auto [n, preparedModel] = kInterface->prepareModel(makeModel, preference, priority, + deadline, cacheDir, maybeToken); + if (n != ANEURALNETWORKS_NO_ERROR) { + return {n, nullptr}; } - - // Get cache files if they exist, otherwise create them. - CacheHandles cache; - if (maybeToken.has_value()) { - auto result = - getCacheHandles(cacheInfo, *maybeToken, kInterface->getNumberOfCacheFilesNeeded(), - /*createIfNotExist=*/true); - if (result.has_value()) { - cache = std::move(result).value(); - } else { - LOG(ERROR) << "getCacheHandles failure (" << result.error().code - << "): " << result.error().message; - } - } - - // Get the token if it exists, otherwise get a null token. - static constexpr CacheToken kNullToken = {}; - const CacheToken token = maybeToken.value_or(kNullToken); - - // Fallback to full compilation (possibly with token) if - // prepareModelFromCache could not be used or failed. 
- const Model model = makeModel(); - auto result = kInterface->prepareModel(model, preference, priority, deadline, cache.modelCache, - cache.dataCache, token); - if (!result.ok()) { - LOG(ERROR) << "IDevice::prepareModel() error: " << result.error().message; - return {convertErrorStatusToResultCode(result.error().code), nullptr}; - } - SharedPreparedModel preparedModel = std::move(result).value(); - CHECK(preparedModel != nullptr) - << "IDevice::prepareModel() returned nullptr without error code"; - return {ANEURALNETWORKS_NO_ERROR, - std::make_shared<DriverPreparedModel>(this, std::move(preparedModel))}; + CHECK(preparedModel != nullptr) << "prepareModel returned nullptr without error code"; + return {ANEURALNETWORKS_NO_ERROR, std::make_shared<DriverPreparedModel>(this, preparedModel)}; } -std::pair<int, std::unique_ptr<RuntimeMemory>> DriverDevice::allocate(const MemoryDescriptor& desc, - OperandType) const { - const BufferDesc bufferDesc = {.dimensions = desc.dimensions}; - std::vector<SharedPreparedModel> preparedModels(desc.preparedModels.size()); +std::pair<int, std::unique_ptr<Memory>> DriverDevice::allocate(const MemoryDescriptor& desc, + hal::OperandType) const { + const BufferDesc hidlDesc = {.dimensions = desc.dimensions}; + std::vector<std::shared_ptr<VersionedIPreparedModel>> preparedModels( + desc.preparedModels.size()); std::transform(desc.preparedModels.begin(), desc.preparedModels.end(), preparedModels.begin(), [](const auto* preparedModel) { const auto versionedPreparedModel = preparedModel->getInterface(); CHECK(versionedPreparedModel != nullptr); return versionedPreparedModel; }); - auto result = - kInterface->allocate(bufferDesc, preparedModels, desc.inputRoles, desc.outputRoles); - if (!result.ok()) { + auto [status, buffer, token] = + kInterface->allocate(hidlDesc, preparedModels, desc.inputRoles, desc.outputRoles); + if (status != ErrorStatus::NONE) { LOG(ERROR) << "DriverDevice::allocate -- memory allocation on device " << getName() << " 
failed!"; - return {convertErrorStatusToResultCode(result.error().code), nullptr}; + return {convertErrorStatusToResultCode(status), nullptr}; } - return MemoryFromDevice::create(std::move(result).value()); + return MemoryFromDevice::create(std::move(buffer), token); } -static Request createDriverRequest(const std::vector<ModelArgumentInfo>& inputs, - const std::vector<ModelArgumentInfo>& outputs, - const std::vector<const RuntimeMemory*>& memories) { - Request request; - request.inputs.reserve(inputs.size()); - std::transform(inputs.begin(), inputs.end(), std::back_inserter(request.inputs), - [](const auto& input) { return input.createRequestArgument(); }); - request.outputs.reserve(outputs.size()); - std::transform(outputs.begin(), outputs.end(), std::back_inserter(request.outputs), - [](const auto& output) { return output.createRequestArgument(); }); - request.pools.reserve(memories.size()); - std::transform(memories.begin(), memories.end(), std::back_inserter(request.pools), - [](const RuntimeMemory* memory) { return memory->getMemoryPool(); }); - return request; +// Figures out how to place each of the input or outputs in a buffer. This just +// does the layout and memory allocation, it does not copy data. Aligns each +// input a bit. +static std::tuple<int, std::unique_ptr<MemoryAshmem>, std::vector<DataLocation>> +allocatePointerArgumentsToPool(const std::vector<ModelArgumentInfo>& args, + std::vector<const Memory*>* memories) { + CHECK(memories != nullptr); + std::vector<DataLocation> ptrArgsLocations; + const uint32_t nextPoolIndex = memories->size(); + int64_t total = 0; + for (const auto& info : args) { + if (info.state() == ModelArgumentInfo::POINTER) { + // TODO Good enough alignment? 
+ total += alignBytesNeeded(static_cast<uint32_t>(total), info.length()); + ptrArgsLocations.push_back({.poolIndex = nextPoolIndex, + .offset = static_cast<uint32_t>(total), + .length = info.length()}); + total += info.length(); + } + }; + if (total > 0xFFFFFFFF) { + LOG(ERROR) << "allocatePointerArgumentsToPool: ANeuralNetworksExecution: Size of all " + "inputs or outputs exceeds 2^32."; + return {ANEURALNETWORKS_BAD_DATA, nullptr, std::vector<DataLocation>{}}; + } + if (total <= 0) { + return {ANEURALNETWORKS_NO_ERROR, nullptr, std::vector<DataLocation>{}}; + } + auto [n, memory] = MemoryAshmem::create(total); + if (n != ANEURALNETWORKS_NO_ERROR) { + return {n, nullptr, std::vector<DataLocation>{}}; + } + memories->push_back(memory.get()); + return {ANEURALNETWORKS_NO_ERROR, std::move(memory), std::move(ptrArgsLocations)}; } -// Perform computation on an actual device driver. +// Perform computation on an actual HIDL driver. // // Because HIDL cannot take raw pointers, two separate memory pools will be allocated for inputs and // outputs specified by pointers. The input pointer data will be copied to the input pool prior to // execution, and the output pointer data will be copied out from the output pool after the // execution. +// +// The HIDL invocation will choose between sync/async execution according to +// DeviceManager::mSyncExecHal. 
std::tuple<int, std::vector<OutputShape>, Timing> DriverPreparedModel::execute( const std::vector<ModelArgumentInfo>& inputs, const std::vector<ModelArgumentInfo>& outputs, - const std::vector<const RuntimeMemory*>& memories, const SharedBurst& burstController, - MeasureTiming measure, const OptionalTimePoint& deadline, - const OptionalDuration& loopTimeoutDuration) const { + const std::vector<const Memory*>& memories, + const std::shared_ptr<ExecutionBurstController>& burstController, MeasureTiming measure, + const std::optional<Deadline>& deadline, + const OptionalTimeoutDuration& loopTimeoutDuration) const { NNTRACE_RT(NNTRACE_PHASE_INPUTS_AND_OUTPUTS, "DriverPreparedModel::execute"); - auto request = createDriverRequest(inputs, outputs, memories); + // Make a copy of the memory tracker as we will append memory pools for pointer arguments. + std::vector<const Memory*> localMemories = memories; - NNTRACE_RT_SWITCH(NNTRACE_PHASE_EXECUTION, "DriverPreparedModel::execute::execute"); + // We separate the input & output pools so accelerators only need to copy + // the contents of the input pools. We could also use it to set protection + // on read only memory but that's not currently done. 
- ExecutionResult<std::pair<std::vector<OutputShape>, Timing>> result; + // Layout the input and output data + const auto [n1, inputPtrArgsMemory, inputPtrArgsLocations] = + allocatePointerArgumentsToPool(inputs, &localMemories); + if (n1 != ANEURALNETWORKS_NO_ERROR) { + return {n1, {}, kNoTiming}; + } + const auto [n2, outputPtrArgsMemory, outputPtrArgsLocations] = + allocatePointerArgumentsToPool(outputs, &localMemories); + if (n2 != ANEURALNETWORKS_NO_ERROR) { + return {n2, {}, kNoTiming}; + } - // compute using burst if present, otherwise compute from IPreparedModel - const bool burstCompute = (burstController != nullptr); - if (burstCompute) { - for (const RuntimeMemory* memory : memories) { - const auto pool = memory->getMemoryPool(); - if (const auto* maybeMemory = std::get_if<SharedMemory>(&pool)) { - auto cacheHold = burstController->cacheMemory(*maybeMemory); - memory->hold(cacheHold); + // Copy the input data that was specified via a pointer. + if (inputPtrArgsMemory != nullptr) { + uint32_t ptrInputIndex = 0; + for (const auto& info : inputs) { + if (info.state() == ModelArgumentInfo::POINTER) { + const DataLocation& loc = inputPtrArgsLocations[ptrInputIndex++]; + uint8_t* const data = inputPtrArgsMemory->getPointer(); + memcpy(data + loc.offset, info.buffer(), loc.length); } } - - VLOG(EXECUTION) << "Before burstController->execute() " << SHOW_IF_DEBUG(request); - - result = burstController->execute(request, measure, deadline, loopTimeoutDuration); - } else { - result = mPreparedModel->execute(request, measure, deadline, loopTimeoutDuration); } + Request request; + request.inputs = createRequestArguments(inputs, inputPtrArgsLocations); + request.outputs = createRequestArguments(outputs, outputPtrArgsLocations); + uint32_t count = localMemories.size(); + request.pools.resize(count); + for (uint32_t i = 0; i < count; i++) { + request.pools[i] = localMemories[i]->getMemoryPool(); + } + + NNTRACE_FULL_SWITCH(NNTRACE_LAYER_IPC, NNTRACE_PHASE_EXECUTION, + 
"DriverPreparedModel::execute::execute"); + int n = ANEURALNETWORKS_OP_FAILED; std::vector<OutputShape> outputShapes; - Timing timing; + Timing timing = kNoTiming; - if (result.ok()) { - n = ANEURALNETWORKS_NO_ERROR; - std::tie(outputShapes, timing) = std::move(result).value(); - } else { - auto [message, code, returnedOutputShapes] = std::move(result).error(); - VLOG(EXECUTION) << "**Execution failed** (ResultCode = " << n << ")"; - LOG(ERROR) << (burstCompute ? "IBurst" : "IPreparedModel") - << "::execute(...) error: " << message; - n = convertErrorStatusToResultCode(code); - if (code == ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) { - outputShapes = std::move(returnedOutputShapes); + // compute using burst if present + const bool burstCompute = (burstController != nullptr); + bool burstFallback = true; + if (burstCompute) { + const bool compliant = compliantWithV1_2(request); + if (compliant) { + V1_0::Request request12 = convertToV1_2(request); + std::vector<intptr_t> memoryIds; + memoryIds.reserve(localMemories.size()); + for (const Memory* memory : localMemories) { + memory->usedBy(burstController); + memoryIds.push_back(memory->getKey()); + } + + VLOG(EXECUTION) << "Before ExecutionBurstController->compute() " + << SHOW_IF_DEBUG(toString(request12)); + std::tie(n, outputShapes, timing, burstFallback) = + burstController->compute(request12, measure, memoryIds); } + } + + // compute from IPreparedModel if either: + // (1) burst was not supplied, or + // (2) the burst execution failed and requested a fallback execution + if (!burstCompute || burstFallback) { + const bool preferSynchronous = DeviceManager::get()->syncExecHal(); + std::tie(n, outputShapes, timing) = mPreparedModel->execute( + request, measure, deadline, loopTimeoutDuration, preferSynchronous); + } + + if (n != ANEURALNETWORKS_NO_ERROR) { + VLOG(EXECUTION) << "**Execution failed**"; return {n, std::move(outputShapes), timing}; } + // Copy the output data from shared memory to the output buffers. 
+ NNTRACE_RT_SWITCH(NNTRACE_PHASE_RESULTS, "DriverPreparedModel::execute"); + if (outputPtrArgsMemory != nullptr) { + uint32_t ptrOutputIndex = 0; + for (const auto& info : outputs) { + if (info.state() == ModelArgumentInfo::POINTER) { + const DataLocation& loc = outputPtrArgsLocations[ptrOutputIndex++]; + const uint8_t* const data = outputPtrArgsMemory->getPointer(); + memcpy(info.buffer(), data + loc.offset, loc.length); + } + } + } + VLOG(EXECUTION) << "DriverPreparedModel::execute completed"; return {ANEURALNETWORKS_NO_ERROR, std::move(outputShapes), timing}; } -std::tuple<int, int, ExecuteFencedInfoCallback, Timing> DriverPreparedModel::executeFenced( +std::tuple<int, int, sp<hal::IFencedExecutionCallback>, hal::Timing> +DriverPreparedModel::executeFenced( const std::vector<ModelArgumentInfo>& inputs, const std::vector<ModelArgumentInfo>& outputs, - const std::vector<const RuntimeMemory*>& memories, const std::vector<int>& waitFor, - MeasureTiming measure, const OptionalTimePoint& deadline, - const OptionalDuration& loopTimeoutDuration, - const OptionalDuration& timeoutDurationAfterFence) const { + const std::vector<const Memory*>& memories, const std::vector<int>& waitFor, + hal::MeasureTiming measure, const std::optional<Deadline>& deadline, + const OptionalTimeoutDuration& loopTimeoutDuration, + const hal::OptionalTimeoutDuration& timeoutDurationAfterFence) const { NNTRACE_RT(NNTRACE_PHASE_INPUTS_AND_OUTPUTS, "DriverPreparedModel::executeFenced"); - CHECK(std::all_of(waitFor.begin(), waitFor.end(), [](int fd) { return fd >= 0; })); + CHECK(std::all_of(waitFor.begin(), waitFor.end(), [](int fd) { return fd > 0; })); + // Make a copy of the memory tracker as we will append memory pools for pointer arguments. 
+ std::vector<const Memory*> localMemories = memories; + sp<hal::IFencedExecutionCallback> executeFencedCallback; + hal::Timing timing = kNoTiming; - auto request = createDriverRequest(inputs, outputs, memories); + // We separate the input & output pools so accelerators only need to copy + // the contents of the input pools. We could also use it to set protection + // on read only memory but that's not currently done. - NNTRACE_RT_SWITCH(NNTRACE_PHASE_EXECUTION, "DriverPreparedModel::executeFenced"); - - std::vector<SyncFence> waitForHandles; - waitForHandles.reserve(waitFor.size()); - for (int fd : waitFor) { - int dupFd = dup(fd); - if (dupFd < 0) { - LOG(ERROR) << "Unable to dup the file descriptor"; - return {ANEURALNETWORKS_OP_FAILED, -1, nullptr, {}}; - } - waitForHandles.push_back(SyncFence::create(base::unique_fd(dupFd))); + // Layout the input and output data + const auto [n1, inputPtrArgsMemory, inputPtrArgsLocations] = + allocatePointerArgumentsToPool(inputs, &localMemories); + if (n1 != ANEURALNETWORKS_NO_ERROR) { + return {n1, -1, nullptr, timing}; + } + const auto [n2, outputPtrArgsMemory, outputPtrArgsLocations] = + allocatePointerArgumentsToPool(outputs, &localMemories); + if (n2 != ANEURALNETWORKS_NO_ERROR) { + return {n2, -1, nullptr, timing}; } - SyncFence syncFence = SyncFence::createAsSignaled(); - ExecuteFencedInfoCallback executeFencedInfoCallback = nullptr; - Timing timing = {}; - if (mDevice->getFeatureLevel() >= kHalVersionV1_3ToApi.featureLevel) { - auto result = mPreparedModel->executeFenced(request, waitForHandles, measure, deadline, - loopTimeoutDuration, timeoutDurationAfterFence); - if (!result.ok()) { - LOG(ERROR) << "IPreparedModel::executeFenced() error: " << result.error().message; - VLOG(EXECUTION) << "**executeFenced failed**"; - return {convertErrorStatusToResultCode(result.error().code), -1, nullptr, {}}; - } - std::tie(syncFence, executeFencedInfoCallback) = std::move(result).value(); - } else { - // Fallback to synchronous 
execution if executeFenced is not supported. - // First wait for all sync fences to be ready. - LOG(INFO) << "No drivers able to handle sync fences, falling back to regular execution"; - for (const auto& fence : waitForHandles) { - if (!fence.hasFd() || fence.getFd() < 0) { - return {ANEURALNETWORKS_BAD_DATA, -1, nullptr, {}}; - } - auto r = fence.syncWait({/* no timeout */}); - if (r != SyncFence::FenceState::SIGNALED) { - LOG(ERROR) << "syncWait failed, fd: " << fence.getFd() << ", state: " << r; - return {ANEURALNETWORKS_OP_FAILED, -1, nullptr, {}}; + // Copy the input data that was specified via a pointer. + if (inputPtrArgsMemory != nullptr) { + uint32_t ptrInputIndex = 0; + for (const auto& info : inputs) { + if (info.state() == ModelArgumentInfo::POINTER) { + const DataLocation& loc = inputPtrArgsLocations[ptrInputIndex++]; + uint8_t* const data = inputPtrArgsMemory->getPointer(); + memcpy(data + loc.offset, info.buffer(), loc.length); } } - auto result = mPreparedModel->execute(request, measure, deadline, loopTimeoutDuration); - if (!result.ok()) { - LOG(ERROR) << "IPreparedModel::execute() error: " << result.error().message; - return {convertErrorStatusToResultCode(result.error().code), -1, nullptr, {}}; + } + + Request request; + request.inputs = createRequestArguments(inputs, inputPtrArgsLocations); + request.outputs = createRequestArguments(outputs, outputPtrArgsLocations); + uint32_t count = localMemories.size(); + request.pools.resize(count); + for (uint32_t i = 0; i < count; i++) { + request.pools[i] = localMemories[i]->getMemoryPool(); + } + + NNTRACE_FULL_SWITCH(NNTRACE_LAYER_IPC, NNTRACE_PHASE_EXECUTION, + "DriverPreparedModel::executeFenced"); + + int n = ANEURALNETWORKS_OP_FAILED; + hidl_vec<hidl_handle> waitForHandles; + waitForHandles.resize(waitFor.size()); + for (uint32_t i = 0; i < waitFor.size(); i++) { + native_handle_t* nativeHandle = native_handle_create(1, 0); + if (nativeHandle == nullptr) { + LOG(ERROR) << "Failed to create 
native_handle"; + return {n, -1, nullptr, timing}; } - std::tie(std::ignore, timing) = result.value(); + int dupFd = dup(waitFor[i]); + if (dupFd <= 0) { + LOG(ERROR) << "Unable to dup the file descriptor"; + return {n, -1, nullptr, timing}; + } + nativeHandle->data[0] = dupFd; + hidl_handle hidlHandle; + hidlHandle.setTo(nativeHandle, /*shouldOwn=*/true); + waitForHandles[i] = std::move(hidlHandle); + } + + hidl_handle syncFence; + std::tie(n, syncFence, executeFencedCallback, timing) = + mPreparedModel->executeFenced(request, waitForHandles, measure, deadline, + loopTimeoutDuration, timeoutDurationAfterFence); + + if (n != ANEURALNETWORKS_NO_ERROR) { + VLOG(EXECUTION) << "**executeFenced failed**"; + return {n, -1, nullptr, timing}; } int syncFenceFd = -1; - if (syncFence.hasFd()) { - syncFenceFd = dup(syncFence.getFd()); + if (syncFence.getNativeHandle()) { + syncFenceFd = dup(syncFence.getNativeHandle()->data[0]); if (syncFenceFd < 0) { LOG(ERROR) << "Failed to dup the file descriptor"; return {ANEURALNETWORKS_OP_FAILED, -1, nullptr, timing}; } } + // If output buffer is provided as a malloc pointer, wait for the execution to finish. + // Then copy the output data from shared memory to the output buffers. 
+ if (outputPtrArgsMemory != nullptr) { + NNTRACE_RT_SWITCH(NNTRACE_PHASE_RESULTS, "DriverPreparedModel::executeFenced"); + if (syncFenceFd > 0) { + auto r = syncWait(syncFenceFd, -1); + if (r != FenceState::SIGNALED) { + LOG(ERROR) << "syncWait failed, fd: " << syncFenceFd; + return {ANEURALNETWORKS_OP_FAILED, syncFenceFd, nullptr, timing}; + } + } + uint32_t ptrOutputIndex = 0; + for (const auto& info : outputs) { + if (info.state() == ModelArgumentInfo::POINTER) { + const DataLocation& loc = outputPtrArgsLocations[ptrOutputIndex++]; + const uint8_t* const data = outputPtrArgsMemory->getPointer(); + memcpy(info.buffer(), data + loc.offset, loc.length); + } + } + } VLOG(EXECUTION) << "DriverPreparedModel::executeFenced completed"; - return {ANEURALNETWORKS_NO_ERROR, syncFenceFd, executeFencedInfoCallback, timing}; -} - -std::pair<int, std::shared_ptr<RuntimeExecution>> DriverPreparedModel::createReusableExecution( - const std::vector<ModelArgumentInfo>& inputs, const std::vector<ModelArgumentInfo>& outputs, - const std::vector<const RuntimeMemory*>& memories, MeasureTiming measure, - const OptionalDuration& loopTimeoutDuration) const { - NNTRACE_RT(NNTRACE_PHASE_INPUTS_AND_OUTPUTS, "DriverPreparedModel::createReusableExecution"); - - auto request = createDriverRequest(inputs, outputs, memories); - auto result = mPreparedModel->createReusableExecution(request, measure, loopTimeoutDuration); - if (!result.ok()) { - LOG(ERROR) << "IPreparedModel::createReusableExecution() error: " << result.error().message; - const int n = convertErrorStatusToResultCode(result.error().code); - return {n, nullptr}; - } - auto execution = std::make_shared<DriverExecution>( - std::move(result).value(), std::move(request), memories, measure, loopTimeoutDuration, - mDevice->getFeatureLevel()); - return {ANEURALNETWORKS_NO_ERROR, std::move(execution)}; -} - -std::tuple<int, std::vector<OutputShape>, Timing> DriverExecution::compute( - const SharedBurst& burstController, const 
OptionalTimePoint& deadline) const { - NNTRACE_RT(NNTRACE_PHASE_EXECUTION, "DriverExecution::compute"); - - // compute using burst if present, otherwise compute from IPreparedModel - SharedExecution execution; - const bool burstCompute = (burstController != nullptr); - if (burstCompute) { - // create a reusable burst execution if the controller is not seen before - auto burstExecution = mCachedBurstExecutions.find(burstController.get()); - if (burstExecution == mCachedBurstExecutions.end()) { - for (const RuntimeMemory* memory : kMemories) { - const auto pool = memory->getMemoryPool(); - if (const auto* maybeMemory = std::get_if<SharedMemory>(&pool)) { - auto cacheHold = burstController->cacheMemory(*maybeMemory); - memory->hold(cacheHold); - } - } - auto createResult = burstController->createReusableExecution(kRequest, kMeasure, - kLoopTimeoutDuration); - if (!createResult.ok()) { - LOG(ERROR) << "IBurst::createReusableExecution() error: " - << createResult.error().message; - const int n = convertErrorStatusToResultCode(createResult.error().code); - return {n, {}, {}}; - } - execution = std::move(createResult).value(); - mCachedBurstExecutions.emplace(burstController.get(), execution); - } else { - execution = burstExecution->second; - } - VLOG(EXECUTION) << "Before mBurstExecution->compute() " << SHOW_IF_DEBUG(kRequest); - } else { - execution = kExecution; - } - - CHECK(execution != nullptr); - auto result = execution->compute(deadline); - if (!result.ok()) { - auto [message, code, returnedOutputShapes] = std::move(result).error(); - int n = convertErrorStatusToResultCode(code); - VLOG(EXECUTION) << "**Execution failed** (ResultCode = " << n << ")"; - LOG(ERROR) << (burstCompute ? "IBurst" : "IPreparedModel") - << "::execute(...) 
error: " << message; - if (code == ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) { - return {n, std::move(returnedOutputShapes), {}}; - } - return {n, {}, {}}; - } - - VLOG(EXECUTION) << "DriverExecution::compute completed"; - auto [outputShapes, timing] = std::move(result).value(); - return {ANEURALNETWORKS_NO_ERROR, std::move(outputShapes), timing}; -} - -std::tuple<int, int, ExecuteFencedInfoCallback, Timing> DriverExecution::computeFenced( - const std::vector<int>& waitFor, const OptionalTimePoint& deadline, - const OptionalDuration& timeoutDurationAfterFence) const { - NNTRACE_RT(NNTRACE_PHASE_EXECUTION, "DriverExecution::computeFenced"); - CHECK(std::all_of(waitFor.begin(), waitFor.end(), [](int fd) { return fd >= 0; })); - - std::vector<SyncFence> waitForHandles; - waitForHandles.reserve(waitFor.size()); - for (int fd : waitFor) { - int dupFd = dup(fd); - if (dupFd < 0) { - LOG(ERROR) << "Unable to dup the file descriptor"; - return {ANEURALNETWORKS_OP_FAILED, -1, nullptr, {}}; - } - waitForHandles.push_back(SyncFence::create(base::unique_fd(dupFd))); - } - - SyncFence syncFence = SyncFence::createAsSignaled(); - ExecuteFencedInfoCallback executeFencedInfoCallback = nullptr; - Timing timing = {}; - if (kDeviceFeatureLevel >= kHalVersionV1_3ToApi.featureLevel) { - auto result = - kExecution->computeFenced(waitForHandles, deadline, timeoutDurationAfterFence); - if (!result.ok()) { - LOG(ERROR) << "IExecution::computeFenced() error: " << result.error().message; - VLOG(EXECUTION) << "**computeFenced failed**"; - return {convertErrorStatusToResultCode(result.error().code), -1, nullptr, {}}; - } - std::tie(syncFence, executeFencedInfoCallback) = std::move(result).value(); - } else { - // Fallback to synchronous execution if computeFenced is not supported. - // First wait for all sync fences to be ready. 
- LOG(INFO) << "No drivers able to handle sync fences, falling back to regular execution"; - for (const auto& fence : waitForHandles) { - if (!fence.hasFd() || fence.getFd() < 0) { - return {ANEURALNETWORKS_BAD_DATA, -1, nullptr, {}}; - } - auto r = fence.syncWait({/* no timeout */}); - if (r != SyncFence::FenceState::SIGNALED) { - LOG(ERROR) << "syncWait failed, fd: " << fence.getFd() << ", state: " << r; - return {ANEURALNETWORKS_OP_FAILED, -1, nullptr, {}}; - } - } - auto result = kExecution->compute(deadline); - if (!result.ok()) { - LOG(ERROR) << "IExecution::compute() error: " << result.error().message; - return {convertErrorStatusToResultCode(result.error().code), -1, nullptr, {}}; - } - std::tie(std::ignore, timing) = result.value(); - } - - int syncFenceFd = -1; - if (syncFence.hasFd()) { - syncFenceFd = dup(syncFence.getFd()); - if (syncFenceFd < 0) { - LOG(ERROR) << "Failed to dup the file descriptor"; - return {ANEURALNETWORKS_OP_FAILED, -1, nullptr, timing}; - } - } - - VLOG(EXECUTION) << "DriverExecution::computeFenced completed"; - return {ANEURALNETWORKS_NO_ERROR, syncFenceFd, executeFencedInfoCallback, timing}; -} - -static Capabilities createCpuCapabilities() { - constexpr Capabilities::PerformanceInfo kPerf = {.execTime = 1.0f, .powerUsage = 1.0f}; - constexpr OperandType operandTypes[] = { - OperandType::FLOAT32, - OperandType::INT32, - OperandType::UINT32, - OperandType::TENSOR_FLOAT32, - OperandType::TENSOR_INT32, - OperandType::TENSOR_QUANT8_ASYMM, - OperandType::BOOL, - OperandType::TENSOR_QUANT16_SYMM, - OperandType::TENSOR_FLOAT16, - OperandType::TENSOR_BOOL8, - OperandType::FLOAT16, - OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL, - OperandType::TENSOR_QUANT16_ASYMM, - OperandType::TENSOR_QUANT8_SYMM, - OperandType::TENSOR_QUANT8_ASYMM_SIGNED, - }; - - std::vector<Capabilities::OperandPerformance> operandPerformance; - operandPerformance.reserve(std::size(operandTypes)); - std::transform(std::begin(operandTypes), std::end(operandTypes), - 
std::back_inserter(operandPerformance), [kPerf](OperandType type) { - return Capabilities::OperandPerformance{.type = type, .info = kPerf}; - }); - - auto table = - Capabilities::OperandPerformanceTable::create(std::move(operandPerformance)).value(); - - return Capabilities{ - .relaxedFloat32toFloat16PerformanceScalar = kPerf, - .relaxedFloat32toFloat16PerformanceTensor = kPerf, - .operandPerformance = std::move(table), - .ifPerformance = kPerf, - .whilePerformance = kPerf, - }; + return {ANEURALNETWORKS_NO_ERROR, syncFenceFd, executeFencedCallback, timing}; } // A special abstracted device for the CPU. Only one instance of this class will exist. @@ -837,163 +558,107 @@ const std::string& getVersionString() const override { return kVersionString; } int64_t getFeatureLevel() const override { return kFeatureLevel; } int32_t getType() const override { return ANEURALNETWORKS_DEVICE_CPU; } - bool isUpdatable() const override { return false; } const std::vector<Extension>& getSupportedExtensions() const override { return kSupportedExtensions; } std::vector<bool> getSupportedOperations(const MetaModel& metaModel) const override; - const Capabilities& getCapabilities() const override { return kCapabilities; } - Capabilities::PerformanceInfo getPerformance(OperandType) const override { + PerformanceInfo getPerformance(OperandType) const override { return kPerformance; } + PerformanceInfo getRelaxedFloat32toFloat16PerformanceScalar() const override { return kPerformance; } - Capabilities::PerformanceInfo getRelaxedFloat32toFloat16PerformanceScalar() const override { + PerformanceInfo getRelaxedFloat32toFloat16PerformanceTensor() const override { return kPerformance; } - Capabilities::PerformanceInfo getRelaxedFloat32toFloat16PerformanceTensor() const override { - return kPerformance; - } - Capabilities::PerformanceInfo getIfPerformance() const override { return kPerformance; } - Capabilities::PerformanceInfo getWhilePerformance() const override { return kPerformance; } - 
std::pair<uint32_t, uint32_t> getNumberOfCacheFilesNeeded() const override { - return {/*numModelCache=*/0, /*numDataCache=*/0}; - } + PerformanceInfo getIfPerformance() const override { return kPerformance; } + PerformanceInfo getWhilePerformance() const override { return kPerformance; } bool isCachingSupported() const override { return false; } int wait() const override { return ANEURALNETWORKS_NO_ERROR; } - std::pair<int, std::shared_ptr<RuntimePreparedModel>> prepareModel( + std::pair<int, std::shared_ptr<PreparedModel>> prepareModel( const ModelFactory& makeModel, ExecutionPreference preference, Priority priority, - const OptionalTimePoint& deadline, const CacheInfo& cacheInfo, + const std::optional<Deadline>& deadline, const std::string& cacheDir, const std::optional<CacheToken>& maybeToken) const override; - std::pair<int, std::unique_ptr<RuntimeMemory>> allocate(const MemoryDescriptor& desc, - OperandType type) const override; + std::pair<int, std::unique_ptr<Memory>> allocate(const MemoryDescriptor& desc, + OperandType type) const override; private: CpuDevice() = default; - const int64_t kFeatureLevel = kCurrentNNAPIRuntimeFeatureLevel; + const int64_t kFeatureLevel = __ANDROID_API__; const std::string kName = "nnapi-reference"; -#ifndef NN_COMPATIBILITY_LIBRARY_BUILD const std::string kVersionString = build::GetBuildNumber(); -#else - const std::string kVersionString = "UNKNOWN"; -#endif // NN_COMPATIBILITY_LIBRARY_BUILD // Since the performance is a ratio compared to the CPU performance, // by definition the performance of the CPU is 1.0. - const Capabilities::PerformanceInfo kPerformance = {.execTime = 1.0f, .powerUsage = 1.0f}; - const Capabilities kCapabilities = createCpuCapabilities(); + const PerformanceInfo kPerformance = {.execTime = 1.0f, .powerUsage = 1.0f}; const std::vector<Extension> kSupportedExtensions{/* No extensions. */}; }; -// A special abstracted RuntimePreparedModel for the CPU, constructed by CpuDevice. 
-class CpuPreparedModel : public RuntimePreparedModel { +// A special abstracted PreparedModel for the CPU, constructed by CpuDevice. +class CpuPreparedModel : public PreparedModel { public: // Factory method for CpuPreparedModel. Returns ANEURALNETWORKS_NO_ERROR and // a prepared model object if successfully created. Returns an error code // and nullptr otherwise. - static std::pair<int, std::shared_ptr<RuntimePreparedModel>> create(Model model); + static std::pair<int, std::shared_ptr<PreparedModel>> create(Model hidlModel); const Device* getDevice() const override { return CpuDevice::get().get(); } - SharedPreparedModel getInterface() const override { return nullptr; } + std::shared_ptr<VersionedIPreparedModel> getInterface() const override { return nullptr; } std::tuple<int, std::vector<OutputShape>, Timing> execute( const std::vector<ModelArgumentInfo>& inputs, const std::vector<ModelArgumentInfo>& outputs, - const std::vector<const RuntimeMemory*>& memories, const SharedBurst& burstController, - MeasureTiming measure, const OptionalTimePoint& deadline, - const OptionalDuration& loopTimeoutDuration) const override; + const std::vector<const Memory*>& memories, + const std::shared_ptr<ExecutionBurstController>& burstController, MeasureTiming measure, + const std::optional<Deadline>& deadline, + const OptionalTimeoutDuration& loopTimeoutDuration) const override; - GeneralResult<SharedBurst> configureExecutionBurst() const override { return nullptr; } - - std::tuple<int, int, ExecuteFencedInfoCallback, Timing> executeFenced( - const std::vector<ModelArgumentInfo>& inputs, - const std::vector<ModelArgumentInfo>& outputs, - const std::vector<const RuntimeMemory*>& memories, const std::vector<int>& waitFor, - MeasureTiming measure, const OptionalTimePoint& deadline, - const OptionalDuration& loopTimeoutDuration, - const OptionalDuration& timeoutDurationAfterFence) const override; - - std::pair<int, std::shared_ptr<RuntimeExecution>> createReusableExecution( - const 
std::vector<ModelArgumentInfo>& inputs, - const std::vector<ModelArgumentInfo>& outputs, - const std::vector<const RuntimeMemory*>& memories, MeasureTiming measure, - const OptionalDuration& loopTimeoutDuration) const override; - - MemoryPreference getMemoryPreference() const override { - return {kPreferredAlignment, kPreferredPadding}; + std::shared_ptr<ExecutionBurstController> configureExecutionBurst( + bool /*preferPowerOverLatency*/) const override { + return nullptr; } + std::tuple<int, int, sp<hal::IFencedExecutionCallback>, hal::Timing> executeFenced( + const std::vector<ModelArgumentInfo>& inputs, + const std::vector<ModelArgumentInfo>& outputs, + const std::vector<const Memory*>& memories, const std::vector<int>& wait_for, + MeasureTiming measure, const std::optional<Deadline>& deadline, + const OptionalTimeoutDuration& loopTimeoutDuration, + const hal::OptionalTimeoutDuration& timeoutDurationAfterFence) const override; + // Prefer to use CpuPreparedModel::create. CpuPreparedModel(Model model, std::vector<RunTimePoolInfo> poolInfos) : mModel(std::move(model)), mModelPoolInfos(std::move(poolInfos)) {} - const Model& getModel() const { return mModel; } - const std::vector<RunTimePoolInfo>& getModelPoolInfos() const { return mModelPoolInfos; } - private: - // TFLite kernels prefers 64 bytes for padding and alignment. 
- static constexpr uint32_t kPreferredAlignment = 64; - static constexpr uint32_t kPreferredPadding = 64; - const Model mModel; const std::vector<RunTimePoolInfo> mModelPoolInfos; }; -class CpuExecution : public RuntimeExecution { - public: - CpuExecution(const CpuPreparedModel& preparedModel, Request request, - std::vector<RunTimePoolInfo> requestPoolInfos, - OptionalDuration loopTimeoutDuration) - : kPreparedModel(preparedModel), - kRequest(std::move(request)), - kRequestPoolInfos(std::move(requestPoolInfos)), - kLoopTimeoutDuration(std::move(loopTimeoutDuration)) {} - - std::tuple<int, std::vector<OutputShape>, Timing> compute( - const SharedBurst& burstController, const OptionalTimePoint& deadline) const override; - - std::tuple<int, int, ExecuteFencedInfoCallback, Timing> computeFenced( - const std::vector<int>& waitFor, const OptionalTimePoint& deadline, - const OptionalDuration& timeoutDurationAfterFence) const override; - - private: - const CpuPreparedModel& kPreparedModel; - Request kRequest; - std::vector<RunTimePoolInfo> kRequestPoolInfos; - const OptionalDuration kLoopTimeoutDuration; -}; - std::vector<bool> CpuDevice::getSupportedOperations(const MetaModel& metaModel) const { - const Model& model = metaModel.getModel(); - const size_t count = model.main.operations.size(); + const Model& hidlModel = metaModel.getModel(); + const size_t count = hidlModel.main.operations.size(); std::vector<bool> result(count, false); for (size_t i = 0; i < count; i++) { // TODO(b/119870033): Decide whether and how post-P operations would be supported on CPU. // We may want to use the slicer for CpuDevice just as we do for // DriverDevice. 
- OperationType operationType = model.main.operations[i].type; - result[i] = !isExtension(operationType) && operationType != OperationType::OEM_OPERATION; + OperationType operationType = hidlModel.main.operations[i].type; + result[i] = !isExtensionOperationType(operationType) && + operationType != OperationType::OEM_OPERATION; } return result; } -std::pair<int, std::shared_ptr<RuntimePreparedModel>> CpuDevice::prepareModel( +std::pair<int, std::shared_ptr<PreparedModel>> CpuDevice::prepareModel( const ModelFactory& makeModel, ExecutionPreference preference, Priority priority, - const OptionalTimePoint& deadline, const CacheInfo& /*cacheInfo*/, + const std::optional<Deadline>& deadline, const std::string& /*cacheDir*/, const std::optional<CacheToken>& maybeToken) const { CHECK(!maybeToken.has_value()) << "Should never call prepareModel with cache information on CpuDevice"; const Model model = makeModel(); - if (auto result = validate(model); !result.ok()) { - LOG(ERROR) << "Invalid Model: " << result.error(); - return {ANEURALNETWORKS_OP_FAILED, nullptr}; - } - if (auto result = validate(preference); !result.ok()) { - LOG(ERROR) << "Invalid ExecutionPreference: " << result.error(); - return {ANEURALNETWORKS_OP_FAILED, nullptr}; - } - if (auto result = validate(priority); !result.ok()) { - LOG(ERROR) << "Invalid Priority: " << result.error(); + if (!validateModel(model, ValidationMode::RUNTIME) || + !validateExecutionPreference(preference) || !validatePriority(priority)) { return {ANEURALNETWORKS_OP_FAILED, nullptr}; } if (hasDeadlinePassed(deadline)) { @@ -1003,8 +668,8 @@ return CpuPreparedModel::create(model); } -std::pair<int, std::unique_ptr<RuntimeMemory>> CpuDevice::allocate(const MemoryDescriptor& desc, - OperandType type) const { +std::pair<int, std::unique_ptr<Memory>> CpuDevice::allocate(const MemoryDescriptor& desc, + OperandType type) const { uint32_t size = TypeManager::get()->getSizeOfData(type, desc.dimensions); if (size == 0) { LOG(ERROR) << 
"CpuDevice::allocate -- does not support unknown dimensions."; @@ -1013,40 +678,45 @@ return MemoryAshmem::create(size); } -std::pair<int, std::shared_ptr<RuntimePreparedModel>> CpuPreparedModel::create(Model model) { +std::pair<int, std::shared_ptr<PreparedModel>> CpuPreparedModel::create(Model hidlModel) { std::vector<RunTimePoolInfo> poolInfos; - if (!setRunTimePoolInfosFromCanonicalMemories(&poolInfos, model.pools)) { + if (!setRunTimePoolInfosFromHidlMemories(&poolInfos, hidlModel.pools)) { return {ANEURALNETWORKS_UNMAPPABLE, nullptr}; } - std::shared_ptr<RuntimePreparedModel> preparedModel = - std::make_shared<CpuPreparedModel>(std::move(model), std::move(poolInfos)); + std::shared_ptr<PreparedModel> preparedModel = + std::make_shared<CpuPreparedModel>(std::move(hidlModel), std::move(poolInfos)); return {ANEURALNETWORKS_NO_ERROR, std::move(preparedModel)}; } static std::tuple<int, std::vector<OutputShape>, Timing> computeOnCpu( const Model& model, const Request& request, const std::vector<RunTimePoolInfo>& modelPoolInfos, - const std::vector<RunTimePoolInfo>& requestPoolInfos, const OptionalTimePoint& deadline, - const OptionalDuration& loopTimeoutDuration) { + const std::vector<RunTimePoolInfo>& requestPoolInfos, + const std::optional<Deadline>& deadline, + const OptionalTimeoutDuration& loopTimeoutDuration) { NNTRACE_RT(NNTRACE_PHASE_EXECUTION, "computeOnCpu"); CpuExecutor executor; - if (loopTimeoutDuration.has_value()) { - executor.setLoopTimeout(loopTimeoutDuration->count()); + if (loopTimeoutDuration.getDiscriminator() != + OptionalTimeoutDuration::hidl_discriminator::none) { + executor.setLoopTimeout(loopTimeoutDuration.nanoseconds()); } if (deadline.has_value()) { executor.setDeadline(*deadline); } int err = executor.run(model, request, modelPoolInfos, requestPoolInfos); const auto& outputShapes = executor.getOutputShapes(); - return {err, outputShapes, {}}; + return {err, outputShapes, kNoTiming}; } -std::tuple<int, int, ExecuteFencedInfoCallback, 
Timing> CpuPreparedModel::executeFenced( - const std::vector<ModelArgumentInfo>& inputs, const std::vector<ModelArgumentInfo>& outputs, - const std::vector<const RuntimeMemory*>& memories, const std::vector<int>& waitFor, - MeasureTiming measure, const OptionalTimePoint& deadline, - const OptionalDuration& loopTimeoutDuration, const OptionalDuration& duration) const { +std::tuple<int, int, sp<hal::IFencedExecutionCallback>, hal::Timing> +CpuPreparedModel::executeFenced(const std::vector<ModelArgumentInfo>& inputs, + const std::vector<ModelArgumentInfo>& outputs, + const std::vector<const Memory*>& memories, + const std::vector<int>& waitFor, hal::MeasureTiming measure, + const std::optional<Deadline>& deadline, + const OptionalTimeoutDuration& loopTimeoutDuration, + const hal::OptionalTimeoutDuration& duration) const { VLOG(EXECUTION) << "CpuPreparedModel::executeFenced wait for sync fences to signal before execution"; for (int syncFd : waitFor) { @@ -1054,15 +724,15 @@ auto r = syncWait(syncFd, -1); if (r != FenceState::SIGNALED) { LOG(ERROR) << "sync wait failed, fd: " << syncFd; - return {ANEURALNETWORKS_OP_FAILED, -1, nullptr, {}}; + return {ANEURALNETWORKS_OP_FAILED, -1, nullptr, {UINT64_MAX, UINT64_MAX}}; } } } // Update deadline if the timeout duration is closer than the deadline. auto closestDeadline = deadline; - if (duration.has_value()) { - const auto timeoutDurationDeadline = makeDeadline(*duration); + if (duration.getDiscriminator() != OptionalTimeoutDuration::hidl_discriminator::none) { + const auto timeoutDurationDeadline = makeDeadline(duration.nanoseconds()); if (!closestDeadline.has_value() || *closestDeadline > timeoutDurationDeadline) { closestDeadline = timeoutDurationDeadline; } @@ -1073,16 +743,30 @@ return {result, -1, nullptr, timing}; } -static std::tuple<int, Request, std::vector<RunTimePoolInfo>> createCpuRequest( +// Perform computation on NNAPI CPU reference implementation. 
+// +// Contrary to DriverPreparedModel::execute, the NNAPI CPU reference executor lives in the +// same process as the NNAPI runtime and can take raw pointers. We will create as many pools as +// there are input/output in this method to avoid data copying. +// +// Will choose between sync/async execution according to DeviceManager::mSyncExecCpu. +std::tuple<int, std::vector<OutputShape>, Timing> CpuPreparedModel::execute( const std::vector<ModelArgumentInfo>& inputs, const std::vector<ModelArgumentInfo>& outputs, - const std::vector<const RuntimeMemory*>& memories) { + const std::vector<const Memory*>& memories, + const std::shared_ptr<ExecutionBurstController>& /*burstController*/, + MeasureTiming /*measure*/, const std::optional<Deadline>& deadline, + const OptionalTimeoutDuration& loopTimeoutDuration) const { + if (hasDeadlinePassed(deadline)) { + return {ANEURALNETWORKS_MISSED_DEADLINE_PERSISTENT, {}, kNoTiming}; + } + std::vector<RunTimePoolInfo> requestPoolInfos; requestPoolInfos.reserve(memories.size()); - for (const RuntimeMemory* mem : memories) { + for (const Memory* mem : memories) { if (std::optional<RunTimePoolInfo> poolInfo = mem->getRunTimePoolInfo()) { requestPoolInfos.emplace_back(*poolInfo); } else { - return {ANEURALNETWORKS_UNMAPPABLE, {}, {}}; + return {ANEURALNETWORKS_UNMAPPABLE, {}, kNoTiming}; } } // Create as many pools as there are input / output. 
@@ -1094,8 +778,7 @@ ptrArgsLocations.push_back( {.poolIndex = static_cast<uint32_t>(requestPoolInfos.size()), .offset = 0, - .length = argumentInfo.length(), - .padding = argumentInfo.padding()}); + .length = argumentInfo.length()}); requestPoolInfos.emplace_back(RunTimePoolInfo::createFromExistingBuffer( static_cast<uint8_t*>(argumentInfo.buffer()))); } @@ -1108,33 +791,6 @@ Request request; request.inputs = createRequestArguments(inputs, inputPtrArgsLocations); request.outputs = createRequestArguments(outputs, outputPtrArgsLocations); - return {ANEURALNETWORKS_NO_ERROR, std::move(request), std::move(requestPoolInfos)}; -} - -// Perform computation on NNAPI CPU reference implementation. -// -// Contrary to DriverPreparedModel::execute, the NNAPI CPU reference executor lives in the -// same process as the NNAPI runtime and can take raw pointers. We will create as many pools as -// there are input/output in this method to avoid data copying. -// -// Will choose between sync/async execution according to DeviceManager::mSyncExecCpu. 
-std::tuple<int, std::vector<OutputShape>, Timing> CpuPreparedModel::execute( - const std::vector<ModelArgumentInfo>& inputs, const std::vector<ModelArgumentInfo>& outputs, - const std::vector<const RuntimeMemory*>& memories, const SharedBurst& /*burstController*/, - MeasureTiming /*measure*/, const OptionalTimePoint& deadline, - const OptionalDuration& loopTimeoutDuration) const { - if (hasDeadlinePassed(deadline)) { - return {ANEURALNETWORKS_MISSED_DEADLINE_PERSISTENT, {}, {}}; - } - - int nCreateRequest; - Request request; - std::vector<RunTimePoolInfo> requestPoolInfos; - std::tie(nCreateRequest, request, requestPoolInfos) = - createCpuRequest(inputs, outputs, memories); - if (nCreateRequest != ANEURALNETWORKS_NO_ERROR) { - return {nCreateRequest, {}, {}}; - } if (!DeviceManager::get()->syncExecCpu()) { // TODO: use a thread pool @@ -1152,70 +808,6 @@ loopTimeoutDuration); } -std::pair<int, std::shared_ptr<RuntimeExecution>> CpuPreparedModel::createReusableExecution( - const std::vector<ModelArgumentInfo>& inputs, const std::vector<ModelArgumentInfo>& outputs, - const std::vector<const RuntimeMemory*>& memories, MeasureTiming /*measure*/, - const OptionalDuration& loopTimeoutDuration) const { - auto [nCreateRequest, request, requestPoolInfos] = createCpuRequest(inputs, outputs, memories); - if (nCreateRequest != ANEURALNETWORKS_NO_ERROR) { - return {nCreateRequest, nullptr}; - } - auto execution = std::make_shared<CpuExecution>( - *this, std::move(request), std::move(requestPoolInfos), loopTimeoutDuration); - return {ANEURALNETWORKS_NO_ERROR, std::move(execution)}; -} - -std::tuple<int, std::vector<OutputShape>, Timing> CpuExecution::compute( - const SharedBurst& /*burstController*/, const OptionalTimePoint& deadline) const { - if (hasDeadlinePassed(deadline)) { - return {ANEURALNETWORKS_MISSED_DEADLINE_PERSISTENT, {}, {}}; - } - - if (!DeviceManager::get()->syncExecCpu()) { - // TODO: use a thread pool - // TODO(mikie): this could have NNTRACE so we could 
measure the overhead - // of spinning up a new thread. - std::tuple<int, std::vector<OutputShape>, Timing> result = {}; - std::thread([this, &deadline, &result] { - result = computeOnCpu(kPreparedModel.getModel(), kRequest, - kPreparedModel.getModelPoolInfos(), kRequestPoolInfos, deadline, - kLoopTimeoutDuration); - }).join(); - return result; - } - - return computeOnCpu(kPreparedModel.getModel(), kRequest, kPreparedModel.getModelPoolInfos(), - kRequestPoolInfos, deadline, kLoopTimeoutDuration); -} - -std::tuple<int, int, ExecuteFencedInfoCallback, Timing> CpuExecution::computeFenced( - const std::vector<int>& waitFor, const OptionalTimePoint& deadline, - const OptionalDuration& duration) const { - VLOG(EXECUTION) - << "CpuExecution::computeFenced wait for sync fences to signal before execution"; - for (int syncFd : waitFor) { - if (syncFd > 0) { - auto r = syncWait(syncFd, -1); - if (r != FenceState::SIGNALED) { - LOG(ERROR) << "sync wait failed, fd: " << syncFd; - return {ANEURALNETWORKS_OP_FAILED, -1, nullptr, {}}; - } - } - } - - // Update deadline if the timeout duration is closer than the deadline. 
- auto closestDeadline = deadline; - if (duration.has_value()) { - const auto timeoutDurationDeadline = makeDeadline(*duration); - if (!closestDeadline.has_value() || *closestDeadline > timeoutDurationDeadline) { - closestDeadline = timeoutDurationDeadline; - } - } - - const auto [result, outputShapes, timing] = compute(nullptr, closestDeadline); - return {result, -1, nullptr, timing}; -} - DeviceManager* DeviceManager::get() { static DeviceManager manager; return &manager; @@ -1225,62 +817,35 @@ return CpuDevice::get(); } -std::shared_ptr<Device> DeviceManager::forTest_makeDriverDevice(const SharedDevice& device) { - VLOG(MANAGER) << "forTest_makeDriverDevice(" << device->getName() << ")"; - const auto driverDevice = DriverDevice::create(device); +std::shared_ptr<Device> DeviceManager::forTest_makeDriverDevice(const std::string& name, + const sp<V1_0::IDevice>& device) { + const DeviceFactory makeDevice = [device](bool /*blocking*/) { return device; }; + const auto driverDevice = DriverDevice::create(name, makeDevice); CHECK(driverDevice != nullptr); return driverDevice; } -#ifndef NN_COMPATIBILITY_LIBRARY_BUILD -std::vector<std::shared_ptr<DriverDevice>> getDriverDevices() { - const auto& appInfo = AppInfoFetcher::get()->getAppInfo(); - const bool currentProcessIsOnThePlatform = - appInfo.appIsSystemApp || appInfo.appIsOnVendorImage || appInfo.appIsOnProductImage; - - const bool includeUpdatableDrivers = !currentProcessIsOnThePlatform; - auto devicesAndUpdatability = - hardware::neuralnetworks::service::getDevices(includeUpdatableDrivers); - - std::vector<std::shared_ptr<DriverDevice>> driverDevices; - driverDevices.reserve(devicesAndUpdatability.size()); - for (auto& [device, isDeviceUpdatable] : devicesAndUpdatability) { - driverDevices.push_back(DriverDevice::create(std::move(device), isDeviceUpdatable)); - } - return driverDevices; -} -#else -std::vector<std::shared_ptr<DriverDevice>> getDriverDevices() { - auto devices = getDevices(); - 
std::vector<std::shared_ptr<DriverDevice>> driverDevices; - driverDevices.reserve(devices.size()); - for (auto& device : devices) { - driverDevices.push_back(DriverDevice::create(std::move(device))); - } - return driverDevices; -} -#endif // NN_COMPATIBILITY_LIBRARY_BUILD - void DeviceManager::findAvailableDevices() { VLOG(MANAGER) << "findAvailableDevices"; // register driver devices - auto driverDevices = getDriverDevices(); - for (auto& driverDevice : driverDevices) { - VLOG(MANAGER) << "Found interface " << driverDevice->getName(); - mDevices.push_back(std::move(driverDevice)); + const auto names = hardware::getAllHalInstanceNames(V1_0::IDevice::descriptor); + for (const auto& name : names) { + VLOG(MANAGER) << "Found interface " << name; + const DeviceFactory makeDevice = [name](bool blocking) { + return blocking ? V1_0::IDevice::getService(name) : V1_0::IDevice::tryGetService(name); + }; + registerDevice(name, makeDevice); } -#ifndef NN_COMPATIBILITY_LIBRARY_BUILD // register CPU fallback device mDevices.push_back(CpuDevice::get()); mDevicesCpuOnly.push_back(CpuDevice::get()); -#endif // NN_COMPATIBILITY_LIBRARY_BUILD } -void DeviceManager::registerDevice(const SharedDevice& device) { - if (auto driverDevice = DriverDevice::create(device)) { - mDevices.push_back(std::move(driverDevice)); +void DeviceManager::registerDevice(const std::string& name, const DeviceFactory& makeDevice) { + if (auto device = DriverDevice::create(name, makeDevice)) { + mDevices.push_back(std::move(device)); } } @@ -1292,6 +857,9 @@ mPartitioning = getProp("debug.nn.partition", kPartitioningDefault); mDebugNNCpuOnly = (getProp("debug.nn.cpuonly") != 0); mSyncExecCpu = (getProp("debug.nn.syncexec-cpu", 1) != 0); + if (!mSyncExecHalSetter) { + mSyncExecHal = (getProp("debug.nn.syncexec-hal", 1) != 0); + } mSyncExecRuntime = (getProp("debug.nn.syncexec-runtime") != 0); #endif // NN_DEBUGGABLE }
diff --git a/runtime/Manager.h b/runtime/Manager.h index e25bf18..d6d4835 100644 --- a/runtime/Manager.h +++ b/runtime/Manager.h
@@ -17,11 +17,7 @@ #ifndef ANDROID_FRAMEWORKS_ML_NN_RUNTIME_MANAGER_H #define ANDROID_FRAMEWORKS_ML_NN_RUNTIME_MANAGER_H -#include <LegacyUtils.h> #include <android-base/macros.h> -#include <nnapi/IBurst.h> -#include <nnapi/IDevice.h> -#include <nnapi/Types.h> #include <map> #include <memory> @@ -31,91 +27,54 @@ #include <utility> #include <vector> -#include "ExecutionCallback.h" +#include "Callbacks.h" +#include "HalInterfaces.h" #include "Memory.h" +#include "Utils.h" namespace android { namespace nn { // Forward declaration class Device; +class ExecutionBurstController; class MetaModel; class ModelArgumentInfo; - -// A unified interface for a reusable execution with cached resources. -// This object provides no thread-safety guarantee. The caller must guarantee there is at most one -// call to RuntimeExecution::compute or RuntimeExecution::computeFenced on the same RuntimeExecution -// object in flight at a time. -class RuntimeExecution { - DISALLOW_COPY_AND_ASSIGN(RuntimeExecution); - - public: - RuntimeExecution() = default; - virtual ~RuntimeExecution() = default; - - virtual std::tuple<int, std::vector<OutputShape>, Timing> compute( - const SharedBurst& burstController, const OptionalTimePoint& deadline) const = 0; - - // The returned timing information is only valid if the callback is nullptr. - // Returns error_code, sync_fence, callback and timing. - virtual std::tuple<int, int, ExecuteFencedInfoCallback, Timing> computeFenced( - const std::vector<int>& waitFor, const OptionalTimePoint& deadline, - const OptionalDuration& timeoutDurationAfterFence) const = 0; -}; +class VersionedIPreparedModel; // A unified interface for actual driver prepared model as well as the CPU. 
-class RuntimePreparedModel { - DISALLOW_COPY_AND_ASSIGN(RuntimePreparedModel); +class PreparedModel { + DISALLOW_COPY_AND_ASSIGN(PreparedModel); public: - RuntimePreparedModel() = default; - virtual ~RuntimePreparedModel() = default; + PreparedModel() = default; + virtual ~PreparedModel() = default; virtual const Device* getDevice() const = 0; - virtual SharedPreparedModel getInterface() const = 0; + virtual std::shared_ptr<VersionedIPreparedModel> getInterface() const = 0; // Perform computation with given input/output argument info and memory pools. - virtual std::tuple<int, std::vector<OutputShape>, Timing> execute( + virtual std::tuple<int, std::vector<hal::OutputShape>, hal::Timing> execute( const std::vector<ModelArgumentInfo>& inputs, const std::vector<ModelArgumentInfo>& outputs, - const std::vector<const RuntimeMemory*>& memories, const SharedBurst& burstController, - MeasureTiming measure, const OptionalTimePoint& deadline, - const OptionalDuration& loopTimeoutDuration) const = 0; + const std::vector<const Memory*>& memories, + const std::shared_ptr<ExecutionBurstController>& burstController, + hal::MeasureTiming measure, const std::optional<Deadline>& deadline, + const hal::OptionalTimeoutDuration& loopTimeoutDuration) const = 0; // Perform fenced computation with given input/output argument info and memory pools. // The returned timing information is only valid if the callback is nullptr. // Returns error_code, sync_fence, callback and timing. 
- virtual std::tuple<int, int, ExecuteFencedInfoCallback, Timing> executeFenced( + virtual std::tuple<int, int, sp<hal::IFencedExecutionCallback>, hal::Timing> executeFenced( const std::vector<ModelArgumentInfo>& inputs, const std::vector<ModelArgumentInfo>& outputs, - const std::vector<const RuntimeMemory*>& memories, const std::vector<int>& waitFor, - MeasureTiming measure, const OptionalTimePoint& deadline, - const OptionalDuration& loopTimeoutDuration, - const OptionalDuration& timeoutDurationAfterFence) const = 0; + const std::vector<const Memory*>& memories, const std::vector<int>& waitFor, + hal::MeasureTiming measure, const std::optional<Deadline>& deadline, + const hal::OptionalTimeoutDuration& loopTimeoutDuration, + const hal::OptionalTimeoutDuration& timeoutDurationAfterFence) const = 0; - // Create a reusable execution with given input/output argument info and memory pools. - virtual std::pair<int, std::shared_ptr<RuntimeExecution>> createReusableExecution( - const std::vector<ModelArgumentInfo>& inputs, - const std::vector<ModelArgumentInfo>& outputs, - const std::vector<const RuntimeMemory*>& memories, MeasureTiming measure, - const OptionalDuration& loopTimeoutDuration) const = 0; - - virtual GeneralResult<SharedBurst> configureExecutionBurst() const = 0; - - virtual MemoryPreference getMemoryPreference() const = 0; -}; - -using ModelFactory = std::function<Model()>; - -struct CacheHandles { - std::vector<SharedHandle> modelCache; - std::vector<SharedHandle> dataCache; -}; - -using CacheDir = std::string; - -struct CacheInfo { - std::variant<CacheDir, CacheHandles> variant; + virtual std::shared_ptr<ExecutionBurstController> configureExecutionBurst( + bool preferPowerOverLatency) const = 0; }; // A unified interface for actual driver devices as well as the CPU @@ -131,31 +90,29 @@ virtual const std::string& getVersionString() const = 0; virtual int64_t getFeatureLevel() const = 0; virtual int32_t getType() const = 0; - virtual bool isUpdatable() 
const = 0; - virtual const std::vector<Extension>& getSupportedExtensions() const = 0; + virtual const std::vector<hal::Extension>& getSupportedExtensions() const = 0; // See the MetaModel class in MetaModel.h for more details. virtual std::vector<bool> getSupportedOperations(const MetaModel& metaModel) const = 0; - virtual const Capabilities& getCapabilities() const = 0; - virtual Capabilities::PerformanceInfo getPerformance(OperandType type) const = 0; - virtual Capabilities::PerformanceInfo getRelaxedFloat32toFloat16PerformanceScalar() const = 0; - virtual Capabilities::PerformanceInfo getRelaxedFloat32toFloat16PerformanceTensor() const = 0; - virtual Capabilities::PerformanceInfo getIfPerformance() const = 0; - virtual Capabilities::PerformanceInfo getWhilePerformance() const = 0; - virtual std::pair<uint32_t, uint32_t> getNumberOfCacheFilesNeeded() const = 0; + virtual hal::PerformanceInfo getPerformance(hal::OperandType type) const = 0; + virtual hal::PerformanceInfo getRelaxedFloat32toFloat16PerformanceScalar() const = 0; + virtual hal::PerformanceInfo getRelaxedFloat32toFloat16PerformanceTensor() const = 0; + virtual hal::PerformanceInfo getIfPerformance() const = 0; + virtual hal::PerformanceInfo getWhilePerformance() const = 0; virtual bool isCachingSupported() const = 0; virtual int wait() const = 0; - virtual std::pair<int, std::shared_ptr<RuntimePreparedModel>> prepareModel( - const ModelFactory& makeModel, ExecutionPreference preference, Priority priority, - const OptionalTimePoint& deadline, const CacheInfo& cacheInfo, - const std::optional<CacheToken>& maybeToken) const = 0; + virtual std::pair<int, std::shared_ptr<PreparedModel>> prepareModel( + const hal::ModelFactory& makeModel, hal::ExecutionPreference preference, + hal::Priority priority, const std::optional<Deadline>& deadline, + const std::string& cacheDir, + const std::optional<hal::CacheToken>& maybeToken) const = 0; - // The caller is responsible for making sure the MemoryDescriptor only 
contains - // PreparedModels from the same Device. - virtual std::pair<int, std::unique_ptr<RuntimeMemory>> allocate(const MemoryDescriptor& desc, - OperandType type) const = 0; + // The caller is responsible for making sure the MemoryDescriptor only contains PreparedModels + // from the same Device. + virtual std::pair<int, std::unique_ptr<Memory>> allocate(const MemoryDescriptor& desc, + hal::OperandType type) const = 0; }; // Manages the NN HAL devices. Only one instance of this class will exist. @@ -172,8 +129,13 @@ // For testing only: void setUseCpuOnly(bool useCpuOnly) { mSetCpuOnly = useCpuOnly; } bool getUseCpuOnly() const { return mSetCpuOnly; } + void setSyncExecHal(bool val) { + mSyncExecHal = val; + mSyncExecHalSetter = true; + } bool syncExecCpu() const { return mSyncExecCpu; } + bool syncExecHal() const { return mSyncExecHal; } bool syncExecRuntime() const { return mSyncExecRuntime; } // How to handle graph partitioning? @@ -206,7 +168,10 @@ } // Register a test device. - void forTest_registerDevice(const SharedDevice& device) { registerDevice(device); } + void forTest_registerDevice(const std::string& name, const sp<hal::V1_0::IDevice>& device) { + const hal::DeviceFactory makeDevice = [device](bool /*blocking*/) { return device; }; + registerDevice(name, makeDevice); + } // Re-initialize the list of available devices. void forTest_reInitializeDeviceList() { @@ -216,7 +181,8 @@ } // Make a test device - static std::shared_ptr<Device> forTest_makeDriverDevice(const SharedDevice& device); + static std::shared_ptr<Device> forTest_makeDriverDevice(const std::string& name, + const sp<hal::V1_0::IDevice>& device); bool forTest_isCpuDevice(const ANeuralNetworksDevice* device) const { return reinterpret_cast<const Device*>(device) == getCpuDevice().get(); @@ -227,7 +193,7 @@ DeviceManager(); // Adds a device for the manager to use. 
- void registerDevice(const SharedDevice& device); + void registerDevice(const std::string& name, const hal::DeviceFactory& makeDevice); void findAvailableDevices(); @@ -244,6 +210,10 @@ // synchronous execution bool mSyncExecCpu = true; + bool mSyncExecHal = true; // Call executeSynchronously*() when available on device. + bool mSyncExecHalSetter = false; // Has mSyncExecHal been set by setSyncExecHal()? + // If so, don't allow the setting to be overridden + // by system property debug.nn.syncexec-hal bool mSyncExecRuntime = false; static const uint32_t kPartitioningDefault = kPartitioningWithFallback; @@ -252,8 +222,6 @@ bool mStrictSlicing = false; }; -std::vector<SharedDevice> getDevices(); - } // namespace nn } // namespace android
diff --git a/runtime/Memory.cpp b/runtime/Memory.cpp index 33f3ab7..e0bd6b9 100644 --- a/runtime/Memory.cpp +++ b/runtime/Memory.cpp
@@ -18,14 +18,10 @@ #include "Memory.h" -#include <CpuExecutor.h> -#include <LegacyUtils.h> #include <android-base/scopeguard.h> #include <android/hardware_buffer.h> -#include <nnapi/IBurst.h> -#include <nnapi/SharedMemory.h> -#include <nnapi/TypeUtils.h> -#include <nnapi/Types.h> +#include <cutils/native_handle.h> +#include <vndk/hardware_buffer.h> #include <algorithm> #include <memory> @@ -35,18 +31,25 @@ #include <vector> #include "CompilationBuilder.h" +#include "CpuExecutor.h" +#include "ExecutionBurstController.h" #include "Manager.h" +#include "MemoryUtils.h" #include "TypeManager.h" +#include "Utils.h" namespace android { namespace nn { + +using namespace hal; + namespace { // The validator for a client-managed single-dimensional memory pool with a known size. // The memory may be used for request inputs, request outputs, or model constants. class SizedMemoryValidator : public MemoryValidatorBase { public: - explicit SizedMemoryValidator(uint32_t size) : kSize(size) {} + SizedMemoryValidator(uint32_t size) : kSize(size) {} bool validate(const CompilationBuilder*, IOType, uint32_t, const ANeuralNetworksOperandType*, uint32_t offset, uint32_t length) const override { @@ -180,39 +183,50 @@ } // namespace -RuntimeMemory::RuntimeMemory(SharedMemory memory) : kMemory(std::move(memory)) { - CHECK(kMemory != nullptr); - mValidator = std::make_unique<SizedMemoryValidator>(nn::getSize(kMemory)); -} +Memory::Memory(hal::hidl_memory memory) + : kHidlMemory(std::move(memory)), + mValidator(std::make_unique<SizedMemoryValidator>(kHidlMemory.size())) {} -RuntimeMemory::RuntimeMemory(SharedMemory memory, std::unique_ptr<MemoryValidatorBase> validator) - : kMemory(std::move(memory)), mValidator(std::move(validator)) { - CHECK(kMemory != nullptr); -} +Memory::Memory(hal::hidl_memory memory, std::unique_ptr<MemoryValidatorBase> validator) + : kHidlMemory(std::move(memory)), mValidator(std::move(validator)) {} -RuntimeMemory::RuntimeMemory(SharedBuffer buffer) : 
kBuffer(std::move(buffer)) {} +Memory::Memory(sp<hal::IBuffer> buffer, uint32_t token) + : kBuffer(std::move(buffer)), kToken(token) {} -Request::MemoryPool RuntimeMemory::getMemoryPool() const { - if (kBuffer != nullptr) { - return kBuffer->getToken(); +Memory::~Memory() { + for (const auto& [ptr, weakBurst] : mUsedBy) { + if (const std::shared_ptr<ExecutionBurstController> burst = weakBurst.lock()) { + burst->freeMemory(getKey()); + } } - return kMemory; } -std::optional<RunTimePoolInfo> RuntimeMemory::getRunTimePoolInfo() const { +Request::MemoryPool Memory::getMemoryPool() const { + Request::MemoryPool pool; + if (kToken > 0) { + pool.token(kToken); + } else { + pool.hidlMemory(kHidlMemory); + } + return pool; +} + +std::optional<RunTimePoolInfo> Memory::getRunTimePoolInfo() const { std::lock_guard<std::mutex> guard(mMutex); if (!mHasCachedRunTimePoolInfo) { - mCachedRunTimePoolInfo = RunTimePoolInfo::createFromMemory(kMemory); + mCachedRunTimePoolInfo = RunTimePoolInfo::createFromHidlMemory(kHidlMemory); mHasCachedRunTimePoolInfo = true; } return mCachedRunTimePoolInfo; } -void RuntimeMemory::hold(const IBurst::OptionalCacheHold& cacheHold) const { - if (cacheHold != nullptr) { - std::lock_guard<std::mutex> guard(mMutex); - mHold.insert(cacheHold); - } +intptr_t Memory::getKey() const { + return reinterpret_cast<intptr_t>(this); +} + +void Memory::usedBy(const std::shared_ptr<ExecutionBurstController>& burst) const { + std::lock_guard<std::mutex> guard(mMutex); + mUsedBy.emplace(burst.get(), burst); } static int copyHidlMemories(const std::optional<RunTimePoolInfo>& src, @@ -232,37 +246,37 @@ return ANEURALNETWORKS_NO_ERROR; } -int copyIBufferToMemory(const SharedBuffer& src, const SharedMemory& dst) { +int copyIBufferToHidlMemory(const sp<IBuffer>& src, const hidl_memory& dst) { const auto ret = src->copyTo(dst); - if (!ret.has_value()) { - LOG(ERROR) << "ANeuralNetworksMemory_copy failure: " << ret.error().message; - return 
convertErrorStatusToResultCode(ret.error().code); + if (!ret.isOk()) { + LOG(ERROR) << "ANeuralNetworksMemory_copy failure: " << ret.description(); + return ANEURALNETWORKS_OP_FAILED; } - return ANEURALNETWORKS_NO_ERROR; + return convertErrorStatusToResultCode(static_cast<ErrorStatus>(ret)); } -int copyMemoryToIBuffer(const SharedMemory& src, const SharedBuffer& dst, - const std::vector<uint32_t>& dimensions) { +int copyHidlMemoryToIBuffer(const hidl_memory& src, const sp<IBuffer>& dst, + const std::vector<uint32_t>& dimensions) { const auto ret = dst->copyFrom(src, dimensions); - if (!ret.has_value()) { - LOG(ERROR) << "ANeuralNetworksMemory_copy failure: " << ret.error().message; - return convertErrorStatusToResultCode(ret.error().code); + if (!ret.isOk()) { + LOG(ERROR) << "ANeuralNetworksMemory_copy failure: " << ret.description(); + return ANEURALNETWORKS_OP_FAILED; } - return ANEURALNETWORKS_NO_ERROR; + return convertErrorStatusToResultCode(static_cast<ErrorStatus>(ret)); } -static int copyIBuffers(const SharedBuffer& src, const SharedBuffer& dst, +static int copyIBuffers(const sp<IBuffer>& src, const sp<IBuffer>& dst, const MemoryValidatorBase::Metadata& srcMetadata) { - const auto [n, memoryAHWB] = MemoryRuntimeAHWB::create(srcMetadata.logicalSize); + const auto [n, memory] = MemoryRuntimeAHWB::create(srcMetadata.logicalSize); NN_RETURN_IF_ERROR(n); - const SharedMemory& memory = memoryAHWB->getMemory(); - if (!validate(memory).ok()) return ANEURALNETWORKS_OUT_OF_MEMORY; - NN_RETURN_IF_ERROR(copyIBufferToMemory(src, memory)); - NN_RETURN_IF_ERROR(copyMemoryToIBuffer(memory, dst, srcMetadata.dimensions)); + const hidl_memory& hidlMemory = memory->getHidlMemory(); + if (!hidlMemory.valid()) return ANEURALNETWORKS_OUT_OF_MEMORY; + NN_RETURN_IF_ERROR(copyIBufferToHidlMemory(src, hidlMemory)); + NN_RETURN_IF_ERROR(copyHidlMemoryToIBuffer(hidlMemory, dst, srcMetadata.dimensions)); return ANEURALNETWORKS_NO_ERROR; } -static int copyInternal(const RuntimeMemory& 
src, const RuntimeMemory& dst) { +static int copyInternal(const Memory& src, const Memory& dst) { if (&src == &dst) return ANEURALNETWORKS_NO_ERROR; if (!src.getValidator().isInitialized()) { @@ -276,23 +290,24 @@ return ANEURALNETWORKS_BAD_DATA; } - bool srcHasMemory = validate(src.getMemory()).ok(); - bool dstHasMemory = validate(dst.getMemory()).ok(); + bool srcHasHidlMemory = src.getHidlMemory().valid(); + bool dstHasHidlMemory = dst.getHidlMemory().valid(); bool srcHasIBuffer = src.getIBuffer() != nullptr; bool dstHasIBuffer = dst.getIBuffer() != nullptr; if (srcHasIBuffer && dstHasIBuffer) { return copyIBuffers(src.getIBuffer(), dst.getIBuffer(), srcMetadata); - } else if (srcHasMemory && dstHasMemory) { + } else if (srcHasHidlMemory && dstHasHidlMemory) { return copyHidlMemories(src.getRunTimePoolInfo(), dst.getRunTimePoolInfo()); - } else if (srcHasMemory && dstHasIBuffer) { - return copyMemoryToIBuffer(src.getMemory(), dst.getIBuffer(), srcMetadata.dimensions); - } else if (srcHasIBuffer && dstHasMemory) { - return copyIBufferToMemory(src.getIBuffer(), dst.getMemory()); + } else if (srcHasHidlMemory && dstHasIBuffer) { + return copyHidlMemoryToIBuffer(src.getHidlMemory(), dst.getIBuffer(), + srcMetadata.dimensions); + } else if (srcHasIBuffer && dstHasHidlMemory) { + return copyIBufferToHidlMemory(src.getIBuffer(), dst.getHidlMemory()); } return ANEURALNETWORKS_OP_FAILED; } -int RuntimeMemory::copy(const RuntimeMemory& src, const RuntimeMemory& dst) { +int Memory::copy(const Memory& src, const Memory& dst) { int n = copyInternal(src, dst); dst.getValidator().setInitialized(n == ANEURALNETWORKS_NO_ERROR); return n; @@ -307,7 +322,7 @@ } int MemoryBuilder::addRole(const CompilationBuilder& compilation, IOType ioType, uint32_t index, - float prob) { + float freq) { const char* tag = ioType == IOType::INPUT ? 
"addInputRole" : "addOutputRole"; if (badState(tag)) { return ANEURALNETWORKS_BAD_STATE; @@ -318,7 +333,7 @@ return ANEURALNETWORKS_BAD_DATA; } - std::vector<std::tuple<const RuntimePreparedModel*, IOType, uint32_t>> roles; + std::vector<std::tuple<const PreparedModel*, IOType, uint32_t>> roles; auto callback = [&roles](const auto* preparedModel, IOType type, uint32_t index) { roles.emplace_back(preparedModel, type, index); }; @@ -367,15 +382,15 @@ return ANEURALNETWORKS_BAD_DATA; } - if (prob > 1.0f || prob <= 0.0f) { - LOG(ERROR) << "ANeuralNetworksMemoryDesc_" << tag << " -- invalid frequency " << prob; + if (freq > 1.0f || freq <= 0.0f) { + LOG(ERROR) << "ANeuralNetworksMemoryDesc_" << tag << " -- invalid frequency " << freq; return ANEURALNETWORKS_BAD_DATA; } mRoles.emplace(&compilation, ioType, index); - for (const auto& [preparedModel, type, ind] : roles) { + for (const auto [preparedModel, type, ind] : roles) { uint32_t modelIndex = mDesc.preparedModels.add(preparedModel); - BufferRole role = {.modelIndex = modelIndex, .ioIndex = ind, .probability = prob}; + BufferRole role = {.modelIndex = modelIndex, .ioIndex = ind, .frequency = freq}; if (type == IOType::INPUT) { mDesc.inputRoles.push_back(role); } else { @@ -406,10 +421,10 @@ static void logMemoryDescriptorToInfo(const MemoryDescriptor& desc, const Operand& operand) { LOG(INFO) << "MemoryDescriptor start"; - LOG(INFO) << " Data type: " << operand.type; - LOG(INFO) << " Scale: " << operand.scale; - LOG(INFO) << " Zero point: " << operand.zeroPoint; - LOG(INFO) << " Extra params: " << operand.extraParams; + LOG(INFO) << " Data type: " << toString(operand.type); + LOG(INFO) << " Scale: " << toString(operand.scale); + LOG(INFO) << " Zero point: " << toString(operand.zeroPoint); + LOG(INFO) << " Extra params: " << toString(operand.extraParams); LOG(INFO) << " Dimensions: " << toString(desc.dimensions); LOG(INFO) << " Prepared models [" << desc.preparedModels.size() << "]:"; for (const auto* preparedModel : 
desc.preparedModels) { @@ -417,11 +432,11 @@ } LOG(INFO) << " Input roles [" << desc.inputRoles.size() << "]:"; for (const auto& usage : desc.inputRoles) { - LOG(INFO) << " " << usage; + LOG(INFO) << " " << toString(usage); } LOG(INFO) << " Output roles [" << desc.outputRoles.size() << "]:"; for (const auto& usage : desc.outputRoles) { - LOG(INFO) << " " << usage; + LOG(INFO) << " " << toString(usage); } LOG(INFO) << "MemoryDescriptor end"; } @@ -457,7 +472,7 @@ mAllocator = nullptr; } mSupportsAhwb = std::all_of(devices.begin(), devices.end(), [](const auto* device) { - return device->getFeatureLevel() >= kHalVersionV1_3ToApi.featureLevel; + return device->getFeatureLevel() >= __ANDROID_API_R__; }); mShouldFallback = std::none_of(mRoles.begin(), mRoles.end(), [](const auto& role) { const auto* cb = std::get<const CompilationBuilder*>(role); @@ -469,14 +484,14 @@ return ANEURALNETWORKS_NO_ERROR; } -std::pair<int, std::unique_ptr<RuntimeMemory>> MemoryBuilder::allocate() const { +std::pair<int, std::unique_ptr<Memory>> MemoryBuilder::allocate() const { if (!mFinished) { LOG(ERROR) << "ANeuralNetworksMemory_createFromDesc -- passed an unfinished descriptor"; return {ANEURALNETWORKS_BAD_STATE, nullptr}; } int n = ANEURALNETWORKS_OP_FAILED; - std::unique_ptr<RuntimeMemory> memory; + std::unique_ptr<Memory> memory; CHECK(mOperand.has_value()); // Try allocate the memory on device. 
@@ -506,57 +521,84 @@ } std::pair<int, std::unique_ptr<MemoryAshmem>> MemoryAshmem::create(uint32_t size) { - auto memory = createSharedMemory(size); - if (!memory.has_value()) { - LOG(ERROR) << "RuntimeMemory::create() failed: " << memory.error().message; - return {convertErrorStatusToResultCode(memory.error().code), nullptr}; - } - auto mapping = map(memory.value()); - if (!mapping.has_value()) { - LOG(ERROR) << "RuntimeMemory::create() map failed: " << mapping.error().message; - return {convertErrorStatusToResultCode(mapping.error().code), nullptr}; + hidl_memory hidlMemory = allocateSharedMemory(size); + sp<IMemory> mapped = mapMemory(hidlMemory); + if (mapped == nullptr || mapped->getPointer() == nullptr) { + LOG(ERROR) << "Memory::create failed"; + return {ANEURALNETWORKS_OUT_OF_MEMORY, nullptr}; } return {ANEURALNETWORKS_NO_ERROR, - std::make_unique<MemoryAshmem>(std::move(memory).value(), std::move(mapping).value())}; + std::make_unique<MemoryAshmem>(std::move(mapped), std::move(hidlMemory))}; } uint8_t* MemoryAshmem::getPointer() const { - return static_cast<uint8_t*>(std::get<void*>(kMapping.pointer)); + return static_cast<uint8_t*>(static_cast<void*>(kMappedMemory->getPointer())); } -MemoryAshmem::MemoryAshmem(SharedMemory memory, Mapping mapping) - : RuntimeMemory(std::move(memory)), kMapping(std::move(mapping)) {} +MemoryAshmem::MemoryAshmem(sp<IMemory> mapped, hidl_memory memory) + : Memory(std::move(memory)), kMappedMemory(std::move(mapped)) {} std::pair<int, std::unique_ptr<MemoryFd>> MemoryFd::create(size_t size, int prot, int fd, size_t offset) { - auto memory = createSharedMemoryFromFd(size, prot, fd, offset); - if (!memory.has_value()) { - LOG(ERROR) << "Failed to create memory from fd: " << memory.error().message; - return {convertErrorStatusToResultCode(memory.error().code), nullptr}; + if (size == 0 || fd < 0) { + LOG(ERROR) << "Invalid size or fd"; + return {ANEURALNETWORKS_BAD_DATA, nullptr}; } - return {ANEURALNETWORKS_NO_ERROR, 
std::make_unique<MemoryFd>(std::move(memory).value())}; + + // Duplicate the file descriptor so MemoryFd owns its own version. + int dupfd = dup(fd); + if (dupfd == -1) { + LOG(ERROR) << "Failed to dup the fd"; + // TODO(b/120417090): is ANEURALNETWORKS_UNEXPECTED_NULL the correct + // error to return here? + return {ANEURALNETWORKS_UNEXPECTED_NULL, nullptr}; + } + + // Create a temporary native handle to own the dupfd. + native_handle_t* nativeHandle = native_handle_create(1, 3); + if (nativeHandle == nullptr) { + LOG(ERROR) << "Failed to create native_handle"; + // TODO(b/120417090): is ANEURALNETWORKS_UNEXPECTED_NULL the correct + // error to return here? + return {ANEURALNETWORKS_UNEXPECTED_NULL, nullptr}; + } + nativeHandle->data[0] = dupfd; + nativeHandle->data[1] = prot; + const uint64_t bits = static_cast<uint64_t>(offset); + nativeHandle->data[2] = (int32_t)(uint32_t)(bits & 0xffffffff); + nativeHandle->data[3] = (int32_t)(uint32_t)(bits >> 32); + + // Create a hidl_handle which owns the native handle and fd so that we don't + // have to manually clean either the native handle or the fd. + hardware::hidl_handle hidlHandle; + hidlHandle.setTo(nativeHandle, /*shouldOwn=*/true); + + // Push the hidl_handle into a hidl_memory object. The hidl_memory object is + // responsible for cleaning the hidl_handle, the native handle, and the fd. 
+ hidl_memory hidlMemory = hidl_memory("mmap_fd", std::move(hidlHandle), size); + + return {ANEURALNETWORKS_NO_ERROR, std::make_unique<MemoryFd>(std::move(hidlMemory))}; } -MemoryFd::MemoryFd(SharedMemory memory) : RuntimeMemory(std::move(memory)) {} +MemoryFd::MemoryFd(hidl_memory memory) : Memory(std::move(memory)) {} std::pair<int, std::unique_ptr<MemoryAHWB>> MemoryAHWB::create(const AHardwareBuffer& ahwb) { - auto memory = createSharedMemoryFromAHWB(const_cast<AHardwareBuffer*>(&ahwb), - /*takeOwnership=*/false); - if (!memory.has_value()) { - LOG(ERROR) << "Failed to create memory from AHWB: " << memory.error().message; - return {convertErrorStatusToResultCode(memory.error().code), nullptr}; - } - + AHardwareBuffer_Desc bufferDesc; + AHardwareBuffer_describe(&ahwb, &bufferDesc); + const native_handle_t* handle = AHardwareBuffer_getNativeHandle(&ahwb); + hidl_memory hidlMemory; std::unique_ptr<MemoryValidatorBase> validator; - if (isAhwbBlob(memory.value())) { - validator = std::make_unique<SizedMemoryValidator>(nn::getSize(memory.value())); + if (bufferDesc.format == AHARDWAREBUFFER_FORMAT_BLOB) { + hidlMemory = hidl_memory("hardware_buffer_blob", handle, bufferDesc.width); + validator = std::make_unique<SizedMemoryValidator>(bufferDesc.width); } else { + // memory size is not used. 
+ hidlMemory = hidl_memory("hardware_buffer", handle, 0); validator = std::make_unique<AHardwareBufferNonBlobValidator>(); } - - auto memoryAHWB = std::make_unique<MemoryAHWB>(std::move(memory).value(), std::move(validator)); - return {ANEURALNETWORKS_NO_ERROR, std::move(memoryAHWB)}; -} + auto memory = std::make_unique<MemoryAHWB>(std::move(hidlMemory), std::move(validator)); + return {ANEURALNETWORKS_NO_ERROR, std::move(memory)}; +}; std::pair<int, std::unique_ptr<MemoryRuntimeAHWB>> MemoryRuntimeAHWB::create(uint32_t size) { AHardwareBuffer* ahwb = nullptr; @@ -574,38 +616,57 @@ LOG(ERROR) << "Failed to allocate BLOB mode AHWB."; return {ANEURALNETWORKS_OP_FAILED, nullptr}; } + auto allocateGuard = base::make_scope_guard([&ahwb]() { AHardwareBuffer_release(ahwb); }); - auto memory = createSharedMemoryFromAHWB(ahwb, /*takeOWnership=*/true); - if (!memory.has_value()) { - LOG(ERROR) << "Failed to allocate BLOB mode AHWB: " << memory.error().message; - return {convertErrorStatusToResultCode(memory.error().code), nullptr}; + void* buffer = nullptr; + err = AHardwareBuffer_lock(ahwb, usage, -1, nullptr, &buffer); + if (err != 0 || buffer == nullptr) { + LOG(ERROR) << "Failed to lock BLOB mode AHWB."; + return {ANEURALNETWORKS_OP_FAILED, nullptr}; } - auto mapping = map(memory.value()); - if (!mapping.has_value()) { - LOG(ERROR) << "Failed to map BLOB mode AHWB: " << mapping.error().message; - return {convertErrorStatusToResultCode(mapping.error().code), nullptr}; + auto lockGuard = base::make_scope_guard([&ahwb]() { AHardwareBuffer_unlock(ahwb, nullptr); }); + + const native_handle_t* handle = AHardwareBuffer_getNativeHandle(ahwb); + if (handle == nullptr) { + LOG(ERROR) << "Failed to retrieve the native handle from the AHWB."; + return {ANEURALNETWORKS_OP_FAILED, nullptr}; } - auto memoryAHWB = std::make_unique<MemoryRuntimeAHWB>(std::move(memory).value(), - std::move(mapping).value()); - return {ANEURALNETWORKS_NO_ERROR, std::move(memoryAHWB)}; + + hidl_memory 
hidlMemory = hidl_memory("hardware_buffer_blob", handle, desc.width); + auto memory = std::make_unique<MemoryRuntimeAHWB>(std::move(hidlMemory), ahwb, + static_cast<uint8_t*>(buffer)); + allocateGuard.Disable(); + lockGuard.Disable(); + return {ANEURALNETWORKS_NO_ERROR, std::move(memory)}; } -uint8_t* MemoryRuntimeAHWB::getPointer() const { - return static_cast<uint8_t*>(std::get<void*>(kMapping.pointer)); +MemoryRuntimeAHWB::MemoryRuntimeAHWB(hal::hidl_memory memory, AHardwareBuffer* ahwb, + uint8_t* buffer) + : Memory(std::move(memory)), mAhwb(ahwb), mBuffer(buffer) { + CHECK(mAhwb != nullptr); + CHECK(mBuffer != nullptr); } -MemoryRuntimeAHWB::MemoryRuntimeAHWB(SharedMemory memory, Mapping mapping) - : RuntimeMemory(std::move(memory)), kMapping(std::move(mapping)) {} +MemoryRuntimeAHWB::~MemoryRuntimeAHWB() { + AHardwareBuffer_unlock(mAhwb, nullptr); + AHardwareBuffer_release(mAhwb); +} -std::pair<int, std::unique_ptr<MemoryFromDevice>> MemoryFromDevice::create(SharedBuffer buffer) { +std::pair<int, std::unique_ptr<MemoryFromDevice>> MemoryFromDevice::create(sp<hal::IBuffer> buffer, + uint32_t token) { if (buffer == nullptr) { LOG(ERROR) << "nullptr IBuffer for device memory."; return {ANEURALNETWORKS_OP_FAILED, nullptr}; } - return {ANEURALNETWORKS_NO_ERROR, std::make_unique<MemoryFromDevice>(std::move(buffer))}; -} + if (token <= 0) { + LOG(ERROR) << "Invalid token for device memory: " << token; + return {ANEURALNETWORKS_OP_FAILED, nullptr}; + } + return {ANEURALNETWORKS_NO_ERROR, std::make_unique<MemoryFromDevice>(std::move(buffer), token)}; +}; -MemoryFromDevice::MemoryFromDevice(SharedBuffer buffer) : RuntimeMemory(std::move(buffer)) {} +MemoryFromDevice::MemoryFromDevice(sp<hal::IBuffer> buffer, uint32_t token) + : Memory(std::move(buffer), token) {} } // namespace nn } // namespace android
diff --git a/runtime/Memory.h b/runtime/Memory.h index 850f70a..56bf81d 100644 --- a/runtime/Memory.h +++ b/runtime/Memory.h
@@ -17,15 +17,9 @@ #ifndef ANDROID_FRAMEWORKS_ML_NN_RUNTIME_MEMORY_H #define ANDROID_FRAMEWORKS_ML_NN_RUNTIME_MEMORY_H -#include <CpuExecutor.h> -#include <LegacyUtils.h> #include <android-base/macros.h> -#include <android-base/scopeguard.h> -#include <nnapi/IBuffer.h> -#include <nnapi/IBurst.h> -#include <nnapi/SharedMemory.h> -#include <nnapi/Validation.h> #include <sys/mman.h> +#include <vndk/hardware_buffer.h> #include <algorithm> #include <map> @@ -37,15 +31,19 @@ #include <utility> #include <vector> +#include "CpuExecutor.h" +#include "HalInterfaces.h" #include "NeuralNetworks.h" +#include "Utils.h" namespace android { namespace nn { class CompilationBuilder; class Device; +class ExecutionBurstController; class ModelBuilder; -class RuntimePreparedModel; +class PreparedModel; // A utility template class to accumulate multiple objects and assign each // a distinct index number, starting with 0. @@ -95,11 +93,12 @@ }; using CompilationRole = std::tuple<const CompilationBuilder*, IOType, uint32_t>; +using StepRoleCallback = std::function<void(const PreparedModel*, IOType, uint32_t)>; struct MemoryDescriptor { std::vector<uint32_t> dimensions; - ObjectTracker<RuntimePreparedModel> preparedModels; - std::vector<BufferRole> inputRoles, outputRoles; + ObjectTracker<PreparedModel> preparedModels; + std::vector<hal::BufferRole> inputRoles, outputRoles; }; class MemoryValidatorBase { @@ -145,7 +144,7 @@ // The data type, scale, zero point, and extra parameters of the target operand. // Other fields will be ignored, including dimensions, lifetime, location, etc. // Set to std::nullopt if undefined. 
- std::optional<Operand> operand; + std::optional<hal::Operand> operand; }; virtual Metadata getMetadata() const = 0; @@ -159,23 +158,25 @@ virtual bool isInitialized() const { return true; } }; -int copyIBufferToMemory(const SharedBuffer& src, const SharedMemory& dst); +int copyIBufferToHidlMemory(const sp<hal::IBuffer>& src, const hal::hidl_memory& dst); -int copyMemoryToIBuffer(const SharedMemory& src, const SharedBuffer& dst, - const std::vector<uint32_t>& dimensions); +int copyHidlMemoryToIBuffer(const hal::hidl_memory& src, const sp<hal::IBuffer>& dst, + const std::vector<uint32_t>& dimensions); // Represents a memory region. -class RuntimeMemory { +class Memory { // Disallow copy and assign to prevent slicing - DISALLOW_COPY_AND_ASSIGN(RuntimeMemory); + DISALLOW_COPY_AND_ASSIGN(Memory); public: - virtual ~RuntimeMemory() = default; + // Custom destructor to notify any ExecutionBurstControllers currently using + // this memory that it is being freed. + virtual ~Memory(); - Request::MemoryPool getMemoryPool() const; - const SharedMemory& getMemory() const { return kMemory; } - const SharedBuffer& getIBuffer() const { return kBuffer; } - virtual uint32_t getSize() const { return nn::getSize(getMemory()); } + hal::Request::MemoryPool getMemoryPool() const; + const hal::hidl_memory& getHidlMemory() const { return kHidlMemory; } + const sp<hal::IBuffer>& getIBuffer() const { return kBuffer; } + virtual uint32_t getSize() const { return getHidlMemory().size(); } virtual std::optional<RunTimePoolInfo> getRunTimePoolInfo() const; MemoryValidatorBase& getValidator() const { @@ -187,31 +188,40 @@ mValidator = std::move(validator); } - // This function binds `cacheHold` to the memory object, holding it for as long as the Memory - // object is alive. This keeps the cache present while the Memory object is alive. If - // `cacheHold` is null, this function is a no-op. 
- void hold(const IBurst::OptionalCacheHold& cacheHold) const; + // Unique key representing this memory object. + intptr_t getKey() const; - static int copy(const RuntimeMemory& src, const RuntimeMemory& dst); + // Marks a burst object as currently using this memory. When this + // memory object is destroyed, it will automatically free this memory from + // the bursts' memory cache. + void usedBy(const std::shared_ptr<ExecutionBurstController>& burst) const; + + static int copy(const Memory& src, const Memory& dst); protected: - explicit RuntimeMemory(SharedMemory memory); - RuntimeMemory(SharedMemory memory, std::unique_ptr<MemoryValidatorBase> validator); - explicit RuntimeMemory(SharedBuffer buffer); + Memory(hal::hidl_memory memory); + Memory(hal::hidl_memory memory, std::unique_ptr<MemoryValidatorBase> validator); + Memory(sp<hal::IBuffer> buffer, uint32_t token); - // The canonical representation for this memory. We will use one of the - // following values when communicating with the drivers. - const SharedMemory kMemory = std::make_shared<const Memory>(); - const SharedBuffer kBuffer; + // The HIDL representation for this memory. We will use one of the following values + // when communicating with the drivers. + const hal::hidl_memory kHidlMemory; + const sp<hal::IBuffer> kBuffer; + const uint32_t kToken = 0; std::unique_ptr<MemoryValidatorBase> mValidator; private: mutable std::mutex mMutex; - - // This set contains `CacheHold` objects, holding it for as long as the Memory object is alive. - // This keeps the cache present while the Memory object is alive. - mutable std::set<IBurst::OptionalCacheHold> mHold; + // mUsedBy is essentially a set of burst objects which use this Memory + // object. However, std::weak_ptr does not have comparison operations nor a + // std::hash implementation. This is because it is either a valid pointer + // (non-null) if the shared object is still alive, or it is null if the + // object has been freed. 
To circumvent this, mUsedBy is a map with the raw + // pointer as the key and the weak_ptr as the value. + mutable std::unordered_map<const ExecutionBurstController*, + std::weak_ptr<ExecutionBurstController>> + mUsedBy; mutable std::optional<RunTimePoolInfo> mCachedRunTimePoolInfo; mutable bool mHasCachedRunTimePoolInfo = false; @@ -228,7 +238,7 @@ int finish(); - std::pair<int, std::unique_ptr<RuntimeMemory>> allocate() const; + std::pair<int, std::unique_ptr<Memory>> allocate() const; private: bool badState(const char* name) const; @@ -243,7 +253,7 @@ // Keep track of the data type, scale, zero point, and extra parameters of the target operand. // Other fields will be ignored, including dimensions, lifetime, location, etc. // It is std::nullopt if no usage has been specified yet. - std::optional<Operand> mOperand; + std::optional<hal::Operand> mOperand; // Once the descriptor has been finished, we should not allow further modifications. bool mFinished = false; @@ -261,7 +271,7 @@ bool mShouldFallback = true; }; -class MemoryAshmem : public RuntimeMemory { +class MemoryAshmem : public Memory { public: // Creates a memory object containing a new android shared memory ("ashmem") // object of the size specified in bytes. Because this ashmem region can be @@ -278,20 +288,21 @@ uint8_t* getPointer() const; std::optional<RunTimePoolInfo> getRunTimePoolInfo() const override { - return RunTimePoolInfo::createFromExistingBuffer(getPointer(), nn::getSize(kMemory)); + return RunTimePoolInfo::createFromExistingBuffer(getPointer(), kHidlMemory.size()); } // prefer using MemoryAshmem::create - MemoryAshmem(SharedMemory memory, Mapping mapped); + MemoryAshmem(sp<hal::IMemory> mapped, hal::hidl_memory memory); private: - const Mapping kMapping; + const sp<hal::IMemory> kMappedMemory; }; -class MemoryFd : public RuntimeMemory { +class MemoryFd : public Memory { public: - // Create a memory object based on input size, prot, and fd. 
This function - // duplicates the provided fd, and owns the duplicate. + // Create a memory object based on input size, prot, and fd that can be sent + // across HIDL. This function duplicates the provided fd, and owns the + // duplicate. // // On success, returns ANEURALNETWORKS_NO_ERROR and a memory object. // On error, returns the appropriate NNAPI error code and nullptr. @@ -299,10 +310,10 @@ size_t offset); // prefer using MemoryFd::create - explicit MemoryFd(SharedMemory memory); + MemoryFd(hal::hidl_memory memory); }; -class MemoryAHWB : public RuntimeMemory { +class MemoryAHWB : public Memory { public: // Create a memory object to keep track of (but not take ownership of) the // provided AHardwareBuffer handle. @@ -312,11 +323,11 @@ static std::pair<int, std::unique_ptr<MemoryAHWB>> create(const AHardwareBuffer& ahwb); // prefer using MemoryAHWB::create - MemoryAHWB(SharedMemory memory, std::unique_ptr<MemoryValidatorBase> validator) - : RuntimeMemory(std::move(memory), std::move(validator)) {} + MemoryAHWB(hal::hidl_memory memory, std::unique_ptr<MemoryValidatorBase> validator) + : Memory(std::move(memory), std::move(validator)) {} }; -class MemoryRuntimeAHWB : public RuntimeMemory { +class MemoryRuntimeAHWB : public Memory { public: // Create a memory object containing a new BLOB-mode AHardwareBuffer memory // object of the size specified in bytes. The created memory is managed and @@ -329,33 +340,36 @@ // Get a pointer to the content of the memory. The returned pointer is // valid for the lifetime of the MemoryRuntimeAHWB object. This call always // returns non-null because it was validated during MemoryRuntimeAHWB::create. 
- uint8_t* getPointer() const; + uint8_t* getPointer() const { return mBuffer; } std::optional<RunTimePoolInfo> getRunTimePoolInfo() const override { - return RunTimePoolInfo::createFromExistingBuffer(getPointer(), nn::getSize(kMemory)); + return RunTimePoolInfo::createFromExistingBuffer(getPointer(), kHidlMemory.size()); } // prefer using MemoryRuntimeAHWB::create - MemoryRuntimeAHWB(SharedMemory memory, Mapping mapping); + MemoryRuntimeAHWB(hal::hidl_memory memory, AHardwareBuffer* ahwb, uint8_t* buffer); + ~MemoryRuntimeAHWB(); private: - const Mapping kMapping; + AHardwareBuffer* const mAhwb; + uint8_t* const mBuffer; }; -class MemoryFromDevice : public RuntimeMemory { +class MemoryFromDevice : public Memory { public: // Create a memory object to keep track of a driver-allocated device memory. // The memory is recognized by the driver via a token. // // On success, returns ANEURALNETWORKS_NO_ERROR and a memory object. // On error, returns the appropriate NNAPI error code and nullptr. - static std::pair<int, std::unique_ptr<MemoryFromDevice>> create(SharedBuffer buffer); + static std::pair<int, std::unique_ptr<MemoryFromDevice>> create(sp<hal::IBuffer> buffer, + uint32_t token); // prefer using MemoryFromDevice::create - explicit MemoryFromDevice(SharedBuffer buffer); + MemoryFromDevice(sp<hal::IBuffer> buffer, uint32_t token); }; -using MemoryTracker = ObjectTracker<RuntimeMemory>; +using MemoryTracker = ObjectTracker<Memory>; } // namespace nn } // namespace android
diff --git a/runtime/ModelArgumentInfo.cpp b/runtime/ModelArgumentInfo.cpp index 927375d..cf24004 100644 --- a/runtime/ModelArgumentInfo.cpp +++ b/runtime/ModelArgumentInfo.cpp
@@ -18,24 +18,26 @@ #include "ModelArgumentInfo.h" -#include <LegacyUtils.h> - #include <algorithm> #include <utility> #include <vector> +#include "HalInterfaces.h" #include "NeuralNetworks.h" #include "TypeManager.h" +#include "Utils.h" namespace android { namespace nn { +using namespace hal; + static const std::pair<int, ModelArgumentInfo> kBadDataModelArgumentInfo{ANEURALNETWORKS_BAD_DATA, {}}; std::pair<int, ModelArgumentInfo> ModelArgumentInfo::createFromPointer( - const Operand& operand, const ANeuralNetworksOperandType* type, void* data, uint32_t length, - bool paddingEnabled) { + const Operand& operand, const ANeuralNetworksOperandType* type, void* data, + uint32_t length) { if ((data == nullptr) != (length == 0)) { const char* dataPtrMsg = data ? "NOT_NULLPTR" : "NULLPTR"; LOG(ERROR) << "Data pointer must be nullptr if and only if length is zero (data = " @@ -44,7 +46,6 @@ } ModelArgumentInfo ret; - uint32_t neededLength = 0; if (data == nullptr) { ret.mState = ModelArgumentInfo::HAS_NO_VALUE; } else { @@ -52,60 +53,41 @@ return {n, ModelArgumentInfo()}; } if (operand.type != OperandType::OEM) { - neededLength = TypeManager::get()->getSizeOfData(operand.type, ret.mDimensions); - if (neededLength > length) { + uint32_t neededLength = + TypeManager::get()->getSizeOfData(operand.type, ret.mDimensions); + if (neededLength != length && neededLength != 0) { LOG(ERROR) << "Setting argument with invalid length: " << length - << ", minimum length expected: " << neededLength; + << ", expected length: " << neededLength; return kBadDataModelArgumentInfo; } } ret.mState = ModelArgumentInfo::POINTER; } - const uint32_t rawLength = neededLength == 0 ? 
length : neededLength; - const uint32_t padding = length - rawLength; - - if (!paddingEnabled && padding > 0) { - LOG(ERROR) << "Setting argument with padded length without enabling input and output " - "padding -- length: " - << length << ", expected length: " << neededLength; - return kBadDataModelArgumentInfo; - } - ret.mBuffer = data; - ret.mLocationAndLength = {.poolIndex = 0, .offset = 0, .length = rawLength, .padding = padding}; + ret.mLocationAndLength = {.poolIndex = 0, .offset = 0, .length = length}; return {ANEURALNETWORKS_NO_ERROR, ret}; } std::pair<int, ModelArgumentInfo> ModelArgumentInfo::createFromMemory( const Operand& operand, const ANeuralNetworksOperandType* type, uint32_t poolIndex, - uint32_t offset, uint32_t length, bool paddingEnabled) { + uint32_t offset, uint32_t length) { ModelArgumentInfo ret; if (int n = ret.updateDimensionInfo(operand, type)) { return {n, ModelArgumentInfo()}; } const bool isMemorySizeKnown = offset != 0 || length != 0; - uint32_t neededLength = 0; if (isMemorySizeKnown && operand.type != OperandType::OEM) { - neededLength = TypeManager::get()->getSizeOfData(operand.type, ret.mDimensions); - if (neededLength > length) { + const uint32_t neededLength = + TypeManager::get()->getSizeOfData(operand.type, ret.mDimensions); + if (neededLength != length && neededLength != 0) { LOG(ERROR) << "Setting argument with invalid length: " << length - << " (offset: " << offset << "), minimum length expected: " << neededLength; + << " (offset: " << offset << "), expected length: " << neededLength; return kBadDataModelArgumentInfo; } } - const uint32_t rawLength = neededLength == 0 ? 
length : neededLength; - const uint32_t padding = length - rawLength; - - if (!paddingEnabled && padding > 0) { - LOG(ERROR) << "Setting argument with padded length without enabling input and output " - "padding -- length: " - << length << ", offset: " << offset << ", expected length: " << neededLength; - return kBadDataModelArgumentInfo; - } ret.mState = ModelArgumentInfo::MEMORY; - ret.mLocationAndLength = { - .poolIndex = poolIndex, .offset = offset, .length = rawLength, .padding = padding}; + ret.mLocationAndLength = {.poolIndex = poolIndex, .offset = offset, .length = length}; ret.mBuffer = nullptr; return {ANEURALNETWORKS_NO_ERROR, ret}; } @@ -113,56 +95,36 @@ int ModelArgumentInfo::updateDimensionInfo(const Operand& operand, const ANeuralNetworksOperandType* newType) { if (newType == nullptr) { - mInitialDimensions = operand.dimensions; + mDimensions = operand.dimensions; } else { const uint32_t count = newType->dimensionCount; - mInitialDimensions = std::vector<uint32_t>(count); - std::copy(&newType->dimensions[0], &newType->dimensions[count], mInitialDimensions.begin()); + mDimensions = hidl_vec<uint32_t>(count); + std::copy(&newType->dimensions[0], &newType->dimensions[count], mDimensions.begin()); } - mDimensions = mInitialDimensions; return ANEURALNETWORKS_NO_ERROR; } -Request::Argument ModelArgumentInfo::createRequestArgument() const { - switch (mState) { - case ModelArgumentInfo::POINTER: { - Request::Argument arg = {.lifetime = Request::Argument::LifeTime::POINTER, - .location = mLocationAndLength, - .dimensions = mDimensions}; - arg.location.pointer = mBuffer; - return arg; - } - case ModelArgumentInfo::MEMORY: - return {.lifetime = Request::Argument::LifeTime::POOL, - .location = mLocationAndLength, - .dimensions = mDimensions}; - case ModelArgumentInfo::HAS_NO_VALUE: - return {.lifetime = Request::Argument::LifeTime::NO_VALUE}; - case ModelArgumentInfo::UNSPECIFIED: - LOG(FATAL) << "Invalid state: UNSPECIFIED"; - return {}; - }; - LOG(FATAL) << 
"Invalid state: " << mState; - return {}; -} - -std::vector<Request::Argument> createRequestArguments( +hidl_vec<RequestArgument> createRequestArguments( const std::vector<ModelArgumentInfo>& argumentInfos, const std::vector<DataLocation>& ptrArgsLocations) { const size_t count = argumentInfos.size(); - std::vector<Request::Argument> ioInfos(count); + hidl_vec<RequestArgument> ioInfos(count); uint32_t ptrArgsIndex = 0; for (size_t i = 0; i < count; i++) { const auto& info = argumentInfos[i]; switch (info.state()) { case ModelArgumentInfo::POINTER: - ioInfos[i] = {.lifetime = Request::Argument::LifeTime::POOL, + ioInfos[i] = {.hasNoValue = false, .location = ptrArgsLocations[ptrArgsIndex++], .dimensions = info.dimensions()}; break; case ModelArgumentInfo::MEMORY: + ioInfos[i] = {.hasNoValue = false, + .location = info.locationAndLength(), + .dimensions = info.dimensions()}; + break; case ModelArgumentInfo::HAS_NO_VALUE: - ioInfos[i] = info.createRequestArgument(); + ioInfos[i] = {.hasNoValue = true}; break; default: CHECK(false);
diff --git a/runtime/ModelArgumentInfo.h b/runtime/ModelArgumentInfo.h index 4e45d63..22dd34c 100644 --- a/runtime/ModelArgumentInfo.h +++ b/runtime/ModelArgumentInfo.h
@@ -17,12 +17,12 @@ #ifndef ANDROID_FRAMEWORKS_ML_NN_RUNTIME_MODEL_ARGUMENT_INFO_H #define ANDROID_FRAMEWORKS_ML_NN_RUNTIME_MODEL_ARGUMENT_INFO_H -#include <LegacyUtils.h> - #include <utility> #include <vector> +#include "HalInterfaces.h" #include "NeuralNetworks.h" +#include "Utils.h" namespace android { namespace nn { @@ -38,12 +38,11 @@ ModelArgumentInfo() {} static std::pair<int, ModelArgumentInfo> createFromPointer( - const Operand& operand, const ANeuralNetworksOperandType* type, - void* data /* nullptr means HAS_NO_VALUE */, uint32_t length, - bool paddingEnabled = true); + const hal::Operand& operand, const ANeuralNetworksOperandType* type, + void* data /* nullptr means HAS_NO_VALUE */, uint32_t length); static std::pair<int, ModelArgumentInfo> createFromMemory( - const Operand& operand, const ANeuralNetworksOperandType* type, uint32_t poolIndex, - uint32_t offset, uint32_t length, bool paddingEnabled = true); + const hal::Operand& operand, const ANeuralNetworksOperandType* type, uint32_t poolIndex, + uint32_t offset, uint32_t length); enum State { POINTER, MEMORY, HAS_NO_VALUE, UNSPECIFIED }; @@ -56,10 +55,6 @@ return mBuffer; } - const std::vector<uint32_t>& initialDimensions() const { - CHECK(mState == POINTER || mState == MEMORY); - return mInitialDimensions; - } const std::vector<uint32_t>& dimensions() const { CHECK(mState == POINTER || mState == MEMORY); return mDimensions; @@ -83,64 +78,39 @@ return mLocationAndLength.length; } - uint32_t padding() const { - CHECK(mState == POINTER || mState == MEMORY); - return mLocationAndLength.padding; - } - - const DataLocation& locationAndLength() const { + const hal::DataLocation& locationAndLength() const { CHECK_EQ(mState, MEMORY); return mLocationAndLength; } - DataLocation& locationAndLength() { + hal::DataLocation& locationAndLength() { CHECK_EQ(mState, MEMORY); return mLocationAndLength; } - // Restore updatable properties to creation-time values. 
- void reset() { - mDimensions = mInitialDimensions; - mIsSufficient = true; - } - - // Convert ModelArgumentInfo to canonical Request::Argument. Unlike createRequestArguments, - // this method will keep the pointer type in the canonical type. - Request::Argument createRequestArgument() const; - private: - int updateDimensionInfo(const Operand& operand, const ANeuralNetworksOperandType* newType); + int updateDimensionInfo(const hal::Operand& operand, const ANeuralNetworksOperandType* newType); // Whether the argument was specified as being in a Memory, as a pointer, // has no value, or has not been specified. // If POINTER then: // mLocationAndLength.length is valid. + // mDimensions is valid. // mBuffer is valid. // If MEMORY then: // mLocationAndLength.{poolIndex, offset, length} is valid. - // In both MEMORY and POINTER cases: - // mInitialDimensions is valid. // mDimensions is valid. - // mIsSufficient is valid. - - // Properties that are fixed at creation. - State mState = UNSPECIFIED; - void* mBuffer = nullptr; - std::vector<uint32_t> mInitialDimensions; - // TODO(b/183021356): This field is logically fixed at creation, but actually updated in - // StepExecutor::mapInputOrOutput when constructing StepExecutor ModelArgumentInfos from - // ExecutionBuilder ModelArgumentInfos. We should find a way to avoid this update. - DataLocation mLocationAndLength; - - // Properties that are updatable after creation. - std::vector<uint32_t> mDimensions; - bool mIsSufficient = true; + State mState = UNSPECIFIED; // fixed at creation + void* mBuffer = nullptr; // fixed at creation + hal::DataLocation mLocationAndLength; // can be updated after creation + std::vector<uint32_t> mDimensions; // can be updated after creation + bool mIsSufficient = true; // can be updated after creation }; -// Convert ModelArgumentInfo to canonical Request::Argument. For pointer arguments, use the location +// Convert ModelArgumentInfo to HIDL RequestArgument. 
For pointer arguments, use the location // information in ptrArgsLocations. -std::vector<Request::Argument> createRequestArguments( +hal::hidl_vec<hal::RequestArgument> createRequestArguments( const std::vector<ModelArgumentInfo>& argumentInfos, - const std::vector<DataLocation>& ptrArgsLocations); + const std::vector<hal::DataLocation>& ptrArgsLocations); } // namespace nn } // namespace android
diff --git a/runtime/ModelBuilder.cpp b/runtime/ModelBuilder.cpp index f306d61..73ec1af 100644 --- a/runtime/ModelBuilder.cpp +++ b/runtime/ModelBuilder.cpp
@@ -18,9 +18,6 @@ #include "ModelBuilder.h" -#include <GraphDump.h> -#include <LegacyUtils.h> - #include <algorithm> #include <map> #include <memory> @@ -29,29 +26,21 @@ #include <vector> #include "CompilationBuilder.h" +#include "GraphDump.h" #include "Manager.h" #include "TypeManager.h" +#include "Utils.h" +#include "ValidateHal.h" namespace android { namespace nn { +using namespace hal; + // The maximum number of operands and operations that a model may have. const uint32_t MAX_NUMBER_OF_OPERANDS = 0xFFFFFFFE; const uint32_t MAX_NUMBER_OF_OPERATIONS = 0xFFFFFFFE; -#define NN_VALIDATE_NULL_OR_SIZED(tag, data, length) \ - if ((data == nullptr) != (length == 0)) { \ - LOG(ERROR) << "ANeuralNetworksModel_" << tag << " " << #data << " is " \ - << (data == nullptr ? "null" : "not null") << " but " << #length << " is " \ - << length; \ - return ANEURALNETWORKS_BAD_DATA; \ - } - -template <typename Type> -static std::vector<Type> makeVector(const Type* data, uint32_t length) { - return length > 0 ? 
std::vector<Type>(data, data + length) : std::vector<Type>(); -} - bool ModelBuilder::badState(const char* name) { if (mCompletedModel) { LOG(ERROR) << "ANeuralNetworksModel_" << name << " can't modify after model finished"; @@ -77,7 +66,7 @@ } OperandType operandType = static_cast<OperandType>(type.type); - if (isExtension(operandType) && !TypeManager::get()->areExtensionsAllowed()) { + if (isExtensionOperandType(operandType) && !TypeManager::get()->areExtensionsAllowed()) { LOG(ERROR) << "Extensions are not supported for this process."; return ANEURALNETWORKS_BAD_DATA; } @@ -88,34 +77,29 @@ } const Extension::OperandTypeInformation* info = nullptr; - if (isExtension(operandType) && + if (isExtensionOperandType(operandType) && !TypeManager::get()->getExtensionOperandTypeInfo(operandType, &info)) { - LOG(ERROR) << "Extension operand type " << operandType << " is not registered"; + LOG(ERROR) << "Extension operand type " << toString(operandType) << " is not registered"; return ANEURALNETWORKS_BAD_DATA; } - NN_VALIDATE_NULL_OR_SIZED("addOperand", type.dimensions, type.dimensionCount); - Operand operand = { - .type = operandType, - .dimensions = makeVector(type.dimensions, type.dimensionCount), - .scale = type.scale, - .zeroPoint = type.zeroPoint, - .lifetime = Operand::LifeTime::TEMPORARY_VARIABLE, - .location = {.poolIndex = 0, .offset = 0, .length = 0}, - .extraParams = {}, - }; - if (auto result = validateOperandType(operand, info, "ANeuralNetworksModel_addOperand", true); - !result.ok()) { - LOG(ERROR) << result.error(); - return ANEURALNETWORKS_BAD_DATA; - } - + NN_RETURN_IF_ERROR(validateOperandType(type, info, "ANeuralNetworksModel_addOperand", true)); size_t idx = mOperands.size(); if (idx >= MAX_NUMBER_OF_OPERANDS) { LOG(ERROR) << "ANeuralNetworksModel_addOperand exceed max operands"; return ANEURALNETWORKS_BAD_DATA; } - mOperands.push_back(std::move(operand)); + mOperands.push_back({ + .type = operandType, + .dimensions = + 
hidl_vec<uint32_t>(type.dimensions, type.dimensions + type.dimensionCount), + .numberOfConsumers = 0, + .scale = type.scale, + .zeroPoint = type.zeroPoint, + .lifetime = OperandLifeTime::TEMPORARY_VARIABLE, + .location = {.poolIndex = 0, .offset = 0, .length = 0}, + .extraParams = OperandExtraParams(), + }); mHasOEMOperand |= isOemOperand; return ANEURALNETWORKS_NO_ERROR; } @@ -132,9 +116,13 @@ return ANEURALNETWORKS_BAD_DATA; } Operand& operand = mOperands[index]; - NN_VALIDATE_NULL_OR_SIZED("setOperandValue", buffer, length); if (buffer == nullptr) { - operand.lifetime = Operand::LifeTime::NO_VALUE; + if (length) { + LOG(ERROR) << "ANeuralNetworksModel_setOperandValue buffer is nullptr but length is " + "not 0"; + return ANEURALNETWORKS_BAD_DATA; + } + operand.lifetime = OperandLifeTime::NO_VALUE; // The location is unused and is set to zeros. operand.location = {.poolIndex = 0, .offset = 0, .length = 0}; } else { @@ -162,14 +150,14 @@ uint32_t existingSize = static_cast<uint32_t>(mSmallOperandValues.size()); uint32_t extraBytes = alignBytesNeeded(existingSize, valueLength); mSmallOperandValues.resize(existingSize + extraBytes + valueLength); - operand.lifetime = Operand::LifeTime::CONSTANT_COPY; + operand.lifetime = OperandLifeTime::CONSTANT_COPY; operand.location = { .poolIndex = 0, .offset = existingSize + extraBytes, .length = valueLength}; memcpy(&mSmallOperandValues[operand.location.offset], buffer, valueLength); VLOG(MODEL) << "Copied small value to offset " << operand.location.offset; } else { VLOG(MODEL) << "Saving large value"; - operand.lifetime = Operand::LifeTime::CONSTANT_REFERENCE; + operand.lifetime = OperandLifeTime::CONSTANT_REFERENCE; // The values for poolIndex and offset will be set when the model is finished. 
typedef decltype(operand.location.poolIndex) PoolIndexType; typedef decltype(operand.location.offset) OffsetType; @@ -203,14 +191,13 @@ return ANEURALNETWORKS_BAD_DATA; } Operand& operand = mOperands[index]; - operand.lifetime = Operand::LifeTime::SUBGRAPH; + operand.lifetime = OperandLifeTime::SUBGRAPH; operand.location = { .poolIndex = 0, .offset = static_cast<uint32_t>(mReferencedModels.size()), .length = 0, }; mReferencedModels.push_back(value); - mReferencedSubgraphsForValidation.push_back(value->makeModel().main); return ANEURALNETWORKS_NO_ERROR; } @@ -228,21 +215,18 @@ } Operand& operand = mOperands[index]; - NN_VALIDATE_NULL_OR_SIZED("setOperandSymmPerChannelQuantParams", channelQuant.scales, - channelQuant.scaleCount); - Operand::SymmPerChannelQuantParams extraParams = { - .scales = makeVector(channelQuant.scales, channelQuant.scaleCount), - .channelDim = channelQuant.channelDim, - }; - if (auto result = validateOperandSymmPerChannelQuantParams( - operand, extraParams, "ANeuralNetworksModel_setOperandSymmPerChannelQuantParams"); - !result.ok()) { - LOG(ERROR) << result.error(); + if (!validateOperandSymmPerChannelQuantParams( + operand, channelQuant, + "ANeuralNetworksModel_setOperandSymmPerChannelQuantParams")) { return ANEURALNETWORKS_BAD_DATA; } switch (operand.type) { case OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL: - operand.extraParams = std::move(extraParams); + operand.extraParams.channelQuant({ + .scales = hidl_vec<float>(channelQuant.scales, + channelQuant.scales + channelQuant.scaleCount), + .channelDim = channelQuant.channelDim, + }); break; default: LOG(ERROR) << "ANeuralNetworksModel_setOperandSymmPerChannelQuantParams " @@ -264,20 +248,29 @@ } Operand& operand = mOperands[index]; - if (!isExtension(operand.type)) { + if (data == nullptr && length != 0) { + LOG(ERROR) << "ANeuralNetworksModel_setOperandExtensionData data is nullptr but length is " + << length; + return ANEURALNETWORKS_BAD_DATA; + } + if (data != nullptr && length == 0) { + 
LOG(ERROR) << "ANeuralNetworksModel_setOperandExtensionData data is not nullptr but length " + << "is zero"; + return ANEURALNETWORKS_BAD_DATA; + } + if (!isExtensionOperandType(operand.type)) { LOG(ERROR) << "ANeuralNetworksModel_setOperandExtensionData " << "setting extension data for a base operand type " << static_cast<int32_t>(operand.type); return ANEURALNETWORKS_BAD_DATA; } - NN_VALIDATE_NULL_OR_SIZED("setOperandExtensionData", data, length); if (data == nullptr) { - operand.extraParams = {}; + operand.extraParams.none(); } else { - operand.extraParams = Operand::ExtensionParams( - std::vector<uint8_t>(reinterpret_cast<const uint8_t*>(data), - reinterpret_cast<const uint8_t*>(data) + length)); + operand.extraParams.extension( + hidl_vec<uint8_t>(reinterpret_cast<const uint8_t*>(data), + reinterpret_cast<const uint8_t*>(data) + length)); } return ANEURALNETWORKS_NO_ERROR; } @@ -290,7 +283,7 @@ size_t poolSize = 0; for (LargeValue& l : mLargeOperandValues) { Operand& operand = mOperands[l.operandIndex]; - CHECK_EQ(operand.lifetime, Operand::LifeTime::CONSTANT_REFERENCE); + nnAssert(operand.lifetime == OperandLifeTime::CONSTANT_REFERENCE); poolSize += alignBytesNeeded(poolSize, operand.location.length); operand.location.offset = poolSize; poolSize += operand.location.length; @@ -315,8 +308,8 @@ return ANEURALNETWORKS_NO_ERROR; } -int ModelBuilder::setOperandValueFromMemory(uint32_t index, const RuntimeMemory* memory, - uint32_t offset, size_t length) { +int ModelBuilder::setOperandValueFromMemory(uint32_t index, const Memory* memory, uint32_t offset, + size_t length) { VLOG(MODEL) << __func__ << " for operand " << index << " offset " << offset << " size " << length; if (badState("setOperandValueFromMemory")) { @@ -341,12 +334,12 @@ return ANEURALNETWORKS_BAD_DATA; } // Set compilation = nullptr to indicate that the memory is used for a model constant. - // In this case, IOType::INPUT is a placeholder value that is ignored by the validator. 
- if (!memory->getValidator().validate(/*compilation=*/nullptr, /*placeholder*/ IOType::INPUT, - index, nullptr, offset, length)) { + // In this case, IOType::INPUT is a dummy value that is ignored by the validator. + if (!memory->getValidator().validate(/*compilation=*/nullptr, /*dummy*/ IOType::INPUT, index, + nullptr, offset, length)) { return ANEURALNETWORKS_BAD_DATA; } - operand.lifetime = Operand::LifeTime::CONSTANT_REFERENCE; + operand.lifetime = OperandLifeTime::CONSTANT_REFERENCE; operand.location = {.poolIndex = mMemories.add(memory), .offset = offset, .length = static_cast<uint32_t>(length)}; @@ -361,7 +354,7 @@ } OperationType operationType = static_cast<OperationType>(type); - if (isExtension(operationType) && !TypeManager::get()->areExtensionsAllowed()) { + if (isExtensionOperationType(operationType) && !TypeManager::get()->areExtensionsAllowed()) { LOG(ERROR) << "Extensions are not supported for this process."; return ANEURALNETWORKS_BAD_DATA; } @@ -369,33 +362,40 @@ LOG(WARNING) << "OEM_OPERATION is deprecated. 
Use Extensions instead."; } - if (!isExtension(operationType)) { + if (!isExtensionOperationType(operationType)) { if (!validCode(kNumberOfOperationTypes, kNumberOfOperationTypesOEM, type)) { LOG(ERROR) << "ANeuralNetworksModel_addOperation invalid operation type " << type; return ANEURALNETWORKS_BAD_DATA; } - } else { - const Extension* extension; - uint16_t extensionPrefix = getExtensionPrefix(static_cast<uint32_t>(operationType)); - if (!TypeManager::get()->getExtensionInfo(extensionPrefix, &extension)) { - LOG(ERROR) << "Extension operation type " << operationType << " is not recognized"; - return ANEURALNETWORKS_BAD_DATA; - } } - NN_VALIDATE_NULL_OR_SIZED("addOperation", inputs, inputCount); - NN_VALIDATE_NULL_OR_SIZED("addOperation", outputs, outputCount); - Operation operation = { - .type = operationType, - .inputs = makeVector(inputs, inputCount), - .outputs = makeVector(outputs, outputCount), + auto isValidSubgraphReference = [this](const Operand& modelOperand) -> bool { + NN_RET_CHECK(modelOperand.type == OperandType::SUBGRAPH) + << "Unexpected operand type: " << toString(modelOperand.type); + NN_RET_CHECK_LT(modelOperand.location.offset, referencedModelCount()) + << "Invalid subgraph model reference"; + return true; }; - if (auto result = validateOperationButNotOperands(operation, mOperands, - mReferencedSubgraphsForValidation); - !result.ok()) { - LOG(ERROR) << "Invalid Operation: " << result.error(); - return ANEURALNETWORKS_BAD_DATA; - } + auto getInputCount = [this](const Operand& modelOperand) -> uint32_t { + return getReferencedModel(modelOperand)->inputCount(); + }; + auto getOutputCount = [this](const Operand& modelOperand) -> uint32_t { + return getReferencedModel(modelOperand)->outputCount(); + }; + auto getInputOperand = [this](const Operand& modelOperand, uint32_t index) -> const Operand* { + return &getReferencedModel(modelOperand)->getInputOperand(index); + }; + auto getOutputOperand = [this](const Operand& modelOperand, uint32_t index) -> 
const Operand* { + return &getReferencedModel(modelOperand)->getOutputOperand(index); + }; + NN_RETURN_IF_ERROR(validateOperation( + type, inputCount, inputs, outputCount, outputs, mOperands, HalVersion::LATEST, + {.isValidSubgraphReference = isValidSubgraphReference, + .getSubgraphInputCount = getInputCount, + .getSubgraphOutputCount = getOutputCount, + .getSubgraphInputOperand = getInputOperand, + .getSubgraphOutputOperand = getOutputOperand, + .allowControlFlowOperationWithOperandOfUnknownSize = true})); uint32_t operationIndex = operationCount(); if (operationIndex >= MAX_NUMBER_OF_OPERATIONS) { @@ -403,9 +403,16 @@ return ANEURALNETWORKS_BAD_DATA; } - mOperations.push_back(std::move(operation)); + mOperations.push_back({ + .type = operationType, + .inputs = hidl_vec<uint32_t>(inputs, inputs + inputCount), + .outputs = hidl_vec<uint32_t>(outputs, outputs + outputCount), + }); + for (uint32_t i : mOperations.back().inputs) { + mOperands[i].numberOfConsumers++; + } mHasOEMOperation |= (operationType == OperationType::OEM_OPERATION); - mHasExtensionOperation |= isExtension(operationType); + mHasExtensionOperation |= isExtensionOperationType(operationType); return ANEURALNETWORKS_NO_ERROR; } @@ -416,25 +423,21 @@ return ANEURALNETWORKS_BAD_STATE; } - NN_VALIDATE_NULL_OR_SIZED("identifyInputsAndOutputs", inputs, inputCount); - if (auto result = validateOperandList(makeVector(inputs, inputCount), operandCount(), - "ANeuralNetworksModel_identifyInputsAndOutputs inputs"); - !result.ok()) { - LOG(ERROR) << result.error(); - return ANEURALNETWORKS_BAD_DATA; + int n = validateOperandList(inputCount, inputs, operandCount(), + "ANeuralNetworksModel_identifyInputsAndOutputs inputs"); + if (n != ANEURALNETWORKS_NO_ERROR) { + return n; } - NN_VALIDATE_NULL_OR_SIZED("identifyInputsAndOutputs", outputs, outputCount); - if (auto result = validateOperandList(makeVector(outputs, outputCount), operandCount(), - "ANeuralNetworksModel_identifyInputsAndOutputs outputs"); - 
!result.ok()) { - LOG(ERROR) << result.error(); - return ANEURALNETWORKS_BAD_DATA; + n = validateOperandList(outputCount, outputs, operandCount(), + "ANeuralNetworksModel_identifyInputsAndOutputs outputs"); + if (n != ANEURALNETWORKS_NO_ERROR) { + return n; } // Makes a copy of the index list, validates the arguments, and changes // the lifetime info of the corresponding operand. auto setArguments = [&](std::vector<uint32_t>* indexVector, uint32_t indexCount, - const uint32_t* indexList, Operand::LifeTime lifetime) -> bool { + const uint32_t* indexList, OperandLifeTime lifetime) -> bool { indexVector->resize(indexCount); for (uint32_t i = 0; i < indexCount; i++) { const uint32_t operandIndex = indexList[i]; @@ -448,7 +451,7 @@ } (*indexVector)[i] = operandIndex; Operand& operand = mOperands[operandIndex]; - if (operand.lifetime != Operand::LifeTime::TEMPORARY_VARIABLE) { + if (operand.lifetime != OperandLifeTime::TEMPORARY_VARIABLE) { LOG(ERROR) << "ANeuralNetworksModel_identifyInputsAndOutputs Can't set operand " << operandIndex << " to be an input or output. Check that it's not a constant or " @@ -460,8 +463,8 @@ return true; }; - if (!setArguments(&mInputIndexes, inputCount, inputs, Operand::LifeTime::SUBGRAPH_INPUT) || - !setArguments(&mOutputIndexes, outputCount, outputs, Operand::LifeTime::SUBGRAPH_OUTPUT)) { + if (!setArguments(&mInputIndexes, inputCount, inputs, OperandLifeTime::SUBGRAPH_INPUT) || + !setArguments(&mOutputIndexes, outputCount, outputs, OperandLifeTime::SUBGRAPH_OUTPUT)) { return ANEURALNETWORKS_BAD_DATA; } @@ -514,15 +517,15 @@ return ANEURALNETWORKS_BAD_DATA; } - // TODO: Modify validation so that it can be called without creating a Model. + // TODO: Modify validation so that it can be called without creating a HAL Model. // NOTE: Must sortIntoRunOrder() before validation; validator expects operations // to have been sorted. 
// NOTE: Must copyLargeValuesToSharedMemory() before validation; otherwise, // a CONSTANT_REFERENCE operand will not have correct .poolIndex, and // validation will not work properly. - const Model modelForValidation = makeModel(); - if (auto result = validate(modelForValidation); !result.ok()) { - LOG(ERROR) << "ANeuralNetworksModel_finish called on invalid model: " << result.error(); + const Model modelForValidation = makeHidlModel(); + if (!validateModel(modelForValidation, ValidationMode::RUNTIME)) { + LOG(ERROR) << "ANeuralNetworksModel_finish called on invalid model"; mInvalidModel = true; return ANEURALNETWORKS_BAD_DATA; } @@ -539,12 +542,12 @@ static void logRemoval(const Operation& operation, uint32_t count, const std::vector<Operand>& operands) { std::ostringstream message; - message << "Operation " << operation.type << " with inputs {"; + message << "Operation " << toString(operation.type) << " with inputs {"; for (uint32_t i = 0; i < operation.inputs.size(); ++i) { if (i != 0) { message << ", "; } - message << operands[operation.inputs[i]].type; + message << toString(operands[operation.inputs[i]].type); } message << "} has trailing optional inputs set to default values. Removing " << count << " trailing inputs."; @@ -563,6 +566,9 @@ const uint32_t inputCount = operation.inputs.size(); CHECK_LT(count, inputCount); const uint32_t newInputCount = inputCount - count; + for (uint32_t i = newInputCount; i < inputCount; ++i) { + --mOperands[operation.inputs[i]].numberOfConsumers; + } operation.inputs.resize(newInputCount); } } @@ -577,16 +583,12 @@ // See countMatchingTrailingArguments(). 
static bool matchesSpec(TailSpec spec, const Operand& operand, const std::vector<uint8_t>& mSmallOperandValues) { - const void* valuePtr = nullptr; - if (operand.lifetime == Operand::LifeTime::CONSTANT_COPY) { - valuePtr = static_cast<const void*>(&mSmallOperandValues[operand.location.offset]); - } else if (operand.lifetime == Operand::LifeTime::POINTER) { - valuePtr = std::get<const void*>(operand.location.pointer); - } else { + if (operand.lifetime != OperandLifeTime::CONSTANT_COPY) { // CONSTANT_REFERENCE operands are not supported to avoid mapping memory // during compilation. return false; } + auto valuePtr = static_cast<const void*>(&mSmallOperandValues[operand.location.offset]); switch (spec) { case TailSpec::BOOL_FALSE: return operand.type == OperandType::BOOL && @@ -816,8 +818,8 @@ count = 0; for (uint32_t operandIndex : mOperations[operationIndex].inputs) { auto lifetime = mOperands[operandIndex].lifetime; - if (lifetime == Operand::LifeTime::TEMPORARY_VARIABLE || - lifetime == Operand::LifeTime::SUBGRAPH_OUTPUT) { + if (lifetime == OperandLifeTime::TEMPORARY_VARIABLE || + lifetime == OperandLifeTime::SUBGRAPH_OUTPUT) { count++; operandToOperations.insert( std::pair<uint32_t, uint32_t>(operandIndex, operationIndex)); @@ -863,54 +865,54 @@ return true; } -// A helper class to simplify state management when creating a Model. -class ModelBuilder::ModelMaker { +// A helper class to simplify state management when creating a HIDL model. 
+class ModelBuilder::HidlModelMaker { public: static Model run(const ModelBuilder* model); private: - static Model::Subgraph makeSubgraph(const ModelBuilder* model); - ModelMaker() {} - Model makeModel(const ModelBuilder* mainModel); + static Subgraph makeSubgraph(const ModelBuilder* model); + HidlModelMaker() {} + Model makeHidlModel(const ModelBuilder* mainModel); uint32_t addSubgraph(const ModelBuilder* refModel); - void updateOperandLocations(const ModelBuilder* refModel, Model::Subgraph* subgraph); + void updateOperandLocations(const ModelBuilder* refModel, Subgraph* subgraph); void addExtensions(const ModelBuilder* model); void addExtensionWithPrefix(uint16_t prefix); - std::vector<Model::Subgraph> mRefSubgraphs; - Model::OperandValues mOperandValues; + std::vector<Subgraph> mRefSubgraphs; + std::vector<uint8_t> mOperandValues; MemoryTracker mMemories; - std::vector<Model::ExtensionNameAndPrefix> mExtensionNameToPrefix; + std::vector<ExtensionNameAndPrefix> mExtensionNameToPrefix; std::set<uint16_t> mPrefixSet; }; -Model ModelBuilder::makeModel() const { - // TODO: Cache the Model to speed up subsequent calls. - return ModelMaker::run(this); +Model ModelBuilder::makeHidlModel() const { + // TODO: Cache the HIDL model to speed up subsequent calls. + return HidlModelMaker::run(this); } -Model ModelBuilder::ModelMaker::run(const ModelBuilder* model) { - // run() ensures the state of ModelMaker is destroyed after the call. - return ModelMaker().makeModel(model); +Model ModelBuilder::HidlModelMaker::run(const ModelBuilder* model) { + // run() ensures the state of HidlModelMaker is destroyed after the call. 
+ return HidlModelMaker().makeHidlModel(model); } -Model ModelBuilder::ModelMaker::makeModel(const ModelBuilder* mainModel) { +Model ModelBuilder::HidlModelMaker::makeHidlModel(const ModelBuilder* mainModel) { addExtensions(mainModel); Model model; model.main = makeSubgraph(mainModel); updateOperandLocations(mainModel, &model.main); model.referenced = std::move(mRefSubgraphs); model.operandValues = std::move(mOperandValues); - model.pools.reserve(mMemories.size()); - std::transform(mMemories.begin(), mMemories.end(), std::back_inserter(model.pools), - [](const RuntimeMemory* m) { return m->getMemory(); }); + model.pools.resize(mMemories.size()); + std::transform(mMemories.begin(), mMemories.end(), model.pools.begin(), + [](const Memory* m) { return m->getHidlMemory(); }); model.relaxComputationFloat32toFloat16 = mainModel->mRelaxComputationFloat32toFloat16; model.extensionNameToPrefix = std::move(mExtensionNameToPrefix); return model; } -Model::Subgraph ModelBuilder::ModelMaker::makeSubgraph(const ModelBuilder* model) { - Model::Subgraph subgraph; +Subgraph ModelBuilder::HidlModelMaker::makeSubgraph(const ModelBuilder* model) { + Subgraph subgraph; subgraph.operands = model->mOperands; subgraph.operations = model->mOperations; subgraph.inputIndexes = model->mInputIndexes; @@ -918,22 +920,27 @@ return subgraph; } -void ModelBuilder::ModelMaker::updateOperandLocations(const ModelBuilder* refModel, - Model::Subgraph* subgraph) { +void ModelBuilder::HidlModelMaker::updateOperandLocations(const ModelBuilder* refModel, + Subgraph* subgraph) { for (Operand& operand : subgraph->operands) { - if (operand.lifetime == Operand::LifeTime::CONSTANT_COPY) { + if (operand.lifetime == OperandLifeTime::CONSTANT_COPY) { uint32_t valueLength = operand.location.length; + uint32_t existingSize = mOperandValues.size(); + uint32_t extraBytes = alignBytesNeeded(existingSize, valueLength); uint32_t originalOffset = operand.location.offset; - operand.location = 
mOperandValues.append(&refModel->mSmallOperandValues[originalOffset], - valueLength); - } else if (operand.lifetime == Operand::LifeTime::CONSTANT_REFERENCE) { + uint32_t offset = existingSize + extraBytes; + mOperandValues.resize(offset + valueLength); + memcpy(&mOperandValues[offset], &refModel->mSmallOperandValues[originalOffset], + valueLength); + operand.location.offset = offset; + } else if (operand.lifetime == OperandLifeTime::CONSTANT_REFERENCE) { uint32_t originalPoolIndex = operand.location.poolIndex; operand.location.poolIndex = mMemories.add(refModel->mMemories[originalPoolIndex]); } } // Do recursive calls at the end to improve locality of mOperandValues. for (Operand& operand : subgraph->operands) { - if (operand.lifetime == Operand::LifeTime::SUBGRAPH) { + if (operand.lifetime == OperandLifeTime::SUBGRAPH) { uint32_t refModelIndex = operand.location.offset; // TODO(b/147875885): Avoid creating duplicate refSubgraphs when // a single refModel is referenced multiple times. @@ -942,22 +949,23 @@ } } -uint32_t ModelBuilder::ModelMaker::addSubgraph(const ModelBuilder* refModel) { +uint32_t ModelBuilder::HidlModelMaker::addSubgraph(const ModelBuilder* refModel) { uint32_t index = mRefSubgraphs.size(); mRefSubgraphs.push_back(makeSubgraph(refModel)); updateOperandLocations(refModel, &mRefSubgraphs.back()); return index; } -void ModelBuilder::ModelMaker::addExtensions(const ModelBuilder* model) { +void ModelBuilder::HidlModelMaker::addExtensions(const ModelBuilder* model) { + constexpr uint8_t kLowBitsType = static_cast<uint8_t>(ExtensionTypeEncoding::LOW_BITS_TYPE); for (const auto& operand : model->mOperands) { - if (isExtension(operand.type)) { - addExtensionWithPrefix(static_cast<uint32_t>(operand.type) >> kExtensionTypeBits); + if (isExtensionOperandType(operand.type)) { + addExtensionWithPrefix(static_cast<uint32_t>(operand.type) >> kLowBitsType); } } for (const auto& operation : model->mOperations) { - if (isExtension(operation.type)) { - 
addExtensionWithPrefix(static_cast<uint32_t>(operation.type) >> kExtensionTypeBits); + if (isExtensionOperationType(operation.type)) { + addExtensionWithPrefix(static_cast<uint32_t>(operation.type) >> kLowBitsType); } } for (const auto& refModel : model->mReferencedModels) { @@ -965,7 +973,7 @@ } } -void ModelBuilder::ModelMaker::addExtensionWithPrefix(uint16_t prefix) { +void ModelBuilder::HidlModelMaker::addExtensionWithPrefix(uint16_t prefix) { if (!mPrefixSet.insert(prefix).second) { return; } @@ -977,7 +985,5 @@ }); } -#undef NN_VALIDATE_NULL_OR_SIZED - } // namespace nn } // namespace android
diff --git a/runtime/ModelBuilder.h b/runtime/ModelBuilder.h index ae9df8b..94baab7 100644 --- a/runtime/ModelBuilder.h +++ b/runtime/ModelBuilder.h
@@ -20,13 +20,13 @@ #ifndef ANDROID_FRAMEWORKS_ML_NN_RUNTIME_MODEL_BUILDER_H #define ANDROID_FRAMEWORKS_ML_NN_RUNTIME_MODEL_BUILDER_H -#include <LegacyUtils.h> - #include <memory> #include <vector> +#include "HalInterfaces.h" #include "Memory.h" #include "NeuralNetworks.h" +#include "Utils.h" namespace android { namespace nn { @@ -34,7 +34,7 @@ class CompilationBuilder; class Device; class ExecutionPlan; -class RuntimeMemory; +class Memory; class ModelBuilder { public: @@ -44,7 +44,7 @@ // Adds an operand to the model. int addOperand(const ANeuralNetworksOperandType& type); int setOperandValue(uint32_t index, const void* buffer, size_t length); - int setOperandValueFromMemory(uint32_t index, const RuntimeMemory* memory, uint32_t offset, + int setOperandValueFromMemory(uint32_t index, const Memory* memory, uint32_t offset, size_t length); int setOperandValueFromModel(uint32_t index, const ModelBuilder* value); int setOperandSymmPerChannelQuantParams( @@ -72,7 +72,7 @@ const std::vector<std::shared_ptr<Device>>& devices, bool explicitDeviceList = false); - Model makeModel() const; + hal::Model makeHidlModel() const; uint32_t operandCount() const { // We don't allow more than uint32_t worth of operands @@ -89,7 +89,7 @@ return mInputIndexes[i]; } const std::vector<uint32_t>& getInputOperandIndexes() const { return mInputIndexes; } - const Operand& getInputOperand(uint32_t i) const { + const hal::Operand& getInputOperand(uint32_t i) const { uint32_t index = getInputOperandIndex(i); CHECK_LT(index, mOperands.size()); return mOperands[index]; @@ -99,15 +99,15 @@ return mOutputIndexes[i]; } const std::vector<uint32_t>& getOutputOperandIndexes() const { return mOutputIndexes; } - const Operand& getOutputOperand(uint32_t i) const { + const hal::Operand& getOutputOperand(uint32_t i) const { uint32_t index = getOutputOperandIndex(i); CHECK_LT(index, mOperands.size()); return mOperands[index]; } - const Operand& getOperand(uint32_t index) const { return mOperands[index]; } - 
const Operation& getOperation(uint32_t index) const { return mOperations[index]; } + const hal::Operand& getOperand(uint32_t index) const { return mOperands[index]; } + const hal::Operation& getOperation(uint32_t index) const { return mOperations[index]; } const MemoryTracker& getMemories() const { return mMemories; } - const std::vector<Operation>& getOperations() const { return mOperations; } + const std::vector<hal::Operation>& getOperations() const { return mOperations; } const std::vector<uint32_t>& getSortedOperationMapping() const { return mSortedOperationIndexMap; } @@ -121,15 +121,14 @@ CHECK_LT(i, mReferencedModels.size()); return mReferencedModels[i]; } - const ModelBuilder* getReferencedModel(const Operand& operand) const { - CHECK(operand.lifetime == Operand::LifeTime::SUBGRAPH); + const ModelBuilder* getReferencedModel(const hal::Operand& operand) const { + CHECK(operand.lifetime == hal::OperandLifeTime::SUBGRAPH); return getReferencedModel(operand.location.offset); } - // simulateFailureResultCode == ANEURALNETWORKS_NO_ERROR means behave normally. int partitionTheWork(const std::vector<std::shared_ptr<Device>>& devices, uint32_t preference, - uint32_t priority, const OptionalTimePoint& deadline, ExecutionPlan* plan, - int simulateFailureResultCode = ANEURALNETWORKS_NO_ERROR) const; + uint32_t priority, const std::optional<Deadline>& deadline, + ExecutionPlan* plan) const; private: // TODO(b/132322449): move partitionTheWork, findBestDeviceForEachOperation, @@ -161,7 +160,8 @@ int partitionTheWorkInternal(uint32_t sourceModelIndex, const std::vector<std::shared_ptr<Device>>& devices, uint32_t preference, uint32_t priority, - const OptionalTimePoint& deadline, ExecutionPlan* plan) const; + const std::optional<Deadline>& deadline, + ExecutionPlan* plan) const; // Return true if either mCompleteModel or mInvalidModel is true. bool badState(const char* name); @@ -172,7 +172,7 @@ // optional arguments are set to default values. 
This transformation enables // more drivers to execute the model. See http://b/147105700. void removeTrailingArgumentsWithDefaultValues(); - uint32_t getNumTrailingArgumentsToRemove(const Operation& operation) const; + uint32_t getNumTrailingArgumentsToRemove(const hal::Operation& operation) const; // Sorts the operations to be in the correct order for single threaded // node-at-a-time execution. @@ -182,7 +182,7 @@ int copyLargeValuesToSharedMemory(); // The operations of the graph. - std::vector<Operation> mOperations; + std::vector<hal::Operation> mOperations; // The mapping from sorted index to the original index of operations in mOperations. // mSortedOperationIndexMap is empty before sortIntoRunOrder() is called. std::vector<uint32_t> mSortedOperationIndexMap; @@ -191,7 +191,7 @@ // Is at least one of those operations an extension operation? bool mHasExtensionOperation = false; // The description of the operands of the graph. - std::vector<Operand> mOperands; + std::vector<hal::Operand> mOperands; // Is at least one of those operands an OEM operand? bool mHasOEMOperand = false; // The indexes of input operands of the model. @@ -231,11 +231,7 @@ // Models referenced by operands in this model. std::vector<const ModelBuilder*> mReferencedModels; - // Main subgraphs of models referenced by operands in this model. Required - // for validateOperation(). - std::vector<Model::Subgraph> mReferencedSubgraphsForValidation; - - class ModelMaker; + class HidlModelMaker; }; } // namespace nn
diff --git a/runtime/NeuralNetworks.cpp b/runtime/NeuralNetworks.cpp index b91d3ce..5d3dae4 100644 --- a/runtime/NeuralNetworks.cpp +++ b/runtime/NeuralNetworks.cpp
@@ -22,35 +22,31 @@ #include "NeuralNetworks.h" -#include <ControlFlow.h> -#include <LegacyUtils.h> -#include <MetaModel.h> -#include <Tracing.h> -#include <nnapi/Types.h> +#include <vndk/hardware_buffer.h> #include <algorithm> #include <cstddef> #include <memory> -#include <utility> #include <vector> #include "BurstBuilder.h" +#include "Callbacks.h" #include "CompilationBuilder.h" +#include "ControlFlow.h" #include "Event.h" #include "ExecutionBuilder.h" -#include "ExecutionCallback.h" -#include "FeatureLevel.h" +#include "HalInterfaces.h" #include "Manager.h" #include "Memory.h" +#include "MetaModel.h" #include "ModelBuilder.h" #include "NeuralNetworksExtensions.h" #include "NeuralNetworksOEM.h" - -#ifdef NN_COMPATIBILITY_LIBRARY_BUILD -#include "NeuralNetworksSupportLibraryImpl.h" -#endif // NN_COMPATIBILITY_LIBRARY_BUILD +#include "Tracing.h" +#include "Utils.h" using namespace android::nn; +using namespace android::nn::hal; // Make sure the constants defined in the header files have not changed values. // IMPORTANT: When adding new values, update kNumberOfDataTypes or kNumberOfDataTypesOEM @@ -562,14 +558,12 @@ // Make sure that the constants are compatible with the values defined in // hardware/interfaces/neuralnetworks/1.3/types.hal. 
-static_assert(android::nn::convertToCanonicalPriority(ANEURALNETWORKS_PRIORITY_LOW) == - Priority::LOW, +static_assert(android::nn::convertToHalPriority(ANEURALNETWORKS_PRIORITY_LOW) == Priority::LOW, "ANEURALNETWORKS_PRIORITY_LOW does not map to Priority::LOW"); -static_assert(android::nn::convertToCanonicalPriority(ANEURALNETWORKS_PRIORITY_MEDIUM) == +static_assert(android::nn::convertToHalPriority(ANEURALNETWORKS_PRIORITY_MEDIUM) == Priority::MEDIUM, "ANEURALNETWORKS_PRIORITY_MEDIUM does not map to Priority::MEDIUM"); -static_assert(android::nn::convertToCanonicalPriority(ANEURALNETWORKS_PRIORITY_HIGH) == - Priority::HIGH, +static_assert(android::nn::convertToHalPriority(ANEURALNETWORKS_PRIORITY_HIGH) == Priority::HIGH, "ANEURALNETWORKS_PRIORITY_HIGH does not map to Priority::HIGH"); // Asserts for ANeuralNetworksOperandType memory layout @@ -603,8 +597,9 @@ // Asserts for compilation caching static_assert(ANEURALNETWORKS_BYTE_SIZE_OF_CACHE_TOKEN == 32, "ANEURALNETWORKS_BYTE_SIZE_OF_CACHE_TOKEN has changed"); -static_assert(ANEURALNETWORKS_BYTE_SIZE_OF_CACHE_TOKEN == kByteSizeOfCacheToken, - "ANEURALNETWORKS_BYTE_SIZE_OF_CACHE_TOKEN != kByteSizeOfCacheToken"); +static_assert(static_cast<uint32_t>(Constant::BYTE_SIZE_OF_CACHE_TOKEN) == + ANEURALNETWORKS_BYTE_SIZE_OF_CACHE_TOKEN, + "Constant::BYTE_SIZE_OF_CACHE_TOKEN != ANEURALNETWORKS_BYTE_SIZE_OF_CACHE_TOKEN"); // Asserts for compilation priority static_assert(ANEURALNETWORKS_PRIORITY_LOW == 90, "ANEURALNETWORKS_PRIORITY_LOW has changed"); @@ -614,33 +609,13 @@ static_assert(ANEURALNETWORKS_PRIORITY_DEFAULT == ANEURALNETWORKS_PRIORITY_MEDIUM, "ANEURALNETWORKS_PRIORITY_DEFAULT has changed"); -// Asserts for feature levels -static_assert(ANEURALNETWORKS_FEATURE_LEVEL_1 == 27, "ANEURALNETWORKS_FEATURE_LEVEL_1 has changed"); -static_assert(ANEURALNETWORKS_FEATURE_LEVEL_2 == 28, "ANEURALNETWORKS_FEATURE_LEVEL_2 has changed"); -static_assert(ANEURALNETWORKS_FEATURE_LEVEL_3 == 29, "ANEURALNETWORKS_FEATURE_LEVEL_3 has 
changed"); -static_assert(ANEURALNETWORKS_FEATURE_LEVEL_4 == 30, "ANEURALNETWORKS_FEATURE_LEVEL_4 has changed"); -static_assert(ANEURALNETWORKS_FEATURE_LEVEL_5 == 31, "ANEURALNETWORKS_FEATURE_LEVEL_5 has changed"); - -#ifdef NN_COMPATIBILITY_LIBRARY_BUILD - -static_assert(sizeof(SL_ANeuralNetworksPerformanceInfo) == sizeof(float) * 2, - "SL_ANeuralNetworksPerformanceInfo size changed"); -static_assert(sizeof(SL_ANeuralNetworksOperandPerformanceInfo) == - sizeof(float) * 2 + sizeof(int32_t), - "SL_ANeuralNetworksOperandPerformanceInfo size changed"); -static_assert(sizeof(SL_ANeuralNetworksExtensionOperandTypeInformation) == 8, - "SL_ANeuralNetworksExtensionOperandTypeInformation size changed"); - -static_assert(SL_ANEURALNETWORKS_CAPABILITIES_PERFORMANCE_RELAXED_SCALAR == 0, - "SL_ANEURALNETWORKS_CAPABILITIES_PERFORMANCE_RELAXED_SCALAR has changed"); -static_assert(SL_ANEURALNETWORKS_CAPABILITIES_PERFORMANCE_RELAXED_TENSOR == 1, - "SL_ANEURALNETWORKS_CAPABILITIES_PERFORMANCE_RELAXED_TENSOR has changed"); -static_assert(SL_ANEURALNETWORKS_CAPABILITIES_PERFORMANCE_IF == 2, - "SL_ANEURALNETWORKS_CAPABILITIES_PERFORMANCE_IF has changed"); -static_assert(SL_ANEURALNETWORKS_CAPABILITIES_PERFORMANCE_WHILE == 3, - "SL_ANEURALNETWORKS_CAPABILITIES_PERFORMANCE_WHILE has changed"); - -#endif // NN_COMPATIBILITY_LIBRARY_BUILD +// Asserts for loop timeout duration +static_assert(static_cast<uint64_t>(LoopTimeoutDurationNs::DEFAULT) == + operation_while::kTimeoutNsDefault, + "LoopTimeoutDurationNs::DEFAULT != operation_while::kTimeoutNsDefault"); +static_assert(static_cast<uint64_t>(LoopTimeoutDurationNs::MAXIMUM) == + operation_while::kTimeoutNsMaximum, + "LoopTimeoutDurationNs::MAXIMUM != operation_while::kTimeoutNsMaximum"); int ANeuralNetworks_getDeviceCount(uint32_t* numDevices) { if (numDevices == nullptr) { @@ -743,7 +718,7 @@ return ANEURALNETWORKS_BAD_STATE; } - const Model canonicalModel = m->makeModel(); + const Model hidlModel = m->makeHidlModel(); const 
std::vector<uint32_t>& opMap = m->getSortedOperationMapping(); // init the output array to false for all the operations. std::fill(supportedOps, supportedOps + opMap.size(), false); @@ -762,7 +737,7 @@ } Device* d = reinterpret_cast<Device*>(const_cast<ANeuralNetworksDevice*>(devices[i])); - const MetaModel metaModel(canonicalModel, DeviceManager::get()->strictSlicing()); + const MetaModel metaModel(hidlModel, DeviceManager::get()->strictSlicing()); const std::vector<bool> supportsByDevice = d->getSupportedOperations(metaModel); for (uint32_t j = 0; j < supportsByDevice.size(); j++) { uint32_t originalIdx = opMap[j]; @@ -1013,9 +988,9 @@ LOG(ERROR) << "ANeuralNetworksMemory_copy passed a nullptr"; return ANEURALNETWORKS_UNEXPECTED_NULL; } - const RuntimeMemory* s = reinterpret_cast<const RuntimeMemory*>(src); - const RuntimeMemory* d = reinterpret_cast<const RuntimeMemory*>(dst); - return RuntimeMemory::copy(*s, *d); + const Memory* s = reinterpret_cast<const Memory*>(src); + const Memory* d = reinterpret_cast<const Memory*>(dst); + return Memory::copy(*s, *d); } int ANeuralNetworksMemory_createFromFd(size_t size, int prot, int fd, size_t offset, @@ -1049,7 +1024,7 @@ void ANeuralNetworksMemory_free(ANeuralNetworksMemory* memory) { NNTRACE_RT(NNTRACE_PHASE_TERMINATION, "ANeuralNetworksMemory_free"); // No validation. Free of nullptr is valid. 
- RuntimeMemory* m = reinterpret_cast<RuntimeMemory*>(memory); + Memory* m = reinterpret_cast<Memory*>(memory); delete m; } @@ -1116,7 +1091,7 @@ LOG(ERROR) << "ANeuralNetworksModel_setOperandValue passed a nullptr"; return ANEURALNETWORKS_UNEXPECTED_NULL; } - const RuntimeMemory* mem = reinterpret_cast<const RuntimeMemory*>(memory); + const Memory* mem = reinterpret_cast<const Memory*>(memory); ModelBuilder* m = reinterpret_cast<ModelBuilder*>(model); return m->setOperandValueFromMemory(index, mem, offset, length); } @@ -1191,14 +1166,7 @@ ModelBuilder* m = reinterpret_cast<ModelBuilder*>(model); CompilationBuilder* c = nullptr; - - const auto& drivers = DeviceManager::get()->getDrivers(); - std::vector<std::shared_ptr<Device>> nonUpdatableDrivers; - nonUpdatableDrivers.reserve(drivers.size()); - std::copy_if(drivers.begin(), drivers.end(), std::back_inserter(nonUpdatableDrivers), - [](const auto& driver) { return !driver->isUpdatable(); }); - - int result = m->createCompilation(&c, nonUpdatableDrivers); + int result = m->createCompilation(&c, DeviceManager::get()->getDrivers()); *compilation = reinterpret_cast<ANeuralNetworksCompilation*>(c); return result; } @@ -1334,7 +1302,7 @@ return ANEURALNETWORKS_UNEXPECTED_NULL; } - const RuntimeMemory* m = reinterpret_cast<const RuntimeMemory*>(memory); + const Memory* m = reinterpret_cast<const Memory*>(memory); ExecutionBuilder* r = reinterpret_cast<ExecutionBuilder*>(execution); return r->setInputFromMemory(index, type, m, offset, length); } @@ -1362,7 +1330,7 @@ } ExecutionBuilder* r = reinterpret_cast<ExecutionBuilder*>(execution); - const RuntimeMemory* m = reinterpret_cast<const RuntimeMemory*>(memory); + const Memory* m = reinterpret_cast<const Memory*>(memory); return r->setOutputFromMemory(index, type, m, offset, length); } @@ -1382,14 +1350,20 @@ ExecutionBuilder* r = reinterpret_cast<ExecutionBuilder*>(execution); - std::shared_ptr<ExecutionCallback> callback; + // Dynamically allocate an sp to wrap an 
ExecutionCallback, seen in the NN + // API as an abstract event object. The sp<ExecutionCallback> object is + // returned when the execution has been successfully launched, otherwise a + // nullptr is returned. The sp is used for ref-counting purposes. Without + // it, the HIDL service could attempt to communicate with a dead callback + // object. + std::unique_ptr<sp<ExecutionCallback>> callback = std::make_unique<sp<ExecutionCallback>>(); *event = nullptr; - int n = r->computeAsynchronously(&callback); + int n = r->computeAsynchronously(callback.get()); if (n != ANEURALNETWORKS_NO_ERROR) { return n; } - auto e = std::make_unique<CallbackEvent>(std::move(callback)); + std::unique_ptr<CallbackEvent> e = std::make_unique<CallbackEvent>(*callback); *event = reinterpret_cast<ANeuralNetworksEvent*>(e.release()); return ANEURALNETWORKS_NO_ERROR; } @@ -1413,7 +1387,8 @@ } IEvent* e = reinterpret_cast<IEvent*>(event); - return convertErrorStatusToResultCode(e->wait()); + e->wait(); + return convertErrorStatusToResultCode(e->getStatus()); } void ANeuralNetworksEvent_free(ANeuralNetworksEvent* event) { @@ -1512,8 +1487,7 @@ *event = nullptr; return ANEURALNETWORKS_BAD_DATA; } - std::unique_ptr<SyncFenceEvent> e = - std::make_unique<SyncFenceEvent>(syncFenceFd, nullptr, nullptr); + std::unique_ptr<SyncFenceEvent> e = std::make_unique<SyncFenceEvent>(syncFenceFd, nullptr); *event = reinterpret_cast<ANeuralNetworksEvent*>(e.release()); return ANEURALNETWORKS_NO_ERROR; } @@ -1534,7 +1508,7 @@ if (*syncFenceFd <= 0) { LOG(ERROR) << "ANeuralNetworksEvent_getSyncFenceFd unable to get valid sync_fence fd"; *syncFenceFd = -1; - return ANEURALNETWORKS_BAD_DATA; + return ANEURALNETWORKS_OP_FAILED; } return ANEURALNETWORKS_NO_ERROR; } @@ -1569,34 +1543,10 @@ waitForList.push_back(syncFenceFd); } } - - if (r->getCompilation()->hasDynamicTemporaries()) { - // The current implementation of fenced execution does not support - // dynamic temporaries. Fall back to non fenced execution. 
- LOG(INFO) << "ANeuralNetworksExecution_startComputeWithDependencies falling back" - << " to ANeuralNetworksExecution_startCompute" - << " because of boundary operands of unknown size"; - for (int syncFenceFd : waitForList) { - if (syncFenceFd > 0) { - auto w = syncWait(syncFenceFd, -1); - if (w != FenceState::SIGNALED) { - VLOG(EXECUTION) << "syncWait failed, fd: " << syncFenceFd; - *event = nullptr; - return ANEURALNETWORKS_OP_FAILED; - } - } - } - return ANeuralNetworksExecution_startCompute(execution, event); - } - int syncFenceToSignal = -1; int n = r->computeFenced(waitForList, duration, &syncFenceToSignal); - std::unique_ptr<SyncFenceEvent> e = std::make_unique<SyncFenceEvent>( - syncFenceToSignal, r->getExecuteFencedInfoCallback(), - // TODO(miaowang): support dynamic output shape only with memory domain. - // For now just return empty output shapes. - [r](ErrorStatus status) { return r->finishComputation(status, {}); }); - close(syncFenceToSignal); + std::unique_ptr<SyncFenceEvent> e = + std::make_unique<SyncFenceEvent>(syncFenceToSignal, r->getFencedExecutionCallback()); if (n != ANEURALNETWORKS_NO_ERROR) { *event = nullptr; } else { @@ -1604,348 +1554,3 @@ } return n; } - -int64_t ANeuralNetworks_getRuntimeFeatureLevel() { - return kCurrentNNAPIRuntimeFeatureLevel; -} - -int ANeuralNetworksExecution_enableInputAndOutputPadding(ANeuralNetworksExecution* execution, - bool enable) { - NNTRACE_RT(NNTRACE_PHASE_EXECUTION, "ANeuralNetworksExecution_enableInputAndOutputPadding"); - if (!execution) { - LOG(ERROR) << "ANeuralNetworksExecution_enableInputAndOutputPadding passed a nullptr"; - return ANEURALNETWORKS_UNEXPECTED_NULL; - } - ExecutionBuilder* r = reinterpret_cast<ExecutionBuilder*>(execution); - return r->enableInputAndOutputPadding(enable); -} - -int ANeuralNetworksCompilation_getPreferredMemoryAlignmentForInput( - const ANeuralNetworksCompilation* compilation, uint32_t index, uint32_t* alignment) { - NNTRACE_RT(NNTRACE_PHASE_COMPILATION, - 
"ANeuralNetworksCompilation_getPreferredMemoryAlignmentForInput"); - if (!compilation || !alignment) { - LOG(ERROR) << "ANeuralNetworksCompilation_getPreferredMemoryAlignmentForInput passed a " - "nullptr"; - return ANEURALNETWORKS_UNEXPECTED_NULL; - } - const CompilationBuilder* c = reinterpret_cast<const CompilationBuilder*>(compilation); - return c->getPreferredMemoryAlignmentForInput(index, alignment); -} - -int ANeuralNetworksCompilation_getPreferredMemoryPaddingForInput( - const ANeuralNetworksCompilation* compilation, uint32_t index, uint32_t* padding) { - NNTRACE_RT(NNTRACE_PHASE_COMPILATION, - "ANeuralNetworksCompilation_getPreferredMemoryPaddingForInput"); - if (!compilation || !padding) { - LOG(ERROR) << "ANeuralNetworksCompilation_getPreferredMemoryPaddingForInput passed a " - "nullptr"; - return ANEURALNETWORKS_UNEXPECTED_NULL; - } - const CompilationBuilder* c = reinterpret_cast<const CompilationBuilder*>(compilation); - return c->getPreferredMemoryPaddingForInput(index, padding); -} - -int ANeuralNetworksCompilation_getPreferredMemoryAlignmentForOutput( - const ANeuralNetworksCompilation* compilation, uint32_t index, uint32_t* alignment) { - NNTRACE_RT(NNTRACE_PHASE_COMPILATION, - "ANeuralNetworksCompilation_getPreferredMemoryAlignmentForOutput"); - if (!compilation || !alignment) { - LOG(ERROR) << "ANeuralNetworksCompilation_getPreferredMemoryAlignmentForOutput passed a " - "nullptr"; - return ANEURALNETWORKS_UNEXPECTED_NULL; - } - const CompilationBuilder* c = reinterpret_cast<const CompilationBuilder*>(compilation); - return c->getPreferredMemoryAlignmentForOutput(index, alignment); -} - -int ANeuralNetworksCompilation_getPreferredMemoryPaddingForOutput( - const ANeuralNetworksCompilation* compilation, uint32_t index, uint32_t* padding) { - NNTRACE_RT(NNTRACE_PHASE_COMPILATION, - "ANeuralNetworksCompilation_getPreferredMemoryPaddingForOutput"); - if (!compilation || !padding) { - LOG(ERROR) << 
"ANeuralNetworksCompilation_getPreferredMemoryPaddingForOutput passed a " - "nullptr"; - return ANEURALNETWORKS_UNEXPECTED_NULL; - } - const CompilationBuilder* c = reinterpret_cast<const CompilationBuilder*>(compilation); - return c->getPreferredMemoryPaddingForOutput(index, padding); -} - -int ANeuralNetworksExecution_setReusable(ANeuralNetworksExecution* execution, bool reusable) { - NNTRACE_RT(NNTRACE_PHASE_EXECUTION, "ANeuralNetworksExecution_setReusable"); - if (!execution) { - LOG(ERROR) << "ANeuralNetworksExecution_setReusable passed a nullptr"; - return ANEURALNETWORKS_UNEXPECTED_NULL; - } - ExecutionBuilder* r = reinterpret_cast<ExecutionBuilder*>(execution); - return r->setReusable(reusable); -} - -#ifdef NN_COMPATIBILITY_LIBRARY_BUILD - -int SL_ANeuralNetworksCompilation_setCachingFromFds(ANeuralNetworksCompilation* compilation, - const int* modelCacheFds, - const uint32_t numModelCacheFiles, - const int* dataCacheFds, - const uint32_t numDataCacheFiles, - const uint8_t* token) { - NNTRACE_RT(NNTRACE_PHASE_COMPILATION, "SL_ANeuralNetworksCompilation_setCachingFromFds"); - if (!compilation || (numModelCacheFiles != 0 && !modelCacheFds) || - (numDataCacheFiles != 0 && !dataCacheFds) || !token) { - LOG(ERROR) << "SL_ANeuralNetworksCompilation_setCachingFromFds passed a nullptr"; - return ANEURALNETWORKS_UNEXPECTED_NULL; - } - CompilationBuilder* c = reinterpret_cast<CompilationBuilder*>(compilation); - return c->setCachingFromFds(modelCacheFds, numModelCacheFiles, dataCacheFds, numDataCacheFiles, - token); -} - -int SL_ANeuralNetworksDevice_getNumberOfCacheFilesNeeded(const ANeuralNetworksDevice* device, - uint32_t* numModelCacheFiles, - uint32_t* numDataCacheFiles) { - if (numModelCacheFiles) *numModelCacheFiles = 0; - if (numDataCacheFiles) *numDataCacheFiles = 0; - - if (device == nullptr || numModelCacheFiles == nullptr || numDataCacheFiles == nullptr) { - LOG(ERROR) << "SL_ANeuralNetworksDevice_getNumberOfCacheFilesNeeded passed a nullptr"; - return 
ANEURALNETWORKS_UNEXPECTED_NULL; - } - - const Device* d = reinterpret_cast<const Device*>(device); - std::tie(*numModelCacheFiles, *numDataCacheFiles) = d->getNumberOfCacheFilesNeeded(); - return ANEURALNETWORKS_NO_ERROR; -} - -int SL_ANeuralNetworksDevice_getPerformanceInfo( - const ANeuralNetworksDevice* device, int32_t performanceInfoKind, - SL_ANeuralNetworksPerformanceInfo* performanceInfo) { - if (performanceInfo) *performanceInfo = {.execTime = 0.0f, .powerUsage = 0.0f}; - - if (device == nullptr || performanceInfo == nullptr) { - LOG(ERROR) << "SL_ANeuralNetworksDevice_getPerformanceInfo passed a nullptr"; - return ANEURALNETWORKS_UNEXPECTED_NULL; - } - - constexpr auto conv = [](const Capabilities::PerformanceInfo& info) { - return SL_ANeuralNetworksPerformanceInfo{.execTime = info.execTime, - .powerUsage = info.powerUsage}; - }; - - const Device* d = reinterpret_cast<const Device*>(device); - const Capabilities& capabilities = d->getCapabilities(); - - switch (performanceInfoKind) { - case SL_ANEURALNETWORKS_CAPABILITIES_PERFORMANCE_RELAXED_SCALAR: - *performanceInfo = conv(capabilities.relaxedFloat32toFloat16PerformanceScalar); - return ANEURALNETWORKS_NO_ERROR; - case SL_ANEURALNETWORKS_CAPABILITIES_PERFORMANCE_RELAXED_TENSOR: - *performanceInfo = conv(capabilities.relaxedFloat32toFloat16PerformanceTensor); - return ANEURALNETWORKS_NO_ERROR; - case SL_ANEURALNETWORKS_CAPABILITIES_PERFORMANCE_IF: - *performanceInfo = conv(capabilities.ifPerformance); - return ANEURALNETWORKS_NO_ERROR; - case SL_ANEURALNETWORKS_CAPABILITIES_PERFORMANCE_WHILE: - *performanceInfo = conv(capabilities.whilePerformance); - return ANEURALNETWORKS_NO_ERROR; - } - - LOG(ERROR) << "SL_ANeuralNetworksDevice_getPerformanceInfo passed unknown performanceInfoKind " - << performanceInfoKind; - return ANEURALNETWORKS_BAD_DATA; -} - -int SL_ANeuralNetworksDevice_forEachOperandPerformanceInfo( - const ANeuralNetworksDevice* device, void* context, - void 
(*callback)(SL_ANeuralNetworksOperandPerformanceInfo, void*)) { - if (device == nullptr || context == nullptr || callback == nullptr) { - LOG(ERROR) << "SL_ANeuralNetworksDevice_forEachOperandPerformanceInfo passed a nullptr"; - return ANEURALNETWORKS_UNEXPECTED_NULL; - } - - constexpr auto conv = [](const Capabilities::OperandPerformance& operandPerformance) { - return SL_ANeuralNetworksOperandPerformanceInfo{ - .operandType = static_cast<int32_t>(operandPerformance.type), - .performanceInfo = {.execTime = operandPerformance.info.execTime, - .powerUsage = operandPerformance.info.powerUsage}, - }; - }; - - const Device* d = reinterpret_cast<const Device*>(device); - const Capabilities& capabilities = d->getCapabilities(); - - for (const auto& operandPerformance : capabilities.operandPerformance.asVector()) { - const SL_ANeuralNetworksOperandPerformanceInfo opPerf = conv(operandPerformance); - callback(opPerf, context); - } - return ANEURALNETWORKS_NO_ERROR; -} - -int SL_ANeuralNetworksDevice_getVendorExtensionCount(const ANeuralNetworksDevice* device, - uint32_t* vendorExtensionCount) { - if (vendorExtensionCount) *vendorExtensionCount = 0; - - if (device == nullptr || vendorExtensionCount == nullptr) { - LOG(ERROR) << "SL_ANeuralNetworksDevice_getVendorExtensionCount passed a nullptr"; - return ANEURALNETWORKS_UNEXPECTED_NULL; - } - - const Device* d = reinterpret_cast<const Device*>(device); - *vendorExtensionCount = d->getSupportedExtensions().size(); - return ANEURALNETWORKS_NO_ERROR; -} - -int SL_ANeuralNetworksDevice_getVendorExtensionName(const ANeuralNetworksDevice* device, - uint32_t vendorExtensionIndex, - const char** extensionName) { - if (extensionName) *extensionName = nullptr; - - if (device == nullptr || extensionName == nullptr) { - LOG(ERROR) << "SL_ANeuralNetworksDevice_getVendorExtensionName passed a nullptr"; - return ANEURALNETWORKS_UNEXPECTED_NULL; - } - - const Device* d = reinterpret_cast<const Device*>(device); - const auto& extensions = 
d->getSupportedExtensions(); - - if (vendorExtensionIndex >= extensions.size()) { - LOG(ERROR) - << "SL_ANeuralNetworksDevice_getVendorExtensionName passed a vendorExtensionIndex " - "that is out of range"; - return ANEURALNETWORKS_BAD_DATA; - } - const auto& extension = extensions[vendorExtensionIndex]; - - *extensionName = extension.name.c_str(); - return ANEURALNETWORKS_NO_ERROR; -} - -int SL_ANeuralNetworksDevice_forEachVendorExtensionOperandTypeInformation( - const ANeuralNetworksDevice* device, uint32_t vendorExtensionIndex, void* context, - void (*callback)(SL_ANeuralNetworksExtensionOperandTypeInformation, void*)) { - if (device == nullptr || context == nullptr || callback == nullptr) { - LOG(ERROR) - << "SL_ANeuralNetworksDevice_forEachVendorExtensionOperandTypeInformation passed a " - "nullptr"; - return ANEURALNETWORKS_UNEXPECTED_NULL; - } - - const Device* d = reinterpret_cast<const Device*>(device); - const auto& extensions = d->getSupportedExtensions(); - - if (vendorExtensionIndex >= extensions.size()) { - LOG(ERROR) - << "SL_ANeuralNetworksDevice_forEachVendorExtensionOperandTypeInformation passed a " - "vendorExtensionIndex that is out of range"; - return ANEURALNETWORKS_BAD_DATA; - } - const auto& operandTypes = extensions[vendorExtensionIndex].operandTypes; - - constexpr auto conv = [](const Extension::OperandTypeInformation& operandTypeInfo) { - return SL_ANeuralNetworksExtensionOperandTypeInformation{ - .byteSize = operandTypeInfo.byteSize, - .type = operandTypeInfo.type, - .isTensor = operandTypeInfo.isTensor, - }; - }; - - for (const auto& operandTypeInfo : operandTypes) { - const SL_ANeuralNetworksExtensionOperandTypeInformation opTypeInfo = conv(operandTypeInfo); - callback(opTypeInfo, context); - } - return ANEURALNETWORKS_NO_ERROR; -} - -#define NNCL_FUNC(symbol) .symbol = symbol - -NnApiSLDriverImplFL5 slDriverImpl{ - .base{.implFeatureLevel = ANEURALNETWORKS_FEATURE_LEVEL_5}, - NNCL_FUNC(ANeuralNetworksBurst_create), - 
NNCL_FUNC(ANeuralNetworksBurst_free), - NNCL_FUNC(ANeuralNetworksCompilation_createForDevices), - NNCL_FUNC(ANeuralNetworksCompilation_finish), - NNCL_FUNC(ANeuralNetworksCompilation_free), - NNCL_FUNC(ANeuralNetworksCompilation_getPreferredMemoryAlignmentForInput), - NNCL_FUNC(ANeuralNetworksCompilation_getPreferredMemoryAlignmentForOutput), - NNCL_FUNC(ANeuralNetworksCompilation_getPreferredMemoryPaddingForInput), - NNCL_FUNC(ANeuralNetworksCompilation_getPreferredMemoryPaddingForOutput), - NNCL_FUNC(ANeuralNetworksCompilation_setCaching), - NNCL_FUNC(ANeuralNetworksCompilation_setPreference), - NNCL_FUNC(ANeuralNetworksCompilation_setPriority), - NNCL_FUNC(ANeuralNetworksCompilation_setTimeout), - NNCL_FUNC(ANeuralNetworksDevice_getExtensionSupport), - NNCL_FUNC(ANeuralNetworksDevice_getFeatureLevel), - NNCL_FUNC(ANeuralNetworksDevice_getName), - NNCL_FUNC(ANeuralNetworksDevice_getType), - NNCL_FUNC(ANeuralNetworksDevice_getVersion), - NNCL_FUNC(ANeuralNetworksDevice_wait), - NNCL_FUNC(ANeuralNetworksEvent_createFromSyncFenceFd), - NNCL_FUNC(ANeuralNetworksEvent_free), - NNCL_FUNC(ANeuralNetworksEvent_getSyncFenceFd), - NNCL_FUNC(ANeuralNetworksEvent_wait), - NNCL_FUNC(ANeuralNetworksExecution_burstCompute), - NNCL_FUNC(ANeuralNetworksExecution_compute), - NNCL_FUNC(ANeuralNetworksExecution_create), - NNCL_FUNC(ANeuralNetworksExecution_enableInputAndOutputPadding), - NNCL_FUNC(ANeuralNetworksExecution_free), - NNCL_FUNC(ANeuralNetworksExecution_getDuration), - NNCL_FUNC(ANeuralNetworksExecution_getOutputOperandDimensions), - NNCL_FUNC(ANeuralNetworksExecution_getOutputOperandRank), - NNCL_FUNC(ANeuralNetworksExecution_setInput), - NNCL_FUNC(ANeuralNetworksExecution_setInputFromMemory), - NNCL_FUNC(ANeuralNetworksExecution_setLoopTimeout), - NNCL_FUNC(ANeuralNetworksExecution_setMeasureTiming), - NNCL_FUNC(ANeuralNetworksExecution_setOutput), - NNCL_FUNC(ANeuralNetworksExecution_setOutputFromMemory), - NNCL_FUNC(ANeuralNetworksExecution_setReusable), - 
NNCL_FUNC(ANeuralNetworksExecution_setTimeout), - NNCL_FUNC(ANeuralNetworksExecution_startComputeWithDependencies), - NNCL_FUNC(ANeuralNetworksMemoryDesc_addInputRole), - NNCL_FUNC(ANeuralNetworksMemoryDesc_addOutputRole), - NNCL_FUNC(ANeuralNetworksMemoryDesc_create), - NNCL_FUNC(ANeuralNetworksMemoryDesc_finish), - NNCL_FUNC(ANeuralNetworksMemoryDesc_free), - NNCL_FUNC(ANeuralNetworksMemoryDesc_setDimensions), - NNCL_FUNC(ANeuralNetworksMemory_copy), - NNCL_FUNC(ANeuralNetworksMemory_createFromAHardwareBuffer), - NNCL_FUNC(ANeuralNetworksMemory_createFromDesc), - NNCL_FUNC(ANeuralNetworksMemory_createFromFd), - NNCL_FUNC(ANeuralNetworksMemory_free), - NNCL_FUNC(ANeuralNetworksModel_addOperand), - NNCL_FUNC(ANeuralNetworksModel_addOperation), - NNCL_FUNC(ANeuralNetworksModel_create), - NNCL_FUNC(ANeuralNetworksModel_finish), - NNCL_FUNC(ANeuralNetworksModel_free), - NNCL_FUNC(ANeuralNetworksModel_getExtensionOperandType), - NNCL_FUNC(ANeuralNetworksModel_getExtensionOperationType), - NNCL_FUNC(ANeuralNetworksModel_getSupportedOperationsForDevices), - NNCL_FUNC(ANeuralNetworksModel_identifyInputsAndOutputs), - NNCL_FUNC(ANeuralNetworksModel_relaxComputationFloat32toFloat16), - NNCL_FUNC(ANeuralNetworksModel_setOperandExtensionData), - NNCL_FUNC(ANeuralNetworksModel_setOperandSymmPerChannelQuantParams), - NNCL_FUNC(ANeuralNetworksModel_setOperandValue), - NNCL_FUNC(ANeuralNetworksModel_setOperandValueFromMemory), - NNCL_FUNC(ANeuralNetworksModel_setOperandValueFromModel), - NNCL_FUNC(ANeuralNetworks_getDefaultLoopTimeout), - NNCL_FUNC(ANeuralNetworks_getDevice), - NNCL_FUNC(ANeuralNetworks_getDeviceCount), - NNCL_FUNC(ANeuralNetworks_getMaximumLoopTimeout), - NNCL_FUNC(ANeuralNetworks_getRuntimeFeatureLevel), - NNCL_FUNC(SL_ANeuralNetworksCompilation_setCachingFromFds), - NNCL_FUNC(SL_ANeuralNetworksDevice_getNumberOfCacheFilesNeeded), - NNCL_FUNC(SL_ANeuralNetworksDevice_getPerformanceInfo), - NNCL_FUNC(SL_ANeuralNetworksDevice_forEachOperandPerformanceInfo), - 
NNCL_FUNC(SL_ANeuralNetworksDevice_getVendorExtensionCount), - NNCL_FUNC(SL_ANeuralNetworksDevice_getVendorExtensionName), - NNCL_FUNC(SL_ANeuralNetworksDevice_forEachVendorExtensionOperandTypeInformation), -}; - -#undef NNCL_FUNC - -__BEGIN_DECLS -NnApiSLDriverImpl* ANeuralNetworks_getSLDriverImpl() { - return reinterpret_cast<NnApiSLDriverImpl*>(&slDriverImpl); -} -__END_DECLS - -#endif // NN_COMPATIBILITY_LIBRARY_BUILD
diff --git a/runtime/TypeManager.cpp b/runtime/TypeManager.cpp index 858de27..872b11c2 100644 --- a/runtime/TypeManager.cpp +++ b/runtime/TypeManager.cpp
@@ -18,9 +18,11 @@ #include "TypeManager.h" -#include <LegacyUtils.h> +#include <PackageInfo.h> #include <android-base/file.h> #include <android-base/properties.h> +#include <binder/IServiceManager.h> +#include <procpartition/procpartition.h> #include <algorithm> #include <limits> @@ -30,11 +32,7 @@ #include <string_view> #include <vector> -#ifndef NN_COMPATIBILITY_LIBRARY_BUILD -#include <PackageInfo.h> -#include <binder/IServiceManager.h> -#include <procpartition/procpartition.h> -#endif // NN_COMPATIBILITY_LIBRARY_BUILD +#include "Utils.h" namespace android { namespace nn { @@ -50,7 +48,11 @@ namespace { -constexpr uint32_t kMaxPrefix = (1 << kExtensionPrefixBits) - 1; +using namespace hal; + +const uint8_t kLowBitsType = static_cast<uint8_t>(ExtensionTypeEncoding::LOW_BITS_TYPE); +const uint32_t kMaxPrefix = + (1 << static_cast<uint8_t>(ExtensionTypeEncoding::HIGH_BITS_PREFIX)) - 1; // Checks if the two structures contain the same information. The order of // operand types within the structures does not matter. @@ -61,8 +63,6 @@ return true; } -#ifndef NN_COMPATIBILITY_LIBRARY_BUILD - // Property for disabling NNAPI vendor extensions on product image (used on GSI /product image, // which can't use NNAPI vendor extensions). const char kVExtProductDeny[] = "ro.nnapi.extensions.deny_on_product"; @@ -92,7 +92,7 @@ std::istringstream streamData(data); std::string line; while (std::getline(streamData, line)) { - // Do some basic validity check on entry, it's either + // Do some basic sanity check on entry, it's either // fs path or package name. 
if (StartsWith(line, "/") || line.find('.') != std::string::npos) { allowlist.push_back(line); @@ -103,36 +103,52 @@ return allowlist; } -// Since Android S we allow use of vendor extensions for all -// non-system applications without need to put the binary -// name on allowlist -static bool allowVendorExtensionsForAllNonSystemClients() { -#if defined(__BIONIC__) - return android_get_device_api_level() >= __ANDROID_API_S__; -#else +// Query PackageManagerNative service about Android app properties. +// On success, it will populate appPackageInfo->app* fields. +bool fetchAppPackageLocationInfo(uid_t uid, TypeManager::AppPackageInfo* appPackageInfo) { + ANeuralNetworks_PackageInfo packageInfo; + if (!ANeuralNetworks_fetch_PackageInfo(uid, &packageInfo)) { + return false; + } + appPackageInfo->appPackageName = packageInfo.appPackageName; + appPackageInfo->appIsSystemApp = packageInfo.appIsSystemApp; + appPackageInfo->appIsOnVendorImage = packageInfo.appIsOnVendorImage; + appPackageInfo->appIsOnProductImage = packageInfo.appIsOnProductImage; + + ANeuralNetworks_free_PackageInfo(&packageInfo); return true; -#endif // __BIONIC__ } -#endif // NN_COMPATIBILITY_LIBRARY_BUILD +// Check if this process is allowed to use NNAPI Vendor extensions. 
+bool isNNAPIVendorExtensionsUseAllowed(const std::vector<std::string>& allowlist) { + TypeManager::AppPackageInfo appPackageInfo = { + .binaryPath = ::android::procpartition::getExe(getpid()), + .appPackageName = "", + .appIsSystemApp = false, + .appIsOnVendorImage = false, + .appIsOnProductImage = false}; + + if (appPackageInfo.binaryPath == "/system/bin/app_process64" || + appPackageInfo.binaryPath == "/system/bin/app_process32") { + if (!fetchAppPackageLocationInfo(getuid(), &appPackageInfo)) { + LOG(ERROR) << "Failed to get app information from package_manager_native"; + return false; + } + } + return TypeManager::isExtensionsUseAllowed( + appPackageInfo, isNNAPIVendorExtensionsUseAllowedInProductImage(), allowlist); +} } // namespace TypeManager::TypeManager() { VLOG(MANAGER) << "TypeManager::TypeManager"; -#ifndef NN_COMPATIBILITY_LIBRARY_BUILD - mExtensionsAllowed = TypeManager::isExtensionsUseAllowed( - AppInfoFetcher::get()->getAppInfo(), isNNAPIVendorExtensionsUseAllowedInProductImage(), - getVendorExtensionAllowlistedApps()); -#else - mExtensionsAllowed = true; -#endif // NN_COMPATIBILITY_LIBRARY_BUILD + mExtensionsAllowed = isNNAPIVendorExtensionsUseAllowed(getVendorExtensionAllowlistedApps()); VLOG(MANAGER) << "NNAPI Vendor extensions enabled: " << mExtensionsAllowed; findAvailableExtensions(); } -#ifndef NN_COMPATIBILITY_LIBRARY_BUILD -bool TypeManager::isExtensionsUseAllowed(const AppInfoFetcher::AppInfo& appPackageInfo, +bool TypeManager::isExtensionsUseAllowed(const AppPackageInfo& appPackageInfo, bool useOnProductImageEnabled, const std::vector<std::string>& allowlist) { // Only selected partitions and user-installed apps (/data) @@ -141,9 +157,6 @@ StartsWith(appPackageInfo.binaryPath, "/odm/") || StartsWith(appPackageInfo.binaryPath, "/data/") || (StartsWith(appPackageInfo.binaryPath, "/product/") && useOnProductImageEnabled)) { - if (allowVendorExtensionsForAllNonSystemClients()) { - return true; - } #ifdef NN_DEBUGGABLE // Only on userdebug 
and eng builds. // When running tests with mma and adb push. @@ -153,26 +166,20 @@ return true; } #endif // NN_DEBUGGABLE + return std::find(allowlist.begin(), allowlist.end(), appPackageInfo.binaryPath) != allowlist.end(); } else if (appPackageInfo.binaryPath == "/system/bin/app_process64" || appPackageInfo.binaryPath == "/system/bin/app_process32") { - // App is (not system app) OR (vendor app) OR (product app AND product enabled) - if (!appPackageInfo.appIsSystemApp || appPackageInfo.appIsOnVendorImage || - (appPackageInfo.appIsOnProductImage && useOnProductImageEnabled)) { - if (allowVendorExtensionsForAllNonSystemClients()) { - // No need for allowlist - return true; - } else { - // Check if app is on allowlist. - return std::find(allowlist.begin(), allowlist.end(), - appPackageInfo.appPackageName) != allowlist.end(); - } - } + // App is not system app OR vendor app OR (product app AND product enabled) + // AND app is on allowlist. + return (!appPackageInfo.appIsSystemApp || appPackageInfo.appIsOnVendorImage || + (appPackageInfo.appIsOnProductImage && useOnProductImageEnabled)) && + std::find(allowlist.begin(), allowlist.end(), appPackageInfo.appPackageName) != + allowlist.end(); } return false; } -#endif // NN_COMPATIBILITY_LIBRARY_BUILD void TypeManager::findAvailableExtensions() { for (const std::shared_ptr<Device>& device : mDeviceManager->getDrivers()) { @@ -228,7 +235,7 @@ int32_t* type) { uint16_t prefix; NN_RET_CHECK(getExtensionPrefix(extensionName, &prefix)); - *type = (prefix << kExtensionTypeBits) | typeWithinExtension; + *type = (prefix << kLowBitsType) | typeWithinExtension; return true; } @@ -242,8 +249,8 @@ bool TypeManager::getExtensionOperandTypeInfo( OperandType type, const Extension::OperandTypeInformation** info) const { uint32_t operandType = static_cast<uint32_t>(type); - uint16_t prefix = operandType >> kExtensionTypeBits; - uint16_t typeWithinExtension = operandType & ((1 << kExtensionTypeBits) - 1); + uint16_t prefix = operandType >> 
kLowBitsType; + uint16_t typeWithinExtension = operandType & ((1 << kLowBitsType) - 1); const Extension* extension; NN_RET_CHECK(getExtensionInfo(prefix, &extension)) << "Cannot find extension corresponding to prefix " << prefix; @@ -261,7 +268,7 @@ } bool TypeManager::isTensorType(OperandType type) const { - if (!isExtension(type)) { + if (!isExtensionOperandType(type)) { return !nonExtensionOperandTypeIsScalar(static_cast<int>(type)); } const Extension::OperandTypeInformation* info; @@ -271,7 +278,7 @@ uint32_t TypeManager::getSizeOfData(OperandType type, const std::vector<uint32_t>& dimensions) const { - if (!isExtension(type)) { + if (!isExtensionOperandType(type)) { return nonExtensionOperandSizeOfData(type, dimensions); } const Extension::OperandTypeInformation* info; @@ -279,9 +286,9 @@ return info->isTensor ? sizeOfTensorData(info->byteSize, dimensions) : info->byteSize; } -bool TypeManager::sizeOfDataOverflowsUInt32(OperandType type, +bool TypeManager::sizeOfDataOverflowsUInt32(hal::OperandType type, const std::vector<uint32_t>& dimensions) const { - if (!isExtension(type)) { + if (!isExtensionOperandType(type)) { return nonExtensionOperandSizeOfDataOverflowsUInt32(type, dimensions); } const Extension::OperandTypeInformation* info;
diff --git a/runtime/TypeManager.h b/runtime/TypeManager.h index ba67ec4..a06ddb6 100644 --- a/runtime/TypeManager.h +++ b/runtime/TypeManager.h
@@ -22,12 +22,9 @@ #include <string> #include <vector> +#include "HalInterfaces.h" #include "Manager.h" -#ifndef NN_COMPATIBILITY_LIBRARY_BUILD -#include "AppInfoFetcher.h" -#endif // NN_COMPATIBILITY_LIBRARY_BUILD - namespace android { namespace nn { @@ -51,18 +48,18 @@ // Looks up information about the extension corresponding to the given prefix // // Returns false if no extension corresponds to the given prefix. - bool getExtensionInfo(uint16_t prefix, const Extension** extension) const; + bool getExtensionInfo(uint16_t prefix, const hal::Extension** extension) const; // Looks up information about an extension operand type // // Returns false if the extension or type is unknown. - bool getExtensionOperandTypeInfo(OperandType type, - const Extension::OperandTypeInformation** info) const; + bool getExtensionOperandTypeInfo(hal::OperandType type, + const hal::Extension::OperandTypeInformation** info) const; // Returns true if an operand type is a tensor type. // // Aborts if the type is an unknown extension type. - bool isTensorType(OperandType type) const; + bool isTensorType(hal::OperandType type) const; // Returns the amount of space needed to store a value of the dimensions and // type of this operand. For a tensor with unspecified rank or at least one @@ -70,7 +67,7 @@ // // Aborts if the type is an unknown extension type. // Aborts if the size would overflow the return type. - uint32_t getSizeOfData(const Operand& operand) const { + uint32_t getSizeOfData(const hal::Operand& operand) const { return getSizeOfData(operand.type, operand.dimensions); } @@ -79,13 +76,14 @@ // unspecified dimension, returns zero. // // Aborts if the type is an unknown extension type. 
- uint32_t getSizeOfData(OperandType type, const std::vector<uint32_t>& dimensions) const; + uint32_t getSizeOfData(hal::OperandType type, const std::vector<uint32_t>& dimensions) const; // Returns true if the amount of space needed to store a value of the specified // dimensions and element size overflows the uint32_t type. // // See also TypeManager::sizeOfDataOverflowsUInt32(). - bool sizeOfDataOverflowsUInt32(OperandType type, const std::vector<uint32_t>& dimensions) const; + bool sizeOfDataOverflowsUInt32(hal::OperandType type, + const std::vector<uint32_t>& dimensions) const; // Returns true if extensions usage is allowed in current process. bool areExtensionsAllowed() const { return mExtensionsAllowed; } @@ -95,7 +93,7 @@ // Registers an extension. // // Returns true if the registration was successful. - bool forTest_registerExtension(const Extension& extension) { + bool forTest_registerExtension(const hal::Extension& extension) { return registerExtension(extension, "INTERNAL TEST"); } @@ -110,22 +108,34 @@ // available devices. void forTest_reset() { *this = TypeManager(); } -#ifndef NN_COMPATIBILITY_LIBRARY_BUILD + // Collection of app-related arguments for the isExtensionsUseAllowed method. + struct AppPackageInfo { + // Path of the binary (/proc/$PID/exe) + std::string binaryPath; + // Package name of the Android app (empty string if not Android app). + std::string appPackageName; + // Is the app a system app? (false if not an Android app) + bool appIsSystemApp; + // Is the app preinstalled on vendor image? (false if not an Android app) + bool appIsOnVendorImage; + // Is the app preinstalled on product image? (false if not an Android app) + bool appIsOnProductImage; + }; + // Check if NNAPI Vendor extensions are usable in the process with the given app // and supplemental infomation. // // useOnProductImageEnabled - whether apps/binaries preinstalled on /product partition // can be enabled for extensions use. 
// allowlist - list of apps/binaries which are allowed to use extensions. - static bool isExtensionsUseAllowed(const AppInfoFetcher::AppInfo& appPackageInfo, + static bool isExtensionsUseAllowed(const AppPackageInfo& appPackageInfo, bool useOnProductImageEnabled, const std::vector<std::string>& allowlist); -#endif // NN_COMPATIBILITY_LIBRARY_BUILD private: TypeManager(); void findAvailableExtensions(); - bool registerExtension(Extension extension, const std::string& deviceName); + bool registerExtension(hal::Extension extension, const std::string& deviceName); // Returns the numeric "prefix" value corresponding to an extension. // @@ -135,7 +145,7 @@ const DeviceManager* mDeviceManager = DeviceManager::get(); // Contains all registered extensions. - std::map<std::string, Extension> mExtensionNameToExtension; + std::map<std::string, hal::Extension> mExtensionNameToExtension; // Contains the name of the first discovered device that supports an // extension. Used for error reporting. @@ -150,7 +160,7 @@ std::map<std::string, uint16_t> mExtensionNameToPrefix; // Entries of mPrefixToExtension point into mExtensionNameToExtension. // prefix=0 corresponds to no extension and should never be looked up. - std::vector<Extension*> mPrefixToExtension = {nullptr}; + std::vector<hal::Extension*> mPrefixToExtension = {nullptr}; // True if Extensions can be used in current process. bool mExtensionsAllowed = false;
diff --git a/runtime/VersionedInterfaces.cpp b/runtime/VersionedInterfaces.cpp new file mode 100644 index 0000000..33d290c --- /dev/null +++ b/runtime/VersionedInterfaces.cpp
@@ -0,0 +1,1555 @@ +/* + * Copyright (C) 2018 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#define LOG_TAG "VersionedInterfaces" + +#include "VersionedInterfaces.h" + +#include <android-base/logging.h> +#include <android-base/properties.h> +#include <android-base/scopeguard.h> +#include <android-base/thread_annotations.h> +#include <android/sync.h> +#include <cutils/native_handle.h> + +#include <algorithm> +#include <chrono> +#include <functional> +#include <memory> +#include <string> +#include <tuple> +#include <type_traits> +#include <utility> +#include <vector> + +#include "Callbacks.h" +#include "ExecutionBurstController.h" +#include "MetaModel.h" +#include "Tracing.h" +#include "Utils.h" + +/* + * Some notes about HIDL interface objects and lifetimes across processes: + * + * All HIDL interface objects inherit from IBase, which itself inherits from + * ::android::RefBase. As such, all HIDL interface objects are reference counted + * and must be owned through ::android::sp (or referenced through ::android::wp). + * Allocating RefBase objects on the stack will log errors and may result in + * crashes, and deleting a RefBase object through another means (e.g., "delete", + * "free", or RAII-cleanup through std::unique_ptr or some equivalent) will + * result in double-free and/or use-after-free undefined behavior. 
+ * + * HIDL/Binder manages the reference count of HIDL interface objects + * automatically across processes. If a process that references (but did not + * create) the HIDL interface object dies, HIDL/Binder ensures any reference + * count it held is properly released. (Caveat: it might be possible that + * HIDL/Binder behave strangely with ::android::wp references.) + * + * If the process which created the HIDL interface object dies, any call on this + * object from another process will result in a HIDL transport error with the + * code DEAD_OBJECT. + */ + +/* + * Some notes about asynchronous calls across HIDL: + * + * For synchronous calls across HIDL, if an error occurs after the function was + * called but before it returns, HIDL will return a transport error. For + * example, if the message cannot be delivered to the server process or if the + * server process dies before returning a result, HIDL will return from the + * function with the appropriate transport error in the Return<> object which + * can be queried with Return<>::isOk(), Return<>::isDeadObject(), + * Return<>::description(), etc. + * + * However, HIDL offers no such error management in the case of asynchronous + * calls. By default, if the client launches an asynchronous task and the server + * fails to return a result through the callback, the client will be left + * waiting indefinitely for a result it will never receive. + * + * In the NNAPI, IDevice::prepareModel* and IPreparedModel::execute* (but not + * IPreparedModel::executeSynchronously*) are asynchronous calls across HIDL. + * Specifically, these asynchronous functions are called with a HIDL interface + * callback object (IPrepareModelCallback for IDevice::prepareModel* and + * IExecutionCallback for IPreparedModel::execute*) and are expected to quickly + * return, and the results are returned at a later time through these callback + * objects. 
+ * + * To protect against the case when the server dies after the asynchronous task + * was called successfully but before the results could be returned, HIDL + * provides an object called a "hidl_death_recipient", which can be used to + * detect when an interface object (and more generally, the server process) has + * died. VersionedInterfaces uses hidl_death_recipients to detect when the + * driver process has died, and VersionedInterfaces will unblock any thread + * waiting on the results of a callback object that may otherwise not be + * signaled. + */ + +namespace android { +namespace nn { + +// anonymous namespace +namespace { + +using namespace hal; + +const Timing kNoTiming = {.timeOnDevice = UINT64_MAX, .timeInDriver = UINT64_MAX}; + +void sendFailureMessage(IPreparedModelCallback* cb) { + CHECK(cb != nullptr); + cb->notify_1_3(ErrorStatus::GENERAL_FAILURE, nullptr); +} + +// This class is thread safe +template <typename Callback> +class DeathHandler : public hidl_death_recipient { + public: + void serviceDied(uint64_t /*cookie*/, const wp<hidl::base::V1_0::IBase>& /*who*/) override { + LOG(ERROR) << "DeathHandler::serviceDied -- service unexpectedly died!"; + std::lock_guard<std::mutex> hold(mMutex); + std::for_each(mCallbacks.begin(), mCallbacks.end(), + [](const auto& cb) { cb->notifyAsDeadObject(); }); + } + + [[nodiscard]] base::ScopeGuard<std::function<void()>> protectCallback( + const sp<Callback>& callback) { + registerCallback(callback); + return ::android::base::make_scope_guard( + [this, callback] { unregisterCallback(callback); }); + } + + private: + void registerCallback(const sp<Callback>& callback) { + std::lock_guard<std::mutex> hold(mMutex); + mCallbacks.push_back(callback); + } + + void unregisterCallback(const sp<Callback>& callback) { + std::lock_guard<std::mutex> hold(mMutex); + mCallbacks.erase(std::remove(mCallbacks.begin(), mCallbacks.end(), callback), + mCallbacks.end()); + } + + std::mutex mMutex; + std::vector<sp<Callback>> 
mCallbacks GUARDED_BY(mMutex); +}; + +} // anonymous namespace + +class IDeviceDeathHandler : public DeathHandler<PreparedModelCallback> {}; +class IPreparedModelDeathHandler : public DeathHandler<ExecutionCallback> {}; + +static std::pair<int, std::shared_ptr<VersionedIPreparedModel>> makeVersionedIPreparedModel( + sp<V1_0::IPreparedModel> preparedModel) { + CHECK(preparedModel != nullptr) + << "makeVersionedIPreparedModel passed invalid preparedModel object."; + + // create death handler object + sp<IPreparedModelDeathHandler> deathHandler = new IPreparedModelDeathHandler(); + + // linkToDeath registers a callback that will be invoked on service death to + // proactively handle service crashes. If the linkToDeath call fails, + // asynchronous calls are susceptible to hangs if the service crashes before + // providing the response. + const Return<bool> ret = preparedModel->linkToDeath(deathHandler, 0); + if (ret.isDeadObject()) { + LOG(ERROR) << "makeVersionedIPreparedModel failed to register a death recipient for the " + "IPreparedModel object because the IPreparedModel object is dead."; + return {ANEURALNETWORKS_DEAD_OBJECT, nullptr}; + } + if (!ret.isOk()) { + LOG(ERROR) << "makeVersionedIPreparedModel failed to register a death recipient for the " + "IPreparedModel object because of failure: " + << ret.description(); + return {ANEURALNETWORKS_OP_FAILED, nullptr}; + } + if (ret != true) { + LOG(ERROR) << "makeVersionedIPreparedModel failed to register a death recipient for the " + "IPreparedModel object."; + return {ANEURALNETWORKS_OP_FAILED, nullptr}; + } + + // return a valid VersionedIPreparedModel object + return {ANEURALNETWORKS_NO_ERROR, std::make_shared<VersionedIPreparedModel>( + std::move(preparedModel), std::move(deathHandler))}; +} + +VersionedIPreparedModel::VersionedIPreparedModel(sp<V1_0::IPreparedModel> preparedModel, + sp<IPreparedModelDeathHandler> deathHandler) + : mPreparedModelV1_0(std::move(preparedModel)), + 
mPreparedModelV1_2(V1_2::IPreparedModel::castFrom(mPreparedModelV1_0).withDefault(nullptr)), + mPreparedModelV1_3(V1_3::IPreparedModel::castFrom(mPreparedModelV1_0).withDefault(nullptr)), + mDeathHandler(std::move(deathHandler)) {} + +VersionedIPreparedModel::~VersionedIPreparedModel() { + // It is safe to ignore any errors resulting from this unlinkToDeath call + // because the VersionedIPreparedModel object is already being destroyed and + // its underlying IPreparedModel object is no longer being used by the NN + // runtime. + mPreparedModelV1_0->unlinkToDeath(mDeathHandler).isOk(); +} + +std::tuple<int, std::vector<OutputShape>, Timing> VersionedIPreparedModel::executeAsynchronously( + const Request& request, MeasureTiming measure, const std::optional<Deadline>& deadline, + const OptionalTimeoutDuration& loopTimeoutDuration) const { + const auto failDeadObject = []() -> std::tuple<int, std::vector<OutputShape>, Timing> { + return {ANEURALNETWORKS_DEAD_OBJECT, {}, kNoTiming}; + }; + const auto failWithStatus = [](ErrorStatus status) { + return getExecutionResult(status, {}, kNoTiming); + }; + const auto getResults = [failDeadObject](const ExecutionCallback& cb) { + if (cb.isDeadObject()) { + return failDeadObject(); + } + return getExecutionResult(cb.getStatus(), cb.getOutputShapes(), cb.getTiming()); + }; + + const sp<ExecutionCallback> callback = new ExecutionCallback(); + const auto scoped = mDeathHandler->protectCallback(callback); + + // version 1.3+ HAL + if (mPreparedModelV1_3 != nullptr) { + const auto otp = makeTimePoint(deadline); + Return<ErrorStatus> ret = mPreparedModelV1_3->execute_1_3(request, measure, otp, + loopTimeoutDuration, callback); + if (ret.isDeadObject()) { + LOG(ERROR) << "execute_1_3 failure: " << ret.description(); + return failDeadObject(); + } + if (!ret.isOk()) { + LOG(ERROR) << "execute_1_3 failure: " << ret.description(); + return failWithStatus(ErrorStatus::GENERAL_FAILURE); + } + if (ret != ErrorStatus::NONE) { + LOG(ERROR) << 
"execute_1_3 returned " << toString(static_cast<ErrorStatus>(ret)); + return failWithStatus(ret); + } + callback->wait(); + return getResults(*callback); + } + + // version 1.2 HAL + if (mPreparedModelV1_2 != nullptr) { + const bool compliant = compliantWithV1_2(request); + if (!compliant) { + LOG(ERROR) << "Could not handle execute_1_2!"; + return failWithStatus(ErrorStatus::GENERAL_FAILURE); + } + const V1_0::Request request12 = convertToV1_2(request); + Return<V1_0::ErrorStatus> ret = + mPreparedModelV1_2->execute_1_2(request12, measure, callback); + if (ret.isDeadObject()) { + LOG(ERROR) << "execute_1_2 failure: " << ret.description(); + return failDeadObject(); + } + if (!ret.isOk()) { + LOG(ERROR) << "execute_1_2 failure: " << ret.description(); + return failWithStatus(ErrorStatus::GENERAL_FAILURE); + } + const V1_0::ErrorStatus status = static_cast<V1_0::ErrorStatus>(ret); + if (status != V1_0::ErrorStatus::NONE) { + LOG(ERROR) << "execute_1_2 returned " << toString(status); + return failWithStatus(convertToV1_3(status)); + } + callback->wait(); + return getResults(*callback); + } + + // version 1.0 HAL + if (mPreparedModelV1_0 != nullptr) { + const bool compliant = compliantWithV1_0(request); + if (!compliant) { + LOG(ERROR) << "Could not handle execute!"; + return failWithStatus(ErrorStatus::GENERAL_FAILURE); + } + const V1_0::Request request10 = convertToV1_0(request); + Return<V1_0::ErrorStatus> ret = mPreparedModelV1_0->execute(request10, callback); + if (ret.isDeadObject()) { + LOG(ERROR) << "execute failure: " << ret.description(); + return failDeadObject(); + } + if (!ret.isOk()) { + LOG(ERROR) << "execute failure: " << ret.description(); + return failWithStatus(ErrorStatus::GENERAL_FAILURE); + } + const V1_0::ErrorStatus status = static_cast<V1_0::ErrorStatus>(ret); + if (status != V1_0::ErrorStatus::NONE) { + LOG(ERROR) << "execute returned " << toString(status); + return failWithStatus(convertToV1_3(status)); + } + callback->wait(); + return 
getResults(*callback); + } + + // No prepared model available + LOG(ERROR) << "executeAsynchronously called with no preparedModel"; + return failWithStatus(ErrorStatus::GENERAL_FAILURE); +} + +std::tuple<int, std::vector<OutputShape>, Timing> VersionedIPreparedModel::executeSynchronously( + const Request& request, MeasureTiming measure, const std::optional<Deadline>& deadline, + const OptionalTimeoutDuration& loopTimeoutDuration) const { + const std::tuple<int, std::vector<OutputShape>, Timing> kDeadObject = { + ANEURALNETWORKS_DEAD_OBJECT, {}, kNoTiming}; + const auto kFailure = getExecutionResult(ErrorStatus::GENERAL_FAILURE, {}, kNoTiming); + + // version 1.3+ HAL + if (mPreparedModelV1_3 != nullptr) { + std::tuple<int, std::vector<OutputShape>, Timing> result; + const auto otp = makeTimePoint(deadline); + Return<void> ret = mPreparedModelV1_3->executeSynchronously_1_3( + request, measure, otp, loopTimeoutDuration, + [&result](ErrorStatus error, const hidl_vec<OutputShape>& outputShapes, + const Timing& timing) { + result = getExecutionResult(error, outputShapes, timing); + }); + if (ret.isDeadObject()) { + LOG(ERROR) << "executeSynchronously_1_3 failure: " << ret.description(); + return kDeadObject; + } + if (!ret.isOk()) { + LOG(ERROR) << "executeSynchronously_1_3 failure: " << ret.description(); + return kFailure; + } + return result; + } + + // version 1.2 HAL + if (mPreparedModelV1_2 != nullptr) { + const bool compliant = compliantWithV1_2(request); + if (!compliant) { + LOG(ERROR) << "Could not handle executeSynchronously!"; + return kFailure; + } + const V1_0::Request request12 = convertToV1_2(request); + + std::tuple<int, std::vector<OutputShape>, Timing> result; + Return<void> ret = mPreparedModelV1_2->executeSynchronously( + request12, measure, + [&result](V1_0::ErrorStatus error, const hidl_vec<OutputShape>& outputShapes, + const Timing& timing) { + result = getExecutionResult(convertToV1_3(error), outputShapes, timing); + }); + if 
(ret.isDeadObject()) { + LOG(ERROR) << "executeSynchronously failure: " << ret.description(); + return kDeadObject; + } + if (!ret.isOk()) { + LOG(ERROR) << "executeSynchronously failure: " << ret.description(); + return kFailure; + } + return result; + } + + // Fallback to asynchronous execution. + return executeAsynchronously(request, measure, deadline, loopTimeoutDuration); +} + +std::tuple<int, std::vector<OutputShape>, Timing> VersionedIPreparedModel::execute( + const Request& request, MeasureTiming measure, const std::optional<Deadline>& deadline, + const OptionalTimeoutDuration& loopTimeoutDuration, bool preferSynchronous) const { + if (preferSynchronous) { + VLOG(EXECUTION) << "Before executeSynchronously() " << SHOW_IF_DEBUG(toString(request)); + return executeSynchronously(request, measure, deadline, loopTimeoutDuration); + } + + VLOG(EXECUTION) << "Before executeAsynchronously() " << SHOW_IF_DEBUG(toString(request)); + return executeAsynchronously(request, measure, deadline, loopTimeoutDuration); +} + +// This is the amount of time the ExecutionBurstController should spend polling +// the FMQ to see if it has data available before it should fall back to +// waiting on the futex. +static std::chrono::microseconds getPollingTimeWindow() { + constexpr int32_t defaultPollingTimeWindow = 50; +#ifdef NN_DEBUGGABLE + constexpr int32_t minPollingTimeWindow = 0; + const int32_t selectedPollingTimeWindow = + base::GetIntProperty("debug.nn.burst-conrtoller-polling-window", + defaultPollingTimeWindow, minPollingTimeWindow); + return std::chrono::microseconds{selectedPollingTimeWindow}; +#else + return std::chrono::microseconds{defaultPollingTimeWindow}; +#endif // NN_DEBUGGABLE +} + +std::shared_ptr<ExecutionBurstController> VersionedIPreparedModel::configureExecutionBurst( + bool preferPowerOverLatency) const { + if (mPreparedModelV1_2 == nullptr) { + return nullptr; + } + const auto pollingTimeWindow = + (preferPowerOverLatency ? 
std::chrono::microseconds{0} : getPollingTimeWindow()); + return ExecutionBurstController::create(mPreparedModelV1_2, pollingTimeWindow); +} + +static std::pair<ErrorStatus, Capabilities> getCapabilitiesFunction(V1_3::IDevice* device) { + CHECK(device != nullptr); + NNTRACE_FULL(NNTRACE_LAYER_IPC, NNTRACE_PHASE_INITIALIZATION, "getCapabilities_1_3"); + const std::pair<ErrorStatus, Capabilities> kFailure = {ErrorStatus::GENERAL_FAILURE, {}}; + std::pair<ErrorStatus, Capabilities> result = kFailure; + const Return<void> ret = device->getCapabilities_1_3( + [&result](ErrorStatus error, const Capabilities& capabilities) { + result = std::make_pair(error, capabilities); + }); + if (!ret.isOk()) { + LOG(ERROR) << "getCapabilities_1_3 failure: " << ret.description(); + return kFailure; + } + return result; +} + +std::tuple<int, hal::hidl_handle, sp<hal::IFencedExecutionCallback>, hal::Timing> +VersionedIPreparedModel::executeFenced( + const hal::Request& request, const hal::hidl_vec<hal::hidl_handle>& waitFor, + MeasureTiming measure, const std::optional<Deadline>& deadline, + const OptionalTimeoutDuration& loopTimeoutDuration, + const hal::OptionalTimeoutDuration& timeoutDurationAfterFence) { + // version 1.3+ HAL + hal::hidl_handle syncFence; + sp<hal::IFencedExecutionCallback> dispatchCallback; + hal::Timing timing = {UINT64_MAX, UINT64_MAX}; + if (mPreparedModelV1_3 != nullptr) { + ErrorStatus errorStatus; + const auto otp = makeTimePoint(deadline); + Return<void> ret = mPreparedModelV1_3->executeFenced( + request, waitFor, measure, otp, loopTimeoutDuration, timeoutDurationAfterFence, + [&syncFence, &errorStatus, &dispatchCallback]( + ErrorStatus error, const hidl_handle& handle, + const sp<hal::IFencedExecutionCallback>& callback) { + syncFence = handle; + errorStatus = error; + dispatchCallback = callback; + }); + if (!ret.isOk()) { + LOG(ERROR) << "executeFenced failure: " << ret.description(); + return std::make_tuple(ANEURALNETWORKS_OP_FAILED, 
hal::hidl_handle(nullptr), nullptr, + timing); + } + if (errorStatus != ErrorStatus::NONE) { + LOG(ERROR) << "executeFenced returned " + << toString(static_cast<ErrorStatus>(errorStatus)); + return std::make_tuple(convertErrorStatusToResultCode(errorStatus), + hal::hidl_handle(nullptr), nullptr, timing); + } + return std::make_tuple(ANEURALNETWORKS_NO_ERROR, syncFence, dispatchCallback, timing); + } + + // fallback to synchronous execution if sync_fence is not supported + // first wait for all sync fences to be ready. + LOG(INFO) << "No drivers able to handle sync fences, falling back to regular execution"; + for (const auto& fenceHandle : waitFor) { + if (!fenceHandle.getNativeHandle()) { + return std::make_tuple(ANEURALNETWORKS_BAD_DATA, hal::hidl_handle(nullptr), nullptr, + timing); + } + int syncFd = fenceHandle.getNativeHandle()->data[0]; + if (syncFd <= 0) { + return std::make_tuple(ANEURALNETWORKS_BAD_DATA, hal::hidl_handle(nullptr), nullptr, + timing); + } + auto r = syncWait(syncFd, -1); + if (r != FenceState::SIGNALED) { + LOG(ERROR) << "syncWait failed, fd: " << syncFd; + return std::make_tuple(ANEURALNETWORKS_OP_FAILED, hal::hidl_handle(nullptr), nullptr, + timing); + } + } + int errorCode; + std::tie(errorCode, std::ignore, timing) = + executeSynchronously(request, measure, deadline, loopTimeoutDuration); + return std::make_tuple(errorCode, hal::hidl_handle(nullptr), nullptr, timing); +} + +static std::pair<ErrorStatus, Capabilities> getCapabilitiesFunction(V1_2::IDevice* device) { + CHECK(device != nullptr); + NNTRACE_FULL(NNTRACE_LAYER_IPC, NNTRACE_PHASE_INITIALIZATION, "getCapabilities_1_2"); + const std::pair<ErrorStatus, Capabilities> kFailure = {ErrorStatus::GENERAL_FAILURE, {}}; + std::pair<ErrorStatus, Capabilities> result = kFailure; + const Return<void> ret = device->getCapabilities_1_2( + [&result](V1_0::ErrorStatus error, const V1_2::Capabilities& capabilities) { + result = std::make_pair(convertToV1_3(error), convertToV1_3(capabilities)); 
+ }); + if (!ret.isOk()) { + LOG(ERROR) << "getCapabilities_1_2 failure: " << ret.description(); + return kFailure; + } + return result; +} + +static std::pair<ErrorStatus, Capabilities> getCapabilitiesFunction(V1_1::IDevice* device) { + CHECK(device != nullptr); + NNTRACE_FULL(NNTRACE_LAYER_IPC, NNTRACE_PHASE_INITIALIZATION, "getCapabilities_1_1"); + const std::pair<ErrorStatus, Capabilities> kFailure = {ErrorStatus::GENERAL_FAILURE, {}}; + std::pair<ErrorStatus, Capabilities> result = kFailure; + const Return<void> ret = device->getCapabilities_1_1( + [&result](V1_0::ErrorStatus error, const V1_1::Capabilities& capabilities) { + // Time taken to convert capabilities is trivial + result = std::make_pair(convertToV1_3(error), convertToV1_3(capabilities)); + }); + if (!ret.isOk()) { + LOG(ERROR) << "getCapabilities_1_1 failure: " << ret.description(); + return kFailure; + } + return result; +} + +static std::pair<ErrorStatus, Capabilities> getCapabilitiesFunction(V1_0::IDevice* device) { + CHECK(device != nullptr); + NNTRACE_FULL(NNTRACE_LAYER_IPC, NNTRACE_PHASE_INITIALIZATION, "getCapabilities"); + const std::pair<ErrorStatus, Capabilities> kFailure = {ErrorStatus::GENERAL_FAILURE, {}}; + std::pair<ErrorStatus, Capabilities> result = kFailure; + const Return<void> ret = device->getCapabilities( + [&result](V1_0::ErrorStatus error, const V1_0::Capabilities& capabilities) { + // Time taken to convert capabilities is trivial + result = std::make_pair(convertToV1_3(error), convertToV1_3(capabilities)); + }); + if (!ret.isOk()) { + LOG(ERROR) << "getCapabilities failure: " << ret.description(); + return kFailure; + } + return result; +} + +static std::pair<ErrorStatus, hidl_vec<Extension>> getSupportedExtensionsFunction( + V1_2::IDevice* device) { + CHECK(device != nullptr); + NNTRACE_FULL(NNTRACE_LAYER_IPC, NNTRACE_PHASE_INITIALIZATION, "getSupportedExtensions"); + const std::pair<ErrorStatus, hidl_vec<Extension>> kFailure = {ErrorStatus::GENERAL_FAILURE, {}}; + 
std::pair<ErrorStatus, hidl_vec<Extension>> result = kFailure; + const Return<void> ret = device->getSupportedExtensions( + [&result](V1_0::ErrorStatus error, const hidl_vec<Extension>& extensions) { + result = std::make_pair(convertToV1_3(error), extensions); + }); + if (!ret.isOk()) { + LOG(ERROR) << "getSupportedExtensions failure: " << ret.description(); + return kFailure; + } + return result; +} + +static std::pair<ErrorStatus, hidl_vec<Extension>> getSupportedExtensionsFunction( + V1_0::IDevice* device) { + CHECK(device != nullptr); + return {ErrorStatus::NONE, {/* No extensions. */}}; +} + +static int32_t getTypeFunction(V1_2::IDevice* device) { + CHECK(device != nullptr); + constexpr int32_t kFailure = -1; + int32_t result = kFailure; + const Return<void> ret = + device->getType([&result](V1_0::ErrorStatus error, DeviceType deviceType) { + if (error == V1_0::ErrorStatus::NONE) { + result = static_cast<int32_t>(deviceType); + } + }); + if (!ret.isOk()) { + LOG(ERROR) << "getType failure: " << ret.description(); + return kFailure; + } + return result; +} + +static int32_t getTypeFunction(V1_0::IDevice* device) { + CHECK(device != nullptr); + return ANEURALNETWORKS_DEVICE_UNKNOWN; +} + +static std::pair<ErrorStatus, hidl_string> getVersionStringFunction(V1_2::IDevice* device) { + CHECK(device != nullptr); + const std::pair<ErrorStatus, hidl_string> kFailure = {ErrorStatus::GENERAL_FAILURE, ""}; + std::pair<ErrorStatus, hidl_string> result = kFailure; + const Return<void> ret = device->getVersionString( + [&result](V1_0::ErrorStatus error, const hidl_string& version) { + result = std::make_pair(convertToV1_3(error), version); + }); + if (!ret.isOk()) { + LOG(ERROR) << "getVersion failure: " << ret.description(); + return kFailure; + } + return result; +} + +static std::pair<ErrorStatus, hidl_string> getVersionStringFunction(V1_0::IDevice* device) { + CHECK(device != nullptr); + return {ErrorStatus::NONE, "UNKNOWN"}; +} + +static std::tuple<ErrorStatus, 
uint32_t, uint32_t> getNumberOfCacheFilesNeededFunction( + V1_2::IDevice* device) { + CHECK(device != nullptr); + constexpr std::tuple<ErrorStatus, uint32_t, uint32_t> kFailure = {ErrorStatus::GENERAL_FAILURE, + 0, 0}; + std::tuple<ErrorStatus, uint32_t, uint32_t> result = kFailure; + const Return<void> ret = device->getNumberOfCacheFilesNeeded( + [&result](V1_0::ErrorStatus error, uint32_t numModelCache, uint32_t numDataCache) { + result = {convertToV1_3(error), numModelCache, numDataCache}; + }); + if (!ret.isOk()) { + LOG(ERROR) << "getNumberOfCacheFilesNeeded failure: " << ret.description(); + return kFailure; + } + return result; +} + +static std::tuple<ErrorStatus, uint32_t, uint32_t> getNumberOfCacheFilesNeededFunction( + V1_0::IDevice* device) { + CHECK(device != nullptr); + return {ErrorStatus::NONE, 0, 0}; +} + +struct InitialData { + hal::Capabilities capabilities; + hal::hidl_vec<hal::Extension> supportedExtensions; + int32_t type; + hal::hidl_string versionString; + std::pair<uint32_t, uint32_t> numberOfCacheFilesNeeded; +}; + +template <typename Device> +static std::optional<InitialData> initializeFunction(Device* device) { + CHECK(device != nullptr); + + auto [capabilitiesStatus, capabilities] = getCapabilitiesFunction(device); + if (capabilitiesStatus != ErrorStatus::NONE) { + LOG(ERROR) << "IDevice::getCapabilities* returned the error " + << toString(capabilitiesStatus); + return std::nullopt; + } + VLOG(MANAGER) << "Capab " << toString(capabilities); + + auto [versionStatus, versionString] = getVersionStringFunction(device); + if (versionStatus != ErrorStatus::NONE) { + LOG(ERROR) << "IDevice::getVersionString returned the error " << toString(versionStatus); + return std::nullopt; + } + + const int32_t type = getTypeFunction(device); + if (type == -1) { + LOG(ERROR) << "IDevice::getType returned an error"; + return std::nullopt; + } + + auto [extensionsStatus, supportedExtensions] = getSupportedExtensionsFunction(device); + if (extensionsStatus != 
ErrorStatus::NONE) { + LOG(ERROR) << "IDevice::getSupportedExtensions returned the error " + << toString(extensionsStatus); + return std::nullopt; + } + + const auto [cacheFilesStatus, numModelCacheFiles, numDataCacheFiles] = + getNumberOfCacheFilesNeededFunction(device); + if (cacheFilesStatus != ErrorStatus::NONE) { + LOG(ERROR) << "IDevice::getNumberOfCacheFilesNeeded returned the error " + << toString(cacheFilesStatus); + return std::nullopt; + } + + // The following limit is enforced by VTS + constexpr uint32_t maxNumCacheFiles = + static_cast<uint32_t>(Constant::MAX_NUMBER_OF_CACHE_FILES); + if (numModelCacheFiles > maxNumCacheFiles || numDataCacheFiles > maxNumCacheFiles) { + LOG(ERROR) + << "IDevice::getNumberOfCacheFilesNeeded returned invalid number of cache files: " + "numModelCacheFiles = " + << numModelCacheFiles << ", numDataCacheFiles = " << numDataCacheFiles + << ", maxNumCacheFiles = " << maxNumCacheFiles; + return std::nullopt; + } + + return InitialData{ + /*.capabilities=*/std::move(capabilities), + /*.supportedExtensions=*/std::move(supportedExtensions), + /*.type=*/type, + /*.versionString=*/std::move(versionString), + /*.numberOfCacheFilesNeeded=*/{numModelCacheFiles, numDataCacheFiles}, + }; +} + +template <typename Core> +std::optional<InitialData> initialize(const Core& core) { + // version 1.3+ HAL + if (const auto device = core.template getDevice<V1_3::IDevice>()) { + return initializeFunction(device.get()); + } + + // version 1.2 HAL + if (const auto device = core.template getDevice<V1_2::IDevice>()) { + return initializeFunction(device.get()); + } + + // version 1.1 HAL + if (const auto device = core.template getDevice<V1_1::IDevice>()) { + return initializeFunction(device.get()); + } + + // version 1.0 HAL + if (const auto device = core.template getDevice<V1_0::IDevice>()) { + return initializeFunction(device.get()); + } + + // No device available + LOG(ERROR) << "Device not available!"; + return std::nullopt; +} + 
+std::shared_ptr<VersionedIDevice> VersionedIDevice::create(std::string serviceName, + const DeviceFactory& makeDevice) { + CHECK(makeDevice != nullptr) + << "VersionedIDevice::create passed invalid device factory object."; + + // get handle to IDevice object + sp<V1_0::IDevice> device = makeDevice(/*blocking=*/true); + if (device == nullptr) { + VLOG(DRIVER) << "VersionedIDevice::create got a null IDevice for " << serviceName; + return nullptr; + } + + auto core = Core::create(std::move(device)); + if (!core.has_value()) { + LOG(ERROR) << "VersionedIDevice::create failed to create Core."; + return nullptr; + } + + auto initialData = initialize(*core); + if (!initialData.has_value()) { + LOG(ERROR) << "VersionedIDevice::create failed to initialize."; + return nullptr; + } + + auto [capabilities, supportedExtensions, type, versionString, numberOfCacheFilesNeeded] = + std::move(*initialData); + return std::make_shared<VersionedIDevice>( + std::move(capabilities), std::move(supportedExtensions), type, std::move(versionString), + numberOfCacheFilesNeeded, std::move(serviceName), makeDevice, std::move(core.value())); +} + +VersionedIDevice::VersionedIDevice(hal::Capabilities capabilities, + std::vector<hal::Extension> supportedExtensions, int32_t type, + std::string versionString, + std::pair<uint32_t, uint32_t> numberOfCacheFilesNeeded, + std::string serviceName, const DeviceFactory& makeDevice, + Core core) + : kCapabilities(std::move(capabilities)), + kSupportedExtensions(std::move(supportedExtensions)), + kType(type), + kVersionString(std::move(versionString)), + kNumberOfCacheFilesNeeded(numberOfCacheFilesNeeded), + kServiceName(std::move(serviceName)), + kMakeDevice(makeDevice), + mCore(std::move(core)) {} + +std::optional<VersionedIDevice::Core> VersionedIDevice::Core::create(sp<V1_0::IDevice> device) { + CHECK(device != nullptr) << "VersionedIDevice::Core::create passed invalid device object."; + + // create death handler object + sp<IDeviceDeathHandler> 
deathHandler = new IDeviceDeathHandler(); + + // linkToDeath registers a callback that will be invoked on service death to + // proactively handle service crashes. If the linkToDeath call fails, + // asynchronous calls are susceptible to hangs if the service crashes before + // providing the response. + const Return<bool> ret = device->linkToDeath(deathHandler, 0); + if (!ret.isOk()) { + LOG(ERROR) << "VersionedIDevice::Core::create failed to register a death recipient for the " + "IDevice object because of failure: " + << ret.description(); + return {}; + } + if (ret != true) { + LOG(ERROR) << "VersionedIDevice::Core::create failed to register a death recipient for the " + "IDevice object."; + return {}; + } + + // return a valid Core object + return Core(std::move(device), std::move(deathHandler)); +} + +// HIDL guarantees all V1_1 interfaces inherit from their corresponding V1_0 interfaces. +VersionedIDevice::Core::Core(sp<V1_0::IDevice> device, sp<IDeviceDeathHandler> deathHandler) + : mDeviceV1_0(std::move(device)), + mDeviceV1_1(V1_1::IDevice::castFrom(mDeviceV1_0).withDefault(nullptr)), + mDeviceV1_2(V1_2::IDevice::castFrom(mDeviceV1_0).withDefault(nullptr)), + mDeviceV1_3(V1_3::IDevice::castFrom(mDeviceV1_0).withDefault(nullptr)), + mDeathHandler(std::move(deathHandler)) {} + +VersionedIDevice::Core::~Core() { + if (mDeathHandler != nullptr) { + CHECK(mDeviceV1_0 != nullptr); + // It is safe to ignore any errors resulting from this unlinkToDeath call + // because the VersionedIDevice::Core object is already being destroyed and + // its underlying IDevice object is no longer being used by the NN runtime. 
+ mDeviceV1_0->unlinkToDeath(mDeathHandler).isOk(); + } +} + +VersionedIDevice::Core::Core(Core&& other) noexcept + : mDeviceV1_0(std::move(other.mDeviceV1_0)), + mDeviceV1_1(std::move(other.mDeviceV1_1)), + mDeviceV1_2(std::move(other.mDeviceV1_2)), + mDeviceV1_3(std::move(other.mDeviceV1_3)), + mDeathHandler(std::move(other.mDeathHandler)) { + other.mDeathHandler = nullptr; +} + +VersionedIDevice::Core& VersionedIDevice::Core::operator=(Core&& other) noexcept { + if (this != &other) { + mDeviceV1_0 = std::move(other.mDeviceV1_0); + mDeviceV1_1 = std::move(other.mDeviceV1_1); + mDeviceV1_2 = std::move(other.mDeviceV1_2); + mDeviceV1_3 = std::move(other.mDeviceV1_3); + mDeathHandler = std::move(other.mDeathHandler); + other.mDeathHandler = nullptr; + } + return *this; +} + +template <typename T_IDevice> +std::pair<sp<T_IDevice>, sp<IDeviceDeathHandler>> VersionedIDevice::Core::getDeviceAndDeathHandler() + const { + return {getDevice<T_IDevice>(), mDeathHandler}; +} + +template <typename T_Return, typename T_IDevice, typename T_Callback> +Return<T_Return> callProtected(const char* context, + const std::function<Return<T_Return>(const sp<T_IDevice>&)>& fn, + const sp<T_IDevice>& device, const sp<T_Callback>& callback, + const sp<IDeviceDeathHandler>& deathHandler) { + const auto scoped = deathHandler->protectCallback(callback); + Return<T_Return> ret = fn(device); + // Suppose there was a transport error. We have the following cases: + // 1. Either not due to a dead device, or due to a device that was + // already dead at the time of the call to protectCallback(). In + // this case, the callback was never signalled. + // 2. Due to a device that died after the call to protectCallback() but + // before fn() completed. In this case, the callback was (or will + // be) signalled by the deathHandler. + // Furthermore, what if there was no transport error, but the ErrorStatus is + // other than NONE? 
We'll conservatively signal the callback anyway, just in + // case the driver was sloppy and failed to do so. + if (!ret.isOk() || ret != T_Return::NONE) { + // What if the deathHandler has signalled or will signal the callback? + // This is fine -- we're permitted to signal multiple times; and we're + // sending the same signal that the deathHandler does. + // + // What if the driver signalled the callback? Then this signal is + // ignored. + + if (ret.isOk()) { + LOG(ERROR) << context << " returned " << toString(static_cast<T_Return>(ret)); + } else { + LOG(ERROR) << context << " failure: " << ret.description(); + } + sendFailureMessage(callback.get()); + } + callback->wait(); + return ret; +} +template <typename T_Return, typename T_IDevice> +Return<T_Return> callProtected(const char*, + const std::function<Return<T_Return>(const sp<T_IDevice>&)>& fn, + const sp<T_IDevice>& device, const std::nullptr_t&, + const sp<IDeviceDeathHandler>&) { + return fn(device); +} + +template <typename T_Return, typename T_IDevice, typename T_Callback> +Return<T_Return> VersionedIDevice::recoverable( + const char* context, const std::function<Return<T_Return>(const sp<T_IDevice>&)>& fn, + const T_Callback& callback) const EXCLUDES(mMutex) { + CHECK_EQ(callback == nullptr, (std::is_same_v<T_Callback, std::nullptr_t>)); + + sp<T_IDevice> device; + sp<IDeviceDeathHandler> deathHandler; + std::tie(device, deathHandler) = getDeviceAndDeathHandler<T_IDevice>(); + + Return<T_Return> ret = callProtected(context, fn, device, callback, deathHandler); + + if (ret.isDeadObject()) { + { + std::unique_lock lock(mMutex); + // It's possible that another device has already done the recovery. + // It's harmless but wasteful for us to do so in this case. 
+ auto pingReturn = mCore.getDevice<T_IDevice>()->ping(); + if (pingReturn.isDeadObject()) { + VLOG(DRIVER) << "VersionedIDevice::recoverable(" << context << ") -- Recovering " + << kServiceName; + sp<V1_0::IDevice> recoveredDevice = kMakeDevice(/*blocking=*/false); + if (recoveredDevice == nullptr) { + VLOG(DRIVER) << "VersionedIDevice::recoverable got a null IDEVICE for " + << kServiceName; + return ret; + } + + auto core = Core::create(std::move(recoveredDevice)); + if (!core.has_value()) { + LOG(ERROR) << "VersionedIDevice::recoverable failed to create Core."; + return ret; + } + + mCore = std::move(core.value()); + } else { + VLOG(DRIVER) << "VersionedIDevice::recoverable(" << context + << ") -- Someone else recovered " << kServiceName; + // Might still have a transport error, which we need to check + // before pingReturn goes out of scope. + (void)pingReturn.isOk(); + } + std::tie(device, deathHandler) = mCore.getDeviceAndDeathHandler<T_IDevice>(); + } + ret = callProtected(context, fn, device, callback, deathHandler); + // It's possible that the device died again, but we're only going to + // attempt recovery once per call to recoverable(). + } + return ret; +} + +int VersionedIDevice::wait() const { + std::unique_lock lock(mMutex); + // It's possible that another device has already done the recovery. + // It's harmless but wasteful for us to do so in this case. 
+ auto pingReturn = mCore.getDevice<V1_0::IDevice>()->ping(); + if (pingReturn.isDeadObject()) { + VLOG(DRIVER) << "VersionedIDevice::wait -- Recovering " << kServiceName; + sp<V1_0::IDevice> recoveredDevice = kMakeDevice(/*blocking=*/true); + if (recoveredDevice == nullptr) { + LOG(ERROR) << "VersionedIDevice::wait got a null IDevice for " << kServiceName; + return ANEURALNETWORKS_OP_FAILED; + } + + auto core = Core::create(std::move(recoveredDevice)); + if (!core.has_value()) { + LOG(ERROR) << "VersionedIDevice::wait failed to create Core."; + return ANEURALNETWORKS_OP_FAILED; + } + + mCore = std::move(core.value()); + } else if (!pingReturn.isOk()) { + LOG(ERROR) << "VersionedIDevice::wait failed -- IDevice::ping returned " + << pingReturn.description(); + return ANEURALNETWORKS_OP_FAILED; + } + + return ANEURALNETWORKS_NO_ERROR; +} + +const Capabilities& VersionedIDevice::getCapabilities() const { + return kCapabilities; +} + +const std::vector<Extension>& VersionedIDevice::getSupportedExtensions() const { + return kSupportedExtensions; +} + +std::pair<ErrorStatus, hidl_vec<bool>> VersionedIDevice::getSupportedOperations( + const MetaModel& metaModel) const { + const std::pair<ErrorStatus, hidl_vec<bool>> kFailure = {ErrorStatus::GENERAL_FAILURE, {}}; + std::pair<ErrorStatus, hidl_vec<bool>> result; + + const Model& model = metaModel.getModel(); + + auto noneSupported = [&model] { + hidl_vec<bool> supported(model.main.operations.size()); + std::fill(supported.begin(), supported.end(), false); + return std::make_pair(ErrorStatus::NONE, std::move(supported)); + }; + + auto remappedResult = [&model](const std::pair<ErrorStatus, hidl_vec<bool>>& result, + const std::function<uint32_t(uint32_t)>& + slicedModelOperationIndexToModelOperationIndex) { + const ErrorStatus status = result.first; + const hidl_vec<bool>& supported = result.second; + hidl_vec<bool> remappedSupported(model.main.operations.size()); + std::fill(remappedSupported.begin(), 
remappedSupported.end(), false); + for (size_t i = 0; i < supported.size(); ++i) { + if (supported[i]) { + remappedSupported[slicedModelOperationIndexToModelOperationIndex(i)] = true; + } + } + return std::make_pair(status, std::move(remappedSupported)); + }; + + // version 1.3+ HAL + if (getDevice<V1_3::IDevice>() != nullptr) { + NNTRACE_FULL(NNTRACE_LAYER_IPC, NNTRACE_PHASE_COMPILATION, "getSupportedOperations_1_3"); + Return<void> ret = recoverable<void, V1_3::IDevice>( + __FUNCTION__, [&model, &result](const sp<V1_3::IDevice>& device) { + return device->getSupportedOperations_1_3( + model, [&result](ErrorStatus error, const hidl_vec<bool>& supported) { + result = std::make_pair(error, supported); + }); + }); + if (!ret.isOk()) { + LOG(ERROR) << "getSupportedOperations_1_3 failure: " << ret.description(); + return kFailure; + } + return result; + } + + // version 1.2 HAL + if (getDevice<V1_2::IDevice>() != nullptr) { + const bool compliant = compliantWithV1_2(model); + V1_2::Model model12; + std::function<uint32_t(uint32_t)> slicedModelOperationIndexToModelOperationIndex; + if (compliant) { + model12 = convertToV1_2(model); + } else { + const auto slice12 = metaModel.getSliceV1_2(); + if (!slice12.has_value()) { + return noneSupported(); + } + std::tie(model12, slicedModelOperationIndexToModelOperationIndex) = *slice12; + } + NNTRACE_FULL(NNTRACE_LAYER_IPC, NNTRACE_PHASE_COMPILATION, "getSupportedOperations_1_2"); + Return<void> ret = recoverable<void, V1_2::IDevice>( + __FUNCTION__, [&model12, &result](const sp<V1_2::IDevice>& device) { + return device->getSupportedOperations_1_2( + model12, + [&result](V1_0::ErrorStatus error, const hidl_vec<bool>& supported) { + result = std::make_pair(convertToV1_3(error), supported); + }); + }); + if (!ret.isOk()) { + LOG(ERROR) << "getSupportedOperations_1_2 failure: " << ret.description(); + return kFailure; + } + if (!compliant) { + return remappedResult(result, slicedModelOperationIndexToModelOperationIndex); + } + 
return result; + } + + // version 1.1 HAL + if (getDevice<V1_1::IDevice>() != nullptr) { + const bool compliant = compliantWithV1_1(model); + V1_1::Model model11; + std::function<uint32_t(uint32_t)> slicedModelOperationIndexToModelOperationIndex; + if (compliant) { + model11 = convertToV1_1(model); + } else { + const auto slice11 = metaModel.getSliceV1_1(); + if (!slice11.has_value()) { + return noneSupported(); + } + std::tie(model11, slicedModelOperationIndexToModelOperationIndex) = *slice11; + } + NNTRACE_FULL(NNTRACE_LAYER_IPC, NNTRACE_PHASE_COMPILATION, "getSupportedOperations_1_1"); + Return<void> ret = recoverable<void, V1_1::IDevice>( + __FUNCTION__, [&model11, &result](const sp<V1_1::IDevice>& device) { + return device->getSupportedOperations_1_1( + model11, + [&result](V1_0::ErrorStatus error, const hidl_vec<bool>& supported) { + result = std::make_pair(convertToV1_3(error), supported); + }); + }); + if (!ret.isOk()) { + LOG(ERROR) << "getSupportedOperations_1_1 failure: " << ret.description(); + return kFailure; + } + if (!compliant) { + return remappedResult(result, slicedModelOperationIndexToModelOperationIndex); + } + return result; + } + + // version 1.0 HAL + if (getDevice<V1_0::IDevice>() != nullptr) { + const bool compliant = compliantWithV1_0(model); + V1_0::Model model10; + std::function<uint32_t(uint32_t)> slicedModelOperationIndexToModelOperationIndex; + if (compliant) { + model10 = convertToV1_0(model); + } else { + const auto slice10 = metaModel.getSliceV1_0(); + if (!slice10.has_value()) { + return noneSupported(); + } + std::tie(model10, slicedModelOperationIndexToModelOperationIndex) = *slice10; + } + NNTRACE_FULL(NNTRACE_LAYER_IPC, NNTRACE_PHASE_COMPILATION, "getSupportedOperations"); + Return<void> ret = recoverable<void, V1_0::IDevice>( + __FUNCTION__, [&model10, &result](const sp<V1_0::IDevice>& device) { + return device->getSupportedOperations( + model10, + [&result](V1_0::ErrorStatus error, const hidl_vec<bool>& supported) { + 
result = std::make_pair(convertToV1_3(error), supported); + }); + }); + if (!ret.isOk()) { + LOG(ERROR) << "getSupportedOperations failure: " << ret.description(); + return kFailure; + } + if (!compliant) { + return remappedResult(result, slicedModelOperationIndexToModelOperationIndex); + } + return result; + } + + // No device available + LOG(ERROR) << "Device not available!"; + return kFailure; +} + +// Opens cache file by filename and sets the handle to the opened fd. Returns false on fail. The +// handle is expected to come in as empty, and is only set to a fd when the function returns true. +// The file descriptor is always opened with both read and write permission. +static bool createCacheHandle(const std::string& cache, bool createIfNotExist, + hidl_handle* handle) { + CHECK(handle->getNativeHandle() == nullptr); + int fd = open(cache.c_str(), createIfNotExist ? (O_RDWR | O_CREAT) : O_RDWR, S_IRUSR | S_IWUSR); + NN_RET_CHECK_GE(fd, 0); + native_handle_t* cacheNativeHandle = native_handle_create(1, 0); + if (cacheNativeHandle == nullptr) { + close(fd); + return false; + } + cacheNativeHandle->data[0] = fd; + handle->setTo(cacheNativeHandle, /*shouldOwn=*/true); + return true; +} + +// Opens a list of cache files and returns the handle vector. Returns empty vector on fail. +// The file descriptors are always opened with both read and write permission. 
+static hidl_vec<hidl_handle> createCacheHandleVec(uint32_t numCacheFiles, + const std::string& baseFileName, + bool createIfNotExist) { + CHECK(numCacheFiles <= static_cast<uint32_t>(Constant::MAX_NUMBER_OF_CACHE_FILES)); + hidl_vec<hidl_handle> handles(numCacheFiles); + for (uint32_t i = 0; i < numCacheFiles; i++) { + std::string filename = baseFileName + std::to_string(i); + VLOG(COMPILATION) << "Cache " << i << ": " << filename; + if (!createCacheHandle(filename, createIfNotExist, &handles[i])) { + return hidl_vec<hidl_handle>(); + } + } + return handles; +} + +// Maps token to cache file names and sets the handle vectors to the opened fds. Returns false on +// fail and leaves the vectors empty. Each vector is expected to come in as empty. +static bool getCacheHandles(const std::string& cacheDir, const CacheToken& token, + const std::pair<uint32_t, uint32_t>& numCacheFiles, + bool createIfNotExist, hidl_vec<hidl_handle>* modelCache, + hidl_vec<hidl_handle>* dataCache) { + // The filename includes ANEURALNETWORKS_BYTE_SIZE_OF_CACHE_TOKEN * 2 characters for token, + // and 1 character for model/data cache identifier. 
+ std::string filename(ANEURALNETWORKS_BYTE_SIZE_OF_CACHE_TOKEN * 2 + 1, '0'); + for (uint32_t i = 0; i < ANEURALNETWORKS_BYTE_SIZE_OF_CACHE_TOKEN; i++) { + filename[i * 2] = 'A' + (token[i] & 0x0F); + filename[i * 2 + 1] = 'A' + (token[i] >> 4); + } + CHECK(cacheDir.empty() || cacheDir.back() == '/'); + std::string cacheFileName = cacheDir + filename; + + cacheFileName[ANEURALNETWORKS_BYTE_SIZE_OF_CACHE_TOKEN * 2] = '1'; + *modelCache = createCacheHandleVec(numCacheFiles.first, cacheFileName, createIfNotExist); + if (modelCache->size() != numCacheFiles.first) { + return false; + } + cacheFileName[ANEURALNETWORKS_BYTE_SIZE_OF_CACHE_TOKEN * 2] = '2'; + *dataCache = createCacheHandleVec(numCacheFiles.second, cacheFileName, createIfNotExist); + if (dataCache->size() != numCacheFiles.second) { + modelCache->resize(0); + return false; + } + return true; +} + +static std::pair<int, std::shared_ptr<VersionedIPreparedModel>> prepareModelFailure( + ErrorStatus status = ErrorStatus::GENERAL_FAILURE) { + return {convertErrorStatusToResultCode(status), nullptr}; +} + +static std::pair<int, std::shared_ptr<VersionedIPreparedModel>> prepareModelResult( + const PreparedModelCallback& callback, const char* prepareName, + const std::string& serviceName) { + callback.wait(); + if (callback.isDeadObject()) { + LOG(ERROR) << prepareName << " on " << serviceName + << " failed because the PreparedModel object is dead"; + return {ANEURALNETWORKS_DEAD_OBJECT, nullptr}; + } + const ErrorStatus status = callback.getStatus(); + const sp<V1_0::IPreparedModel> preparedModel = callback.getPreparedModel(); + + if (status != ErrorStatus::NONE) { + LOG(ERROR) << prepareName << " on " << serviceName << " failed: " + << "prepareReturnStatus=" << toString(status); + return prepareModelFailure(status); + } + if (preparedModel == nullptr) { + LOG(ERROR) << prepareName << " on " << serviceName << " failed: preparedModel is nullptr"; + return prepareModelFailure(); + } + + return 
makeVersionedIPreparedModel(preparedModel); +} + +std::pair<int, std::shared_ptr<VersionedIPreparedModel>> VersionedIDevice::prepareModelInternal( + const Model& model, ExecutionPreference preference, Priority priority, + const std::optional<Deadline>& deadline, const std::string& cacheDir, + const std::optional<CacheToken>& maybeToken) const { + // Note that some work within VersionedIDevice will be subtracted from the IPC layer + NNTRACE_FULL(NNTRACE_LAYER_IPC, NNTRACE_PHASE_COMPILATION, "prepareModel"); + const std::pair<int, std::shared_ptr<VersionedIPreparedModel>> kDeadObject = { + ANEURALNETWORKS_DEAD_OBJECT, nullptr}; + + // Get cache files if they exist, otherwise create them. + hidl_vec<hidl_handle> modelCache, dataCache; + if (!maybeToken.has_value() || + !getCacheHandles(cacheDir, *maybeToken, kNumberOfCacheFilesNeeded, + /*createIfNotExist=*/true, &modelCache, &dataCache)) { + modelCache.resize(0); + dataCache.resize(0); + } + + // Get the token if it exists, otherwise get a null token. 
+ static const CacheToken kNullToken{}; + const CacheToken token = maybeToken.value_or(kNullToken); + + const sp<PreparedModelCallback> callback = new PreparedModelCallback(); + + // If 1.3 device, try preparing model + if (getDevice<V1_3::IDevice>() != nullptr) { + const auto otp = makeTimePoint(deadline); + const Return<ErrorStatus> ret = recoverable<ErrorStatus, V1_3::IDevice>( + __FUNCTION__, + [&model, preference, priority, &otp, &modelCache, &dataCache, &token, + &callback](const sp<V1_3::IDevice>& device) { + return device->prepareModel_1_3(model, preference, priority, otp, modelCache, + dataCache, token, callback); + }, + callback); + if (ret.isDeadObject()) { + LOG(ERROR) << "prepareModel_1_3 failure: " << ret.description(); + return kDeadObject; + } + if (!ret.isOk()) { + LOG(ERROR) << "prepareModel_1_3 failure: " << ret.description(); + return prepareModelFailure(); + } + if (ret != ErrorStatus::NONE) { + LOG(ERROR) << "prepareModel_1_3 returned " << toString(static_cast<ErrorStatus>(ret)); + return prepareModelFailure(ret); + } + return prepareModelResult(*callback, "prepareModel_1_3", kServiceName); + } + + // If 1.2 device, try preparing model (requires conversion) + if (getDevice<V1_2::IDevice>() != nullptr) { + bool compliant = false; + V1_2::Model model12; + { + // Attribute time spent in model inspection and conversion to + // Runtime, as the time may be substantial (0.03ms for mobilenet, + // but could be larger for other models). 
+ NNTRACE_FULL_SUBTRACT(NNTRACE_LAYER_RUNTIME, NNTRACE_PHASE_COMPILATION, + "VersionedIDevice::prepareModel_1_2"); + compliant = compliantWithV1_2(model); + if (compliant) { + model12 = convertToV1_2(model); // copy is elided + } + } + if (compliant) { + const Return<V1_0::ErrorStatus> ret = recoverable<V1_0::ErrorStatus, V1_2::IDevice>( + __FUNCTION__, + [&model12, &preference, &modelCache, &dataCache, &token, + &callback](const sp<V1_2::IDevice>& device) { + return device->prepareModel_1_2(model12, preference, modelCache, dataCache, + token, callback); + }, + callback); + if (ret.isDeadObject()) { + LOG(ERROR) << "prepareModel_1_2 failure: " << ret.description(); + return kDeadObject; + } + if (!ret.isOk()) { + LOG(ERROR) << "prepareModel_1_2 failure: " << ret.description(); + return prepareModelFailure(); + } + const V1_0::ErrorStatus status = static_cast<V1_0::ErrorStatus>(ret); + if (status != V1_0::ErrorStatus::NONE) { + LOG(ERROR) << "prepareModel_1_2 returned " << toString(status); + return prepareModelFailure(convertToV1_3(status)); + } + return prepareModelResult(*callback, "prepareModel_1_2", kServiceName); + } + + LOG(ERROR) << "Could not handle prepareModel_1_2!"; + return prepareModelFailure(); + } + + // If 1.1 device, try preparing model (requires conversion) + if (getDevice<V1_1::IDevice>() != nullptr) { + bool compliant = false; + V1_1::Model model11; + { + // Attribute time spent in model inspection and conversion to + // Runtime, as the time may be substantial (0.03ms for mobilenet, + // but could be larger for other models). 
+ NNTRACE_FULL_SUBTRACT(NNTRACE_LAYER_RUNTIME, NNTRACE_PHASE_COMPILATION, + "VersionedIDevice::prepareModel_1_1"); + compliant = compliantWithV1_1(model); + if (compliant) { + model11 = convertToV1_1(model); // copy is elided + } + } + if (compliant) { + const Return<V1_0::ErrorStatus> ret = recoverable<V1_0::ErrorStatus, V1_1::IDevice>( + __FUNCTION__, + [&model11, &preference, &callback](const sp<V1_1::IDevice>& device) { + return device->prepareModel_1_1(model11, preference, callback); + }, + callback); + if (ret.isDeadObject()) { + LOG(ERROR) << "prepareModel_1_1 failure: " << ret.description(); + return kDeadObject; + } + if (!ret.isOk()) { + LOG(ERROR) << "prepareModel_1_1 failure: " << ret.description(); + return prepareModelFailure(); + } + const V1_0::ErrorStatus status = static_cast<V1_0::ErrorStatus>(ret); + if (status != V1_0::ErrorStatus::NONE) { + LOG(ERROR) << "prepareModel_1_1 returned " << toString(status); + return prepareModelFailure(convertToV1_3(status)); + } + return prepareModelResult(*callback, "prepareModel_1_1", kServiceName); + } + + LOG(ERROR) << "Could not handle prepareModel_1_1!"; + return prepareModelFailure(); + } + + // If 1.0 device, try preparing model (requires conversion) + if (getDevice<V1_0::IDevice>() != nullptr) { + bool compliant = false; + V1_0::Model model10; + { + // Attribute time spent in model inspection and conversion to + // Runtime, as the time may be substantial (0.03ms for mobilenet, + // but could be larger for other models). 
+ NNTRACE_FULL_SUBTRACT(NNTRACE_LAYER_RUNTIME, NNTRACE_PHASE_COMPILATION, + "VersionedIDevice::prepareModel"); + compliant = compliantWithV1_0(model); + if (compliant) { + model10 = convertToV1_0(model); // copy is elided + } + } + if (compliant) { + const Return<V1_0::ErrorStatus> ret = recoverable<V1_0::ErrorStatus, V1_0::IDevice>( + __FUNCTION__, + [&model10, &callback](const sp<V1_0::IDevice>& device) { + return device->prepareModel(model10, callback); + }, + callback); + if (ret.isDeadObject()) { + LOG(ERROR) << "prepareModel failure: " << ret.description(); + return kDeadObject; + } + if (!ret.isOk()) { + LOG(ERROR) << "prepareModel failure: " << ret.description(); + return prepareModelFailure(); + } + const V1_0::ErrorStatus status = static_cast<V1_0::ErrorStatus>(ret); + if (status != V1_0::ErrorStatus::NONE) { + LOG(ERROR) << "prepareModel returned " << toString(status); + return prepareModelFailure(convertToV1_3(status)); + } + return prepareModelResult(*callback, "prepareModel", kServiceName); + } + + LOG(ERROR) << "Could not handle prepareModel!"; + return prepareModelFailure(); + } + + // Return error because there is no valid device + LOG(ERROR) << "prepareModel called with no device"; + return prepareModelFailure(); +} + +std::pair<int, std::shared_ptr<VersionedIPreparedModel>> +VersionedIDevice::prepareModelFromCacheInternal(const std::optional<Deadline>& deadline, + const std::string& cacheDir, + const CacheToken& token) const { + // Note that some work within VersionedIDevice will be subtracted from the IPC layer + NNTRACE_FULL(NNTRACE_LAYER_IPC, NNTRACE_PHASE_COMPILATION, "prepareModelFromCache"); + VLOG(COMPILATION) << "prepareModelFromCache"; + const std::pair<int, std::shared_ptr<VersionedIPreparedModel>> kDeadObject = { + ANEURALNETWORKS_DEAD_OBJECT, nullptr}; + + // Get cache files if they exist, otherwise return from the function early. 
+ hidl_vec<hidl_handle> modelCache, dataCache; + if (!getCacheHandles(cacheDir, token, kNumberOfCacheFilesNeeded, + /*createIfNotExist=*/false, &modelCache, &dataCache)) { + return prepareModelFailure(); + } + + // version 1.3+ HAL + if (getDevice<V1_3::IDevice>() != nullptr) { + const auto otp = makeTimePoint(deadline); + const sp<PreparedModelCallback> callback = new PreparedModelCallback(); + const Return<ErrorStatus> ret = recoverable<ErrorStatus, V1_3::IDevice>( + __FUNCTION__, + [&otp, &modelCache, &dataCache, &token, + &callback](const sp<V1_3::IDevice>& device) { + return device->prepareModelFromCache_1_3(otp, modelCache, dataCache, token, + callback); + }, + callback); + if (ret.isDeadObject()) { + LOG(ERROR) << "prepareModelFromCache_1_3 failure: " << ret.description(); + return kDeadObject; + } + if (!ret.isOk()) { + LOG(ERROR) << "prepareModelFromCache_1_3 failure: " << ret.description(); + return prepareModelFailure(); + } + if (ret != ErrorStatus::NONE) { + LOG(ERROR) << "prepareModelFromCache_1_3 returned " + << toString(static_cast<ErrorStatus>(ret)); + return prepareModelFailure(ret); + } + return prepareModelResult(*callback, "prepareModelFromCache_1_3", kServiceName); + } + + // version 1.2 HAL + if (getDevice<V1_2::IDevice>() != nullptr) { + const sp<PreparedModelCallback> callback = new PreparedModelCallback(); + const Return<V1_0::ErrorStatus> ret = recoverable<V1_0::ErrorStatus, V1_2::IDevice>( + __FUNCTION__, + [&modelCache, &dataCache, &token, &callback](const sp<V1_2::IDevice>& device) { + return device->prepareModelFromCache(modelCache, dataCache, token, callback); + }, + callback); + if (ret.isDeadObject()) { + LOG(ERROR) << "prepareModelFromCache failure: " << ret.description(); + return kDeadObject; + } + if (!ret.isOk()) { + LOG(ERROR) << "prepareModelFromCache failure: " << ret.description(); + return prepareModelFailure(); + } + const V1_0::ErrorStatus status = static_cast<V1_0::ErrorStatus>(ret); + if (status != 
V1_0::ErrorStatus::NONE) { + LOG(ERROR) << "prepareModelFromCache returned " << toString(status); + return prepareModelFailure(convertToV1_3(status)); + } + return prepareModelResult(*callback, "prepareModelFromCache", kServiceName); + } + + // version too low + if (getDevice<V1_0::IDevice>() != nullptr) { + LOG(ERROR) << "prepareModelFromCache called on V1_1 or V1_0 device"; + return prepareModelFailure(); + } + + // No device available + LOG(ERROR) << "prepareModelFromCache called with no device"; + return prepareModelFailure(); +} + +std::pair<int, std::shared_ptr<VersionedIPreparedModel>> VersionedIDevice::prepareModel( + const ModelFactory& makeModel, ExecutionPreference preference, Priority priority, + const std::optional<Deadline>& deadline, const std::string& cacheDir, + const std::optional<CacheToken>& maybeToken) const { + // Attempt to compile from cache if token is present. + if (maybeToken.has_value()) { + const auto [n, preparedModel] = + prepareModelFromCacheInternal(deadline, cacheDir, *maybeToken); + if (n == ANEURALNETWORKS_NO_ERROR) { + return {n, preparedModel}; + } + } + + // Fallback to full compilation (possibly with token) if + // prepareModelFromCache could not be used or failed. 
+ const Model model = makeModel(); + return prepareModelInternal(model, preference, priority, deadline, cacheDir, maybeToken); +} + +int64_t VersionedIDevice::getFeatureLevel() const { + constexpr int64_t kFailure = -1; + + if (getDevice<V1_3::IDevice>() != nullptr) { + return __ANDROID_API_R__; + } else if (getDevice<V1_2::IDevice>() != nullptr) { + return __ANDROID_API_Q__; + } else if (getDevice<V1_1::IDevice>() != nullptr) { + return __ANDROID_API_P__; + } else if (getDevice<V1_0::IDevice>() != nullptr) { + return __ANDROID_API_O_MR1__; + } else { + LOG(ERROR) << "Device not available!"; + return kFailure; + } +} + +int32_t VersionedIDevice::getType() const { + return kType; +} + +const std::string& VersionedIDevice::getVersionString() const { + return kVersionString; +} + +std::pair<uint32_t, uint32_t> VersionedIDevice::getNumberOfCacheFilesNeeded() const { + return kNumberOfCacheFilesNeeded; +} + +const std::string& VersionedIDevice::getName() const { + return kServiceName; +} + +std::tuple<ErrorStatus, sp<IBuffer>, uint32_t> VersionedIDevice::allocate( + const BufferDesc& desc, + const std::vector<std::shared_ptr<VersionedIPreparedModel>>& versionedPreparedModels, + const hidl_vec<BufferRole>& inputRoles, const hidl_vec<BufferRole>& outputRoles) const { + const auto kFailure = std::make_tuple<ErrorStatus, sp<IBuffer>, uint32_t>( + ErrorStatus::GENERAL_FAILURE, nullptr, 0); + + // version 1.3+ HAL + if (getDevice<V1_3::IDevice>() != nullptr) { + hidl_vec<sp<V1_3::IPreparedModel>> preparedModels(versionedPreparedModels.size()); + std::transform(versionedPreparedModels.begin(), versionedPreparedModels.end(), + preparedModels.begin(), + [](const auto& preparedModel) { return preparedModel->getV1_3(); }); + + std::tuple<ErrorStatus, sp<IBuffer>, int32_t> result; + const Return<void> ret = recoverable<void, V1_3::IDevice>( + __FUNCTION__, [&](const sp<V1_3::IDevice>& device) { + return device->allocate(desc, preparedModels, inputRoles, outputRoles, + 
[&result](ErrorStatus error, const sp<IBuffer>& buffer, + uint32_t token) { + result = {error, buffer, token}; + }); + }); + if (!ret.isOk()) { + LOG(ERROR) << "allocate failure: " << ret.description(); + return kFailure; + } + return result; + } + + // version too low or no device available + LOG(ERROR) << "Could not handle allocate"; + return kFailure; +} + +} // namespace nn +} // namespace android
diff --git a/runtime/VersionedInterfaces.h b/runtime/VersionedInterfaces.h new file mode 100644 index 0000000..efde0bd --- /dev/null +++ b/runtime/VersionedInterfaces.h
@@ -0,0 +1,826 @@ +/* + * Copyright (C) 2018 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ANDROID_FRAMEWORKS_ML_NN_RUNTIME_VERSIONED_INTERFACES_H +#define ANDROID_FRAMEWORKS_ML_NN_RUNTIME_VERSIONED_INTERFACES_H + +#include <android-base/macros.h> + +#include <cstddef> +#include <functional> +#include <memory> +#include <optional> +#include <shared_mutex> +#include <string> +#include <tuple> +#include <utility> +#include <vector> + +#include "Callbacks.h" +#include "HalInterfaces.h" +#include "Utils.h" + +namespace android { +namespace nn { + +// forward declarations +class ExecutionBurstController; +class IDeviceDeathHandler; +class IPreparedModelDeathHandler; +class MetaModel; +class VersionedIPreparedModel; + +/** + * Each class (VersionedIDevice, VersionedIPreparedModel) wraps a HIDL interface + * of any version to abstract away version differences. It allows the remainder + * of the runtime to always use the most up-to-date version of all HIDL types. + * As such, any reference to a HIDL type in the rest of the runtime + * will--by default--be the latest HIDL version. + * + * Each class will attempt to call the latest version of each interface method + * if possible. If the latest method is unavailable, the versioned class + * will attempt to upcast the type (e.g., V1_1::Model to V1_0::Model), and + * invoke the latest interface method possible. 
If the versioned class + * fails to find a matching applicable function, it will return an error. + */ + +/** This class wraps an IDevice object of any version. */ +class VersionedIDevice { + DISALLOW_IMPLICIT_CONSTRUCTORS(VersionedIDevice); + + // forward declaration of nested class + class Core; + + public: + /** + * Create a VersionedIDevice object. + * + * Prefer using this function over the constructor, as it adds more + * protections. + * + * @param serviceName The name of the service that provides "device". + * @param makeDevice A device factory function that returns a device object + * that is at least version 1.0 of the IDevice interface. + * @return A valid VersionedIDevice object, otherwise nullptr. + */ + static std::shared_ptr<VersionedIDevice> create(std::string serviceName, + const hal::DeviceFactory& makeDevice); + + /** + * Constructor for the VersionedIDevice object. + * + * VersionedIDevice will default to using the latest version of all IDevice + * interface methods automatically. + * + * @param capabilities Performance capabilities of the driver. + * @param supportedExtensions Extensions supported by the driver. + * @param type The device type of the driver. + * @param versionString The version string of the driver. + * @param numberOfCacheFilesNeeded Number of model cache and data cache + * files needed by the driver. + * @param serviceName The name of the service that provides core.getDevice<V1_0::IDevice>(). + * @param makeDevice A device factory function that returns a device object + * that is at least version 1.0 of the IDevice interface. + * @param core An object that encapsulates a V1_0::IDevice, any appropriate downcasts to + * newer interfaces, and a hidl_death_recipient that will proactively handle + * the case when the service containing the IDevice object crashes. 
+ */ + VersionedIDevice(hal::Capabilities capabilities, + std::vector<hal::Extension> supportedExtensions, int32_t type, + std::string versionString, + std::pair<uint32_t, uint32_t> numberOfCacheFilesNeeded, + std::string serviceName, const hal::DeviceFactory& makeDevice, Core core); + + /** + * Gets the capabilities of a driver. + * + * @return capabilities Capabilities of the driver. + */ + const hal::Capabilities& getCapabilities() const; + + /** + * Gets information about extensions supported by the driver implementation. + * + * Extensions of category ExtensionCategory::BASE must not appear + * in the list. + * + * All extension operations and operands must be fully supported for the + * extension to appear in the list of supported extensions. + * + * @return extensions A list of supported extensions. + */ + const std::vector<hal::Extension>& getSupportedExtensions() const; + + /** + * Gets the supported operations in a MetaModel. + * + * getSupportedOperations indicates which operations of + * MetaModel::getModel() are fully supported by the vendor driver. If an + * operation may not be supported for any reason, getSupportedOperations + * must return false for that operation. + * + * @param metaModel A MetaModel whose operations--and their corresponding + * operands--are to be verified by the driver. When + * metaModel.getModel() is not compliant with the HAL + * version of the vendor driver, the MetaModel's slicing + * functionality (MetaModel::getSlice*()) is employed + * to query the vendor driver about which of the subset of + * compliant operations are supported. See the MetaModel + * class in MetaModel.h for more details. 
+ * @return status Error status of the call, must be: + * - NONE if successful + * - DEVICE_UNAVAILABLE if driver is offline or busy + * - GENERAL_FAILURE if there is an unspecified error + * - INVALID_ARGUMENT if provided model is invalid + * @return supportedOperations A list of supported operations, where true + * indicates the operation is supported and + * false indicates the operation is not + * supported. The index of "supported" + * corresponds with the index of the operation + * it is describing. + */ + std::pair<hal::ErrorStatus, hal::hidl_vec<bool>> getSupportedOperations( + const MetaModel& metaModel) const; + + /** + * Creates a prepared model for execution. + * + * prepareModel is used to make any necessary transformations or alternative + * representations to a model for execution, possibly including + * transformations on the constant data, optimization on the model's graph, + * or compilation into the device's native binary format. The model itself + * is not changed. + * + * Optionally, caching information may be provided for the driver to either: + * - load the prepared model from cache, bypassing full model preparation + * - save the prepared model to cache for faster model compilation time when + * the same model preparation is requested in the future + * + * The prepareModel function must verify the inputs to the prepareModel + * function are correct. If there is an error, prepareModel must immediately + * return the appropriate result code and nullptr for the + * VersionedIPreparedModel. If the inputs to the prepareModel function are + * valid and there is no error, prepareModel must prepare the model. + * + * If the model was prepared successfully, prepareModel must return + * ANEURALNETWORKS_NO_ERROR and the produced VersionedIPreparedModel object. + * If an error occurred preparing the model, prepareModel must return the + * appropriate result code and nullptr for the VersionedIPreparedModel. 
+ * + * The only information that may be unknown to the model at this stage is + * the shape of the tensors, which may only be known at execution time. As + * such, some driver services may return partially prepared models, where + * the prepared model may only be finished when it is paired with a set of + * inputs to the model. Note that the same prepared model object may be + * used with different shapes of inputs on different (possibly concurrent) + * executions. + * + * Multiple threads may call prepareModel on the same model concurrently. + * + * @param makeModel Factory function to create the model to be prepared for + * execution. + * @param preference Indicates the intended execution behavior of a prepared + * model. + * @param priority Priority of the prepared model relative to other prepared + * models owned by an application. + * @param deadline Optional time point. If provided, prepareModel is + * expected to complete by this time point. If it is not able to be + * completed by the deadline, the execution may be aborted. + * @param cacheDir String specifying the cache directory. + * @param maybeToken An optional caching token of length + * Constant::BYTE_SIZE_OF_CACHE_TOKEN identifying the prepared model. + * The same token will be provided when retrieving the prepared model + * from the cache files with prepareModelFromCache. Tokens should be + * chosen to have a low rate of collision for a particular application. + * The driver cannot detect a collision; a collision will result in a + * failed execution or in a successful execution that produces incorrect + * output values. If both modelCache and dataCache are empty indicating + * that caching information is not provided, this token must be ignored. 
+ * @return A pair of: + * - Result code of preparing the model; must be: + * - ANEURALNETWORKS_NO_ERROR if preparation succeeded + * - ANEURALNETWORKS_UNAVAILABLE_DEVICE if driver is offline or busy + * - ANEURALNETWORKS_OP_FAILED if there is an unspecified error + * - ANEURALNETWORKS_BAD_DATA if one of the input arguments related + * to preparing the model is invalid + * - preparedModel A VersionedIPreparedModel object representing a model + * that has been prepared for execution, else nullptr. + */ + std::pair<int, std::shared_ptr<VersionedIPreparedModel>> prepareModel( + const hal::ModelFactory& makeModel, hal::ExecutionPreference preference, hal::Priority, + const std::optional<Deadline>& deadline, const std::string& cacheDir, + const std::optional<hal::CacheToken>& maybeToken) const; + + /** + * Returns the feature level of a driver. + * + * @return featureLevel The API level of the most advanced feature this driver implements. + * For example, if the driver implements the features introduced in + * Android P, the value would be 28. + * Return -1 if the driver is offline or busy, or the query resulted in + * an unspecified error. + */ + int64_t getFeatureLevel() const; + + /** + * Returns the device type of a driver. + * + * @return deviceType The type of a given device, which can help application + * developers to distribute Machine Learning workloads and other + * workloads such as graphical rendering. E.g., for an app which renders + * AR scenes based on real time object detection results, the developer + * could choose an ACCELERATOR type device for ML workloads, and reserve + * GPU for graphical rendering. + */ + int32_t getType() const; + + /** + * Get the version string of the driver implementation. + * + * The version string must be a unique token among the set of version strings of + * drivers of a specific device. The token identifies the device driver's + * implementation. 
The token must not be confused with the feature level which is solely + * defined by the interface version. This API is opaque to the Android framework, but the + * Android framework may use the information for debugging or to pass on to NNAPI applications. + * + * Application developers sometimes have specific requirements to ensure good user experiences, + * and they need more information to make intelligent decisions when the Android framework + * cannot. For example, combined with the device name and other information, the token can help + * NNAPI applications filter devices based on their needs: + * - An application demands a certain level of performance, but a specific version of + * the driver cannot meet that requirement because of a performance regression. + * The application can blacklist the driver based on the version provided. + * - An application has a minimum precision requirement, but certain versions of + * the driver cannot meet that requirement because of bugs or certain optimizations. + * The application can filter out versions of these drivers. + * + * @return version The version string of the device implementation. + */ + const std::string& getVersionString() const; + + /** + * Gets the caching requirements of the driver implementation. + * + * There are two types of cache file descriptors provided to the driver: model cache + * and data cache. + * + * The data cache is for caching constant data, possibly including preprocessed + * and transformed tensor buffers. Any modification to the data cache should + * have no worse effect than generating bad output values at execution time. + * + * The model cache is for caching security-sensitive data such as compiled + * executable machine code in the device's native binary format. A modification + * to the model cache may affect the driver's execution behavior, and a malicious + * client could make use of this to execute beyond the granted permission. 
Thus, + * the driver must always check whether the model cache is corrupted before + * preparing the model from cache. + * + * getNumberOfCacheFilesNeeded returns how many of each type of cache files the driver + * implementation needs to cache a single prepared model. Returning 0 for both types + * indicates compilation caching is not supported by this driver. The driver may + * still choose not to cache certain compiled models even if it reports that caching + * is supported. + * + * If the device reports that caching is not supported, the user may avoid calling + * IDevice::prepareModelFromCache or providing cache file descriptors to + * IDevice::prepareModel_1_2. + * + * @return numModelCache An unsigned integer indicating how many files for model cache + * the driver needs to cache a single prepared model. It must + * be less than or equal to Constant::MAX_NUMBER_OF_CACHE_FILES. + * @return numDataCache An unsigned integer indicating how many files for data cache + * the driver needs to cache a single prepared model. It must + * be less than or equal to Constant::MAX_NUMBER_OF_CACHE_FILES. + */ + std::pair<uint32_t, uint32_t> getNumberOfCacheFilesNeeded() const; + + /** + * Returns the name of the service. + * + * @return Name of the service. + */ + const std::string& getName() const; + + /** + * Allocates a driver-managed buffer with the properties specified by the descriptor as well as + * the input and output roles of prepared models. + * + * The allocate function must verify the inputs to the allocate function are correct. If there + * is an error, or if a certain role or property is not supported by the driver, the allocate + * function must return with an appropriate ErrorStatus, a nullptr as the IBuffer, and 0 as the + * buffer token. If the allocation is successful, this method must return with ErrorStatus::NONE + * and the produced IBuffer with a positive token identifying the allocated buffer. 
A successful
+     * allocation must accommodate all of the specified roles and buffer properties.
+     *
+     * The buffer is allocated in an uninitialized state. An uninitialized buffer may only be used
+     * in ways that are specified by outputRoles. A buffer is initialized after it is used as an
+     * output in a successful execution, or after a successful invocation of IBuffer::copyFrom on
+     * the buffer. An initialized buffer may be used according to all roles specified in inputRoles
+     * and outputRoles. A buffer will return to the uninitialized state if it is used as an output
+     * in a failed execution, or after a failed invocation of IBuffer::copyFrom on the buffer.
+     *
+     * The driver may deduce the dimensions of the buffer according to the buffer descriptor as
+     * well as the input and output roles. The dimensions or rank of the buffer may be unknown at
+     * this stage. As such, some driver services may only create a placeholder and defer the actual
+     * allocation until execution time. Note that the same buffer may be used for different shapes
+     * of outputs on different executions. When the buffer is used as an input, the input shape
+     * must be the same as the output shape from the last execution using this buffer as an output.
+     *
+     * The driver must apply proper validation upon every usage of the buffer, and fail the
+     * execution immediately if the usage is illegal.
+     *
+     * @param desc A buffer descriptor specifying the properties of the buffer to allocate.
+     * @param preparedModels A vector of IPreparedModel objects. Must only contain IPreparedModel
+     *     objects from the same IDevice as this method invoked on.
+     * @param inputRoles A vector of roles with each specifying an input to a prepared model.
+     * @param outputRoles A vector of roles with each specifying an output to a prepared model.
+     *     Each role specified in inputRoles and outputRoles must be unique.
The corresponding + * model operands of the roles must have the same OperandType, scale, zero point, and + * ExtraParams. The dimensions of the operands and the dimensions specified in the buffer + * descriptor must be compatible with each other. Two dimensions are incompatible if there + * is at least one axis that is fully specified in both but has different values. + * @return A tuple consisting of: + * - Error status of the buffer allocation. Must be: + * - NONE if successful + * - DEVICE_UNAVAILABLE if driver is offline or busy + * - GENERAL_FAILURE if a certain buffer property or a certain role is not supported, + * or if there is an unspecified error + * - INVALID_ARGUMENT if one of the input arguments is invalid + * - The allocated IBuffer object. If the buffer was unable to be allocated + * due to an error, nullptr must be returned. + * - A positive token identifying the allocated buffer. The same token will be + * provided when referencing the buffer as one of the memory pools in the request of an + * execution. If the buffer was unable to be allocated due to an error, the token must be + * 0. + */ + std::tuple<hal::ErrorStatus, sp<hal::IBuffer>, uint32_t> allocate( + const hal::BufferDesc& desc, + const std::vector<std::shared_ptr<VersionedIPreparedModel>>& preparedModels, + const hal::hidl_vec<hal::BufferRole>& inputRoles, + const hal::hidl_vec<hal::BufferRole>& outputRoles) const; + + /** + * Blocks until the device is not in a bad state. + * + * @return Error code after waiting. ANEURALNETWORKS_NO_ERROR if device is + * not in a bad state. + */ + int wait() const; + + private: + // Cached initialization results. 
+ const hal::Capabilities kCapabilities; + const std::vector<hal::Extension> kSupportedExtensions; + const int32_t kType; + const std::string kVersionString; + const std::pair<uint32_t, uint32_t> kNumberOfCacheFilesNeeded; + + // internal methods to prepare a model + std::pair<int, std::shared_ptr<VersionedIPreparedModel>> prepareModelInternal( + const hal::Model& model, hal::ExecutionPreference preference, hal::Priority priority, + const std::optional<Deadline>& deadline, const std::string& cacheDir, + const std::optional<hal::CacheToken>& maybeToken) const; + std::pair<int, std::shared_ptr<VersionedIPreparedModel>> prepareModelFromCacheInternal( + const std::optional<Deadline>& deadline, const std::string& cacheDir, + const hal::CacheToken& token) const; + + /** + * This is a utility class for VersionedIDevice that encapsulates a + * V1_0::IDevice, any appropriate downcasts to newer interfaces, and a + * hidl_death_recipient that will proactively handle the case when the + * service containing the IDevice object crashes. + * + * This is a convenience class to help VersionedIDevice recover from an + * IDevice object crash: It bundles together all the data that needs to + * change when recovering from a crash, and simplifies the process of + * instantiating that data (at VersionedIDevice creation time) and + * re-instantiating that data (at crash recovery time). + */ + class Core { + public: + /** + * Constructor for the Core object. + * + * Core is constructed with a V1_0::IDevice object, which represents a + * device that is at least v1.0 of the interface. The constructor + * downcasts to the latest version of the IDevice interface, allowing + * VersionedIDevice to default to using the latest version of all + * IDevice interface methods automatically. + * + * @param device A device object that is at least version 1.0 of the IDevice + * interface. 
+ * @param deathHandler A hidl_death_recipient that will proactively handle + * the case when the service containing the IDevice + * object crashes. + */ + Core(sp<hal::V1_0::IDevice> device, sp<IDeviceDeathHandler> deathHandler); + + /** + * Destructor for the Core object. + * + * This destructor unlinksToDeath this object's hidl_death_recipient as it + * no longer needs to handle the case where the IDevice's service crashes. + */ + ~Core(); + + // Support move but not copy + Core(Core&&) noexcept; + Core& operator=(Core&&) noexcept; + Core(const Core&) = delete; + Core& operator=(const Core&) = delete; + + /** + * Create a Core object. + * + * Prefer using this function over the constructor, as it adds more + * protections. + * + * This call linksToDeath a hidl_death_recipient that can + * proactively handle the case when the service containing the IDevice + * object crashes. + * + * @param device A device object that is at least version 1.0 of the IDevice + * interface. + * @return A valid Core object, otherwise nullopt. + */ + static std::optional<Core> create(sp<hal::V1_0::IDevice> device); + + /** + * Returns sp<*::IDevice> that is a downcast of the sp<V1_0::IDevice> + * passed to the constructor. This will be nullptr if that IDevice is + * not actually of the specified downcast type. + */ + template <typename T_IDevice> + sp<T_IDevice> getDevice() const; + template <> + sp<hal::V1_0::IDevice> getDevice() const { + return mDeviceV1_0; + } + template <> + sp<hal::V1_1::IDevice> getDevice() const { + return mDeviceV1_1; + } + template <> + sp<hal::V1_2::IDevice> getDevice() const { + return mDeviceV1_2; + } + template <> + sp<hal::V1_3::IDevice> getDevice() const { + return mDeviceV1_3; + } + + /** + * Returns sp<*::IDevice> (as per getDevice()) and the + * hidl_death_recipient that will proactively handle the case when the + * service containing the IDevice object crashes. 
+ */ + template <typename T_IDevice> + std::pair<sp<T_IDevice>, sp<IDeviceDeathHandler>> getDeviceAndDeathHandler() const; + + private: + /** + * All versions of IDevice are necessary because the driver could be v1.0, + * v1.1, or a later version. All these pointers logically represent the same + * object. + * + * The general strategy is: HIDL returns a V1_0 device object, which + * (if not nullptr) could be v1.0, v1.1, or a greater version. The V1_0 + * object is then "dynamically cast" to a V1_1 object. If successful, + * mDeviceV1_1 will point to the same object as mDeviceV1_0; otherwise, + * mDeviceV1_1 will be nullptr. + * + * In general: + * * If the device is truly v1.0, mDeviceV1_0 will point to a valid object + * and mDeviceV1_1 will be nullptr. + * * If the device is truly v1.1 or later, both mDeviceV1_0 and mDeviceV1_1 + * will point to the same valid object. + * + * Idiomatic usage: if mDeviceV1_1 is non-null, do V1_1 dispatch; otherwise, + * do V1_0 dispatch. + */ + sp<hal::V1_0::IDevice> mDeviceV1_0; + sp<hal::V1_1::IDevice> mDeviceV1_1; + sp<hal::V1_2::IDevice> mDeviceV1_2; + sp<hal::V1_3::IDevice> mDeviceV1_3; + + /** + * HIDL callback to be invoked if the service for mDeviceV1_0 crashes. + * + * nullptr if this Core instance is a move victim and hence has no + * callback to be unlinked. + */ + sp<IDeviceDeathHandler> mDeathHandler; + }; + + // This method retrieves the appropriate mCore.mDevice* field, under a read lock. + template <typename T_IDevice> + sp<T_IDevice> getDevice() const EXCLUDES(mMutex) { + std::shared_lock lock(mMutex); + return mCore.getDevice<T_IDevice>(); + } + + // This method retrieves the appropriate mCore.mDevice* fields, under a read lock. 
+ template <typename T_IDevice> + auto getDeviceAndDeathHandler() const EXCLUDES(mMutex) { + std::shared_lock lock(mMutex); + return mCore.getDeviceAndDeathHandler<T_IDevice>(); + } + + // This method calls the function fn in a manner that supports recovering + // from a driver crash: If the driver implementation is dead because the + // driver crashed either before the call to fn or during the call to fn, we + // will attempt to obtain a new instance of the same driver and call fn + // again. + // + // If a callback is provided, this method protects it against driver death + // and waits for it (callback->wait()). + template <typename T_Return, typename T_IDevice, typename T_Callback = std::nullptr_t> + hal::Return<T_Return> recoverable( + const char* context, + const std::function<hal::Return<T_Return>(const sp<T_IDevice>&)>& fn, + const T_Callback& callback = nullptr) const EXCLUDES(mMutex); + + // The name of the service that implements the driver. + const std::string kServiceName; + + // Factory function object to generate an IDevice object. + const hal::DeviceFactory kMakeDevice; + + // Guards access to mCore. + mutable std::shared_mutex mMutex; + + // Data that can be rewritten during driver recovery. Guarded againt + // synchronous access by a mutex: Any number of concurrent read accesses is + // permitted, but a write access excludes all other accesses. + mutable Core mCore GUARDED_BY(mMutex); +}; + +/** This class wraps an IPreparedModel object of any version. */ +class VersionedIPreparedModel { + DISALLOW_IMPLICIT_CONSTRUCTORS(VersionedIPreparedModel); + + public: + /** + * Constructor for the VersionedIPreparedModel object. + * + * This constructor should not be used directly. Instead, + * VersionedIPreparedModel should be created via + * VersionedIDevice::prepareModel*. + * + * VersionedIPreparedModel is constructed with the V1_0::IPreparedModel object, which + * represents a device that is at least v1.0 of the interface. 
The constructor downcasts + * to the latest version of the IPreparedModel interface, and will default to using the + * latest version of all IPreparedModel interface methods automatically. + * + * @param preparedModel A prepared model object that is least version 1.0 of the + * IPreparedModel interface. + * @param deathHandler A hidl_death_recipient that will proactively handle + * the case when the service containing the IDevice + * object crashes. + */ + VersionedIPreparedModel(sp<hal::V1_0::IPreparedModel> preparedModel, + sp<IPreparedModelDeathHandler> deathHandler); + + /** + * Destructor for the VersionedIPreparedModel object. + * + * This destructor unlinksToDeath this object's hidl_death_recipient as it + * no longer needs to handle the case where the IPreparedModel's service + * crashes. + */ + ~VersionedIPreparedModel(); + + /** + * Performs a synchronous execution on a prepared model. + * + * The execution is performed synchronously with respect to the caller. + * VersionedIPreparedModel::execute must verify the inputs to the function + * are correct. If there is an error, VersionedIPreparedModel::execute must + * immediately return with the appropriate result code. If the inputs to the + * function are valid and there is no error, + * VersionedIPreparedModel::execute must perform the execution, and must not + * return until the execution is complete. + * + * If the prepared model was prepared from a model wherein all tensor + * operands have fully specified dimensions, and the inputs to the function + * are valid, and at execution time every operation's input operands have + * legal values, then the execution should complete successfully + * (ANEURALNETWORKS_NO_ERROR): There must be no failure unless the device + * itself is in a bad state. + * + * execute may be called with an optional deadline. 
If the execution is not + * able to be completed before the provided deadline, the execution may be + * aborted, and either {@link ErrorStatus::MISSED_DEADLINE_TRANSIENT} or + * {@link ErrorStatus::MISSED_DEADLINE_PERSISTENT} must be returned. The + * error due to an abort must be sent the same way as other errors, + * described above. + * + * Any number of calls to the VersionedIPreparedModel::execute function, in + * any combination, may be made concurrently, even on the same + * VersionedIPreparedModel object. + * + * @param request The input and output information on which the prepared + * model is to be executed. + * @param measure Specifies whether or not to measure duration of the + * execution. + * @param deadline Optional time point. If provided, prepareModel is + * expected to complete by this time point. If it is not able to be + * completed by the deadline, the execution may be aborted. + * @param loopTimeoutDuration The maximum amount of time that should be spent + * executing a {@link OperationType::WHILE} operation. If a loop + * condition model does not output false within this duration, the + * execution must be aborted. If no loop timeout duration is provided, + * the maximum amount of time is {@link LoopTimeoutDurationNs::DEFAULT}. + * When provided, the duration must not exceed {@link + * LoopTimeoutDurationNs::MAXIMUM}. + * @param preferSynchronous 'true' to perform synchronous HAL execution when + * possible, 'false' to force asynchronous HAL execution. 
+ * @return A tuple consisting of: + * - Result code of the execution, must be: + * - ANEURALNETWORKS_NO_ERROR if execution is performed successfully + * - ANEURALNETWORKS_UNAVAILABLE_DEVICE if driver is offline or busy + * - ANEURALNETWORKS_OP_FAILED if there is an unspecified error + * - ANEURALNETWORKS_OUTPUT_INSUFFICIENT_SIZE if at least one output + * operand buffer is not large enough to store the corresponding + * output + * - ANEURALNETWORKS_BAD_DATA if one of the input arguments is + * invalid + * - A list of shape information of model output operands. + * The index into "outputShapes" corresponds to the index of the + * output operand in the Request outputs vector. outputShapes must + * be empty unless the result code is either + * ANEURALNETWORKS_NO_ERROR or + * ANEURALNETWORKS_OUTPUT_INSUFFICIENT_SIZE. outputShapes may be + * empty if the result code is ANEURALNETWORKS_NO_ERROR and all + * model output operands are fully-specified at execution time. + * outputShapes must have the same number of elements as the number + * of model output operands if the result code is + * ANEURALNETWORKS_OUTPUT_INSUFFICIENT_SIZE, or if the result code + * is ANEURALNETWORKS_NO_ERROR and the model has at least one output + * operand that is not fully-specified. + * - Duration of execution. Unless measure is YES and result code is + * ANEURALNETWORKS_NO_ERROR, all times must be reported as + * UINT64_MAX. A driver may choose to report any time as UINT64_MAX, + * indicating that measurement is not available. + */ + std::tuple<int, std::vector<hal::OutputShape>, hal::Timing> execute( + const hal::Request& request, hal::MeasureTiming measure, + const std::optional<Deadline>& deadline, + const hal::OptionalTimeoutDuration& loopTimeoutDuration, bool preferSynchronous) const; + + /** + * Creates a burst controller on a prepared model. 
+ * + * @param preferPowerOverLatency 'true' if the Burst object should run in a + * more power efficient mode, 'false' if more + * power can be used to possibly reduce + * burst compute latency. + * @return ExecutionBurstController Execution burst controller object. + * nullptr is returned if the burst cannot + * be configured for any reason. + */ + std::shared_ptr<ExecutionBurstController> configureExecutionBurst( + bool preferPowerOverLatency) const; + + /** + * Launch a fenced asynchronous execution on a prepared model. + * + * The execution is performed asynchronously with respect to the caller. + * executeFenced must fully validate the request. If there is an error during validation, + * executeFenced must immediately return with the corresponding ErrorStatus. If the inputs + * to the function are valid and there is no error and there is no error launching, + * executeFenced must dispatch an asynchronous task to perform the execution in the + * background, and immediately return with ErrorStatus::NONE, a sync fence that will be + * signaled once the execution is completed, and a callback that can be used by the client + * to query the duration and runtime error status. If the task has finished + * before the call returns, empty handle may be returned for the syncFence. If the + * asynchronous task fails to launch, executeFenced must immediately return with + * ErrorStatus::GENERAL_FAILURE, an empty handle for the syncFence, and nullptr + * for callback. The execution must wait for all the sync fences (if any) in waitFor to be + * signaled before starting the actual execution. + * + * If any of sync fences in waitFor changes to error status after the executeFenced + * call succeeds, the driver must immediately set the returned syncFence to error status. + * + * When the asynchronous task has finished its execution, it must + * immediately signal the syncFence returned from executeFenced call. 
After + * the syncFence is signaled, the task must not modify the content of + * any data object referenced by 'request' (described by the + * {@link @1.0::DataLocation} of a {@link @1.0::RequestArgument}). + * + * executeFenced may be called with an optional deadline and an optional + * timeoutDurationAfterFence. If the execution is not able to be completed + * before the provided deadline or within the timeoutDurationAfterFence, + * whichever comes earlier, the execution may be aborted, and either {@link + * ErrorStatus::MISSED_DEADLINE_TRANSIENT} or {@link + * ErrorStatus::MISSED_DEADLINE_PERSISTENT} may be returned. The error due + * to an abort must be sent the same way as other errors, described above. + * + * Any number of calls to the executeFenced, execute* and executeSynchronously* + * functions, in any combination, may be made concurrently, even on the same + * IPreparedModel object. + * + * @param request The input and output information on which the prepared + * model is to be executed. + * @param waitFor A vector of sync fence file descriptors. The execution must + * wait for all sync fence to be signaled before starting the + * task. + * @param measure Specifies whether or not to measure duration of the execution. + * @param deadline The time by which execution is expected to complete. If + * the execution cannot be finished by the deadline, the + * execution may be aborted. + * @param loopTimeoutDuration The maximum amount of time that should be spent + * executing a {@link OperationType::WHILE} operation. If a loop + * condition model does not output false within this duration, the + * execution must be aborted. If no loop timeout duration is provided, + * the maximum amount of time is {@link LoopTimeoutDurationNs::DEFAULT}. + * When provided, the duration must not exceed {@link + * LoopTimeoutDurationNs::MAXIMUM}. 
+ * @param timeoutDurationAfterFence The timeout duration within which the + * execution is expected to complete after + * all sync fences in waitFor are signaled. + * @return A tuple consisting of: + * - Error code of the dispatch call. + * - A sync_fence that will be triggered when the task is completed. + * The sync_fence will be set to error if critical error occurs when doing + * actual evaluation. + * - A callback can be used to query information like duration + * and detailed runtime error status when the task is completed. + * - Optional timing information. Only useful if the call is simulated using + * sync execution. Either IFencedExecutionCallback will be + * returned or optional timing information is returned + */ + std::tuple<int, hal::hidl_handle, sp<hal::IFencedExecutionCallback>, hal::Timing> executeFenced( + const hal::Request& request, const hal::hidl_vec<hal::hidl_handle>& waitFor, + hal::MeasureTiming measure, const std::optional<Deadline>& deadline, + const hal::OptionalTimeoutDuration& loopTimeoutDuration, + const hal::OptionalTimeoutDuration& timeoutDurationAfterFence); + + private: + friend class VersionedIDevice; + + std::tuple<int, std::vector<hal::OutputShape>, hal::Timing> executeAsynchronously( + const hal::Request& request, hal::MeasureTiming timing, + const std::optional<Deadline>& deadline, + const hal::OptionalTimeoutDuration& loopTimeoutDuration) const; + std::tuple<int, std::vector<hal::OutputShape>, hal::Timing> executeSynchronously( + const hal::Request& request, hal::MeasureTiming measure, + const std::optional<Deadline>& deadline, + const hal::OptionalTimeoutDuration& loopTimeoutDuration) const; + + /** + * Returns sp<V1_3::IPreparedModel> that is a downcast of the sp<V1_0::IPreparedModel> + * passed to the constructor. This will be nullptr if that IPreparedModel is + * not actually of the specified downcast type. 
+ */ + sp<hal::V1_3::IPreparedModel> getV1_3() const { return mPreparedModelV1_3; } + + /** + * All versions of IPreparedModel are necessary because the preparedModel could be v1.0, + * v1.2, or a later version. All these pointers logically represent the same object. + * + * The general strategy is: HIDL returns a V1_0 prepared model object, which + * (if not nullptr) could be v1.0, v1.2, or a greater version. The V1_0 + * object is then "dynamically cast" to objects of later versions. If successful, + * mPreparedModel* will point to the same object as mPreparedModelV1_0; otherwise, + * mPreparedModel* will be nullptr. + * + * In general: + * * If the prepared model is truly v1.0, mPreparedModelV1_0 will point to a valid object, + * both mPreparedModelV1_2 and mPreparedModelV1_3 will be nullptr. + * * If the prepared model is truly v1.2, both mPreparedModelV1_0 and mPreparedModelV1_2 + * will point to the same valid object, but mPreparedModelV1_3 will be nullptr. + * * If the prepared model is truly v1.3 or later, all of mPreparedModelV1_0, + * mPreparedModelV1_2, and mPreparedModelV1_3 will point to the same valid object. + * + * Idiomatic usage: if mPreparedModelV1_3 is non-null, do V1_3 dispatch; + * otherwise, if mPreparedModelV1_2 is non-null, do V1_2 dispatch; + * otherwise, do V1_0 dispatch. + */ + sp<hal::V1_0::IPreparedModel> mPreparedModelV1_0; + sp<hal::V1_2::IPreparedModel> mPreparedModelV1_2; + sp<hal::V1_3::IPreparedModel> mPreparedModelV1_3; + + /** + * HIDL callback to be invoked if the service for mPreparedModelV1_0 crashes. + */ + const sp<IPreparedModelDeathHandler> mDeathHandler; +}; + +} // namespace nn +} // namespace android + +#endif // ANDROID_FRAMEWORKS_ML_NN_RUNTIME_VERSIONED_INTERFACES_H
diff --git a/runtime/include/NeuralNetworks.h b/runtime/include/NeuralNetworks.h index a6a10d0..1eb2f54 100644 --- a/runtime/include/NeuralNetworks.h +++ b/runtime/include/NeuralNetworks.h
@@ -43,29 +43,6276 @@ */ #include <android/hardware_buffer.h> -#include <stdbool.h> #include <stddef.h> #include <stdint.h> #include <sys/cdefs.h> -#include "NeuralNetworksTypes.h" - -// This is required for building libneuralnetworks_cl, -// the symbols have same names as in NDK, but -// they are not bounded by API availability. -#ifdef NN_COMPATIBILITY_LIBRARY_BUILD -#define __NNAPI_INTRODUCED_IN(x) -#else -#define __NNAPI_INTRODUCED_IN(x) __INTRODUCED_IN(x) -#endif - -#ifndef __NNAPI_FL5_MIN_ANDROID_API__ -#define __NNAPI_FL5_MIN_ANDROID_API__ __ANDROID_API_S__ -#endif - __BEGIN_DECLS /** + * Operand types. + * + * The type of an operand in a model. + * + * Types prefaced with ANEURALNETWORKS_TENSOR_* must be used for tensor data (i.e., tensors + * with at least one dimension). Types not prefaced by ANEURALNETWORKS_TENSOR_* represent + * scalar values and must have no dimensions. + * + * Although we define many types, most operators accept just a few + * types. Most used are {@link ANEURALNETWORKS_TENSOR_FLOAT32}, + * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, + * and {@link ANEURALNETWORKS_INT32}. + * + * Available since API level 27. + */ +typedef enum { + /** A 32 bit floating point scalar value. */ + ANEURALNETWORKS_FLOAT32 = 0, + /** A signed 32 bit integer scalar value. */ + ANEURALNETWORKS_INT32 = 1, + /** An unsigned 32 bit integer scalar value. */ + ANEURALNETWORKS_UINT32 = 2, + /** A tensor of 32 bit floating point values. */ + ANEURALNETWORKS_TENSOR_FLOAT32 = 3, + /** A tensor of 32 bit integer values. */ + ANEURALNETWORKS_TENSOR_INT32 = 4, + /** + * A tensor of 8 bit unsigned integers that represent real numbers. + * + * Attached to this tensor are two numbers that can be used to convert the + * 8 bit integer to the real value and vice versa. These two numbers are: + * - scale: a 32 bit floating point value greater than zero. + * - zeroPoint: a 32 bit integer, in range [0, 255]. 
+ * + * The formula is: + * real_value = (integer_value - zeroPoint) * scale. + */ + ANEURALNETWORKS_TENSOR_QUANT8_ASYMM = 5, + /** + * An 8 bit boolean scalar value. + * + * Values of this operand type are either true or false. A zero value + * represents false; any other value represents true. + * + * Available since API level 29. + */ + ANEURALNETWORKS_BOOL = 6, + /** + * A tensor of 16 bit signed integers that represent real numbers. + * + * Attached to this tensor is a number representing real value scale that is + * used to convert the 16 bit number to a real value in the following way: + * realValue = integerValue * scale. + * + * scale is a 32 bit floating point with value greater than zero. + * + * Available since API level 29. + */ + ANEURALNETWORKS_TENSOR_QUANT16_SYMM = 7, + /** + * A tensor of IEEE 754 16 bit floating point values. + * + * Available since API level 29. + */ + ANEURALNETWORKS_TENSOR_FLOAT16 = 8, + /** + * A tensor of 8 bit boolean values. + * + * Values of this operand type are either true or false. A zero value + * represents false; any other value represents true. + * + * Available since API level 29. + */ + ANEURALNETWORKS_TENSOR_BOOL8 = 9, + /** + * An IEEE 754 16 bit floating point scalar value. + * + * Available since API level 29. + */ + ANEURALNETWORKS_FLOAT16 = 10, + /** + * A tensor of 8 bit signed integers that represent real numbers. + * + * This tensor is associated with additional fields that can + * be used to convert the 8 bit signed integer to the real value and vice versa. + * These fields are: + * - channelDim: a 32 bit unsigned integer indicating channel dimension. + * - scales: an array of positive 32 bit floating point values. + * The size of the scales array must be equal to dimensions[channelDim]. + * + * {@link ANeuralNetworksModel_setOperandSymmPerChannelQuantParams} must be used + * to set the parameters for an Operand of this type. 
+ * + * The channel dimension of this tensor must not be unknown (dimensions[channelDim] != 0). + * + * The formula is: + * realValue[..., C, ...] = + * integerValue[..., C, ...] * scales[C] + * where C is an index in the Channel dimension. + * + * Available since API level 29. + */ + ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL = 11, + /** + * A tensor of 16 bit unsigned integers that represent real numbers. + * + * Attached to this tensor are two numbers that can be used to convert the + * 16 bit integer to the real value and vice versa. These two numbers are: + * - scale: a 32 bit floating point value greater than zero. + * - zeroPoint: a 32 bit integer, in range [0, 65535]. + * + * The formula is: + * real_value = (integer_value - zeroPoint) * scale. + * + * Available since API level 29. + */ + ANEURALNETWORKS_TENSOR_QUANT16_ASYMM = 12, + /** + * A tensor of 8 bit signed integers that represent real numbers. + * + * Attached to this tensor is a number representing real value scale that is + * used to convert the 8 bit number to a real value in the following way: + * realValue = integerValue * scale. + * + * scale is a 32 bit floating point with value greater than zero. + * + * Available since API level 29. + */ + ANEURALNETWORKS_TENSOR_QUANT8_SYMM = 13, + /** + * A tensor of 8 bit signed integers that represent real numbers. + * + * Attached to this tensor are two numbers that can be used to convert the + * 8 bit integer to the real value and vice versa. These two numbers are: + * - scale: a 32 bit floating point value greater than zero. + * - zeroPoint: a 32 bit integer, in range [-128, 127]. + * + * The formula is: + * real_value = (integer_value - zeroPoint) * scale. + * + * Available since API level 30. + */ + ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED = 14, + + /** + * A reference to a model. + * + * {@link ANeuralNetworksModel_setOperandValueFromModel} must be used to set + * the value for an Operand of this type. + * + * Available since API level 30. 
+ */ + ANEURALNETWORKS_MODEL = 15, +} OperandCode; + +/** + * Operation types. + * + * The type of an operation in a model. + * + * Available since API level 27. + */ +typedef enum { + // Operations below are available since API level 27. + + /** + * Adds two tensors, element-wise. + * + * Takes two input tensors of identical {@link OperandCode} and compatible + * dimensions. The output is the sum of both input tensors, optionally + * modified by an activation function. + * + * Two dimensions are compatible when: + * 1. they are equal, or + * 2. one of them is 1 + * + * The size of the output is the maximum size along each dimension of the + * input operands. It starts with the trailing dimensions, and works its + * way forward. + * + * Example: + * + * input1.dimension = {4, 1, 2} + * input2.dimension = {5, 4, 3, 1} + * output.dimension = {5, 4, 3, 2} + * + * Since API level 29, generic zero-sized input tensor is supported. Zero + * dimension is only compatible with 0 or 1. The size of the output + * dimension is zero if either of corresponding input dimension is zero. + * + * Supported tensor {@link OperandCode}: + * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) + * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30) + * * {@link ANEURALNETWORKS_TENSOR_INT32} (since API level 30) + * + * Supported tensor rank: up to 4 + * + * Inputs: + * * 0: A tensor. + * * 1: A tensor of the same {@link OperandCode}, and compatible dimensions + * as input0. + * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and + * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, + * the scales and zeroPoint can be different from input0 scale and zeroPoint. + * * 2: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the + * {@link FuseCode} values. Specifies the activation to + * invoke on the result. 
+ * For a {@link ANEURALNETWORKS_TENSOR_INT32} tensor, + * the {@link FuseCode} must be "NONE". + * + * Outputs: + * * 0: The sum, a tensor of the same {@link OperandCode} as input0. + * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and + * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, + * the scale and zeroPoint can be different from inputs' scale and zeroPoint. + * + * Available since API level 27. + */ + ANEURALNETWORKS_ADD = 0, + + /** + * Performs a 2-D average pooling operation. + * + * The output dimensions are functions of the filter dimensions, stride, and + * padding. + * + * The values in the output tensor are computed as: + * + * output[b, i, j, channel] = + * sum_{di, dj}( + * input[b, strides[1] * i + di, strides[2] * j + dj, channel] + * ) / sum(1) + * + * Supported tensor {@link OperandCode}: + * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) + * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30) + * + * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout. + * With the default data layout NHWC, the data is stored in the order of: + * [batch, height, width, channels]. Alternatively, the data layout could + * be NCHW, the data storage order of: [batch, channels, height, width]. + * NCHW is supported since API level 29. + * + * Both explicit padding and implicit padding are supported. + * + * Inputs (explicit padding): + * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying + * the input. + * Since API level 29, zero batches is supported for this tensor. + * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on + * the left, in the ‘width’ dimension. + * * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on + * the right, in the ‘width’ dimension. 
+ * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on + * the top, in the ‘height’ dimension. + * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on + * the bottom, in the ‘height’ dimension. + * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when + * walking through input in the ‘width’ dimension. + * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when + * walking through input in the ‘height’ dimension. + * * 7: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter + * width. + * * 8: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter + * height. + * * 9: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the + * {@link FuseCode} values. Specifies the activation to + * invoke on the result. + * * 10: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false. + * Set to true to specify NCHW data layout for input0 and output0. + * Available since API level 29. + * + * Inputs (implicit padding): + * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying + * the input. + * Since API level 29, zero batches is supported for this tensor. + * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the implicit + * padding scheme, has to be one of the + * {@link PaddingCode} values. + * * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when + * walking through input in the ‘width’ dimension. + * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when + * walking through input in the ‘height’ dimension. + * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter + * width. + * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter + * height. + * * 6: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the + * {@link FuseCode} values. Specifies the activation to + * invoke on the result. + * * 7: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false. 
+ * Set to true to specify NCHW data layout for input0 and output0. + * Available since API level 29. + * + * Outputs: + * * 0: The output 4-D tensor, of shape + * [batches, out_height, out_width, depth]. + * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and + * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, + * the scale and zeroPoint must be the same as input0. + * + * Available since API level 27. + */ + ANEURALNETWORKS_AVERAGE_POOL_2D = 1, + + /** + * Concatenates the input tensors along the given dimension. + * + * The input tensors must have identical {@link OperandCode} and the same + * dimensions except the dimension along the concatenation axis. + * + * Supported tensor {@link OperandCode}: + * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) + * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} + * (full support since API level 29, see the input section) + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30) + * + * Supported tensor rank: up to 4 + * + * Inputs: + * * 0 ~ n-1: The list of n input tensors, of shape + * [D0, D1, ..., Daxis(i), ..., Dm]. + * Before API level 29, all input tensors of + * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} + * must have the same scale and zeroPoint as the output tensor. + * Input tensors of + * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} + * are allowed to have different scale and zeroPoint. + * Since API level 29, zero-sized tensors are supported. + * * n: An {@link ANEURALNETWORKS_INT32} scalar, specifying the + * concatenation axis. + * + * Outputs: + * * 0: The output, a tensor of the same {@link OperandCode} as the input + * tensors. The output shape is [D0, D1, ..., sum(Daxis(i)), ..., Dm]. + * Since API level 29, for a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor, + * the scale and zeroPoint values can be different from + * input tensors. 
Before API level 29 they have to be the same as for the input tensors. + * + * Available since API level 27. + */ + ANEURALNETWORKS_CONCATENATION = 2, + + /** + * Performs a 2-D convolution operation. + * + * The CONV_2D op sweeps a 2-D filter that can mix channels together over a + * batch of images, applying the filter to each window of each image of the + * appropriate size. + * + * The output dimensions are functions of the filter dimensions, stride, and + * padding. + * + * The values in the output tensor are computed as: + * + * output[b, i, j, channel] = + * sum_{di, dj, k} ( + * input[b, strides[1] * i + di, strides[2] * j + dj, k] * + * filter[channel, di, dj, k] + * ) + bias[channel] + * + * Supported tensor {@link OperandCode} configurations: + * * 32 bit floating point: + * * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} for input, filter, output, and bias. + * + * * Quantized: + * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} for input, filter, and output. + * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (with scale set to + * * * input.scale * filter.scale). + * + * Available since API level 29: + * * 16 bit floating point: + * * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} for input, filter, output, and bias. + * + * * Quantized with symmetric per channel quantization for the filter: + * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} for input, and output. + * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter. + * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (scale set to 0.0, + * * * each value scaling is separate and equal to input.scale * filter.scales[channel]). + * + * Available since API level 30: + * * Quantized signed (since API level 30): + * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} for input, filter, and output. + * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (with scale set to + * * * input.scale * filter.scale). 
+ * + * * Quantized signed with filter symmetric per channel quantization (since API level 30): + * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} for input, and output. + * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter. + * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (scale set to 0.0, + * * * each value scaling is separate and equal to input.scale * filter.scales[channel]). + * + * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout. + * With the default data layout NHWC, the data is stored in the order of: + * [batch, height, width, channels]. Alternatively, the data layout could + * be NCHW, the data storage order of: [batch, channels, height, width]. + * NCHW is supported since API level 29. + * + * Both explicit padding and implicit padding are supported. + * + * Inputs (explicit padding): + * * 0: A 4-D tensor, of shape [batches, height, width, depth_in], + * specifying the input. + * Since API level 29, zero batches is supported for this tensor. + * * 1: A 4-D tensor, of shape + * [depth_out, filter_height, filter_width, depth_in], specifying the + * filter. + * For tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} + * the channel dimension (ANeuralNetworksSymmPerChannelQuantParams::channelDim) + * must be set to 0. + * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input + * tensor of type {@link ANEURALNETWORKS_TENSOR_FLOAT32} + * or {@link ANEURALNETWORKS_TENSOR_FLOAT16} the bias must be of the same type. + * For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} + * and {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}, + * the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint + * of 0 and bias_scale == input_scale * filter_scale. + * For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL}, + * the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint of 0 + * and bias_scale of 0. 
The actual scale of each value 'i' is equal to + * bias_scale[i] = input_scale * filter_scale[i]. + * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on + * the left, in the ‘width’ dimension. + * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on + * the right, in the ‘width’ dimension. + * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on + * the top, in the ‘height’ dimension. + * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on + * the bottom, in the ‘height’ dimension. + * * 7: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when + * walking through input in the ‘width’ dimension. + * * 8: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when + * walking through input in the ‘height’ dimension. + * * 9: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the + * {@link FuseCode} values. Specifies the activation to + * invoke on the result. + * * 10: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false. + * Set to true to specify NCHW data layout for input0 and output0. + * Available since API level 29. + * * 11: An optional {@link ANEURALNETWORKS_INT32} scalar, specifying the dilation + * factor for width. Defaults to 1. If set to k > 1, there will be k-1 skipped + * cells between each filter element on width dimension. If this input is set, + * input 12 (dilation factor for height) must be specified as well. + * Available since API level 29. + * * 12: An optional {@link ANEURALNETWORKS_INT32} scalar, specifying the dilation + * factor for height. Defaults to 1. If set to k > 1, there will be k-1 skipped + * cells between each filter element on height dimension. If this input is set, + * input 11 (dilation factor for width) must be specified as well. + * Available since API level 29. + * + * Inputs (implicit padding): + * * 0: A 4-D tensor, of shape [batches, height, width, depth_in], + * specifying the input. 
+ * Since API level 29, zero batches is supported for this tensor. + * * 1: A 4-D tensor, of shape + * [depth_out, filter_height, filter_width, depth_in], specifying the + * filter. + * For tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} + * the channel dimension (ANeuralNetworksSymmPerChannelQuantParams::channelDim) + * must be set to 0. + * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input + * tensor of type {@link ANEURALNETWORKS_TENSOR_FLOAT32} + * or {@link ANEURALNETWORKS_TENSOR_FLOAT16} the bias must be of the same + * type. + * For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} + * and {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}, + * the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint + * of 0 and bias_scale == input_scale * filter_scale. + * For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL}, + * the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint of 0 + * and bias_scale of 0. The actual scale of each value 'i' is equal to + * bias_scale[i] = input_scale * filter_scale[i]. + * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the implicit + * padding scheme, has to be one of the + * {@link PaddingCode} values. + * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when + * walking through input in the ‘width’ dimension. + * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when + * walking through input in the ‘height’ dimension. + * * 6: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the + * {@link FuseCode} values. Specifies the activation to + * invoke on the result. + * * 7: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false. + * Set to true to specify NCHW data layout for input0 and output0. + * Available since API level 29. + * * 8: An optional {@link ANEURALNETWORKS_INT32} scalar, specifying the dilation + * factor for width. Defaults to 1. 
If set to k > 1, there will be k-1 skipped + * cells between each filter element on width dimension. If this input is set, + * input 9 (dilation factor for height) must be specified as well. + * Available since API level 29. + * * 9: An optional {@link ANEURALNETWORKS_INT32} scalar, specifying the dilation + * factor for height. Defaults to 1. If set to k > 1, there will be k-1 skipped + * cells between each filter element on height dimension. If this input is set, + * input 8 (dilation factor for width) must be specified as well. + * Available since API level 29. + * + * Outputs: + * * 0: The output 4-D tensor, of shape + * [batches, out_height, out_width, depth_out]. + * Before API level 29, for output tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, + * the following condition must be satisfied: output_scale > input_scale * filter_scale + * + * Available since API level 27. + */ + ANEURALNETWORKS_CONV_2D = 3, + + /** + * Performs a depthwise 2-D convolution operation. + * + * Given an input tensor of shape [batches, height, width, depth_in] and a + * filter tensor of shape [1, filter_height, filter_width, depth_out] + * containing depth_out convolutional filters of depth 1, DEPTHWISE_CONV + * applies a different filter to each input channel (expanding from 1 + * channel to channel_multiplier channels for each), then concatenates the + * results together. + * + * The output has depth_out = depth_in * depth_multiplier channels. + * The output dimensions are functions of the filter dimensions, stride, and + * padding. 
+ * + * The values in the output tensor are computed as: + * + * output[b, i, j, k * channel_multiplier + q] = + * sum_{di, dj} ( + * input[b, strides[1] * i + di, strides[2] * j + dj, k] * + * filter[1, di, dj, k * channel_multiplier + q] + * ) + bias[k * channel_multiplier + q] + * + * Supported tensor {@link OperandCode} configurations: + * * 32 bit floating point: + * * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} for input, filter, output, and bias. + * + * * Quantized: + * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} for input, filter, and output. + * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (with scale set to + * * * input.scale * filter.scale). + * + * Available since API level 29: + * * 16 bit floating point: + * * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} for input, filter, output, and bias. + * + * * Quantized with symmetric per channel quantization for the filter: + * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} for input, and output. + * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter. + * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (scale set to 0.0, + * * * each value scaling is separate and equal to input.scale * filter.scales[channel]). + * + * Available since API level 30: + * * Quantized signed (since API level 30): + * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} for input, filter, and output. + * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (with scale set to + * * * input.scale * filter.scale). + * + * * Quantized signed with filter symmetric per channel quantization (since API level 30): + * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} for input, and output. + * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter. + * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (scale set to 0.0, + * * * each value scaling is separate and equal to input.scale * filter.scales[channel]). + * + * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout. 
+ * With the default data layout NHWC, the data is stored in the order of: + * [batch, height, width, channels]. Alternatively, the data layout could + * be NCHW, the data storage order of: [batch, channels, height, width]. + * NCHW is supported since API level 29. + * + * Both explicit padding and implicit padding are supported. + * + * Inputs (explicit padding): + * * 0: A 4-D tensor, of shape [batches, height, width, depth_in], + * specifying the input. + * * 1: A 4-D tensor, of shape [1, filter_height, filter_width, depth_out], + * specifying the filter. + * For tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} + * the channel dimension (ANeuralNetworksSymmPerChannelQuantParams::channelDim) + * must be set to 3. + * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input + * tensor of type {@link ANEURALNETWORKS_TENSOR_FLOAT32} + * or {@link ANEURALNETWORKS_TENSOR_FLOAT16} the bias must be of the same type. + * For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} + * and {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}, + * the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint + * of 0 and bias_scale == input_scale * filter_scale. + * For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL}, + * the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint of 0 + * and bias_scale of 0. The actual scale of each value 'i' is equal to + * bias_scale[i] = input_scale * filter_scale[i]. + * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on + * the left, in the ‘width’ dimension. + * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on + * the right, in the ‘width’ dimension. + * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on + * the top, in the ‘height’ dimension. + * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on + * the bottom, in the ‘height’ dimension. 
+ * * 7: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when + * walking through input in the ‘width’ dimension. + * * 8: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when + * walking through input in the ‘height’ dimension. + * * 9: An {@link ANEURALNETWORKS_INT32} scalar, specifying the depthwise + * multiplier. + * * 10: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the + * {@link FuseCode} values. Specifies the activation to + * invoke on the result. + * * 11: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false. + * Set to true to specify NCHW data layout for input0 and output0. + * Available since API level 29. + * * 12: An optional {@link ANEURALNETWORKS_INT32} scalar, specifying the dilation + * factor for width. Defaults to 1. If set to k > 1, there will be k-1 skipped + * cells between each filter element on width dimension. If this input is set, + * input 13 (dilation factor for height) must be specified as well. + * Available since API level 29. + * * 13: An optional {@link ANEURALNETWORKS_INT32} scalar, specifying the dilation + * factor for height. Defaults to 1. If set to k > 1, there will be k-1 skipped + * cells between each filter element on height dimension. If this input is set, + * input 12 (dilation factor for width) must be specified as well. + * Available since API level 29. + * + * Inputs (implicit padding): + * * 0: A 4-D tensor, of shape [batches, height, width, depth_in], + * specifying the input. + * * 1: A 4-D tensor, of shape [1, filter_height, filter_width, depth_out], + * specifying the filter. + * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input + * tensor of type {@link ANEURALNETWORKS_TENSOR_FLOAT32} + * or {@link ANEURALNETWORKS_TENSOR_FLOAT16} the bias must be of the same type. 
+ * For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} + * and {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}, + * the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint + * of 0 and bias_scale == input_scale * filter_scale. + * For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL}, + * the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint of 0 + * and bias_scale of 0. The actual scale of each value 'i' is equal to + * bias_scale[i] = input_scale * filter_scale[i]. + * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the implicit + * padding scheme, has to be one of the + * {@link PaddingCode} values. + * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when + * walking through input in the ‘width’ dimension. + * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when + * walking through input in the ‘height’ dimension. + * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the depthwise + * multiplier. + * * 7: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the + * {@link FuseCode} values. Specifies the activation to + * invoke on the result. + * * 8: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false. + * Set to true to specify NCHW data layout for input0 and output0. + * Available since API level 29. + * * 9: An optional {@link ANEURALNETWORKS_INT32} scalar, specifying the dilation + * factor for width. Defaults to 1. If set to k > 1, there will be k-1 skipped + * cells between each filter element on width dimension. If this input is set, + * input 10 (dilation factor for height) must be specified as well. + * Available since API level 29. + * * 10: An optional {@link ANEURALNETWORKS_INT32} scalar, specifying the dilation + * factor for height. Defaults to 1. If set to k > 1, there will be k-1 skipped + * cells between each filter element on height dimension. 
If this input is set, + * input 9 (dilation factor for width) must be specified as well. + * Available since API level 29. + * + * Outputs: + * * 0: The output 4-D tensor, of shape + * [batches, out_height, out_width, depth_out]. Before API level 29, for + * output tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, + * the following condition must be satisfied: + * output_scale > input_scale * filter_scale + * + * Available since API level 27. + */ + ANEURALNETWORKS_DEPTHWISE_CONV_2D = 4, + + /** + * Rearranges data from depth into blocks of spatial data. + * + * More specifically, this op outputs a copy of the input tensor where + * values from the depth dimension are moved in spatial blocks to the height + * and width dimensions. The value block_size indicates the input block size + * and how the data is moved. + * + * Chunks of data of size block_size * block_size from depth are rearranged + * into non-overlapping blocks of size block_size x block_size. + * + * The width of the output tensor is input_width * block_size, whereas the + * height is input_height * block_size. The depth of the input tensor must + * be divisible by block_size * block_size + * + * Supported tensor {@link OperandCode}: + * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) + * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30) + * + * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout. + * With the default data layout NHWC, the data is stored in the order of: + * [batch, height, width, channels]. Alternatively, the data layout could + * be NCHW, the data storage order of: [batch, channels, height, width]. + * NCHW is supported since API level 29. + * + * Inputs: + * * 0: A 4-D tensor, of shape [batches, height, width, depth_in], + * specifying the input. + * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the block_size. 
+ * block_size must be >=1 and block_size * block_size must be a divisor + * of the input depth. + * * 2: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false. + * Set to true to specify NCHW data layout for input0 and output0. + * Available since API level 29. + * + * Outputs: + * * 0: The output 4-D tensor, of shape [batch, height*block_size, + * width*block_size, depth/(block_size*block_size)]. + * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and + * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, + * the scale and zeroPoint must be the same as input0. + * + * Available since API level 27. + */ + ANEURALNETWORKS_DEPTH_TO_SPACE = 5, + + /** + * Dequantizes the input tensor. + * + * The formula is: + * + * output = (input - zeroPoint) * scale. + * + * Supported input tensor {@link OperandCode}: + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM} (since API level 29) + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} (since API level 29) + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30) + * + * Supported output tensor {@link OperandCode}: + * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) + * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}. + * + * Supported tensor rank: up to 4 + * + * Inputs: + * * 0: A tensor. + * Since API level 29, this tensor may be zero-sized. + * + * Outputs: + * * 0: A tensor with the same shape as input0. + * + * Available since API level 27. + */ + ANEURALNETWORKS_DEQUANTIZE = 6, + + /** + * Looks up sub-tensors in the input tensor. + * + * This operator takes for input a tensor of values (Values) and + * a one-dimensional tensor of selection indices (Lookups). + * The output tensor is the concatenation of sub-tensors of Values as + * selected by Lookups. 
+ * + * Think of Values as being sliced along its first dimension: + * The entries in Lookups select which slices are concatenated together + * to create the output tensor. + * + * For example, if Values has shape of [40, 200, 300] and + * Lookups has shape of [3], all three values found in Lookups are + * expected to be between 0 and 39. The resulting tensor must + * have shape of [3, 200, 300]. + * + * If a value in Lookups is out of bounds, the operation must fail + * and an error must be reported. + * + * Supported value tensor {@link OperandCode}: + * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 30) + * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} + * * {@link ANEURALNETWORKS_TENSOR_INT32} (since API level 29) + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} (since API level 29) + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30) + * + * Supported value tensor rank: from 2 + * + * Inputs: + * * 0: Lookups. A 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. + * The values are indices into the first dimension of Values. + * * 1: Values. An n-D tensor, where n >= 2, from which sub-tensors are + * extracted. + * + * Output: + * * 0: A n-D tensor with the same rank and shape as the Values + * tensor, except for the first dimension which has the same size + * as Lookups' only dimension. + * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and + * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, + * the scale and zeroPoint must be the same as input1. + * + * Available since API level 27. + */ + ANEURALNETWORKS_EMBEDDING_LOOKUP = 7, + + /** + * Computes element-wise floor() on the input tensor. + * + * Supported tensor {@link OperandCode}: + * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) + * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} + * + * Supported tensor rank: up to 4 + * + * Inputs: + * * 0: A tensor. 
+ * + * Outputs: + * * 0: The output tensor, of the same {@link OperandCode} and dimensions as + * the input tensor. + * + * Available since API level 27. + */ + ANEURALNETWORKS_FLOOR = 8, + + /** + * Denotes a fully (densely) connected layer, which connects all elements + * in the input tensor with each element in the output tensor. + * + * This layer implements the operation: + * + * outputs = activation(inputs * weights’ + bias) + * + * Supported tensor {@link OperandCode}: + * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) + * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30) + * + * Supported tensor rank: up to 4. + * + * Inputs: + * * 0: A tensor of at least rank 2, specifying the input. If rank is + * greater than 2, then it gets flattened to a 2-D Tensor. The + * (flattened) 2-D Tensor is reshaped (if necessary) to + * [batch_size, input_size], where "input_size" corresponds to the + * number of inputs to the layer, matching the second dimension of + * weights, and "batch_size" is calculated by dividing the number of + * elements by "input_size". + * Since API level 29, zero batch_size is supported for this tensor. + * * 1: A 2-D tensor, specifying the weights, of shape + * [num_units, input_size], where "num_units" corresponds to the number + * of output nodes. + * * 2: A 1-D tensor, of shape [num_units], specifying the bias. For input + * tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, the bias should + * also be of {@link ANEURALNETWORKS_TENSOR_FLOAT32}. + * For input tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} + * and {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}, + * the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32}, + * with zeroPoint of 0 and bias_scale == input_scale * filter_scale. + * * 3: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the + * {@link FuseCode} values. 
Specifies the activation to + * invoke on the result. + * + * Outputs: + * * 0: The output tensor, of shape [batch_size, num_units]. Before API level 29, for + * output tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, the following + * condition must be satisfied: output_scale > input_scale * filter_scale. + * + * Available since API level 27. + */ + ANEURALNETWORKS_FULLY_CONNECTED = 9, + + /** + * Looks up sub-tensors in the input tensor using a key-value map. + * + * This operator takes for input a tensor of values (Values), + * a one-dimensional tensor of selection values (Lookups) and + * a one-dimensional tensor that maps these values to Values + * indexes. The output tensor is the concatenation of sub-tensors of + * Values as selected by Lookups via Keys. + * + * Think of Values as being sliced along its outer-most dimension. + * The output is a concatenation of selected slices, with one slice + * for each entry of Lookups. The slice selected is the one at the + * same index as the Maps entry that matches the value in Lookups. + * + * For a hit, the corresponding sub-tensor of Values is included + * in the Output tensor. For a miss, the corresponding sub-tensor in + * Output must have zero values. + * + * For example, if Values has shape of [40, 200, 300], + * Keys should have a shape of [40]. If Lookups tensor has shape + * of [3], three slices are being concatenated, so the resulting tensor + * must have the shape of [3, 200, 300]. If the first entry in Lookups + * has the value 123456, that value must be located in Keys tensor. + * If the sixth entry of Keys contains 123456, the sixth slice of Values + * must be selected. If no entry in Keys has 123456, a slice of zeroes + * must be concatenated. 
+ * + * Supported value tensor {@link OperandCode}: + * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} + * * {@link ANEURALNETWORKS_TENSOR_INT32} + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} + * + * Supported value tensor rank: from 2 + * + * Inputs: + * * 0: Lookups. A 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor with + * shape [ k ]. + * * 1: Keys. A 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor with shape + * [ n ]; Keys and Values pair represent a map, i.e., the ith element + * in Keys (Keys[i]) is the key to select the ith sub-tensor in Values + * (Values[i]), where 0 <= i <= n-1. Keys tensor *MUST* be sorted in + * ascending order. + * * 2: Values. A tensor with shape of [ n, … ]; i.e., the first dimension + * must be n. + * + * Outputs: + * * 0: Output. A tensor with shape [ k …]. + * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor, + * the scale and zeroPoint must be the same as input2. + * * 1: Hits. A boolean tensor with shape [ k ] indicates whether the lookup + * hits (True) or not (False). + * Stored as {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} with offset 0 + * and scale 1.0f. + * A non-zero byte represents True, a hit. A zero indicates otherwise. + * + * Available since API level 27. + */ + ANEURALNETWORKS_HASHTABLE_LOOKUP = 10, + + /** + * Applies L2 normalization along the axis dimension. + * + * The values in the output tensor are computed as: + * + * output[batch, row, col, channel] = + * input[batch, row, col, channel] / + * sqrt(sum_{c} pow(input[batch, row, col, c], 2)) + * + * By default the axis dimension is the last dimension of the input tensor. 
+ * + * Supported tensor {@link OperandCode}: + * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) + * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} (since API level 29) + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30) + * + * Supported tensor rank: up to 4 + * Tensors with rank less than 4 are only supported since API level 29. + * + * Inputs: + * * 0: An n-D tensor, specifying the tensor to be normalized. + * * 1: An optional {@link ANEURALNETWORKS_INT32} scalar, default to -1, + * specifying the dimension normalization would be performed on. + * Negative index is used to specify axis from the end (e.g. -1 for + * the last axis). Must be in the range [-n, n). + * Available since API level 29. + * + * Outputs: + * * 0: A tensor of the same {@link OperandCode} and same shape as input0. + * For {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, + * the scale must be 1.f / 128 and the zeroPoint must be 128. + * For {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}, + * the scale must be 1.f / 128 and the zeroPoint must be 0. + * + * NOTE: Before API level 30, if the elements along an axis are all zeros, + * the result is undefined. Since API level 30, if the elements along an axis + * are all zeros, the result is logical zero. + * + * Available since API level 27. + */ + ANEURALNETWORKS_L2_NORMALIZATION = 11, + + /** + * Performs an 2-D L2 pooling operation. + * + * The output dimensions are functions of the filter dimensions, stride, and + * padding. + * + * The values in the output tensor are computed as: + * + * output[b, i, j, c] = + * sqrt(sum_{di, dj} pow(input[b, strides[1] * i + di, strides[2] * j + dj, c], 2) / + * sum(1)) + * + * Supported tensor {@link OperandCode}: + * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) + * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} + * + * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout. 
+ * With the default data layout NHWC, the data is stored in the order of: + * [batch, height, width, channels]. Alternatively, the data layout could + * be NCHW, the data storage order of: [batch, channels, height, width]. + * NCHW is supported since API level 29. + * + * Both explicit padding and implicit padding are supported. + * + * Inputs (explicit padding): + * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying + * the input. + * Since API level 29, zero batches is supported for this tensor. + * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on + * the left, in the ‘width’ dimension. + * * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on + * the right, in the ‘width’ dimension. + * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on + * the top, in the ‘height’ dimension. + * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on + * the bottom, in the ‘height’ dimension. + * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when + * walking through input in the ‘width’ dimension. + * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when + * walking through input in the ‘height’ dimension. + * * 7: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter + * width. + * * 8: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter + * height. + * * 9: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the + * {@link FuseCode} values. Specifies the activation to + * invoke on the result. + * * 10: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false. + * Set to true to specify NCHW data layout for input0 and output0. + * Available since API level 29. + * + * Inputs (implicit padding): + * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying + * the input. + * Since API level 29, zero batches is supported for this tensor. 
+ * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the implicit + * padding scheme, has to be one of the + * {@link PaddingCode} values. + * * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when + * walking through input in the ‘width’ dimension. + * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when + * walking through input in the ‘height’ dimension. + * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter + * width. + * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter + * height. + * * 6: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the + * {@link FuseCode} values. Specifies the activation to + * invoke on the result. + * * 7: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false. + * Set to true to specify NCHW data layout for input0 and output0. + * Available since API level 29. + * + * Outputs: + * * 0: The output 4-D tensor, of shape + * [batches, out_height, out_width, depth]. + * + * Available since API level 27. + */ + ANEURALNETWORKS_L2_POOL_2D = 12, + + /** + * Applies Local Response Normalization along the depth dimension. + * + * The 4-D input tensor is treated as a 3-D array of 1-D vectors (along the + * last dimension), and each vector is normalized independently. Within a + * given vector, each component is divided by the weighted, squared sum of + * inputs within depth_radius. + * + * The output is calculated using this formula: + * + * sqr_sum[a, b, c, d] = sum( + * pow(input[a, b, c, d - depth_radius : d + depth_radius + 1], 2)) + * output = input / pow((bias + alpha * sqr_sum), beta) + * + * For input tensor with rank less than 4, independently normalizes each + * 1-D slice along specified dimension. 
+ * + * Supported tensor {@link OperandCode}: + * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) + * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} + * + * Supported tensor rank: up to 4 + * Tensors with rank less than 4 are only supported since API level 29. + * + * Inputs: + * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying + * the input. + * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the radius of + * the normalization window. + * * 2: A scalar, specifying the bias, must not be zero. + * For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT16}, the bias + * value must be of {@link ANEURALNETWORKS_FLOAT16}. + * For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, the bias + * value must be of {@link ANEURALNETWORKS_FLOAT32}. + * * 3: A scalar, specifying the scale factor, alpha. + * For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT16}, the + * alpha value must be of {@link ANEURALNETWORKS_FLOAT16}. + * For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, the + * alpha value must be of {@link ANEURALNETWORKS_FLOAT32}. + * * 4: A scalar, specifying the exponent, beta. + * For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT16}, the beta + * value must be of {@link ANEURALNETWORKS_FLOAT16}. + * For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, the beta + * value must be of {@link ANEURALNETWORKS_FLOAT32}. + * * 5: An optional {@link ANEURALNETWORKS_INT32} scalar, default to -1, + * specifying the dimension normalization would be performed on. + * Negative index is used to specify axis from the end (e.g. -1 for + * the last axis). Must be in the range [-n, n). + * Available since API level 29. + * + * Outputs: + * * 0: The output tensor of same shape as input0. + * + * Available since API level 27. + */ + ANEURALNETWORKS_LOCAL_RESPONSE_NORMALIZATION = 13, + + /** + * Computes sigmoid activation on the input tensor element-wise. 
+ *
+ * The output is calculated using this formula:
+ *
+ * output = 1 / (1 + exp(-input))
+ *
+ * Supported tensor {@link OperandCode}:
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
+ * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
+ * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)
+ *
+ * Supported tensor rank: up to 4.
+ *
+ * Inputs:
+ * * 0: A tensor, specifying the input.
+ * Since API level 29, this tensor may be zero-sized.
+ *
+ * Outputs:
+ * * 0: The output tensor of same shape as input0.
+ * For {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM},
+ * the scale must be 1.f / 256 and the zeroPoint must be 0.
+ * For {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED},
+ * the scale must be 1.f / 256 and the zeroPoint must be -128.
+ *
+ * Available since API level 27.
+ */
+ ANEURALNETWORKS_LOGISTIC = 14,
+
+ /**
+ * Projects an input to a bit vector via locality sensitive hashing.
+ *
+ * Supported input tensor {@link OperandCode}:
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
+ * * {@link ANEURALNETWORKS_TENSOR_INT32}
+ * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
+ *
+ * Supported input tensor rank: from 1
+ *
+ * Inputs:
+ * * 0: Hash functions. Dim.size == 2, DataType: Float.
+ * Tensor[0].Dim[0]: Number of hash functions.
+ * Tensor[0].Dim[1]: Number of projected output bits generated by each
+ * hash function.
+ * If the projection type is Sparse:
+ * Tensor[0].Dim[1] + ceil(log2(Tensor[0].Dim[0])) <= 32
+ *
+ * * 1: Input. Dim.size >= 1, no restriction on DataType.
+ * * 2: Weight. Optional. Dim.size == 1, DataType: Float.
+ * If not set, each input element is considered to have the same weight
+ * of 1.0.
+ * Tensor[1].Dim[0] == Tensor[2].Dim[0]
+ * * 3: Type:
+ * Sparse:
+ * Value LSHProjectionType_SPARSE(=3) (since API level 29).
+ * Computed bit vector is considered to be sparse.
+ * Each output element is an int32 made up of multiple bits + * computed from hash functions. + * + * NOTE: To avoid collisions across hash functions, an offset value + * of k * (1 << Tensor[0].Dim[1]) will be added to each signature, + * where k is the index of the hash function. + * + * Value LSHProjectionType_SPARSE_DEPRECATED(=1). + * Legacy behavior that does not include the offset value. + * + * Dense: + * Value LSHProjectionType_DENSE(=2). + * Computed bit vector is considered to be dense. Each output + * element represents a bit and can take the value of either + * 0 or 1. + * + * Outputs: + * * 0: If the projection type is Sparse: + * Output.Dim == { Tensor[0].Dim[0] } + * A tensor of int32 that represents hash signatures. + * + * If the projection type is Dense: + * Output.Dim == { Tensor[0].Dim[0] * Tensor[0].Dim[1] } + * A flattened tensor that represents projected bit vectors. + * + * Available since API level 27. + * The offset value for sparse projections was added in API level 29. + */ + ANEURALNETWORKS_LSH_PROJECTION = 15, + + /** + * Performs a single time step in a Long Short-Term Memory (LSTM) layer + * + * The LSTM operation is described by the following equations. + * + * \f{eqnarray*}{ + * i_t =& \sigma(W_{xi}x_t+W_{hi}h_{t-1}+W_{ci}C_{t-1}+b_i) & \\ + * f_t =& \sigma(W_{xf}x_t+W_{hf}h_{t-1}+W_{cf}C_{t-1}+b_f) & \\ + * C_t =& clip(f_t \odot C_{t-1} + i_t \odot + * g(W_{xc}x_t+W_{hc}h_{t-1}+b_c),\ t_{cell}) & \\ + * o_t =& \sigma(W_{xo}x_t+W_{ho}h_{t-1}+W_{co}C_t+b_o) & \\ + * & & \\ + * & clip(W_{proj}(o_t \odot g(C_t))+b_{proj},\ t_{proj}) + * & if\ there\ is\ a\ projection; \\ + * h_t =& & \\ + * & o_t \odot g(C_t) & otherwise. 
\\
+ * \f}
+ * Where:
+ * * \f$x_t\f$ is the input,
+ * * \f$i_t\f$ is the input gate,
+ * * \f$f_t\f$ is the forget gate,
+ * * \f$C_t\f$ is the cell state,
+ * * \f$o_t\f$ is the output,
+ * * \f$h_t\f$ is the output state,
+ * * \f$\sigma\f$ is the logistic sigmoid function,
+ * * \f$g\f$ is the cell input and cell output activation function, usually
+ * \f$tanh\f$,
+ * * \f$W_{xi}\f$ is the input-to-input weight matrix,
+ * * \f$W_{hi}\f$ is the recurrent-to-input weight matrix,
+ * * \f$W_{ci}\f$ is the cell-to-input weight matrix,
+ * * \f$b_i\f$ is the input gate bias,
+ * * \f$W_{xf}\f$ is the input-to-forget weight matrix,
+ * * \f$W_{hf}\f$ is the recurrent-to-forget weight matrix,
+ * * \f$W_{cf}\f$ is the cell-to-forget weight matrix,
+ * * \f$b_f\f$ is the forget gate bias,
+ * * \f$W_{xc}\f$ is the input-to-cell weight matrix,
+ * * \f$W_{hc}\f$ is the recurrent-to-cell weight matrix,
+ * * \f$b_c\f$ is the cell bias,
+ * * \f$W_{xo}\f$ is the input-to-output weight matrix,
+ * * \f$W_{ho}\f$ is the recurrent-to-output weight matrix,
+ * * \f$W_{co}\f$ is the cell-to-output weight matrix,
+ * * \f$b_o\f$ is the output gate bias,
+ * * \f$W_{proj}\f$ is the projection weight matrix,
+ * * \f$b_{proj}\f$ is the projection bias,
+ * * \f$t_{cell}\f$ is the threshold for clipping the cell state, and
+ * * \f$t_{proj}\f$ is the threshold for clipping the projected output.
+ * * \f$\odot\f$ is the
+ * <a href="https://en.wikipedia.org/wiki/Hadamard_product_(matrices)">
+ * Hadamard product</a> that takes two matrices and produces another
+ * matrix, each element of which is the product of the corresponding
+ * elements of the input matrices.
+ *
+ * Since API level 29 LSTM supports layer normalization.
+ * In case layer normalization is used, the inputs to internal activation + * functions (sigmoid and \f$g\f$) are normalized, rescaled and recentered + * following an approach from section 3.1 from + * https://arxiv.org/pdf/1607.06450.pdf + * + * The operation has the following independently optional inputs: + * * The cell-to-input weights (\f$W_{ci}\f$), cell-to-forget weights + * (\f$W_{cf}\f$) and cell-to-output weights (\f$W_{co}\f$) either all + * have values or neither of them have values (i.e., all set to null). If + * they have values, the peephole optimization is used. + * * The input-to-input weights (\f$W_{xi}\f$), recurrent-to-input weights + * (\f$W_{hi}\f$) and input gate bias (\f$b_i\f$) either all have values, + * or none of them have values. If they have no values, coupling of input + * and forget gates (CIFG) is used, in which case the input gate + * (\f$i_t\f$) is calculated using the following equation instead. + * \f{eqnarray*}{ + * i_t = 1 - f_t + * \f} + * In case peephole optimization is used and CIFG is not used + * cell-to-input (\f$W_{ci}\f$) weights must be present. Otherwise, the + * cell-to-input weights must have no value. + * * The projection weights (\f$W_{proj}\f$) is required only for the + * recurrent projection layer, and should otherwise have no value. + * * The projection bias (\f$b_{proj}\f$) may (but not required to) have a + * value if the recurrent projection layer exists, and should otherwise + * have no value. + * * (API level 29 or later) The four layer normalization weights either all have + * values or none of them have values. Additionally, if CIFG is used, + * input layer normalization weights tensor is omitted and the other layer + * normalization weights either all have values or none of them have + * values. Layer normalization is used when the values of all the layer + * normalization weights are present. 
+ * + * References: + * + * The default non-peephole non-CIFG implementation is based on: + * http://www.bioinf.jku.at/publications/older/2604.pdf + * S. Hochreiter and J. Schmidhuber. "Long Short-Term Memory". Neural + * Computation, 9(8):1735-1780, 1997. + * + * The peephole implementation and projection layer is based on: + * https://research.google.com/pubs/archive/43905.pdf + * Hasim Sak, Andrew Senior, and Francoise Beaufays. "Long short-term memory + * recurrent neural network architectures for large scale acoustic + * modeling." INTERSPEECH, 2014. + * (However, the concept of peephole optimization was introduced in work + * prior to this paper.) + * + * The coupling of input and forget gate (CIFG) is based on: + * http://arxiv.org/pdf/1503.04069.pdf + * Greff et al. "LSTM: A Search Space Odyssey" + * + * The layer normalization is based on: + * https://arxiv.org/pdf/1607.06450.pdf + * Jimmy Ba et al. "Layer Normalization" + * + * Supported tensor {@link OperandCode}: + * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) + * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} + * + * All input and output tensors must be of the same type. + * + * Inputs: + * * 0: The input (\f$x_t\f$). + * A 2-D tensor of shape [batch_size, input_size], where “batch_size” + * corresponds to the batching dimension, and “input_size” is the size + * of the input. + * * 1: The input-to-input weights (\f$W_{xi}\f$). Optional. + * A 2-D tensor of shape [num_units, input_size], where “num_units” + * corresponds to the number of cell units. + * * 2: The input-to-forget weights (\f$W_{xf}\f$). + * A 2-D tensor of shape [num_units, input_size]. + * * 3: The input-to-cell weights (\f$W_{xc}\f$). + * A 2-D tensor of shape [num_units, input_size]. + * * 4: The input-to-output weights (\f$W_{xo}\f$). + * A 2-D tensor of shape [num_units, input_size]. + * * 5: The recurrent-to-input weights (\f$W_{hi}\f$). Optional. 
+ * A 2-D tensor of shape [num_units, output_size], where “output_size” + * corresponds to either the number of cell units (i.e., “num_units”), + * or the second dimension of the “projection_weights”, if defined. + * * 6: The recurrent-to-forget weights (\f$W_{hf}\f$). + * A 2-D tensor of shape [num_units, output_size]. + * * 7: The recurrent-to-cell weights (\f$W_{hc}\f$). + * A 2-D tensor of shape [num_units, output_size]. + * * 8: The recurrent-to-output weights (\f$W_{ho}\f$). + * A 2-D tensor of shape [num_units, output_size]. + * * 9: The cell-to-input weights (\f$W_{ci}\f$). Optional. + * A 1-D tensor of shape [num_units]. + * * 10:The cell-to-forget weights (\f$W_{cf}\f$). Optional. + * A 1-D tensor of shape [num_units]. + * * 11:The cell-to-output weights (\f$W_{co}\f$). Optional. + * A 1-D tensor of shape [num_units]. + * * 12:The input gate bias (\f$b_i\f$). Optional. + * A 1-D tensor of shape [num_units]. + * * 13:The forget gate bias (\f$b_f\f$). + * A 1-D tensor of shape [num_units]. + * * 14:The cell bias (\f$b_c\f$). + * A 1-D tensor of shape [num_units]. + * * 15:The output gate bias (\f$b_o\f$). + * A 1-D tensor of shape [num_units]. + * * 16:The projection weights (\f$W_{proj}\f$). Optional. + * A 2-D tensor of shape [output_size, num_units]. + * * 17:The projection bias (\f$b_{proj}\f$). Optional. + * A 1-D tensor of shape [output_size]. + * * 18:The output state (in) (\f$h_{t-1}\f$). + * A 2-D tensor of shape [batch_size, output_size]. + * * 19:The cell state (in) (\f$C_{t-1}\f$). + * A 2-D tensor of shape [batch_size, num_units]. + * * 20:The activation function (\f$g\f$). + * A value indicating the activation function: + * <ul> + * <li>0: None; + * <li>1: Relu; + * <li>3: Relu6; + * <li>4: Tanh; + * <li>6: Sigmoid. + * </ul> + * * 21:The clipping threshold (\f$t_{cell}\f$) for the cell state, such + * that values are bound within [-cell_clip, cell_clip]. If set to 0.0 + * then clipping is disabled. 
+ * Until API level 29 this scalar must be of type {@link + * ANEURALNETWORKS_FLOAT32}. Since API level 29, if all the input + * tensors have type {@link ANEURALNETWORKS_TENSOR_FLOAT32}, this + * scalar must be of the type {@link ANEURALNETWORKS_FLOAT32}, + * otherwise if all the input tensors have the type {@link + * ANEURALNETWORKS_TENSOR_FLOAT16}, this scalar must be of type {@link + * ANEURALNETWORKS_FLOAT16}. + * * 22:The clipping threshold (\f$t_{proj}\f$) for the output from the + * projection layer, such that values are bound within + * [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled. + * Until API level 29 this scalar must be of type {@link + * ANEURALNETWORKS_FLOAT32}. Since API level 29, if all the input + * tensors have type {@link ANEURALNETWORKS_TENSOR_FLOAT32}, this + * scalar must be of the type {@link ANEURALNETWORKS_FLOAT32}, + * otherwise if all the input tensors have the type {@link + * ANEURALNETWORKS_TENSOR_FLOAT16}, this scalar must be of type {@link + * ANEURALNETWORKS_FLOAT16}. + * Since API level 29 there are additional inputs to this op: + * * 23:The input layer normalization weights. + * A 1-D tensor of shape [num_units]. Used to rescale normalized inputs + * to activation at input gate. + * * 24:The forget layer normalization weights. + * A 1-D tensor of shape [num_units]. Used to rescale normalized inputs + * to activation at forget gate. + * * 25:The cell layer normalization weights. + * A 1-D tensor of shape [num_units]. Used to rescale normalized inputs + * to activation at cell gate. + * * 26:The output layer normalization weights. + * A 1-D tensor of shape [num_units]. Used to rescale normalized inputs + * to activation at output gate. + * + * Outputs: + * * 0: The scratch buffer. + * A 2-D tensor of shape [batch_size, num_units * 3] with CIFG, or + * [batch_size, num_units * 4] without CIFG. + * * 1: The output state (out) (\f$h_t\f$). + * A 2-D tensor of shape [batch_size, output_size]. 
+ * * 2: The cell state (out) (\f$C_t\f$). + * A 2-D tensor of shape [batch_size, num_units]. + * * 3: The output (\f$o_t\f$). + * A 2-D tensor of shape [batch_size, output_size]. This is effectively + * the same as the current “output state (out)” value. + * + * Available since API level 27. + */ + ANEURALNETWORKS_LSTM = 16, + + /** + * Performs an 2-D max pooling operation. + * + * The output dimensions are functions of the filter dimensions, stride, and + * padding. + * + * The values in the output tensor are computed as: + * + * output[b, i, j, channel] = + * max_{di, dj} ( + * input[b, strides[1] * i + di, strides[2] * j + dj, channel] + * ) + * + * Supported tensor {@link OperandCode}: + * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) + * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30) + * + * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout. + * With the default data layout NHWC, the data is stored in the order of: + * [batch, height, width, channels]. Alternatively, the data layout could + * be NCHW, the data storage order of: [batch, channels, height, width]. + * NCHW is supported since API level 29. + * + * Both explicit padding and implicit padding are supported. + * + * Inputs (explicit padding): + * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying + * the input. + * Since API level 29, zero batches is supported for this tensor. + * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on + * the left, in the ‘width’ dimension. + * * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on + * the right, in the ‘width’ dimension. + * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on + * the top, in the ‘height’ dimension. 
+ * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on + * the bottom, in the ‘height’ dimension. + * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when + * walking through input in the ‘width’ dimension. + * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when + * walking through input in the ‘height’ dimension. + * * 7: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter + * width. + * * 8: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter + * height. + * * 9: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the + * {@link FuseCode} values. Specifies the activation to + * invoke on the result. + * * 10: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false. + * Set to true to specify NCHW data layout for input0 and output0. + * Available since API level 29. + * + * Inputs (implicit padding): + * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying + * the input. + * Since API level 29, zero batches is supported for this tensor. + * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the implicit + * padding scheme, has to be one of the + * {@link PaddingCode} values. + * * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when + * walking through input in the ‘width’ dimension. + * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when + * walking through input in the ‘height’ dimension. + * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter + * width. + * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter + * height. + * * 6: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the + * {@link FuseCode} values. Specifies the activation to + * invoke on the result. + * * 7: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false. + * Set to true to specify NCHW data layout for input0 and output0. + * Available since API level 29. 
+ * + * Outputs: + * * 0: The output 4-D tensor, of shape + * [batches, out_height, out_width, depth]. + * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and + * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, + * the scale and zeroPoint must be the same as input0. + * + * Available since API level 27. + */ + ANEURALNETWORKS_MAX_POOL_2D = 17, + + /** + * Multiplies two tensors, element-wise. + * + * Takes two input tensors of identical {@link OperandCode} and compatible + * dimensions. The output is the product of both input tensors, optionally + * modified by an activation function. + * + * Two dimensions are compatible when: + * 1. they are equal, or + * 2. one of them is 1 + * + * The size of the resulting output is the maximum size along each dimension + * of the input operands. It starts with the trailing dimensions, and works + * its way forward. + * + * Since API level 29, generic zero-sized input tensor is supported. Zero + * dimension is only compatible with 0 or 1. The size of the output + * dimension is zero if either of corresponding input dimension is zero. + * + * Supported tensor {@link OperandCode}: + * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) + * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30) + * * {@link ANEURALNETWORKS_TENSOR_INT32} (since API level 30) + * + * Supported tensor rank: up to 4 + * + * Inputs: + * * 0: A tensor. + * * 1: A tensor of the same {@link OperandCode}, and compatible dimensions + * as input0. + * * 2: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the + * {@link FuseCode} values. Specifies the activation to + * invoke on the result. + * For a {@link ANEURALNETWORKS_TENSOR_INT32} tensor, + * the {@link FuseCode} must be "NONE". + * + * Outputs: + * * 0: The product, a tensor of the same {@link OperandCode} as input0. 
+ * For output tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} + * and {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}, + * the following condition must be satisfied: + * output_scale > input1_scale * input2_scale. + * + * Available since API level 27. + */ + ANEURALNETWORKS_MUL = 18, + + /** + * Computes rectified linear activation on the input tensor element-wise. + * + * The output is calculated using this formula: + * + * output = max(0, input) + * + * Supported tensor {@link OperandCode}: + * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) + * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30) + * + * Supported tensor rank: up to 4. + * + * Inputs: + * * 0: A tensor, specifying the input. + * Since API level 29, this tensor may be zero-sized. + * + * Outputs: + * * 0: The output tensor of same shape as input0. + * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and + * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, + * the scale and zeroPoint must be the same as input0. + * + * Available since API level 27. + */ + ANEURALNETWORKS_RELU = 19, + + /** + * Computes rectified linear 1 activation on the input tensor element-wise. + * + * The output is calculated using this formula: + * + * output = min(1.f, max(-1.f, input)) + * + * Supported tensor {@link OperandCode}: + * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) + * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30) + * + * Supported tensor rank: up to 4. + * + * Inputs: + * * 0: A tensor, specifying the input. + * Since API level 29, this tensor may be zero-sized. + * + * Outputs: + * * 0: The output tensor of the same shape as input0. 
+ * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and + * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, + * the scale and zeroPoint must be the same as input0. + * + * Available since API level 27. + */ + ANEURALNETWORKS_RELU1 = 20, + + /** + * Computes rectified linear 6 activation on the input tensor element-wise. + * + * The output is calculated using this formula: + * + * output = min(6, max(0, input)) + * + * Supported tensor {@link OperandCode}: + * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) + * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30) + * + * Supported tensor rank: up to 4. + * + * Inputs: + * * 0: A tensor, specifying the input. + * Since API level 29, this tensor may be zero-sized. + * + * Outputs: + * * 0: The output tensor of same shape as input0. + * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and + * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, + * the scale and zeroPoint must be the same as input0. + * + * Available since API level 27. + */ + ANEURALNETWORKS_RELU6 = 21, + + /** + * Reshapes a tensor. + * + * Given tensor, this operation returns a tensor that has the same values as + * tensor, but with a newly specified shape. + * + * Supported tensor {@link OperandCode}: + * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) + * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30) + * + * Supported tensor rank: up to 4. + * + * Inputs: + * * 0: A tensor, specifying the tensor to be reshaped. + * * 1: A 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, defining the + * shape of the output tensor. The number of elements implied by shape + * must be the same as the number of elements in the input tensor. 
+ *
+ * If one component of shape is the special value -1, the size of that
+ * dimension is computed so that the total size remains constant. In
+ * particular, a shape of [-1] flattens into 1-D. At most one component
+ * of shape can be -1.
+ *
+ * Outputs:
+ * * 0: The output tensor, of shape specified by the input shape.
+ * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
+ * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
+ * the scale and zeroPoint must be the same as input0.
+ *
+ * Available since API level 27.
+ */
+ ANEURALNETWORKS_RESHAPE = 22,
+
+ /**
+ * Resizes images to given size using the bilinear interpolation.
+ *
+ * Resized images must be distorted if their output aspect ratio is not the
+ * same as input aspect ratio. The corner pixels of output may not be the
+ * same as corner pixels of input.
+ *
+ * Supported tensor {@link OperandCode}:
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
+ * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} (since API level 29)
+ * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)
+ *
+ * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
+ * With the default data layout NHWC, the data is stored in the order of:
+ * [batch, height, width, channels]. Alternatively, the data layout could
+ * be NCHW, the data storage order of: [batch, channels, height, width].
+ * NCHW is supported since API level 29.
+ *
+ * Both resizing by shape and resizing by scale are supported.
+ *
+ * Inputs (resizing by shape):
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
+ * the input.
+ * Since API level 29, zero batches is supported for this tensor.
+ * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the output
+ * width of the output tensor.
+ * * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the output
+ * height of the output tensor.
+ * * 3: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false. + * Set to true to specify NCHW data layout for input0 and output0. + * Available since API level 29. + * * 4: Align corners. An optional {@link ANEURALNETWORKS_BOOL} + * scalar, default to false. If True, the centers of the 4 corner + * pixels of the input and output tensors are aligned, preserving the + * values at the corner pixels. + * Available since API level 30. + * * 5: Half pixel centers. An optional {@link ANEURALNETWORKS_BOOL} + * scalar, default to false. If True, the pixel centers are assumed to + * be at (0.5, 0.5). This is the default behavior of image.resize in + * TF 2.0. If this parameter is True, then align_corners parameter + * must be False. + * Available since API level 30. + * + * Inputs (resizing by scale, since API level 29): + * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying + * the input. Zero batches is supported for this tensor. + * * 1: A scalar, specifying width_scale, the scaling factor of the width + * dimension from the input tensor to the output tensor. The output + * width is calculated as new_width = floor(width * width_scale). + * The scalar must be of {@link ANEURALNETWORKS_FLOAT16} if input0 is + * of {@link ANEURALNETWORKS_TENSOR_FLOAT16} and of + * {@link ANEURALNETWORKS_FLOAT32} otherwise. + * * 2: A scalar, specifying height_scale, the scaling factor of the height + * dimension from the input tensor to the output tensor. The output + * height is calculated as new_height = floor(height * height_scale). + * The scalar must be of {@link ANEURALNETWORKS_FLOAT16} if input0 is + * of {@link ANEURALNETWORKS_TENSOR_FLOAT16} and of + * {@link ANEURALNETWORKS_FLOAT32} otherwise. + * * 3: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false. + * Set to true to specify NCHW data layout for input0 and output0. + * * 4: Align corners. An optional {@link ANEURALNETWORKS_BOOL} + * scalar, default to false. 
If True, the centers of the 4 corner + * pixels of the input and output tensors are aligned, preserving the + * values at the corner pixels. + * Available since API level 30. + * * 5: Half pixel centers. An optional {@link ANEURALNETWORKS_BOOL} + * scalar, default to false. If True, the pixel centers are assumed to + * be at (0.5, 0.5). This is the default behavior of image.resize in + * TF 2.0. If this parameter is True, then align_corners parameter + * must be False. + * Available since API level 30. + * + * Outputs: + * * 0: The output 4-D tensor, of shape + * [batches, new_height, new_width, depth]. + * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and + * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, + * the scale and zeroPoint must be the same as input0. + * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor, + * the scale and zeroPoint must be the same as input0. + * + * Available since API level 27. + */ + ANEURALNETWORKS_RESIZE_BILINEAR = 23, + + /** + * A basic recurrent neural network layer. + * + * This layer implements the operation: + * outputs = state = activation(inputs * input_weights + + * state * recurrent_weights + bias) + * + * Where: + * * “input_weights” is a weight matrix that multiplies the inputs; + * * “recurrent_weights” is a weight matrix that multiplies the current + * “state” which itself is the output from the previous time step + * computation; + * * “bias” is a bias vector (added to each output vector in the batch); + * * “activation” is the function passed as the “fused_activation_function” + * argument (if not “NONE”). + * + * Supported tensor {@link OperandCode}: + * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) + * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} + * + * The input tensors must all be the same type. + * + * Inputs: + * * 0: input. 
+ * A 2-D tensor of shape [batch_size, input_size], where “batch_size” + * corresponds to the batching dimension, and “input_size” is the size + * of the input. + * * 1: weights. + * A 2-D tensor of shape [num_units, input_size], where “num_units” + * corresponds to the number of units. + * * 2: recurrent_weights. + * A 2-D tensor of shape [num_units, num_units], with columns + * corresponding to the weights from each unit. + * * 3: bias. + * A 1-D tensor of shape [num_units]. + * * 4: hidden state (in). + * A 2-D tensor of shape [batch_size, num_units]. + * * 5: fused_activation_function. + * An optional {@link FuseCode} value indicating the + * activation function. If “NONE” is specified then it results in a + * linear activation. + * + * Outputs: + * * 0: hidden state (out). + * A 2-D tensor of shape [batch_size, num_units]. + * + * * 1: output. + * A 2-D tensor of shape [batch_size, num_units]. This is effectively + * the same as the current state value. + * + * Available since API level 27. + */ + ANEURALNETWORKS_RNN = 24, + + /** + * Computes the softmax activation on the input tensor element-wise, per + * batch, by normalizing the input vector so the maximum coefficient is + * zero. + * + * The output is calculated using this formula: + * + * output[batch, i] = + * exp((input[batch, i] - max(input[batch, :])) * beta) / + * sum_{k}{exp((input[batch, k] - max(input[batch, :])) * beta)} + * + * For input tensor with rank other than 2, the activation will be applied + * independently on each 1-D slice along specified dimension. + * + * Supported tensor {@link OperandCode}: + * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) + * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30) + * + * Supported tensor rank: up to 4. + * Tensors with rank other than 2 or 4 are only supported since API level 29. 
+ * + * Inputs: + * * 0: A 2-D or 4-D tensor, specifying the tensor to be reshaped. + * Since API level 29, this tensor may be zero-sized. + * * 1: A scalar, specifying the positive scaling factor for the exponent, + * beta. If input0 is of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, + * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} or + * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}, the scalar + * must be of {@link ANEURALNETWORKS_FLOAT32}. + * If input0 is of {@link ANEURALNETWORKS_TENSOR_FLOAT16}, then the + * scalar must be of {@link ANEURALNETWORKS_FLOAT16}. + * * 2: An optional {@link ANEURALNETWORKS_INT32} scalar, default to -1, + * specifying the dimension the activation would be performed on. + * Negative index is used to specify axis from the end (e.g. -1 for + * the last axis). Must be in the range [-n, n). + * Available since API level 29. + * + * Outputs: + * * 0: The output tensor of same shape as input0. + * For {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, + * the scale must be 1.f / 256 and the zeroPoint must be 0. + * For {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}, + * the scale must be 1.f / 256 and the zeroPoint must be -128. + * + * Available since API level 27. + */ + ANEURALNETWORKS_SOFTMAX = 25, + + /** + * Rearranges blocks of spatial data, into depth. + * + * More specifically, this op outputs a copy of the input tensor where + * values from the height and width dimensions are moved to the depth + * dimension. The value block_size indicates the input block size and how + * the data is moved. + * + * Chunks of data of size block_size * block_size from depth are rearranged + * into non-overlapping blocks of size block_size x block_size. + * + * The depth of the output tensor is input_depth * block_size * block_size. + * The input tensor's height and width must be divisible by block_size. 
+ * + * Supported tensor {@link OperandCode}: + * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) + * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30) + * + * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout. + * With the default data layout NHWC, the data is stored in the order of: + * [batch, height, width, channels]. Alternatively, the data layout could + * be NCHW, the data storage order of: [batch, channels, height, width]. + * NCHW is supported since API level 29. + * + * Inputs: + * * 0: A 4-D tensor, of shape [batches, height, width, depth_in], + * specifying the input. + * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the block_size. + * block_size must be >=1 and block_size must be a divisor of both the + * input height and width. + * * 2: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false. + * Set to true to specify NCHW data layout for input0 and output0. + * Available since API level 29. + * + * Outputs: + * * 0: The output 4-D tensor, of shape [batches, height/block_size, + * width/block_size, depth_in*block_size*block_size]. + * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and + * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, + * the scale and zeroPoint must be the same as input0. + * + * Available since API level 27. + */ + ANEURALNETWORKS_SPACE_TO_DEPTH = 26, + + /** + * SVDF op is a kind of stateful layer derived from the notion that a + * densely connected layer that's processing a sequence of input frames can + * be approximated by using a singular value decomposition of each of its + * nodes. The implementation is based on: + * + * https://research.google.com/pubs/archive/43813.pdf + * + * P. Nakkiran, R. Alvarez, R. Prabhavalkar, C. Parada. + * “Compressing Deep Neural Networks using a Rank-Constrained Topology”. + * INTERSPEECH, 2015. 
+ * + * It processes the incoming input using a 2-stage filtering mechanism: + * * stage 1 performs filtering on the "features" dimension, whose outputs + * get pushed into a memory of fixed-size memory_size. + * * stage 2 performs filtering on the "time" dimension of the memory_size + * memoized outputs of stage 1. + * + * Specifically, for rank 1, this layer implements the operation: + * + * memory = push(conv1d(inputs, weights_feature, feature_dim, + * "ANEURALNETWORKS_PADDING_VALID")); + * outputs = activation(memory * weights_time + bias); + * + * Where: + * * “weights_feature” is a weights matrix that processes the inputs (by + * convolving the input with every “feature filter”), and whose outputs + * get pushed, stacked in order, into the fixed-size “memory” (the oldest + * entry gets dropped); + * * “weights_time” is a weights matrix that processes the “memory” (by a + * batched matrix multiplication on the num_units); + * * “bias” is an optional bias vector (added to each output vector in the + * batch); and + * * “activation” is the function passed as the “fused_activation_function” + * argument (if not “NONE”). + * + * Each rank adds a dimension to the weights matrices by means of stacking + * the filters. + * + * Supported tensor {@link OperandCode}: + * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) + * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} + * + * All input tensors must be the same type. + * + * Inputs: + * * 0: input. + * A 2-D tensor of shape [batch_size, input_size], where “batch_size” + * corresponds to the batching dimension, and “input_size” is the size + * of the input. + * * 1: weights_feature. + * A 2-D tensor of shape [num_units, input_size], where “num_units” + * corresponds to the number of units. + * * 2: weights_time. + * A 2-D tensor of shape [num_units, memory_size], where “memory_size” + * corresponds to the fixed-size of the memory. + * * 3: bias. + * An optional 1-D tensor of shape [num_units]. + * * 4: state (in). 
+ * A 2-D tensor of shape [batch_size, (memory_size - 1) * num_units * rank]. + * * 5: rank. + * The rank of the SVD approximation. + * * 6: fused_activation_function. + * An optional {@link FuseCode} value indicating the + * activation function. If “NONE” is specified then it results in a + * linear activation. + * + * Outputs: + * * 0: state (out). + * A 2-D tensor of the same {@link OperandCode} as the inputs, with shape + * [batch_size, (memory_size - 1) * num_units * rank]. + * * 1: output. + * A 2-D tensor of the same {@link OperandCode} as the inputs, with shape + * [batch_size, num_units]. + * + * Available since API level 27. + */ + ANEURALNETWORKS_SVDF = 27, + + /** + * Computes hyperbolic tangent of input tensor element-wise. + * + * The output is calculated using this formula: + * + * output = tanh(input) + * + * Supported tensor {@link OperandCode}: + * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) + * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} (since API level 29) + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30) + * + * Supported tensor rank: up to 4. + * + * Inputs: + * * 0: A tensor, specifying the input. + * Since API level 29, this tensor may be zero-sized. + * + * Outputs: + * * 0: The output tensor of same shape as input0. + * For {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, + * the scale must be 1.f / 128 and the zeroPoint must be 128. + * For {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}, + * the scale must be 1.f / 128 and the zeroPoint must be 0. + * + * Available since API level 27. + */ + ANEURALNETWORKS_TANH = 28, + + // Operations below are available since API level 28. + + /** + * BatchToSpace for N-dimensional tensors. 
+ * + * This operation reshapes the batch dimension (dimension 0) into M + 1 + * dimensions of shape block_shape + [batch], interleaves these blocks back + * into the grid defined by the spatial dimensions [1, ..., M], to obtain a + * result with the same rank as the input. + * + * This is the reverse of SpaceToBatch. + * + * Supported tensor {@link OperandCode}: + * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) + * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30) + * + * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout. + * With the default data layout NHWC, the data is stored in the order of: + * [batch, height, width, channels]. Alternatively, the data layout could + * be NCHW, the data storage order of: [batch, channels, height, width]. + * NCHW is supported since API level 29. + * + * Inputs: + * * 0: An n-D tensor, specifying the tensor to be reshaped + * * 1: A 1-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, the block + * sizes for each spatial dimension of the input tensor. All values + * must be >= 1. + * * 2: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false. + * Set to true to specify NCHW data layout for input0 and output0. + * Available since API level 29. + * + * Outputs: + * * 0: A tensor of the same {@link OperandCode} as input0. + * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and + * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, + * the scale and zeroPoint must be the same as input0. + * + * Available since API level 28. + */ + ANEURALNETWORKS_BATCH_TO_SPACE_ND = 29, + + /** + * Element-wise division of two tensors. + * + * Takes two input tensors of identical {@link OperandCode} and compatible + * dimensions. The output is the result of dividing the first input tensor + * by the second, optionally modified by an activation function. 
+ * + * For inputs of {@link ANEURALNETWORKS_TENSOR_INT32}, performs + * "floor division" ("//" in Python). For example, + * 5 // 2 = 2 + * -5 // 2 = -3 + * + * Two dimensions are compatible when: + * 1. they are equal, or + * 2. one of them is 1 + * + * The size of the output is the maximum size along each dimension of the + * input operands. It starts with the trailing dimensions, and works its way + * forward. + * + * Example: + * input1.dimension = {4, 1, 2} + * input2.dimension = {5, 4, 3, 1} + * output.dimension = {5, 4, 3, 2} + * + * Since API level 29, generic zero-sized input tensor is supported. Zero + * dimension is only compatible with 0 or 1. The size of the output + * dimension is zero if either of corresponding input dimension is zero. + * + * Supported tensor {@link OperandCode}: + * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) + * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} + * * {@link ANEURALNETWORKS_TENSOR_INT32} (since API level 30) + * + * Supported tensor rank: up to 4 + * + * Inputs: + * * 0: An n-D tensor, specifying the first input. + * * 1: A tensor of the same {@link OperandCode}, and compatible dimensions + * as input0. + * * 2: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the + * {@link FuseCode} values. Specifies the activation to + * invoke on the result. + * For a {@link ANEURALNETWORKS_TENSOR_INT32} tensor, + * the {@link FuseCode} must be "NONE". + * + * Outputs: + * * 0: A tensor of the same {@link OperandCode} as input0. + * + * Available since API level 28. + */ + ANEURALNETWORKS_DIV = 30, + + /** + * Computes the mean of elements across dimensions of a tensor. + * + * Reduces the input tensor along the given dimensions to reduce. Unless + * keep_dims is true, the rank of the tensor is reduced by 1 for each entry + * in axis. If keep_dims is true, the reduced dimensions are retained with + * length 1. 
+ * + * Supported tensor {@link OperandCode}: + * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) + * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30) + * + * Supported tensor rank: up to 4 + * + * Inputs: + * * 0: A tensor, specifying the input. + * * 1: A 1-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The dimensions + * to reduce. Must be in the range + * [-rank(input_tensor), rank(input_tensor)). + * + * NOTE: When the operation was introduced, the documentation + * incorrectly stated that if dimensions were empty, the operation + * would reduce across all dimensions. This behavior was never + * implemented. + * + * * 2: An {@link ANEURALNETWORKS_INT32} scalar, keep_dims. If positive, + * retains reduced dimensions with length 1. + * + * Outputs: + * * 0: A tensor of the same {@link OperandCode} as input0. + * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and + * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, + * the scale and zeroPoint must be the same as input0. + * If all dimensions are reduced and keep_dims is false, the output + * shape is [1]. + * + * Available since API level 28. + */ + ANEURALNETWORKS_MEAN = 31, + + /** + * Pads a tensor. + * + * This operation pads a tensor according to the specified paddings. + * + * Supported tensor {@link OperandCode}: + * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) + * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30) + * (full support since API level 29, see the output section) + * + * Supported tensor rank: up to 4 + * + * Inputs: + * * 0: An n-D tensor, specifying the tensor to be padded. + * * 1: A 2-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, the paddings + * for each spatial dimension of the input tensor. 
The shape of the + * tensor must be {rank(input0), 2}. + * padding[i, 0] specifies the number of elements to be padded in the + * front of dimension i. + * padding[i, 1] specifies the number of elements to be padded after the + * end of dimension i. + * + * Outputs: + * * 0: A tensor of the same {@link OperandCode} as input0. The + * output tensor has the same rank as input0, and each + * dimension of the output tensor has the same size as the + * corresponding dimension of the input tensor plus the size + * of the padding: + * output0.dimension[i] = + * padding[i, 0] + input0.dimension[i] + padding[i, 1] + * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and + * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, + * the scale and zeroPoint must be the same as input0. + * + * NOTE: Before API level 29, the pad value for + * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} is undefined. + * Since API level 29, the pad value is always the logical zero. + * + * Available since API level 28. + */ + ANEURALNETWORKS_PAD = 32, + + /** + * SpaceToBatch for N-Dimensional tensors. + * + * This operation divides "spatial" dimensions [1, ..., M] of the input into + * a grid of blocks of shape block_shape, and interleaves these blocks with + * the "batch" dimension (0) such that in the output, the spatial dimensions + * [1, ..., M] correspond to the position within the grid, and the batch + * dimension combines both the position within a spatial block and the + * original batch position. Prior to division into blocks, the spatial + * dimensions of the input are optionally zero padded according to paddings. 
+ * + * Supported tensor {@link OperandCode}: + * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) + * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30) + * (full support since API level 29, see the output section) + * + * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout. + * With the default data layout NHWC, the data is stored in the order of: + * [batch, height, width, channels]. Alternatively, the data layout could + * be NCHW, the data storage order of: [batch, channels, height, width]. + * NCHW is supported since API level 29. + * + * Inputs: + * * 0: An n-D tensor, specifying the input. + * * 1: A 1-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, the block + * sizes for each spatial dimension of the input tensor. All values + * must be >= 1. + * * 2: A 2-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, the paddings + * for each spatial dimension of the input tensor. All values must be + * >= 0. The shape of the tensor must be {M, 2}, where M is the number + * of spatial dimensions. + * padding[i, 0] specifies the number of element to be padded in the + * front of dimension i. + * padding[i, 1] specifies the number of element to be padded after the + * end of dimension i. + * * 3: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false. + * Set to true to specify NCHW data layout for input0 and output0. + * Available since API level 29. + * + * Outputs: + * * 0: A tensor of the same {@link OperandCode} as input0. + * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and + * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, + * the scale and zeroPoint must be the same as input0. + * + * NOTE: Before API level 29, the pad value for + * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} is undefined. + * Since API level 29, the pad value is always the logical zero. + * + * Available since API level 28. 
+ */ + ANEURALNETWORKS_SPACE_TO_BATCH_ND = 33, + + /** + * Removes dimensions of size 1 from the shape of a tensor. + * + * Given a tensor input, this operation returns a tensor of the same + * {@link OperandCode} with all dimensions of size 1 removed. If you don't + * want to remove all size 1 dimensions, you can remove specific size 1 + * dimensions by specifying the axes (input1). + * + * Supported tensor {@link OperandCode}: + * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) + * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30) + * + * Supported tensor rank: up to 4 + * + * Inputs: + * * 0: An n-D tensor, the tensor to be squeezed. + * * 1: An optional 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The + * dimensions to squeeze. If specified only squeezes the dimensions + * listed. Otherwise, squeezes all dimensions. The dimension index + * starts at 0. An error must be reported if squeezing a dimension that + * is not 1. + * + * Outputs: + * * 0: A tensor of the same {@link OperandCode} as input0. Contains the + * same data as input, but has one or more dimensions of size 1 + * removed. + * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and + * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, + * the scale and zeroPoint must be the same as input0. + * If all input dimensions are equal to 1 and are to be squeezed, the + * output shape is [1]. + * + * Available since API level 28. + */ + ANEURALNETWORKS_SQUEEZE = 34, + + /** + * Extracts a strided slice of a tensor. + * + * Roughly speaking, this op extracts a slice of size (end - begin) / stride + * from the given input tensor. Starting at the location specified by begin + * the slice continues by adding stride to the index until all dimensions + * are not less than end. Note that a stride can be negative, which causes a + * reverse slice. 
+ * + * Supported tensor {@link OperandCode}: + * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) + * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30) + * + * Supported tensor rank: up to 4 + * + * Inputs: + * * 0: An n-D tensor, specifying the tensor to be sliced. + * * 1: begin, a 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The + * starts of the dimensions of the input tensor to be sliced. The + * length must be of rank(input0). + * * 2: end, a 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The + * ends of the dimensions of the input tensor to be sliced. The length + * must be of rank(input0). + * * 3: strides, a 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The + * strides of the dimensions of the input tensor to be sliced. The + * length must be of rank(input0). The entries must be non-zero. + * * 4: begin_mask, an {@link ANEURALNETWORKS_INT32} scalar. If the ith bit + * of begin_mask is set, begin[i] is ignored and the fullest possible + * range in that dimension is used instead. + * * 5: end_mask, an {@link ANEURALNETWORKS_INT32} scalar. If the ith bit of + * end_mask is set, end[i] is ignored and the fullest possible range in + * that dimension is used instead. + * * 6: shrink_axis_mask, an {@link ANEURALNETWORKS_INT32} scalar. If the + * ith bit of shrink_axis_mask is set, the ith dimension specification + * shrinks the dimensionality by 1, taking on the value at index + * begin[i]. In this case, the ith specification must define a + * slice of size 1, e.g. begin[i] = x, end[i] = x + 1. + * + * Outputs: + * * 0: A tensor of the same {@link OperandCode} as input0 and rank (n - k), + * where k is the number of bits set in shrink_axis_mask. 
+ * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and + * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, + * the scale and zeroPoint must be the same as input0. + * If shrink_axis_mask is true for all input dimensions, the output + * shape is [1]. + * + * Available since API level 28. + */ + ANEURALNETWORKS_STRIDED_SLICE = 35, + + /** + * Element-wise subtraction of two tensors. + * + * Takes two input tensors of identical {@link OperandCode} and compatible + * dimensions. The output is the result of subtracting the second input + * tensor from the first one, optionally modified by an activation function. + * + * Two dimensions are compatible when: + * 1. they are equal, or + * 2. one of them is 1 + * + * The size of the output is the maximum size along each dimension of the + * input operands. It starts with the trailing dimensions, and works its way + * forward. + * + * Example: + * input1.dimension = {4, 1, 2} + * input2.dimension = {5, 4, 3, 1} + * output.dimension = {5, 4, 3, 2} + * + * Since API level 29, generic zero-sized input tensor is supported. Zero + * dimension is only compatible with 0 or 1. The size of the output + * dimension is zero if either of corresponding input dimension is zero. + * + * Supported tensor {@link OperandCode}: + * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) + * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} (since API level 29) + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30) + * * {@link ANEURALNETWORKS_TENSOR_INT32} (since API level 30) + * + * Supported tensor rank: up to 4 + * + * Inputs: + * * 0: An n-D tensor, specifying the first input. + * * 1: A tensor of the same {@link OperandCode}, and compatible dimensions + * as input0. + * * 2: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the + * {@link FuseCode} values. Specifies the activation to + * invoke on the result. 
+ * For a {@link ANEURALNETWORKS_TENSOR_INT32} tensor, + * the {@link FuseCode} must be "NONE". + * + * Outputs: + * * 0: A tensor of the same {@link OperandCode} as input0. + * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and + * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, + * the scale and zeroPoint can be different from inputs' scale and zeroPoint. + * + * Available since API level 28. + */ + ANEURALNETWORKS_SUB = 36, + + /** + * Transposes the input tensor, permuting the dimensions according to the + * perm tensor. + * + * The returned tensor's dimension i corresponds to the input dimension + * perm[i]. If perm is not given, it is set to (n-1...0), where n is the + * rank of the input tensor. Hence by default, this operation performs a + * regular matrix transpose on 2-D input Tensors. + * + * Supported tensor {@link OperandCode}: + * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) + * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30) + * + * Supported tensor rank: up to 4 + * + * Inputs: + * * 0: An n-D tensor, specifying the tensor to be transposed. + * Since API level 29, this tensor may be zero-sized. + * * 1: An optional 1-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, + * the permutation of the dimensions of the input tensor. + * + * Outputs: + * * 0: A tensor of the same {@link OperandCode} as input0. + * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and + * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, + * the scale and zeroPoint must be the same as input0. + * + * Available since API level 28. + */ + ANEURALNETWORKS_TRANSPOSE = 37, + + // Operations below are available since API level 29. + + /** + * Computes the absolute value of a tensor, element-wise. 
+ * + * Supported tensor {@link OperandCode}: + * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} + * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} + * * {@link ANEURALNETWORKS_TENSOR_INT32} (since API level 30) + * + * Supported tensor rank: from 1. + * + * Inputs: + * * 0: A tensor. + * + * Outputs: + * * 0: The output tensor of same shape as input0. + * + * Available since API level 29. + */ + ANEURALNETWORKS_ABS = 38, + + /** + * Returns the index of the largest element along an axis. + * + * Supported tensor {@link OperandCode}: + * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} + * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} + * * {@link ANEURALNETWORKS_TENSOR_INT32} + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30) + * + * Supported tensor rank: from 1 + * + * Inputs: + * * 0: An n-D tensor specifying the input. Must be non-empty. + * * 1: An {@link ANEURALNETWORKS_INT32} scalar specifying the axis to + * reduce across. Negative index is used to specify axis from the + * end (e.g. -1 for the last axis). Must be in the range [-n, n). + * + * Outputs: + * * 0: An (n - 1)-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor. + * If input is 1-dimensional, the output shape is [1]. + * + * Available since API level 29. + */ + // There is no underscore in ARG_MAX to avoid name conflict with + // the macro defined in libc/kernel/uapi/linux/limits.h. + ANEURALNETWORKS_ARGMAX = 39, + + /** + * Returns the index of the smallest element along an axis. + * + * Supported tensor {@link OperandCode}: + * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} + * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} + * * {@link ANEURALNETWORKS_TENSOR_INT32} + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30) + * + * Supported tensor rank: from 1 + * + * Inputs: + * * 0: An n-D tensor specifying the input. Must be non-empty. 
+ * * 1: An {@link ANEURALNETWORKS_INT32} scalar specifying the axis to + * reduce across. Negative index is used to specify axis from the + * end (e.g. -1 for the last axis). Must be in the range [-n, n). + * + * Outputs: + * * 0: An (n - 1)-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor. + * If input is 1-dimensional, the output shape is [1]. + * + * Available since API level 29. + */ + ANEURALNETWORKS_ARGMIN = 40, // See ARGMAX for naming discussion. + + /** + * Transform axis-aligned bounding box proposals using bounding box deltas. + * + * Given the positions of bounding box proposals and the corresponding + * bounding box deltas for each class, return the refined bounding box + * regions. The resulting bounding boxes are clipped against the edges of + * the image. + * + * Supported tensor {@link OperandCode}: + * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} + * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} + * * {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM} + * + * Inputs: + * * 0: A 2-D Tensor of shape [num_rois, 4], specifying the locations of the + * bounding box proposals, each line with format [x1, y1, x2, y2]. + * For tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}, + * the zeroPoint must be 0 and the scale must be 0.125. Zero num_rois + * is supported for this tensor. + * * 1: A 2-D Tensor of shape [num_rois, num_classes * 4], specifying the + * bounding box delta for each region of interest and each class. The + * bounding box deltas are organized in the following order + * [dx, dy, dw, dh], where dx and dy are the relative correction factor + * for the center position of the bounding box with respect to the width + * and height, dw and dh are the log-scale relative correction factor + * for the width and height. For input0 of type + * {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}, this tensor should be + * of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} or + * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}. 
Zero num_rois is + * supported for this tensor. + * * 2: An 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor, of shape + * [num_rois], specifying the batch index of each box. Boxes with + * the same batch index are grouped together. Zero num_rois is + * supported for this tensor. + * * 3: A 2-D Tensor of shape [batches, 2], specifying the information of + * each image in the batch, each line with format + * [image_height, image_width]. + * + * Outputs: + * * 0: A tensor of the same {@link OperandCode} as input0, with shape + * [num_rois, num_classes * 4], specifying the coordinates of each + * output bounding box for each class, with format [x1, y1, x2, y2]. + * For type of {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}, the + * scale must be 0.125 and the zero point must be 0. + * + * Available since API level 29. + */ + ANEURALNETWORKS_AXIS_ALIGNED_BBOX_TRANSFORM = 41, + + /** + * A recurrent neural network layer that applies an LSTM cell to a + * sequence of inputs in forward and backward directions. + * + * The op supports cross-linking via an auxiliary input. Regular cell feeds + * one input into the two RNN cells in the following way: + * + * INPUT (INPUT_REVERSED) + * | | + * --------------------- + * | FW_LSTM BW_LSTM | + * --------------------- + * | | + * FW_OUT BW_OUT + * + * An op with cross-linking takes two inputs and feeds them into the RNN + * cells in the following way: + * + * AUX_INPUT (AUX_INPUT_REVERSED) + * | | + * INPUT | (INPUT_R'D.)| + * | | | | + * ----------------------- + * | \ / \ / | + * | FW_LSTM BW_LSTM | + * ----------------------- + * | | + * FW_OUT BW_OUT + * + * The cross-linking mode is enabled iff auxiliary input and auxiliary + * weights are present. While stacking this op on top of itself, this + * allows to connect both forward and backward outputs from previous cell + * to the next cell's input. + * + * Since API level 30 parallel linking mode is supported. 
The mode is + * enabled if auxiliary input is present but auxiliary weights are omitted. + * In this case, the cell feeds inputs into the RNN in the following way: + * + * INPUT (AUX_INPUT_REVERSED) + * | | + * --------------------- + * | FW_LSTM BW_LSTM | + * --------------------- + * | | + * FW_OUT BW_OUT + * + * While stacking this op on top of itself, this allows to connect both + * forward and backward outputs from previous cell to the next cell's + * corresponding inputs. + * + * Supported tensor {@link OperandCode}: + * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} + * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} + * + * Supported tensor rank: 3, either time-major or batch-major. + * + * All input and output tensors must be of the same type. + * + * Inputs: + * * 0: The input. + * A 3-D tensor of shape: + * If time-major: [max_time, batch_size, input_size] + * If batch-major: [batch_size, max_time, input_size] + * where "max_time" is the number of timesteps (sequence length), + * "batch_size" corresponds to the batching dimension, and + * "input_size" is the size of the input. + * * 1: The forward input-to-input weights. Optional. + * A 2-D tensor of shape [fw_num_units, input_size], where “fw_num_units” + * corresponds to the number of forward cell units. + * * 2: The forward input-to-forget weights. + * A 2-D tensor of shape [fw_num_units, input_size]. + * * 3: The forward input-to-cell weights. + * A 2-D tensor of shape [fw_num_units, input_size]. + * * 4: The forward input-to-output weights. + * A 2-D tensor of shape [fw_num_units, input_size]. + * * 5: The forward recurrent-to-input weights. Optional. + * A 2-D tensor of shape [fw_num_units, fw_output_size], where “fw_output_size” + * corresponds to either the number of cell units (i.e., fw_num_units), + * or the second dimension of the “fw_projection_weights”, if defined. + * * 6: The forward recurrent-to-forget weights. + * A 2-D tensor of shape [fw_num_units, fw_output_size]. 
+ * * 7: The forward recurrent-to-cell weights. + * A 2-D tensor of shape [fw_num_units, fw_output_size]. + * * 8: The forward recurrent-to-output weights. + * A 2-D tensor of shape [fw_num_units, fw_output_size]. + * * 9: The forward cell-to-input weights. Optional. + * A 1-D tensor of shape [fw_num_units]. + * * 10: The forward cell-to-forget weights. Optional. + * A 1-D tensor of shape [fw_num_units]. + * * 11: The forward cell-to-output weights. Optional. + * A 1-D tensor of shape [fw_num_units]. + * * 12: The forward input gate bias. Optional. + * A 1-D tensor of shape [fw_num_units]. + * * 13: The forward forget gate bias. + * A 1-D tensor of shape [fw_num_units]. + * * 14: The forward cell gate bias. + * A 1-D tensor of shape [fw_num_units]. + * * 15: The forward output gate bias. + * A 1-D tensor of shape [fw_num_units]. + * * 16: The forward projection weights. Optional. + * A 2-D tensor of shape [fw_output_size, fw_num_units]. + * * 17: The forward projection bias. Optional. + * A 1-D tensor of shape [fw_output_size]. + * * 18: The backward input-to-input weights. Optional. + * A 2-D tensor of shape [bw_num_units, input_size], where “bw_num_units” + * corresponds to the number of backward cell units. + * * 19: The backward input-to-forget weights. + * A 2-D tensor of shape [bw_num_units, input_size]. + * * 20: The backward input-to-cell weights. + * A 2-D tensor of shape [bw_num_units, input_size]. + * * 21: The backward input-to-output weights. + * A 2-D tensor of shape [bw_num_units, input_size]. + * * 22: The backward recurrent-to-input weights. Optional. + * A 2-D tensor of shape [bw_num_units, bw_output_size], where “bw_output_size” + * corresponds to either the number of cell units (i.e., “bw_num_units”), + * or the second dimension of the “bw_projection_weights”, if defined. + * * 23: The backward recurrent-to-forget weights. + * A 2-D tensor of shape [bw_num_units, bw_output_size]. + * * 24: The backward recurrent-to-cell weights. 
+ * A 2-D tensor of shape [bw_num_units, bw_output_size]. + * * 25: The backward recurrent-to-output weights. + * A 2-D tensor of shape [bw_num_units, bw_output_size]. + * * 26: The backward cell-to-input weights. Optional. + * A 1-D tensor of shape [bw_num_units]. + * * 27: The backward cell-to-forget weights. Optional. + * A 1-D tensor of shape [bw_num_units]. + * * 28: The backward cell-to-output weights. Optional. + * A 1-D tensor of shape [bw_num_units]. + * * 29: The backward input gate bias. Optional. + * A 1-D tensor of shape [bw_num_units]. + * * 30: The backward forget gate bias. + * A 1-D tensor of shape [bw_num_units]. + * * 31: The backward cell gate bias. + * A 1-D tensor of shape [bw_num_units]. + * * 32: The backward output gate bias. + * A 1-D tensor of shape [bw_num_units]. + * * 33: The backward projection weights. Optional. + * A 2-D tensor of shape [bw_output_size, bw_num_units]. + * * 34: The backward projection bias. Optional. + * A 1-D tensor of shape [bw_output_size]. + * * 35: The forward input activation state. + * A 2-D tensor of shape [batch_size, bw_output_size]. + * * 36: The forward input cell state. + * A 2-D tensor of shape [batch_size, bw_num_units]. + * * 37: The backward input activation state. + * A 2-D tensor of shape [batch_size, bw_output_size]. + * * 38: The backward input cell state. + * A 2-D tensor of shape [batch_size, bw_num_units]. + * * 39: The auxiliary input. Optional. + * A 3-D tensor of shape [max_time, batch_size, aux_input_size], + * where “batch_size” corresponds to the batching dimension, and + * “aux_input_size” is the size of the auxiliary input. Optional. See + * the docs above for the usage modes explanation. + * * 40: The forward auxiliary input-to-input weights. + * Optional. See the docs above for the usage modes explanation. + * A 2-D tensor of shape [fw_num_units, aux_input_size]. + * * 41: The forward auxiliary input-to-forget weights. + * Optional. 
See the docs above for the usage modes explanation. + * A 2-D tensor of shape [fw_num_units, aux_input_size]. + * * 42: The forward auxiliary input-to-cell weights. + * Optional. See the docs above for the usage modes explanation. + * A 2-D tensor of shape [fw_num_units, aux_input_size]. + * * 43: The forward auxiliary input-to-output weights. + * Optional. See the docs above for the usage modes explanation. + * A 2-D tensor of shape [fw_num_units, aux_input_size]. + * * 44: The backward auxiliary input-to-input weights. + * Optional. See the docs above for the usage modes explanation. + * A 2-D tensor of shape [bw_num_units, aux_input_size]. + * * 45: The backward auxiliary input-to-forget weights. + * Optional. See the docs above for the usage modes explanation. + * A 2-D tensor of shape [bw_num_units, aux_input_size]. + * * 46: The backward auxiliary input-to-cell weights. + * Optional. See the docs above for the usage modes explanation. + * A 2-D tensor of shape [bw_num_units, aux_input_size]. + * * 47: The backward auxiliary input-to-output weights. + * Optional. See the docs above for the usage modes explanation. + * A 2-D tensor of shape [bw_num_units, aux_input_size]. + * * 48: The activation function. + * A value indicating the activation function: + * <ul> + * <li>0: None; + * <li>1: Relu; + * <li>3: Relu6; + * <li>4: Tanh; + * <li>6: Sigmoid. + * </ul> + * * 49: The clipping threshold for the cell state, such + * that values are bound within [-cell_clip, cell_clip]. If set to 0.0 + * then clipping is disabled. + * If all the input tensors have type {@link ANEURALNETWORKS_TENSOR_FLOAT32}, + * this scalar must be of the type {@link ANEURALNETWORKS_FLOAT32}, + * otherwise if all the input tensors have the type + * {@link ANEURALNETWORKS_TENSOR_FLOAT16}, this scalar must be + * of type {@link ANEURALNETWORKS_FLOAT16}. + * * 50: The clipping threshold for the output from the + * projection layer, such that values are bound within + * [-proj_clip, proj_clip]. 
If set to 0.0 then clipping is disabled. + * If all the input tensors have type {@link ANEURALNETWORKS_TENSOR_FLOAT32}, + * this scalar must be of the type {@link ANEURALNETWORKS_FLOAT32}, + * otherwise if all the input tensors have the type + * {@link ANEURALNETWORKS_TENSOR_FLOAT16}, this scalar must be + * of type {@link ANEURALNETWORKS_FLOAT16}. + * * 51: merge_outputs + * An {@link ANEURALNETWORKS_BOOL} scalar specifying if the outputs + * from forward and backward cells should be merged. + * * 52: time_major + * An {@link ANEURALNETWORKS_BOOL} scalar specifying the shape format + * of input and output tensors. + * * 53: The forward input layer normalization weights. Optional. + * A 1-D tensor of shape [fw_num_units]. Used to rescale normalized inputs + * to activation at input gate. + * * 54: The forward forget layer normalization weights. Optional. + * A 1-D tensor of shape [fw_num_units]. Used to rescale normalized inputs + * to activation at forget gate. + * * 55: The forward cell layer normalization weights. Optional. + * A 1-D tensor of shape [fw_num_units]. Used to rescale normalized inputs + * to activation at cell gate. + * * 56: The forward output layer normalization weights. Optional. + * A 1-D tensor of shape [fw_num_units]. Used to rescale normalized inputs + * to activation at output gate. + * * 57: The backward input layer normalization weights. Optional. + * A 1-D tensor of shape [bw_num_units]. Used to rescale normalized inputs + * to activation at input gate. + * * 58: The backward forget layer normalization weights. Optional. + * A 1-D tensor of shape [bw_num_units]. Used to rescale normalized inputs + * to activation at forget gate. + * * 59: The backward cell layer normalization weights. Optional. + * A 1-D tensor of shape [bw_num_units]. Used to rescale normalized inputs + * to activation at cell gate. + * * 60: The backward output layer normalization weights. Optional. + * A 1-D tensor of shape [bw_num_units]. 
Used to rescale normalized inputs + * to activation at output gate. + * + * Outputs: + * * 0: The forward output. + * A 3-D tensor of shape: + * If time-major and not merge_outputs: + * [max_time, batch_size, fw_output_size] + * If time-major and merge_outputs: + * [max_time, batch_size, fw_output_size + bw_output_size] + * If batch-major and not merge_outputs: + * [batch_size, max_time, fw_output_size] + * If batch-major and merge_outputs: + * [batch_size, max_time, fw_output_size + bw_output_size] + * * 1: The backward output. Unused if merge_outputs is true. + * A 3-D tensor of shape: + * If time-major: [max_time, batch_size, bw_output_size] + * If batch-major: [batch_size, max_time, bw_output_size] + * * 2: The forward activation state output. + * A 2-D tensor of shape [batch_size, fw_output_size] containing an + * activation state from the last time step in the sequence. This + * output is optional and can be omitted. If this output is present + * then outputs 3-5 must be present as well. + * Available since API level 30. + * * 3: The forward cell state output. + * A tensor of shape [batch_size, fw_cell_size] containing a cell state + * from the last time step in the sequence. This output is optional + * and can be omitted. If this output is present + * then outputs 2, 4, 5 must be present as well. + * Available since API level 30. + * * 4: The backward activation state output. + * A 2-D tensor of shape [batch_size, bw_output_size] containing an + * activation state from the last time step in the sequence. This + * output is optional and can be omitted. If this output is present + * then outputs 2, 3, 5 must be present as well. + * Available since API level 30. + * * 5: The backward cell state output. + * A tensor of shape [batch_size, bw_cell_size] containing a cell state + * from the last time step in the sequence. This output is optional + * and can be omitted. If this output is present + * then outputs 2-4 must be present as well. 
+ * Available since API level 30. + * + * Available since API level 29. + * + * Important: As of API level 29, there is no way to get the output state tensors out and NNAPI + * does not maintain internal states. This operator does not support the usage pattern in which + * multiple cells are chained and state tensors are propagated. + */ + ANEURALNETWORKS_BIDIRECTIONAL_SEQUENCE_LSTM = 42, + + /** + * A recurrent neural network layer that applies a basic RNN cell to a + * sequence of inputs in forward and backward directions. + * + * This Op unrolls the input along the sequence dimension, and implements + * the following operation for each element in the sequence s = + * 1...sequence_length: + * fw_outputs[s] = fw_state = activation(inputs[s] * fw_input_weights’ + + * fw_state * fw_recurrent_weights’ + fw_bias) + * + * And for each element in sequence t = sequence_length : 1 + * bw_outputs[t] = bw_state = activation(inputs[t] * bw_input_weights’ + + * bw_state * bw_recurrent_weights’ + bw_bias) + * + * Where: + * * “{fw,bw}_input_weights” is a weight matrix that multiplies the inputs; + * * “{fw,bw}_recurrent_weights” is a weight matrix that multiplies the + * current “state” which itself is the output from the previous time step + * computation; + * * “{fw,bw}_bias” is a bias vector (added to each output vector in the + * batch); + * * “activation” is the function passed as the “fused_activation_function” + * argument (if not “NONE”). + * + * The op supports cross-linking via an auxiliary input. 
Regular cell feeds + * one input into the two RNN cells in the following way: + * + * INPUT (INPUT_REVERSED) + * | | + * --------------------- + * | FW_RNN BW_RNN | + * --------------------- + * | | + * FW_OUT BW_OUT + * + * An op with cross-linking takes two inputs and feeds them into the RNN + * cells in the following way: + * + * AUX_INPUT (AUX_INPUT_REVERSED) + * | | + * INPUT | (INPUT_R'D.)| + * | | | | + * ----------------------- + * | \ / \ / | + * | FW_RNN BW_RNN | + * ----------------------- + * | | + * FW_OUT BW_OUT + * + * The cross-linking mode is enabled iff auxiliary input and auxiliary + * weights are present. While stacking this op on top of itself, this + * allows to connect both forward and backward outputs from previous cell + * to the next cell's input. + * + * Since API level 30 parallel linking mode is supported. The mode is + * enabled if auxiliary input is present but auxiliary weights are omitted. + * In this case, the cell feeds inputs into the RNN in the following way: + * + * INPUT (AUX_INPUT_REVERSED) + * | | + * --------------------- + * | FW_RNN BW_RNN | + * --------------------- + * | | + * FW_OUT BW_OUT + * + * While stacking this op on top of itself, this allows to connect both + * forward and backward outputs from previous cell to the next cell's + * corresponding inputs. + * + * Supported tensor {@link OperandCode}: + * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} + * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} + * + * The input tensors must all be the same type. + * + * Inputs: + * * 0: input. + * A 3-D tensor. The shape is defined by the input 6 (timeMajor). If + * it is set to true, then the input has a shape [maxTime, batchSize, + * inputSize], otherwise the input has a shape [batchSize, maxTime, + * inputSize]. + * * 1: fwWeights. + * A 2-D tensor of shape [fwNumUnits, inputSize]. + * * 2: fwRecurrentWeights. + * A 2-D tensor of shape [fwNumUnits, fwNumUnits]. + * * 3: fwBias. + * A 1-D tensor of shape [fwNumUnits]. 
+ * * 4: fwHiddenState. + * A 2-D tensor of shape [batchSize, fwNumUnits]. Specifies a hidden + * state input for the first time step of the computation. + * * 5: bwWeights. + * A 2-D tensor of shape [bwNumUnits, inputSize]. + * * 6: bwRecurrentWeights. + * A 2-D tensor of shape [bwNumUnits, bwNumUnits]. + * * 7: bwBias. + * A 1-D tensor of shape [bwNumUnits]. + * * 8: bwHiddenState + * A 2-D tensor of shape [batchSize, bwNumUnits]. Specifies a hidden + * state input for the first time step of the computation. + * * 9: auxInput. + * A 3-D tensor. The shape is defined by the input 6 (timeMajor). If + * it is set to true, then the input has a shape [maxTime, batchSize, + * auxInputSize], otherwise the input has a shape [batchSize, maxTime, + * auxInputSize]. Can be omitted. See the docs above for the usage + * modes explanation. + * * 10:fwAuxWeights. + * A 2-D tensor of shape [fwNumUnits, auxInputSize]. Can be omitted. + * See the docs above for the usage modes explanation. + * * 11:bwAuxWeights. + * A 2-D tensor of shape [bwNumUnits, auxInputSize]. Can be omitted. + * See the docs above for the usage modes explanation. + * * 12:fusedActivationFunction. + * A {@link FuseCode} value indicating the activation function. If + * “NONE” is specified then it results in a linear activation. + * * 13:timeMajor + * An {@link ANEURALNETWORKS_BOOL} scalar specifying the shape format + * of input and output tensors. + * * 14:mergeOutputs + * An {@link ANEURALNETWORKS_BOOL} scalar specifying if the outputs + * from forward and backward cells are separate (if set to false) or + * concatenated (if set to true). + * Outputs: + * * 0: fwOutput. + * A 3-D tensor. The first two dimensions of the shape are defined by + * the input 6 (timeMajor) and the third dimension is defined by the + * input 14 (mergeOutputs). If timeMajor is set to true, then the first + * two dimensions are [maxTime, batchSize], otherwise they are set to + * [batchSize, maxTime]. 
If mergeOutputs is set to true, then the third + * dimension is equal to (fwNumUnits + bwNumUnits), otherwise it is set + * to fwNumUnits. + * * 1: bwOutput. + * A 3-D tensor. If the input 14 (mergeOutputs) is set to true, then + * this tensor is not produced. The shape is defined by the input 6 + * (timeMajor). If it is set to true, then the shape is set to + * [maxTime, batchSize, bwNumUnits], otherwise the shape is set to + * [batchSize, maxTime, bwNumUnits]. + * * 2: The forward hidden state output. + * A 2-D tensor of shape [batchSize, fwNumUnits] containing a hidden + * state from the last time step in the sequence. This output is + * optional and can be omitted. If this output is present then output + * 3 must be present as well. + * Available since API level 30. + * * 3: The backward hidden state output. + * A 2-D tensor of shape [batchSize, bwNumUnits] containing a hidden + * state from the last time step in the sequence. This output is + * optional and can be omitted. If this output is present then output + * 2 must be present as well. + * Available since API level 30. + * + * Available since API level 29. + * + * Important: As of API level 29, there is no way to get the output state tensors out and NNAPI + * does not maintain internal states. This operator does not support the usage pattern in which + * multiple cells are chained and state tensors are propagated. + */ + ANEURALNETWORKS_BIDIRECTIONAL_SEQUENCE_RNN = 43, + + /** + * Greedily selects a subset of bounding boxes in descending order of score. + * + * This op applies NMS algorithm to each class. In each loop of execution, + * the box with maximum score gets selected and removed from the pending set. + * The scores of the rest of boxes are lowered according to the + * intersection-over-union (IOU) overlapping with the previously selected + * boxes and a specified NMS kernel method. Any boxes with score less + * than a threshold are removed from the pending set. 
+ * + * Three NMS kernels are supported: + * * Hard: score_new = score_old * (1 if IoU < threshold else 0) + * * Linear: score_new = score_old * (1 if IoU < threshold else 1 - IoU) + * * Gaussian: score_new = score_old * exp(- IoU^2 / sigma) + * + * Axis-aligned bounding boxes are represented by its upper-left corner + * coordinate (x1,y1) and lower-right corner coordinate (x2,y2). A valid + * bounding box should satisfy x1 <= x2 and y1 <= y2. + * + * Supported tensor {@link OperandCode}: + * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} + * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30) + * + * Inputs: + * * 0: A 2-D Tensor of shape [num_rois, num_classes], specifying the score + * of each bounding box proposal. The boxes are grouped by batches in the + * first dimension. Zero num_rois is supported for this tensor. + * * 1: A 2-D Tensor specifying the bounding boxes of shape + * [num_rois, num_classes * 4], organized in the order [x1, y1, x2, y2]. + * The boxes are grouped by batches in the first dimension. The sequential + * order of the boxes corresponds with input0. For input0 of type + * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, this tensor should be of + * {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}, with zeroPoint of 0 and + * scale of 0.125. + * For input0 of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}, + * this tensor should be of {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}, + * with zeroPoint of -128 and scale of 0.125. + * Zero num_rois is supported for this tensor. + * * 2: A 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor, of shape + * [num_rois], specifying the batch index of each box. Boxes with + * the same batch index are grouped together. + * * 3: An {@link ANEURALNETWORKS_FLOAT32} scalar, score_threshold. Boxes + * with scores lower than the threshold are filtered before sending + * to the NMS algorithm. 
+ * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the maximum + * number of selected bounding boxes for each image. Set to a negative + * value for unlimited number of output bounding boxes. + * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the NMS + * kernel method, options are 0:hard, 1:linear, 2:gaussian. + * * 6: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the IoU + * threshold in hard and linear NMS kernel. This field is ignored if + * gaussian kernel is selected. + * * 7: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the sigma in + * gaussian NMS kernel. This field is ignored if gaussian kernel is + * not selected. + * * 8: An {@link ANEURALNETWORKS_FLOAT32} scalar, nms_score_threshold. + * Boxes with scores lower than the threshold are dropped during the + * score updating phase in soft NMS. + * + * Outputs: + * * 0: A 1-D Tensor of the same {@link OperandCode} as input0, with shape + * [num_output_rois], specifying the score of each output box. The boxes + * are grouped by batches, but the sequential order in each batch is not + * guaranteed. For type of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} + * or {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}, + * the scale and zero point must be the same as input0. + * * 1: A 2-D Tensor of the same {@link OperandCode} as input1, with shape + * [num_output_rois, 4], specifying the coordinates of each + * output bounding box with the same format as input1. The sequential + * order of the boxes corresponds with output0. For type of + * {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}, the scale must be + * 0.125 and the zero point must be 0. + * * 2: A 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor, of shape + * [num_output_rois], specifying the class of each output box. The + * sequential order of the boxes corresponds with output0. 
+ * * 3: A 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor, of shape + * [num_output_rois], specifying the batch index of each box. Boxes + * with the same batch index are grouped together. + * + * Available since API level 29. + */ + ANEURALNETWORKS_BOX_WITH_NMS_LIMIT = 44, + + /** + * Casts a tensor to a type. + * + * This operation ignores the scale and zeroPoint of quantized tensors, + * e.g. it treats a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} input + * as a tensor of uint8 values. + * + * Supported tensor {@link OperandCode}: + * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} + * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} + * * {@link ANEURALNETWORKS_TENSOR_INT32} + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} + * Since API level 30, casting tensors of the following + * {@link OperandCode} to the same {@link OperandCode} is supported: + * * {@link ANEURALNETWORKS_TENSOR_BOOL8} + * * {@link ANEURALNETWORKS_TENSOR_INT32} + * * {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM} + * * {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM} + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM} + * + * Supported tensor rank: from 1 + * + * Inputs: + * * 0: A tensor. + * + * Outputs: + * * 0: A tensor with the same shape as input0. + * + * Available since API level 29. + */ + ANEURALNETWORKS_CAST = 45, + + /** + * Shuffle the channels of the input tensor. + * + * Given an input tensor and an integer value of num_groups, CHANNEL_SHUFFLE + * divides the channel dimension into num_groups groups, and reorganizes the + * channels by grouping channels with the same index in each group. + * + * Along the channel dimension, the output is calculated using this formula: + * + * output_channel[k * num_groups + g] = input_channel[g * group_size + k] + * + * where group_size = num_channels / num_groups + * + * The number of channels must be divisible by num_groups. 
+ * + * Supported tensor {@link OperandCode}: + * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} + * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30) + * + * Supported tensor rank: up to 4 + * + * Inputs: + * * 0: An n-D tensor, specifying the tensor to be shuffled. + * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the number of + * groups. + * * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the dimension + * channel shuffle would be performed on. Negative index is used to + * specify axis from the end (e.g. -1 for the last axis). Must be in + * the range [-n, n). + * + * Outputs: + * * 0: A tensor of the same {@link OperandCode} and same shape as input0. + * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and + * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, + * the scale and zeroPoint must be the same as input0. + * + * Available since API level 29. + */ + ANEURALNETWORKS_CHANNEL_SHUFFLE = 46, + + /** + * Apply postprocessing steps to bounding box detections. + * + * Bounding box detections are generated by applying transformation on a set + * of predefined anchors with the bounding box deltas from bounding box + * regression. A final step of hard NMS is applied to limit the number of + * returned boxes. + * + * Supported tensor {@link OperandCode}: + * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} + * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} + * + * Inputs: + * * 0: A 3-D Tensor of shape [batches, num_anchors, num_classes], specifying + * the score of each anchor with each class. Class 0 for each + * [batches, num_anchors, 0] is background and will be ignored. + * * 1: A 3-D Tensor of shape [batches, num_anchors, length_box_encoding], with + * the first four values in length_box_encoding specifying the bounding + * box deltas. 
The box deltas are encoded in the order of [dy, dx, dh, dw], + * where dy and dx is the linear-scale relative correction factor for the + * center position of the bounding box with respect to the width and height, + * dh and dw is the log-scale relative correction factor for the width and + * height. All the entries in length_box_encoding beyond the first four + * values are ignored in this operation. + * * 2: A 2-D Tensor of shape [num_anchors, 4], specifying the shape of each + * predefined anchor, with format [ctr_y, ctr_x, h, w], where ctr_y and + * ctr_x are the center position of the box, and h and w are the height + * and the width. + * * 3: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the scaling + * factor for dy in bounding box deltas. + * * 4: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the scaling + * factor for dx in bounding box deltas. + * * 5: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the scaling + * factor for dh in bounding box deltas. + * * 6: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the scaling + * factor for dw in bounding box deltas. + * * 7: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to use regular + * multi-class NMS algorithm that does NMS separately for each class, + * set to false for a faster algorithm that only does one single NMS + * using the highest class score. + * * 8: An {@link ANEURALNETWORKS_INT32} scalar, max_num_detections, specifying + * the maximum number of boxes for the output. Boxes with the lowest + * scores are discarded to meet the limit. + * * 9: An {@link ANEURALNETWORKS_INT32} scalar, only used when input7 is + * set to false, specifying the maximum number of classes per detection. + * * 10: An {@link ANEURALNETWORKS_INT32} scalar, only used when input7 is + * set to true, specifying the maximum number of detections when + * applying NMS algorithm for each single class. + * * 11: A scalar, score_threshold. 
Boxes with scores lower than the + * threshold are filtered before sending to the NMS algorithm. The + * scalar must be of {@link ANEURALNETWORKS_FLOAT16} if input0 is of + * {@link ANEURALNETWORKS_TENSOR_FLOAT16} and of + * {@link ANEURALNETWORKS_FLOAT32} if input0 is of + * {@link ANEURALNETWORKS_TENSOR_FLOAT32}. + * * 12: A scalar, specifying the IoU threshold for hard NMS. The scalar + * must be of {@link ANEURALNETWORKS_FLOAT16} if input0 is of + * {@link ANEURALNETWORKS_TENSOR_FLOAT16} and of + * {@link ANEURALNETWORKS_FLOAT32} if input0 is of + * {@link ANEURALNETWORKS_TENSOR_FLOAT32}. + * * 13: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to include + * background class in the list of label map for the output, set + * to false to not include the background. When the background + * class is included, it has label 0 and the output classes start + * at 1 in the label map, otherwise, the output classes start at 0. + * + * Outputs: + * * 0: A 2-D tensor of the same {@link OperandCode} as input0, with shape + * [batches, max_num_detections], specifying the score of each output + * detections. + * * 1: A 3-D tensor of shape [batches, max_num_detections, 4], specifying the + * coordinates of each output bounding box, with format + * [y1, x1, y2, x2]. + * * 2: A 2-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor, of shape + * [batches, max_num_detections], specifying the class label for each + * output detection. + * * 3: An 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor, of shape [batches], + * specifying the number of valid output detections for each batch. + * + * Available since API level 29. + */ + ANEURALNETWORKS_DETECTION_POSTPROCESSING = 47, + + /** + * For input tensors x and y, computes x == y elementwise. 
+ * + * Supported tensor {@link OperandCode}: + * * {@link ANEURALNETWORKS_TENSOR_BOOL8} + * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} + * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} + * * {@link ANEURALNETWORKS_TENSOR_INT32} + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30) + * + * Supported tensor rank: from 1 + * + * This operation supports broadcasting. + * + * Inputs: + * * 0: A tensor. + * * 1: A tensor of the same {@link OperandCode} and dimensions compatible + * with input0. + * + * Outputs: + * * 0: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8}. + * + * Available since API level 29. + */ + ANEURALNETWORKS_EQUAL = 48, + + /** + * Computes exponential of x element-wise. + * + * Supported tensor {@link OperandCode}: + * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} + * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} + * + * Supported tensor rank: from 1. + * + * Inputs: + * * 0: A tensor. + * + * Outputs: + * * 0: The output tensor of same shape as input0. + * + * Available since API level 29. + */ + ANEURALNETWORKS_EXP = 49, + + /** + * Inserts a dimension of 1 into a tensor's shape. + * + * Given a tensor input, this operation inserts a dimension of 1 at the + * given dimension index of input's shape. The dimension index starts at + * zero; if you specify a negative dimension index, it is counted backward + * from the end. + * + * Supported tensor {@link OperandCode}: + * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} + * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} + * * {@link ANEURALNETWORKS_TENSOR_INT32} + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30) + * + * Supported tensor rank: from 1 + * + * Inputs: + * * 0: An n-D tensor. + * * 1: An {@link ANEURALNETWORKS_INT32} scalar specifying the dimension + * index to expand. Must be in the range [-(n + 1), (n + 1)). 
+ * + * Outputs: + * * 0: An (n + 1)-D tensor with the same {@link OperandCode} and data as + * input0. + * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and + * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, + * the scale and zeroPoint must be the same as input0. + * + * Available since API level 29. + */ + ANEURALNETWORKS_EXPAND_DIMS = 50, + + /** + * Gathers values along an axis. + * + * Produces an output tensor with shape + * input0.dimension[:axis] + indices.dimension + input0.dimension[axis + 1:] + * where: + * # Vector indices (output is rank(input0)). + * output[a_0, ..., a_n, i, b_0, ..., b_n] = + * input0[a_0, ..., a_n, indices[i], b_0, ..., b_n] + * + * # Higher rank indices (output is rank(input0) + rank(indices) - 1). + * output[a_0, ..., a_n, i, ..., j, b_0, ... b_n] = + * input0[a_0, ..., a_n, indices[i, ..., j], b_0, ..., b_n] + * + * Supported tensor {@link OperandCode}: + * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} + * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} + * * {@link ANEURALNETWORKS_TENSOR_INT32} + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30) + * + * Supported tensor rank: from 1 + * + * Inputs: + * * 0: An n-D tensor from which to gather values. + * * 1: An {@link ANEURALNETWORKS_INT32} scalar specifying the axis. + * Negative index is used to specify axis from the end + * (e.g. -1 for the last axis). Must be in the range [-n, n). + * * 2: A k-D tensor {@link ANEURALNETWORKS_TENSOR_INT32} of indices. + * The values must be in the bounds of the corresponding dimensions + * of input0. + * + * Outputs: + * * 0: An (n + k - 1)-D tensor with the same {@link OperandCode} as input0. + * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and + * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, + * the scale and zeroPoint must be the same as input0. + * + * Available since API level 29. 
+ */ + ANEURALNETWORKS_GATHER = 51, + + /** + * Generate axis-aligned bounding box proposals. + * + * Bounding box proposals are generated by applying transformation on a set + * of predefined anchors with the bounding box deltas from bounding box + * regression. A final step of hard NMS is applied to limit the number of + * returned boxes. + * + * Axis-aligned bounding boxes are represented by its upper-left corner + * coordinate (x1,y1) and lower-right corner coordinate (x2,y2). A valid + * bounding box should satisfy x1 <= x2 and y1 <= y2. + * + * Supported tensor {@link OperandCode}: + * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} + * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30) + * + * Inputs: + * * 0: A 4-D Tensor specifying the score of each anchor at each + * location. With "NHWC" data layout, the tensor shape is + * [batches, height, width, num_anchors]. With "NCHW" data layout, + * the tensor shape is [batches, num_anchors, height, width]. + * * 1: A 4-D Tensor specifying the bounding box deltas. With "NHWC" data + * layout, the tensor shape is [batches, height, width, num_anchors * 4]. + * With "NCHW" data layout, the tensor shape is + * [batches, num_anchors * 4, height, width]. The box deltas are encoded + * in the order of [dx, dy, dw, dh], where dx and dy is the linear-scale + * relative correction factor for the center position of the bounding box + * with respect to the width and height, dw and dh is the log-scale + * relative correction factor for the width and height. The last + * dimensions is the channel dimension. + * * 2: A 2-D Tensor of shape [num_anchors, 4], specifying the shape of each + * predefined anchor, with format [x1, y1, x2, y2]. 
For input0 of type + * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} or + * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}, this tensor should be of + * {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM}, with scale of 0.125. + * * 3: A 2-D Tensor of shape [batches, 2], specifying the size of + * each image in the batch, with format [image_height, image_width]. + * For input0 of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} or + * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}, this + * tensor should be of {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM}, with + * scale of 0.125. + * * 4: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the ratio + * from the height of original image to the height of feature map. + * * 5: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the ratio + * from the width of original image to the width of feature map. + * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the maximum + * number of boxes before going into the hard NMS algorithm. Boxes + * with the lowest scores are discarded to meet the limit. Set to + * a non-positive value for unlimited number. + * * 7: An {@link ANEURALNETWORKS_INT32} scalar, specifying the maximum + * number of boxes returning from the hard NMS algorithm. Boxes + * with the lowest scores are discarded to meet the limit. Set to + * a non-positive value for unlimited number. + * * 8: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the IoU + * threshold for hard NMS. + * * 9: An {@link ANEURALNETWORKS_FLOAT32} scalar, min_size. Boxes with + * height or width lower than the absolute threshold are filtered out. + * * 10: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to specify + * NCHW data layout for input0 and input1. Set to false for NHWC. + * + * Outputs: + * * 0: A tensor of the same {@link OperandCode} as input0, of shape + * [num_output_rois], specifying the score of each output box. 
+ * The boxes are grouped by batches, but the sequential order in + * each batch is not guaranteed. For type of + * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} or + * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}, the scale and zero + * point must be the same as input0. + * * 1: A tensor of the same {@link OperandCode} as input3, of shape + * [num_output_rois, 4], specifying the coordinates of each output + * bounding box for each class, with format [x1, y1, x2, y2]. + * The sequential order of the boxes corresponds with output0. + * For type of {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}, the + * scale must be 0.125 and the zero point must be 0. + * * 2: A 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor, of shape + * [num_output_rois], specifying the batch index of each box. Boxes + * with the same batch index are grouped together. + * + * Available since API level 29. + */ + ANEURALNETWORKS_GENERATE_PROPOSALS = 52, + + /** + * For input tensors x and y, computes x > y elementwise. + * + * Supported tensor {@link OperandCode}: + * * {@link ANEURALNETWORKS_TENSOR_BOOL8} + * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} + * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} + * * {@link ANEURALNETWORKS_TENSOR_INT32} + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30) + * + * Supported tensor rank: from 1 + * + * This operation supports broadcasting. + * + * Inputs: + * * 0: A tensor. + * * 1: A tensor of the same {@link OperandCode} and dimensions compatible + * with input0. + * + * Outputs: + * * 0: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8}. + * + * Available since API level 29. + */ + ANEURALNETWORKS_GREATER = 53, + /** + * For input tensors x and y, computes x >= y elementwise. 
+ * + * Supported tensor {@link OperandCode}: + * * {@link ANEURALNETWORKS_TENSOR_BOOL8} + * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} + * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} + * * {@link ANEURALNETWORKS_TENSOR_INT32} + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30) + * + * Supported tensor rank: from 1 + * + * This operation supports broadcasting. + * + * Inputs: + * * 0: A tensor. + * * 1: A tensor of the same {@link OperandCode} and dimensions compatible + * with input0. + * + * Outputs: + * * 0: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8}. + * + * Available since API level 29. + */ + ANEURALNETWORKS_GREATER_EQUAL = 54, + + /** + * Performs a grouped 2-D convolution operation. + * + * Given an input tensor of shape [batches, height, width, depth_in] and a + * filter tensor of shape [depth_out, filter_height, filter_width, depth_group] + * containing depth_out convolutional filters of depth depth_group, GROUPED_CONV + * applies a group of different filters to each input channel group, then + * concatenates the results together. + * + * Specifically, the input channels are divided into num_groups groups, each with + * depth depth_group, i.e. depth_in = num_groups * depth_group. The convolutional + * filters are also divided into num_groups groups, i.e. depth_out is divisible + * by num_groups. GROUPED_CONV applies each group of filters to the corresponding + * input channel group, and the result are concatenated together. + * + * The output dimensions are functions of the filter dimensions, stride, and + * padding. 
+ * + * The values in the output tensor are computed as: + * + * output[b, i, j, g * channel_multiplier + q] = + * sum_{di, dj, dk} ( + * input[b, strides[1] * i + di, strides[2] * j + dj, + * g * depth_group + dk] * + * filter[g * channel_multiplier + q, di, dj, dk] + * ) + bias[channel] + * + * where channel_multiplier = depth_out / num_groups + * + * Supported tensor {@link OperandCode} configurations: + * * 16 bit floating point: + * * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} for input, filter, output, and bias. + * + * * 32 bit floating point: + * * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} for input, filter, output, and bias. + * + * * Quantized: + * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} for input, filter, and output. + * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (with scale set to + * * * input.scale * filter.scale). + * + * * Quantized signed (since API level 30): + * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} for input, filter, and output. + * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (with scale set to + * * * input.scale * filter.scale). + * + * * Quantized with symmetric per channel quantization for the filter: + * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} for input, and output. + * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter. + * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (scale set to 0.0, + * * * each value scaling is separate and equal to input.scale * filter.scales[channel]). + * + * * Quantized signed with filter symmetric per channel quantization (since API level 30): + * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} for input, and output. + * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter. + * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (scale set to 0.0, + * * * each value scaling is separate and equal to input.scale * filter.scales[channel]). + * + * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout. 
+ * With the default data layout NHWC, the data is stored in the order of: + * [batch, height, width, channels]. Alternatively, the data layout could + * be NCHW, the data storage order of: [batch, channels, height, width]. + * + * Both explicit padding and implicit padding are supported. + * + * Inputs (explicit padding): + * * 0: A 4-D tensor, of shape [batches, height, width, depth_in], + * specifying the input, where depth_in = num_groups * depth_group. + * * 1: A 4-D tensor, of shape + * [depth_out, filter_height, filter_width, depth_group], specifying + * the filter, where depth_out must be divisible by num_groups. For + * tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} + * the channel dimension (channelDim at + * {@link ANeuralNetworksSymmPerChannelQuantParams}) must be set to 0. + * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input + * tensor of type {@link ANEURALNETWORKS_TENSOR_FLOAT32} or + * {@link ANEURALNETWORKS_TENSOR_FLOAT16}, the bias must be of the same type. + * For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and + * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} + * the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint + * of 0 and bias_scale == input_scale * filter_scale. For filter tensor + * of {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL}, the bias + * should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint of + * 0 and bias_scale of 0. The actual scale of each value 'i' is equal to + * bias_scale[i] = input_scale * filter_scale[i]. + * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on + * the left, in the ‘width’ dimension. + * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on + * the right, in the ‘width’ dimension. + * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on + * the top, in the ‘height’ dimension. 
+ * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on + * the bottom, in the ‘height’ dimension. + * * 7: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when + * walking through input in the ‘width’ dimension. + * * 8: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when + * walking through input in the ‘height’ dimension. + * * 9: An {@link ANEURALNETWORKS_INT32} scalar, specifying the number of + * groups. + * * 10: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the + * {@link FuseCode} values. Specifies the activation to + * invoke on the result. + * * 11: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to specify + * NCHW data layout for input0 and output0. Set to false for NHWC. + * + * Inputs (implicit padding): + * * 0: A 4-D tensor, of shape [batches, height, width, depth_in], + * specifying the input, where depth_in = num_groups * depth_group. + * * 1: A 4-D tensor, of shape + * [depth_out, filter_height, filter_width, depth_group], specifying + * the filter, where depth_out must be divisible by num_groups. For + * tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} + * the channel dimension (ANeuralNetworksSymmPerChannelQuantParams::channelDim) + * must be set to 0. + * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input + * tensor of type {@link ANEURALNETWORKS_TENSOR_FLOAT32} or + * {@link ANEURALNETWORKS_TENSOR_FLOAT16}, the bias must be of the same type. + * For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and + * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} + * the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint + * of 0 and bias_scale == input_scale * filter_scale. 
For filter tensor + * of {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL}, the bias + * should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint of + * 0 and bias_scale of 0. The actual scale of each value 'i' is equal to + * bias_scale[i] = input_scale * filter_scale[i]. + * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the implicit + * padding scheme, has to be one of the + * {@link PaddingCode} values. + * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when + * walking through input in the ‘width’ dimension. + * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when + * walking through input in the ‘height’ dimension. + * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the number of + * groups. + * * 7: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the + * {@link FuseCode} values. Specifies the activation to + * invoke on the result. + * * 8: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to specify + * NCHW data layout for input0 and output0. Set to false for NHWC. + * + * Outputs: + * * 0: The output 4-D tensor, of shape + * [batches, out_height, out_width, depth_out]. + * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and + * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, + * the scale and zeroPoint can be different from inputs' scale and zeroPoint. + * + * Available since API level 29. + */ + ANEURALNETWORKS_GROUPED_CONV_2D = 55, + + /** + * Localize the maximum keypoints from heatmaps. + * + * This operation approximates the accurate maximum keypoint scores and + * indices after bicubic upscaling by using Taylor expansion up to the + * quadratic term. + * + * The bounding box is represented by its upper-left corner coordinate + * (x1,y1) and lower-right corner coordinate (x2,y2) in the original image. + * A valid bounding box should satisfy x1 <= x2 and y1 <= y2. 
+ * + * Supported tensor {@link OperandCode}: + * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} + * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30) + * + * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout. + * With the default data layout NHWC, the data is stored in the order of: + * [batch, height, width, channels]. Alternatively, the data layout could + * be NCHW, the data storage order of: [batch, channels, height, width]. + * + * Inputs: + * * 0: A 4-D Tensor of shape + * [num_boxes, heatmap_size, heatmap_size, num_keypoints], + * specifying the heatmaps, the height and width of heatmaps should + * be the same, and must be greater than or equal to 2. + * * 1: A 2-D Tensor of shape [num_boxes, 4], specifying the bounding boxes, + * each with format [x1, y1, x2, y2]. For input0 of type + * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, this tensor should + * be of {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}, with zeroPoint + * of 0 and scale of 0.125. + * For input0 of type + * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}, this tensor + * should be of {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}, with + * zeroPoint of -128 and scale of 0.125. + * * 2: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to specify + * NCHW data layout for input0. Set to false for NHWC. + * + * Outputs: + * * 0: A tensor of the same {@link OperandCode} as input0, with shape + * [num_boxes, num_keypoints], specifying score of the keypoints. + * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} or + * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, + * the scale and zeroPoint can be different from input0 scale and zeroPoint. + * * 1: A tensor of the same {@link OperandCode} as input1, with shape + * [num_boxes, num_keypoints, 2], specifying the location of + * the keypoints, the second dimension is organized as + * [keypoint_x, keypoint_y]. 
+ * For type of {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}, the + * scale must be 0.125 and the zero point must be 0. + * + * Available since API level 29. + */ + ANEURALNETWORKS_HEATMAP_MAX_KEYPOINT = 56, + + /** + * Applies instance normalization to the input tensor. + * + * The values in the output tensor are computed as: + * + * output[b, h, w, c] = + * (input[b, h, w, c] - mean[b, c]) * gamma / + * sqrt(var[b, c] + epsilon) + beta + * + * Where the mean and variance are computed across the spatial dimensions: + * + * mean[b, c] = + * sum_{h, w}(input[b, h, w, c]) / sum(1) + * + * var[b, c] = + * sum_{h, w}(pow(input[b, h, w, c] - mean[b, c], 2)) / sum(1) + * + * Supported tensor {@link OperandCode}: + * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} + * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} + * + * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout. + * With the default data layout NHWC, the data is stored in the order of: + * [batch, height, width, channels]. Alternatively, the data layout could + * be NCHW, the data storage order of: [batch, channels, height, width]. + * + * Inputs: + * * 0: An n-D tensor, specifying the tensor to be normalized. + * * 1: A scalar, specifying gamma, the scale applied to the normalized + * tensor. The scalar must be of {@link ANEURALNETWORKS_FLOAT16} if + * input0 is of {@link ANEURALNETWORKS_TENSOR_FLOAT16} and of + * {@link ANEURALNETWORKS_FLOAT32} if input0 is of + * {@link ANEURALNETWORKS_TENSOR_FLOAT32}. + * * 2: A scalar, specifying beta, the offset applied to the normalized + * tensor. The scalar must be of {@link ANEURALNETWORKS_FLOAT16} if + * input0 is of {@link ANEURALNETWORKS_TENSOR_FLOAT16} and of + * {@link ANEURALNETWORKS_FLOAT32} if input0 is of + * {@link ANEURALNETWORKS_TENSOR_FLOAT32}. + * * 3: A scalar, specifying epsilon, the small value added to variance to + * avoid dividing by zero. 
The scalar must be of {@link ANEURALNETWORKS_FLOAT16} if + * input0 is of {@link ANEURALNETWORKS_TENSOR_FLOAT16} and of + * {@link ANEURALNETWORKS_FLOAT32} if input0 is of + * {@link ANEURALNETWORKS_TENSOR_FLOAT32}. + * * 4: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to specify + * NCHW data layout for input0 and output0. Set to false for NHWC. + * + * Outputs: + * * 0: A tensor of the same {@link OperandCode} and same shape as input0. + * + * Available since API level 29. + */ + ANEURALNETWORKS_INSTANCE_NORMALIZATION = 57, + + /** + * For input tensors x and y, computes x < y elementwise. + * + * Supported tensor {@link OperandCode}: + * * {@link ANEURALNETWORKS_TENSOR_BOOL8} + * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} + * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} + * * {@link ANEURALNETWORKS_TENSOR_INT32} + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30) + * + * Supported tensor rank: from 1 + * + * This operation supports broadcasting. + * + * Inputs: + * * 0: A tensor. + * * 1: A tensor of the same {@link OperandCode} and dimensions compatible + * with input0. + * + * Outputs: + * * 0: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8}. + * + * Available since API level 29. + */ + ANEURALNETWORKS_LESS = 58, + + /** + * For input tensors x and y, computes x <= y elementwise. + * + * Supported tensor {@link OperandCode}: + * * {@link ANEURALNETWORKS_TENSOR_BOOL8} + * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} + * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} + * * {@link ANEURALNETWORKS_TENSOR_INT32} + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30) + * + * Supported tensor rank: from 1 + * + * This operation supports broadcasting. + * + * Inputs: + * * 0: A tensor. + * * 1: A tensor of the same {@link OperandCode} and dimensions compatible + * with input0. 
+ * + * Outputs: + * * 0: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8}. + * + * Available since API level 29. + */ + ANEURALNETWORKS_LESS_EQUAL = 59, + + /** + * Computes natural logarithm of x element-wise. + * + * Supported tensor {@link OperandCode}: + * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} + * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} + * + * Supported tensor rank: from 1. + * + * Inputs: + * * 0: A tensor. + * + * Outputs: + * * 0: The output tensor of same shape as input0. + * + * Available since API level 29. + */ + ANEURALNETWORKS_LOG = 60, + + /** + * Returns the truth value of x AND y element-wise. + * + * Supported tensor {@link OperandCode}: + * * {@link ANEURALNETWORKS_TENSOR_BOOL8} + * + * Supported tensor rank: from 1 + * + * This operation supports broadcasting. + * + * Inputs: + * * 0: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8}. + * * 1: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8} and dimensions + * compatible with input0. + * + * Outputs: + * * 0: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8}. + * + * Available since API level 29. + */ + ANEURALNETWORKS_LOGICAL_AND = 61, + + /** + * Computes the truth value of NOT x element-wise. + * + * Supported tensor {@link OperandCode}: + * * {@link ANEURALNETWORKS_TENSOR_BOOL8} + * + * Supported tensor rank: from 1. + * + * Inputs: + * * 0: A tensor. + * + * Outputs: + * * 0: The output tensor of same shape as input0. + * + * Available since API level 29. + */ + ANEURALNETWORKS_LOGICAL_NOT = 62, + + /** + * Returns the truth value of x OR y element-wise. + * + * Supported tensor {@link OperandCode}: + * * {@link ANEURALNETWORKS_TENSOR_BOOL8} + * + * Supported tensor rank: from 1 + * + * This operation supports broadcasting. + * + * Inputs: + * * 0: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8}. + * * 1: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8} and dimensions + * compatible with input0. + * + * Outputs: + * * 0: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8}. 
+ * + * Available since API level 29. + */ + ANEURALNETWORKS_LOGICAL_OR = 63, + + /** + * Computes the log softmax activations given logits. + * + * The output is calculated using this formula: + * + * output = logits * beta - log(reduce_sum(exp(logits * beta), axis)) + * + * Supported tensor {@link OperandCode}: + * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} + * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} + * + * Supported tensor rank: from 1. + * + * Inputs: + * * 0: A tensor specifying the input logits. + * * 1: A scalar, specifying the positive scaling factor for the exponent, + * beta. + * For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT16}, the beta + * value must be of {@link ANEURALNETWORKS_FLOAT16}. + * For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, the beta + * value must be of {@link ANEURALNETWORKS_FLOAT32}. + * * 2: An {@link ANEURALNETWORKS_INT32} scalar specifying the axis to + * reduce across. Negative index is used to specify axis from the + * end (e.g. -1 for the last axis). Must be in the range [-n, n). + * + * Outputs: + * * 0: The output tensor of the same {@link OperandCode} and shape as + * input0. + * + * Available since API level 29. + */ + ANEURALNETWORKS_LOG_SOFTMAX = 64, + + /** + * Returns the element-wise maximum of two tensors. + * + * Supported tensor {@link OperandCode}: + * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} + * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} + * * {@link ANEURALNETWORKS_TENSOR_INT32} + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30) + * + * Supported tensor rank: from 1. + * + * Inputs: + * * 0: A tensor. + * * 1: A tensor of the same {@link OperandCode} and compatible dimensions + * with input0. + * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor, + * the scales and zeroPoint can be different from input0 scale and zeroPoint. + * + * Outputs: + * * 0: A tensor of the same {@link OperandCode} as input0. 
+ * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and + * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, + * the scale and zeroPoint can be different from inputs' scale and zeroPoint. + * + * Available since API level 29. + */ + ANEURALNETWORKS_MAXIMUM = 65, + + /** + * Returns the element-wise minimum of two tensors. + * + * Supported tensor {@link OperandCode}: + * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} + * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} + * * {@link ANEURALNETWORKS_TENSOR_INT32} + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30) + * + * Supported tensor rank: from 1. + * + * Inputs: + * * 0: A tensor. + * * 1: A tensor of the same {@link OperandCode} and compatible dimensions + * with input0. + * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor, + * the scales and zeroPoint can be different from input0 scale and zeroPoint. + * + * Outputs: + * * 0: A tensor of the same {@link OperandCode} as input0. + * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and + * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, + * the scale and zeroPoint can be different from inputs' scale and zeroPoint. + * + * Available since API level 29. + */ + ANEURALNETWORKS_MINIMUM = 66, + + /** + * Computes numerical negative value element-wise. + * + * Supported tensor {@link OperandCode}: + * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} + * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} + * * {@link ANEURALNETWORKS_TENSOR_INT32} + * + * Supported tensor rank: from 1. + * + * Inputs: + * * 0: A tensor. + * + * Outputs: + * * 0: The output tensor of same shape as input0. + * + * Available since API level 29. + */ + ANEURALNETWORKS_NEG = 67, + + /** + * For input tensors x and y, computes x != y elementwise. 
+ * + * Supported tensor {@link OperandCode}: + * * {@link ANEURALNETWORKS_TENSOR_BOOL8} + * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} + * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} + * * {@link ANEURALNETWORKS_TENSOR_INT32} + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30) + * + * Supported tensor rank: from 1 + * + * This operation supports broadcasting. + * + * Inputs: + * * 0: A tensor. + * * 1: A tensor of the same {@link OperandCode} and dimensions compatible + * with input0. + * + * Outputs: + * * 0: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8}. + * + * Available since API level 29. + */ + ANEURALNETWORKS_NOT_EQUAL = 68, + + /** + * Pads a tensor with the given constant value according to the specified + * paddings. + * + * Supported tensor {@link OperandCode}: + * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} + * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30) + * + * Supported tensor rank: up to 4 + * + * Inputs: + * * 0: An n-D tensor, specifying the tensor to be padded. + * * 1: A 2-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, the paddings + * for each spatial dimension of the input tensor. The shape of the + * tensor must be {rank(input0), 2}. + * padding[i, 0] specifies the number of elements to be padded in the + * front of dimension i. + * padding[i, 1] specifies the number of elements to be padded after + * the end of dimension i. + * * 2: An scalar specifying the value to use for padding input0. + * For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT16}, the + * pad value must be of {@link ANEURALNETWORKS_FLOAT16}. + * For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, the + * pad value must be of {@link ANEURALNETWORKS_FLOAT32}. 
+ * For input tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and + * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}, + * the pad value must be of {@link ANEURALNETWORKS_INT32}. The + * scale and zeroPoint are assumed to be the same as in input0. + * + * Outputs: + * * 0: A tensor of the same {@link OperandCode} as input0. The + * output tensor has the same rank as input0, and each + * dimension of the output tensor has the same size as the + * corresponding dimension of the input tensor plus the size + * of the padding: + * output0.dimension[i] = + * padding[i, 0] + input0.dimension[i] + padding[i, 1] + * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and + * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, + * the scale and zeroPoint must be the same as input0. + * + * Available since API level 29. + */ + ANEURALNETWORKS_PAD_V2 = 69, + + /** + * Computes the power of one value to another. + * + * Given a tensor base and a tensor exponent, this operation computes + * base^exponent elementwise. + * + * This operations supports broadcasting. The size of the output is the + * maximum size along each dimension of the input operands. It starts with + * the trailing dimensions, and works its way forward. + * + * For example: + * base.dimension = {4, 1, 2} + * exponent.dimension = {5, 4, 3, 1} + * output.dimension = {5, 4, 3, 2} + * + * Supported tensor {@link OperandCode}: + * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} + * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} + * + * Supported tensor rank: from 1 + * + * Inputs: + * * 0: A tensor specifying the base. + * * 1: A tensor specifying the exponent. + * + * Outputs: + * * 0: An output tensor. + * + * Available since API level 29. + */ + ANEURALNETWORKS_POW = 70, + + /** + * Parametric Rectified Linear Unit. + * + * It follows: f(x) = alpha * x for x < 0, f(x) = x for x >= 0, where alpha + * is a learned array with the same {@link OperandCode} and compatible + * dimensions as input x. 
+     *
+     * Two dimensions are compatible when:
+     *     1. they are equal, or
+     *     2. one of them is 1
+     *
+     * The size of the output is the maximum size along each dimension of the
+     * input operands. It starts with the trailing dimensions, and works its way
+     * forward.
+     *
+     * Example:
+     *     input.dimension  = {4, 1, 2}
+     *     alpha.dimension  = {5, 4, 3, 1}
+     *     output.dimension = {5, 4, 3, 2}
+     *
+     * Supported tensor {@link OperandCode}:
+     * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
+     * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
+     * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
+     * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)
+     *
+     * Supported tensor rank: from 1
+     *
+     * Inputs:
+     * * 0: A tensor, specifying the input.
+     * * 1: A tensor of the same {@link OperandCode}, and compatible dimensions
+     *      as input0, specifying the alpha.
+     *
+     * Outputs:
+     * * 0: A tensor of the same {@link OperandCode} as input0.
+     *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
+     *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
+     *      the scales and zeroPoint can be different from input0 scale and zeroPoint.
+     *
+     * Available since API level 29.
+     */
+    ANEURALNETWORKS_PRELU = 71,
+
+    /**
+     * Quantizes the input tensor.
+     *
+     * The formula for {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} output tensor is:
+     *
+     *     output = max(0, min(255, round(input / scale) + zeroPoint))
+     *
+     * The formula for {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} output
+     * tensor is:
+     *
+     *     output = max(-128, min(127, round(input / scale) + zeroPoint))
+     *
+     * Supported input tensor {@link OperandCode}:
+     * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
+     * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
+     *
+     * Supported output tensor {@link OperandCode}:
+     * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
+     * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30)
+     *
+     * Supported tensor rank: from 1
+     *
+     * Inputs:
+     * * 0: A tensor, may be zero-sized.
+     *
+     * Outputs:
+     * * 0: The output tensor of same shape as input0, but with
+     *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} or
+     *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}.
+     *
+     * Available since API level 29.
+     */
+    ANEURALNETWORKS_QUANTIZE = 72,
+
+    /**
+     * A version of quantized LSTM, using 16 bit quantization for internal
+     * state.
+     *
+     * There is no projection layer, so cell state size is equal to the output
+     * size.
+     *
+     * Inputs:
+     * * 0: A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
+     *      and shape [numBatches, inputSize] specifying the input to the LSTM
+     *      cell. Tensor is quantized with a fixed quantization range of
+     *      [-1, 127/128] (scale = 1/128, zeroPoint = 128).
+     * * 1: The input-to-input weights.
+     *      A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
+     *      and shape [outputSize, inputSize] specifying input-to-input part of
+     *      weights for fully-connected layer inside the LSTM cell.
+     *      Quantization zero point and scale must be the same across all the
+     *      weights.
+     * * 2: The input-to-forget weights.
+     *      A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
+     *      and shape [outputSize, inputSize] specifying input-to-forget part of
+     *      weights for fully-connected layer inside the LSTM cell.
+     *      Quantization zero point and scale must be the same across all the
+     *      weights.
+     * * 3: The input-to-cell weights.
+     *      A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
+     *      and shape [outputSize, inputSize] specifying input-to-cell part of
+     *      weights for fully-connected layer inside the LSTM cell.
+     *      Quantization zero point and scale must be the same across all the
+     *      weights.
+     * * 4: The input-to-output weights.
+     *      A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
+     *      and shape [outputSize, inputSize] specifying input-to-output part of
+     *      weights for fully-connected layer inside the LSTM cell.
+ * Quantization zero point and scale must be the same across all the + * weights. + * * 5: The recurrent-to-input weights. + * A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} + * and shape [outputSize, outputSize] specifying recurrent-to-input part + * of weights for fully-connected layer inside the LSTM cell. + * Quantization zero point and scale must be the same across all the + * weights. + * * 6: The recurrent-to-forget weights. + * A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} + * and shape [outputSize, outputSize] specifying recurrent-to-forget + * part of weights for fully-connected layer inside the LSTM cell. + * Quantization zero point and scale must be the same across all the + * weights. + * * 7: The recurrent-to-cell weights. + * A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} + * and shape [outputSize, outputSize] specifying recurrent-to-cell part + * of weights for fully-connected layer inside the LSTM cell. + * Quantization zero point and scale must be the same across all the + * weights. + * * 8: The recurrent-to-output weights. + * A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} + * and shape [outputSize, outputSize] specifying recurrent-to-output + * part of weights for fully-connected layer inside the LSTM cell. + * Quantization zero point and scale must be the same across all the + * weights. + * * 9: The input gate bias. + * A 1-D tensor of type {@link ANEURALNETWORKS_TENSOR_INT32} and shape + * [outputSize] specifying the bias for the fully-connected layer + * inside the LSTM cell. Bias is quantized with scale being a product + * of input and weights scales and zeroPoint equal to 0. + * * 10:The forget gate bias. + * A 1-D tensor of type {@link ANEURALNETWORKS_TENSOR_INT32} and shape + * [outputSize] specifying the bias for the fully-connected layer + * inside the LSTM cell. 
Bias is quantized with scale being a product
+     *      of input and weights scales and zeroPoint equal to 0.
+     * * 11:The cell bias.
+     *      A 1-D tensor of type {@link ANEURALNETWORKS_TENSOR_INT32} and shape
+     *      [outputSize] specifying the bias for the fully-connected layer
+     *      inside the LSTM cell. Bias is quantized with scale being a product
+     *      of input and weights scales and zeroPoint equal to 0.
+     * * 12:The output gate bias.
+     *      A 1-D tensor of type {@link ANEURALNETWORKS_TENSOR_INT32} and shape
+     *      [outputSize] specifying the bias for the fully-connected layer
+     *      inside the LSTM cell. Bias is quantized with scale being a product
+     *      of input and weights scales and zeroPoint equal to 0.
+     * * 13: A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM}
+     *      and shape [numBatches, outputSize] specifying the cell state from the
+     *      previous time step of the LSTM cell. It is quantized using a
+     *      quantization range of [-2^4, 2^4 * 32767/32768] (scale = 2^4 /
+     *      32768, zeroPoint = 0).
+     * * 14: A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
+     *      and shape [numBatches, outputSize] specifying the output of the LSTM
+     *      cell from previous time-step. Tensor is quantized with a fixed
+     *      quantization range of [-1, 127/128] (scale = 1/128, zeroPoint =
+     *      128).
+     *
+     *
+     * Outputs:
+     * * 0: A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM}
+     *      and shape [numBatches, outputSize] which contains a cell state from
+     *      the current time step. Tensor is quantized using a quantization
+     *      range of [-2^4, 2^4 * 32767/32768] (scale = 2^4 / 32768, zeroPoint =
+     *      0).
+     * * 1: A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
+     *      and shape [numBatches, outputSize] which contains the output value.
+     *      Tensor is quantized with a fixed quantization range of [-1, 127/128]
+     *      (scale = 1/128, zeroPoint = 128).
+     */
+    ANEURALNETWORKS_QUANTIZED_16BIT_LSTM = 73,
+
+    /**
+     * Draws samples from a multinomial distribution.
+ * + * Supported tensor {@link OperandCode}: + * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} + * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} + * + * Inputs: + * * 0: A 2-D tensor with shape [batches, classes], specifying the + * unnormalized log-probabilities for all classes. + * * 1: A scalar {@link ANEURALNETWORKS_INT32}, specifying the number of + * independent samples to draw for each row slice. + * * 2: A 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor with shape [2], + * specifying seeds used to initialize the random distribution. If both + * provided seeds are 0, both will be randomly generated. + * Outputs: + * * 0: A 2-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor with shape + * [batches, samples], containing the drawn samples. + * + * Available since API level 29. + */ + ANEURALNETWORKS_RANDOM_MULTINOMIAL = 74, + + /** + * Reduces a tensor by computing the "logical and" of elements along given + * dimensions. + * + * If keep_dims is true, the reduced dimensions are + * retained with length 1. Otherwise, the rank of the tensor is reduced by + * 1 for each entry in dimensions. + * + * Supported tensor {@link OperandCode}: + * * {@link ANEURALNETWORKS_TENSOR_BOOL8} + * + * Supported tensor rank: up to 4 + * + * Inputs: + * * 0: An n-D tensor. + * * 1: A 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The dimensions + * to reduce. Dimension values must be in the range [-n, n). + * * 2: An {@link ANEURALNETWORKS_BOOL} scalar, keep_dims. If true, + * retains reduced dimensions with length 1. + * + * Outputs: + * * 0: A tensor of the same {@link OperandCode} as input0. + * If all dimensions are reduced and keep_dims is false, the output + * shape is [1]. + * + * Available since API level 29. + */ + ANEURALNETWORKS_REDUCE_ALL = 75, + + /** + * Reduces a tensor by computing the "logical or" of elements along given + * dimensions. + * + * If keep_dims is true, the reduced dimensions are + * retained with length 1. 
Otherwise, the rank of the tensor is reduced by + * 1 for each entry in dimensions. + * + * Supported tensor {@link OperandCode}: + * * {@link ANEURALNETWORKS_TENSOR_BOOL8} + * + * Supported tensor rank: up to 4 + * + * Inputs: + * * 0: An n-D tensor. + * * 1: A 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The dimensions + * to reduce. Dimension values must be in the range [-n, n). + * * 2: An {@link ANEURALNETWORKS_BOOL} scalar, keep_dims. If true, + * retains reduced dimensions with length 1. + * + * Outputs: + * * 0: A tensor of the same {@link OperandCode} as input0. + * If all dimensions are reduced and keep_dims is false, the output + * shape is [1]. + * + * Available since API level 29. + */ + ANEURALNETWORKS_REDUCE_ANY = 76, + + /** + * Reduces a tensor by computing the maximum of elements along given + * dimensions. + * + * If keep_dims is true, the reduced dimensions are + * retained with length 1. Otherwise, the rank of the tensor is reduced by + * 1 for each entry in dimensions. + * + * Supported tensor {@link OperandCode}: + * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} + * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30) + * + * Supported tensor rank: up to 4 + * + * Inputs: + * * 0: An n-D tensor. + * * 1: A 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The dimensions + * to reduce. Dimension values must be in the range [-n, n). + * * 2: An {@link ANEURALNETWORKS_BOOL} scalar, keep_dims. If true, + * retains reduced dimensions with length 1. + * + * Outputs: + * * 0: A tensor of the same {@link OperandCode} as input0. + * If all dimensions are reduced and keep_dims is false, the output + * shape is [1]. + * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and + * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, + * the scale and zeroPoint must be the same as input0. + * + * Available since API level 29. 
+ */ + ANEURALNETWORKS_REDUCE_MAX = 77, + + /** + * Reduces a tensor by computing the minimum of elements along given + * dimensions. + * + * If keep_dims is true, the reduced dimensions are + * retained with length 1. Otherwise, the rank of the tensor is reduced by + * 1 for each entry in dimensions. + * + * Supported tensor {@link OperandCode}: + * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} + * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30) + * + * Supported tensor rank: up to 4 + * + * Inputs: + * * 0: An n-D tensor. + * * 1: A 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The dimensions + * to reduce. Dimension values must be in the range [-n, n). + * * 2: An {@link ANEURALNETWORKS_BOOL} scalar, keep_dims. If true, + * retains reduced dimensions with length 1. + * + * Outputs: + * * 0: A tensor of the same {@link OperandCode} as input0. + * If all dimensions are reduced and keep_dims is false, the output + * shape is [1]. + * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and + * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, + * the scale and zeroPoint must be the same as input0. + * + * Available since API level 29. + */ + ANEURALNETWORKS_REDUCE_MIN = 78, + + /** + * Reduces a tensor by multiplying elements along given dimensions. + * + * If keep_dims is true, the reduced dimensions are + * retained with length 1. Otherwise, the rank of the tensor is reduced by + * 1 for each entry in dimensions. + * + * Supported tensor {@link OperandCode}: + * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} + * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} + * + * Supported tensor rank: up to 4 + * + * Inputs: + * * 0: An n-D tensor. + * * 1: A 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The dimensions + * to reduce. Dimension values must be in the range [-n, n). + * * 2: An {@link ANEURALNETWORKS_BOOL} scalar, keep_dims. 
If true, + * retains reduced dimensions with length 1. + * + * Outputs: + * * 0: A tensor of the same {@link OperandCode} as input0. + * If all dimensions are reduced and keep_dims is false, the output + * shape is [1]. + * + * Available since API level 29. + */ + ANEURALNETWORKS_REDUCE_PROD = 79, + + /** + * Reduces a tensor by summing elements along given dimensions. + * + * If keep_dims is true, the reduced dimensions are + * retained with length 1. Otherwise, the rank of the tensor is reduced by + * 1 for each entry in dimensions. + * + * Supported tensor {@link OperandCode}: + * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} + * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} + * + * Supported tensor rank: up to 4 + * + * Inputs: + * * 0: An n-D tensor. + * * 1: A 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The dimensions + * to reduce. Dimension values must be in the range [-n, n). + * * 2: An {@link ANEURALNETWORKS_BOOL} scalar, keep_dims. If true, + * retains reduced dimensions with length 1. + * + * Outputs: + * * 0: A tensor of the same {@link OperandCode} as input0. + * If all dimensions are reduced and keep_dims is false, the output + * shape is [1]. + * + * Available since API level 29. + */ + ANEURALNETWORKS_REDUCE_SUM = 80, + + /** + * Select and scale the feature map of each region of interest to a unified + * output size by average pooling sampling points from bilinear interpolation. + * + * The region of interest is represented by its upper-left corner coordinate + * (x1,y1) and lower-right corner coordinate (x2,y2) in the original image. + * A spatial scaling factor is applied to map into feature map coordinate. + * A valid region of interest should satisfy x1 <= x2 and y1 <= y2. + * + * No rounding is applied in this operation. The sampling points are unified + * distributed in the pooling bin and their values are calculated by bilinear + * interpolation. 
+ * + * Supported tensor {@link OperandCode}: + * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} + * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30) + * + * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout. + * With the default data layout NHWC, the data is stored in the order of: + * [batch, height, width, channels]. Alternatively, the data layout could + * be NCHW, the data storage order of: [batch, channels, height, width]. + * + * Inputs: + * * 0: A 4-D tensor, specifying the feature map. + * * 1: A 2-D Tensor of shape [num_rois, 4], specifying the locations of + * the regions of interest, each line with format [x1, y1, x2, y2]. + * For input0 of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, + * this tensor should be of {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}, + * with zeroPoint of 0 and scale of 0.125. Zero num_rois is + * supported for this tensor. + * * 2: An 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor, of shape + * [num_rois], specifying the batch index of each box. Boxes with + * the same batch index are grouped together. Zero num_rois is + * supported for this tensor. + * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the output + * height of the output tensor. + * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the output + * width of the output tensor. + * * 5: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the ratio + * from the height of original image to the height of feature map. + * * 6: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the ratio + * from the width of original image to the width of feature map. + * * 7: An {@link ANEURALNETWORKS_INT32} scalar, specifying the number of + * sampling points in height dimension used to compute the output. + * Set to 0 for adaptive value of ceil(roi_height/out_height). 
+ * * 8: An {@link ANEURALNETWORKS_INT32} scalar, specifying the number of + * sampling points in width dimension used to compute the output. + * Set to 0 for adaptive value of ceil(roi_width/out_width). + * * 9: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to specify + * NCHW data layout for input0 and output0. Set to false for NHWC. + * + * Outputs: + * * 0: A tensor of the same {@link OperandCode} as input0. The output + * shape is [num_rois, out_height, out_width, depth]. + * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and + * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, + * the scale and zeroPoint can be different from the input0 scale and zeroPoint. + * + * Available since API level 29. + */ + ANEURALNETWORKS_ROI_ALIGN = 81, + + /** + * Select and scale the feature map of each region of interest to a unified + * output size by max-pooling. + * + * The region of interest is represented by its upper-left corner coordinate + * (x1,y1) and lower-right corner coordinate (x2,y2) in the original image. + * A spatial scaling factor is applied to map into feature map coordinate. + * A valid region of interest should satisfy x1 <= x2 and y1 <= y2. + * + * Rounding is applied in this operation to ensure integer boundary for + * regions of interest and pooling bins. + * + * Supported tensor {@link OperandCode}: + * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} + * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30) + * + * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout. + * With the default data layout NHWC, the data is stored in the order of: + * [batch, height, width, channels]. Alternatively, the data layout could + * be NCHW, the data storage order of: [batch, channels, height, width]. + * + * Inputs: + * * 0: A 4-D tensor, specifying the feature map. 
+ * * 1: A 2-D Tensor of shape [num_rois, 4], specifying the locations of + * the regions of interest, each line with format [x1, y1, x2, y2]. + * For input0 of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and + * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, + * this tensor should be of {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}, + * with zeroPoint of 0 and scale of 0.125. + * * 2: An 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor, of shape + * [num_rois], specifying the batch index of each box. Boxes with + * the same batch index are grouped together. + * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the output + * height of the output tensor. + * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the output + * width of the output tensor. + * * 5: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the ratio + * from the height of original image to the height of feature map. + * * 6: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the ratio + * from the width of original image to the width of feature map. + * * 7: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to specify + * NCHW data layout for input0 and output0. Set to false for NHWC. + * + * Outputs: + * * 0: A tensor of the same {@link OperandCode} as input0. The output + * shape is [num_rois, out_height, out_width, depth]. + * For input0 of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and + * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, + * the scale and zeroPoint must be the same as input0. + * + * Available since API level 29. + */ + ANEURALNETWORKS_ROI_POOLING = 82, + + /** + * Computes reciprocal of square root of x element-wise. + * + * Supported tensor {@link OperandCode}: + * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} + * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} + * + * Supported tensor rank: from 1. + * + * Inputs: + * * 0: A tensor. + * + * Outputs: + * * 0: The output tensor of same shape as input0. 
+ * + * Available since API level 29. + */ + ANEURALNETWORKS_RSQRT = 83, + + /** + * Using a tensor of booleans c and input tensors x and y select values + * elementwise from both input tensors: + * + * O[i] = C[i] ? x[i] : y[i]. + * + * Supported tensor {@link OperandCode}: + * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} + * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} + * * {@link ANEURALNETWORKS_TENSOR_INT32} + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30) + * + * Supported tensor rank: from 1 + * + * Inputs: + * * 0: A tensor of type {@link ANEURALNETWORKS_TENSOR_BOOL8} acting as a + * mask that chooses, based on the value at each element, whether the + * corresponding element in the output should be taken from input1 (if + * true) or input2 (if false). + * * 1: An input tensor of the same shape as input0. + * * 2: An input tensor of the same shape and type as input1. + * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} + * and {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, + * the scales and zeroPoint can be different from input1 scale and zeroPoint. + * + * Outputs: + * * 0: A tensor of the same type and shape as input1 and input2. + * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor, + * the scale and zeroPoint can be different from inputs' scale and zeroPoint. + * + * Available since API level 29. + */ + ANEURALNETWORKS_SELECT = 84, + + /** + * Computes sin of x element-wise. + * + * Supported tensor {@link OperandCode}: + * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} + * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} + * + * Supported tensor rank: from 1. + * + * Inputs: + * * 0: A tensor. + * + * Outputs: + * * 0: The output tensor of same shape as input0. + * + * Available since API level 29. + */ + ANEURALNETWORKS_SIN = 85, + + /** + * Extracts a slice of specified size from the input tensor starting at a + * specified location. 
+ * + * The starting location is specified as a 1-D tensor containing offsets + * for each dimension. The size is specified as a 1-D tensor containing + * either size of a slice along corresponding dimension or -1. In the latter + * case, all the remaining elements in dimension are included in the slice. + * + * A sum of begin offset and a size of a slice must not exceed size of a + * corresponding dimension. + * + * Supported tensor {@link OperandCode}: + * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} + * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} + * * {@link ANEURALNETWORKS_TENSOR_INT32} + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30) + * + * Supported tensor rank: from 1 + * + * Inputs: + * * 0: An n-D tensor to take slice from, may be zero-sized. + * * 1: A 1-D tensor of type {@link ANEURALNETWORKS_TENSOR_INT32} specifying + * the beginning indices of the slice in each dimension. + * * 2: A 1-D tensor of type {@link ANEURALNETWORKS_TENSOR_INT32} specifying + * the size of the slice in each dimension. + * + * Outputs: + * * 0: An n-D tensor of the same type as the input containing the slice. + * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and + * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, + * its scale and zeroPoint has to be same as the input0 scale and zeroPoint. + * + * Available since API level 29. + */ + ANEURALNETWORKS_SLICE = 86, + + /** + * Splits a tensor along a given axis into num_splits subtensors. + * + * Supported tensor {@link OperandCode}: + * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} + * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} + * * {@link ANEURALNETWORKS_TENSOR_INT32} + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30) + * + * Supported tensor rank: from 1 + * + * Inputs: + * * 0: An n-D tensor to split. 
+ * * 1: An {@link ANEURALNETWORKS_INT32} scalar specifying the axis along + * which to split. + * * 2: An {@link ANEURALNETWORKS_INT32} scalar indicating the number of + * splits along given axis. Must evenly divide axis size. + * + * Outputs: + * * 0 ~ (num_splits - 1): Resulting subtensors. + * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and + * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, + * the scale and zeroPoint must be the same as input0. + * + * Available since API level 29. + */ + ANEURALNETWORKS_SPLIT = 87, + + /** + * Computes square root of x element-wise. + * + * Supported tensor {@link OperandCode}: + * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} + * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} + * + * Supported tensor rank: from 1. + * + * Inputs: + * * 0: A tensor. + * + * Outputs: + * * 0: The output tensor of same shape as input0. + * + * Available since API level 29. + */ + ANEURALNETWORKS_SQRT = 88, + + /** + * Constructs a tensor by tiling a given tensor. + * + * This operation creates a new tensor by replicating `input` `multiples` + * times. The output tensor's i-th dimension has `input.dims(i) * multiples[i]` + * elements, and the values of `input` are replicated `multiples[i]` times + * along the i-th dimension. + * For example, tiling `[a b c d]` by `[2]` produces `[a b c d a b c d]`. + * + * Supported tensor {@link OperandCode}: + * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} + * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} + * * {@link ANEURALNETWORKS_TENSOR_INT32} + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30) + * + * Supported tensor rank: from 1 + * + * Inputs: + * * 0: input, an n-D tensor specifying the input. + * * 1: multiples, a 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. + * The length of multiples must be n. + * + * Outputs: + * * 0: A tiled tensor of the same {@link OperandCode} and rank as `input`. 
+ * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and + * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, + * the scale and zeroPoint must be the same as input0. + * + * Available since API level 29. + */ + ANEURALNETWORKS_TILE = 89, + + /** + * Finds values and indices of the k largest entries for the last dimension. + * + * Resulting values in each dimensions are sorted in descending order. If + * two values are equal, the one with larger index appears first. + * + * Supported tensor {@link OperandCode}: + * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} + * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} + * * {@link ANEURALNETWORKS_TENSOR_INT32} + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30) + * + * Supported tensor rank: from 1 + * + * Inputs: + * * 0: input, an n-D tensor specifying the input. + * * 1: k, an {@link ANEURALNETWORKS_INT32} scalar, specifying the number of + * top elements to look for along the last dimension. + * + * Outputs: + * * 0: An n-D tensor of the same type as the input, containing the k + * largest elements along each last dimensional slice. + * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and + * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, + * the scale and zeroPoint must be the same as input0. + * * 1: An n-D tensor of type {@link ANEURALNETWORKS_TENSOR_INT32} + * containing the indices of values within the last dimension of input. + * + * Available since API level 29. + */ + ANEURALNETWORKS_TOPK_V2 = 90, + + /** + * Performs the transpose of 2-D convolution operation. + * + * This operation is sometimes called "deconvolution" after Deconvolutional + * Networks, but is actually the transpose (gradient) of + * {@link ANEURALNETWORKS_CONV_2D} rather than an actual deconvolution. + * + * The output dimensions are functions of the filter dimensions, stride, and + * padding. 
+ * + * Supported tensor {@link OperandCode} configurations: + * * 16 bit floating point: + * * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} for input, filter, output, and bias. + * + * * 32 bit floating point: + * * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} for input, filter, output, and bias. + * + * * Quantized: + * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} for input, filter, and output. + * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (with scale set to + * * * input.scale * filter.scale). + * + * * Quantized with symmetric per channel quantization for the filter: + * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} for input, and output. + * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter. + * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (scale set to 0.0, + * * * each value scaling is separate and equal to input.scale * filter.scales[channel]). + * + * Available since API level 30: + * * Quantized signed (since API level 30): + * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} for input, filter, and output. + * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (with scale set to + * * * input.scale * filter.scale). + * + * * Quantized signed with filter symmetric per channel quantization (since API level 30): + * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} for input, and output. + * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter. + * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (scale set to 0.0, + * * * each value scaling is separate and equal to input.scale * filter.scales[channel]). + * + * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout. + * With the default data layout NHWC, the data is stored in the order of: + * [batch, height, width, channels]. Alternatively, the data layout could + * be NCHW, the data storage order of: [batch, channels, height, width]. + * + * Both explicit padding and implicit padding are supported. 
+ * + * Inputs (explicit padding): + * * 0: A 4-D tensor, of shape [batches, height, width, depth_in], + * specifying the input. + * Since API level 29, zero batches is supported for this tensor. + * * 1: A 4-D tensor, of shape + * [depth_out, filter_height, filter_width, depth_in], specifying the + * filter. For tensor of type + * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} the channel + * dimension (ANeuralNetworksSymmPerChannelQuantParams::channelDim) must be set to 0. + * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input + * tensor of type {@link ANEURALNETWORKS_TENSOR_FLOAT32} or + * {@link ANEURALNETWORKS_TENSOR_FLOAT16}, the bias must be of the + * same type. + * For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} + * and {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}, + * the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32}, + * with zeroPoint of 0 and bias_scale == input_scale * filter_scale. + * For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL}, + * the bias must be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint of 0 + * and bias_scale of 0. The actual scale of each value 'i' is equal to + * bias_scale[i] = input_scale * filter_scale[i]. + * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on + * the left, in the ‘width’ dimension. + * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on + * the right, in the ‘width’ dimension. + * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on + * the top, in the ‘height’ dimension. + * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on + * the bottom, in the ‘height’ dimension. + * * 7: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when + * walking through input in the ‘width’ dimension. + * * 8: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when + * walking through input in the ‘height’ dimension. 
+ * * 9: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the + * {@link FuseCode} values. Specifies the activation to + * invoke on the result. + * * 10: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to specify + * NCHW data layout for input0 and output0. Set to false for NHWC. + * + * Inputs (implicit padding): + * * 0: A 4-D tensor, of shape [batches, height, width, depth_in], + * specifying the input. + * Since API level 29, zero batches is supported for this tensor. + * * 1: A 4-D tensor, of shape + * [depth_out, filter_height, filter_width, depth_in], specifying the + * filter. For tensor of type + * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} the channel + * dimension (ANeuralNetworksSymmPerChannelQuantParams::channelDim) must be set to 0. + * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input + * tensor of type {@link ANEURALNETWORKS_TENSOR_FLOAT32} or + * {@link ANEURALNETWORKS_TENSOR_FLOAT16}, the bias should be of the + * same type. + * For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} + * and {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}, + * the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32}, + * with zeroPoint of 0 and bias_scale == input_scale * filter_scale. + * For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL}, + * the bias must be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint of 0 + * and bias_scale of 0. The actual scale of each value 'i' is equal to + * bias_scale[i] = input_scale * filter_scale[i]. + * * 3: An {@link ANEURALNETWORKS_TENSOR_INT32} tensor, specifying the output + * tensor shape. + * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the implicit + * padding scheme, has to be one of the + * {@link PaddingCode} values. + * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when + * walking through input in the ‘width’ dimension. 
+ * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when + * walking through input in the ‘height’ dimension. + * * 7: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the + * {@link FuseCode} values. Specifies the activation to + * invoke on the result. + * * 8: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to specify + * NCHW data layout for input0 and output0. Set to false for NHWC. + * + * Outputs: + * * 0: The output 4-D tensor, of shape + * [batches, out_height, out_width, depth_out]. + * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and + * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, + * the scale and zeroPoint can be different from inputs' scale and zeroPoint. + * + * Available since API level 29. + */ + ANEURALNETWORKS_TRANSPOSE_CONV_2D = 91, + + /** + * A recurrent neural network specified by an LSTM cell. + * + * Performs (fully) dynamic unrolling of input. + * + * This Op unrolls the input along the time dimension, and implements the + * following operation for each element in the sequence + * s = 1...sequence_length: + * outputs[s] = projection(state = activation(LSTMOp(inputs[s]))) + * + * Where LSTMOp is the LSTM op as in {@link ANEURALNETWORKS_LSTM}, + * the "projection" is an optional projection layer from state and output + * and the “activation” is the function passed as the + * “fused_activation_function” argument (if not “NONE”). + * + * Supported tensor {@link OperandCode}: + * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} + * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} + * + * Supported tensor rank: 3, either time-major or batch-major. + * + * All input and output tensors must be of the same type. + * + * Inputs: + * * 0: The input (\f$x_t\f$). 
+ * A 3-D tensor of shape: + * If time-major: [max_time, batch_size, input_size] + * If batch-major: [batch_size, max_time, input_size] + * where “max_time” is the number of timesteps (sequence length), + * “batch_size” corresponds to the batching dimension, and + * “input_size” is the size of the input. + * * 1: The input-to-input weights (\f$W_{xi}\f$). Optional. + * A 2-D tensor of shape [num_units, input_size], where “num_units” + * corresponds to the number of cell units. + * * 2: The input-to-forget weights (\f$W_{xf}\f$). + * A 2-D tensor of shape [num_units, input_size]. + * * 3: The input-to-cell weights (\f$W_{xc}\f$). + * A 2-D tensor of shape [num_units, input_size]. + * * 4: The input-to-output weights (\f$W_{xo}\f$). + * A 2-D tensor of shape [num_units, input_size]. + * * 5: The recurrent-to-input weights (\f$W_{hi}\f$). Optional. + * A 2-D tensor of shape [num_units, output_size], where “output_size” + * corresponds to either the number of cell units (i.e., “num_units”), + * or the second dimension of the “projection_weights”, if defined. + * * 6: The recurrent-to-forget weights (\f$W_{hf}\f$). + * A 2-D tensor of shape [num_units, output_size]. + * * 7: The recurrent-to-cell weights (\f$W_{hc}\f$). + * A 2-D tensor of shape [num_units, output_size]. + * * 8: The recurrent-to-output weights (\f$W_{ho}\f$). + * A 2-D tensor of shape [num_units, output_size]. + * * 9: The cell-to-input weights (\f$W_{ci}\f$). Optional. + * A 1-D tensor of shape [num_units]. + * * 10:The cell-to-forget weights (\f$W_{cf}\f$). Optional. + * A 1-D tensor of shape [num_units]. + * * 11:The cell-to-output weights (\f$W_{co}\f$). Optional. + * A 1-D tensor of shape [num_units]. + * * 12:The input gate bias (\f$b_i\f$). Optional. + * A 1-D tensor of shape [num_units]. + * * 13:The forget gate bias (\f$b_f\f$). + * A 1-D tensor of shape [num_units]. + * * 14:The cell bias (\f$b_c\f$). + * A 1-D tensor of shape [num_units]. + * * 15:The output gate bias (\f$b_o\f$). 
+ * A 1-D tensor of shape [num_units]. + * * 16:The projection weights (\f$W_{proj}\f$). Optional. + * A 2-D tensor of shape [output_size, num_units]. + * * 17:The projection bias (\f$b_{proj}\f$). Optional. + * A 1-D tensor of shape [output_size]. + * * 18:The output state (in) (\f$h_{t-1}\f$). + * A 2-D tensor of shape [batch_size, output_size]. + * * 19:The cell state (in) (\f$C_{t-1}\f$). + * A 2-D tensor of shape [batch_size, num_units]. + * * 20:The activation function (\f$g\f$). + * A value indicating the activation function: + * <ul> + * <li>0: None; + * <li>1: Relu; + * <li>3: Relu6; + * <li>4: Tanh; + * <li>6: Sigmoid. + * </ul> + * * 21:The clipping threshold (\f$t_{cell}\f$) for the cell state, such + * that values are bound within [-cell_clip, cell_clip]. If set to 0.0 + * then clipping is disabled. + * * 22:The clipping threshold (\f$t_{proj}\f$) for the output from the + * projection layer, such that values are bound within + * [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled. + * * 23:Time-major if true, batch-major if false. + * * 24:The input layer normalization weights. Optional. + * A 1-D tensor of shape [num_units]. Used to rescale normalized inputs + * to activation at input gate. + * * 25:The forget layer normalization weights. Optional. + * A 1-D tensor of shape [num_units]. Used to rescale normalized inputs + * to activation at forget gate. + * * 26:The cell layer normalization weights. Optional. + * A 1-D tensor of shape [num_units]. Used to rescale normalized inputs + * to activation at cell gate. + * * 27:The output layer normalization weights. Optional. + * A 1-D tensor of shape [num_units]. Used to rescale normalized inputs + * to activation at output gate. + * + * Outputs: + * * 0: The output (\f$o_t\f$). 
+ * A 3-D tensor of shape: + * If time-major: [max_time, batch_size, output_size] + * If batch-major: [batch_size, max_time, output_size] + * * 1: A tensor of shape [batch_size, output_size] containing a hidden + * state from the last time step in the sequence. This output is + * optional and can be omitted. If this output is present then + * output #2 must be present as well. + * Available since API level 30. + * * 2: A tensor of shape [batch_size, cell_size] containing a cell state + * from the last time step in the sequence. This output is optional + * and can be omitted. + * Available since API level 30. + * + * Available since API level 29. + * + * Important: As of API level 29, there is no way to get the output state tensors out and NNAPI + * does not maintain internal states. This operator does not support the usage pattern in which + * multiple cells are chained and state tensors are propagated. + */ + ANEURALNETWORKS_UNIDIRECTIONAL_SEQUENCE_LSTM = 92, + + /** + * A recurrent neural network layer that applies a basic RNN cell to a + * sequence of inputs. + * + * This layer unrolls the input along the sequence dimension, and implements + * the following operation + * for each element in the sequence s = 1...sequence_length: + * outputs[s] = state = activation(inputs[s] * input_weights’ + state * + * recurrent_weights’ + bias) + * + * Where: + * * “input_weights” is a weight matrix that multiplies the inputs; + * * “recurrent_weights” is a weight matrix that multiplies the current + * “state” which itself is the output from the previous time step + * computation; + * * “bias” is a bias vector (added to each output vector in the batch); + * * “activation” is the function passed as the “fused_activation_function” + * argument (if not “NONE”). + * + * Supported tensor {@link OperandCode}: + * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} + * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} + * + * The input tensors must all be the same type. + * + * Inputs: + * * 0: input. 
+ * A 3-D tensor. The shape is defined by the input 6 (timeMajor). If + * it is set to 1, then the input has a shape [maxTime, batchSize, + * inputSize], otherwise the input has a shape [batchSize, maxTime, + * inputSize]. + * * 1: weights. + * A 2-D tensor of shape [numUnits, inputSize]. + * * 2: recurrent_weights. + * A 2-D tensor of shape [numUnits, numUnits]. + * * 3: bias. + * A 1-D tensor of shape [numUnits]. + * * 4: hidden state + * A 2-D tensor of shape [batchSize, numUnits]. Specifies a hidden + * state input for the first time step of the computation. + * * 5: fusedActivationFunction. + * A {@link FuseCode} value indicating the activation function. If + * “NONE” is specified then it results in a linear activation. + * * 6: timeMajor + * An {@link ANEURALNETWORKS_INT32} scalar specifying the shape format + * of input and output tensors. Must be set to either 0 or 1. + * Outputs: + * * 0: output. + * A 3-D tensor. The shape is defined by the input 6 (timeMajor). If + * it is set to 1, then the output has a shape [maxTime, batchSize, + * numUnits], otherwise the output has a shape [batchSize, maxTime, + * numUnits]. + * * 1: A tensor of shape [batchSize, numUnits] containing hidden state + * from the last time step in the sequence. This output is optional + * and can be omitted. + * Available since API level 30. + * + * Available since API level 29. + * + * Important: As of API level 29, there is no way to get the output state tensors out and NNAPI + * does not maintain internal states. This operator does not support the usage pattern in which + * multiple cells are chained and state tensors are propagated. + */ + ANEURALNETWORKS_UNIDIRECTIONAL_SEQUENCE_RNN = 93, + + /** + * Resizes images to given size using the nearest neighbor interpretation. + * + * Resized images must be distorted if their output aspect ratio is not the + * same as input aspect ratio. The corner pixels of output may not be the + * same as corner pixels of input. 
+ * + * Supported tensor {@link OperandCode}: + * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} + * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since API level 30) + * + * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout. + * With the default data layout NHWC, the data is stored in the order of: + * [batch, height, width, channels]. Alternatively, the data layout could + * be NCHW, the data storage order of: [batch, channels, height, width]. + * + * Both resizing by shape and resizing by scale are supported. + * + * Inputs (resizing by shape): + * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying + * the input. Zero batches is supported for this tensor. + * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the output + * width of the output tensor. + * * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the output + * height of the output tensor. + * * 3: An {@link ANEURALNETWORKS_BOOL} scalar, default to false. + * Set to true to specify NCHW data layout for input0 and output0. + * * 4: Align corners. An optional {@link ANEURALNETWORKS_BOOL} + * scalar, default to false. If True, the centers of the 4 corner + * pixels of the input and output tensors are aligned, preserving the + * values at the corner pixels. + * Available since API level 30. + * * 5: Half pixel centers. An optional {@link ANEURALNETWORKS_BOOL} + * scalar, default to false. If True, the pixel centers are assumed to + * be at (0.5, 0.5). This is the default behavior of image.resize in + * TF 2.0. If this parameter is True, then align_corners parameter + * must be False. + * Available since API level 30. + * + * Inputs (resizing by scale): + * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying + * the input. Zero batches is supported for this tensor. 
+ * * 1: A scalar, specifying width_scale, the scaling factor of the width + * dimension from the input tensor to the output tensor. The output + * width is calculated as new_width = floor(width * width_scale). + * The scalar must be of {@link ANEURALNETWORKS_FLOAT16} if input0 is + * of {@link ANEURALNETWORKS_TENSOR_FLOAT16} and of + * {@link ANEURALNETWORKS_FLOAT32} otherwise. + * * 2: A scalar, specifying height_scale, the scaling factor of the height + * dimension from the input tensor to the output tensor. The output + * height is calculated as new_height = floor(height * height_scale). + * The scalar must be of {@link ANEURALNETWORKS_FLOAT16} if input0 is + * of {@link ANEURALNETWORKS_TENSOR_FLOAT16} and of + * {@link ANEURALNETWORKS_FLOAT32} otherwise. + * * 3: An {@link ANEURALNETWORKS_BOOL} scalar, default to false. + * Set to true to specify NCHW data layout for input0 and output0. + * * 4: Align corners. An optional {@link ANEURALNETWORKS_BOOL} + * scalar, default to false. If True, the centers of the 4 corner + * pixels of the input and output tensors are aligned, preserving the + * values at the corner pixels. + * Available since API level 30. + * * 5: Half pixel centers. An optional {@link ANEURALNETWORKS_BOOL} + * scalar, default to false. If True, the pixel centers are assumed to + * be at (0.5, 0.5). This is the default behavior of image.resize in + * TF 2.0. If this parameter is True, then align_corners parameter + * must be False. + * Available since API level 30. + * + * Outputs: + * * 0: The output 4-D tensor, of shape + * [batches, new_height, new_width, depth]. + * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and + * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, + * the scale and zeroPoint must be the same as input0. + * + * Available since API level 29. + */ + ANEURALNETWORKS_RESIZE_NEAREST_NEIGHBOR = 94, + + // Operations below are available since API level 30. + + /** + * Quantized version of {@link ANEURALNETWORKS_LSTM}. 
+ * + * The input and the output use asymmetric quantized types, while the rest + * use symmetric ones. + * + * Inputs: + * * 0: The input to the LSTM cell. + * Type: {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} + * Shape: [batchSize, inputSize] + * * 1: The input-to-input weights. Optional. + * Type: {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM} + * Shape: [numUnits, inputSize] + * * 2: The input-to-forget weights. + * Type: {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM} + * Shape: [numUnits, inputSize] + * * 3: The input-to-cell weights. + * Type: {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM} + * Shape: [numUnits, inputSize] + * * 4: The input-to-output weights. + * Type: {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM} + * Shape: [numUnits, inputSize] + * * 5: The recurrent-to-input weights. Optional. + * Type: {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM} + * Shape: [numUnits, outputSize] + * * 6: The recurrent-to-forget weights. + * Type: {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM} + * Shape: [numUnits, outputSize] + * * 7: The recurrent-to-cell weights. + * Type: {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM} + * Shape: [numUnits, outputSize] + * * 8: The recurrent-to-output weights. + * Type: {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM} + * Shape: [numUnits, outputSize] + * * 9: The cell-to-input weights (for peephole). Optional. + * Type: {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM} + * Shape: [numUnits] + * * 10: The cell-to-forget weights (for peephole). Optional. + * Type: {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM} + * Shape: [numUnits] + * * 11: The cell-to-output weights (for peephole). Optional. + * Type: {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM} + * Shape: [numUnits] + * * 12: The input gate bias. Quantized with scale being the + * product of input and weights scales and zeroPoint equal to 0. + * Optional. + * Type: {@link ANEURALNETWORKS_TENSOR_INT32} + * Shape: [numUnits] + * * 13: The forget gate bias. 
Quantized with scale being the + * product of input and weights scales and zeroPoint equal to 0. + * Type: {@link ANEURALNETWORKS_TENSOR_INT32} + * Shape: [numUnits] + * * 14: The cell bias. Quantized with scale being the + * product of input and weights scales and zeroPoint equal to 0. + * Type: {@link ANEURALNETWORKS_TENSOR_INT32} + * Shape: [numUnits] + * * 15: The output gate bias. Quantized with scale being the + * product of input and weights scales and zeroPoint equal to 0. + * Type: {@link ANEURALNETWORKS_TENSOR_INT32} + * Shape: [numUnits] + * * 16: The projection weights. Optional. + * Type: {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM} + * Shape: [outputSize, numUnits] + * * 17: The projection bias. Quantized with scale being the + * product of input and weights scales and zeroPoint equal to 0. + * Optional. + * Type: {@link ANEURALNETWORKS_TENSOR_INT32} + * Shape: [outputSize] + * * 18: The output from the previous time step. + * Type: {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} + * Shape: [batchSize, outputSize] + * * 19: The cell state from the previous time step. + * Type: {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM} + * Shape: [batchSize, numUnits] + * * 20: The input layer normalization weights. Used to rescale + * normalized inputs to activation at input gate. Optional. + * Type: {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM} + * Shape: [numUnits] + * * 21: The forget layer normalization weights. Used to + * rescale normalized inputs to activation at forget gate. Optional. + * Type: {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM} + * Shape: [numUnits] + * * 22: The cell layer normalization weights. Used to rescale + * normalized inputs to activation at cell gate. Optional. + * Type: {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM} + * Shape: [numUnits] + * * 23: The output layer normalization weights. Used to + * rescale normalized inputs to activation at output gate. Optional. 
+ * Type: {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM} + * Shape: [numUnits] + * * 24: The cell clip. If provided the cell state is clipped + * by this value prior to the cell output activation. Optional. + * Type: {@link ANEURALNETWORKS_FLOAT32}. + * * 25: The projection clip. If provided and projection is enabled, + * this is used for clipping the projected values. Optional. + * Type: {@link ANEURALNETWORKS_FLOAT32}. + * * 26: The scale of the intermediate result of matmul, + * i.e. input to layer normalization, at input gate. + * Type: {@link ANEURALNETWORKS_FLOAT32}. + * * 27: The scale of the intermediate result of matmul, + * i.e. input to layer normalization, at forget gate. + * Type: {@link ANEURALNETWORKS_FLOAT32}. + * * 28: The scale of the intermediate result of matmul, + * i.e. input to layer normalization, at cell gate. + * Type: {@link ANEURALNETWORKS_FLOAT32}. + * * 29: The scale of the intermediate result of matmul, + * i.e. input to layer normalization, at output gate. + * Type: {@link ANEURALNETWORKS_FLOAT32}. + * * 30: The zero point of the hidden state, i.e. input to + * projection. + * Type: {@link ANEURALNETWORKS_INT32}. + * * 31: The scale of the hidden state, i.e. input to + * projection. + * Type: {@link ANEURALNETWORKS_FLOAT32}. + * + * Outputs: + * * 0: The output state (out). + * Type: {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} + * Shape: [batchSize, outputSize] + * * 1: The cell state (out). + * Type: {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM} + * Shape: [batchSize, numUnits] + * * 2: The output. This is effectively the same as the current + * "output state (out)" value. + * Type: {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} + * Shape: [batchSize, outputSize] + * + * Available since API level 30. + */ + ANEURALNETWORKS_QUANTIZED_LSTM = 95, + + /** + * Executes one of the two referenced models as determined by a boolean + * value. 
+ * + * The inputs and outputs of the two referenced models must agree with the + * signature of this operation. That is, if the operation has (3 + n) inputs + * and m outputs, both models must have n inputs and m outputs with the same + * types, ranks (if specified), dimensions (if specified), scales, + * zeroPoints, and other operand parameters as the corresponding operation + * inputs and outputs. + * + * Inputs: + * * 0: A value of type {@link ANEURALNETWORKS_TENSOR_BOOL8} and shape [1] + * that determines which of the two referenced models to execute. + * The operand must have fully specified dimensions. + * * 1: A {@link ANEURALNETWORKS_MODEL} reference to the model to be + * executed if the condition is true. + * * 2: A {@link ANEURALNETWORKS_MODEL} reference to the model to be + * executed if the condition is false. + * * 3 ~ (n + 2): Inputs to be passed to the model selected for execution. + * + * Outputs: + * * 0 ~ (m - 1): Outputs produced by the selected model. + * + * Available since API level 30. + */ + ANEURALNETWORKS_IF = 96, + + /** + * Executes the body model until the condition model outputs false. + * + * The inputs to this operation are the condition model, the body model, + * and operand values for the first iteration of the loop. The values are + * implicitly split into three groups of input-output, state-only, and + * input-only values, as described below. + * + * The outputs of this operation are the final values of input-output + * operands. + * + * Both the condition and body model receive (m + k + n) inputs. + * * The first m (m >= 1) inputs are input-output operands. For the first + * iteration, these are initialized from the corresponding inputs of the + * WHILE operation. In subsequent iterations, their values come from the + * corresponding outputs of the body model produced during the previous + * iteration. + * * The next k (k >= 0) inputs are state-only operands. 
They are similar to + * the input-output operands, except that their values are no longer + * available after the loop terminates. + * * The last n (n >= 0) inputs are input-only operands. Their values come + * from the corresponding inputs of the WHILE operation. + * + * The body model produces (m + k) outputs. + * * The first m outputs are input-output operands. They become the outputs + * of the WHILE operation when a termination condition is reached. + * * The last k outputs are state-only operands. Their values are no longer + * available after the loop terminates. + * + * The numbers m, k, and n are inferred by the runtime as follows: + * m = (WHILE operation output count) + * k = (body model output count) - m + * n = (body model input count) - m - k + * + * The pseudo-code below illustrates the flow of a WHILE operation with + * inputs condition, body, initial_input_output, initial_state, input_only + * (m = 1, k = 1, n = 1): + * + * input_output = initial_input_output + * state = initial_state + * while condition(input_output, state, input_only): + * input_output, state = body(input_output, state, input_only) + * return input_output + * + * To prevent infinite loops, there is an implicit execution timeout + * associated with each loop ("loop timeout duration"). See {@link + * ANeuralNetworksExecution_setLoopTimeout}. + * + * Inputs: + * * 0: A {@link ANEURALNETWORKS_MODEL} reference to the condition + * model. The model must have (m + k + n) inputs with + * the same types, ranks (if specified), dimensions (if specified), + * scales, zeroPoints, and other operand parameters as the + * corresponding inputs of the WHILE operation and exactly one output + * of {@link ANEURALNETWORKS_TENSOR_BOOL8} and shape [1]. + * The output operand must have fully specified dimensions. + * * 1: A {@link ANEURALNETWORKS_MODEL} reference to the body model. 
+ * The model must have (m + k + n) inputs and (m + k) outputs with + * the same types, ranks (if specified), dimensions (if specified), + * scales, zeroPoints, and other operand parameters as the + * corresponding inputs and outputs of the WHILE operation. + * * (m inputs): Initial values for input-output operands. + * * (k inputs): Initial values for state-only operands. + * * (n inputs): Values for input-only operands. + * + * Outputs: + * * 0 ~ (m - 1): Outputs produced by the loop. + * + * Available since API level 30. + */ + ANEURALNETWORKS_WHILE = 97, + + /** + * Computes exponential linear activation on the input tensor element-wise. + * + * The output is calculated using the following formula: + * + * ELU(x) = max(0, x) + min(0, alpha * (exp(x) - 1)) + * + * Supported tensor {@link OperandCode}: + * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} + * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} + * + * Supported tensor rank: from 1. + * + * Inputs: + * * 0: A tensor, specifying the input. May be zero-sized. + * * 1: A scalar, specifying the alpha parameter. + * For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT16}, + * the alpha value must be of {@link ANEURALNETWORKS_FLOAT16}. + * For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, + * the alpha value must be of {@link ANEURALNETWORKS_FLOAT32}. + * + * Outputs: + * * 0: The output tensor of same shape and type as input0. + * + * Available since API level 30. + */ + ANEURALNETWORKS_ELU = 98, + + /** + * Computes hard-swish activation on the input tensor element-wise. 
+ * + * Hard swish activation is introduced in + * https://arxiv.org/pdf/1905.02244.pdf + * + * The output is calculated using the following formula: + * + * h-swish(x) = x * max(0, min(6, (x + 3))) / 6 + + * Supported tensor {@link OperandCode}: + * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} + * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} + * + * Supported tensor rank: from 1. + * + * Inputs: + * * 0: A tensor, specifying the input. May be zero-sized. + * + * Outputs: + * * 0: The output tensor of same shape and type as input0. + * Scale and zero point of this tensor may be different from the input + * tensor's parameters. + * + * Available since API level 30. + */ + ANEURALNETWORKS_HARD_SWISH = 99, + + /** + * Creates a tensor filled with a scalar value. + * + * Supported output tensor {@link OperandCode}: + * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} + * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} + * * {@link ANEURALNETWORKS_TENSOR_INT32} + * + * Supported tensor rank: from 1. + * + * Inputs: + * * 0: A 1-D tensor, specifying the desired output tensor shape. + * * 1: A scalar, specifying the value to fill the output tensors with. + * For output tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT16}, + * the scalar must be of {@link ANEURALNETWORKS_FLOAT16}. + * For output tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, + * the scalar must be of {@link ANEURALNETWORKS_FLOAT32}. + * For output tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, + * the scalar must be of {@link ANEURALNETWORKS_INT32}. + * + * Outputs: + * * 0: The output tensor. + * + * Available since API level 30. + */ + ANEURALNETWORKS_FILL = 100, + + /** + * Returns the rank of a tensor. + * + * The rank of a tensor is the number of dimensions in it. Also known as + * "order", "degree", "ndims". 
+ * + * Supported tensor {@link OperandCode}: + * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} + * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} + * * {@link ANEURALNETWORKS_TENSOR_INT32} + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} + * * {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM} + * * {@link ANEURALNETWORKS_TENSOR_BOOL8} + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} + * * {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM} + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM} + * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} + * + * Supported tensor rank: from 1. + * + * Inputs: + * * 0: The input tensor. + * + * Outputs: + * * 0: A scalar of {@link ANEURALNETWORKS_INT32}, specifying the rank + * of the input tensor. + * + * Available since API level 30. + */ + ANEURALNETWORKS_RANK = 101, +} OperationCode; + +/** + * Fused activation function types. + * + * + * Available since API level 27. + */ +typedef enum { + /** NO fused activation function. */ + ANEURALNETWORKS_FUSED_NONE = 0, + /** Fused ReLU activation function. */ + ANEURALNETWORKS_FUSED_RELU = 1, + /** Fused ReLU1 activation function. */ + ANEURALNETWORKS_FUSED_RELU1 = 2, + /** Fused ReLU6 activation function. */ + ANEURALNETWORKS_FUSED_RELU6 = 3, +} FuseCode; + +/** + * Implicit padding algorithms. + * + * + * Available since API level 27. + */ +typedef enum { + /** + * SAME padding. + * Padding on both ends are the "same": + * padding_to_beginning = total_padding / 2 + * padding_to_end = (total_padding + 1)/2. + * i.e., for even number of padding, padding to both ends are exactly + * the same; for odd number of padding, padding to the ending is bigger + * than the padding to the beginning by 1. + * + * total_padding is a function of input, stride, dilation and filter size. 
+ * It could be computed as follows: + * out_size = (input + stride - 1) / stride + * effective_filter_size = (filter_size - 1) * dilation + 1 + * needed_input = (out_size - 1) * stride + effective_filter_size + * total_padding = max(0, needed_input - input_size) + * The computation is the same for the horizontal and vertical directions. + */ + ANEURALNETWORKS_PADDING_SAME = 1, + + /** + * VALID padding. + * No padding. When the input size is not evenly divisible by + * the filter size, the input at the end that could not fill + * the whole filter tile will simply be ignored. + */ + ANEURALNETWORKS_PADDING_VALID = 2, +} PaddingCode; + +/** + * Execution preferences. + * + * Available since API level 27. + */ +typedef enum { + /** + * Prefer executing in a way that minimizes battery drain. + * This is desirable for compilations that will be executed often. + */ + ANEURALNETWORKS_PREFER_LOW_POWER = 0, + /** + * Prefer returning a single answer as fast as possible, even if this causes + * more power consumption. + */ + ANEURALNETWORKS_PREFER_FAST_SINGLE_ANSWER = 1, + /** + * Prefer maximizing the throughput of successive frames, for example when + * processing successive frames coming from the camera. + */ + ANEURALNETWORKS_PREFER_SUSTAINED_SPEED = 2, +} PreferenceCode; + +/** + * Device types. + * + * The type of NNAPI device. + */ +typedef enum { + /** The device type cannot be provided. */ + ANEURALNETWORKS_DEVICE_UNKNOWN = 0, + /** The device does not fall into any category below. */ + ANEURALNETWORKS_DEVICE_OTHER = 1, + /** The device runs NNAPI models on single or multi-core CPU. */ + ANEURALNETWORKS_DEVICE_CPU = 2, + /** The device can run NNAPI models and also accelerate graphics APIs such + * as OpenGL ES and Vulkan. */ + ANEURALNETWORKS_DEVICE_GPU = 3, + /** Dedicated accelerator for Machine Learning workloads. */ + ANEURALNETWORKS_DEVICE_ACCELERATOR = 4, +} DeviceTypeCode; + +/** + * Result codes. 
+ *
+ * <p>Any NNAPI function can return any result code, including result codes not
+ * currently documented. Any value other than {@link ANEURALNETWORKS_NO_ERROR}
+ * indicates a failure of some kind.</p>
+ *
+ * <p>Additional information about the nature of a failure can be obtained from
+ * the device log after enabling NNAPI debugging by setting the debug.nn.vlog
+ * property to 1, e.g., by calling "adb shell setprop debug.nn.vlog 1".</p>
+ *
+ * Available since API level 27.
+ */
+typedef enum {
+    /**
+     * Operation was successful.
+     */
+    ANEURALNETWORKS_NO_ERROR = 0,
+
+    /**
+     * Failure caused by not enough available memory.
+     */
+    ANEURALNETWORKS_OUT_OF_MEMORY = 1,
+
+    ANEURALNETWORKS_INCOMPLETE = 2,
+
+    /**
+     * Failure caused by unexpected null argument.
+     */
+    ANEURALNETWORKS_UNEXPECTED_NULL = 3,
+
+    /**
+     * Failure caused by invalid function arguments, invalid model definition,
+     * invalid execution definition or invalid data at execution time.
+     */
+    ANEURALNETWORKS_BAD_DATA = 4,
+
+    /**
+     * Failure caused by failed model execution.
+     */
+    ANEURALNETWORKS_OP_FAILED = 5,
+
+    /**
+     * Failure caused by object being in the wrong state.
+     */
+    ANEURALNETWORKS_BAD_STATE = 6,
+
+    /**
+     * Failure caused by not being able to map a file into memory.
+     * This may be caused by a file descriptor not being mappable, or an AHardwareBuffer
+     * not supported by the device.
+     * Mitigate by reading its content into memory.
+     */
+    ANEURALNETWORKS_UNMAPPABLE = 7,
+
+    /**
+     * Failure caused by insufficient buffer size provided to a model output.
+     */
+    ANEURALNETWORKS_OUTPUT_INSUFFICIENT_SIZE = 8,
+
+    /**
+     * Failure caused by a device not being available.
+     */
+    ANEURALNETWORKS_UNAVAILABLE_DEVICE = 9,
+
+    /**
+     * Failure because a deadline could not be met for a task, but future
+     * deadlines may still be met for the same task after a short delay.
+     *
+     * Available since API level 30.
+ */ + ANEURALNETWORKS_MISSED_DEADLINE_TRANSIENT = 10, + + /** + * Failure because a deadline could not be met for a task, and future + * deadlines will likely also not be met for the same task even after a + * short delay. + * + * Available since API level 30. + */ + ANEURALNETWORKS_MISSED_DEADLINE_PERSISTENT = 11, + + /** + * Failure because of a resource limitation within the driver, but future + * calls for the same task may still succeed after a short delay. + * + * Available since API level 30. + */ + ANEURALNETWORKS_RESOURCE_EXHAUSTED_TRANSIENT = 12, + + /** + * Failure because of a resource limitation within the driver, and future + * calls for the same task will likely also fail even after a short + * delay. + * + * Available since API level 30. + */ + ANEURALNETWORKS_RESOURCE_EXHAUSTED_PERSISTENT = 13, + + /** + * Failure indicating an object is in a dead state. + * + * Available since API level 30. + */ + ANEURALNETWORKS_DEAD_OBJECT = 14, +} ResultCode; + +/** + * For {@link ANeuralNetworksModel_setOperandValue}, values with a + * length smaller or equal to this will be immediately copied into + * the model. The size is in bytes. + * + * Available since API level 27. + */ +enum { ANEURALNETWORKS_MAX_SIZE_OF_IMMEDIATELY_COPIED_VALUES = 128 }; + +/** + * For {@link ANeuralNetworksCompilation_setCaching}, specify the size + * of the cache token required from the application. The size is in bytes. + * + * Available since API level 29. + */ +enum { ANEURALNETWORKS_BYTE_SIZE_OF_CACHE_TOKEN = 32 }; + +/** + * Different duration measurements. + * + * Durations are measured in nanoseconds. + * + * Available since API level 29. + */ +typedef enum { + // Execution time on hardware (not driver, which runs on host processor). + ANEURALNETWORKS_DURATION_ON_HARDWARE = 0, + // Execution time in driver (including time on hardware). Excludes overhead + // such as that of the runtime itself and the IPC needed for the runtime to + // communicate with the driver. 
+ ANEURALNETWORKS_DURATION_IN_DRIVER = 1, + // Execution time on hardware, after all dependencies have been signaled. + // If no dependencies specified (for example, if the execution was scheduled other + // than with {@link ANeuralNetworksExecution_startComputeWithDependencies}), the + // reported time will be the same as ANEURALNETWORKS_DURATION_ON_HARDWARE. + // Available since API level 30. + ANEURALNETWORKS_FENCED_DURATION_ON_HARDWARE = 2, + // Execution time in driver, after all dependencies have been signaled. Excludes + // overhead such as that of the runtime itself and the IPC needed for the runtime + // to communicate with the driver. + // If no dependencies specified (for example, if the execution was scheduled other + // than with {@link ANeuralNetworksExecution_startComputeWithDependencies}), the + // reported time will be the same as ANEURALNETWORKS_DURATION_IN_DRIVER. + // Available since API level 30. + ANEURALNETWORKS_FENCED_DURATION_IN_DRIVER = 3, +} DurationCode; + +/** + * Relative execution priority. + * + * Available since API level 30. + */ +typedef enum { + ANEURALNETWORKS_PRIORITY_LOW = 90, + ANEURALNETWORKS_PRIORITY_MEDIUM = 100, + ANEURALNETWORKS_PRIORITY_HIGH = 110, + ANEURALNETWORKS_PRIORITY_DEFAULT = ANEURALNETWORKS_PRIORITY_MEDIUM, +} PriorityCode; + +/** + * ANeuralNetworksMemory is an opaque type that represents memory. + * + * This type is used to represent shared memory, memory mapped files, + * and similar memories. + * + * By using shared memory, a program can efficiently communicate to the + * runtime and drivers the tensors that define a model. See + * {@link ANeuralNetworksModel_setOperandValueFromMemory}. An application + * should typically create one shared memory object that contains every constant tensor + * needed to define a model. {@link ANeuralNetworksMemory_createFromFd} can be used to + * create shared memory from a file handle. 
+ * {@link ANeuralNetworksMemory_createFromAHardwareBuffer} can be used to + * create shared memory from an AHardwareBuffer handle. + * + * Memory objects can also be used to specify the input and output arguments of + * an execution. See {@link ANeuralNetworksExecution_setInputFromMemory} + * and {@link ANeuralNetworksExecution_setOutputFromMemory}. + * + * When calling {@link ANeuralNetworksModel_setOperandValueFromMemory}, + * {@link ANeuralNetworksExecution_setInputFromMemory} and + * {@link ANeuralNetworksExecution_setOutputFromMemory}, each operand in the shared + * memory object must be aligned on a boundary of a byte size that is a multiple + * of the element type byte size, e.g., a tensor with + * {@link ANEURALNETWORKS_TENSOR_FLOAT32} type must be aligned on 4-byte boundary. + * + * It is the application's responsibility to ensure that there are no uses of + * the memory after calling {@link ANeuralNetworksMemory_free}. This includes + * any model which references this memory because of a call to + * {@link ANeuralNetworksModel_setOperandValueFromMemory}, any compilation + * created using such a model, any execution object or burst object created + * using such a compilation, or any execution which references this memory + * because of a call to {@link ANeuralNetworksExecution_setInputFromMemory} or + * {@link ANeuralNetworksExecution_setOutputFromMemory}. + * + * Available since API level 27. + * + * Starting at API level 30, the application may request creation of device native memory from + * {@link ANeuralNetworksMemoryDesc} to avoid potential memory copying and transformation + * overhead between executions. See also {@link ANeuralNetworksMemoryDesc} and + * {@link ANeuralNetworksMemory_createFromDesc}. + */ +typedef struct ANeuralNetworksMemory ANeuralNetworksMemory; + +/** + * ANeuralNetworksModel is an opaque type that contains a description of the + * mathematical operations that constitute the model. 
+ * + * <p>Build the model by calling<ul> + * <li>{@link ANeuralNetworksModel_create}</li> + * <li>{@link ANeuralNetworksModel_addOperation}</li> + * <li>{@link ANeuralNetworksModel_addOperand}</li> + * </ul> + * + * This forms a graph in which each operation and operand is a node, a + * directed edge from an operand to an operation indicates that the + * operand is an input to the operation, and a directed edge from an + * operation to an operand indicates that the operand is an output + * from the operation. This graph must be acyclic. + * + * A model is completed by calling {@link ANeuralNetworksModel_finish}. + * A model is destroyed by calling {@link ANeuralNetworksModel_free}. + * + * <p>A model cannot be modified once {@link ANeuralNetworksModel_finish} + * has been called on it.</p> + * + * <p>It is the application's responsibility to make sure that only one thread + * modifies a model at a given time. It is however safe for more than one + * thread to use the model once {@link ANeuralNetworksModel_finish} has returned.</p> + * + * <p>It is also the application's responsibility to ensure that there are no + * other uses of the model after calling {@link ANeuralNetworksModel_free}. + * This includes any compilation, execution object or burst object created using + * the model.</p> + * + * Available since API level 27. + */ +typedef struct ANeuralNetworksModel ANeuralNetworksModel; + +/** + * ANeuralNetworksCompilation is an opaque type that can be used to compile + * a machine learning model. 
+ * + * <p>To use:<ul> + * <li>Create a new compilation instance by calling the + * {@link ANeuralNetworksCompilation_create} function or + * {@link ANeuralNetworksCompilation_createForDevices}.</li> + * <li>Set any desired properties on the compilation (for example, + * {@link ANeuralNetworksCompilation_setPreference}).</li> + * <li>Optionally, set the caching signature and the cache directory on the + * compilation by calling {@link ANeuralNetworksCompilation_setCaching}.</li> + * <li>Complete the compilation with {@link ANeuralNetworksCompilation_finish}.</li> + * <li>Use the compilation as many times as needed + * with {@link ANeuralNetworksExecution_create} and + * {@link ANeuralNetworksBurst_create}.</li> + * <li>Destroy the compilation with {@link ANeuralNetworksCompilation_free} + * once all executions using the compilation have completed.</li></ul></p> + * + * A compilation is completed by calling {@link ANeuralNetworksCompilation_finish}. + * A compilation is destroyed by calling {@link ANeuralNetworksCompilation_free}. + * + * <p>A compilation cannot be modified once {@link ANeuralNetworksCompilation_finish} + * has been called on it.</p> + * + * <p>It is the application's responsibility to make sure that only + * one thread modifies a compilation at a given time. It is however + * safe for more than one thread to use the compilation once + * {@link ANeuralNetworksCompilation_finish} has returned.</p> + * + * <p>It is also the application's responsibility to ensure that there are no other + * uses of the compilation after calling {@link ANeuralNetworksCompilation_free}. + * This includes any execution object or burst object created using the compilation, + * or any memory descriptor with the compilation as part of one of the roles specified by + * {@link ANeuralNetworksMemoryDesc_addInputRole} or + * {@link ANeuralNetworksMemoryDesc_addOutputRole}.</p> + * + * Available since API level 27. 
+ */ +typedef struct ANeuralNetworksCompilation ANeuralNetworksCompilation; + +/** + * ANeuralNetworksExecution is an opaque type that can be used to apply a machine + * learning model to a set of inputs. + * + * <p>To use:<ul> + * <li>Create a new execution instance by calling the + * {@link ANeuralNetworksExecution_create} function.</li> + * <li>Associate input buffers or memory regions to the model inputs with + * {@link ANeuralNetworksExecution_setInput} or + * {@link ANeuralNetworksExecution_setInputFromMemory}.</li> + * <li>Associate output buffers or memory regions to the model outputs with + * {@link ANeuralNetworksExecution_setOutput} or + * {@link ANeuralNetworksExecution_setOutputFromMemory}.</li> + * <li>Apply the model with one of the following:</li><ul> + * <li>Asynchronously with {@link ANeuralNetworksExecution_startCompute} + * or with {@link ANeuralNetworksExecution_startComputeWithDependencies}, + * waiting for the execution to complete with + * {@link ANeuralNetworksEvent_wait}.</li> + * <li>Synchronously with {@link ANeuralNetworksExecution_compute}.</li> + * <li>Synchronously as part of an execution burst with + * {@link ANeuralNetworksExecution_burstCompute}.</li></ul> + * <li>Destroy the execution with + * {@link ANeuralNetworksExecution_free}.</li></ul></p> + * + * <p>An output buffer or memory region must not overlap with any + * other output buffer or memory region, with an input buffer or + * memory region, or with an operand value in a memory object + * ({@link ANeuralNetworksModel_setOperandValueFromMemory}).</p> + * + * <p>An execution cannot be modified once + * {@link ANeuralNetworksExecution_burstCompute}, + * {@link ANeuralNetworksExecution_compute}, + * {@link ANeuralNetworksExecution_startCompute} or + * {@link ANeuralNetworksExecution_startComputeWithDependencies} has been called on it.</p> + * + * <p>An execution can be applied to a model with + * {@link ANeuralNetworksExecution_burstCompute}, + * {@link 
ANeuralNetworksExecution_compute}, + * {@link ANeuralNetworksExecution_startCompute} or + * {@link ANeuralNetworksExecution_startComputeWithDependencies} only once. Create new + * executions to do new evaluations of the model.</p> + * + * <p>It is the application's responsibility to make sure that only one thread + * modifies an execution at a given time. It is however safe for more than one + * thread to use {@link ANeuralNetworksEvent_wait} at the same time.</p> + * + * <p>It is also the application's responsibility to ensure that the execution + * either has never been scheduled or has completed (i.e., that + * {@link ANeuralNetworksExecution_burstCompute}, + * {@link ANeuralNetworksExecution_compute}, or + * {@link ANeuralNetworksEvent_wait} has returned) before calling + * {@link ANeuralNetworksExecution_free}.</p>. + * + * <p>It is also the application's responsibility to ensure that there are no other + * uses of the execution after calling {@link ANeuralNetworksExecution_free}.</p> + * + * <p>Multiple executions can be scheduled and evaluated concurrently, either by + * means of {@link ANeuralNetworksExecution_compute} or + * {@link ANeuralNetworksExecution_burstCompute} (which are synchronous) in + * different threads, or by means of + * {@link ANeuralNetworksExecution_startCompute} or + * {@link ANeuralNetworksExecution_startComputeWithDependencies} (which are asynchronous). + * (Concurrent uses of {@link ANeuralNetworksExecution_burstCompute} must be on + * different burst objects.) The runtime makes no guarantee on the ordering of + * completion of executions. 
If it's important to the application, the + * application should enforce the ordering by ensuring that one execution + * completes before the next is scheduled (for example, by scheduling all + * executions synchronously within a single thread, or by scheduling all + * executions asynchronously and using {@link ANeuralNetworksEvent_wait} between + * calls to {@link ANeuralNetworksExecution_startCompute}); or by using + * {@link ANeuralNetworksExecution_startComputeWithDependencies} to make the execution wait for a + * list of events to be signaled before starting the actual evaluation.</p> + * + * Available since API level 27. + */ +typedef struct ANeuralNetworksExecution ANeuralNetworksExecution; + +#if __ANDROID_API__ >= 29 +/** + * Parameters for ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL operand. + */ +typedef struct ANeuralNetworksSymmPerChannelQuantParams { + /* The index of the channel dimension. */ + uint32_t channelDim; + /** The size of the scale array. Should be equal to dimension[channelDim] of the Operand. */ + uint32_t scaleCount; + /** The array of scaling values for each channel. Each value must be greater than zero. */ + const float* scales; +} ANeuralNetworksSymmPerChannelQuantParams; + +/** + * ANeuralNetworksBurst is an opaque type that can be used to reduce the latency + * of a rapid sequence of executions. It will likely cause overhead if only used + * for a single execution. + * + * ANeuralNetworksBurst serves as a context object for any number of inferences + * using {@link ANeuralNetworksExecution} objects. An ANeuralNetworksBurst + * object and the {@link ANeuralNetworksExecution} objects used with it must all + * have been created from the same {@link ANeuralNetworksCompilation} object. + * + * This object is also used as a hint to drivers, providing insight to the + * lifetime of a rapid sequence of executions. 
For example, a driver may choose + * to increase the clock frequency of its accelerator for the lifetime of a + * burst object. + * + * <p>To use:<ul> + * <li>Create a new burst object by calling the + * {@link ANeuralNetworksBurst_create} function.</li> + * <li>For each execution:</li><ul> + * <li>Create {@link ANeuralNetworksExecution} and configure its + * properties (see {@link ANeuralNetworksExecution} for details).</li> + * <li>Apply the model synchronously with + * {@link ANeuralNetworksExecution_burstCompute}, reusing the same + * {@link ANeuralNetworksBurst} with the new + * {@link ANeuralNetworksExecution}.</li> + * <li>Use and free the {@link ANeuralNetworksExecution}.</li></ul> + * <li>Destroy the burst with + * {@link ANeuralNetworksBurst_free}.</li></ul></p> + * + * Available since API level 29. + */ +typedef struct ANeuralNetworksBurst ANeuralNetworksBurst; +#endif // __ANDROID_API__ >= 29 + +/** + * ANeuralNetworksOperandType describes the type of an operand. + * + * This structure is used to describe both scalars and tensors. + * + * A tensor operand type with all dimensions specified is "fully + * specified". Whenever possible (i.e., whenever the dimensions are + * known at model construction time), a tensor operand type should be + * (but is not required to be) fully specified, in order to enable the + * best possible performance. + * + * If a tensor operand's type is not fully specified, the dimensions + * of the operand are deduced from the operand types and values of the + * operation for which that operand is an output or from the corresponding + * {@link ANEURALNETWORKS_IF} or {@link ANEURALNETWORKS_WHILE} operation input + * operand type in the case of referenced model input operands. 
+ * + * <p>In the following situations, a tensor operand type must be fully + * specified:<ul> + * <li>The operand has a constant value, set by + * {@link ANeuralNetworksModel_setOperandValue} (with a + * non-nullptr buffer) or + * {@link ANeuralNetworksModel_setOperandValueFromMemory}.</li> + * <li>The operand is a model input (see + * {@link ANeuralNetworksModel_identifyInputsAndOutputs}) of the main + * model within a compilation. A fully specified tensor operand type + * must either be provided to {@link ANeuralNetworksModel_addOperand}; + * or it must be provided to the corresponding + * {@link ANeuralNetworksExecution_setInput}, or + * {@link ANeuralNetworksExecution_setInputFromMemory}. + * EXCEPTION: If the input is optional and omitted + * (by passing nullptr for buffer to + * {@link ANeuralNetworksExecution_setInput}) then it need + * not have a fully specified tensor operand type.</li> + * <li>The operand is a model output (see + * {@link ANeuralNetworksModel_identifyInputsAndOutputs}) of the main + * model within a compilation and is to be used with {@link + * ANeuralNetworksExecution_startComputeWithDependencies}. + * A fully specified tensor operand type must either be provided + * to {@link ANeuralNetworksModel_addOperand}; or it must be + * provided to the corresponding + * {@link ANeuralNetworksExecution_setOutput}, or + * {@link ANeuralNetworksExecution_setOutputFromMemory}.</li></ul> + * + * A tensor operand type of specified rank but some number of + * unspecified dimensions is represented by setting dimensionCount to + * the rank and each unspecified dimension to 0. + * + * Available since API level 27. + * + * Starting at API level 29, a tensor operand type of unspecified rank is + * represented by setting dimensionCount to 0 and dimensions to NULL (just as if + * it were a scalar operand type). + */ +typedef struct ANeuralNetworksOperandType { + /** + * The data type, e.g ANEURALNETWORKS_FLOAT32. 
+ */ + int32_t type; + + /** + * The number of dimensions (rank). + * + * Must be 0 for scalars. + */ + uint32_t dimensionCount; + + /** + * The dimensions of the tensor. + * + * Must be nullptr for scalars. + */ + const uint32_t* dimensions; + + /** + * The quantization scale. + * + * Must be 0 when not applicable to an operand type. + * + * See {@link OperandCode}. + */ + float scale; + + /** + * The quantization zero point. + * + * Must be 0 when not applicable to an operand type. + * + * See {@link OperandCode}. + */ + int32_t zeroPoint; +} ANeuralNetworksOperandType; + +typedef int32_t ANeuralNetworksOperationType; + +/** + * ANeuralNetworksEvent is an opaque type that represents an event + * that will be signaled once an execution completes. + * + * Available since API level 27. + */ +typedef struct ANeuralNetworksEvent ANeuralNetworksEvent; + +#if __ANDROID_API__ >= 29 + +/** + * ANeuralNetworksDevice is an opaque type that represents a device. + * + * This type is used to query basic properties and supported operations of the corresponding + * device, and control which device(s) a model is to be run on. + * + * Available since API level 29. + */ +typedef struct ANeuralNetworksDevice ANeuralNetworksDevice; + +#endif // __ANDROID_API__ >= 29 + +#if __ANDROID_API__ >= 30 + +/** + * ANeuralNetworksMemoryDesc is an opaque type that represents a memory descriptor. + * + * A memory descriptor describes the properties of a memory object, and is used by + * {@link ANeuralNetworksMemory_createFromDesc}. + * + * To use: + * - Create a new memory descriptor by calling {@link ANeuralNetworksMemoryDesc_create}. + * - Specify all of the intended input and output roles by calling + * {@link ANeuralNetworksMemoryDesc_addInputRole} and + * {@link ANeuralNetworksMemoryDesc_addOutputRole}. + * - Optionally, specify the memory dimensions by calling + * {@link ANeuralNetworksMemoryDesc_setDimensions}. 
+ * - Complete the memory descriptor with {@link ANeuralNetworksMemoryDesc_finish}. + * - Use the memory descriptor as many times as needed with + * {@link ANeuralNetworksMemory_createFromDesc}. + * - Destroy the memory descriptor with {@link ANeuralNetworksMemoryDesc_free}. + * + * A memory descriptor is completed by calling {@link ANeuralNetworksMemoryDesc_finish}. + * A memory descriptor is destroyed by calling {@link ANeuralNetworksMemoryDesc_free}. + * + * A memory descriptor must not be modified once {@link ANeuralNetworksMemoryDesc_finish} + * has been called on it. + * + * It is the application's responsibility to make sure that only + * one thread modifies a memory descriptor at a given time. It is however + * safe for more than one thread to use the memory descriptor once + * {@link ANeuralNetworksMemoryDesc_finish} has returned. + * + * It is also the application's responsibility to ensure that there are no other + * uses of the memory descriptor after calling {@link ANeuralNetworksMemoryDesc_free}. + * It is however safe to continue using a {@link ANeuralNetworksMemory} object created + * from the memory descriptor. + * + * Available since API level 30. + */ +typedef struct ANeuralNetworksMemoryDesc ANeuralNetworksMemoryDesc; + +/** * Create a {@link ANeuralNetworksMemoryDesc} with no properties. * * This only creates the memory descriptor. Its properties should be set with calls to @@ -78,14 +6325,14 @@ * {@link ANeuralNetworksMemoryDesc_free} must be called once the memory descriptor * is no longer needed. * - * Available since NNAPI feature level 4. + * Available since API level 30. * * @param desc The {@link ANeuralNetworksMemoryDesc} to be created. * Set to NULL if unsuccessful. * * @return ANEURALNETWORKS_NO_ERROR if successful. 
*/ -int ANeuralNetworksMemoryDesc_create(ANeuralNetworksMemoryDesc** desc) __NNAPI_INTRODUCED_IN(30); +int ANeuralNetworksMemoryDesc_create(ANeuralNetworksMemoryDesc** desc) __INTRODUCED_IN(30); /** * Destroy a memory descriptor. @@ -95,12 +6342,12 @@ * * See {@link ANeuralNetworksMemoryDesc} for information on multithreaded usage. * - * Available since NNAPI feature level 4. + * Available since API level 30. * * @param desc The memory descriptor to be destroyed. Passing NULL is acceptable and * results in no operation. */ -void ANeuralNetworksMemoryDesc_free(ANeuralNetworksMemoryDesc* desc) __NNAPI_INTRODUCED_IN(30); +void ANeuralNetworksMemoryDesc_free(ANeuralNetworksMemoryDesc* desc) __INTRODUCED_IN(30); /** * Specify that a memory object will be playing the role of an input to an execution created from a @@ -127,7 +6374,7 @@ * * See {@link ANeuralNetworksMemoryDesc} for information on multithreaded usage. * - * Available since NNAPI feature level 4. + * Available since API level 30. * * @param desc The memory descriptor to be modified. * @param compilation The compilation object. It must already have been finished by calling @@ -146,8 +6393,7 @@ */ int ANeuralNetworksMemoryDesc_addInputRole(ANeuralNetworksMemoryDesc* desc, const ANeuralNetworksCompilation* compilation, - uint32_t index, float frequency) - __NNAPI_INTRODUCED_IN(30); + uint32_t index, float frequency) __INTRODUCED_IN(30); /** * Specify that a memory object will be playing the role of an output to an execution created from a @@ -174,7 +6420,7 @@ * * See {@link ANeuralNetworksMemoryDesc} for information on multithreaded usage. * - * Available since NNAPI feature level 4. + * Available since API level 30. * * @param desc The memory descriptor to be modified. * @param compilation The compilation object. 
It must already have been finished by calling @@ -193,8 +6439,7 @@ */ int ANeuralNetworksMemoryDesc_addOutputRole(ANeuralNetworksMemoryDesc* desc, const ANeuralNetworksCompilation* compilation, - uint32_t index, float frequency) - __NNAPI_INTRODUCED_IN(30); + uint32_t index, float frequency) __INTRODUCED_IN(30); /** * Set the dimensional information of the memory descriptor. @@ -210,7 +6455,7 @@ * * See {@link ANeuralNetworksMemoryDesc} for information on multithreaded usage. * - * Available since NNAPI feature level 4. + * Available since API level 30. * * @param desc The memory descriptor to be modified. * @param rank The number of dimensions. Must be 0 for scalars. @@ -220,7 +6465,7 @@ * @return ANEURALNETWORKS_NO_ERROR if successful. */ int ANeuralNetworksMemoryDesc_setDimensions(ANeuralNetworksMemoryDesc* desc, uint32_t rank, - const uint32_t* dimensions) __NNAPI_INTRODUCED_IN(30); + const uint32_t* dimensions) __INTRODUCED_IN(30); /** * Indicate that we have finished modifying a memory descriptor. Required before calling @@ -230,13 +6475,13 @@ * * See {@link ANeuralNetworksMemoryDesc} for information on multithreaded usage. * - * Available since NNAPI feature level 4. + * Available since API level 30. * * @param desc The memory descriptor to be finished. * * @return ANEURALNETWORKS_NO_ERROR if successful. */ -int ANeuralNetworksMemoryDesc_finish(ANeuralNetworksMemoryDesc* desc) __NNAPI_INTRODUCED_IN(30); +int ANeuralNetworksMemoryDesc_finish(ANeuralNetworksMemoryDesc* desc) __INTRODUCED_IN(30); /** * Creates a memory object from a memory descriptor. @@ -259,9 +6504,9 @@ * unspecified dimensions or rank. In such a case, the same memory object may be used with different * shapes of outputs in different executions. 
When the memory is used as an input, the input shape
 * must be the same as the output shape from the last execution using this memory object as an
- * output, or the last {@link ANeuralNetworksMemory_copy} using this memory object as the
- * destination memory. Creating a memory object with unspecified dimensions or rank may fail for
- * certain sets of roles.
+ * output, or the last {@link ANeuralNetworksMemory_copy} using this memory object as the destination
+ * memory. Creating a memory object with unspecified dimensions or rank may fail for certain sets of
+ * roles.
 *
 * Using the memory in roles or shapes that are not compatible with the rules specified above will
 * return an error.
@@ -281,7 +6526,7 @@
 * The provided {@link ANeuralNetworksMemoryDesc} need not outlive the {@link ANeuralNetworksMemory}
 * object.
 *
- * Available since NNAPI feature level 4.
+ * Available since API level 30.
 *
 * @param desc The memory descriptor.
 * @param memory The memory object to be created.
@@ -292,7 +6537,7 @@
 * roles.
 */
int ANeuralNetworksMemory_createFromDesc(const ANeuralNetworksMemoryDesc* desc,
-                                         ANeuralNetworksMemory** memory) __NNAPI_INTRODUCED_IN(30);
+                                         ANeuralNetworksMemory** memory) __INTRODUCED_IN(30);

/**
 * Copies data from one memory object to another.
@@ -323,7 +6568,7 @@
 * The src and dst may have different data layout, in which case the data copying is performed
 * logically with data layout transformation.
 *
- * Available since NNAPI feature level 4.
+ * Available since API level 30.
 *
 * @param src The source memory object.
 * @param dst The destination memory object.
@@ -331,7 +6576,11 @@
 * @return ANEURALNETWORKS_NO_ERROR if successful.
 */
int ANeuralNetworksMemory_copy(const ANeuralNetworksMemory* src, const ANeuralNetworksMemory* dst)
-        __NNAPI_INTRODUCED_IN(30);
+        __INTRODUCED_IN(30);
+
+#endif // __ANDROID_API__ >= 30
+
+#if __ANDROID_API__ >= 29

/**
 * Get the number of available devices.
@@ -340,9 +6589,9 @@ * * @return ANEURALNETWORKS_NO_ERROR if successful. * - * Available since NNAPI feature level 3. + * Available since API level 29. */ -int ANeuralNetworks_getDeviceCount(uint32_t* numDevices) __NNAPI_INTRODUCED_IN(29); +int ANeuralNetworks_getDeviceCount(uint32_t* numDevices) __INTRODUCED_IN(29); /** * Get the representation of the specified device. @@ -355,10 +6604,10 @@ * * @return ANEURALNETWORKS_NO_ERROR if successful. * - * Available since NNAPI feature level 3. + * Available since API level 29. */ int ANeuralNetworks_getDevice(uint32_t devIndex, ANeuralNetworksDevice** device) - __NNAPI_INTRODUCED_IN(29); + __INTRODUCED_IN(29); /** * Get the name of the specified device. @@ -367,18 +6616,17 @@ * @param name The returned name of the specified device. The name will be in UTF-8 * and will be null-terminated. It will be recognizable as a known device name * rather than a cryptic string. For devices with feature level reported by - * {@link ANeuralNetworksDevice_getFeatureLevel} that is - * {@link ANEURALNETWORKS_FEATURE_LEVEL_3} and higher, the format of the name is - * {VENDOR}-{DEVICE}. For devices with feature level - * {@link ANEURALNETWORKS_FEATURE_LEVEL_2} or lower, the format of the name is - * undefined. The name will remain valid for the duration of the application. + * {@link ANeuralNetworksDevice_getFeatureLevel} that is 29 and above, the + * format of the name is {VENDOR}-{DEVICE}. For devices with feature level 28 + * or lower, the format of the name is undefined. + * The name will remain valid for the duration of the application. * * @return ANEURALNETWORKS_NO_ERROR if successful. * - * Available since NNAPI feature level 3. + * Available since API level 29. */ int ANeuralNetworksDevice_getName(const ANeuralNetworksDevice* device, const char** name) - __NNAPI_INTRODUCED_IN(29); + __INTRODUCED_IN(29); /** * Get the type of a given device. @@ -394,10 +6642,10 @@ * * @return ANEURALNETWORKS_NO_ERROR if successful. 
* - * Available since NNAPI feature level 3. + * Available since API level 29. */ int ANeuralNetworksDevice_getType(const ANeuralNetworksDevice* device, int32_t* type) - __NNAPI_INTRODUCED_IN(29); + __INTRODUCED_IN(29); /** * Get the version of the driver implementation of the specified device. @@ -424,37 +6672,31 @@ * * @return ANEURALNETWORKS_NO_ERROR if successful. * - * Available since NNAPI feature level 3. + * Available since API level 29. */ int ANeuralNetworksDevice_getVersion(const ANeuralNetworksDevice* device, const char** version) - __NNAPI_INTRODUCED_IN(29); + __INTRODUCED_IN(29); /** - * Get the NNAPI feature level of the specified NNAPI device. + * Get the supported NNAPI version of the specified device. * - * Each device has a supported feature level, which is the most advanced NNAPI specification - * and features this driver implements. For example, if the driver implements the features - * introduced in {@link ANEURALNETWORKS_FEATURE_LEVEL_2}, but does not implement the features - * introduced after {@link ANEURALNETWORKS_FEATURE_LEVEL_2}, the value would be - * {@link ANEURALNETWORKS_FEATURE_LEVEL_2}. Developers could decide whether or not the specified - * device should be used for a model that has certain feature requirements. - * - * NNAPI device feature level is closely related to NNAPI runtime feature level - * ({@link ANeuralNetworks_getRuntimeFeatureLevel}), which indicates an NNAPI runtime feature - * level (the most advanced NNAPI specification and features that the runtime implements). - * An NNAPI device feature level is always less than or equal to the runtime feature level. - * - * This function produces a {@link FeatureLevelCode} enum value, NOT an Android API level. + * Each device has a supported feature level, which is the most advanced feature this driver + * implements. 
For example, if the driver implements the features introduced in Android P, + * but does not implement the features introduced after Android P, the value would be 28. + * Developers could decide whether or not the specified device should be used for a Model that + * has certain feature requirements. * * @param device The representation of the specified device. - * @param featureLevel {@link FeatureLevelCode} of the most advanced feature this driver implements. + * @param featureLevel The API level of the most advanced feature this driver implements. * * @return ANEURALNETWORKS_NO_ERROR if successful. * - * Available since NNAPI feature level 3. + * Available since API level 29. */ int ANeuralNetworksDevice_getFeatureLevel(const ANeuralNetworksDevice* device, - int64_t* featureLevel) __NNAPI_INTRODUCED_IN(29); + int64_t* featureLevel) __INTRODUCED_IN(29); + +#if __ANDROID_API__ >= 30 /** * Wait until the device is in a live state. @@ -468,9 +6710,11 @@ * * @return ANEURALNETWORKS_NO_ERROR if successful. * - * Available since NNAPI feature level 4. + * Available since API level 30. */ -int ANeuralNetworksDevice_wait(const ANeuralNetworksDevice* device) __NNAPI_INTRODUCED_IN(30); +int ANeuralNetworksDevice_wait(const ANeuralNetworksDevice* device) __INTRODUCED_IN(30); + +#endif // __ANDROID_API__ >= 30 /** * Get the supported operations for a specified set of devices. If multiple devices @@ -487,11 +6731,11 @@ * * @return ANEURALNETWORKS_NO_ERROR if successful. * - * Available since NNAPI feature level 3. + * Available since API level 29. 
*/ int ANeuralNetworksModel_getSupportedOperationsForDevices( const ANeuralNetworksModel* model, const ANeuralNetworksDevice* const* devices, - uint32_t numDevices, bool* supportedOps) __NNAPI_INTRODUCED_IN(29); + uint32_t numDevices, bool* supportedOps) __INTRODUCED_IN(29); /** * Create a {@link ANeuralNetworksCompilation} to compile the given model for a specified set @@ -518,13 +6762,13 @@ * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA * if the model is invalid. * - * Available since NNAPI feature level 3. + * Available since API level 29. */ int ANeuralNetworksCompilation_createForDevices(ANeuralNetworksModel* model, const ANeuralNetworksDevice* const* devices, uint32_t numDevices, ANeuralNetworksCompilation** compilation) - __NNAPI_INTRODUCED_IN(29); + __INTRODUCED_IN(29); /** * Sets the compilation caching signature and the cache directory. @@ -549,11 +6793,11 @@ * * @return ANEURALNETWORKS_NO_ERROR if successful. * - * Available since NNAPI feature level 3. + * Available since API level 29. */ int ANeuralNetworksCompilation_setCaching(ANeuralNetworksCompilation* compilation, const char* cacheDir, const uint8_t* token) - __NNAPI_INTRODUCED_IN(29); + __INTRODUCED_IN(29); /** * Schedule synchronous evaluation of the execution. @@ -565,28 +6809,23 @@ * If {@link ANeuralNetworksExecution_setTimeout} was called on this execution, * and the execution is not able to complete before the timeout duration is * exceeded, then execution may be aborted, in which case - * ANEURALNETWORKS_MISSED_DEADLINE_* {@link ResultCode} will be returned. If the device has + * {@link ANEURALNETWORKS_MISSED_DEADLINE_*} will be returned. If the device has * a feature level reported by {@link ANeuralNetworksDevice_getFeatureLevel} * that is lower than 30, then the timeout duration hint will be ignored. 
* * If this execution contains a {@link ANEURALNETWORKS_WHILE} operation, and * the condition model does not output false within the loop timeout duration, - * then execution will be aborted and ANEURALNETWORKS_MISSED_DEADLINE_* {@link ResultCode} + * then execution will be aborted and {@link ANEURALNETWORKS_MISSED_DEADLINE_*} * will be returned. * - * Before NNAPI feature level 5, this function may only be invoked when the execution is in the - * preparation state. Starting at NNAPI feature level 5, if the user sets the execution to be - * reusable by {@link ANeuralNetworksExecution_setReusable}, this function may also be invoked when - * the execution is in the completed state. - * - * See {@link ANeuralNetworksExecution} for information on execution states and multithreaded usage. + * See {@link ANeuralNetworksExecution} for information on multithreaded usage. * * See {@link ANeuralNetworksExecution_burstCompute} for burst synchronous execution. * See {@link ANeuralNetworksExecution_startCompute} for regular asynchronous execution. * See {@link ANeuralNetworksExecution_startComputeWithDependencies} for * asynchronous execution with dependencies. * - * Available since NNAPI feature level 3. + * Available since API level 29. * * @param execution The execution to be scheduled and executed. * @@ -594,15 +6833,16 @@ * ANEURALNETWORKS_UNMAPPABLE if the execution input or output memory cannot * be properly mapped. */ -int ANeuralNetworksExecution_compute(ANeuralNetworksExecution* execution) __NNAPI_INTRODUCED_IN(29); +int ANeuralNetworksExecution_compute(ANeuralNetworksExecution* execution) __INTRODUCED_IN(29); /** * Get the dimensional information of the specified output operand of the model of the - * latest computation evaluated on {@link ANeuralNetworksExecution}. + * {@link ANeuralNetworksExecution}. * - * This function may only be invoked when the execution is in the completed state. 
- * - * See {@link ANeuralNetworksExecution} for information on execution states. + * The execution must have completed. On asynchronous execution initiated by + * {@link ANeuralNetworksExecution_startCompute} or + * {@link ANeuralNetworksExecution_startComputeWithDependencies}, + * {@link ANeuralNetworksEvent_wait} must be called prior to this function. * * @param execution The execution to be queried. * @param index The index of the output argument we are querying. It is @@ -615,20 +6855,20 @@ * if the target output is provided an insufficient buffer at execution time, * ANEURALNETWORKS_BAD_DATA if the index is invalid. * - * Available since NNAPI feature level 3. + * Available since API level 29. */ int ANeuralNetworksExecution_getOutputOperandRank(ANeuralNetworksExecution* execution, int32_t index, uint32_t* rank) - __NNAPI_INTRODUCED_IN(29); + __INTRODUCED_IN(29); /** * Get the dimensional information of the specified output operand of the model of the - * latest computation evaluated on {@link ANeuralNetworksExecution}. The target output operand - * cannot be a scalar. + * {@link ANeuralNetworksExecution}. The target output operand cannot be a scalar. * - * This function may only be invoked when the execution is in the completed state. - * - * See {@link ANeuralNetworksExecution} for information on execution states. + * The execution must have completed. On asynchronous execution initiated by + * {@link ANeuralNetworksExecution_startCompute} or + * {@link ANeuralNetworksExecution_startComputeWithDependencies}, + * {@link ANeuralNetworksEvent_wait} must be called prior to this function. * * @param execution The execution to be queried. * @param index The index of the output argument we are querying. It is an index into the lists @@ -641,11 +6881,11 @@ * if the target output is provided an insufficient buffer at execution time, * ANEURALNETWORKS_BAD_DATA if the index is invalid or if the target is a scalar. * - * Available since NNAPI feature level 3. 
+ * Available since API level 29. */ int ANeuralNetworksExecution_getOutputOperandDimensions(ANeuralNetworksExecution* execution, int32_t index, uint32_t* dimensions) - __NNAPI_INTRODUCED_IN(29); + __INTRODUCED_IN(29); /** * Create a {@link ANeuralNetworksBurst} to apply the given compilation. @@ -655,7 +6895,7 @@ * * <p>The provided compilation must outlive the burst object.</p> * - * Available since NNAPI feature level 3. + * Available since API level 29. * * @param compilation The {@link ANeuralNetworksCompilation} to be evaluated. * @param burst The newly created object or NULL if unsuccessful. @@ -664,17 +6904,17 @@ * if the compilation is invalid. */ int ANeuralNetworksBurst_create(ANeuralNetworksCompilation* compilation, - ANeuralNetworksBurst** burst) __NNAPI_INTRODUCED_IN(29); + ANeuralNetworksBurst** burst) __INTRODUCED_IN(29); /** * Destroys the burst object. * - * Available since NNAPI feature level 3. + * Available since API level 29. * * @param burst The burst object to be destroyed. Passing NULL is acceptable and * results in no operation. */ -void ANeuralNetworksBurst_free(ANeuralNetworksBurst* burst) __NNAPI_INTRODUCED_IN(29); +void ANeuralNetworksBurst_free(ANeuralNetworksBurst* burst) __INTRODUCED_IN(29); /** * Schedule synchronous evaluation of the execution on a burst object. @@ -685,33 +6925,26 @@ * If {@link ANeuralNetworksExecution_setTimeout} was called on the execution, * and the execution is not able to complete before the timeout duration is * exceeded, then execution may be aborted, in which case - * ANEURALNETWORKS_MISSED_DEADLINE_* {@link ResultCode} will be returned. + * {@link ANEURALNETWORKS_MISSED_DEADLINE_*} will be returned. 
* * If the execution contains a {@link ANEURALNETWORKS_WHILE} operation, and * the condition model does not output false within the loop timeout duration, - * then execution will be aborted and ANEURALNETWORKS_MISSED_DEADLINE_* {@link ResultCode} + * then execution will be aborted and {@link ANEURALNETWORKS_MISSED_DEADLINE_*} * will be returned. If the device has a feature level reported by - * {@link ANeuralNetworksDevice_getFeatureLevel} that is lower than - * {@link ANEURALNETWORKS_FEATURE_LEVEL_4}, then the timeout duration hint will be ignored. + * {@link ANeuralNetworksDevice_getFeatureLevel} that is lower than 30, then the + * timeout duration hint will be ignored. * * <p>There must be at most one {@link ANeuralNetworksExecution} processing at * any given time for any given burst object. Any * {@link ANeuralNetworksExecution} launched before the previous has finished * will result in ANEURALNETWORKS_BAD_STATE.</p> * - * Before NNAPI feature level 5, this function may only be invoked when the execution is in the - * preparation state. Starting at NNAPI feature level 5, if the user sets the execution to be - * reusable by {@link ANeuralNetworksExecution_setReusable}, this function may also be invoked when - * the execution is in the completed state. - * - * See {@link ANeuralNetworksExecution} for information on execution states and multithreaded usage. - * * See {@link ANeuralNetworksExecution_compute} for synchronous execution. * See {@link ANeuralNetworksExecution_startCompute} for regular asynchronous execution. * See {@link ANeuralNetworksExecution_startComputeWithDependencies} for * asynchronous execution with dependencies. * - * Available since NNAPI feature level 3. + * Available since API level 29. * * @param burst The burst object to execute on. * @param execution The execution to be scheduled and executed. The execution @@ -721,7 +6954,7 @@ * @return ANEURALNETWORKS_NO_ERROR if the execution completed normally. 
*/ int ANeuralNetworksExecution_burstCompute(ANeuralNetworksExecution* execution, - ANeuralNetworksBurst* burst) __NNAPI_INTRODUCED_IN(29); + ANeuralNetworksBurst* burst) __INTRODUCED_IN(29); /** * Creates a shared memory object from an AHardwareBuffer handle. @@ -731,7 +6964,7 @@ * {@link ANeuralNetworksMemory} for a description on how to use this shared memory. * * If the shared memory is backed by an AHardwareBuffer of a format other than - * AHARDWAREBUFFER_FORMAT_BLOB, it can only be used for model inputs and outputs. + * AHARDWAREBUFFER_FORMAT_BLOB, it can only be used for Model inputs and outputs. * When calling {@link ANeuralNetworksExecution_setInputFromMemory} or * {@link ANeuralNetworksExecution_setOutputFromMemory} with the shared memory, both * offset and length must be set to zero and the entire memory region will be @@ -746,7 +6979,7 @@ * * The provided AHardwareBuffer must outlive the ANeuralNetworksMemory object. * - * Available since NNAPI feature level 3. + * Available since API level 29. * * @param ahwb The AHardwareBuffer handle. * @param memory The memory object to be created. @@ -758,7 +6991,7 @@ */ int ANeuralNetworksMemory_createFromAHardwareBuffer(const AHardwareBuffer* ahwb, ANeuralNetworksMemory** memory) - __NNAPI_INTRODUCED_IN(29); + __INTRODUCED_IN(29); /** @@ -771,14 +7004,12 @@ * {@link ANeuralNetworksCompilation} which in turn was created from * {@link ANeuralNetworksCompilation_createForDevices} with numDevices = 1. * If the device has a feature level reported by - * {@link ANeuralNetworksDevice_getFeatureLevel} that is lower than - * {@link ANEURALNETWORKS_FEATURE_LEVEL_3}, then the duration will not be measured. + * {@link ANeuralNetworksDevice_getFeatureLevel} that is lower than 29, then the + * duration will not be measured. * - * This function may only be invoked when the execution is in the preparation state. + * See {@link ANeuralNetworksExecution} for information on multithreaded usage. 
* - * See {@link ANeuralNetworksExecution} for information on execution states and multithreaded usage. - * - * Available since NNAPI feature level 3. + * Available since API level 29. * * @param execution The execution to be modified. * @param measure 'true' if duration is to be measured, 'false' if not. @@ -786,15 +7017,15 @@ * @return ANEURALNETWORKS_NO_ERROR if successful. */ int ANeuralNetworksExecution_setMeasureTiming(ANeuralNetworksExecution* execution, bool measure) - __NNAPI_INTRODUCED_IN(29); + __INTRODUCED_IN(29); /** - * Get the time spent in the latest computation evaluated on the specified - * {@link ANeuralNetworksExecution}, in nanoseconds. + * Get the time spent in the specified {@link ANeuralNetworksExecution}, in nanoseconds. * - * This function may only be invoked when the execution is in the completed state. - * - * See {@link ANeuralNetworksExecution} for information on execution states. + * The execution must have completed. On asynchronous execution initiated by + * {@link ANeuralNetworksExecution_startCompute} or + * {@link ANeuralNetworksExecution_startComputeWithDependencies}, + * {@link ANeuralNetworksEvent_wait} must be called prior to this function. * * @param execution The execution to be queried. * @param durationCode The measurement to be queried, specified by {@link DurationCode}. @@ -802,17 +7033,21 @@ * {@link ANeuralNetworksExecution_setMeasureTiming}, if the * device is has a feature level reported by * {@link ANeuralNetworksDevice_getFeatureLevel} that is lower - * than {@link ANEURALNETWORKS_FEATURE_LEVEL_3}, or for some other - * reason the duration is not available, UINT64_MAX will be returned. - * A particular device need not support any given measurement. + * than 29, or for some other reason the duration is not + * available, UINT64_MAX will be returned. A particular device + * need not support any given measurement. * * @return ANEURALNETWORKS_NO_ERROR if successful. * - * Available since NNAPI feature level 3. 
+ * Available since API level 29. */ int ANeuralNetworksExecution_getDuration(const ANeuralNetworksExecution* execution, int32_t durationCode, uint64_t* duration) - __NNAPI_INTRODUCED_IN(29); + __INTRODUCED_IN(29); + +#endif // __ANDROID_API__ >= 29 + +#if __ANDROID_API__ >= 27 /** * Creates a shared memory object from a file descriptor. @@ -821,11 +7056,11 @@ * See {@link ANeuralNetworksMemory} for a description on how to use * this shared memory. * - * Available since NNAPI feature level 1. + * Available since API level 27. * * @param size The requested size in bytes. * Must not be larger than the file size. - * @param protect The desired memory protection for the mapping. + * @param prot The desired memory protection for the mapping. * It is either PROT_NONE or the bitwise OR of one or * more of the following flags: PROT_READ, PROT_WRITE. * @param fd The requested file descriptor. @@ -839,7 +7074,7 @@ * @return ANEURALNETWORKS_NO_ERROR if the request completed normally. */ int ANeuralNetworksMemory_createFromFd(size_t size, int protect, int fd, size_t offset, - ANeuralNetworksMemory** memory) __NNAPI_INTRODUCED_IN(27); + ANeuralNetworksMemory** memory) __INTRODUCED_IN(27); /** * Delete a memory object. @@ -848,12 +7083,12 @@ * This will free the underlying actual memory if no other code has open * handles to this memory. * - * Available since NNAPI feature level 1. + * Available since API level 27. * * @param memory The memory object to be freed. Passing NULL is acceptable and * results in no operation. */ -void ANeuralNetworksMemory_free(ANeuralNetworksMemory* memory) __NNAPI_INTRODUCED_IN(27); +void ANeuralNetworksMemory_free(ANeuralNetworksMemory* memory) __INTRODUCED_IN(27); /** * Create an empty {@link ANeuralNetworksModel}. @@ -874,14 +7109,14 @@ * <p>{@link ANeuralNetworksModel_free} should be called once the model * is no longer needed.</p> * - * Available since NNAPI feature level 1. + * Available since API level 27. 
* * @param model The {@link ANeuralNetworksModel} to be created. * Set to NULL if unsuccessful. * * @return ANEURALNETWORKS_NO_ERROR if successful. */ -int ANeuralNetworksModel_create(ANeuralNetworksModel** model) __NNAPI_INTRODUCED_IN(27); +int ANeuralNetworksModel_create(ANeuralNetworksModel** model) __INTRODUCED_IN(27); /** * Destroy a model. @@ -891,12 +7126,12 @@ * * See {@link ANeuralNetworksModel} for information on multithreaded usage. * - * Available since NNAPI feature level 1. + * Available since API level 27. * * @param model The model to be destroyed. Passing NULL is acceptable and * results in no operation. */ -void ANeuralNetworksModel_free(ANeuralNetworksModel* model) __NNAPI_INTRODUCED_IN(27); +void ANeuralNetworksModel_free(ANeuralNetworksModel* model) __INTRODUCED_IN(27); /** * Indicate that we have finished modifying a model. Required before @@ -910,13 +7145,13 @@ * * See {@link ANeuralNetworksModel} for information on multithreaded usage. * - * Available since NNAPI feature level 1. + * Available since API level 27. * * @param model The model to be finished. * * @return ANEURALNETWORKS_NO_ERROR if successful. */ -int ANeuralNetworksModel_finish(ANeuralNetworksModel* model) __NNAPI_INTRODUCED_IN(27); +int ANeuralNetworksModel_finish(ANeuralNetworksModel* model) __INTRODUCED_IN(27); /** * Add an operand to a model. @@ -930,8 +7165,9 @@ * {@link ANeuralNetworksModel_setOperandValueFromMemory}, * {@link ANeuralNetworksExecution_setInput}, * {@link ANeuralNetworksExecution_setInputFromMemory}, - * {@link ANeuralNetworksExecution_setOutput}, and - * {@link ANeuralNetworksExecution_setOutputFromMemory}. + * {@link ANeuralNetworksExecution_setOutput}, + * {@link ANeuralNetworksExecution_setOutputFromMemory} and + * {@link ANeuralNetworksExecution_setOperandValue}. 
* * <p>Every operand must be referenced in exactly one of the following * ways:<ul> @@ -941,8 +7177,7 @@ * {@link ANeuralNetworksModel_setOperandValue} or * {@link ANeuralNetworksModel_setOperandValueFromMemory}.</li> * <li>It is identified as an output of exactly one operation with - * {@link ANeuralNetworksModel_addOperation}.</li> - * </ul></p> + * {@link ANeuralNetworksModel_addOperation}.</li></p> * <p>An operand that is identified as a model input or as a constant * must not also be identified as a model output with * {@link ANeuralNetworksModel_identifyInputsAndOutputs}.</p> @@ -958,7 +7193,7 @@ * * See {@link ANeuralNetworksModel} for information on multithreaded usage. * - * Available since NNAPI feature level 1. + * Available since API level 27. * * @param model The model to be modified. * @param type The {@link ANeuralNetworksOperandType} that describes the shape @@ -969,18 +7204,17 @@ * @return ANEURALNETWORKS_NO_ERROR if successful. */ int ANeuralNetworksModel_addOperand(ANeuralNetworksModel* model, - const ANeuralNetworksOperandType* type) - __NNAPI_INTRODUCED_IN(27); + const ANeuralNetworksOperandType* type) __INTRODUCED_IN(27); /** * Sets an operand to a constant value. * * Values of length smaller or equal to - * ANEURALNETWORKS_MAX_SIZE_OF_IMMEDIATELY_COPIED_VALUES + * {@link ANEURALNETWORKS_MAX_SIZE_OF_IMMEDIATELY_COPIED_VALUES} * are immediately copied into the model. * * For values of length greater than - * ANEURALNETWORKS_MAX_SIZE_OF_IMMEDIATELY_COPIED_VALUES, a pointer to + * {@link ANEURALNETWORKS_MAX_SIZE_OF_IMMEDIATELY_COPIED_VALUES}, a pointer to * the buffer is stored within the model. The application must not change the * content of this region until all executions using this model have * completed. As the data may be copied during processing, modifying the data @@ -998,7 +7232,7 @@ * * See {@link ANeuralNetworksModel} for information on multithreaded usage. * - * Available since NNAPI feature level 1. + * Available since API level 27. 
* * @param model The model to be modified. * @param index The index of the model operand we're setting. @@ -1008,8 +7242,9 @@ * @return ANEURALNETWORKS_NO_ERROR if successful. */ int ANeuralNetworksModel_setOperandValue(ANeuralNetworksModel* model, int32_t index, - const void* buffer, size_t length) - __NNAPI_INTRODUCED_IN(27); + const void* buffer, size_t length) __INTRODUCED_IN(27); + +#if __ANDROID_API__ >= 29 /** * Sets an operand's per channel quantization parameters. @@ -1020,7 +7255,7 @@ * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} before * calling {@link ANeuralNetworksModel_finish}. * - * Available since NNAPI feature level 3. + * Available since API level 29. * * @param model The model to be modified. * @param index The index of the model operand we're setting. @@ -1032,7 +7267,9 @@ */ int ANeuralNetworksModel_setOperandSymmPerChannelQuantParams( ANeuralNetworksModel* model, int32_t index, - const ANeuralNetworksSymmPerChannelQuantParams* channelQuant) __NNAPI_INTRODUCED_IN(29); + const ANeuralNetworksSymmPerChannelQuantParams* channelQuant) __INTRODUCED_IN(29); + +#endif // __ANDROID_API__ >= 29 /** * Sets an operand to a value stored in a memory object. @@ -1061,10 +7298,11 @@ * See {@link ANeuralNetworksMemory_createFromAHardwareBuffer} for information on * AHardwareBuffer usage. * - * Available since NNAPI feature level 1. + * Available since API level 27. * * @param model The model to be modified. * @param index The index of the model operand we're setting. + * @param buffer A pointer to the data to use. * @param memory The memory containing the data. * @param offset This specifies the location of the data within the memory. * The offset is in bytes from the start of memory. 
@@ -1075,7 +7313,9 @@ int ANeuralNetworksModel_setOperandValueFromMemory(ANeuralNetworksModel* model, int32_t index, const ANeuralNetworksMemory* memory, size_t offset, size_t length) - __NNAPI_INTRODUCED_IN(27); + __INTRODUCED_IN(27); + +#if __ANDROID_API__ >= 30 /** * Sets an operand to a value that is a reference to another NNAPI model. @@ -1094,7 +7334,7 @@ * * See {@link ANeuralNetworksModel} for information on multithreaded usage. * - * Available since NNAPI feature level 4. + * Available since API level 30. * * @param model The model to be modified. * @param index The index of the model operand we're setting. @@ -1104,7 +7344,9 @@ */ int ANeuralNetworksModel_setOperandValueFromModel(ANeuralNetworksModel* model, int32_t index, const ANeuralNetworksModel* value) - __NNAPI_INTRODUCED_IN(30); + __INTRODUCED_IN(30); + +#endif // __ANDROID_API__ >= 30 /** * Add an operation to a model. @@ -1124,14 +7366,14 @@ * * See {@link ANeuralNetworksModel} for information on multithreaded usage. * - * Available since NNAPI feature level 1. + * Available since API level 27. * * @return ANEURALNETWORKS_NO_ERROR if successful. */ int ANeuralNetworksModel_addOperation(ANeuralNetworksModel* model, ANeuralNetworksOperationType type, uint32_t inputCount, const uint32_t* inputs, uint32_t outputCount, - const uint32_t* outputs) __NNAPI_INTRODUCED_IN(27); + const uint32_t* outputs) __INTRODUCED_IN(27); /** * Specifies which operands will be the model's inputs and @@ -1154,13 +7396,14 @@ * * See {@link ANeuralNetworksModel} for information on multithreaded usage. * - * Available since NNAPI feature level 1. + * Available since API level 27. 
* */ int ANeuralNetworksModel_identifyInputsAndOutputs(ANeuralNetworksModel* model, uint32_t inputCount, const uint32_t* inputs, uint32_t outputCount, - const uint32_t* outputs) - __NNAPI_INTRODUCED_IN(27); + const uint32_t* outputs) __INTRODUCED_IN(27); + +#if __ANDROID_API__ >= 28 /** * Specifies whether {@link ANEURALNETWORKS_TENSOR_FLOAT32} is allowed to be @@ -1183,12 +7426,14 @@ * Attempting to modify a model once {@link ANeuralNetworksModel_finish} has been * called will return an error. * - * Available since NNAPI feature level 2. + * Available since API level 28. * * See {@link ANeuralNetworksModel} for information on multithreaded usage. */ int ANeuralNetworksModel_relaxComputationFloat32toFloat16(ANeuralNetworksModel* model, bool allow) - __NNAPI_INTRODUCED_IN(28); + __INTRODUCED_IN(28); + +#endif // __ANDROID_API__ >= 28 /** * Create a {@link ANeuralNetworksCompilation} to compile the given model. @@ -1213,7 +7458,7 @@ * * See {@link ANeuralNetworksCompilation} for information on multithreaded usage. * - * Available since NNAPI feature level 1. + * Available since API level 27. * * @param model The {@link ANeuralNetworksModel} to be compiled. * @param compilation The newly created object or NULL if unsuccessful. @@ -1222,8 +7467,7 @@ * if the model is invalid. */ int ANeuralNetworksCompilation_create(ANeuralNetworksModel* model, - ANeuralNetworksCompilation** compilation) - __NNAPI_INTRODUCED_IN(27); + ANeuralNetworksCompilation** compilation) __INTRODUCED_IN(27); /** * Destroy a compilation. @@ -1233,13 +7477,12 @@ * * See {@link ANeuralNetworksCompilation} for information on multithreaded usage. * - * Available since NNAPI feature level 1. + * Available since API level 27. * * @param compilation The compilation to be destroyed. Passing NULL is acceptable and * results in no operation. 
*/ -void ANeuralNetworksCompilation_free(ANeuralNetworksCompilation* compilation) - __NNAPI_INTRODUCED_IN(27); +void ANeuralNetworksCompilation_free(ANeuralNetworksCompilation* compilation) __INTRODUCED_IN(27); /** * Sets the execution preference. @@ -1249,17 +7492,17 @@ * * See {@link ANeuralNetworksCompilation} for information on multithreaded usage. * - * Available since NNAPI feature level 1. + * Available since API level 27. * * @param compilation The compilation to be modified. - * @param preference Either {@link ANEURALNETWORKS_PREFER_LOW_POWER}, - * {@link ANEURALNETWORKS_PREFER_FAST_SINGLE_ANSWER}, or - * {@link ANEURALNETWORKS_PREFER_SUSTAINED_SPEED}. + * @param preference Either {@link PREFER_LOW_POWER}, + * {@link PREFER_SINGLE_FAST_ANSWER}, or + * {@link PREFER_SUSTAINED_SPEED}. * * @return ANEURALNETWORKS_NO_ERROR if successful. */ int ANeuralNetworksCompilation_setPreference(ANeuralNetworksCompilation* compilation, - int32_t preference) __NNAPI_INTRODUCED_IN(27); + int32_t preference) __INTRODUCED_IN(27); /** * Indicate that we have finished modifying a compilation. Required before @@ -1274,18 +7517,19 @@ * If {@link ANeuralNetworksCompilation_setTimeout} was called on this * compilation, and the compilation is not able to be finished before the * timeout duration is exceeded, then compilation may be aborted, in which case - * ANEURALNETWORKS_MISSED_DEADLINE_* {@link ResultCode} will be returned. + * {@link ANEURALNETWORKS_MISSED_DEADLINE_*} will be returned. * * See {@link ANeuralNetworksCompilation} for information on multithreaded usage. * - * Available since NNAPI feature level 1. + * Available since API level 27. * * @param compilation The compilation to be finished. * * @return ANEURALNETWORKS_NO_ERROR if successful. 
*/ -int ANeuralNetworksCompilation_finish(ANeuralNetworksCompilation* compilation) - __NNAPI_INTRODUCED_IN(27); +int ANeuralNetworksCompilation_finish(ANeuralNetworksCompilation* compilation) __INTRODUCED_IN(27); + +#if __ANDROID_API__ >= 30 /** * Set the execution priority. @@ -1301,7 +7545,7 @@ * * See {@link ANeuralNetworksCompilation} for information on multithreaded usage. * - * Available since NNAPI feature level 4. + * Available since API level 30. * * @param compilation The compilation to be modified. * @param priority The relative priority of the execution compared to other @@ -1311,7 +7555,7 @@ * @return ANEURALNETWORKS_NO_ERROR if successful. */ int ANeuralNetworksCompilation_setPriority(ANeuralNetworksCompilation* compilation, int priority) - __NNAPI_INTRODUCED_IN(30); + __INTRODUCED_IN(30); /** * Set the maximum expected duration for compiling the model. @@ -1337,9 +7581,8 @@ * {@link ANeuralNetworksCompilation_createForDevices} with numDevices = 1, * otherwise this function will fail with ANEURALNETWORKS_BAD_DATA. If the * device has a feature level reported by - * {@link ANeuralNetworksDevice_getFeatureLevel} that is lower than - * {@link ANEURALNETWORKS_FEATURE_LEVEL_4}, then the timeout duration hint will - * be ignored. + * {@link ANeuralNetworksDevice_getFeatureLevel} that is lower than 30, then the + * timeout duration hint will be ignored. * * See {@link ANeuralNetworksCompilation} for information on multithreaded usage. * @@ -1351,10 +7594,12 @@ * * @return ANEURALNETWORKS_NO_ERROR if successful. * - * Available since NNAPI feature level 4. + * Available since API level 30. */ int ANeuralNetworksCompilation_setTimeout(ANeuralNetworksCompilation* compilation, - uint64_t duration) __NNAPI_INTRODUCED_IN(30); + uint64_t duration) __INTRODUCED_IN(30); + +#endif // __ANDROID_API__ >= 30 /** * Create a {@link ANeuralNetworksExecution} to apply the given compilation. 
@@ -1368,7 +7613,7 @@ * * See {@link ANeuralNetworksExecution} for information on multithreaded usage. * - * Available since NNAPI feature level 1. + * Available since API level 27. * * @param compilation The {@link ANeuralNetworksCompilation} to be evaluated. * @param execution The newly created object or NULL if unsuccessful. @@ -1377,7 +7622,7 @@ * if the compilation is invalid. */ int ANeuralNetworksExecution_create(ANeuralNetworksCompilation* compilation, - ANeuralNetworksExecution** execution) __NNAPI_INTRODUCED_IN(27); + ANeuralNetworksExecution** execution) __INTRODUCED_IN(27); /** * Destroy an execution. @@ -1395,12 +7640,12 @@ * * See {@link ANeuralNetworksExecution} for information on multithreaded usage. * - * Available since NNAPI feature level 1. + * Available since API level 27. * * @param execution The execution to be destroyed. Passing NULL is acceptable and * results in no operation. */ -void ANeuralNetworksExecution_free(ANeuralNetworksExecution* execution) __NNAPI_INTRODUCED_IN(27); +void ANeuralNetworksExecution_free(ANeuralNetworksExecution* execution) __INTRODUCED_IN(27); /** * Associate a user buffer with an input of the model of the @@ -1415,26 +7660,9 @@ * If the input is optional, you can indicate that it is omitted by * passing nullptr for buffer and 0 for length. * - * Otherwise, if the user has not set the execution to accept padded input buffers by - * calling {@link ANeuralNetworksExecution_enableInputAndOutputPadding}, then the length argument - * must be equal to the raw size of the input (i.e. the size of an element multiplied by the - * number of elements). Passing a length argument with value not equal to the raw size of the input - * will result in ANEURALNETWORKS_BAD_DATA. + * See {@link ANeuralNetworksExecution} for information on multithreaded usage. 
* - * Otherwise, if the user has set the execution to accept padded input buffers by calling - * {@link ANeuralNetworksExecution_enableInputAndOutputPadding}, the length argument may be greater - * than the raw size of the input, and the extra bytes at the end of the buffer may be used - * by the driver to access data in chunks, for efficiency. Passing a length argument with value - * less than the raw size of the input will result in ANEURALNETWORKS_BAD_DATA. - * - * This function may only be invoked when the execution is in the preparation state. - * - * See {@link ANeuralNetworksExecution} for information on execution states and multithreaded usage. - * See {@link ANeuralNetworksCompilation_getPreferredMemoryAlignmentForInput} and - * {@link ANeuralNetworksCompilation_getPreferredMemoryPaddingForInput} for information on getting - * preferred buffer alignment and padding, to improve performance. - * - * Available since NNAPI feature level 1. + * Available since API level 27. * * @param execution The execution to be modified. * @param index The index of the input argument we are setting. It is @@ -1453,14 +7681,14 @@ * nor the dimensions it points to need to outlive the call * to {@link ANeuralNetworksExecution_setInput}. * @param buffer The buffer containing the data. - * @param length The size of the data value in bytes plus any end padding. + * @param length The length in bytes of the buffer. * * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA if the * name is not recognized or the buffer is too small for the input. 
*/ int ANeuralNetworksExecution_setInput(ANeuralNetworksExecution* execution, int32_t index, const ANeuralNetworksOperandType* type, const void* buffer, - size_t length) __NNAPI_INTRODUCED_IN(27); + size_t length) __INTRODUCED_IN(27); /** * Associate a region of a memory object with an input of the model of the @@ -1476,35 +7704,13 @@ * using {@link ANeuralNetworksExecution_setInput} instead, passing nullptr for * buffer and 0 for length. * - * If the memory is an AHardwareBuffer of a format other than AHARDWAREBUFFER_FORMAT_BLOB created - * from {@link ANeuralNetworksMemory_createFromAHardwareBuffer}, or an opaque memory object created - * from {@link ANeuralNetworksMemory_createFromDesc}, both offset and length must be 0, indicating - * the whole memory is used. - * - * Otherwise, if the user has not set the execution to accept padded input memory objects by - * calling {@link ANeuralNetworksExecution_enableInputAndOutputPadding}, then the length argument - * must be equal to the raw size of the input (i.e. the size of an element multiplied by the - * number of elements). Passing a length argument with value not equal to the raw size of the input - * will result in ANEURALNETWORKS_BAD_DATA. - * - * Otherwise, if the user has set the execution to accept padded input memory objects by calling - * {@link ANeuralNetworksExecution_enableInputAndOutputPadding}, the length argument may be greater - * than the raw size of the input, and the extra bytes at the end of the memory region may be used - * by the driver to access data in chunks, for efficiency. Passing a length argument with value - * less than the raw size of the input will result in ANEURALNETWORKS_BAD_DATA. - * - * This function may only be invoked when the execution is in the preparation state. - * - * See {@link ANeuralNetworksExecution} for information on execution states and multithreaded usage. + * See {@link ANeuralNetworksExecution} for information on multithreaded usage. 
* See {@link ANeuralNetworksMemory_createFromAHardwareBuffer} for information on * AHardwareBuffer usage. * See {@link ANeuralNetworksMemory_createFromDesc} for information on usage of memory objects * created from memory descriptors. - * See {@link ANeuralNetworksCompilation_getPreferredMemoryAlignmentForInput} and - * {@link ANeuralNetworksCompilation_getPreferredMemoryPaddingForInput} for information on getting - * preferred memory alignment and padding, to improve performance. * - * Available since NNAPI feature level 1. + * Available since API level 27. * * @param execution The execution to be modified. * @param index The index of the input argument we are setting. It is @@ -1523,7 +7729,7 @@ * @param memory The memory containing the data. * @param offset This specifies the location of the data within the memory. * The offset is in bytes from the start of memory. - * @param length The size of the data value in bytes plus any end padding. + * @param length The size in bytes of the data value. * * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA if the * name is not recognized or the buffer is too small for the input. @@ -1531,7 +7737,7 @@ int ANeuralNetworksExecution_setInputFromMemory(ANeuralNetworksExecution* execution, int32_t index, const ANeuralNetworksOperandType* type, const ANeuralNetworksMemory* memory, size_t offset, - size_t length) __NNAPI_INTRODUCED_IN(27); + size_t length) __INTRODUCED_IN(27); /** * Associate a user buffer with an output of the model of the @@ -1540,31 +7746,14 @@ * application must not change the content of the buffer until the execution has * completed. * - * <p>The provided buffer must outlive the execution.</p> - * * If the output is optional, you can indicate that it is omitted by * passing nullptr for buffer and 0 for length. 
* - * Otherwise, if the user has not set the execution to accept padded output buffers by - * calling {@link ANeuralNetworksExecution_enableInputAndOutputPadding}, then the length argument - * must be equal to the raw size of the output (i.e. the size of an element multiplied by the - * number of elements). Passing a length argument with value not equal to the raw size of the output - * will result in ANEURALNETWORKS_BAD_DATA. + * <p>The provided buffer must outlive the execution.</p> * - * Otherwise, if the user has set the execution to accept padded output buffers by calling - * {@link ANeuralNetworksExecution_enableInputAndOutputPadding}, the length argument may be greater - * than the raw size of the output, and the extra bytes at the end of the buffer may be used - * by the driver to access data in chunks, for efficiency. Passing a length argument with value - * less than the raw size of the output will result in ANEURALNETWORKS_BAD_DATA. + * See {@link ANeuralNetworksExecution} for information on multithreaded usage. * - * This function may only be invoked when the execution is in the preparation state. - * - * See {@link ANeuralNetworksExecution} for information on execution states and multithreaded usage. - * See {@link ANeuralNetworksCompilation_getPreferredMemoryAlignmentForOutput} and - * {@link ANeuralNetworksCompilation_getPreferredMemoryPaddingForOutput} for information on getting - * preferred buffer alignment and padding, to improve performance. - * - * Available since NNAPI feature level 1. + * Available since API level 27. * * @param execution The execution to be modified. * @param index The index of the output argument we are setting. It is @@ -1581,21 +7770,21 @@ * passed. Neither the {@link ANeuralNetworksOperandType} * nor the dimensions it points to need to outlive the call * to {@link ANeuralNetworksExecution_setOutput}. 
- * Since NNAPI feature level 3, the output operand can have unspecified + * Since API level 29, the output operand can have unspecified * dimensions or rank to be deduced dynamically during the execution. * However, the user must provide a large enough buffer. The user * can retrieve the output dimensional information after the execution * by {@link ANeuralNetworksExecution_getOutputOperandRank} and * {@link ANeuralNetworksExecution_getOutputOperandDimensions}. * @param buffer The buffer where the data is to be written. - * @param length The size of the data value in bytes plus any end padding. + * @param length The length in bytes of the buffer. * * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA if the * name is not recognized or the buffer is too small for the output. */ int ANeuralNetworksExecution_setOutput(ANeuralNetworksExecution* execution, int32_t index, const ANeuralNetworksOperandType* type, void* buffer, - size_t length) __NNAPI_INTRODUCED_IN(27); + size_t length) __INTRODUCED_IN(27); /** * Associate a region of a memory object with an output of the model of the @@ -1604,41 +7793,19 @@ * application must not change the content of the region until the execution has * completed. * - * <p>The provided memory must outlive the execution.</p> - * * If the output is optional, you can indicate that it is omitted by * using {@link ANeuralNetworksExecution_setOutput} instead, passing nullptr for * buffer and 0 for length. * - * If the memory is an AHardwareBuffer of a format other than AHARDWAREBUFFER_FORMAT_BLOB created - * from {@link ANeuralNetworksMemory_createFromAHardwareBuffer}, or an opaque memory object created - * from {@link ANeuralNetworksMemory_createFromDesc}, both offset and length must be 0, indicating - * the whole memory is used. 
+ * <p>The provided memory must outlive the execution.</p> * - * Otherwise, if the user has not set the execution to accept padded output memory objects by - * calling {@link ANeuralNetworksExecution_enableInputAndOutputPadding}, then the length argument - * must be equal to the raw size of the output (i.e. the size of an element multiplied by the - * number of elements). Passing a length argument with value not equal to the raw size of the output - * will result in ANEURALNETWORKS_BAD_DATA. - * - * Otherwise, if the user has set the execution to accept padded output memory objects by calling - * {@link ANeuralNetworksExecution_enableInputAndOutputPadding}, the length argument may be greater - * than the raw size of the output, and the extra bytes at the end of the memory region may be used - * by the driver to access data in chunks, for efficiency. Passing a length argument with value - * less than the raw size of the output will result in ANEURALNETWORKS_BAD_DATA. - * - * This function may only be invoked when the execution is in the preparation state. - * - * See {@link ANeuralNetworksExecution} for information on execution states and multithreaded usage. + * See {@link ANeuralNetworksExecution} for information on multithreaded usage. * See {@link ANeuralNetworksMemory_createFromAHardwareBuffer} for information on * AHardwareBuffer usage. * See {@link ANeuralNetworksMemory_createFromDesc} for information on usage of memory objects * created from memory descriptors. - * See {@link ANeuralNetworksCompilation_getPreferredMemoryAlignmentForOutput} and - * {@link ANeuralNetworksCompilation_getPreferredMemoryPaddingForOutput} for information on getting - * preferred memory alignment and padding, to improve performance. * - * Available since NNAPI feature level 1. + * Available since API level 27. * * @param execution The execution to be modified. * @param index The index of the output argument we are setting. It is @@ -1654,7 +7821,7 @@ * passed. 
Neither the {@link ANeuralNetworksOperandType} * nor the dimensions it points to need to outlive the call * to {@link ANeuralNetworksExecution_setOutputFromMemory}. - * Since NNAPI feature level 3, the output operand can have unspecified + * Since API level 29, the output operand can have unspecified * dimensions or rank to be deduced dynamically during the execution. * However, the user must provide a large enough memory. The user * can retrieve the output dimensional information after the execution @@ -1663,7 +7830,7 @@ * @param memory The memory where the data is to be stored. * @param offset This specifies the location of the data within the memory. * The offset is in bytes from the start of memory. - * @param length The size of the data value in bytes plus any end padding. + * @param length The length in bytes of the data value. * * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA if the * name is not recognized or the buffer is too small for the output. @@ -1671,7 +7838,7 @@ int ANeuralNetworksExecution_setOutputFromMemory(ANeuralNetworksExecution* execution, int32_t index, const ANeuralNetworksOperandType* type, const ANeuralNetworksMemory* memory, size_t offset, - size_t length) __NNAPI_INTRODUCED_IN(27); + size_t length) __INTRODUCED_IN(27); /** * Schedule asynchronous evaluation of the execution. @@ -1688,36 +7855,30 @@ * If {@link ANeuralNetworksExecution_setTimeout} was called on this execution, * and the execution is not able to complete before the timeout duration is * exceeded, then execution may be aborted, in which case - * ANEURALNETWORKS_MISSED_DEADLINE_* {@link ResultCode} will be returned through + * {@link ANEURALNETWORKS_MISSED_DEADLINE_*} will be returned through * {@link ANeuralNetworksExecution_startCompute} or * {@link ANeuralNetworksEvent_wait} on the event object. 
If the device has a * feature level reported by {@link ANeuralNetworksDevice_getFeatureLevel} that - * is lower than {@link ANEURALNETWORKS_FEATURE_LEVEL_4}, then the timeout - * duration hint will be ignored. + * is lower than 30, then the timeout duration hint will be ignored. * * If this execution contains a {@link ANEURALNETWORKS_WHILE} operation, and * the condition model does not output false within the loop timeout duration, - * then execution will be aborted and ANEURALNETWORKS_MISSED_DEADLINE_* {@link ResultCode} + * then execution will be aborted and {@link ANEURALNETWORKS_MISSED_DEADLINE_*} * will be returned through {@link ANeuralNetworksEvent_wait} on the event * object. * * If the device can detect before the execution has started that the execution * will not complete within the timeout duration, the device may choose to skip - * the execution and instead return ANEURALNETWORKS_MISSED_DEADLINE_* {@link ResultCode}. + * the execution and instead return {@link ANEURALNETWORKS_MISSED_DEADLINE_*}. * - * Before NNAPI feature level 5, this function may only be invoked when the execution is in the - * preparation state. Starting at NNAPI feature level 5, if the user sets the execution to be - * reusable by {@link ANeuralNetworksExecution_setReusable}, this function may also be invoked when - * the execution is in the completed state. - * - * See {@link ANeuralNetworksExecution} for information on execution states and multithreaded usage. + * See {@link ANeuralNetworksExecution} for information on multithreaded usage. * * See {@link ANeuralNetworksExecution_compute} for synchronous execution. * See {@link ANeuralNetworksExecution_burstCompute} for burst synchronous execution. * See {@link ANeuralNetworksExecution_startComputeWithDependencies} for * asynchronous execution with dependencies. * - * Available since NNAPI feature level 1. + * Available since API level 27. * * @param execution The execution to be scheduled and executed. 
* @param event The event that will be signaled on completion. event is set to @@ -1726,7 +7887,9 @@ * @return ANEURALNETWORKS_NO_ERROR if the evaluation is successfully scheduled. */ int ANeuralNetworksExecution_startCompute(ANeuralNetworksExecution* execution, - ANeuralNetworksEvent** event) __NNAPI_INTRODUCED_IN(27); + ANeuralNetworksEvent** event) __INTRODUCED_IN(27); + +#if __ANDROID_API__ >= 30 /** * Set the maximum expected duration of the specified execution. @@ -1757,13 +7920,10 @@ * {@link ANeuralNetworksCompilation_createForDevices} with numDevices = 1, * otherwise this function will fail with ANEURALNETWORKS_BAD_DATA. If the * device has a feature level reported by - * {@link ANeuralNetworksDevice_getFeatureLevel} that is lower than - * {@link ANEURALNETWORKS_FEATURE_LEVEL_4}, then the timeout duration hint will - * be ignored. + * {@link ANeuralNetworksDevice_getFeatureLevel} that is lower than 30, then the + * timeout duration hint will be ignored. * - * This function may only be invoked when the execution is in the preparation state. - * - * See {@link ANeuralNetworksExecution} for information on execution states and multithreaded usage. + * See {@link ANeuralNetworksExecution} for information on multithreaded usage. * * @param execution The execution to be modified. * @param duration The maximum amount of time in nanoseconds that is expected to @@ -1772,10 +7932,10 @@ * * @return ANEURALNETWORKS_NO_ERROR if successful. * - * Available since NNAPI feature level 4. + * Available since API level 30. */ int ANeuralNetworksExecution_setTimeout(ANeuralNetworksExecution* execution, uint64_t duration) - __NNAPI_INTRODUCED_IN(30); + __INTRODUCED_IN(30); /** * Set the maximum duration of WHILE loops in the specified execution. @@ -1789,9 +7949,7 @@ * {@link ANeuralNetworks_getMaximumLoopTimeout} for the default * and maximum timeout values. * - * This function may only be invoked when the execution is in the preparation state. 
- * - * See {@link ANeuralNetworksExecution} for information on execution states and multithreaded usage. + * See {@link ANeuralNetworksExecution} for information on multithreaded usage. * * @param execution The execution to be modified. * @param duration The maximum amount of time in nanoseconds that can be spent @@ -1803,28 +7961,30 @@ * ANEURALNETWORKS_BAD_STATE if execution has started. * ANEURALNETWORKS_UNEXPECTED_NULL if execution is NULL. * - * Available since NNAPI feature level 4. + * Available since API level 30. */ int ANeuralNetworksExecution_setLoopTimeout(ANeuralNetworksExecution* execution, uint64_t duration) - __NNAPI_INTRODUCED_IN(30); + __INTRODUCED_IN(30); /** * Get the default timeout value for WHILE loops. * * @return The default timeout value in nanoseconds. * - * Available since NNAPI feature level 4. + * Available since API level 30. */ -uint64_t ANeuralNetworks_getDefaultLoopTimeout() __NNAPI_INTRODUCED_IN(30); +uint64_t ANeuralNetworks_getDefaultLoopTimeout() __INTRODUCED_IN(30); /** * Get the maximum timeout value for WHILE loops. * * @return The maximum timeout value in nanoseconds. * - * Available since NNAPI feature level 4. + * Available since API level 30. */ -uint64_t ANeuralNetworks_getMaximumLoopTimeout() __NNAPI_INTRODUCED_IN(30); +uint64_t ANeuralNetworks_getMaximumLoopTimeout() __INTRODUCED_IN(30); + +#endif // __ANDROID_API__ >= 30 /** * Waits until the execution completes. @@ -1835,36 +7995,39 @@ * If {@link ANeuralNetworksExecution_setTimeout} was called on the execution * corresponding to this event, and the execution is not able to complete * before the duration is exceeded, the execution may be aborted, in which case - * ANEURALNETWORKS_MISSED_DEADLINE_* {@link ResultCode} will be returned here. + * {@link ANEURALNETWORKS_MISSED_DEADLINE_*} will be returned here. 
* * If the execution contains a {@link ANEURALNETWORKS_WHILE} operation, and * the condition model does not output false within the loop timeout duration, - * the execution will be aborted, and ANEURALNETWORKS_MISSED_DEADLINE_* {@link ResultCode} + * the execution will be aborted, and {@link ANEURALNETWORKS_MISSED_DEADLINE_*} * will be returned here. * - * See {@link ANeuralNetworksExecution} for information on execution states and multithreaded usage. + * See {@link ANeuralNetworksExecution} for information on multithreaded usage. * - * Available since NNAPI feature level 1. + * Available since API level 27. * * @param event The event that will be signaled on completion. * @return ANEURALNETWORKS_NO_ERROR if the execution completed normally. * ANEURALNETWORKS_UNMAPPABLE if the execution input or output memory cannot * be properly mapped. */ -int ANeuralNetworksEvent_wait(ANeuralNetworksEvent* event) __NNAPI_INTRODUCED_IN(27); +int ANeuralNetworksEvent_wait(ANeuralNetworksEvent* event) __INTRODUCED_IN(27); /** * Destroys the event. * * See {@link ANeuralNetworksExecution} for information on multithreaded usage. * - * Available since NNAPI feature level 1. + * Available since API level 27. * * @param event The event object to be destroyed. Passing NULL is acceptable and * results in no operation. */ -void ANeuralNetworksEvent_free(ANeuralNetworksEvent* event) __NNAPI_INTRODUCED_IN(27); +void ANeuralNetworksEvent_free(ANeuralNetworksEvent* event) __INTRODUCED_IN(27); +#endif // __ANDROID_API__ >= 27 + +#if __ANDROID_API__ >= 30 /** * Create a {@link ANeuralNetworksEvent} from a sync_fence file descriptor. * @@ -1876,10 +8039,10 @@ * * @return ANEURALNETWORKS_NO_ERROR if successful. * - * Available since NNAPI feature level 4. + * Available since API level 30. */ int ANeuralNetworksEvent_createFromSyncFenceFd(int sync_fence_fd, ANeuralNetworksEvent** event) - __NNAPI_INTRODUCED_IN(30); + __INTRODUCED_IN(30); /** * Get sync_fence file descriptor from the event. 
@@ -1900,10 +8063,10 @@ * * @return ANEURALNETWORKS_NO_ERROR if successful. * - * Available since NNAPI feature level 4. + * Available since API level 30. */ int ANeuralNetworksEvent_getSyncFenceFd(const ANeuralNetworksEvent* event, int* sync_fence_fd) - __NNAPI_INTRODUCED_IN(30); + __INTRODUCED_IN(30); /** * Schedule asynchronous evaluation of the execution with dependencies. @@ -1942,25 +8105,19 @@ * otherwise this function will fail with ANEURALNETWORKS_BAD_DATA. If either * the timeout duration from {@link ANeuralNetworksExecution_setTimeout} or the * timeout duration passed to this call is exceeded, the execution may be - * aborted, in which case ANEURALNETWORKS_MISSED_DEADLINE_* {@link ResultCode} will be + * aborted, in which case {@link ANEURALNETWORKS_MISSED_DEADLINE_*} will be * returned through {@link ANeuralNetworksExecution_startComputeWithDependencies} * or {@link ANeuralNetworksEvent_wait} on the event object. If the device has a * feature level reported by {@link ANeuralNetworksDevice_getFeatureLevel} that - * is lower than {@link ANEURALNETWORKS_FEATURE_LEVEL_4}, then the timeout duration - * hints will be ignored. + * is lower than 30, then the timeout duration hints will be ignored. * * If this execution contains a {@link ANEURALNETWORKS_WHILE} operation, and * the condition model does not output false within the loop timeout duration, - * then execution will be aborted and ANEURALNETWORKS_MISSED_DEADLINE_* {@link ResultCode} + * then execution will be aborted and {@link ANEURALNETWORKS_MISSED_DEADLINE_*} * will be returned through {@link ANeuralNetworksEvent_wait} on the event * object. * - * Before NNAPI feature level 5, this function may only be invoked when the execution is in the - * preparation state. Starting at NNAPI feature level 5, if the user sets the execution to be - * reusable by {@link ANeuralNetworksExecution_setReusable}, this function may also be invoked when - * the execution is in the completed state. 
- * - * See {@link ANeuralNetworksExecution} for information on execution states and multithreaded usage. + * See {@link ANeuralNetworksExecution} for information on multithreaded usage. * * See {@link ANeuralNetworksExecution_compute} for synchronous execution. * See {@link ANeuralNetworksExecution_burstCompute} for burst synchronous execution. @@ -1979,259 +8136,17 @@ * * @return ANEURALNETWORKS_NO_ERROR if the evaluation is successfully scheduled. * - * Available since NNAPI feature level 4. + * Available since API level 30. */ int ANeuralNetworksExecution_startComputeWithDependencies( ANeuralNetworksExecution* execution, const ANeuralNetworksEvent* const* dependencies, uint32_t num_dependencies, uint64_t duration, ANeuralNetworksEvent** event) - __NNAPI_INTRODUCED_IN(30); + __INTRODUCED_IN(30); -/** - * Get the NNAPI runtime feature level. - * - * Since API level 31 (NNAPI feature level 5), the NNAPI runtime (libneuralnetworks.so) and its - * API specification can be updated between Android API releases. - * - * On Android devices with API level 31 and newer, for NNAPI runtime feature discovery, - * the NNAPI runtime feature level must be used instead of the Android device API level. - * - * On Android devices with API level 30 and older, the Android API level of the Android - * device must be used for NNAPI runtime feature discovery. Enum values in - * {@link FeatureLevelCode} from feature level 1 to 5 have their corresponding Android - * API levels listed in their documentation, and each such enum value equals the corresponding - * API level. This allows using the Android API level as the feature level. - * This mapping between enum value and Android API level does not exist for feature levels - * after NNAPI feature level 5 and API levels after S (31). - * - * Example usage: - * int device_api_level = android_get_device_api_level(); - * int64_t runtime_feature_level = (device_api_level < __ANDROID_API_S__) ? 
- * device_api_level : ANeuralNetworks_getRuntimeFeatureLevel(); - * - * Runtime feature level is closely related to NNAPI device feature level - * ({@link ANeuralNetworksDevice_getFeatureLevel}), which indicates an NNAPI device feature level - * (the most advanced NNAPI specification and features that the driver implements). - * This function expresses NNAPI runtime feature level, which indicates the most advanced - * NNAPI specification and features the runtime implements. An NNAPI device feature level is - * always less than or equal to the runtime feature level. - * - * This function returns a {@link FeatureLevelCode} enum value, - * which is the NNAPI specification version that this NNAPI runtime implements. - * It is NOT an Android API level. - * - * Available since NNAPI feature level 5. - */ -int64_t ANeuralNetworks_getRuntimeFeatureLevel() __NNAPI_INTRODUCED_IN(31); - -/** - * Specifies whether the {@link ANeuralNetworksExecution} is able to accept padded input and output - * buffers and memory objects. - * - * By default, the input and output buffers and memory objects of {@link ANeuralNetworksExecution} - * do not allow padding. - * - * Setting the execution to accept padded input and output buffers and memory objects enables the - * length argument of {@link ANeuralNetworksExecution_setInput}, - * {@link ANeuralNetworksExecution_setInputFromMemory}, {@link ANeuralNetworksExecution_setOutput}, - * and {@link ANeuralNetworksExecution_setOutputFromMemory} to be greater than the raw size of the - * operand (i.e. the size of an element multiplied by the number of elements). The extra bytes - * at the end of the buffer or memory region may be used by the driver to access data in chunks, - * for efficiency. - * - * This method must not be called after {@link ANeuralNetworksExecution_setInput}, - * {@link ANeuralNetworksExecution_setInputFromMemory}, {@link ANeuralNetworksExecution_setOutput}, - * or {@link ANeuralNetworksExecution_setOutputFromMemory}. 
- * - * See {@link ANeuralNetworksExecution} for information on multithreaded usage. - * - * @param execution The execution to be modified. - * @param enable 'true' if the execution is to be able to accept padded input and output buffers - * and memory objects, 'false' if not. - * - * @return ANEURALNETWORKS_NO_ERROR if successful. - * ANEURALNETWORKS_UNEXPECTED_NULL if execution is NULL. - * ANEURALNETWORKS_BAD_STATE if {@link ANeuralNetworksExecution_setInput}, - * {@link ANeuralNetworksExecution_setInputFromMemory}, - * {@link ANeuralNetworksExecution_setOutput}, or - * {@link ANeuralNetworksExecution_setOutputFromMemory} has been called on the execution. - * - * Available since NNAPI feature level 5. - */ -int ANeuralNetworksExecution_enableInputAndOutputPadding(ANeuralNetworksExecution* execution, - bool enable) __NNAPI_INTRODUCED_IN(31); - -/** - * Get the preferred buffer and memory alignment of an input to an execution created from a - * particular compilation. - * - * The user may use the returned alignment value to guide the layout of the input buffer or memory - * pool. To achieve the best performance, make sure the address of the buffer passed in - * {@link ANeuralNetworksExecution_setInput}, or the offset value passed in - * {@link ANeuralNetworksExecution_setInputFromMemory}, is a multiple of the perferred alignment - * value of the same input. A driver may choose to allocate a separate buffer and do memory copying - * if the provided buffer or memory does not satisfy the preferred alignment. - * - * See {@link ANeuralNetworksCompilation} for information on multithreaded usage. - * - * @param compilation The compilation object. It must already have been finished by calling - * {@link ANeuralNetworksCompilation_finish}. - * @param index The index of the input argument we are referencing from the compilation. It is - * an index into the inputs list passed to - * {@link ANeuralNetworksModel_identifyInputsAndOutputs}. 
It is not - * the index associated with {@link ANeuralNetworksModel_addOperand}. - * @param alignment The returned preferred alignment in bytes. It will be a power of 2. - * - * @return ANEURALNETWORKS_NO_ERROR if successful. - * ANEURALNETWORKS_UNEXPECTED_NULL if either compilation or alignment is NULL. - * ANEURALNETWORKS_BAD_STATE if the compilation has not been finished. - * ANEURALNETWORKS_BAD_DATA if the index is out of range. - * - * Available since NNAPI feature level 5. - */ -int ANeuralNetworksCompilation_getPreferredMemoryAlignmentForInput( - const ANeuralNetworksCompilation* compilation, uint32_t index, uint32_t* alignment) - __NNAPI_INTRODUCED_IN(31); - -/** - * Get the preferred buffer and memory end padding of an input to an execution created from a - * particular compilation. - * - * The user may use the returned padding value to guide the layout of the input buffer or memory - * pool. To achieve the best performance, make sure the length value passed in - * {@link ANeuralNetworksExecution_setInput} or - * {@link ANeuralNetworksExecution_setInputFromMemory} is greater than or equal to the raw size of - * the input (i.e. the size of an element multiplied by the number of elements) rounding up to - * a multiple of the perferred padding value of the same input. A driver may choose to allocate a - * separate buffer and do memory copying if the provided buffer or memory value does not satisfy - * the preferred padding. - * - * See {@link ANeuralNetworksCompilation} for information on multithreaded usage. - * See {@link ANeuralNetworksExecution_enableInputAndOutputPadding}, - * {@link ANeuralNetworksExecution_setInput}, and - * {@link ANeuralNetworksExecution_setInputFromMemory} for information on passing - * input buffer or memory padding to the driver. - * - * @param compilation The compilation object. It must already have been finished by calling - * {@link ANeuralNetworksCompilation_finish}. 
- * @param index The index of the input argument we are referencing from the compilation. It is - * an index into the inputs list passed to - * {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not - * the index associated with {@link ANeuralNetworksModel_addOperand}. - * @param padding The returned preferred padding in bytes. It will be a power of 2. - * - * @return ANEURALNETWORKS_NO_ERROR if successful. - * ANEURALNETWORKS_UNEXPECTED_NULL if either compilation or padding is NULL. - * ANEURALNETWORKS_BAD_STATE if the compilation has not been finished. - * ANEURALNETWORKS_BAD_DATA if the index is out of range. - * - * Available since NNAPI feature level 5. - */ -int ANeuralNetworksCompilation_getPreferredMemoryPaddingForInput( - const ANeuralNetworksCompilation* compilation, uint32_t index, uint32_t* padding) - __NNAPI_INTRODUCED_IN(31); - -/** - * Get the preferred buffer and memory alignment of an output to an execution created from a - * particular compilation. - * - * The user may use the returned alignment value to guide the layout of the output buffer or memory - * pool. To achieve the best performance, make sure the address of the buffer passed in - * {@link ANeuralNetworksExecution_setOutput}, or the offset value passed in - * {@link ANeuralNetworksExecution_setOutputFromMemory}, is a multiple of the perferred alignment - * value of the same output. A driver may choose to allocate a separate buffer and do memory copying - * if the provided buffer or memory does not satisfy the preferred alignment. - * - * See {@link ANeuralNetworksCompilation} for information on multithreaded usage. - * - * @param compilation The compilation object. It must already have been finished by calling - * {@link ANeuralNetworksCompilation_finish}. - * @param index The index of the output argument we are referencing from the compilation. It is - * an index into the outputs list passed to - * {@link ANeuralNetworksModel_identifyInputsAndOutputs}. 
It is not - * the index associated with {@link ANeuralNetworksModel_addOperand}. - * @param alignment The returned perferred alignment in bytes. It will be a power of 2. - * - * @return ANEURALNETWORKS_NO_ERROR if successful. - * ANEURALNETWORKS_UNEXPECTED_NULL if either compilation or alignment is NULL. - * ANEURALNETWORKS_BAD_STATE if the compilation has not been finished. - * ANEURALNETWORKS_BAD_DATA if the index is out of range. - * - * Available since NNAPI feature level 5. - */ -int ANeuralNetworksCompilation_getPreferredMemoryAlignmentForOutput( - const ANeuralNetworksCompilation* compilation, uint32_t index, uint32_t* alignment) - __NNAPI_INTRODUCED_IN(31); - -/** - * Get the preferred memory end padding of an output to an execution created from a particular - * compilation. - * - * The user may use the returned padding value to guide the layout of the output buffer or memory - * pool. To achieve the best performance, make sure the length value passed in - * {@link ANeuralNetworksExecution_setOutput} or - * {@link ANeuralNetworksExecution_setOutputFromMemory} is greater than or equal to the raw size of - * the output (i.e. the size of an element multiplied by the number of elements) rounding up to - * a multiple of the perferred padding value of the same output. A driver may choose to allocate a - * separate buffer and do memory copying if the provided buffer or memory value does not satisfy - * the preferred padding. - * - * See {@link ANeuralNetworksCompilation} for information on multithreaded usage. - * See {@link ANeuralNetworksExecution_enableInputAndOutputPadding}, - * {@link ANeuralNetworksExecution_setOutput}, and - * {@link ANeuralNetworksExecution_setOutputFromMemory} for information on passing - * output buffer or memory padding to the driver. - * - * @param compilation The compilation object. It must already have been finished by calling - * {@link ANeuralNetworksCompilation_finish}. 
- * @param index The index of the output argument we are referencing from the compilation. It is - * an index into the outputs list passed to - * {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not - * the index associated with {@link ANeuralNetworksModel_addOperand}. - * @param padding The returned perferred padding in bytes. It will be a power of 2. - * - * @return ANEURALNETWORKS_NO_ERROR if successful. - * ANEURALNETWORKS_UNEXPECTED_NULL if either compilation or padding is NULL. - * ANEURALNETWORKS_BAD_STATE if the compilation has not been finished. - * ANEURALNETWORKS_BAD_DATA if the index is out of range. - * - * Available since NNAPI feature level 5. - */ -int ANeuralNetworksCompilation_getPreferredMemoryPaddingForOutput( - const ANeuralNetworksCompilation* compilation, uint32_t index, uint32_t* padding) - __NNAPI_INTRODUCED_IN(31); - -/** - * Specifies whether the {@link ANeuralNetworksExecution} can be reused for multiple computations. - * - * By default, the {@link ANeuralNetworksExecution} is not reusable. - * - * Setting the execution to be reusable enables multiple computations to be scheduled and evaluated - * on the same execution sequentially, either by means of - * {@link ANeuralNetworksExecution_burstCompute}, {@link ANeuralNetworksExecution_compute}, - * {@link ANeuralNetworksExecution_startCompute} or - * {@link ANeuralNetworksExecution_startComputeWithDependencies}: The application may schedule and - * evaluate a computation again from the completed state of a reusable execution. - * - * This function may only be invoked when the execution is in the preparation state. - * - * See {@link ANeuralNetworksExecution} for information on execution states and multithreaded usage. - * - * @param execution The execution to be modified. - * @param reusable 'true' if the execution is to be reusable, 'false' if not. - * - * @return ANEURALNETWORKS_NO_ERROR if successful. - * ANEURALNETWORKS_UNEXPECTED_NULL if execution is NULL. 
- * ANEURALNETWORKS_BAD_STATE if the execution is not in the preparation state. - * - * Available since NNAPI feature level 5. - */ -int ANeuralNetworksExecution_setReusable(ANeuralNetworksExecution* execution, bool reusable) - __NNAPI_INTRODUCED_IN(31); +#endif // __ANDROID_API__ >= 30 __END_DECLS #endif // ANDROID_FRAMEWORKS_ML_NN_RUNTIME_NEURAL_NETWORKS_H -#undef __NNAPI_INTRODUCED_IN - /** @} */
diff --git a/runtime/include/NeuralNetworksExtensions.h b/runtime/include/NeuralNetworksExtensions.h index 34ab6dd..dd51b03 100644 --- a/runtime/include/NeuralNetworksExtensions.h +++ b/runtime/include/NeuralNetworksExtensions.h
@@ -37,6 +37,8 @@ __BEGIN_DECLS +#if __ANDROID_API__ >= 29 + /** * Queries whether an extension is supported by the driver implementation of the specified device. * @@ -108,6 +110,8 @@ const void* data, size_t length) __INTRODUCED_IN(29); +#endif // __ANDROID_API__ >= 29 + __END_DECLS #endif // ANDROID_FRAMEWORKS_ML_NN_RUNTIME_NEURAL_NETWORKS_EXTENSIONS_H
diff --git a/runtime/include/NeuralNetworksOEM.h b/runtime/include/NeuralNetworksOEM.h index 1664097..54a5dfe 100644 --- a/runtime/include/NeuralNetworksOEM.h +++ b/runtime/include/NeuralNetworksOEM.h
@@ -36,6 +36,8 @@ * - DO NOT CHANGE THE LAYOUT OR SIZE OF STRUCTURES */ +#if __ANDROID_API__ >= 27 + enum { /** * DEPRECATED. Use Extensions instead. @@ -53,10 +55,9 @@ }; // extends OperandCode /** - * Before API level 30, if a model contains an - * {@link ANEURALNETWORKS_OEM_OPERATION}, then either the model must contain - * only a single operation, or every tensor operand type in the model must be - * fully specified. + * If a model contains an {@link ANEURALNETWORKS_OEM_OPERATION}, then + * either the model must contain only a single operation, or every + * tensor operand type in the model must be fully specified. */ enum { /** @@ -67,4 +68,6 @@ ANEURALNETWORKS_OEM_OPERATION = 10000, }; // extends OperationCode +#endif // __ANDROID_API__ >= 27 + #endif // ANDROID_FRAMEWORKS_ML_NN_RUNTIME_NEURAL_NETWORKS_OEM_H
diff --git a/runtime/include/NeuralNetworksTypes.h b/runtime/include/NeuralNetworksTypes.h deleted file mode 100644 index 5edf942..0000000 --- a/runtime/include/NeuralNetworksTypes.h +++ /dev/null
@@ -1,6372 +0,0 @@ -/* - * Copyright (C) 2017 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * @addtogroup NeuralNetworks - * @{ - */ - -/** - * @file NeuralNetworksTypes.h - */ - -#ifndef ANDROID_FRAMEWORKS_ML_NN_RUNTIME_NEURAL_NETWORKS_TYPES_H -#define ANDROID_FRAMEWORKS_ML_NN_RUNTIME_NEURAL_NETWORKS_TYPES_H - -/****************************************************************** - * - * IMPORTANT NOTICE: - * - * This file is part of Android's set of stable system headers - * exposed by the Android NDK (Native Development Kit). - * - * Third-party source AND binary code relies on the definitions - * here to be FROZEN ON ALL UPCOMING PLATFORM RELEASES. - * - * - DO NOT MODIFY ENUMS (EXCEPT IF YOU ADD NEW 32-BIT VALUES) - * - DO NOT MODIFY CONSTANTS OR FUNCTIONAL MACROS - * - DO NOT CHANGE THE SIGNATURE OF FUNCTIONS IN ANY WAY - * - DO NOT CHANGE THE LAYOUT OR SIZE OF STRUCTURES - */ - -#include <android/hardware_buffer.h> -#include <stdbool.h> -#include <stddef.h> -#include <stdint.h> -#include <sys/cdefs.h> - -__BEGIN_DECLS - -/** - * Operand types. - * - * The type of an operand in a model. - * - * Types prefaced with ANEURALNETWORKS_TENSOR_* must be used for tensor data (i.e., tensors - * with at least one dimension). Types not prefaced by ANEURALNETWORKS_TENSOR_* represent - * scalar values and must have no dimensions. - * - * Although we define many types, most operators accept just a few - * types. 
Most used are {@link ANEURALNETWORKS_TENSOR_FLOAT32}, - * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, - * and {@link ANEURALNETWORKS_INT32}. - * - * Available since NNAPI feature level 1. - */ -typedef enum { - /** A 32 bit floating point scalar value. */ - ANEURALNETWORKS_FLOAT32 = 0, - /** A signed 32 bit integer scalar value. */ - ANEURALNETWORKS_INT32 = 1, - /** An unsigned 32 bit integer scalar value. */ - ANEURALNETWORKS_UINT32 = 2, - /** A tensor of 32 bit floating point values. */ - ANEURALNETWORKS_TENSOR_FLOAT32 = 3, - /** A tensor of 32 bit integer values. */ - ANEURALNETWORKS_TENSOR_INT32 = 4, - /** - * A tensor of 8 bit unsigned integers that represent real numbers. - * - * Attached to this tensor are two numbers that can be used to convert the - * 8 bit integer to the real value and vice versa. These two numbers are: - * - scale: a 32 bit floating point value greater than zero. - * - zeroPoint: a 32 bit integer, in range [0, 255]. - * - * The formula is: - * real_value = (integer_value - zeroPoint) * scale. - */ - ANEURALNETWORKS_TENSOR_QUANT8_ASYMM = 5, - /** - * An 8 bit boolean scalar value. - * - * Values of this operand type are either true or false. A zero value - * represents false; any other value represents true. - * - * Available since NNAPI feature level 3. - */ - ANEURALNETWORKS_BOOL = 6, - /** - * A tensor of 16 bit signed integers that represent real numbers. - * - * Attached to this tensor is a number representing real value scale that is - * used to convert the 16 bit number to a real value in the following way: - * realValue = integerValue * scale. - * - * scale is a 32 bit floating point with value greater than zero. - * - * Available since NNAPI feature level 3. - */ - ANEURALNETWORKS_TENSOR_QUANT16_SYMM = 7, - /** - * A tensor of IEEE 754 16 bit floating point values. - * - * Available since NNAPI feature level 3. - */ - ANEURALNETWORKS_TENSOR_FLOAT16 = 8, - /** - * A tensor of 8 bit boolean values. 
- * - * Values of this operand type are either true or false. A zero value - * represents false; any other value represents true. - * - * Available since NNAPI feature level 3. - */ - ANEURALNETWORKS_TENSOR_BOOL8 = 9, - /** - * An IEEE 754 16 bit floating point scalar value. - * - * Available since NNAPI feature level 3. - */ - ANEURALNETWORKS_FLOAT16 = 10, - /** - * A tensor of 8 bit signed integers that represent real numbers. - * - * This tensor is associated with additional fields that can - * be used to convert the 8 bit signed integer to the real value and vice versa. - * These fields are: - * - channelDim: a 32 bit unsigned integer indicating channel dimension. - * - scales: an array of positive 32 bit floating point values. - * The size of the scales array must be equal to dimensions[channelDim]. - * - * {@link ANeuralNetworksModel_setOperandSymmPerChannelQuantParams} must be used - * to set the parameters for an Operand of this type. - * - * The channel dimension of this tensor must not be unknown (dimensions[channelDim] != 0). - * - * The formula is: - * realValue[..., C, ...] = - * integerValue[..., C, ...] * scales[C] - * where C is an index in the Channel dimension. - * - * Available since NNAPI feature level 3. - */ - ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL = 11, - /** - * A tensor of 16 bit unsigned integers that represent real numbers. - * - * Attached to this tensor are two numbers that can be used to convert the - * 16 bit integer to the real value and vice versa. These two numbers are: - * - scale: a 32 bit floating point value greater than zero. - * - zeroPoint: a 32 bit integer, in range [0, 65535]. - * - * The formula is: - * real_value = (integer_value - zeroPoint) * scale. - * - * Available since NNAPI feature level 3. - */ - ANEURALNETWORKS_TENSOR_QUANT16_ASYMM = 12, - /** - * A tensor of 8 bit signed integers that represent real numbers. 
- * - * Attached to this tensor is a number representing real value scale that is - * used to convert the 8 bit number to a real value in the following way: - * realValue = integerValue * scale. - * - * scale is a 32 bit floating point with value greater than zero. - * - * Available since NNAPI feature level 3. - */ - ANEURALNETWORKS_TENSOR_QUANT8_SYMM = 13, - /** - * A tensor of 8 bit signed integers that represent real numbers. - * - * Attached to this tensor are two numbers that can be used to convert the - * 8 bit integer to the real value and vice versa. These two numbers are: - * - scale: a 32 bit floating point value greater than zero. - * - zeroPoint: a 32 bit integer, in range [-128, 127]. - * - * The formula is: - * real_value = (integer_value - zeroPoint) * scale. - * - * Available since NNAPI feature level 4. - */ - ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED = 14, - - /** - * A reference to a model. - * - * {@link ANeuralNetworksModel_setOperandValueFromModel} must be used to set - * the value for an Operand of this type. - * - * Available since NNAPI feature level 4. - */ - ANEURALNETWORKS_MODEL = 15, -} OperandCode; - -/** - * Operation types. - * - * The type of an operation in a model. - * - * Available since NNAPI feature level 1. - */ -typedef enum { - // Operations below are available since NNAPI feature level 1. - - /** - * Adds two tensors, element-wise. - * - * Takes two input tensors of identical {@link OperandCode} and compatible - * dimensions. The output is the sum of both input tensors, optionally - * modified by an activation function. - * - * Two dimensions are compatible when: - * 1. they are equal, or - * 2. one of them is 1 - * - * The size of the output is the maximum size along each dimension of the - * input operands. It starts with the trailing dimensions, and works its - * way forward. 
- * - * Example: - * - * input1.dimension = {4, 1, 2} - * input2.dimension = {5, 4, 3, 1} - * output.dimension = {5, 4, 3, 2} - * - * Since NNAPI feature level 3, generic zero-sized input tensor is supported. Zero - * dimension is only compatible with 0 or 1. The size of the output - * dimension is zero if either of corresponding input dimension is zero. - * - * Supported tensor {@link OperandCode}: - * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3) - * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4) - * * {@link ANEURALNETWORKS_TENSOR_INT32} (since NNAPI feature level 4) - * - * Supported tensor rank: up to 4 - * - * Inputs: - * * 0: A tensor. - * * 1: A tensor of the same {@link OperandCode}, and compatible dimensions - * as input0. - * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and - * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, - * the scales and zeroPoint can be different from input0 scale and zeroPoint. - * * 2: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the - * {@link FuseCode} values. Specifies the activation to - * invoke on the result. - * For a {@link ANEURALNETWORKS_TENSOR_INT32} tensor, - * the {@link FuseCode} must be "NONE". - * - * Outputs: - * * 0: The sum, a tensor of the same {@link OperandCode} as input0. - * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and - * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, - * the scale and zeroPoint can be different from inputs' scale and zeroPoint. - * - * Available since NNAPI feature level 1. - */ - ANEURALNETWORKS_ADD = 0, - - /** - * Performs a 2-D average pooling operation. - * - * The output dimensions are functions of the filter dimensions, stride, and - * padding. 
- * - * The values in the output tensor are computed as: - * - * output[b, i, j, channel] = - * sum_{di, dj}( - * input[b, strides[1] * i + di, strides[2] * j + dj, channel] - * ) / sum(1) - * - * Supported tensor {@link OperandCode}: - * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3) - * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4) - * - * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout. - * With the default data layout NHWC, the data is stored in the order of: - * [batch, height, width, channels]. Alternatively, the data layout could - * be NCHW, the data storage order of: [batch, channels, height, width]. - * NCHW is supported since NNAPI feature level 3. - * - * Both explicit padding and implicit padding are supported. - * - * Inputs (explicit padding): - * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying - * the input. - * Since NNAPI feature level 3, zero batches is supported for this tensor. - * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on - * the left, in the ‘width’ dimension. - * * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on - * the right, in the ‘width’ dimension. - * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on - * the top, in the ‘height’ dimension. - * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on - * the bottom, in the ‘height’ dimension. - * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when - * walking through input in the ‘width’ dimension. - * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when - * walking through input in the ‘height’ dimension. - * * 7: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter - * width. - * * 8: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter - * height. 
- * * 9: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the - * {@link FuseCode} values. Specifies the activation to - * invoke on the result. - * * 10: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false. - * Set to true to specify NCHW data layout for input0 and output0. - * Available since NNAPI feature level 3. - * - * Inputs (implicit padding): - * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying - * the input. - * Since NNAPI feature level 3, zero batches is supported for this tensor. - * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the implicit - * padding scheme, has to be one of the - * {@link PaddingCode} values. - * * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when - * walking through input in the ‘width’ dimension. - * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when - * walking through input in the ‘height’ dimension. - * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter - * width. - * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter - * height. - * * 6: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the - * {@link FuseCode} values. Specifies the activation to - * invoke on the result. - * * 7: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false. - * Set to true to specify NCHW data layout for input0 and output0. - * Available since NNAPI feature level 3. - * - * Outputs: - * * 0: The output 4-D tensor, of shape - * [batches, out_height, out_width, depth]. - * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and - * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, - * the scale and zeroPoint must be the same as input0. - * - * Available since NNAPI feature level 1. - */ - ANEURALNETWORKS_AVERAGE_POOL_2D = 1, - - /** - * Concatenates the input tensors along the given dimension. 
- * - * The input tensors must have identical {@link OperandCode} and the same - * dimensions except the dimension along the concatenation axis. - * - * Supported tensor {@link OperandCode}: - * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3) - * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} - * (full support since NNAPI feature level 3, see the input section) - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4) - * - * Supported tensor rank: up to 4 - * - * Inputs: - * * 0 ~ n-1: The list of n input tensors, of shape - * [D0, D1, ..., Daxis(i), ..., Dm]. - * Before NNAPI feature level 3, all input tensors of - * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} - * must have the same scale and zeroPoint as the output tensor. - * Input tensors of - * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} - * are allowed to have different scale and zeroPoint. - * Since NNAPI feature level 3, zero-sized tensors are supported. - * * n: An {@link ANEURALNETWORKS_INT32} scalar, specifying the - * concatenation axis. - * - * Outputs: - * * 0: The output, a tensor of the same {@link OperandCode} as the input - * tensors. The output shape is [D0, D1, ..., sum(Daxis(i)), ..., Dm]. - * Since NNAPI feature level 3, for a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor, - * the scale and zeroPoint values can be different from - * input tensors. Before NNAPI feature level 3 they have to be the same as for the - * input tensors. - * - * Available since NNAPI feature level 1. - */ - ANEURALNETWORKS_CONCATENATION = 2, - - /** - * Performs a 2-D convolution operation. - * - * The CONV_2D op sweeps a 2-D filter that can mix channels together over a - * batch of images, applying the filter to each window of each image of the - * appropriate size. - * - * The output dimensions are functions of the filter dimensions, stride, and - * padding. 
- * - * The values in the output tensor are computed as: - * - * output[b, i, j, channel] = - * sum_{di, dj, k} ( - * input[b, strides[1] * i + di, strides[2] * j + dj, k] * - * filter[channel, di, dj, k] - * ) + bias[channel] - * - * Supported tensor {@link OperandCode} configurations: - * * 32 bit floating point: - * * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} for input, filter, output, and bias. - * - * * Quantized: - * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} for input, filter, and output. - * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (with scale set to - * * * input.scale * filter.scale). - * - * Available since NNAPI feature level 3: - * * 16 bit floating point: - * * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} for input, filter, output, and bias. - * - * * Quantized with symmetric per channel quantization for the filter: - * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} for input, and output. - * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter. - * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (scale set to 0.0, - * * * each value scaling is separate and equal to input.scale * filter.scales[channel]). - * - * Available since NNAPI feature level 4: - * * Quantized signed (since NNAPI feature level 4): - * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} for input, filter, and output. - * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (with scale set to - * * * input.scale * filter.scale). - * - * * Quantized signed with filter symmetric per channel quantization - * (since NNAPI feature level 4): - * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} for input, and output. - * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter. - * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (scale set to 0.0, - * * * each value scaling is separate and equal to input.scale * filter.scales[channel]). - * - * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout. 
- * With the default data layout NHWC, the data is stored in the order of: - * [batch, height, width, channels]. Alternatively, the data layout could - * be NCHW, the data storage order of: [batch, channels, height, width]. - * NCHW is supported since NNAPI feature level 3. - * - * Both explicit padding and implicit padding are supported. - * - * Inputs (explicit padding): - * * 0: A 4-D tensor, of shape [batches, height, width, depth_in], - * specifying the input. - * Since NNAPI feature level 3, zero batches is supported for this tensor. - * * 1: A 4-D tensor, of shape - * [depth_out, filter_height, filter_width, depth_in], specifying the - * filter. - * For tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} - * the channel dimension (ANeuralNetworksSymmPerChannelQuantParams::channelDim) - * must be set to 0. - * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input - * tensor of type {@link ANEURALNETWORKS_TENSOR_FLOAT32} - * or {@link ANEURALNETWORKS_TENSOR_FLOAT16} the bias must be of the same type. - * For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} - * and {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}, - * the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint - * of 0 and bias_scale == input_scale * filter_scale. - * For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL}, - * the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint of 0 - * and bias_scale of 0. The actual scale of each value 'i' is equal to - * bias_scale[i] = input_scale * filter_scale[i]. - * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on - * the left, in the ‘width’ dimension. - * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on - * the right, in the ‘width’ dimension. - * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on - * the top, in the ‘height’ dimension. 
- * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on - * the bottom, in the ‘height’ dimension. - * * 7: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when - * walking through input in the ‘width’ dimension. - * * 8: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when - * walking through input in the ‘height’ dimension. - * * 9: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the - * {@link FuseCode} values. Specifies the activation to - * invoke on the result. - * * 10: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false. - * Set to true to specify NCHW data layout for input0 and output0. - * Available since NNAPI feature level 3. - * * 11: An optional {@link ANEURALNETWORKS_INT32} scalar, specifying the dilation - * factor for width. Defaults to 1. If set to k > 1, there will be k-1 skipped - * cells between each filter element on width dimension. If this input is set, - * input 12 (dilation factor for height) must be specified as well. - * Available since NNAPI feature level 3. - * * 12: An optional {@link ANEURALNETWORKS_INT32} scalar, specifying the dilation - * factor for height. Defaults to 1. If set to k > 1, there will be k-1 skipped - * cells between each filter element on height dimension. If this input is set, - * input 11 (dilation factor for width) must be specified as well. - * Available since NNAPI feature level 3. - * - * Inputs (implicit padding): - * * 0: A 4-D tensor, of shape [batches, height, width, depth_in], - * specifying the input. - * Since NNAPI feature level 3, zero batches is supported for this tensor. - * * 1: A 4-D tensor, of shape - * [depth_out, filter_height, filter_width, depth_in], specifying the - * filter. - * For tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} - * the channel dimension (ANeuralNetworksSymmPerChannelQuantParams::channelDim) - * must be set to 0. 
- * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input - * tensor of type {@link ANEURALNETWORKS_TENSOR_FLOAT32} - * or {@link ANEURALNETWORKS_TENSOR_FLOAT16} the bias must be of the same - * type. - * For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} - * and {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}, - * the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint - * of 0 and bias_scale == input_scale * filter_scale. - * For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL}, - * the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint of 0 - * and bias_scale of 0. The actual scale of each value 'i' is equal to - * bias_scale[i] = input_scale * filter_scale[i]. - * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the implicit - * padding scheme, has to be one of the - * {@link PaddingCode} values. - * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when - * walking through input in the ‘width’ dimension. - * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when - * walking through input in the ‘height’ dimension. - * * 6: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the - * {@link FuseCode} values. Specifies the activation to - * invoke on the result. - * * 7: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false. - * Set to true to specify NCHW data layout for input0 and output0. - * Available since NNAPI feature level 3. - * * 8: An optional {@link ANEURALNETWORKS_INT32} scalar, specifying the dilation - * factor for width. Defaults to 1. If set to k > 1, there will be k-1 skipped - * cells between each filter element on width dimension. If this input is set, - * input 9 (dilation factor for height) must be specified as well. - * Available since NNAPI feature level 3. - * * 9: An optional {@link ANEURALNETWORKS_INT32} scalar, specifying the dilation - * factor for height. Defaults to 1. 
If set to k > 1, there will be k-1 skipped - * cells between each filter element on height dimension. If this input is set, - * input 8 (dilation factor for width) must be specified as well. - * Available since NNAPI feature level 3. - * - * Outputs: - * * 0: The output 4-D tensor, of shape - * [batches, out_height, out_width, depth_out]. - * Before NNAPI feature level 3, for output tensor of - * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, the following condition must - * be satisfied: output_scale > input_scale * filter_scale - * - * Available since NNAPI feature level 1. - */ - ANEURALNETWORKS_CONV_2D = 3, - - /** - * Performs a depthwise 2-D convolution operation. - * - * Given an input tensor of shape [batches, height, width, depth_in] and a - * filter tensor of shape [1, filter_height, filter_width, depth_out] - * containing depth_out convolutional filters of depth 1, DEPTHWISE_CONV - * applies a different filter to each input channel (expanding from 1 - * channel to channel_multiplier channels for each), then concatenates the - * results together. - * - * The output has depth_out = depth_in * depth_multiplier channels. - * The output dimensions are functions of the filter dimensions, stride, and - * padding. - * - * The values in the output tensor are computed as: - * - * output[b, i, j, k * channel_multiplier + q] = - * sum_{di, dj} ( - * input[b, strides[1] * i + di, strides[2] * j + dj, k] * - * filter[1, di, dj, k * channel_multiplier + q] - * ) + bias[k * channel_multiplier + q] - * - * Supported tensor {@link OperandCode} configurations: - * * 32 bit floating point: - * * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} for input, filter, output, and bias. - * - * * Quantized: - * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} for input, filter, and output. - * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (with scale set to - * * * input.scale * filter.scale). 
- * - * Available since NNAPI feature level 3: - * * 16 bit floating point: - * * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} for input, filter, output, and bias. - * - * * Quantized with symmetric per channel quantization for the filter: - * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} for input, and output. - * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter. - * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (scale set to 0.0, - * * * each value scaling is separate and equal to input.scale * filter.scales[channel]). - * - * Available since NNAPI feature level 4: - * * Quantized signed (since NNAPI feature level 4): - * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} for input, filter, and output. - * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (with scale set to - * * * input.scale * filter.scale). - * - * * Quantized signed with filter symmetric per channel quantization - * (since NNAPI feature level 4): - * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} for input, and output. - * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter. - * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (scale set to 0.0, - * * * each value scaling is separate and equal to input.scale * filter.scales[channel]). - * - * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout. - * With the default data layout NHWC, the data is stored in the order of: - * [batch, height, width, channels]. Alternatively, the data layout could - * be NCHW, the data storage order of: [batch, channels, height, width]. - * NCHW is supported since NNAPI feature level 3. - * - * Both explicit padding and implicit padding are supported. - * - * Inputs (explicit padding): - * * 0: A 4-D tensor, of shape [batches, height, width, depth_in], - * specifying the input. - * * 1: A 4-D tensor, of shape [1, filter_height, filter_width, depth_out], - * specifying the filter. 
- * For tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} - * the channel dimension (ANeuralNetworksSymmPerChannelQuantParams::channelDim) - * must be set to 3. - * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input - * tensor of type {@link ANEURALNETWORKS_TENSOR_FLOAT32} - * or {@link ANEURALNETWORKS_TENSOR_FLOAT16} the bias must be of the same type. - * For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} - * and {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}, - * the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint - * of 0 and bias_scale == input_scale * filter_scale. - * For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL}, - * the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint of 0 - * and bias_scale of 0. The actual scale of each value 'i' is equal to - * bias_scale[i] = input_scale * filter_scale[i]. - * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on - * the left, in the ‘width’ dimension. - * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on - * the right, in the ‘width’ dimension. - * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on - * the top, in the ‘height’ dimension. - * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on - * the bottom, in the ‘height’ dimension. - * * 7: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when - * walking through input in the ‘width’ dimension. - * * 8: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when - * walking through input in the ‘height’ dimension. - * * 9: An {@link ANEURALNETWORKS_INT32} scalar, specifying the depthwise - * multiplier. - * * 10: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the - * {@link FuseCode} values. Specifies the activation to - * invoke on the result. 
- * * 11: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false. - * Set to true to specify NCHW data layout for input0 and output0. - * Available since NNAPI feature level 3. - * * 12: An optional {@link ANEURALNETWORKS_INT32} scalar, specifying the dilation - * factor for width. Defaults to 1. If set to k > 1, there will be k-1 skipped - * cells between each filter element on width dimension. If this input is set, - * input 13 (dilation factor for height) must be specified as well. - * Available since NNAPI feature level 3. - * * 13: An optional {@link ANEURALNETWORKS_INT32} scalar, specifying the dilation - * factor for height. Defaults to 1. If set to k > 1, there will be k-1 skipped - * cells between each filter element on height dimension. If this input is set, - * input 12 (dilation factor for width) must be specified as well. - * Available since NNAPI feature level 3. - * - * Inputs (implicit padding): - * * 0: A 4-D tensor, of shape [batches, height, width, depth_in], - * specifying the input. - * * 1: A 4-D tensor, of shape [1, filter_height, filter_width, depth_out], - * specifying the filter. - * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input - * tensor of type {@link ANEURALNETWORKS_TENSOR_FLOAT32} - * or {@link ANEURALNETWORKS_TENSOR_FLOAT16} the bias must be of the same type. - * For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} - * and {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}, - * the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint - * of 0 and bias_scale == input_scale * filter_scale. - * For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL}, - * the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint of 0 - * and bias_scale of 0. The actual scale of each value 'i' is equal to - * bias_scale[i] = input_scale * filter_scale[i]. 
- * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the implicit - * padding scheme, has to be one of the - * {@link PaddingCode} values. - * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when - * walking through input in the ‘width’ dimension. - * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when - * walking through input in the ‘height’ dimension. - * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the depthwise - * multiplier. - * * 7: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the - * {@link FuseCode} values. Specifies the activation to - * invoke on the result. - * * 8: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false. - * Set to true to specify NCHW data layout for input0 and output0. - * Available since NNAPI feature level 3. - * * 9: An optional {@link ANEURALNETWORKS_INT32} scalar, specifying the dilation - * factor for width. Defaults to 1. If set to k > 1, there will be k-1 skipped - * cells between each filter element on width dimension. If this input is set, - * input 10 (dilation factor for height) must be specified as well. - * Available since NNAPI feature level 3. - * * 10: An optional {@link ANEURALNETWORKS_INT32} scalar, specifying the dilation - * factor for height. Defaults to 1. If set to k > 1, there will be k-1 skipped - * cells between each filter element on height dimension. If this input is set, - * input 9 (dilation factor for width) must be specified as well. - * Available since NNAPI feature level 3. - * - * Outputs: - * * 0: The output 4-D tensor, of shape - * [batches, out_height, out_width, depth_out]. Before NNAPI feature level 3, for - * output tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, - * the following condition must be satisfied: - * output_scale > input_scale * filter_scale - * - * Available since NNAPI feature level 1. 
- */ - ANEURALNETWORKS_DEPTHWISE_CONV_2D = 4, - - /** - * Rearranges data from depth into blocks of spatial data. - * - * More specifically, this op outputs a copy of the input tensor where - * values from the depth dimension are moved in spatial blocks to the height - * and width dimensions. The value block_size indicates the input block size - * and how the data is moved. - * - * Chunks of data of size block_size * block_size from depth are rearranged - * into non-overlapping blocks of size block_size x block_size. - * - * The width of the output tensor is input_depth * block_size, whereas the - * height is input_height * block_size. The depth of the input tensor must - * be divisible by block_size * block_size - * - * Supported tensor {@link OperandCode}: - * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3) - * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4) - * - * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout. - * With the default data layout NHWC, the data is stored in the order of: - * [batch, height, width, channels]. Alternatively, the data layout could - * be NCHW, the data storage order of: [batch, channels, height, width]. - * NCHW is supported since NNAPI feature level 3. - * - * Inputs: - * * 0: A 4-D tensor, of shape [batches, height, width, depth_in], - * specifying the input. - * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the block_size. - * block_size must be >=1 and block_size * block_size must be a divisor - * of the input depth. - * * 2: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false. - * Set to true to specify NCHW data layout for input0 and output0. - * Available since NNAPI feature level 3. - * - * Outputs: - * * 0: The output 4-D tensor, of shape [batch, height*block_size, - * width*block_size, depth/(block_size*block_size)]. 
- * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and - * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, - * the scale and zeroPoint must be the same as input0. - * - * Available since NNAPI feature level 1. - */ - ANEURALNETWORKS_DEPTH_TO_SPACE = 5, - - /** - * Dequantizes the input tensor. - * - * The formula is: - * - * output = (input - zeroPoint) * scale. - * - * Supported input tensor {@link OperandCode}: - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM} (since NNAPI feature level 3) - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} (since NNAPI feature level 3) - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4) - * - * Supported output tensor {@link OperandCode}: - * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3) - * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}. - * - * Supported tensor rank: up to 4 - * - * Inputs: - * * 0: A tensor. - * Since NNAPI feature level 3, this tensor may be zero-sized. - * - * Outputs: - * * 0: A tensor with the same shape as input0. - * - * Available since NNAPI feature level 1. - */ - ANEURALNETWORKS_DEQUANTIZE = 6, - - /** - * Looks up sub-tensors in the input tensor. - * - * This operator takes for input a tensor of values (Values) and - * a one-dimensional tensor of selection indices (Lookups). - * The output tensor is the concatenation of sub-tensors of Values as - * selected by Lookups. - * - * Think of Values as being sliced along its first dimension: - * The entries in Lookups select which slices are concatenated together - * to create the output tensor. - * - * For example, if Values has shape of [40, 200, 300] and - * Lookups has shape of [3], all three values found in Lookups are - * expected to be between 0 and 39. The resulting tensor must - * have shape of [3, 200, 300]. - * - * If a value in Lookups is out of bounds, the operation must fail - * and an error must be reported. 
- * - * Supported value tensor {@link OperandCode}: - * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 4) - * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} - * * {@link ANEURALNETWORKS_TENSOR_INT32} (since NNAPI feature level 3) - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} (since NNAPI feature level 3) - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4) - * - * Supported value tensor rank: from 2 - * - * Inputs: - * * 0: Lookups. A 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. - * The values are indices into the first dimension of Values. - * * 1: Values. An n-D tensor, where n >= 2, from which sub-tensors are - * extracted. - * - * Output: - * * 0: A n-D tensor with the same rank and shape as the Values - * tensor, except for the first dimension which has the same size - * as Lookups' only dimension. - * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and - * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, - * the scale and zeroPoint must be the same as input1. - * - * Available since NNAPI feature level 1. - */ - ANEURALNETWORKS_EMBEDDING_LOOKUP = 7, - - /** - * Computes element-wise floor() on the input tensor. - * - * Supported tensor {@link OperandCode}: - * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3) - * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} - * - * Supported tensor rank: up to 4 - * - * Inputs: - * * 0: A tensor. - * - * Outputs: - * * 0: The output tensor, of the same {@link OperandCode} and dimensions as - * the input tensor. - * - * Available since NNAPI feature level 1. - */ - ANEURALNETWORKS_FLOOR = 8, - - /** - * Denotes a fully (densely) connected layer, which connects all elements - * in the input tensor with each element in the output tensor. 
- * - * This layer implements the operation: - * - * outputs = activation(inputs * weights’ + bias) - * - * Supported tensor {@link OperandCode}: - * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3) - * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4) - * - * Supported tensor rank: up to 4. - * - * Inputs: - * * 0: A tensor of at least rank 2, specifying the input. If rank is - * greater than 2, then it gets flattened to a 2-D Tensor. The - * (flattened) 2-D Tensor is reshaped (if necessary) to - * [batch_size, input_size], where "input_size" corresponds to the - * number of inputs to the layer, matching the second dimension of - * weights, and "batch_size" is calculated by dividing the number of - * elements by "input_size". - * Since NNAPI feature level 3, zero batch_size is supported for this tensor. - * * 1: A 2-D tensor, specifying the weights, of shape - * [num_units, input_size], where "num_units" corresponds to the number - * of output nodes. - * * 2: A 1-D tensor, of shape [num_units], specifying the bias. For input - * tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, the bias should - * also be of {@link ANEURALNETWORKS_TENSOR_FLOAT32}. - * For input tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} - * and {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}, - * the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32}, - * with zeroPoint of 0 and bias_scale == input_scale * filter_scale. - * * 3: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the - * {@link FuseCode} values. Specifies the activation to - * invoke on the result. - * - * Outputs: - * * 0: The output tensor, of shape [batch_size, num_units]. 
Before NNAPI feature level 3, for - * output tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, the following - * condition must be satisfied: output_scale > input_scale * filter_scale. - * - * Available since NNAPI feature level 1. - */ - ANEURALNETWORKS_FULLY_CONNECTED = 9, - - /** - * Looks up sub-tensors in the input tensor using a key-value map. - * - * This operator takes for input a tensor of values (Values), - * a one-dimensional tensor of selection values (Lookups) and - * a one-dimensional tensor that maps these values to Values - * indexes. The output tensor is the concatenation of sub-tensors of - * Values as selected by Lookups via Keys. - * - * Think of Values as being sliced along its outer-most dimension. - * The output is a concatenation of selected slices, with one slice - * for each entry of Lookups. The slice selected is the one at the - * same index as the Maps entry that matches the value in Lookups. - * - * For a hit, the corresponding sub-tensor of Values is included - * in the Output tensor. For a miss, the corresponding sub-tensor in - * Output must have zero values. - * - * For example, if Values has shape of [40, 200, 300], - * Keys should have a shape of [40]. If Lookups tensor has shape - * of [3], three slices are being concatenated, so the resulting tensor - * must have the shape of [3, 200, 300]. If the first entry in Lookups - * has the value 123456, that value must be located in Keys tensor. - * If the sixth entry of Keys contains 123456, the sixth slice of Values - * must be selected. If no entry in Keys has 123456, a slice of zeroes - * must be concatenated. - * - * Supported value tensor {@link OperandCode}: - * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} - * * {@link ANEURALNETWORKS_TENSOR_INT32} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} - * - * Supported value tensor rank: from 2 - * - * Inputs: - * * 0: Lookups. A 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor with - * shape [ k ]. - * * 1: Keys. 
A 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor with shape - * [ n ]; Keys and Values pair represent a map, i.e., the ith element - * in Keys (Keys[i]) is the key to select the ith sub-tensor in Values - * (Values[i]), where 0 <= i <= n-1. Keys tensor *MUST* be sorted in - * ascending order. - * * 2: Values. A tensor with shape of [ n, … ]; i.e., the first dimension - * must be n. - * - * Outputs: - * * 0: Output. A tensor with shape [ k …]. - * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor, - * the scale and zeroPoint must be the same as input2. - * * 1: Hits. A boolean tensor with shape [ k ] indicates whether the lookup - * hits (True) or not (False). - * Stored as {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} with offset 0 - * and scale 1.0f. - * A non-zero byte represents True, a hit. A zero indicates otherwise. - * - * Available since NNAPI feature level 1. - */ - ANEURALNETWORKS_HASHTABLE_LOOKUP = 10, - - /** - * Applies L2 normalization along the axis dimension. - * - * The values in the output tensor are computed as: - * - * output[batch, row, col, channel] = - * input[batch, row, col, channel] / - * sqrt(sum_{c} pow(input[batch, row, col, c], 2)) - * - * By default the axis dimension is the last dimension of the input tensor. - * - * Supported tensor {@link OperandCode}: - * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3) - * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} (since NNAPI feature level 3) - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4) - * - * Supported tensor rank: up to 4 - * Tensors with rank less than 4 are only supported since NNAPI feature level 3. - * - * Inputs: - * * 0: An n-D tensor, specifying the tensor to be normalized. - * * 1: An optional {@link ANEURALNETWORKS_INT32} scalar, default to -1, - * specifying the dimension normalization would be performed on. 
- * Negative index is used to specify axis from the end (e.g. -1 for - * the last axis). Must be in the range [-n, n). - * Available since NNAPI feature level 3. - * - * Outputs: - * * 0: A tensor of the same {@link OperandCode} and same shape as input0. - * For {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, - * the scale must be 1.f / 128 and the zeroPoint must be 128. - * For {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}, - * the scale must be 1.f / 128 and the zeroPoint must be 0. - * - * NOTE: Before NNAPI feature level 4, if the elements along an axis are all zeros, - * the result is undefined. Since NNAPI feature level 4, if the elements along an axis - * are all zeros, the result is logical zero. - * - * Available since NNAPI feature level 1. - */ - ANEURALNETWORKS_L2_NORMALIZATION = 11, - - /** - * Performs an 2-D L2 pooling operation. - * - * The output dimensions are functions of the filter dimensions, stride, and - * padding. - * - * The values in the output tensor are computed as: - * - * output[b, i, j, c] = - * sqrt(sum_{di, dj} pow(input[b, strides[1] * i + di, strides[2] * j + dj, c], 2) / - * sum(1)) - * - * Supported tensor {@link OperandCode}: - * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3) - * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} - * - * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout. - * With the default data layout NHWC, the data is stored in the order of: - * [batch, height, width, channels]. Alternatively, the data layout could - * be NCHW, the data storage order of: [batch, channels, height, width]. - * NCHW is supported since NNAPI feature level 3. - * - * Both explicit padding and implicit padding are supported. - * - * Inputs (explicit padding): - * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying - * the input. - * Since NNAPI feature level 3, zero batches is supported for this tensor. 
- * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on - * the left, in the ‘width’ dimension. - * * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on - * the right, in the ‘width’ dimension. - * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on - * the top, in the ‘height’ dimension. - * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on - * the bottom, in the ‘height’ dimension. - * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when - * walking through input in the ‘width’ dimension. - * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when - * walking through input in the ‘height’ dimension. - * * 7: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter - * width. - * * 8: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter - * height. - * * 9: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the - * {@link FuseCode} values. Specifies the activation to - * invoke on the result. - * * 10: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false. - * Set to true to specify NCHW data layout for input0 and output0. - * Available since NNAPI feature level 3. - * - * Inputs (implicit padding): - * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying - * the input. - * Since NNAPI feature level 3, zero batches is supported for this tensor. - * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the implicit - * padding scheme, has to be one of the - * {@link PaddingCode} values. - * * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when - * walking through input in the ‘width’ dimension. - * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when - * walking through input in the ‘height’ dimension. - * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter - * width. 
- * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter - * height. - * * 6: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the - * {@link FuseCode} values. Specifies the activation to - * invoke on the result. - * * 7: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false. - * Set to true to specify NCHW data layout for input0 and output0. - * Available since NNAPI feature level 3. - * - * Outputs: - * * 0: The output 4-D tensor, of shape - * [batches, out_height, out_width, depth]. - * - * Available since NNAPI feature level 1. - */ - ANEURALNETWORKS_L2_POOL_2D = 12, - - /** - * Applies Local Response Normalization along the depth dimension. - * - * The 4-D input tensor is treated as a 3-D array of 1-D vectors (along the - * last dimension), and each vector is normalized independently. Within a - * given vector, each component is divided by the weighted, squared sum of - * inputs within depth_radius. - * - * The output is calculated using this formula: - * - * sqr_sum[a, b, c, d] = sum( - * pow(input[a, b, c, d - depth_radius : d + depth_radius + 1], 2)) - * output = input / pow((bias + alpha * sqr_sum), beta) - * - * For input tensor with rank less than 4, independently normalizes each - * 1-D slice along specified dimension. - * - * Supported tensor {@link OperandCode}: - * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3) - * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} - * - * Supported tensor rank: up to 4 - * Tensors with rank less than 4 are only supported since NNAPI feature level 3. - * - * Inputs: - * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying - * the input. - * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the radius of - * the normalization window. - * * 2: A scalar, specifying the bias, must not be zero. - * For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT16}, the bias - * value must be of {@link ANEURALNETWORKS_FLOAT16}. 
- * For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, the bias - * value must be of {@link ANEURALNETWORKS_FLOAT32}. - * * 3: A scalar, specifying the scale factor, alpha. - * For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT16}, the - * alpha value must be of {@link ANEURALNETWORKS_FLOAT16}. - * For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, the - * alpha value must be of {@link ANEURALNETWORKS_FLOAT32}. - * * 4: A scalar, specifying the exponent, beta. - * For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT16}, the beta - * value must be of {@link ANEURALNETWORKS_FLOAT16}. - * For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, the beta - * value must be of {@link ANEURALNETWORKS_FLOAT32}. - * * 5: An optional {@link ANEURALNETWORKS_INT32} scalar, default to -1, - * specifying the dimension normalization would be performed on. - * Negative index is used to specify axis from the end (e.g. -1 for - * the last axis). Must be in the range [-n, n). - * Available since NNAPI feature level 3. - * - * Outputs: - * * 0: The output tensor of same shape as input0. - * - * Available since NNAPI feature level 1. - */ - ANEURALNETWORKS_LOCAL_RESPONSE_NORMALIZATION = 13, - - /** - * Computes sigmoid activation on the input tensor element-wise. - * - * The output is calculated using this formula: - * - * output = 1 / (1 + exp(-input)) - * - * Supported tensor {@link OperandCode}: - * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3) - * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4) - * - * Supported tensor rank: up to 4. - * - * Inputs: - * * 0: A tensor, specifying the input. - * Since NNAPI feature level 3, this tensor may be zero-sized. - * - * Outputs: - * * 0: The output tensor of same shape as input0. 
- * For {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, - * the scale must be 1.f / 256 and the zeroPoint must be 0. - * For {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}, - * the scale must be 1.f / 256 and the zeroPoint must be -128. - * - * Available since NNAPI feature level 1. - */ - ANEURALNETWORKS_LOGISTIC = 14, - - /** - * Projects an input to a bit vector via locality senstive hashing. - * - * Supported input tensor {@link OperandCode}: - * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3) - * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} - * * {@link ANEURALNETWORKS_TENSOR_INT32} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} - * - * Supported input tensor rank: from 1 - * - * Inputs: - * * 0: Hash functions. Dim.size == 2, DataType: Float. - * Tensor[0].Dim[0]: Number of hash functions. - * Tensor[0].Dim[1]: Number of projected output bits generated by each - * hash function. - * If the projection type is Sparse: - * Tensor[0].Dim[1] + ceil(log2(Tensor[0].Dim[0])) <= 32 - * - * * 1: Input. Dim.size >= 1, no restriction on DataType. - * * 2: Weight. Optional. Dim.size == 1, DataType: Float. - * If not set, each input element is considered to have the same weight - * of 1.0. - * Tensor[1].Dim[0] == Tensor[2].Dim[0] - * * 3: Type: - * Sparse: - * Value LSHProjectionType_SPARSE(=3) (since NNAPI feature level 3). - * Computed bit vector is considered to be sparse. - * Each output element is an int32 made up of multiple bits - * computed from hash functions. - * - * NOTE: To avoid collisions across hash functions, an offset value - * of k * (1 << Tensor[0].Dim[1]) will be added to each signature, - * where k is the index of the hash function. - * - * Value LSHProjectionType_SPARSE_DEPRECATED(=1). - * Legacy behavior that does not include the offset value. - * - * Dense: - * Value LSHProjectionType_DENSE(=2). - * Computed bit vector is considered to be dense. 
Each output - * element represents a bit and can take the value of either - * 0 or 1. - * - * Outputs: - * * 0: If the projection type is Sparse: - * Output.Dim == { Tensor[0].Dim[0] } - * A tensor of int32 that represents hash signatures. - * - * If the projection type is Dense: - * Output.Dim == { Tensor[0].Dim[0] * Tensor[0].Dim[1] } - * A flattened tensor that represents projected bit vectors. - * - * Available since NNAPI feature level 1. - * The offset value for sparse projections was added in NNAPI feature level 3. - */ - ANEURALNETWORKS_LSH_PROJECTION = 15, - - /** - * Performs a single time step in a Long Short-Term Memory (LSTM) layer - * - * The LSTM operation is described by the following equations. - * - * \f{eqnarray*}{ - * i_t =& \sigma(W_{xi}x_t+W_{hi}h_{t-1}+W_{ci}C_{t-1}+b_i) & \\ - * f_t =& \sigma(W_{xf}x_t+W_{hf}h_{t-1}+W_{cf}C_{t-1}+b_f) & \\ - * C_t =& clip(f_t \odot C_{t-1} + i_t \odot - * g(W_{xc}x_t+W_{hc}h_{t-1}+b_c),\ t_{cell}) & \\ - * o_t =& \sigma(W_{xo}x_t+W_{ho}h_{t-1}+W_{co}C_t+b_o) & \\ - * & & \\ - * & clip(W_{proj}(o_t \odot g(C_t))+b_{proj},\ t_{proj}) - * & if\ there\ is\ a\ projection; \\ - * h_t =& & \\ - * & o_t \odot g(C_t) & otherwise. 
\\ - * \f} - * Where: - * * \f$x_t\f$ is the input, - * * \f$i_t\f$ is the input gate, - * * \f$f_t\f$ is the forget gate, - * * \f$C_t\f$ is the cell state, - * * \f$o_t\f$ is the output, - * * \f$h_t\f$ is the output state, - * * \f$\sigma\f$ is the logistic sigmoid function, - * * \f$g\f$ is the cell input and cell output activation function, usually - * \f$tahn\f$, - * * \f$W_{xi}\f$ is the input-to-input weight matrix, - * * \f$W_{hi}\f$ is the recurrent to input weight matrix, - * * \f$W_{ci}\f$ is the cell-to-input weight matrix, - * * \f$b_i\f$ is the input gate bias, - * * \f$W_{xf}\f$ is the input-to-forget weight matrix, - * * \f$W_{hf}\f$ is the recurrent-to-forget weight matrix, - * * \f$W_{cf}\f$ is the cell-to-forget weight matrix, - * * \f$b_f\f$ is the forget gate bias, - * * \f$W_{xc}\f$ is the input-to-cell weight matrix, - * * \f$W_{hc}\f$ is the recurrent-to-cell weight matrix, - * * \f$b_c\f$ is the cell bias, - * * \f$W_{xo}\f$ is the input-to-output weight matrix, - * * \f$W_{ho}\f$ is the recurrent-to-output weight matrix, - * * \f$W_{co}\f$ is the cell-to-output weight matrix, - * * \f$b_o\f$ is the output gate bias, - * * \f$W_{proj}\f$ is the projection weight matrix, - * * \f$b_{proj}\f$ is the projection bias, - * * \f$t_{cell}\f$ is the threshold for clipping the cell state, and - * * \f$t_{proj}\f$ is the threshold for clipping the projected output. - * * \f$\odot\f$ is the - * <a href="https://en.wikipedia.org/wiki/Hadamard_product_(matrices)"> - * Hadamard product</a> that takes two matrices and produces another - * matrix, each element of which is the product of the corresponding - * elements of the input matrices. - * - * Since NNAPI feature level 3 LSTM supports layer normalization. 
- * In case layer normalization is used, the inputs to internal activation - * functions (sigmoid and \f$g\f$) are normalized, rescaled and recentered - * following an approach from section 3.1 from - * https://arxiv.org/pdf/1607.06450.pdf - * - * The operation has the following independently optional inputs: - * * The cell-to-input weights (\f$W_{ci}\f$), cell-to-forget weights - * (\f$W_{cf}\f$) and cell-to-output weights (\f$W_{co}\f$) either all - * have values or neither of them have values (i.e., all set to null). If - * they have values, the peephole optimization is used. - * * The input-to-input weights (\f$W_{xi}\f$), recurrent-to-input weights - * (\f$W_{hi}\f$) and input gate bias (\f$b_i\f$) either all have values, - * or none of them have values. If they have no values, coupling of input - * and forget gates (CIFG) is used, in which case the input gate - * (\f$i_t\f$) is calculated using the following equation instead. - * \f{eqnarray*}{ - * i_t = 1 - f_t - * \f} - * In case peephole optimization is used and CIFG is not used - * cell-to-input (\f$W_{ci}\f$) weights must be present. Otherwise, the - * cell-to-input weights must have no value. - * * The projection weights (\f$W_{proj}\f$) is required only for the - * recurrent projection layer, and should otherwise have no value. - * * The projection bias (\f$b_{proj}\f$) may (but not required to) have a - * value if the recurrent projection layer exists, and should otherwise - * have no value. - * * (NNAPI feature level 3 or later) The four layer normalization weights either all have - * values or none of them have values. Additionally, if CIFG is used, - * input layer normalization weights tensor is omitted and the other layer - * normalization weights either all have values or none of them have - * values. Layer normalization is used when the values of all the layer - * normalization weights are present. 
- * - * References: - * - * The default non-peephole non-CIFG implementation is based on: - * http://www.bioinf.jku.at/publications/older/2604.pdf - * S. Hochreiter and J. Schmidhuber. "Long Short-Term Memory". Neural - * Computation, 9(8):1735-1780, 1997. - * - * The peephole implementation and projection layer is based on: - * https://research.google.com/pubs/archive/43905.pdf - * Hasim Sak, Andrew Senior, and Francoise Beaufays. "Long short-term memory - * recurrent neural network architectures for large scale acoustic - * modeling." INTERSPEECH, 2014. - * (However, the concept of peephole optimization was introduced in work - * prior to this paper.) - * - * The coupling of input and forget gate (CIFG) is based on: - * http://arxiv.org/pdf/1503.04069.pdf - * Greff et al. "LSTM: A Search Space Odyssey" - * - * The layer normalization is based on: - * https://arxiv.org/pdf/1607.06450.pdf - * Jimmy Ba et al. "Layer Normalization" - * - * Supported tensor {@link OperandCode}: - * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3) - * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} - * - * All input and output tensors must be of the same type. - * - * Inputs: - * * 0: The input (\f$x_t\f$). - * A 2-D tensor of shape [batch_size, input_size], where “batch_size” - * corresponds to the batching dimension, and “input_size” is the size - * of the input. - * * 1: The input-to-input weights (\f$W_{xi}\f$). Optional. - * A 2-D tensor of shape [num_units, input_size], where “num_units” - * corresponds to the number of cell units. - * * 2: The input-to-forget weights (\f$W_{xf}\f$). - * A 2-D tensor of shape [num_units, input_size]. - * * 3: The input-to-cell weights (\f$W_{xc}\f$). - * A 2-D tensor of shape [num_units, input_size]. - * * 4: The input-to-output weights (\f$W_{xo}\f$). - * A 2-D tensor of shape [num_units, input_size]. - * * 5: The recurrent-to-input weights (\f$W_{hi}\f$). Optional. 
- * A 2-D tensor of shape [num_units, output_size], where “output_size” - * corresponds to either the number of cell units (i.e., “num_units”), - * or the second dimension of the “projection_weights”, if defined. - * * 6: The recurrent-to-forget weights (\f$W_{hf}\f$). - * A 2-D tensor of shape [num_units, output_size]. - * * 7: The recurrent-to-cell weights (\f$W_{hc}\f$). - * A 2-D tensor of shape [num_units, output_size]. - * * 8: The recurrent-to-output weights (\f$W_{ho}\f$). - * A 2-D tensor of shape [num_units, output_size]. - * * 9: The cell-to-input weights (\f$W_{ci}\f$). Optional. - * A 1-D tensor of shape [num_units]. - * * 10:The cell-to-forget weights (\f$W_{cf}\f$). Optional. - * A 1-D tensor of shape [num_units]. - * * 11:The cell-to-output weights (\f$W_{co}\f$). Optional. - * A 1-D tensor of shape [num_units]. - * * 12:The input gate bias (\f$b_i\f$). Optional. - * A 1-D tensor of shape [num_units]. - * * 13:The forget gate bias (\f$b_f\f$). - * A 1-D tensor of shape [num_units]. - * * 14:The cell bias (\f$b_c\f$). - * A 1-D tensor of shape [num_units]. - * * 15:The output gate bias (\f$b_o\f$). - * A 1-D tensor of shape [num_units]. - * * 16:The projection weights (\f$W_{proj}\f$). Optional. - * A 2-D tensor of shape [output_size, num_units]. - * * 17:The projection bias (\f$b_{proj}\f$). Optional. - * A 1-D tensor of shape [output_size]. - * * 18:The output state (in) (\f$h_{t-1}\f$). - * A 2-D tensor of shape [batch_size, output_size]. - * * 19:The cell state (in) (\f$C_{t-1}\f$). - * A 2-D tensor of shape [batch_size, num_units]. - * * 20:The activation function (\f$g\f$). - * A value indicating the activation function: - * <ul> - * <li>0: None; - * <li>1: Relu; - * <li>3: Relu6; - * <li>4: Tanh; - * <li>6: Sigmoid. - * </ul> - * * 21:The clipping threshold (\f$t_{cell}\f$) for the cell state, such - * that values are bound within [-cell_clip, cell_clip]. If set to 0.0 - * then clipping is disabled. 
- * Until NNAPI feature level 3 this scalar must be of type {@link - * ANEURALNETWORKS_FLOAT32}. Since NNAPI feature level 3, if all the input - * tensors have type {@link ANEURALNETWORKS_TENSOR_FLOAT32}, this - * scalar must be of the type {@link ANEURALNETWORKS_FLOAT32}, - * otherwise if all the input tensors have the type {@link - * ANEURALNETWORKS_TENSOR_FLOAT16}, this scalar must be of type {@link - * ANEURALNETWORKS_FLOAT16}. - * * 22:The clipping threshold (\f$t_{proj}\f$) for the output from the - * projection layer, such that values are bound within - * [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled. - * Until NNAPI feature level 3 this scalar must be of type {@link - * ANEURALNETWORKS_FLOAT32}. Since NNAPI feature level 3, if all the input - * tensors have type {@link ANEURALNETWORKS_TENSOR_FLOAT32}, this - * scalar must be of the type {@link ANEURALNETWORKS_FLOAT32}, - * otherwise if all the input tensors have the type {@link - * ANEURALNETWORKS_TENSOR_FLOAT16}, this scalar must be of type {@link - * ANEURALNETWORKS_FLOAT16}. - * Since NNAPI feature level 3 there are additional inputs to this op: - * * 23:The input layer normalization weights. - * A 1-D tensor of shape [num_units]. Used to rescale normalized inputs - * to activation at input gate. - * * 24:The forget layer normalization weights. - * A 1-D tensor of shape [num_units]. Used to rescale normalized inputs - * to activation at forget gate. - * * 25:The cell layer normalization weights. - * A 1-D tensor of shape [num_units]. Used to rescale normalized inputs - * to activation at cell gate. - * * 26:The output layer normalization weights. - * A 1-D tensor of shape [num_units]. Used to rescale normalized inputs - * to activation at output gate. - * - * Outputs: - * * 0: The scratch buffer. - * A 2-D tensor of shape [batch_size, num_units * 3] with CIFG, or - * [batch_size, num_units * 4] without CIFG. - * * 1: The output state (out) (\f$h_t\f$). 
- * A 2-D tensor of shape [batch_size, output_size]. - * * 2: The cell state (out) (\f$C_t\f$). - * A 2-D tensor of shape [batch_size, num_units]. - * * 3: The output (\f$o_t\f$). - * A 2-D tensor of shape [batch_size, output_size]. This is effectively - * the same as the current “output state (out)” value. - * - * Available since NNAPI feature level 1. - */ - ANEURALNETWORKS_LSTM = 16, - - /** - * Performs an 2-D max pooling operation. - * - * The output dimensions are functions of the filter dimensions, stride, and - * padding. - * - * The values in the output tensor are computed as: - * - * output[b, i, j, channel] = - * max_{di, dj} ( - * input[b, strides[1] * i + di, strides[2] * j + dj, channel] - * ) - * - * Supported tensor {@link OperandCode}: - * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3) - * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4) - * - * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout. - * With the default data layout NHWC, the data is stored in the order of: - * [batch, height, width, channels]. Alternatively, the data layout could - * be NCHW, the data storage order of: [batch, channels, height, width]. - * NCHW is supported since NNAPI feature level 3. - * - * Both explicit padding and implicit padding are supported. - * - * Inputs (explicit padding): - * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying - * the input. - * Since NNAPI feature level 3, zero batches is supported for this tensor. - * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on - * the left, in the ‘width’ dimension. - * * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on - * the right, in the ‘width’ dimension. - * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on - * the top, in the ‘height’ dimension. 
- * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on - * the bottom, in the ‘height’ dimension. - * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when - * walking through input in the ‘width’ dimension. - * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when - * walking through input in the ‘height’ dimension. - * * 7: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter - * width. - * * 8: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter - * height. - * * 9: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the - * {@link FuseCode} values. Specifies the activation to - * invoke on the result. - * * 10: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false. - * Set to true to specify NCHW data layout for input0 and output0. - * Available since NNAPI feature level 3. - * - * Inputs (implicit padding): - * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying - * the input. - * Since NNAPI feature level 3, zero batches is supported for this tensor. - * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the implicit - * padding scheme, has to be one of the - * {@link PaddingCode} values. - * * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when - * walking through input in the ‘width’ dimension. - * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when - * walking through input in the ‘height’ dimension. - * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter - * width. - * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter - * height. - * * 6: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the - * {@link FuseCode} values. Specifies the activation to - * invoke on the result. - * * 7: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false. - * Set to true to specify NCHW data layout for input0 and output0. 
- * Available since NNAPI feature level 3. - * - * Outputs: - * * 0: The output 4-D tensor, of shape - * [batches, out_height, out_width, depth]. - * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and - * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, - * the scale and zeroPoint must be the same as input0. - * - * Available since NNAPI feature level 1. - */ - ANEURALNETWORKS_MAX_POOL_2D = 17, - - /** - * Multiplies two tensors, element-wise. - * - * Takes two input tensors of identical {@link OperandCode} and compatible - * dimensions. The output is the product of both input tensors, optionally - * modified by an activation function. - * - * Two dimensions are compatible when: - * 1. they are equal, or - * 2. one of them is 1 - * - * The size of the resulting output is the maximum size along each dimension - * of the input operands. It starts with the trailing dimensions, and works - * its way forward. - * - * Since NNAPI feature level 3, generic zero-sized input tensor is supported. Zero - * dimension is only compatible with 0 or 1. The size of the output - * dimension is zero if either of corresponding input dimension is zero. - * - * Supported tensor {@link OperandCode}: - * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3) - * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4) - * * {@link ANEURALNETWORKS_TENSOR_INT32} (since NNAPI feature level 4) - * - * Supported tensor rank: up to 4 - * - * Inputs: - * * 0: A tensor. - * * 1: A tensor of the same {@link OperandCode}, and compatible dimensions - * as input0. - * * 2: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the - * {@link FuseCode} values. Specifies the activation to - * invoke on the result. - * For a {@link ANEURALNETWORKS_TENSOR_INT32} tensor, - * the {@link FuseCode} must be "NONE". 
- * - * Outputs: - * * 0: The product, a tensor of the same {@link OperandCode} as input0. - * For output tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} - * and {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}, - * the following condition must be satisfied: - * output_scale > input1_scale * input2_scale. - * - * Available since NNAPI feature level 1. - */ - ANEURALNETWORKS_MUL = 18, - - /** - * Computes rectified linear activation on the input tensor element-wise. - * - * The output is calculated using this formula: - * - * output = max(0, input) - * - * Supported tensor {@link OperandCode}: - * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3) - * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4) - * - * Supported tensor rank: up to 4. - * - * Inputs: - * * 0: A tensor, specifying the input. - * Since NNAPI feature level 3, this tensor may be zero-sized. - * - * Outputs: - * * 0: The output tensor of same shape as input0. - * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and - * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, - * the scale and zeroPoint must be the same as input0. - * - * Available since NNAPI feature level 1. - */ - ANEURALNETWORKS_RELU = 19, - - /** - * Computes rectified linear 1 activation on the input tensor element-wise. - * - * The output is calculated using this formula: - * - * output = min(1.f, max(-1.f, input)) - * - * Supported tensor {@link OperandCode}: - * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3) - * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4) - * - * Supported tensor rank: up to 4. - * - * Inputs: - * * 0: A tensor, specifying the input. - * Since NNAPI feature level 3, this tensor may be zero-sized. 
- * - * Outputs: - * * 0: The output tensor of the same shape as input0. - * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and - * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, - * the scale and zeroPoint must be the same as input0. - * - * Available since NNAPI feature level 1. - */ - ANEURALNETWORKS_RELU1 = 20, - - /** - * Computes rectified linear 6 activation on the input tensor element-wise. - * - * The output is calculated using this formula: - * - * output = min(6, max(0, input)) - * - * Supported tensor {@link OperandCode}: - * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3) - * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4) - * - * Supported tensor rank: up to 4. - * - * Inputs: - * * 0: A tensor, specifying the input. - * Since NNAPI feature level 3, this tensor may be zero-sized. - * - * Outputs: - * * 0: The output tensor of same shape as input0. - * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and - * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, - * the scale and zeroPoint must be the same as input0. - * - * Available since NNAPI feature level 1. - */ - ANEURALNETWORKS_RELU6 = 21, - - /** - * Reshapes a tensor. - * - * Given tensor, this operation returns a tensor that has the same values as - * tensor, but with a newly specified shape. - * - * Supported tensor {@link OperandCode}: - * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3) - * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4) - * - * Supported tensor rank: up to 4. - * - * Inputs: - * * 0: A tensor, specifying the tensor to be reshaped. - * * 1: A 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, defining the - * shape of the output tensor. 
The number of elements implied by shape - * must be the same as the number of elements in the input tensor. - * - * If one component of shape is the special value -1, the size of that - * dimension is computed so that the total size remains constant. In - * particular, a shape of [-1] flattens into 1-D. At most one component - * of shape can be -1. - * - * Outputs: - * * 0: The output tensor, of shape specified by the input shape. - * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and - * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, - * the scale and zeroPoint must be the same as input0. - * - * Available since NNAPI feature level 1. - */ - ANEURALNETWORKS_RESHAPE = 22, - - /** - * Resizes images to given size using the bilinear interpretation. - * - * Resized images must be distorted if their output aspect ratio is not the - * same as input aspect ratio. The corner pixels of output may not be the - * same as corner pixels of input. - * - * Supported tensor {@link OperandCode}: - * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3) - * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} (since NNAPI feature level 3) - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4) - * - * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout. - * With the default data layout NHWC, the data is stored in the order of: - * [batch, height, width, channels]. Alternatively, the data layout could - * be NCHW, the data storage order of: [batch, channels, height, width]. - * NCHW is supported since NNAPI feature level 3. - * - * Both resizing by shape and resizing by scale are supported. - * - * Inputs (resizing by shape): - * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying - * the input. - * Since NNAPI feature level 3, zero batches is supported for this tensor. 
- * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the output - * width of the output tensor. - * * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the output - * height of the output tensor. - * * 3: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false. - * Set to true to specify NCHW data layout for input0 and output0. - * Available since NNAPI feature level 3. - * * 4: Align corners. An optional {@link ANEURALNETWORKS_BOOL} - * scalar, default to false. If True, the centers of the 4 corner - * pixels of the input and output tensors are aligned, preserving the - * values at the corner pixels. - * Available since NNAPI feature level 4. - * * 5: Half pixel centers. An optional {@link ANEURALNETWORKS_BOOL} - * scalar, default to false. If True, the pixel centers are assumed to - * be at (0.5, 0.5). This is the default behavior of image.resize in - * TF 2.0. If this parameter is True, then align_corners parameter - * must be False. - * Available since NNAPI feature level 4. - * - * Inputs (resizing by scale, since NNAPI feature level 3): - * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying - * the input. Zero batches is supported for this tensor. - * * 1: A scalar, specifying width_scale, the scaling factor of the width - * dimension from the input tensor to the output tensor. The output - * width is calculated as new_width = floor(width * width_scale). - * The scalar must be of {@link ANEURALNETWORKS_FLOAT16} if input0 is - * of {@link ANEURALNETWORKS_TENSOR_FLOAT16} and of - * {@link ANEURALNETWORKS_FLOAT32} otherwise. - * * 2: A scalar, specifying height_scale, the scaling factor of the height - * dimension from the input tensor to the output tensor. The output - * height is calculated as new_height = floor(height * height_scale). - * The scalar must be of {@link ANEURALNETWORKS_FLOAT16} if input0 is - * of {@link ANEURALNETWORKS_TENSOR_FLOAT16} and of - * {@link ANEURALNETWORKS_FLOAT32} otherwise. 
- * * 3: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false. - * Set to true to specify NCHW data layout for input0 and output0. - * * 4: Align corners. An optional {@link ANEURALNETWORKS_BOOL} - * scalar, default to false. If True, the centers of the 4 corner - * pixels of the input and output tensors are aligned, preserving the - * values at the corner pixels. - * Available since NNAPI feature level 4. - * * 5: Half pixel centers. An optional {@link ANEURALNETWORKS_BOOL} - * scalar, default to false. If True, the pixel centers are assumed to - * be at (0.5, 0.5). This is the default behavior of image.resize in - * TF 2.0. If this parameter is True, then align_corners parameter - * must be False. - * Available since NNAPI feature level 4. - * - * Outputs: - * * 0: The output 4-D tensor, of shape - * [batches, new_height, new_width, depth]. - * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and - * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, - * the scale and zeroPoint must be the same as input0. - * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor, - * the scale and zeroPoint must be the same as input0. - * - * Available since NNAPI feature level 1. - */ - ANEURALNETWORKS_RESIZE_BILINEAR = 23, - - /** - * A basic recurrent neural network layer. - * - * This layer implements the operation: - * outputs = state = activation(inputs * input_weights + - * state * recurrent_weights + bias) - * - * Where: - * * “input_weights” is a weight matrix that multiplies the inputs; - * * “recurrent_weights” is a weight matrix that multiplies the current - * “state” which itself is the output from the previous time step - * computation; - * * “bias” is a bias vector (added to each output vector in the batch); - * * “activation” is the function passed as the “fused_activation_function” - * argument (if not “NONE”). 
- * - * Supported tensor {@link OperandCode}: - * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3) - * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} - * - * The input tensors must all be the same type. - * - * Inputs: - * * 0: input. - * A 2-D tensor of shape [batch_size, input_size], where “batch_size” - * corresponds to the batching dimension, and “input_size” is the size - * of the input. - * * 1: weights. - * A 2-D tensor of shape [num_units, input_size], where “num_units” - * corresponds to the number of units. - * * 2: recurrent_weights. - * A 2-D tensor of shape [num_units, num_units], with columns - * corresponding to the weights from each unit. - * * 3: bias. - * A 1-D tensor of shape [num_units]. - * * 4: hidden state (in). - * A 2-D tensor of shape [batch_size, num_units]. - * * 5: fused_activation_function. - * An optional {@link FuseCode} value indicating the - * activation function. If “NONE” is specified then it results in a - * linear activation. - * - * Outputs: - * * 0: hidden state (out). - * A 2-D tensor of shape [batch_size, num_units]. - * - * * 1: output. - * A 2-D tensor of shape [batch_size, num_units]. This is effectively - * the same as the current state value. - * - * Available since NNAPI feature level 1. - */ - ANEURALNETWORKS_RNN = 24, - - /** - * Computes the softmax activation on the input tensor element-wise, per - * batch, by normalizing the input vector so the maximum coefficient is - * zero. - * - * The output is calculated using this formula: - * - * output[batch, i] = - * exp((input[batch, i] - max(input[batch, :])) * beta) / - * sum_{k}{exp((input[batch, k] - max(input[batch, :])) * beta)} - * - * For input tensor with rank other than 2, the activation will be applied - * independently on each 1-D slice along specified dimension. 
- * - * Supported tensor {@link OperandCode}: - * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3) - * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4) - * - * Supported tensor rank: up to 4. - * Tensors with rank other than 2 or 4 are only supported since NNAPI feature level 3. - * - * Inputs: - * * 0: A 2-D or 4-D tensor, specifying the tensor to be reshaped. - * Since NNAPI feature level 3, this tensor may be zero-sized. - * * 1: A scalar, specifying the positive scaling factor for the exponent, - * beta. If input0 is of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, - * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} or - * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}, the scalar - * must be of {@link ANEURALNETWORKS_FLOAT32}. - * If input0 is of {@link ANEURALNETWORKS_TENSOR_FLOAT16}, then the - * scalar must be of {@link ANEURALNETWORKS_FLOAT16}. - * * 2: An optional {@link ANEURALNETWORKS_INT32} scalar, default to -1, - * specifying the dimension the activation would be performed on. - * Negative index is used to specify axis from the end (e.g. -1 for - * the last axis). Must be in the range [-n, n). - * Available since NNAPI feature level 3. - * - * Outputs: - * * 0: The output tensor of same shape as input0. - * For {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, - * the scale must be 1.f / 256 and the zeroPoint must be 0. - * For {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}, - * the scale must be 1.f / 256 and the zeroPoint must be -128. - * - * Available since NNAPI feature level 1. - */ - ANEURALNETWORKS_SOFTMAX = 25, - - /** - * Rearranges blocks of spatial data, into depth. - * - * More specifically, this op outputs a copy of the input tensor where - * values from the height and width dimensions are moved to the depth - * dimension. 
The value block_size indicates the input block size and how - * the data is moved. - * - * Chunks of data of size block_size * block_size from depth are rearranged - * into non-overlapping blocks of size block_size x block_size. - * - * The depth of the output tensor is input_depth * block_size * block_size. - * The input tensor's height and width must be divisible by block_size. - * - * Supported tensor {@link OperandCode}: - * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3) - * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4) - * - * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout. - * With the default data layout NHWC, the data is stored in the order of: - * [batch, height, width, channels]. Alternatively, the data layout could - * be NCHW, the data storage order of: [batch, channels, height, width]. - * NCHW is supported since NNAPI feature level 3. - * - * Inputs: - * * 0: A 4-D tensor, of shape [batches, height, width, depth_in], - * specifying the input. - * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the block_size. - * block_size must be >=1 and block_size must be a divisor of both the - * input height and width. - * * 2: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false. - * Set to true to specify NCHW data layout for input0 and output0. - * Available since NNAPI feature level 3. - * - * Outputs: - * * 0: The output 4-D tensor, of shape [batches, height/block_size, - * width/block_size, depth_in*block_size*block_size]. - * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and - * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, - * the scale and zeroPoint must be the same as input0. - * - * Available since NNAPI feature level 1. 
- */ - ANEURALNETWORKS_SPACE_TO_DEPTH = 26, - - /** - * SVDF op is a kind of stateful layer derived from the notion that a - * densely connected layer that's processing a sequence of input frames can - * be approximated by using a singular value decomposition of each of its - * nodes. The implementation is based on: - * - * https://research.google.com/pubs/archive/43813.pdf - * - * P. Nakkiran, R. Alvarez, R. Prabhavalkar, C. Parada. - * “Compressing Deep Neural Networks using a Rank-Constrained Topology”. - * INTERSPEECH, 2015. - * - * It processes the incoming input using a 2-stage filtering mechanism: - * * stage 1 performs filtering on the "features" dimension, whose outputs - * get pushed into a memory of fixed-size memory_size. - * * stage 2 performs filtering on the "time" dimension of the memory_size - * memoized outputs of stage 1. - * - * Specifically, for rank 1, this layer implements the operation: - * - * memory = push(conv1d(inputs, weights_feature, feature_dim, - * "ANEURALNETWORKS_PADDING_VALID")); - * outputs = activation(memory * weights_time + bias); - * - * Where: - * * “weights_feature” is a weights matrix that processes the inputs (by - * convolving the input with every “feature filter”), and whose outputs - * get pushed, stacked in order, into the fixed-size “memory” (the oldest - * entry gets dropped); - * * “weights_time” is a weights matrix that processes the “memory” (by a - * batched matrix multiplication on the num_units); - * * “bias” is an optional bias vector (added to each output vector in the - * batch); and - * * “activation” is the function passed as the “fused_activation_function” - * argument (if not “NONE”). - * - * Each rank adds a dimension to the weights matrices by means of stacking - * the filters. - * - * Supported tensor {@link OperandCode}: - * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3) - * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} - * - * All input tensors must be the same type. 
- * - * Inputs: - * * 0: input. - * A 2-D tensor of shape [batch_size, input_size], where “batch_size” - * corresponds to the batching dimension, and “input_size” is the size - * of the input. - * * 1: weights_feature. - * A 2-D tensor of shape [num_units, input_size], where “num_units” - * corresponds to the number of units. - * * 2: weights_time. - * A 2-D tensor of shape [num_units, memory_size], where “memory_size” - * corresponds to the fixed-size of the memory. - * * 3: bias. - * An optional 1-D tensor of shape [num_units]. - * * 4: state (in). - * A 2-D tensor of shape [batch_size, (memory_size - 1) * num_units * rank]. - * * 5: rank. - * The rank of the SVD approximation. - * * 6: fused_activation_function. - * An optional {@link FuseCode} value indicating the - * activation function. If “NONE” is specified then it results in a - * linear activation. - * - * Outputs: - * * 0: state (out). - * A 2-D tensor of the same {@link OperandCode} as the inputs, with shape - * [batch_size, (memory_size - 1) * num_units * rank]. - * * 1: output. - * A 2-D tensor of the same {@link OperandCode} as the inputs, with shape - * [batch_size, num_units]. - * - * Available since NNAPI feature level 1. - */ - ANEURALNETWORKS_SVDF = 27, - - /** - * Computes hyperbolic tangent of input tensor element-wise. - * - * The output is calculated using this formula: - * - * output = tanh(input) - * - * Supported tensor {@link OperandCode}: - * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3) - * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} (since NNAPI feature level 3) - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4) - * - * Supported tensor rank: up to 4. - * - * Inputs: - * * 0: A tensor, specifying the input. - * Since NNAPI feature level 3, this tensor may be zero-sized. - * - * Outputs: - * * 0: The output tensor of same shape as input0. 
- * For {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, - * the scale must be 1.f / 128 and the zeroPoint must be 128. - * For {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}, - * the scale must be 1.f / 128 and the zeroPoint must be 0. - * - * Available since NNAPI feature level 1. - */ - ANEURALNETWORKS_TANH = 28, - - // Operations below are available since NNAPI feature level 2. - - /** - * BatchToSpace for N-dimensional tensors. - * - * This operation reshapes the batch dimension (dimension 0) into M + 1 - * dimensions of shape block_shape + [batch], interleaves these blocks back - * into the grid defined by the spatial dimensions [1, ..., M], to obtain a - * result with the same rank as the input. - * - * This is the reverse of SpaceToBatch. - * - * Supported tensor {@link OperandCode}: - * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3) - * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4) - * - * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout. - * With the default data layout NHWC, the data is stored in the order of: - * [batch, height, width, channels]. Alternatively, the data layout could - * be NCHW, the data storage order of: [batch, channels, height, width]. - * NCHW is supported since NNAPI feature level 3. - * - * Inputs: - * * 0: An n-D tensor, specifying the tensor to be reshaped - * * 1: A 1-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, the block - * sizes for each spatial dimension of the input tensor. All values - * must be >= 1. - * * 2: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false. - * Set to true to specify NCHW data layout for input0 and output0. - * Available since API level 29. - * - * Outputs: - * * 0: A tensor of the same {@link OperandCode} as input0. 
- * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and - * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, - * the scale and zeroPoint must be the same as input0. - * - * Available since NNAPI feature level 2. - */ - ANEURALNETWORKS_BATCH_TO_SPACE_ND = 29, - - /** - * Element-wise division of two tensors. - * - * Takes two input tensors of identical {@link OperandCode} and compatible - * dimensions. The output is the result of dividing the first input tensor - * by the second, optionally modified by an activation function. - * - * For inputs of {@link ANEURALNETWORKS_TENSOR_INT32}, performs - * "floor division" ("//" in Python). For example, - * 5 // 2 = 2 - * -5 // 2 = -3 - * - * Two dimensions are compatible when: - * 1. they are equal, or - * 2. one of them is 1 - * - * The size of the output is the maximum size along each dimension of the - * input operands. It starts with the trailing dimensions, and works its way - * forward. - * - * Example: - * input1.dimension = {4, 1, 2} - * input2.dimension = {5, 4, 3, 1} - * output.dimension = {5, 4, 3, 2} - * - * Since NNAPI feature level 3, generic zero-sized input tensor is supported. Zero - * dimension is only compatible with 0 or 1. The size of the output - * dimension is zero if either of corresponding input dimension is zero. - * - * Supported tensor {@link OperandCode}: - * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3) - * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} - * * {@link ANEURALNETWORKS_TENSOR_INT32} (since NNAPI feature level 4) - * - * Supported tensor rank: up to 4 - * - * Inputs: - * * 0: An n-D tensor, specifying the first input. - * * 1: A tensor of the same {@link OperandCode}, and compatible dimensions - * as input0. - * * 2: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the - * {@link FuseCode} values. Specifies the activation to - * invoke on the result. 
- * For a {@link ANEURALNETWORKS_TENSOR_INT32} tensor, - * the {@link FuseCode} must be "NONE". - * - * Outputs: - * * 0: A tensor of the same {@link OperandCode} as input0. - * - * Available since NNAPI feature level 2. - */ - ANEURALNETWORKS_DIV = 30, - - /** - * Computes the mean of elements across dimensions of a tensor. - * - * Reduces the input tensor along the given dimensions to reduce. Unless - * keep_dims is true, the rank of the tensor is reduced by 1 for each entry - * in axis. If keep_dims is true, the reduced dimensions are retained with - * length 1. - * - * Supported tensor {@link OperandCode}: - * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3) - * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4) - * - * Supported tensor rank: up to 4 - * - * Inputs: - * * 0: A tensor, specifying the input. - * * 1: A 1-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The dimensions - * to reduce. Must be in the range - * [-rank(input_tensor), rank(input_tensor)). - * - * NOTE: When the operation was introduced, the documentation - * incorrectly stated that if dimensions were empty, the operation - * would reduce across all dimensions. This behavior was never - * implemented. - * - * * 2: An {@link ANEURALNETWORKS_INT32} scalar, keep_dims. If positive, - * retains reduced dimensions with length 1. - * - * Outputs: - * * 0: A tensor of the same {@link OperandCode} as input0. - * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and - * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, - * the scale and zeroPoint must be the same as input0. - * If all dimensions are reduced and keep_dims is false, the output - * shape is [1]. - * - * Available since NNAPI feature level 2. - */ - ANEURALNETWORKS_MEAN = 31, - - /** - * Pads a tensor. - * - * This operation pads a tensor according to the specified paddings. 
- * - * Supported tensor {@link OperandCode}: - * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3) - * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4) - * (full support since NNAPI feature level 3, see the output section) - * - * Supported tensor rank: up to 4 - * - * Inputs: - * * 0: An n-D tensor, specifying the tensor to be padded. - * * 1: A 2-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, the paddings - * for each spatial dimension of the input tensor. The shape of the - * tensor must be {rank(input0), 2}. - * padding[i, 0] specifies the number of elements to be padded in the - * front of dimension i. - * padding[i, 1] specifies the number of elements to be padded after the - * end of dimension i. - * - * Outputs: - * * 0: A tensor of the same {@link OperandCode} as input0. The - * output tensor has the same rank as input0, and each - * dimension of the output tensor has the same size as the - * corresponding dimension of the input tensor plus the size - * of the padding: - * output0.dimension[i] = - * padding[i, 0] + input0.dimension[i] + padding[i, 1] - * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and - * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, - * the scale and zeroPoint must be the same as input0. - * - * NOTE: Before NNAPI feature level 3, the pad value for - * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} is undefined. - * Since NNAPI feature level 3, the pad value is always the logical zero. - * - * Available since NNAPI feature level 2. - */ - ANEURALNETWORKS_PAD = 32, - - /** - * SpaceToBatch for N-Dimensional tensors. 
- * - * This operation divides "spatial" dimensions [1, ..., M] of the input into - * a grid of blocks of shape block_shape, and interleaves these blocks with - * the "batch" dimension (0) such that in the output, the spatial dimensions - * [1, ..., M] correspond to the position within the grid, and the batch - * dimension combines both the position within a spatial block and the - * original batch position. Prior to division into blocks, the spatial - * dimensions of the input are optionally zero padded according to paddings. - * - * Supported tensor {@link OperandCode}: - * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3) - * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4) - * (full support since NNAPI feature level 3, see the output section) - * - * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout. - * With the default data layout NHWC, the data is stored in the order of: - * [batch, height, width, channels]. Alternatively, the data layout could - * be NCHW, the data storage order of: [batch, channels, height, width]. - * NCHW is supported since NNAPI feature level 3. - * - * Inputs: - * * 0: An n-D tensor, specifying the input. - * * 1: A 1-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, the block - * sizes for each spatial dimension of the input tensor. All values - * must be >= 1. - * * 2: A 2-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, the paddings - * for each spatial dimension of the input tensor. All values must be - * >= 0. The shape of the tensor must be {M, 2}, where M is the number - * of spatial dimensions. - * padding[i, 0] specifies the number of element to be padded in the - * front of dimension i. - * padding[i, 1] specifies the number of element to be padded after the - * end of dimension i. - * * 3: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false. 
- * Set to true to specify NCHW data layout for input0 and output0. - * Available since NNAPI feature level 3. - * - * Outputs: - * * 0: A tensor of the same {@link OperandCode} as input0. - * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and - * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, - * the scale and zeroPoint must be the same as input0. - * - * NOTE: Before NNAPI feature level 3, the pad value for - * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} is undefined. - * Since NNAPI feature level 3, the pad value is always the logical zero. - * - * Available since NNAPI feature level 2. - */ - ANEURALNETWORKS_SPACE_TO_BATCH_ND = 33, - - /** - * Removes dimensions of size 1 from the shape of a tensor. - * - * Given a tensor input, this operation returns a tensor of the same - * {@link OperandCode} with all dimensions of size 1 removed. If you don't - * want to remove all size 1 dimensions, you can remove specific size 1 - * dimensions by specifying the axes (input1). - * - * Supported tensor {@link OperandCode}: - * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3) - * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4) - * - * Supported tensor rank: up to 4 - * - * Inputs: - * * 0: An n-D tensor, the tensor to be squeezed. - * * 1: An optional 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The - * dimensions to squeeze. If specified only squeezes the dimensions - * listed. Otherwise, squeezes all dimensions. The dimension index - * starts at 0. An error must be reported if squeezing a dimension that - * is not 1. - * - * Outputs: - * * 0: A tensor of the same {@link OperandCode} as input0. Contains the - * same data as input, but has one or more dimensions of size 1 - * removed. 
- * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and - * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, - * the scale and zeroPoint must be the same as input0. - * If all input dimensions are equal to 1 and are to be squeezed, the - * output shape is [1]. - * - * Available since NNAPI feature level 2. - */ - ANEURALNETWORKS_SQUEEZE = 34, - - /** - * Extracts a strided slice of a tensor. - * - * Roughly speaking, this op extracts a slice of size (end - begin) / stride - * from the given input tensor. Starting at the location specified by begin - * the slice continues by adding stride to the index until all dimensions - * are not less than end. Note that a stride can be negative, which causes a - * reverse slice. - * - * Supported tensor {@link OperandCode}: - * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3) - * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4) - * - * Supported tensor rank: up to 4 - * - * Inputs: - * * 0: An n-D tensor, specifying the tensor to be sliced. - * * 1: begin, a 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The - * starts of the dimensions of the input tensor to be sliced. The - * length must be of rank(input0). - * * 2: end, a 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The - * ends of the dimensions of the input tensor to be sliced. The length - * must be of rank(input0). - * * 3: strides, a 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The - * strides of the dimensions of the input tensor to be sliced. The - * length must be of rank(input0). The entries must be non-zero. - * * 4: begin_mask, an {@link ANEURALNETWORKS_INT32} scalar. If the ith bit - * of begin_mask is set, begin[i] is ignored and the fullest possible - * range in that dimension is used instead. - * * 5: end_mask, an {@link ANEURALNETWORKS_INT32} scalar. 
If the ith bit of - * end_mask is set, end[i] is ignored and the fullest possible range in - * that dimension is used instead. - * * 6: shrink_axis_mask, an {@link ANEURALNETWORKS_INT32} scalar. If the - * ith bit of shrink_axis_mask is set, the ith dimension specification - * shrinks the dimensionality by 1, taking on the value at index - * begin[i]. In this case, the ith specification must define a - * slice of size 1, e.g. begin[i] = x, end[i] = x + 1. - * - * Outputs: - * * 0: A tensor of the same {@link OperandCode} as input0 and rank (n - k), - * where k is the number of bits set in shrink_axis_mask. - * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and - * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, - * the scale and zeroPoint must be the same as input0. - * If shrink_axis_mask is true for all input dimensions, the output - * shape is [1]. - * - * Available since NNAPI feature level 2. - */ - ANEURALNETWORKS_STRIDED_SLICE = 35, - - /** - * Element-wise subtraction of two tensors. - * - * Takes two input tensors of identical {@link OperandCode} and compatible - * dimensions. The output is the result of subtracting the second input - * tensor from the first one, optionally modified by an activation function. - * - * Two dimensions are compatible when: - * 1. they are equal, or - * 2. one of them is 1 - * - * The size of the output is the maximum size along each dimension of the - * input operands. It starts with the trailing dimensions, and works its way - * forward. - * - * Example: - * input1.dimension = {4, 1, 2} - * input2.dimension = {5, 4, 3, 1} - * output.dimension = {5, 4, 3, 2} - * - * Since NNAPI feature level 3, generic zero-sized input tensor is supported. Zero - * dimension is only compatible with 0 or 1. The size of the output - * dimension is zero if either of corresponding input dimension is zero. 
- * - * Supported tensor {@link OperandCode}: - * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3) - * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} (since NNAPI feature level 3) - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4) - * * {@link ANEURALNETWORKS_TENSOR_INT32} (since NNAPI feature level 4) - * - * Supported tensor rank: up to 4 - * - * Inputs: - * * 0: An n-D tensor, specifying the first input. - * * 1: A tensor of the same {@link OperandCode}, and compatible dimensions - * as input0. - * * 2: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the - * {@link FuseCode} values. Specifies the activation to - * invoke on the result. - * For a {@link ANEURALNETWORKS_TENSOR_INT32} tensor, - * the {@link FuseCode} must be "NONE". - * - * Outputs: - * * 0: A tensor of the same {@link OperandCode} as input0. - * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and - * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, - * the scale and zeroPoint can be different from inputs' scale and zeroPoint. - * - * Available since NNAPI feature level 2. - */ - ANEURALNETWORKS_SUB = 36, - - /** - * Transposes the input tensor, permuting the dimensions according to the - * perm tensor. - * - * The returned tensor's dimension i corresponds to the input dimension - * perm[i]. If perm is not given, it is set to (n-1...0), where n is the - * rank of the input tensor. Hence by default, this operation performs a - * regular matrix transpose on 2-D input Tensors. 
- * - * Supported tensor {@link OperandCode}: - * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3) - * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4) - * - * Supported tensor rank: up to 4 - * - * Inputs: - * * 0: An n-D tensor, specifying the tensor to be transposed. - * Since NNAPI feature level 3, this tensor may be zero-sized. - * * 1: An optional 1-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, - * the permutation of the dimensions of the input tensor. - * - * Outputs: - * * 0: A tensor of the same {@link OperandCode} as input0. - * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and - * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, - * the scale and zeroPoint must be the same as input0. - * - * Available since NNAPI feature level 2. - */ - ANEURALNETWORKS_TRANSPOSE = 37, - - // Operations below are available since NNAPI feature level 3. - - /** - * Computes the absolute value of a tensor, element-wise. - * - * Supported tensor {@link OperandCode}: - * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} - * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} - * * {@link ANEURALNETWORKS_TENSOR_INT32} (since NNAPI feature level 4) - * - * Supported tensor rank: from 1. - * - * Inputs: - * * 0: A tensor. - * - * Outputs: - * * 0: The output tensor of same shape as input0. - * - * Available since NNAPI feature level 3. - */ - ANEURALNETWORKS_ABS = 38, - - /** - * Returns the index of the largest element along an axis. 
- * - * Supported tensor {@link OperandCode}: - * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} - * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} - * * {@link ANEURALNETWORKS_TENSOR_INT32} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4) - * - * Supported tensor rank: from 1 - * - * Inputs: - * * 0: An n-D tensor specifying the input. Must be non-empty. - * * 1: An {@link ANEURALNETWORKS_INT32} scalar specifying the axis to - * reduce across. Negative index is used to specify axis from the - * end (e.g. -1 for the last axis). Must be in the range [-n, n). - * - * Outputs: - * * 0: An (n - 1)-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor. - * If input is 1-dimensional, the output shape is [1]. - * - * Available since NNAPI feature level 3. - */ - // There is no underscore in ARG_MAX to avoid name conflict with - // the macro defined in libc/kernel/uapi/linux/limits.h. - ANEURALNETWORKS_ARGMAX = 39, - - /** - * Returns the index of the smallest element along an axis. - * - * Supported tensor {@link OperandCode}: - * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} - * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} - * * {@link ANEURALNETWORKS_TENSOR_INT32} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4) - * - * Supported tensor rank: from 1 - * - * Inputs: - * * 0: An n-D tensor specifying the input. Must be non-empty. - * * 1: An {@link ANEURALNETWORKS_INT32} scalar specifying the axis to - * reduce across. Negative index is used to specify axis from the - * end (e.g. -1 for the last axis). Must be in the range [-n, n). - * - * Outputs: - * * 0: An (n - 1)-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor. - * If input is 1-dimensional, the output shape is [1]. - * - * Available since NNAPI feature level 3. - */ - ANEURALNETWORKS_ARGMIN = 40, // See ARGMAX for naming discussion. 
- - /** - * Transform axis-aligned bounding box proposals using bounding box deltas. - * - * Given the positions of bounding box proposals and the corresponding - * bounding box deltas for each class, return the refined bounding box - * regions. The resulting bounding boxes are cliped against the edges of - * the image. - * - * Supported tensor {@link OperandCode}: - * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} - * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} - * * {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM} - * - * Inputs: - * * 0: A 2-D Tensor of shape [num_rois, 4], specifying the locations of the - * bounding box proposals, each line with format [x1, y1, x2, y2]. - * For tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}, - * the zeroPoint must be 0 and the scale must be 0.125. Zero num_rois - * is supported for this tensor. - * * 1: A 2-D Tensor of shape [num_rois, num_classes * 4], specifying the - * bounding box delta for each region of interest and each class. The - * bounding box deltas are organized in the following order - * [dx, dy, dw, dh], where dx and dy is the relative correction factor - * for the center position of the bounding box with respect to the width - * and height, dw and dh is the log-scale relative correction factor - * for the width and height. For input0 of type - * {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}, this tensor should be - * of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} or - * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}. Zero num_rois is - * supported for this tensor. - * * 2: An 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor, of shape - * [num_rois], specifying the batch index of each box. Boxes with - * the same batch index are grouped together. Zero num_rois is - * supported for this tensor. - * * 3: A 2-D Tensor of shape [batches, 2], specifying the information of - * each image in the batch, each line with format - * [image_height, image_width]. 
- * - * Outputs: - * * 0: A tensor of the same {@link OperandCode} as input0, with shape - * [num_rois, num_classes * 4], specifying the coordinates of each - * output bounding box for each class, with format [x1, y1, x2, y2]. - * For type of {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}, the - * scale must be 0.125 and the zero point must be 0. - * - * Available since NNAPI feature level 3. - */ - ANEURALNETWORKS_AXIS_ALIGNED_BBOX_TRANSFORM = 41, - - /** - * A recurrent neural network layer that applies an LSTM cell to a - * sequence of inputs in forward and backward directions. - * - * The op supports cross-linking via an auxiliary input. Regular cell feeds - * one input into the two RNN cells in the following way: - * - * INPUT (INPUT_REVERSED) - * | | - * --------------------- - * | FW_LSTM BW_LSTM | - * --------------------- - * | | - * FW_OUT BW_OUT - * - * An op with cross-linking takes two inputs and feeds them into the RNN - * cells in the following way: - * - * AUX_INPUT (AUX_INPUT_REVERSED) - * | | - * INPUT | (INPUT_R'D.)| - * | | | | - * ----------------------- - * | \ / \ / | - * | FW_LSTM BW_LSTM | - * ----------------------- - * | | - * FW_OUT BW_OUT - * - * The cross-linking mode is enabled iff auxiliary input and auxiliary - * weights are present. While stacking this op on top of itself, this - * allows to connect both forward and backward outputs from previous cell - * to the next cell's input. - * - * Since NNAPI feature level 4 parallel linking mode is supported. The mode is - * enabled if auxiliary input is present but auxiliary weights are omitted. - * In this case, the cell feeds inputs into the RNN in the following way: - * - * INPUT (AUX_INPUT_REVERSED) - * | | - * --------------------- - * | FW_LSTM BW_LSTM | - * --------------------- - * | | - * FW_OUT BW_OUT - * - * While stacking this op on top of itself, this allows to connect both - * forward and backward outputs from previous cell to the next cell's - * corresponding inputs. 
- * - * Supported tensor {@link OperandCode}: - * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} - * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} - * - * Supported tensor rank: 3, either time-major or batch-major. - * - * All input and output tensors must be of the same type. - * - * Inputs: - * * 0: The input. - * A 3-D tensor of shape: - * If time-major: [max_time, batch_size, input_size] - * If batch-major: [batch_size, max_time, input_size] - * where "max_time" is the number of timesteps (sequence length), - * "batch_size" corresponds to the batching dimension, and - * "input_size" is the size of the input. - * * 1: The forward input-to-input weights. Optional. - * A 2-D tensor of shape [fw_num_units, input_size], where “fw_num_units” - * corresponds to the number of forward cell units. - * * 2: The forward input-to-forget weights. - * A 2-D tensor of shape [fw_num_units, input_size]. - * * 3: The forward input-to-cell weights. - * A 2-D tensor of shape [fw_num_units, input_size]. - * * 4: The forward input-to-output weights. - * A 2-D tensor of shape [fw_num_units, input_size]. - * * 5: The forward recurrent-to-input weights. Optional. - * A 2-D tensor of shape [fw_num_units, fw_output_size], where “fw_output_size” - * corresponds to either the number of cell units (i.e., fw_num_units), - * or the second dimension of the “fw_projection_weights”, if defined. - * * 6: The forward recurrent-to-forget weights. - * A 2-D tensor of shape [fw_num_units, fw_output_size]. - * * 7: The forward recurrent-to-cell weights. - * A 2-D tensor of shape [fw_num_units, fw_output_size]. - * * 8: The forward recurrent-to-output weights. - * A 2-D tensor of shape [fw_num_units, fw_output_size]. - * * 9: The forward cell-to-input weights. Optional. - * A 1-D tensor of shape [fw_num_units]. - * * 10: The forward cell-to-forget weights. Optional. - * A 1-D tensor of shape [fw_num_units]. - * * 11: The forward cell-to-output weights. Optional. - * A 1-D tensor of shape [fw_num_units]. 
- * * 12: The forward input gate bias. Optional. - * A 1-D tensor of shape [fw_num_units]. - * * 13: The forward forget gate bias. - * A 1-D tensor of shape [fw_num_units]. - * * 14: The forward cell gate bias. - * A 1-D tensor of shape [fw_num_units]. - * * 15: The forward output gate bias. - * A 1-D tensor of shape [fw_num_units]. - * * 16: The forward projection weights. Optional. - * A 2-D tensor of shape [fw_output_size, fw_num_units]. - * * 17: The forward projection bias. Optional. - * A 1-D tensor of shape [fw_output_size]. - * * 18: The backward input-to-input weights. Optional. - * A 2-D tensor of shape [bw_num_units, input_size], where “bw_num_units” - * corresponds to the number of backward cell units. - * * 19: The backward input-to-forget weights. - * A 2-D tensor of shape [bw_num_units, input_size]. - * * 20: The backward input-to-cell weights. - * A 2-D tensor of shape [bw_num_units, input_size]. - * * 21: The backward input-to-output weights. - * A 2-D tensor of shape [bw_num_units, input_size]. - * * 22: The backward recurrent-to-input weights. Optional. - * A 2-D tensor of shape [bw_num_units, bw_output_size], where “bw_output_size” - * corresponds to either the number of cell units (i.e., “bw_num_units”), - * or the second dimension of the “bw_projection_weights”, if defined. - * * 23: The backward recurrent-to-forget weights. - * A 2-D tensor of shape [bw_num_units, bw_output_size]. - * * 24: The backward recurrent-to-cell weights. - * A 2-D tensor of shape [bw_num_units, bw_output_size]. - * * 25: The backward recurrent-to-output weights. - * A 2-D tensor of shape [bw_num_units, bw_output_size]. - * * 26: The backward cell-to-input weights. Optional. - * A 1-D tensor of shape [bw_num_units]. - * * 27: The backward cell-to-forget weights. Optional. - * A 1-D tensor of shape [bw_num_units]. - * * 28: The backward cell-to-output weights. Optional. - * A 1-D tensor of shape [bw_num_units]. - * * 29: The backward input gate bias. Optional. 
- * A 1-D tensor of shape [bw_num_units]. - * * 30: The backward forget gate bias. - * A 1-D tensor of shape [bw_num_units]. - * * 31: The backward cell gate bias. - * A 1-D tensor of shape [bw_num_units]. - * * 32: The backward output gate bias. - * A 1-D tensor of shape [bw_num_units]. - * * 33: The backward projection weights. Optional. - * A 2-D tensor of shape [bw_output_size, bw_num_units]. - * * 34: The backward projection bias. Optional. - * A 1-D tensor of shape [bw_output_size]. - * * 35: The forward input activation state. - * A 2-D tensor of shape [batch_size, bw_output_size]. - * * 36: The forward input cell state. - * A 2-D tensor of shape [batch_size, bw_num_units]. - * * 37: The backward input activation state. - * A 2-D tensor of shape [batch_size, bw_output_size]. - * * 38: The backward input cell state. - * A 2-D tensor of shape [batch_size, bw_num_units]. - * * 39: The auxiliary input. Optional. - * A 3-D tensor of shape [max_time, batch_size, aux_input_size], - * where “batch_size” corresponds to the batching dimension, and - * “aux_input_size” is the size of the auxiliary input. Optional. See - * the docs above for the usage modes explanation. - * * 40: The forward auxiliary input-to-input weights. - * Optional. See the docs above for the usage modes explanation. - * A 2-D tensor of shape [fw_num_units, aux_input_size]. - * * 41: The forward auxiliary input-to-forget weights. - * Optional. See the docs above for the usage modes explanation. - * A 2-D tensor of shape [fw_num_units, aux_input_size]. - * * 42: The forward auxiliary input-to-cell weights. - * Optional. See the docs above for the usage modes explanation. - * A 2-D tensor of shape [fw_num_units, aux_input_size]. - * * 43: The forward auxiliary input-to-output weights. - * Optional. See the docs above for the usage modes explanation. - * A 2-D tensor of shape [fw_num_units, aux_input_size]. - * * 44: The backward auxiliary input-to-input weights. - * Optional. 
See the docs above for the usage modes explanation. - * A 2-D tensor of shape [bw_num_units, aux_input_size]. - * * 45: The backward auxiliary input-to-forget weights. - * Optional. See the docs above for the usage modes explanation. - * A 2-D tensor of shape [bw_num_units, aux_input_size]. - * * 46: The backward auxiliary input-to-cell weights. - * Optional. See the docs above for the usage modes explanation. - * A 2-D tensor of shape [bw_num_units, aux_input_size]. - * * 47: The backward auxiliary input-to-output weights. - * Optional. See the docs above for the usage modes explanation. - * A 2-D tensor of shape [bw_num_units, aux_input_size]. - * * 48: The activation function. - * A value indicating the activation function: - * <ul> - * <li>0: None; - * <li>1: Relu; - * <li>3: Relu6; - * <li>4: Tanh; - * <li>6: Sigmoid. - * </ul> - * * 49: The clipping threshold for the cell state, such - * that values are bound within [-cell_clip, cell_clip]. If set to 0.0 - * then clipping is disabled. - * If all the input tensors have type {@link ANEURALNETWORKS_TENSOR_FLOAT32}, - * this scalar must be of the type {@link ANEURALNETWORKS_FLOAT32}, - * otherwise if all the input tensors have the type - * {@link ANEURALNETWORKS_TENSOR_FLOAT16}, this scalar must be - * of type {@link ANEURALNETWORKS_FLOAT16}. - * * 50: The clipping threshold for the output from the - * projection layer, such that values are bound within - * [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled. - * If all the input tensors have type {@link ANEURALNETWORKS_TENSOR_FLOAT32}, - * this scalar must be of the type {@link ANEURALNETWORKS_FLOAT32}, - * otherwise if all the input tensors have the type - * {@link ANEURALNETWORKS_TENSOR_FLOAT16}, this scalar must be - * of type {@link ANEURALNETWORKS_FLOAT16}. - * * 51: merge_outputs - * An {@link ANEURALNETWORKS_BOOL} scalar specifying if the outputs - * from forward and backward cells should be merged. 
- * * 52: time_major - * An {@link ANEURALNETWORKS_BOOL} scalar specifying the shape format - * of input and output tensors. - * * 53: The forward input layer normalization weights. Optional. - * A 1-D tensor of shape [fw_num_units]. Used to rescale normalized inputs - * to activation at input gate. - * * 54: The forward forget layer normalization weights. Optional. - * A 1-D tensor of shape [fw_num_units]. Used to rescale normalized inputs - * to activation at forget gate. - * * 55: The forward cell layer normalization weights. Optional. - * A 1-D tensor of shape [fw_num_units]. Used to rescale normalized inputs - * to activation at cell gate. - * * 56: The forward output layer normalization weights. Optional. - * A 1-D tensor of shape [fw_num_units]. Used to rescale normalized inputs - * to activation at output gate. - * * 57: The backward input layer normalization weights. Optional. - * A 1-D tensor of shape [bw_num_units]. Used to rescale normalized inputs - * to activation at input gate. - * * 58: The backward forget layer normalization weights. Optional. - * A 1-D tensor of shape [bw_num_units]. Used to rescale normalized inputs - * to activation at forget gate. - * * 59: The backward cell layer normalization weights. Optional. - * A 1-D tensor of shape [bw_num_units]. Used to rescale normalized inputs - * to activation at cell gate. - * * 60: The backward output layer normalization weights. Optional. - * A 1-D tensor of shape [bw_num_units]. Used to rescale normalized inputs - * to activation at output gate. - * - * Outputs: - * * 0: The forward output. 
- * A 3-D tensor of shape: - * If time-major and not merge_outputs: - * [max_time, batch_size, fw_output_size] - * If time-major and merge_outputs: - * [max_time, batch_size, fw_output_size + bw_output_size] - * If batch-major and not merge_outputs: - * [batch_size, max_time, fw_output_size] - * If batch-major and merge_outputs: - * [batch_size, max_time, fw_output_size + bw_output_size] - * * 1: The backward output. Unused if merge_outputs is true. - * A 3-D tensor of shape: - * If time-major: [max_time, batch_size, bw_output_size] - * If batch-major: [batch_size, max_time, bw_output_size] - * * 2: The forward activation state output. - * A 2-D tensor of shape [batch_size, fw_output_size] containing an - * activation state from the last time step in the sequence. This - * output is optional and can be omitted. If this output is present - * then outputs 3-5 must be present as well. - * Available since NNAPI feature level 4. - * * 3: The forward cell state output. - * A tensor of shape [batch_size, fw_cell_size] containing a cell state - * from the last time step in the sequence. This output is optional - * and can be omitted. If this output is present - * then outputs 2, 4, 5 must be present as well. - * Available since NNAPI feature level 4. - * * 4: The backward activation state output. - * A 2-D tensor of shape [batch_size, bw_output_size] containing an - * activation state from the last time step in the sequence. This - * output is optional and can be omitted. If this output is present - * then outputs 2, 3, 5 must be present as well. - * Available since NNAPI feature level 4. - * * 5: The backward cell state output. - * A tensor of shape [batch_size, bw_cell_size] containing a cell state - * from the last time step in the sequence. This output is optional - * and can be omitted. If this output is present - * then outputs 2-4 must be present as well. - * Available since NNAPI feature level 4. - * - * Available since NNAPI feature level 3. 
- * - * Important: As of NNAPI feature level 3, there is no way to get the output state tensors out - * and NNAPI does not maintain internal states. This operator does not support the usage pattern - * in which multiple cells are chained and state tensors are propagated. - */ - ANEURALNETWORKS_BIDIRECTIONAL_SEQUENCE_LSTM = 42, - - /** - * A recurrent neural network layer that applies a basic RNN cell to a - * sequence of inputs in forward and backward directions. - * - * This Op unrolls the input along the sequence dimension, and implements - * the following operation for each element in the sequence s = - * 1...sequence_length: - * fw_outputs[s] = fw_state = activation(inputs[s] * fw_input_weights’ + - * fw_state * fw_recurrent_weights’ + fw_bias) - * - * And for each element in sequence t = sequence_length : 1 - * bw_outputs[t] = bw_state = activation(inputs[t] * bw_input_weights’ + - * bw_state * bw_recurrent_weights’ + bw_bias) - * - * Where: - * * “{fw,bw}_input_weights” is a weight matrix that multiplies the inputs; - * * “{fw,bw}_recurrent_weights” is a weight matrix that multiplies the - * current “state” which itself is the output from the previous time step - * computation; - * * “{fw,bw}_bias” is a bias vector (added to each output vector in the - * batch); - * * “activation” is the function passed as the “fused_activation_function” - * argument (if not “NONE”). - * - * The op supports cross-linking via an auxiliary input. 
Regular cell feeds - * one input into the two RNN cells in the following way: - * - * INPUT (INPUT_REVERSED) - * | | - * --------------------- - * | FW_RNN BW_RNN | - * --------------------- - * | | - * FW_OUT BW_OUT - * - * An op with cross-linking takes two inputs and feeds them into the RNN - * cells in the following way: - * - * AUX_INPUT (AUX_INPUT_REVERSED) - * | | - * INPUT | (INPUT_R'D.)| - * | | | | - * ----------------------- - * | \ / \ / | - * | FW_RNN BW_RNN | - * ----------------------- - * | | - * FW_OUT BW_OUT - * - * The cross-linking mode is enabled iff auxiliary input and auxiliary - * weights are present. While stacking this op on top of itself, this - * allows to connect both forward and backward outputs from previous cell - * to the next cell's input. - * - * Since NNAPI feature level 4 parallel linking mode is supported. The mode is - * enabled if auxiliary input is present but auxiliary weights are omitted. - * In this case, the cell feeds inputs into the RNN in the following way: - * - * INPUT (AUX_INPUT_REVERSED) - * | | - * --------------------- - * | FW_RNN BW_RNN | - * --------------------- - * | | - * FW_OUT BW_OUT - * - * While stacking this op on top of itself, this allows to connect both - * forward and backward outputs from previous cell to the next cell's - * corresponding inputs. - * - * Supported tensor {@link OperandCode}: - * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} - * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} - * - * The input tensors must all be the same type. - * - * Inputs: - * * 0: input. - * A 3-D tensor. The shape is defined by the input 6 (timeMajor). If - * it is set to true, then the input has a shape [maxTime, batchSize, - * inputSize], otherwise the input has a shape [batchSize, maxTime, - * inputSize]. - * * 1: fwWeights. - * A 2-D tensor of shape [fwNumUnits, inputSize]. - * * 2: fwRecurrentWeights. - * A 2-D tensor of shape [fwNumUnits, fwNumUnits]. - * * 3: fwBias. - * A 1-D tensor of shape [fwNumUnits]. 
- * * 4: fwHiddenState. - * A 2-D tensor of shape [batchSize, fwNumUnits]. Specifies a hidden - * state input for the first time step of the computation. - * * 5: bwWeights. - * A 2-D tensor of shape [bwNumUnits, inputSize]. - * * 6: bwRecurrentWeights. - * A 2-D tensor of shape [bwNumUnits, bwNumUnits]. - * * 7: bwBias. - * A 1-D tensor of shape [bwNumUnits]. - * * 8: bwHiddenState - * A 2-D tensor of shape [batchSize, bwNumUnits]. Specifies a hidden - * state input for the first time step of the computation. - * * 9: auxInput. - * A 3-D tensor. The shape is defined by the input 6 (timeMajor). If - * it is set to true, then the input has a shape [maxTime, batchSize, - * auxInputSize], otherwise the input has a shape [batchSize, maxTime, - * auxInputSize]. Can be omitted. See the docs above for the usage - * modes explanation. - * * 10:fwAuxWeights. - * A 2-D tensor of shape [fwNumUnits, auxInputSize]. Can be omitted. - * See the docs above for the usage modes explanation. - * * 11:bwAuxWeights. - * A 2-D tensor of shape [bwNumUnits, auxInputSize]. Can be omitted. - * See the docs above for the usage modes explanation. - * * 12:fusedActivationFunction. - * A {@link FuseCode} value indicating the activation function. If - * “NONE” is specified then it results in a linear activation. - * * 13:timeMajor - * An {@link ANEURALNETWORKS_BOOL} scalar specifying the shape format - * of input and output tensors. - * * 14:mergeOutputs - * An {@link ANEURALNETWORKS_BOOL} scalar specifying if the outputs - * from forward and backward cells are separate (if set to false) or - * concatenated (if set to true). - * Outputs: - * * 0: fwOutput. - * A 3-D tensor. The first two dimensions of the shape are defined by - * the input 6 (timeMajor) and the third dimension is defined by the - * input 14 (mergeOutputs). If timeMajor is set to true, then the first - * two dimensions are [maxTime, batchSize], otherwise they are set to - * [batchSize, maxTime]. 
If mergeOutputs is set to true, then the third - * dimension is equal to (fwNumUnits + bwNumUnits), otherwise it is set - * to fwNumUnits. - * * 1: bwOutput. - * A 3-D tensor. If the input 14 (mergeOutputs) is set to true, then - * this tensor is not produced. The shape is defined by the input 6 - * (timeMajor). If it is set to true, then the shape is set to - * [maxTime, batchSize, bwNumUnits], otherwise the shape is set to - * [batchSize, maxTime, bwNumUnits]. - * * 2: The forward hidden state output. - * A 2-D tensor of shape [batchSize, fwNumUnits] containing a hidden - * state from the last time step in the sequence. This output is - * optional and can be omitted. If this output is present then output - * 3 must be present as well. - * Available since NNAPI feature level 4. - * * 3: The backward hidden state output. - * A 2-D tensor of shape [batchSize, bwNumUnits] containing a hidden - * state from the last time step in the sequence. This output is - * optional and can be omitted. If this output is present then output - * 2 must be present as well. - * Available since NNAPI feature level 4. - * - * Available since NNAPI feature level 3. - * - * Important: As of NNAPI feature level 3, there is no way to get the output state tensors out - * and NNAPI does not maintain internal states. This operator does not support the usage pattern - * in which multiple cells are chained and state tensors are propagated. - */ - ANEURALNETWORKS_BIDIRECTIONAL_SEQUENCE_RNN = 43, - - /** - * Greedily selects a subset of bounding boxes in descending order of score. - * - * This op applies NMS algorithm to each class. In each loop of execution, - * the box with maximum score gets selected and removed from the pending set. - * The scores of the rest of boxes are lowered according to the - * intersection-over-union (IOU) overlapping with the previously selected - * boxes and a specified NMS kernel method. Any boxes with score less - * than a threshold are removed from the pending set. 
- * - * Three NMS kernels are supported: - * * Hard: score_new = score_old * (1 if IoU < threshold else 0) - * * Linear: score_new = score_old * (1 if IoU < threshold else 1 - IoU) - * * Gaussian: score_new = score_old * exp(- IoU^2 / sigma) - * - * Axis-aligned bounding boxes are represented by its upper-left corner - * coordinate (x1,y1) and lower-right corner coordinate (x2,y2). A valid - * bounding box should satisfy x1 <= x2 and y1 <= y2. - * - * Supported tensor {@link OperandCode}: - * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} - * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4) - * - * Inputs: - * * 0: A 2-D Tensor of shape [num_rois, num_classes], specifying the score - * of each bounding box proposal. The boxes are grouped by batches in the - * first dimension. Zero num_rois is supported for this tensor. - * * 1: A 2-D Tensor specifying the bounding boxes of shape - * [num_rois, num_classes * 4], organized in the order [x1, y1, x2, y2]. - * The boxes are grouped by batches in the first dimension. The sequential - * order of the boxes corresponds with input0. For input0 of type - * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, this tensor should be of - * {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}, with zeroPoint of 0 and - * scale of 0.125. - * For input0 of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}, - * this tensor should be of {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}, - * with zeroPoint of -128 and scale of 0.125. - * Zero num_rois is supported for this tensor. - * * 2: A 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor, of shape - * [num_rois], specifying the batch index of each box. Boxes with - * the same batch index are grouped together. - * * 3: An {@link ANEURALNETWORKS_FLOAT32} scalar, score_threshold. Boxes - * with scores lower than the threshold are filtered before sending - * to the NMS algorithm. 
- * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the maximum - * number of selected bounding boxes for each image. Set to a negative - * value for unlimited number of output bounding boxes. - * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the NMS - * kernel method, options are 0:hard, 1:linear, 2:gaussian. - * * 6: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the IoU - * threshold in hard and linear NMS kernel. This field is ignored if - * gaussian kernel is selected. - * * 7: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the sigma in - * gaussian NMS kernel. This field is ignored if gaussian kernel is - * not selected. - * * 8: An {@link ANEURALNETWORKS_FLOAT32} scalar, nms_score_threshold. - * Boxes with scores lower than the threshold are dropped during the - * score updating phase in soft NMS. - * - * Outputs: - * * 0: A 1-D Tensor of the same {@link OperandCode} as input0, with shape - * [num_output_rois], specifying the score of each output box. The boxes - * are grouped by batches, but the sequential order in each batch is not - * guaranteed. For type of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} - * or {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}, - * the scale and zero point must be the same as input0. - * * 1: A 2-D Tensor of the same {@link OperandCode} as input1, with shape - * [num_output_rois, 4], specifying the coordinates of each - * output bounding box with the same format as input1. The sequential - * order of the boxes corresponds with output0. For type of - * {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}, the scale must be - * 0.125 and the zero point must be 0. - * * 2: A 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor, of shape - * [num_output_rois], specifying the class of each output box. The - * sequential order of the boxes corresponds with output0. 
- * * 3: A 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor, of shape - * [num_output_rois], specifying the batch index of each box. Boxes - * with the same batch index are grouped together. - * - * Available since NNAPI feature level 3. - */ - ANEURALNETWORKS_BOX_WITH_NMS_LIMIT = 44, - - /** - * Casts a tensor to a type. - * - * This operation ignores the scale and zeroPoint of quanized tensors, - * e.g. it treats a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} input - * as a tensor of uint8 values. - * - * Supported tensor {@link OperandCode}: - * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} - * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} - * * {@link ANEURALNETWORKS_TENSOR_INT32} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} - * Since NNAPI feature level 4, casting tensors of the following - * {@link OperandCode} to the same {@link OperandCode} is supported: - * * {@link ANEURALNETWORKS_TENSOR_BOOL8} - * * {@link ANEURALNETWORKS_TENSOR_INT32} - * * {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM} - * * {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM} - * - * Supported tensor rank: from 1 - * - * Inputs: - * * 0: A tensor. - * - * Outputs: - * * 0: A tensor with the same shape as input0. - * - * Available since NNAPI feature level 3. - */ - ANEURALNETWORKS_CAST = 45, - - /** - * Shuffle the channels of the input tensor. - * - * Given an input tensor and a integer value of num_groups, CHANNEL_SHUFFLE - * divide the channel dimension into num_groups groups, and reorganize the - * channels by grouping channels with the same index in each group. - * - * Along the channel dimension, the output is calculated using this formula: - * - * output_channel[k * num_groups + g] = input_channel[g * group_size + k] - * - * where group_size = num_channels / num_groups - * - * The number of channels must be divisible by num_groups. 
- * - * Supported tensor {@link OperandCode}: - * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} - * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4) - * - * Supported tensor rank: up to 4 - * - * Inputs: - * * 0: An n-D tensor, specifying the tensor to be shuffled. - * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the number of - * groups. - * * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the dimension - * channel shuffle would be performed on. Negative index is used to - * specify axis from the end (e.g. -1 for the last axis). Must be in - * the range [-n, n). - * - * Outputs: - * * 0: A tensor of the same {@link OperandCode} and same shape as input0. - * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and - * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, - * the scale and zeroPoint must be the same as input0. - * - * Available since NNAPI feature level 3. - */ - ANEURALNETWORKS_CHANNEL_SHUFFLE = 46, - - /** - * Apply postprocessing steps to bounding box detections. - * - * Bounding box detections are generated by applying transformation on a set - * of predefined anchors with the bounding box deltas from bounding box - * regression. A final step of hard NMS is applied to limit the number of - * returned boxes. - * - * Supported tensor {@link OperandCode}: - * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} - * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} - * - * Inputs: - * * 0: A 3-D Tensor of shape [batches, num_anchors, num_classes], specifying - * the score of each anchor with each class. Class 0 for each - * [batches, num_anchors, 0] is background and will be ignored. - * * 1: A 3-D Tensor of shape [batches, num_anchors, length_box_encoding], with - * the first four values in length_box_encoding specifying the bounding - * box deltas. 
The box deltas are encoded in the order of [dy, dx, dh, dw], - * where dy and dx is the linear-scale relative correction factor for the - * center position of the bounding box with respect to the width and height, - * dh and dw is the log-scale relative correction factor for the width and - * height. All the entries in length_box_encoding beyond the first four - * values are ignored in this operation. - * * 2: A 2-D Tensor of shape [num_anchors, 4], specifying the shape of each - * predefined anchor, with format [ctr_y, ctr_x, h, w], where ctr_y and - * ctr_x are the center position of the box, and h and w are the height - * and the width. - * * 3: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the scaling - * factor for dy in bounding box deltas. - * * 4: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the scaling - * factor for dx in bounding box deltas. - * * 5: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the scaling - * factor for dh in bounding box deltas. - * * 6: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the scaling - * factor for dw in bounding box deltas. - * * 7: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to use regular - * multi-class NMS algorithm that do NMS separately for each class, - * set to false for a faster algorithm that only do one single NMS - * using the highest class score.. - * * 8: An {@link ANEURALNETWORKS_INT32} scalar, max_num_detections, specifying - * the maximum number of boxes for the output. Boxes with the lowest - * scores are discarded to meet the limit. - * * 9: An {@link ANEURALNETWORKS_INT32} scalar, only used when input7 is - * set to false, specifying the maximum number of classes per detection. - * * 10: An {@link ANEURALNETWORKS_INT32} scalar, only used when input7 is - * set to true, specifying the maximum number of detections when - * applying NMS algorithm for each single class. - * * 11: A scalar, score_threshold. 
Boxes with scores lower than the - * threshold are filtered before sending to the NMS algorithm. The - * scalar must be of {@link ANEURALNETWORKS_FLOAT16} if input0 is of - * {@link ANEURALNETWORKS_TENSOR_FLOAT16} and of - * {@link ANEURALNETWORKS_FLOAT32} if input0 is of - * {@link ANEURALNETWORKS_TENSOR_FLOAT32}. - * * 12: A scalar, specifying the IoU threshold for hard NMS. The scalar - * must be of {@link ANEURALNETWORKS_FLOAT16} if input0 is of - * {@link ANEURALNETWORKS_TENSOR_FLOAT16} and of - * {@link ANEURALNETWORKS_FLOAT32} if input0 is of - * {@link ANEURALNETWORKS_TENSOR_FLOAT32}. - * * 13: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to include - * background class in the list of label map for the output, set - * to false to not include the background. When the background - * class is included, it has label 0 and the output classes start - * at 1 in the label map, otherwise, the output classes start at 0. - * - * Outputs: - * * 0: A 2-D tensor of the same {@link OperandCode} as input0, with shape - * [batches, max_num_detections], specifying the score of each output - * detections. - * * 1: A 3-D tensor of shape [batches, max_num_detections, 4], specifying the - * coordinates of each output bounding box, with format - * [y1, x1, y2, x2]. - * * 2: A 2-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor, of shape - * [batches, max_num_detections], specifying the class label for each - * output detection. - * * 3: An 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor, of shape [batches], - * specifying the number of valid output detections for each batch. - * - * Available since NNAPI feature level 3. - */ - ANEURALNETWORKS_DETECTION_POSTPROCESSING = 47, - - /** - * For input tensors x and y, computes x == y elementwise. 
- * - * Supported tensor {@link OperandCode}: - * * {@link ANEURALNETWORKS_TENSOR_BOOL8} - * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} - * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} - * * {@link ANEURALNETWORKS_TENSOR_INT32} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4) - * - * Supported tensor rank: from 1 - * - * This operation supports broadcasting. - * - * Inputs: - * * 0: A tensor. - * * 1: A tensor of the same {@link OperandCode} and dimensions compatible - * with input0. - * - * Outputs: - * * 0: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8}. - * - * Available since NNAPI feature level 3. - */ - ANEURALNETWORKS_EQUAL = 48, - - /** - * Computes exponential of x element-wise. - * - * Supported tensor {@link OperandCode}: - * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} - * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} - * - * Supported tensor rank: from 1. - * - * Inputs: - * * 0: A tensor. - * - * Outputs: - * * 0: The output tensor of same shape as input0. - * - * Available since NNAPI feature level 3. - */ - ANEURALNETWORKS_EXP = 49, - - /** - * Inserts a dimension of 1 into a tensor's shape. - * - * Given a tensor input, this operation inserts a dimension of 1 at the - * given dimension index of input's shape. The dimension index starts at - * zero; if you specify a negative dimension index, it is counted backward - * from the end. - * - * Supported tensor {@link OperandCode}: - * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} - * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} - * * {@link ANEURALNETWORKS_TENSOR_INT32} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4) - * - * Supported tensor rank: from 1 - * - * Inputs: - * * 0: An n-D tensor. - * * 1: An {@link ANEURALNETWORKS_INT32} scalar specifying the dimension - * index to expand. Must be in the range [-(n + 1), (n + 1)). 
- * - * Outputs: - * * 0: An (n + 1)-D tensor with the same {@link OperandCode} and data as - * input0. - * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and - * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, - * the scale and zeroPoint must be the same as input0. - * - * Available since NNAPI feature level 3. - */ - ANEURALNETWORKS_EXPAND_DIMS = 50, - - /** - * Gathers values along an axis. - * - * Produces an output tensor with shape - * input0.dimension[:axis] + indices.dimension + input0.dimension[axis + 1:] - * where: - * # Vector indices (output is rank(input0)). - * output[a_0, ..., a_n, i, b_0, ..., b_n] = - * input0[a_0, ..., a_n, indices[i], b_0, ..., b_n] - * - * # Higher rank indices (output is rank(input0) + rank(indices) - 1). - * output[a_0, ..., a_n, i, ..., j, b_0, ... b_n] = - * input0[a_0, ..., a_n, indices[i, ..., j], b_0, ..., b_n] - * - * Supported tensor {@link OperandCode}: - * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} - * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} - * * {@link ANEURALNETWORKS_TENSOR_INT32} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4) - * - * Supported tensor rank: from 1 - * - * Inputs: - * * 0: An n-D tensor from which to gather values. - * * 1: An {@link ANEURALNETWORKS_INT32} scalar specifying the axis. - * Negative index is used to specify axis from the end - * (e.g. -1 for the last axis). Must be in the range [-n, n). - * * 2: A k-D tensor {@link ANEURALNETWORKS_TENSOR_INT32} of indices. - * The values must be in the bounds of the corresponding dimensions - * of input0. - * - * Outputs: - * * 0: An (n + k - 1)-D tensor with the same {@link OperandCode} as input0. - * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and - * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, - * the scale and zeroPoint must be the same as input0. - * - * Available since NNAPI feature level 3. 
- */ - ANEURALNETWORKS_GATHER = 51, - - /** - * Generate axis-aligned bounding box proposals. - * - * Bounding box proposals are generated by applying transformation on a set - * of predefined anchors with the bounding box deltas from bounding box - * regression. A final step of hard NMS is applied to limit the number of - * returned boxes. - * - * Axis-aligned bounding boxes are represented by its upper-left corner - * coordinate (x1,y1) and lower-right corner coordinate (x2,y2). A valid - * bounding box should satisfy x1 <= x2 and y1 <= y2. - * - * Supported tensor {@link OperandCode}: - * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} - * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4) - * - * Inputs: - * * 0: A 4-D Tensor specifying the score of each anchor at each - * location. With "NHWC" data layout, the tensor shape is - * [batches, height, width, num_anchors]. With "NCHW" data layout, - * the tensor shape is [batches, num_anchors, height, width]. - * * 1: A 4-D Tensor specifying the bounding box deltas. With "NHWC" data - * layout, the tensor shape is [batches, height, width, num_anchors * 4]. - * With "NCHW" data layout, the tensor shape is - * [batches, num_anchors * 4, height, width]. The box deltas are encoded - * in the order of [dx, dy, dw, dh], where dx and dy is the linear-scale - * relative correction factor for the center position of the bounding box - * with respect to the width and height, dw and dh is the log-scale - * relative correction factor for the width and height. The last - * dimensions is the channel dimension. - * * 2: A 2-D Tensor of shape [num_anchors, 4], specifying the shape of each - * predefined anchor, with format [x1, y1, x2, y2]. 
For input0 of type - * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} or - * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}, this tensor should be of - * {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM}, with scale of 0.125. - * * 3: A 2-D Tensor of shape [batches, 2], specifying the size of - * each image in the batch, with format [image_height, image_width]. - * For input0 of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} or - * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}, this - * tensor should be of {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM}, with - * scale of 0.125. - * * 4: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the ratio - * from the height of original image to the height of feature map. - * * 5: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the ratio - * from the width of original image to the width of feature map. - * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the maximum - * number of boxes before going into the hard NMS algorithm. Boxes - * with the lowest scores are discarded to meet the limit. Set to - * a non-positive value for unlimited number. - * * 7: An {@link ANEURALNETWORKS_INT32} scalar, specifying the maximum - * number of boxes returning from the hard NMS algorithm. Boxes - * with the lowest scores are discarded to meet the limit. Set to - * a non-positive value for unlimited number. - * * 8: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the IoU - * threshold for hard NMS. - * * 9: An {@link ANEURALNETWORKS_FLOAT32} scalar, min_size. Boxes with - * height or width lower than the absolute threshold are filtered out. - * * 10: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to specify - * NCHW data layout for input0 and input1. Set to false for NHWC. - * - * Outputs: - * * 0: A tensor of the same {@link OperandCode} as input0, of shape - * [num_output_rois], specifying the score of each output box. 
- * The boxes are grouped by batches, but the sequential order in - * each batch is not guaranteed. For type of - * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} or - * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}, the scale and zero - * point must be the same as input0. - * * 1: A tensor of the same {@link OperandCode} as input3, of shape - * [num_output_rois, 4], specifying the coordinates of each output - * bounding box for each class, with format [x1, y1, x2, y2]. - * The sequential order of the boxes corresponds with output0. - * For type of {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}, the - * scale must be 0.125 and the zero point must be 0. - * * 2: A 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor, of shape - * [num_output_rois], specifying the batch index of each box. Boxes - * with the same batch index are grouped together. - * - * Available since NNAPI feature level 3. - */ - ANEURALNETWORKS_GENERATE_PROPOSALS = 52, - - /** - * For input tensors x and y, computes x > y elementwise. - * - * Supported tensor {@link OperandCode}: - * * {@link ANEURALNETWORKS_TENSOR_BOOL8} - * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} - * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} - * * {@link ANEURALNETWORKS_TENSOR_INT32} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4) - * - * Supported tensor rank: from 1 - * - * This operation supports broadcasting. - * - * Inputs: - * * 0: A tensor. - * * 1: A tensor of the same {@link OperandCode} and dimensions compatible - * with input0. - * - * Outputs: - * * 0: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8}. - * - * Available since NNAPI feature level 3. - */ - ANEURALNETWORKS_GREATER = 53, - /** - * For input tensors x and y, computes x >= y elementwise. 
- * - * Supported tensor {@link OperandCode}: - * * {@link ANEURALNETWORKS_TENSOR_BOOL8} - * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} - * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} - * * {@link ANEURALNETWORKS_TENSOR_INT32} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4) - * - * Supported tensor rank: from 1 - * - * This operation supports broadcasting. - * - * Inputs: - * * 0: A tensor. - * * 1: A tensor of the same {@link OperandCode} and dimensions compatible - * with input0. - * - * Outputs: - * * 0: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8}. - * - * Available since NNAPI feature level 3. - */ - ANEURALNETWORKS_GREATER_EQUAL = 54, - - /** - * Performs a grouped 2-D convolution operation. - * - * Given an input tensor of shape [batches, height, width, depth_in] and a - * filter tensor of shape [depth_out, filter_height, filter_width, depth_group] - * containing depth_out convolutional filters of depth depth_group, GROUPED_CONV - * applies a group of different filters to each input channel group, then - * concatenates the results together. - * - * Specifically, the input channels are divided into num_groups groups, each with - * depth depth_group, i.e. depth_in = num_groups * depth_group. The convolutional - * filters are also divided into num_groups groups, i.e. depth_out is divisible - * by num_groups. GROUPED_CONV applies each group of filters to the corresponding - * input channel group, and the result are concatenated together. - * - * The output dimensions are functions of the filter dimensions, stride, and - * padding. 
- * - * The values in the output tensor are computed as: - * - * output[b, i, j, g * channel_multiplier + q] = - * sum_{di, dj, dk} ( - * input[b, strides[1] * i + di, strides[2] * j + dj, - * g * depth_group + dk] * - * filter[g * channel_multiplier + q, di, dj, dk] - * ) + bias[channel] - * - * where channel_multiplier = depth_out / num_groups - * - * Supported tensor {@link OperandCode} configurations: - * * 16 bit floating point: - * * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} for input, filter, output, and bias. - * - * * 32 bit floating point: - * * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} for input, filter, output, and bias. - * - * * Quantized: - * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} for input, filter, and output. - * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (with scale set to - * * * input.scale * filter.scale). - * - * * Quantized signed (since NNAPI feature level 4): - * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} for input, filter, and output. - * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (with scale set to - * * * input.scale * filter.scale). - * - * * Quantized with symmetric per channel quantization for the filter: - * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} for input, and output. - * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter. - * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (scale set to 0.0, - * * * each value scaling is separate and equal to input.scale * filter.scales[channel]). - * - * * Quantized signed with filter symmetric per channel quantization - * (since NNAPI feature level 4): - * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} for input, and output. - * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter. - * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (scale set to 0.0, - * * * each value scaling is separate and equal to input.scale * filter.scales[channel]). - * - * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout. 
- * With the default data layout NHWC, the data is stored in the order of: - * [batch, height, width, channels]. Alternatively, the data layout could - * be NCHW, the data storage order of: [batch, channels, height, width]. - * - * Both explicit padding and implicit padding are supported. - * - * Inputs (explicit padding): - * * 0: A 4-D tensor, of shape [batches, height, width, depth_in], - * specifying the input, where depth_in = num_groups * depth_group. - * * 1: A 4-D tensor, of shape - * [depth_out, filter_height, filter_width, depth_group], specifying - * the filter, where depth_out must be divisible by num_groups. For - * tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} - * the channel dimension (channelDim at - * {@link ANeuralNetworksSymmPerChannelQuantParams}) must be set to 0. - * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input - * tensor of type {@link ANEURALNETWORKS_TENSOR_FLOAT32} or - * {@link ANEURALNETWORKS_TENSOR_FLOAT16}, the bias must be of the same type. - * For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and - * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} - * the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint - * of 0 and bias_scale == input_scale * filter_scale. For filter tensor - * of {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL}, the bias - * should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint of - * 0 and bias_scale of 0. The actual scale of each value 'i' is equal to - * bias_scale[i] = input_scale * filter_scale[i]. - * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on - * the left, in the ‘width’ dimension. - * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on - * the right, in the ‘width’ dimension. - * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on - * the top, in the ‘height’ dimension. 
- * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on - * the bottom, in the ‘height’ dimension. - * * 7: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when - * walking through input in the ‘width’ dimension. - * * 8: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when - * walking through input in the ‘height’ dimension. - * * 9: An {@link ANEURALNETWORKS_INT32} scalar, specifying the number of - * groups. - * * 10: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the - * {@link FuseCode} values. Specifies the activation to - * invoke on the result. - * * 11: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to specify - * NCHW data layout for input0 and output0. Set to false for NHWC. - * - * Inputs (implicit padding): - * * 0: A 4-D tensor, of shape [batches, height, width, depth_in], - * specifying the input, where depth_in = num_groups * depth_group. - * * 1: A 4-D tensor, of shape - * [depth_out, filter_height, filter_width, depth_group], specifying - * the filter, where depth_out must be divisible by num_groups. For - * tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} - * the channel dimension (ANeuralNetworksSymmPerChannelQuantParams::channelDim) - * must be set to 0. - * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input - * tensor of type {@link ANEURALNETWORKS_TENSOR_FLOAT32} or - * {@link ANEURALNETWORKS_TENSOR_FLOAT16}, the bias must be of the same - * {@link ANEURALNETWORKS_TENSOR_FLOAT16}, the bias must be of the same type. - * For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and - * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} - * the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint - * of 0 and bias_scale == input_scale * filter_scale. 
For filter tensor - * of {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL}, the bias - * should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint of - * 0 and bias_scale of 0. The actual scale of each value 'i' is equal to - * bias_scale[i] = input_scale * filter_scale[i]. - * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the implicit - * padding scheme, has to be one of the - * {@link PaddingCode} values. - * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when - * walking through input in the ‘width’ dimension. - * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when - * walking through input in the ‘height’ dimension. - * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the number of - * groups. - * * 7: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the - * {@link FuseCode} values. Specifies the activation to - * invoke on the result. - * * 8: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to specify - * NCHW data layout for input0 and output0. Set to false for NHWC. - * - * Outputs: - * * 0: The output 4-D tensor, of shape - * [batches, out_height, out_width, depth_out]. - * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and - * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, - * the scale and zeroPoint can be different from inputs' scale and zeroPoint. - * - * Available since NNAPI feature level 3. - */ - ANEURALNETWORKS_GROUPED_CONV_2D = 55, - - /** - * Localize the maximum keypoints from heatmaps. - * - * This operation approximates the accurate maximum keypoint scores and - * indices after bicubic upscaling by using Taylor expansion up to the - * quadratic term. - * - * The bounding box is represented by its upper-left corner coordinate - * (x1,y1) and lower-right corner coordinate (x2,y2) in the original image. - * A valid bounding box should satisfy x1 <= x2 and y1 <= y2. 
- * - * Supported tensor {@link OperandCode}: - * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} - * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4) - * - * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout. - * With the default data layout NHWC, the data is stored in the order of: - * [batch, height, width, channels]. Alternatively, the data layout could - * be NCHW, the data storage order of: [batch, channels, height, width]. - * - * Inputs: - * * 0: A 4-D Tensor of shape - * [num_boxes, heatmap_size, heatmap_size, num_keypoints], - * specifying the heatmaps, the height and width of heatmaps should - * be the same, and must be greater than or equal to 2. - * * 1: A 2-D Tensor of shape [num_boxes, 4], specifying the bounding boxes, - * each with format [x1, y1, x2, y2]. For input0 of type - * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, this tensor should - * be of {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}, with zeroPoint - * of 0 and scale of 0.125. - * For input0 of type - * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}, this tensor - * should be of {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}, with - * zeroPoint of -128 and scale of 0.125. - * * 2: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to specify - * NCHW data layout for input0. Set to false for NHWC. - * - * Outputs: - * * 0: A tensor of the same {@link OperandCode} as input0, with shape - * [num_boxes, num_keypoints], specifying score of the keypoints. - * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} or - * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, - * the scale and zeroPoint can be different from input0 scale and zeroPoint. 
- * * 1: A tensor of the same {@link OperandCode} as input1, with shape - * [num_boxes, num_keypoints, 2], specifying the location of - * the keypoints, the second dimension is organized as - * [keypoint_x, keypoint_y]. - * For type of {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}, the - * scale must be 0.125 and the zero point must be 0. - * - * Available since NNAPI feature level 3. - */ - ANEURALNETWORKS_HEATMAP_MAX_KEYPOINT = 56, - - /** - * Applies instance normalization to the input tensor. - * - * The values in the output tensor are computed as: - * - * output[b, h, w, c] = - * (input[b, h, w, c] - mean[b, c]) * gamma / - * sqrt(var[b, c] + epsilon) + beta - * - * Where the mean and variance are computed across the spatial dimensions: - * - * mean[b, c] = - * sum_{h, w}(input[b, h, w, c]) / sum(1) - * - * var[b, c] = - * sum_{h, w}(pow(input[b, h, w, c] - mean[b, c], 2)) / sum(1) - * - * Supported tensor {@link OperandCode}: - * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} - * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} - * - * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout. - * With the default data layout NHWC, the data is stored in the order of: - * [batch, height, width, channels]. Alternatively, the data layout could - * be NCHW, the data storage order of: [batch, channels, height, width]. - * - * Inputs: - * * 0: An n-D tensor, specifying the tensor to be normalized. - * * 1: A scalar, specifying gamma, the scale applied to the normalized - * tensor. The scalar must be of {@link ANEURALNETWORKS_FLOAT16} if - * input0 is of {@link ANEURALNETWORKS_TENSOR_FLOAT16} and of - * {@link ANEURALNETWORKS_FLOAT32} if input0 is of - * {@link ANEURALNETWORKS_TENSOR_FLOAT32}. - * * 2: A scalar, specifying beta, the offset applied to the normalized - * tensor. 
The scalar must be of {@link ANEURALNETWORKS_FLOAT16} if - * input0 is of {@link ANEURALNETWORKS_TENSOR_FLOAT16} and of - * {@link ANEURALNETWORKS_FLOAT32} if input0 is of - * {@link ANEURALNETWORKS_TENSOR_FLOAT32}. - * * 3: A scalar, specifying epsilon, the small value added to variance to - * avoid dividing by zero. The scalar must be of {@link ANEURALNETWORKS_FLOAT16} if - * input0 is of {@link ANEURALNETWORKS_TENSOR_FLOAT16} and of - * {@link ANEURALNETWORKS_FLOAT32} if input0 is of - * {@link ANEURALNETWORKS_TENSOR_FLOAT32}. - * * 4: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to specify - * NCHW data layout for input0 and output0. Set to false for NHWC. - * - * Outputs: - * * 0: A tensor of the same {@link OperandCode} and same shape as input0. - * - * Available since NNAPI feature level 3. - */ - ANEURALNETWORKS_INSTANCE_NORMALIZATION = 57, - - /** - * For input tensors x and y, computes x < y elementwise. - * - * Supported tensor {@link OperandCode}: - * * {@link ANEURALNETWORKS_TENSOR_BOOL8} - * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} - * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} - * * {@link ANEURALNETWORKS_TENSOR_INT32} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4) - * - * Supported tensor rank: from 1 - * - * This operation supports broadcasting. - * - * Inputs: - * * 0: A tensor. - * * 1: A tensor of the same {@link OperandCode} and dimensions compatible - * with input0. - * - * Outputs: - * * 0: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8}. - * - * Available since NNAPI feature level 3. - */ - ANEURALNETWORKS_LESS = 58, - - /** - * For input tensors x and y, computes x <= y elementwise. 
- * - * Supported tensor {@link OperandCode}: - * * {@link ANEURALNETWORKS_TENSOR_BOOL8} - * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} - * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} - * * {@link ANEURALNETWORKS_TENSOR_INT32} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4) - * - * Supported tensor rank: from 1 - * - * This operation supports broadcasting. - * - * Inputs: - * * 0: A tensor. - * * 1: A tensor of the same {@link OperandCode} and dimensions compatible - * with input0. - * - * Outputs: - * * 0: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8}. - * - * Available since NNAPI feature level 3. - */ - ANEURALNETWORKS_LESS_EQUAL = 59, - - /** - * Computes natural logarithm of x element-wise. - * - * Supported tensor {@link OperandCode}: - * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} - * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} - * - * Supported tensor rank: from 1. - * - * Inputs: - * * 0: A tensor. - * - * Outputs: - * * 0: The output tensor of same shape as input0. - * - * Available since NNAPI feature level 3. - */ - ANEURALNETWORKS_LOG = 60, - - /** - * Returns the truth value of x AND y element-wise. - * - * Supported tensor {@link OperandCode}: - * * {@link ANEURALNETWORKS_TENSOR_BOOL8} - * - * Supported tensor rank: from 1 - * - * This operation supports broadcasting. - * - * Inputs: - * * 0: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8}. - * * 1: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8} and dimensions - * compatible with input0. - * - * Outputs: - * * 0: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8}. - * - * Available since NNAPI feature level 3. - */ - ANEURALNETWORKS_LOGICAL_AND = 61, - - /** - * Computes the truth value of NOT x element-wise. - * - * Supported tensor {@link OperandCode}: - * * {@link ANEURALNETWORKS_TENSOR_BOOL8} - * - * Supported tensor rank: from 1. - * - * Inputs: - * * 0: A tensor. 
- * - * Outputs: - * * 0: The output tensor of same shape as input0. - * - * Available since NNAPI feature level 3. - */ - ANEURALNETWORKS_LOGICAL_NOT = 62, - - /** - * Returns the truth value of x OR y element-wise. - * - * Supported tensor {@link OperandCode}: - * * {@link ANEURALNETWORKS_TENSOR_BOOL8} - * - * Supported tensor rank: from 1 - * - * This operation supports broadcasting. - * - * Inputs: - * * 0: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8}. - * * 1: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8} and dimensions - * compatible with input0. - * - * Outputs: - * * 0: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8}. - * - * Available since NNAPI feature level 3. - */ - ANEURALNETWORKS_LOGICAL_OR = 63, - - /** - * Computes the log softmax activations given logits. - * - * The output is calculated using this formula: - * - * output = logits * beta - log(reduce_sum(exp(logits * beta), axis)) - * - * Supported tensor {@link OperandCode}: - * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} - * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} - * - * Supported tensor rank: from 1. - * - * Inputs: - * * 0: A tensor specifying the input logits. - * * 1: A scalar, specifying the positive scaling factor for the exponent, - * beta. - * For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT16}, the beta - * value must be of {@link ANEURALNETWORKS_FLOAT16}. - * For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, the beta - * value must be of {@link ANEURALNETWORKS_FLOAT32}. - * * 2: An {@link ANEURALNETWORKS_INT32} scalar specifying the axis to - * reduce across. Negative index is used to specify axis from the - * end (e.g. -1 for the last axis). Must be in the range [-n, n). - * - * Outputs: - * * 0: The output tensor of the same {@link OperandCode} and shape as - * input0. - * - * Available since NNAPI feature level 3. - */ - ANEURALNETWORKS_LOG_SOFTMAX = 64, - - /** - * Returns the element-wise maximum of two tensors. 
- * - * Supported tensor {@link OperandCode}: - * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} - * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} - * * {@link ANEURALNETWORKS_TENSOR_INT32} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4) - * - * Supported tensor rank: from 1. - * - * Inputs: - * * 0: A tensor. - * * 1: A tensor of the same {@link OperandCode} and compatible dimensions - * with input0. - * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor, - * the scales and zeroPoint can be different from input0 scale and zeroPoint. - * - * Outputs: - * * 0: A tensor of the same {@link OperandCode} as input0. - * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and - * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor, - * the scale and zeroPoint can be different from inputs' scale and zeroPoint. - * - * Available since NNAPI feature level 3. - */ - ANEURALNETWORKS_MAXIMUM = 65, - - /** - * Returns the element-wise minimum of two tensors. - * - * Supported tensor {@link OperandCode}: - * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} - * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} - * * {@link ANEURALNETWORKS_TENSOR_INT32} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4) - * - * Supported tensor rank: from 1. - * - * Inputs: - * * 0: A tensor. - * * 1: A tensor of the same {@link OperandCode} and compatible dimensions - * with input0. - * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor, - * the scales and zeroPoint can be different from input0 scale and zeroPoint. - * - * Outputs: - * * 0: A tensor of the same {@link OperandCode} as input0. - * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and - * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor, - * the scale and zeroPoint can be different from inputs' scale and zeroPoint. - * - * Available since NNAPI feature level 3. 
- */ - ANEURALNETWORKS_MINIMUM = 66, - - /** - * Computes numerical negative value element-wise. - * - * Supported tensor {@link OperandCode}: - * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} - * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} - * * {@link ANEURALNETWORKS_TENSOR_INT32} - * - * Supported tensor rank: from 1. - * - * Inputs: - * * 0: A tensor. - * - * Outputs: - * * 0: The output tensor of same shape as input0. - * - * Available since NNAPI feature level 3. - */ - ANEURALNETWORKS_NEG = 67, - - /** - * For input tensors x and y, computes x != y elementwise. - * - * Supported tensor {@link OperandCode}: - * * {@link ANEURALNETWORKS_TENSOR_BOOL8} - * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} - * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} - * * {@link ANEURALNETWORKS_TENSOR_INT32} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4) - * - * Supported tensor rank: from 1 - * - * This operation supports broadcasting. - * - * Inputs: - * * 0: A tensor. - * * 1: A tensor of the same {@link OperandCode} and dimensions compatible - * with input0. - * - * Outputs: - * * 0: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8}. - * - * Available since NNAPI feature level 3. - */ - ANEURALNETWORKS_NOT_EQUAL = 68, - - /** - * Pads a tensor with the given constant value according to the specified - * paddings. - * - * Supported tensor {@link OperandCode}: - * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} - * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4) - * - * Supported tensor rank: up to 4 - * - * Inputs: - * * 0: An n-D tensor, specifying the tensor to be padded. - * * 1: A 2-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, the paddings - * for each spatial dimension of the input tensor. The shape of the - * tensor must be {rank(input0), 2}. 
- * padding[i, 0] specifies the number of elements to be padded in the - * front of dimension i. - * padding[i, 1] specifies the number of elements to be padded after - * the end of dimension i. - * * 2: A scalar specifying the value to use for padding input0. - * For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT16}, the - * pad value must be of {@link ANEURALNETWORKS_FLOAT16}. - * For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, the - * pad value must be of {@link ANEURALNETWORKS_FLOAT32}. - * For input tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and - * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}, - * the pad value must be of {@link ANEURALNETWORKS_INT32}. The - * scale and zeroPoint are assumed to be the same as in input0. - * - * Outputs: - * * 0: A tensor of the same {@link OperandCode} as input0. The - * output tensor has the same rank as input0, and each - * dimension of the output tensor has the same size as the - * corresponding dimension of the input tensor plus the size - * of the padding: - * output0.dimension[i] = - * padding[i, 0] + input0.dimension[i] + padding[i, 1] - * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and - * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, - * the scale and zeroPoint must be the same as input0. - * - * Available since NNAPI feature level 3. - */ - ANEURALNETWORKS_PAD_V2 = 69, - - /** - * Computes the power of one value to another. - * - * Given a tensor base and a tensor exponent, this operation computes - * base^exponent elementwise. - * - * This operations supports broadcasting. The size of the output is the - * maximum size along each dimension of the input operands. It starts with - * the trailing dimensions, and works its way forward. 
- * - * For example: - * base.dimension = {4, 1, 2} - * exponent.dimension = {5, 4, 3, 1} - * output.dimension = {5, 4, 3, 2} - * - * Supported tensor {@link OperandCode}: - * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} - * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} - * - * Supported tensor rank: from 1 - * - * Inputs: - * * 0: A tensor specifying the base. - * * 1: A tensor specifying the exponent. - * - * Outputs: - * * 0: An output tensor. - * - * Available since NNAPI feature level 3. - */ - ANEURALNETWORKS_POW = 70, - - /** - * Parametric Rectified Linear Unit. - * - * It follows: f(x) = alpha * x for x < 0, f(x) = x for x >= 0, where alpha - * is a learned array with the same {@link OperandCode} and compatible - * dimensions as input x. - * - * Two dimensions are compatible when: - * 1. they are equal, or - * 2. one of them is 1 - * - * The size of the output is the maximum size along each dimension of the - * input operands. It starts with the trailing dimensions, and works its way - * forward. - * - * Example: - * input.dimension = {4, 1, 2} - * alpha.dimension = {5, 4, 3, 1} - * output.dimension = {5, 4, 3, 2} - * - * Supported tensor {@link OperandCode}: - * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} - * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4) - * - * Supported tensor rank: from 1 - * - * Inputs: - * * 0: A tensor, specifying the input. - * * 1: A tensor of the same {@link OperandCode}, and compatible dimensions - * as input0, specifying the alpha. - * - * Outputs: - * * 0: A tensor of the same {@link OperandCode} as input0. - * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and - * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, - * the scales and zeroPoint can be different from input0 scale and zeroPoint. - * - * Available since NNAPI feature level 3. 
- */ - ANEURALNETWORKS_PRELU = 71, - - /** - * Quantizes the input tensor. - * - * The formula for {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} output tensor is: - * - * output = max(0, min(255, round(input / scale) + zeroPoint) - * - * The formula for {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} output - * tensor is: - * - * output = max(-128, min(127, round(input / scale) + zeroPoint) - * - * Supported input tensor {@link OperandCode}: - * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} - * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} - * - * Supported output tensor {@link OperandCode}: - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4) - * - * Supported tensor rank: from 1 - * - * Inputs: - * * 0: A tensor, may be zero-sized. - * - * Outputs: - * * 0: The output tensor of same shape as input0, but with - * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} or. - * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}. - * - * Available since NNAPI feature level 3. - */ - ANEURALNETWORKS_QUANTIZE = 72, - - /** - * A version of quantized LSTM, using 16 bit quantization for internal - * state. - * - * There is no projection layer, so cell state size is equal to the output - * size. - * - * Inputs: - * * 0: A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} - * and shape [numBatches, inputSize] specifying the input to the LSTM - * cell. Tensor is quantized with a fixed quantization range of - * [-1, 127/128] (scale = 1/128, zeroPoint = 128). - * * 1: The input-to-input weights. - * A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} - * and shape [outputSize, inputSize] specifying input-to-input part of - * weights for fully-connected layer inside the LSTM cell. - * Quantization zero point and scale must be the same across all the - * weights. - * * 2: The input-to-forget weights. 
- * A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} - * and shape [outputSize, inputSize] specifying input-to-forget part of - * weights for fully-connected layer inside the LSTM cell. - * Quantization zero point and scale must be the same across all the - * weights. - * * 3: The input-to-cell weights. - * A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} - * and shape [outputSize, inputSize] specifying input-to-cell part of - * weights for fully-connected layer inside the LSTM cell. - * Quantization zero point and scale must be the same across all the - * weights. - * * 4: The input-to-output weights. - * A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} - * and shape [outputSize, inputSize] specifying input-to-output part of - * weights for fully-connected layer inside the LSTM cell. - * Quantization zero point and scale must be the same across all the - * weights. - * * 5: The recurrent-to-input weights. - * A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} - * and shape [outputSize, outputSize] specifying recurrent-to-input part - * of weights for fully-connected layer inside the LSTM cell. - * Quantization zero point and scale must be the same across all the - * weights. - * * 6: The recurrent-to-forget weights. - * A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} - * and shape [outputSize, outputSize] specifying recurrent-to-forget - * part of weights for fully-connected layer inside the LSTM cell. - * Quantization zero point and scale must be the same across all the - * weights. - * * 7: The recurrent-to-cell weights. - * A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} - * and shape [outputSize, outputSize] specifying recurrent-to-cell part - * of weights for fully-connected layer inside the LSTM cell. - * Quantization zero point and scale must be the same across all the - * weights. - * * 8: The recurrent-to-output weights. 
- * A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} - * and shape [outputSize, outputSize] specifying recurrent-to-output - * part of weights for fully-connected layer inside the LSTM cell. - * Quantization zero point and scale must be the same across all the - * weights. - * * 9: The input gate bias. - * A 1-D tensor of type {@link ANEURALNETWORKS_TENSOR_INT32} and shape - * [outputSize] specifying the bias for the fully-connected layer - * inside the LSTM cell. Bias is quantized with scale being a product - * of input and weights scales and zeroPoint equal to 0. - * * 10:The forget gate bias. - * A 1-D tensor of type {@link ANEURALNETWORKS_TENSOR_INT32} and shape - * [outputSize] specifying the bias for the fully-connected layer - * inside the LSTM cell. Bias is quantized with scale being a product - * of input and weights scales and zeroPoint equal to 0. - * * 11:The cell bias. - * A 1-D tensor of type {@link ANEURALNETWORKS_TENSOR_INT32} and shape - * [outputSize] specifying the bias for the fully-connected layer - * inside the LSTM cell. Bias is quantized with scale being a product - * of input and weights scales and zeroPoint equal to 0. - * * 12:The output gate bias. - * A 1-D tensor of type {@link ANEURALNETWORKS_TENSOR_INT32} and shape - * [outputSize] specifying the bias for the fully-connected layer - * inside the LSTM cell. Bias is quantized with scale being a product - * of input and weights scales and zeroPoint equal to 0. - * * 13: A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM} - * and shape [numBatches, outputSize] specifying the cell state from the - * previous time step of the LSTM cell. It is quantized using a - * quantization range of [-2^4, 2^4 * 32767/32768] (scale = 2^4 / - * 32768, zeroPoint = 0). - * * 14: A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} - * and shape [numBathes, outputSize] specifying the output of the LSTM - * cell from previous time-step. 
Tensor is quantized with a fixed - * quantization range of [-1, 127/128] (scale = 1/128, zeroPoint = - * 128). - * - * - * Outputs: - * * 0: A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM} - * and shape [numBatches, outputSize] which contains a cell state from - * the current time step. Tensor is quantized using a quantization - * range of [-2^4, 2^4 * 32767/32768] (scale = 2^4 / 32768, zeroPoint = - * 0). - * * 1: A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} - * and shape [numBathes, outputSize] which contains the output value. - * Tensor is quantized with a fixed quantization range of [-1, 127/128] - * (scale = 1/128, zeroPoint = 128). - */ - ANEURALNETWORKS_QUANTIZED_16BIT_LSTM = 73, - - /** - * Draws samples from a multinomial distribution. - * - * Supported tensor {@link OperandCode}: - * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} - * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} - * - * Inputs: - * * 0: A 2-D tensor with shape [batches, classes], specifying the - * unnormalized log-probabilities for all classes. - * * 1: A scalar {@link ANEURALNETWORKS_INT32}, specifying the number of - * independent samples to draw for each row slice. - * * 2: A 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor with shape [2], - * specifying seeds used to initialize the random distribution. If both - * provided seeds are 0, both will be randomly generated. - * Outputs: - * * 0: A 2-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor with shape - * [batches, samples], containing the drawn samples. - * - * Available since NNAPI feature level 3. - */ - ANEURALNETWORKS_RANDOM_MULTINOMIAL = 74, - - /** - * Reduces a tensor by computing the "logical and" of elements along given - * dimensions. - * - * If keep_dims is true, the reduced dimensions are - * retained with length 1. Otherwise, the rank of the tensor is reduced by - * 1 for each entry in dimensions. 
- * - * Supported tensor {@link OperandCode}: - * * {@link ANEURALNETWORKS_TENSOR_BOOL8} - * - * Supported tensor rank: up to 4 - * - * Inputs: - * * 0: An n-D tensor. - * * 1: A 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The dimensions - * to reduce. Dimension values must be in the range [-n, n). - * * 2: An {@link ANEURALNETWORKS_BOOL} scalar, keep_dims. If true, - * retains reduced dimensions with length 1. - * - * Outputs: - * * 0: A tensor of the same {@link OperandCode} as input0. - * If all dimensions are reduced and keep_dims is false, the output - * shape is [1]. - * - * Available since NNAPI feature level 3. - */ - ANEURALNETWORKS_REDUCE_ALL = 75, - - /** - * Reduces a tensor by computing the "logical or" of elements along given - * dimensions. - * - * If keep_dims is true, the reduced dimensions are - * retained with length 1. Otherwise, the rank of the tensor is reduced by - * 1 for each entry in dimensions. - * - * Supported tensor {@link OperandCode}: - * * {@link ANEURALNETWORKS_TENSOR_BOOL8} - * - * Supported tensor rank: up to 4 - * - * Inputs: - * * 0: An n-D tensor. - * * 1: A 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The dimensions - * to reduce. Dimension values must be in the range [-n, n). - * * 2: An {@link ANEURALNETWORKS_BOOL} scalar, keep_dims. If true, - * retains reduced dimensions with length 1. - * - * Outputs: - * * 0: A tensor of the same {@link OperandCode} as input0. - * If all dimensions are reduced and keep_dims is false, the output - * shape is [1]. - * - * Available since NNAPI feature level 3. - */ - ANEURALNETWORKS_REDUCE_ANY = 76, - - /** - * Reduces a tensor by computing the maximum of elements along given - * dimensions. - * - * If keep_dims is true, the reduced dimensions are - * retained with length 1. Otherwise, the rank of the tensor is reduced by - * 1 for each entry in dimensions. 
- * - * Supported tensor {@link OperandCode}: - * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} - * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4) - * - * Supported tensor rank: up to 4 - * - * Inputs: - * * 0: An n-D tensor. - * * 1: A 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The dimensions - * to reduce. Dimension values must be in the range [-n, n). - * * 2: An {@link ANEURALNETWORKS_BOOL} scalar, keep_dims. If true, - * retains reduced dimensions with length 1. - * - * Outputs: - * * 0: A tensor of the same {@link OperandCode} as input0. - * If all dimensions are reduced and keep_dims is false, the output - * shape is [1]. - * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and - * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, - * the scale and zeroPoint must be the same as input0. - * - * Available since NNAPI feature level 3. - */ - ANEURALNETWORKS_REDUCE_MAX = 77, - - /** - * Reduces a tensor by computing the minimum of elements along given - * dimensions. - * - * If keep_dims is true, the reduced dimensions are - * retained with length 1. Otherwise, the rank of the tensor is reduced by - * 1 for each entry in dimensions. - * - * Supported tensor {@link OperandCode}: - * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} - * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4) - * - * Supported tensor rank: up to 4 - * - * Inputs: - * * 0: An n-D tensor. - * * 1: A 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The dimensions - * to reduce. Dimension values must be in the range [-n, n). - * * 2: An {@link ANEURALNETWORKS_BOOL} scalar, keep_dims. If true, - * retains reduced dimensions with length 1. - * - * Outputs: - * * 0: A tensor of the same {@link OperandCode} as input0. 
- * If all dimensions are reduced and keep_dims is false, the output - * shape is [1]. - * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and - * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, - * the scale and zeroPoint must be the same as input0. - * - * Available since NNAPI feature level 3. - */ - ANEURALNETWORKS_REDUCE_MIN = 78, - - /** - * Reduces a tensor by multiplying elements along given dimensions. - * - * If keep_dims is true, the reduced dimensions are - * retained with length 1. Otherwise, the rank of the tensor is reduced by - * 1 for each entry in dimensions. - * - * Supported tensor {@link OperandCode}: - * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} - * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} - * - * Supported tensor rank: up to 4 - * - * Inputs: - * * 0: An n-D tensor. - * * 1: A 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The dimensions - * to reduce. Dimension values must be in the range [-n, n). - * * 2: An {@link ANEURALNETWORKS_BOOL} scalar, keep_dims. If true, - * retains reduced dimensions with length 1. - * - * Outputs: - * * 0: A tensor of the same {@link OperandCode} as input0. - * If all dimensions are reduced and keep_dims is false, the output - * shape is [1]. - * - * Available since NNAPI feature level 3. - */ - ANEURALNETWORKS_REDUCE_PROD = 79, - - /** - * Reduces a tensor by summing elements along given dimensions. - * - * If keep_dims is true, the reduced dimensions are - * retained with length 1. Otherwise, the rank of the tensor is reduced by - * 1 for each entry in dimensions. - * - * Supported tensor {@link OperandCode}: - * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} - * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} - * - * Supported tensor rank: up to 4 - * - * Inputs: - * * 0: An n-D tensor. - * * 1: A 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The dimensions - * to reduce. Dimension values must be in the range [-n, n). - * * 2: An {@link ANEURALNETWORKS_BOOL} scalar, keep_dims. 
If true, - * retains reduced dimensions with length 1. - * - * Outputs: - * * 0: A tensor of the same {@link OperandCode} as input0. - * If all dimensions are reduced and keep_dims is false, the output - * shape is [1]. - * - * Available since NNAPI feature level 3. - */ - ANEURALNETWORKS_REDUCE_SUM = 80, - - /** - * Select and scale the feature map of each region of interest to a unified - * output size by average pooling sampling points from bilinear interpolation. - * - * The region of interest is represented by its upper-left corner coordinate - * (x1,y1) and lower-right corner coordinate (x2,y2) in the original image. - * A spatial scaling factor is applied to map into feature map coordinate. - * A valid region of interest should satisfy x1 <= x2 and y1 <= y2. - * - * No rounding is applied in this operation. The sampling points are unified - * distributed in the pooling bin and their values are calculated by bilinear - * interpolation. - * - * Supported tensor {@link OperandCode}: - * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} - * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4) - * - * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout. - * With the default data layout NHWC, the data is stored in the order of: - * [batch, height, width, channels]. Alternatively, the data layout could - * be NCHW, the data storage order of: [batch, channels, height, width]. - * - * Inputs: - * * 0: A 4-D tensor, specifying the feature map. - * * 1: A 2-D Tensor of shape [num_rois, 4], specifying the locations of - * the regions of interest, each line with format [x1, y1, x2, y2]. - * For input0 of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, - * this tensor should be of {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}, - * with zeroPoint of 0 and scale of 0.125. Zero num_rois is - * supported for this tensor. 
- * * 2: An 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor, of shape - * [num_rois], specifying the batch index of each box. Boxes with - * the same batch index are grouped together. Zero num_rois is - * supported for this tensor. - * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the output - * height of the output tensor. - * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the output - * width of the output tensor. - * * 5: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the ratio - * from the height of original image to the height of feature map. - * * 6: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the ratio - * from the width of original image to the width of feature map. - * * 7: An {@link ANEURALNETWORKS_INT32} scalar, specifying the number of - * sampling points in height dimension used to compute the output. - * Set to 0 for adaptive value of ceil(roi_height/out_height). - * * 8: An {@link ANEURALNETWORKS_INT32} scalar, specifying the number of - * sampling points in width dimension used to compute the output. - * Set to 0 for adaptive value of ceil(roi_width/out_width). - * * 9: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to specify - * NCHW data layout for input0 and output0. Set to false for NHWC. - * - * Outputs: - * * 0: A tensor of the same {@link OperandCode} as input0. The output - * shape is [num_rois, out_height, out_width, depth]. - * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and - * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, - * the scale and zeroPoint can be different from the input0 scale and zeroPoint. - * - * Available since NNAPI feature level 3. - */ - ANEURALNETWORKS_ROI_ALIGN = 81, - - /** - * Select and scale the feature map of each region of interest to a unified - * output size by max-pooling. - * - * The region of interest is represented by its upper-left corner coordinate - * (x1,y1) and lower-right corner coordinate (x2,y2) in the original image. 
- * A spatial scaling factor is applied to map into feature map coordinate. - * A valid region of interest should satisfy x1 <= x2 and y1 <= y2. - * - * Rounding is applied in this operation to ensure integer boundary for - * regions of interest and pooling bins. - * - * Supported tensor {@link OperandCode}: - * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} - * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4) - * - * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout. - * With the default data layout NHWC, the data is stored in the order of: - * [batch, height, width, channels]. Alternatively, the data layout could - * be NCHW, the data storage order of: [batch, channels, height, width]. - * - * Inputs: - * * 0: A 4-D tensor, specifying the feature map. - * * 1: A 2-D Tensor of shape [num_rois, 4], specifying the locations of - * the regions of interest, each line with format [x1, y1, x2, y2]. - * For input0 of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and - * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, - * this tensor should be of {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}, - * with zeroPoint of 0 and scale of 0.125. - * * 2: An 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor, of shape - * [num_rois], specifying the batch index of each box. Boxes with - * the same batch index are grouped together. - * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the output - * height of the output tensor. - * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the output - * width of the output tensor. - * * 5: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the ratio - * from the height of original image to the height of feature map. - * * 6: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the ratio - * from the width of original image to the width of feature map. 
- * * 7: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to specify - * NCHW data layout for input0 and output0. Set to false for NHWC. - * - * Outputs: - * * 0: A tensor of the same {@link OperandCode} as input0. The output - * shape is [num_rois, out_height, out_width, depth]. - * For input0 of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and - * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, - * the scale and zeroPoint must be the same as input0. - * - * Available since NNAPI feature level 3. - */ - ANEURALNETWORKS_ROI_POOLING = 82, - - /** - * Computes reciprocal of square root of x element-wise. - * - * Supported tensor {@link OperandCode}: - * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} - * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} - * - * Supported tensor rank: from 1. - * - * Inputs: - * * 0: A tensor. - * - * Outputs: - * * 0: The output tensor of same shape as input0. - * - * Available since NNAPI feature level 3. - */ - ANEURALNETWORKS_RSQRT = 83, - - /** - * Using a tensor of booleans c and input tensors x and y select values - * elementwise from both input tensors: - * - * O[i] = C[i] ? x[i] : y[i]. - * - * Supported tensor {@link OperandCode}: - * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} - * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} - * * {@link ANEURALNETWORKS_TENSOR_INT32} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4) - * - * Supported tensor rank: from 1 - * - * Inputs: - * * 0: A tensor of type {@link ANEURALNETWORKS_TENSOR_BOOL8} acting as a - * mask that chooses, based on the value at each element, whether the - * corresponding element in the output should be taken from input1 (if - * true) or input2 (if false). - * * 1: An input tensor of the same shape as input0. - * * 2: An input tensor of the same shape and type as input1. 
- * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} - * and {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, - * the scales and zeroPoint can be different from input1 scale and zeroPoint. - * - * Outputs: - * * 0: A tensor of the same type and shape as input1 and input2. - * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor, - * the scale and zeroPoint can be different from inputs' scale and zeroPoint. - * - * Available since NNAPI feature level 3. - */ - ANEURALNETWORKS_SELECT = 84, - - /** - * Computes sin of x element-wise. - * - * Supported tensor {@link OperandCode}: - * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} - * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} - * - * Supported tensor rank: from 1. - * - * Inputs: - * * 0: A tensor. - * - * Outputs: - * * 0: The output tensor of same shape as input0. - * - * Available since NNAPI feature level 3. - */ - ANEURALNETWORKS_SIN = 85, - - /** - * Extracts a slice of specified size from the input tensor starting at a - * specified location. - * - * The starting location is specified as a 1-D tensor containing offsets - * for each dimension. The size is specified as a 1-D tensor containing - * either size of a slice along corresponding dimension or -1. In the latter - * case, all the remaining elements in dimension are included in the slice. - * - * A sum of begin offset and a size of a slice must not exceed size of a - * corresponding dimension. - * - * Supported tensor {@link OperandCode}: - * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} - * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} - * * {@link ANEURALNETWORKS_TENSOR_INT32} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4) - * - * Supported tensor rank: from 1 - * - * Inputs: - * * 0: An n-D tensor to take slice from, may be zero-sized. - * * 1: A 1-D tensor of type {@link ANEURALNETWORKS_TENSOR_INT32} specifying - * the beginning indices of the slice in each dimension. 
- * * 2: A 1-D tensor of type {@link ANEURALNETWORKS_TENSOR_INT32} specifying - * the size of the slice in each dimension. - * - * Outputs: - * * 0: An n-D tensor of the same type as the input containing the slice. - * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and - * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, - * its scale and zeroPoint has to be same as the input0 scale and zeroPoint. - * - * Available since NNAPI feature level 3. - */ - ANEURALNETWORKS_SLICE = 86, - - /** - * Splits a tensor along a given axis into num_splits subtensors. - * - * Supported tensor {@link OperandCode}: - * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} - * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} - * * {@link ANEURALNETWORKS_TENSOR_INT32} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4) - * - * Supported tensor rank: from 1 - * - * Inputs: - * * 0: An n-D tensor to split. - * * 1: An {@link ANEURALNETWORKS_INT32} scalar specifying the axis along - * which to split. - * * 2: An {@link ANEURALNETWORKS_INT32} scalar indicating the number of - * splits along given axis. Must evenly divide axis size. - * - * Outputs: - * * 0 ~ (num_splits - 1): Resulting subtensors. - * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and - * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, - * the scale and zeroPoint must be the same as input0. - * - * Available since NNAPI feature level 3. - */ - ANEURALNETWORKS_SPLIT = 87, - - /** - * Computes square root of x element-wise. - * - * Supported tensor {@link OperandCode}: - * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} - * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} - * - * Supported tensor rank: from 1. - * - * Inputs: - * * 0: A tensor. - * - * Outputs: - * * 0: The output tensor of same shape as input0. - * - * Available since NNAPI feature level 3. - */ - ANEURALNETWORKS_SQRT = 88, - - /** - * Constructs a tensor by tiling a given tensor. 
- * - * This operation creates a new tensor by replicating `input` `multiples` - * times. The output tensor's i-th dimension has `input.dims(i) * multiples[i]` - * elements, and the values of `input` are replicated `multiples[i]` times - * along the i-th dimension. - * For example, tiling `[a b c d]` by `[2]` produces `[a b c d a b c d]`. - * - * Supported tensor {@link OperandCode}: - * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} - * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} - * * {@link ANEURALNETWORKS_TENSOR_INT32} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4) - * - * Supported tensor rank: from 1 - * - * Inputs: - * * 0: input, an n-D tensor specifying the input. - * * 1: multiples, a 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. - * The length of multiples must be n. - * - * Outputs: - * * 0: A tiled tensor of the same {@link OperandCode} and rank as `input`. - * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and - * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, - * the scale and zeroPoint must be the same as input0. - * - * Available since NNAPI feature level 3. - */ - ANEURALNETWORKS_TILE = 89, - - /** - * Finds values and indices of the k largest entries for the last dimension. - * - * Resulting values in each dimensions are sorted in descending order. If - * two values are equal, the one with larger index appears first. - * - * Supported tensor {@link OperandCode}: - * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} - * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} - * * {@link ANEURALNETWORKS_TENSOR_INT32} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4) - * - * Supported tensor rank: from 1 - * - * Inputs: - * * 0: input, an n-D tensor specifying the input. 
- * * 1: k, an {@link ANEURALNETWORKS_INT32} scalar, specifying the number of - * top elements to look for along the last dimension. - * - * Outputs: - * * 0: An n-D tensor of the same type as the input, containing the k - * largest elements along each last dimensional slice. - * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and - * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, - * the scale and zeroPoint must be the same as input0. - * * 1: An n-D tensor of type {@link ANEURALNETWORKS_TENSOR_INT32} - * containing the indices of values within the last dimension of input. - * - * Available since NNAPI feature level 3. - */ - ANEURALNETWORKS_TOPK_V2 = 90, - - /** - * Performs the transpose of 2-D convolution operation. - * - * This operation is sometimes called "deconvolution" after Deconvolutional - * Networks, but is actually the transpose (gradient) of - * {@link ANEURALNETWORKS_CONV_2D} rather than an actual deconvolution. - * - * The output dimensions are functions of the filter dimensions, stride, and - * padding. - * - * Supported tensor {@link OperandCode} configurations: - * * 16 bit floating point: - * * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} for input, filter, output, and bias. - * - * * 32 bit floating point: - * * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} for input, filter, output, and bias. - * - * * Quantized: - * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} for input, filter, and output. - * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (with scale set to - * * * input.scale * filter.scale). - * - * * Quantized with symmetric per channel quantization for the filter: - * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} for input, and output. - * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter. - * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (scale set to 0.0, - * * * each value scaling is separate and equal to input.scale * filter.scales[channel]). 
- * - * Available since NNAPI feature level 4: - * * Quantized signed (since NNAPI feature level 4): - * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} for input, filter, and output. - * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (with scale set to - * * * input.scale * filter.scale). - * - * * Quantized signed with filter symmetric per channel quantization - * (since NNAPI feature level 4): - * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} for input, and output. - * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter. - * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (scale set to 0.0, - * * * each value scaling is separate and equal to input.scale * filter.scales[channel]). - * - * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout. - * With the default data layout NHWC, the data is stored in the order of: - * [batch, height, width, channels]. Alternatively, the data layout could - * be NCHW, the data storage order of: [batch, channels, height, width]. - * - * Both explicit padding and implicit padding are supported. - * - * Inputs (explicit padding): - * * 0: A 4-D tensor, of shape [batches, height, width, depth_in], - * specifying the input. - * Since API level 29, zero batches is supported for this tensor. - * * 1: A 4-D tensor, of shape - * [depth_out, filter_height, filter_width, depth_in], specifying the - * filter. For tensor of type - * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} the channel - * dimension (ANeuralNetworksSymmPerChannelQuantParams::channelDim) must be set to 0. - * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input - * tensor of type {@link ANEURALNETWORKS_TENSOR_FLOAT32} or - * {@link ANEURALNETWORKS_TENSOR_FLOAT16}, the bias must be of the - * same type. 
- * For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} - * and {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}, - * the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32}, - * with zeroPoint of 0 and bias_scale == input_scale * filter_scale. - * For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL}, - * the bias must be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint of 0 - * and bias_scale of 0. The actual scale of each value 'i' is equal to - * bias_scale[i] = input_scale * filter_scale[i]. - * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on - * the left, in the ‘width’ dimension. - * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on - * the right, in the ‘width’ dimension. - * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on - * the top, in the ‘height’ dimension. - * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on - * the bottom, in the ‘height’ dimension. - * * 7: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when - * walking through input in the ‘width’ dimension. - * * 8: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when - * walking through input in the ‘height’ dimension. - * * 9: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the - * {@link FuseCode} values. Specifies the activation to - * invoke on the result. - * * 10: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to specify - * NCHW data layout for input0 and output0. Set to false for NHWC. - * - * Inputs (implicit padding): - * * 0: A 4-D tensor, of shape [batches, height, width, depth_in], - * specifying the input. - * Since API level 29, zero batches is supported for this tensor. - * * 1: A 4-D tensor, of shape - * [depth_out, filter_height, filter_width, depth_in], specifying the - * filter. 
For tensor of type - * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} the channel - * dimension (ANeuralNetworksSymmPerChannelQuantParams::channelDim) must be set to 0. - * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input - * tensor of type {@link ANEURALNETWORKS_TENSOR_FLOAT32} or - * {@link ANEURALNETWORKS_TENSOR_FLOAT16}, the bias should be of the - * same type. - * For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} - * and {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}, - * the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32}, - * with zeroPoint of 0 and bias_scale == input_scale * filter_scale. - * For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL}, - * the bias must be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint of 0 - * and bias_scale of 0. The actual scale of each value 'i' is equal to - * bias_scale[i] = input_scale * filter_scale[i]. - * * 3: An {@link ANEURALNETWORKS_TENSOR_INT32} tensor, specifying the output - * tensor shape. - * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the implicit - * padding scheme, has to be one of the - * {@link PaddingCode} values. - * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when - * walking through input in the ‘width’ dimension. - * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when - * walking through input in the ‘height’ dimension. - * * 7: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the - * {@link FuseCode} values. Specifies the activation to - * invoke on the result. - * * 8: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to specify - * NCHW data layout for input0 and output0. Set to false for NHWC. - * - * Outputs: - * * 0: The output 4-D tensor, of shape - * [batches, out_height, out_width, depth_out]. 
- * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and - * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, - * the scale and zeroPoint can be different from inputs' scale and zeroPoint. - * - * Available since NNAPI feature level 3. - */ - ANEURALNETWORKS_TRANSPOSE_CONV_2D = 91, - - /** - * A recurrent neural network specified by an LSTM cell. - * - * Performs (fully) dynamic unrolling of input. - * - * This Op unrolls the input along the time dimension, and implements the - * following operation for each element in the sequence - * s = 1...sequence_length: - * outputs[s] = projection(state = activation(LSTMOp(inputs[s]))) - * - * Where LSTMOp is the LSTM op as in {@link ANEURALNETWORKS_LSTM}, - * the "projection" is an optional projection layer from state and output - * and the “activation” is the function passed as the - * “fused_activation_function” argument (if not “NONE”). - * - * Supported tensor {@link OperandCode}: - * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} - * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} - * - * Supported tensor rank: 3, either time-major or batch-major. - * - * All input and output tensors must be of the same type. - * - * Inputs: - * * 0: The input (\f$x_t\f$). - * A 3-D tensor of shape: - * If time-major: [max_time, batch_size, input_size] - * If batch-major: [batch_size, max_time, input_size] - * where “max_time” is the number of timesteps (sequence length), - * “batch_size” corresponds to the batching dimension, and - * “input_size” is the size of the input. - * * 1: The input-to-input weights (\f$W_{xi}\f$). Optional. - * A 2-D tensor of shape [num_units, input_size], where “num_units” - * corresponds to the number of cell units. - * * 2: The input-to-forget weights (\f$W_{xf}\f$). - * A 2-D tensor of shape [num_units, input_size]. - * * 3: The input-to-cell weights (\f$W_{xc}\f$). - * A 2-D tensor of shape [num_units, input_size]. - * * 4: The input-to-output weights (\f$W_{xo}\f$). 
- * A 2-D tensor of shape [num_units, input_size]. - * * 5: The recurrent-to-input weights (\f$W_{hi}\f$). Optional. - * A 2-D tensor of shape [num_units, output_size], where “output_size” - * corresponds to either the number of cell units (i.e., “num_units”), - * or the second dimension of the “projection_weights”, if defined. - * * 6: The recurrent-to-forget weights (\f$W_{hf}\f$). - * A 2-D tensor of shape [num_units, output_size]. - * * 7: The recurrent-to-cell weights (\f$W_{hc}\f$). - * A 2-D tensor of shape [num_units, output_size]. - * * 8: The recurrent-to-output weights (\f$W_{ho}\f$). - * A 2-D tensor of shape [num_units, output_size]. - * * 9: The cell-to-input weights (\f$W_{ci}\f$). Optional. - * A 1-D tensor of shape [num_units]. - * * 10:The cell-to-forget weights (\f$W_{cf}\f$). Optional. - * A 1-D tensor of shape [num_units]. - * * 11:The cell-to-output weights (\f$W_{co}\f$). Optional. - * A 1-D tensor of shape [num_units]. - * * 12:The input gate bias (\f$b_i\f$). Optional. - * A 1-D tensor of shape [num_units]. - * * 13:The forget gate bias (\f$b_f\f$). - * A 1-D tensor of shape [num_units]. - * * 14:The cell bias (\f$b_c\f$). - * A 1-D tensor of shape [num_units]. - * * 15:The output gate bias (\f$b_o\f$). - * A 1-D tensor of shape [num_units]. - * * 16:The projection weights (\f$W_{proj}\f$). Optional. - * A 2-D tensor of shape [output_size, num_units]. - * * 17:The projection bias (\f$b_{proj}\f$). Optional. - * A 1-D tensor of shape [output_size]. - * * 18:The output state (in) (\f$h_{t-1}\f$). - * A 2-D tensor of shape [batch_size, output_size]. - * * 19:The cell state (in) (\f$C_{t-1}\f$). - * A 2-D tensor of shape [batch_size, num_units]. - * * 20:The activation function (\f$g\f$). - * A value indicating the activation function: - * <ul> - * <li>0: None; - * <li>1: Relu; - * <li>3: Relu6; - * <li>4: Tanh; - * <li>6: Sigmoid. 
- * </ul> - * * 21:The clipping threshold (\f$t_{cell}\f$) for the cell state, such - * that values are bound within [-cell_clip, cell_clip]. If set to 0.0 - * then clipping is disabled. - * * 22:The clipping threshold (\f$t_{proj}\f$) for the output from the - * projection layer, such that values are bound within - * [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled. - * * 23:Time-major if true, batch-major if false. - * * 24:The input layer normalization weights. Optional. - * A 1-D tensor of shape [num_units]. Used to rescale normalized inputs - * to activation at input gate. - * * 25:The forget layer normalization weights. Optional. - * A 1-D tensor of shape [num_units]. Used to rescale normalized inputs - * to activation at forget gate. - * * 26:The cell layer normalization weights. Optional. - * A 1-D tensor of shape [num_units]. Used to rescale normalized inputs - * to activation at cell gate. - * * 27:The output layer normalization weights. Optional. - * A 1-D tensor of shape [num_units]. Used to rescale normalized inputs - * to activation at output gate. - * - * Outputs: - * * 0: The output (\f$o_t\f$). - * A 3-D tensor of shape: - * If time-major: [max_time, batch_size, output_size] - * If batch-major: [batch_size, max_time, output_size] - * * 1: A tensor of shape [batch_size, output_size] containing a hidden - * state from the last time step in the sequence. This output is - * optional and can be omitted. If this output is present then - * output #2 must be present as well. - * Available since NNAPI feature level 4. - * * 2: A tensor of shape [batch_size, cell_size] containing a cell state - * from the last time step in the sequence. This output is optional - * and can be omitted. - * Available since NNAPI feature level 4. - * - * Available since NNAPI feature level 3. - * - * Important: As of NNAPI feature level 3, there is no way to get the output state tensors out - * and NNAPI does not maintain internal states. 
This operator does not support the usage pattern - * in which multiple cells are chained and state tensors are propagated. - */ - ANEURALNETWORKS_UNIDIRECTIONAL_SEQUENCE_LSTM = 92, - - /** - * A recurrent neural network layer that applies a basic RNN cell to a - * sequence of inputs. - * - * This layer unrolls the input along the sequence dimension, and implements - * the following operation - * for each element in the sequence s = 1...sequence_length: - * outputs[s] = state = activation(inputs[s] * input_weights’ + state * - * recurrent_weights’ + bias) - * - * Where: - * * “input_weights” is a weight matrix that multiplies the inputs; - * * “recurrent_weights” is a weight matrix that multiplies the current - * “state” which itself is the output from the previous time step - * computation; - * * “bias” is a bias vector (added to each output vector in the batch); - * * “activation” is the function passed as the “fused_activation_function” - * argument (if not “NONE”). - * - * Supported tensor {@link OperandCode}: - * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} - * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} - * - * The input tensors must all be the same type. - * - * Inputs: - * * 0: input. - * A 3-D tensor. The shape is defined by the input 6 (timeMajor). If - * it is set to 1, then the input has a shape [maxTime, batchSize, - * inputSize], otherwise the input has a shape [batchSize, maxTime, - * inputSize]. - * * 1: weights. - * A 2-D tensor of shape [numUnits, inputSize]. - * * 2: recurrent_weights. - * A 2-D tensor of shape [numUnits, numUnits]. - * * 3: bias. - * A 1-D tensor of shape [numUnits]. - * * 4: hidden state - * A 2-D tensor of shape [batchSize, numUnits]. Specifies a hidden - * state input for the first time step of the computation. - * * 5: fusedActivationFunction. - * A {@link FuseCode} value indicating the activation function. If - * “NONE” is specified then it results in a linear activation. 
- * * 6: timeMajor - * An {@link ANEURALNETWORKS_INT32} scalar specifying the shape format - * of input and output tensors. Must be set to either 0 or 1. - * Outputs: - * * 0: output. - * A 3-D tensor. The shape is defined by the input 6 (timeMajor). If - * it is set to 1, then the output has a shape [maxTime, batchSize, - * numUnits], otherwise the output has a shape [batchSize, maxTime, - * numUnits]. - * * 1: A tensor of shape [batchSize, numUnits] containing hidden state - * from the last time step in the sequence. This output is optional - * and can be omitted. - * Available since NNAPI feature level 4. - * - * Available since NNAPI feature level 3. - * - * Important: As of NNAPI feature level 3, there is no way to get the output state tensors out - * and NNAPI does not maintain internal states. This operator does not support the usage pattern - * in which multiple cells are chained and state tensors are propagated. - */ - ANEURALNETWORKS_UNIDIRECTIONAL_SEQUENCE_RNN = 93, - - /** - * Resizes images to given size using the nearest neighbor interpretation. - * - * Resized images must be distorted if their output aspect ratio is not the - * same as input aspect ratio. The corner pixels of output may not be the - * same as corner pixels of input. - * - * Supported tensor {@link OperandCode}: - * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} - * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4) - * - * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout. - * With the default data layout NHWC, the data is stored in the order of: - * [batch, height, width, channels]. Alternatively, the data layout could - * be NCHW, the data storage order of: [batch, channels, height, width]. - * - * Both resizing by shape and resizing by scale are supported. 
- * - * Inputs (resizing by shape): - * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying - * the input. Zero batches is supported for this tensor. - * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the output - * width of the output tensor. - * * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the output - * height of the output tensor. - * * 3: An {@link ANEURALNETWORKS_BOOL} scalar, default to false. - * Set to true to specify NCHW data layout for input0 and output0. - * * 4: Align corners. An optional {@link ANEURALNETWORKS_BOOL} - * scalar, default to false. If True, the centers of the 4 corner - * pixels of the input and output tensors are aligned, preserving the - * values at the corner pixels. - * Available since NNAPI feature level 4. - * * 5: Half pixel centers. An optional {@link ANEURALNETWORKS_BOOL} - * scalar, default to false. If True, the pixel centers are assumed to - * be at (0.5, 0.5). This is the default behavior of image.resize in - * TF 2.0. If this parameter is True, then align_corners parameter - * must be False. - * Available since NNAPI feature level 4. - * - * Inputs (resizing by scale): - * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying - * the input. Zero batches is supported for this tensor. - * * 1: A scalar, specifying width_scale, the scaling factor of the width - * dimension from the input tensor to the output tensor. The output - * width is calculated as new_width = floor(width * width_scale). - * The scalar must be of {@link ANEURALNETWORKS_FLOAT16} if input0 is - * of {@link ANEURALNETWORKS_TENSOR_FLOAT16} and of - * {@link ANEURALNETWORKS_FLOAT32} otherwise. - * * 2: A scalar, specifying height_scale, the scaling factor of the height - * dimension from the input tensor to the output tensor. The output - * height is calculated as new_height = floor(height * height_scale). 
- * The scalar must be of {@link ANEURALNETWORKS_FLOAT16} if input0 is - * of {@link ANEURALNETWORKS_TENSOR_FLOAT16} and of - * {@link ANEURALNETWORKS_FLOAT32} otherwise. - * * 3: An {@link ANEURALNETWORKS_BOOL} scalar, default to false. - * Set to true to specify NCHW data layout for input0 and output0. - * * 4: Align corners. An optional {@link ANEURALNETWORKS_BOOL} - * scalar, default to false. If True, the centers of the 4 corner - * pixels of the input and output tensors are aligned, preserving the - * values at the corner pixels. - * Available since NNAPI feature level 4. - * * 5: Half pixel centers. An optional {@link ANEURALNETWORKS_BOOL} - * scalar, default to false. If True, the pixel centers are assumed to - * be at (0.5, 0.5). This is the default behavior of image.resize in - * TF 2.0. If this parameter is True, then align_corners parameter - * must be False. - * Available since NNAPI feature level 4. - * - * Outputs: - * * 0: The output 4-D tensor, of shape - * [batches, new_height, new_width, depth]. - * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and - * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor, - * the scale and zeroPoint must be the same as input0. - * - * Available since NNAPI feature level 3. - */ - ANEURALNETWORKS_RESIZE_NEAREST_NEIGHBOR = 94, - - // Operations below are available since NNAPI feature level 4. - - /** - * Quantized version of {@link ANEURALNETWORKS_LSTM}. - * - * The input and the output use asymmetric quantized types, while the rest - * use symmetric ones. - * - * Inputs: - * * 0: The input to the LSTM cell. - * Type: {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} - * Shape: [batchSize, inputSize] - * * 1: The input-to-input weights. Optional. - * Type: {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM} - * Shape: [numUnits, inputSize] - * * 2: The input-to-forget weights. - * Type: {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM} - * Shape: [numUnits, inputSize] - * * 3: The input-to-cell weights. 
- * Type: {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM} - * Shape: [numUnits, inputSize] - * * 4: The input-to-output weights. - * Type: {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM} - * Shape: [numUnits, inputSize] - * * 5: The recurrent-to-input weights. Optional. - * Type: {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM} - * Shape: [numUnits, outputSize] - * * 6: The recurrent-to-forget weights. - * Type: {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM} - * Shape: [numUnits, outputSize] - * * 7: The recurrent-to-cell weights. - * Type: {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM} - * Shape: [numUnits, outputSize] - * * 8: The recurrent-to-output weights. - * Type: {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM} - * Shape: [numUnits, outputSize] - * * 9: The cell-to-input weights (for peephole). Optional. - * Type: {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM} - * Shape: [numUnits] - * * 10: The cell-to-forget weights (for peephole). Optional. - * Type: {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM} - * Shape: [numUnits] - * * 11: The cell-to-output weights (for peephole). Optional. - * Type: {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM} - * Shape: [numUnits] - * * 12: The input gate bias. Quantized with scale being the - * product of input and weights scales and zeroPoint equal to 0. - * Optional. - * Type: {@link ANEURALNETWORKS_TENSOR_INT32} - * Shape: [numUnits] - * * 13: The forget gate bias. Quantized with scale being the - * product of input and weights scales and zeroPoint equal to 0. - * Type: {@link ANEURALNETWORKS_TENSOR_INT32} - * Shape: [numUnits] - * * 14: The cell bias. Quantized with scale being the - * product of input and weights scales and zeroPoint equal to 0. - * Type: {@link ANEURALNETWORKS_TENSOR_INT32} - * Shape: [numUnits] - * * 15: The output gate bias. Quantized with scale being the - * product of input and weights scales and zeroPoint equal to 0. - * Type: {@link ANEURALNETWORKS_TENSOR_INT32} - * Shape: [numUnits] - * * 16: The projection weights. Optional. 
- * Type: {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM} - * Shape: [outputSize, numUnits] - * * 17: The projection bias. Quantized with scale being the - * product of input and weights scales and zeroPoint equal to 0. - * Optional. - * Type: {@link ANEURALNETWORKS_TENSOR_INT32} - * Shape: [outputSize] - * * 18: The output from the previous time step. - * Type: {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} - * Shape: [batchSize, outputSize] - * * 19: The cell state from the previous time step. - * Type: {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM} - * Shape: [batchSize, numUnits] - * * 20: The input layer normalization weights. Used to rescale - * normalized inputs to activation at input gate. Optional. - * Type: {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM} - * Shape: [numUnits] - * * 21: The forget layer normalization weights. Used to - * rescale normalized inputs to activation at forget gate. Optional. - * Type: {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM} - * Shape: [numUnits] - * * 22: The cell layer normalization weights. Used to rescale - * normalized inputs to activation at cell gate. Optional. - * Type: {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM} - * Shape: [numUnits] - * * 23: The output layer normalization weights. Used to - * rescale normalized inputs to activation at output gate. Optional. - * Type: {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM} - * Shape: [numUnits] - * * 24: The cell clip. If provided the cell state is clipped - * by this value prior to the cell output activation. Optional. - * Type: {@link ANEURALNETWORKS_FLOAT32}. - * * 25: The projection clip. If provided and projection is enabled, - * this is used for clipping the projected values. Optional. - * Type: {@link ANEURALNETWORKS_FLOAT32}. - * * 26: The scale of the intermediate result of matmul, - * i.e. input to layer normalization, at input gate. - * Type: {@link ANEURALNETWORKS_FLOAT32}. - * * 27: The scale of the intermediate result of matmul, - * i.e. 
input to layer normalization, at forget gate. - * Type: {@link ANEURALNETWORKS_FLOAT32}. - * * 28: The scale of the intermediate result of matmul, - * i.e. input to layer normalization, at cell gate. - * Type: {@link ANEURALNETWORKS_FLOAT32}. - * * 29: The scale of the intermediate result of matmul, - * i.e. input to layer normalization, at output gate. - * Type: {@link ANEURALNETWORKS_FLOAT32}. - * * 30: The zero point of the hidden state, i.e. input to - * projection. - * Type: {@link ANEURALNETWORKS_INT32}. - * * 31: The scale of the hidden state, i.e. input to - * projection. - * Type: {@link ANEURALNETWORKS_FLOAT32}. - * - * Outputs: - * * 0: The output state (out). - * Type: {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} - * Shape: [batchSize, outputSize] - * * 1: The cell state (out). - * Type: {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM} - * Shape: [batchSize, numUnits] - * * 2: The output. This is effectively the same as the current - * "output state (out)" value. - * Type: {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} - * Shape: [batchSize, outputSize] - * - * Available since NNAPI feature level 4. - */ - ANEURALNETWORKS_QUANTIZED_LSTM = 95, - - /** - * Executes one of the two referenced models as determined by a boolean - * value. - * - * The inputs and outputs of the two referenced models must agree with the - * signature of this operation. That is, if the operation has (3 + n) inputs - * and m outputs, both models must have n inputs and m outputs with the same - * types, ranks (if specified), dimensions (if specified), scales, - * zeroPoints, and other operand parameters as the corresponding operation - * inputs and outputs. - * - * Inputs: - * * 0: A value of type {@link ANEURALNETWORKS_TENSOR_BOOL8} and shape [1] - * that determines which of the two referenced models to execute. - * The operand must have fully specified dimensions. - * * 1: A {@link ANEURALNETWORKS_MODEL} reference to the model to be - * executed if the condition is true. 
- * * 2: A {@link ANEURALNETWORKS_MODEL} reference to the model to be - * executed if the condition is false. - * * 3 ~ (n + 2): Inputs to be passed to the model selected for execution. - * - * Outputs: - * * 0 ~ (m - 1): Outputs produced by the selected model. - * - * Available since NNAPI feature level 4. - */ - ANEURALNETWORKS_IF = 96, - - /** - * Executes the body model until the condition model outputs false. - * - * The inputs to this operation are the condition model, the body model, - * and operand values for the first iteration of the loop. The values are - * implicitly split into three groups of input-output, state-only, and - * input-only values, as described below. - * - * The outputs of this operation are the final values of input-output - * operands. - * - * Both the condition and body model receive (m + k + n) inputs. - * * The first m (m >= 1) inputs are input-output operands. For the first - * iteration, these are initialized from the corresponding inputs of the - * WHILE operation. In subsequent iterations, their values come from the - * corresponding outputs of the body model produced during the previous - * iteration. - * * The next k (k >= 0) inputs are state-only operands. They are similar to - * the input-output operands, except that their values are no longer - * available after the loop terminates. - * * The last n (n >= 0) inputs are input-only operands. Their values come - * from the corresponding inputs of the WHILE operation. - * - * The body model produces (m + k) outputs. - * * The first m outputs are input-output operands. They become the outputs - * of the WHILE operation when a termination condition is reached. - * * The last k outputs are state-only operands. Their values are no longer - * available after the loop terminates. 
- * - * The numbers m, k, and n are inferred by the runtime as follows: - * m = (WHILE operation output count) - * k = (body model output count) - m - * n = (body model input count) - m - k - * - * The pseudo-code below illustrates the flow of a WHILE operation with - * inputs condition, body, initial_input_output, initial_state, input_only - * (m = 1, k = 1, n = 1): - * - * input_output = initial_input_output - * state = initial_state - * while condition(input_output, state, input_only): - * input_output, state = body(input_output, state, input_only) - * return input_output - * - * To prevent infinite loops, there is an implicit execution timeout - * associated with each loop ("loop timeout duration"). See {@link - * ANeuralNetworksExecution_setLoopTimeout}. - * - * Inputs: - * * 0: A {@link ANEURALNETWORKS_MODEL} reference to the condition - * model. The model must have (m + k + n) inputs with - * the same types, ranks (if specified), dimensions (if specified), - * scales, zeroPoints, and other operand parameters as the - * corresponding inputs of the WHILE operation and exactly one output - * of {@link ANEURALNETWORKS_TENSOR_BOOL8} and shape [1]. - * The output operand must have fully specified dimensions. - * * 1: A {@link ANEURALNETWORKS_MODEL} reference to the body model. - * The model must have (m + k + n) inputs and (m + k) outputs with - * the same types, ranks (if specified), dimensions (if specified), - * scales, zeroPoints, and other operand parameters as the - * corresponding inputs and outputs of the WHILE operation. - * * (m inputs): Initial values for input-output operands. - * * (k inputs): Initial values for state-only operands. - * * (n inputs): Values for input-only operands. - * - * Outputs: - * * 0 ~ (m - 1): Outputs produced by the loop. - * - * Available since NNAPI feature level 4. - */ - ANEURALNETWORKS_WHILE = 97, - - /** - * Computes exponential linear activation on the input tensor element-wise. 
- * - * The output is calculated using the following formula: - * - * ELU(x) = max(0, x) + min(0, alpha * (exp(x) - 1)) - * - * Supported tensor {@link OperandCode}: - * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} - * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} - * - * Supported tensor rank: from 1. - * - * Inputs: - * * 0: A tensor, specifying the input. May be zero-sized. - * * 1: A scalar, specifying the alpha parameter. - * For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT16}, - * the alpha value must be of {@link ANEURALNETWORKS_FLOAT16}. - * For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, - * the alpha value must be of {@link ANEURALNETWORKS_FLOAT32}. - * - * Outputs: - * * 0: The output tensor of same shape and type as input0. - * - * Available since NNAPI feature level 4. - */ - ANEURALNETWORKS_ELU = 98, - - /** - * Computes hard-swish activation on the input tensor element-wise. - * - * Hard swish activation is introduced in - * https://arxiv.org/pdf/1905.02244.pdf - * - * The output is calculated using the following formula: - * - * h-swish(x) = x * max(0, min(6, (x + 3))) / 6 - - * Supported tensor {@link OperandCode}: - * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} - * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} - * - * Supported tensor rank: from 1. - * - * Inputs: - * * 0: A tensor, specifying the input. May be zero-sized. - * - * Outputs: - * * 0: The output tensor of same shape and type as input0. - * Scale and zero point of this tensor may be different from the input - * tensor's parameters. - * - * Available since NNAPI feature level 4. - */ - ANEURALNETWORKS_HARD_SWISH = 99, - - /** - * Creates a tensor filled with a scalar value. 
- * - * Supported output tensor {@link OperandCode}: - * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} - * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} - * * {@link ANEURALNETWORKS_TENSOR_INT32} - * - * Supported tensor rank: from 1. - * - * Inputs: - * * 0: A 1-D tensor, specifying the desired output tensor shape. - * * 1: A scalar, specifying the value to fill the output tensors with. - * For output tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT16}, - * the scalar must be of {@link ANEURALNETWORKS_FLOAT16}. - * For output tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, - * the scalar must be of {@link ANEURALNETWORKS_FLOAT32}. - * For output tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, - * the scalar must be of {@link ANEURALNETWORKS_INT32}. - * - * Outputs: - * * 0: The output tensor. - * - * Available since NNAPI feature level 4. - */ - ANEURALNETWORKS_FILL = 100, - - /** - * Returns the rank of a tensor. - * - * The rank of a tensor is the number of dimensions in it. Also known as - * "order", "degree", "ndims". - * - * Supported tensor {@link OperandCode}: - * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} - * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} - * * {@link ANEURALNETWORKS_TENSOR_INT32} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} - * * {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM} - * * {@link ANEURALNETWORKS_TENSOR_BOOL8} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} - * * {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM} - * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} - * - * Supported tensor rank: from 1. - * - * Inputs: - * * 0: The input tensor. - * - * Outputs: - * * 0: A scalar of {@link ANEURALNETWORKS_INT32}, specifying the rank - * of the input tensor. - * - * Available since NNAPI feature level 4. - */ - ANEURALNETWORKS_RANK = 101, -} OperationCode; - -/** - * Fused activation function types. - * - * Available since NNAPI feature level 1. 
- */ -typedef enum { - /** NO fused activation function. */ - ANEURALNETWORKS_FUSED_NONE = 0, - /** Fused ReLU activation function. */ - ANEURALNETWORKS_FUSED_RELU = 1, - /** Fused ReLU1 activation function. */ - ANEURALNETWORKS_FUSED_RELU1 = 2, - /** Fused ReLU6 activation function. */ - ANEURALNETWORKS_FUSED_RELU6 = 3, -} FuseCode; - -/** - * Implicit padding algorithms. - * - * - * Available since NNAPI feature level 1. - */ -typedef enum { - /** - * SAME padding. - * Padding on both ends are the "same": - * padding_to_beginning = total_padding / 2 - * padding_to_end = (total_padding + 1)/2. - * i.e., for even number of padding, padding to both ends are exactly - * the same; for odd number of padding, padding to the ending is bigger - * than the padding to the beginning by 1. - * - * total_padding is a function of input, stride, dilation and filter size. - * It could be computed as follows: - * out_size = (input + stride - 1) / stride - * effective_filter_size = (filter_size - 1) * dilation + 1 - * needed_input = (out_size - 1) * stride + effective_filter_size - * total_padding = max(0, needed_input - input_size) - * The computation is the same for the horizontal and vertical directions. - */ - ANEURALNETWORKS_PADDING_SAME = 1, - - /** - * VALID padding. - * No padding. When the input size is not evenly divisible by - * the filter size, the input at the end that could not fill - * the whole filter tile will simply be ignored. - */ - ANEURALNETWORKS_PADDING_VALID = 2, -} PaddingCode; - -/** - * Execution preferences. - * - * Available since NNAPI feature level 1. - */ -typedef enum { - /** - * Prefer executing in a way that minimizes battery drain. - * This is desirable for compilations that will be executed often. - */ - ANEURALNETWORKS_PREFER_LOW_POWER = 0, - /** - * Prefer returning a single answer as fast as possible, even if this causes - * more power consumption. 
- */ - ANEURALNETWORKS_PREFER_FAST_SINGLE_ANSWER = 1, - /** - * Prefer maximizing the throughput of successive frames, for example when - * processing successive frames coming from the camera. - */ - ANEURALNETWORKS_PREFER_SUSTAINED_SPEED = 2, -} PreferenceCode; - -/** - * Device types. - * - * The type of NNAPI device. - */ -typedef enum { - /** The device type cannot be provided. */ - ANEURALNETWORKS_DEVICE_UNKNOWN = 0, - /** The device does not fall into any category below. */ - ANEURALNETWORKS_DEVICE_OTHER = 1, - /** The device runs NNAPI models on single or multi-core CPU. */ - ANEURALNETWORKS_DEVICE_CPU = 2, - /** The device can run NNAPI models and also accelerate graphics APIs such - * as OpenGL ES and Vulkan. */ - ANEURALNETWORKS_DEVICE_GPU = 3, - /** Dedicated accelerator for Machine Learning workloads. */ - ANEURALNETWORKS_DEVICE_ACCELERATOR = 4, -} DeviceTypeCode; - -/** - * NNAPI feature levels. - * - * Each update of the NNAPI specification yields a new NNAPI feature level enum value. - * NNAPI feature level corrseponds to an NNAPI specification version that a driver - * and/or the NNAPI runtime can implement. - * - * A feature level up to and including "FEATURE_LEVEL_5" maps directly to - * the Android API level that introduced the corresponding update of the NNAPI - * specification. Feature levels after Android API level 31 have no association with - * API level because the NNAPI specification can be updated between Android API - * releases. Outputs of {@link ANeuralNetworksDevice_getFeatureLevel} and - * {@link ANeuralNetworks_getRuntimeFeatureLevel} must be compared against - * these enum values instead of the Android API level. 
- */ -typedef enum { - /** NNAPI specification available in Android O-MR1, Android NNAPI feature level 1 */ - ANEURALNETWORKS_FEATURE_LEVEL_1 = 27, - /** NNAPI specification available in Android P, Android NNAPI feature level 2 */ - ANEURALNETWORKS_FEATURE_LEVEL_2 = 28, - /** NNAPI specification available in Android Q, Android NNAPI feature level 3 */ - ANEURALNETWORKS_FEATURE_LEVEL_3 = 29, - /** NNAPI specification available in Android R, Android NNAPI feature level 4 */ - ANEURALNETWORKS_FEATURE_LEVEL_4 = 30, - /** - * NNAPI specification available in Android S, Android NNAPI feature level 5. - * After Android S, the NNAPI specification can be updated between Android - * API releases. - */ - ANEURALNETWORKS_FEATURE_LEVEL_5 = 31, -} FeatureLevelCode; - -/** - * Result codes. - * - * <p>Any NNAPI function can return any result code, including result codes not - * currently documented. Any value other than {@link ANEURALNETWORKS_NO_ERROR} - * indicates a failure of some kind.</p> - * - * <p>Additional information about the nature of a failure can be obtained from - * the device log after enabling NNAPI debugging by setting the debug.nn.vlog - * property to 1, e.g., by calling "adb shell setprop debug.nn.vlog 1".</p> - * - * Available since NNAPI feature level 1. - */ -typedef enum { - /** - * Operation was successful. - */ - ANEURALNETWORKS_NO_ERROR = 0, - - /** - * Failure caused by not enough available memory. - */ - ANEURALNETWORKS_OUT_OF_MEMORY = 1, - - ANEURALNETWORKS_INCOMPLETE = 2, - - /** - * Failure caused by unexpected null argument. - */ - ANEURALNETWORKS_UNEXPECTED_NULL = 3, - - /** - * Failure caused by invalid function arguments, invalid model definition, - * invalid execution definition or invalid data at execution time. - */ - ANEURALNETWORKS_BAD_DATA = 4, - - /** - * Failure caused by failed model execution. - */ - ANEURALNETWORKS_OP_FAILED = 5, - - /** - * Failure caused by object being in the wrong state. 
- */ - ANEURALNETWORKS_BAD_STATE = 6, - - /** - * Failure caused by not being able to map a file into memory. - * This may be caused by a file descriptor not being mappable, or an AHardwareBuffer - * not supported by the device. - * Mitigate by reading its content into memory. - */ - ANEURALNETWORKS_UNMAPPABLE = 7, - - /** - * Failure caused by insufficient buffer size provided to a model output. - */ - ANEURALNETWORKS_OUTPUT_INSUFFICIENT_SIZE = 8, - - /** - * Failure caused by a device not being available. - */ - ANEURALNETWORKS_UNAVAILABLE_DEVICE = 9, - - /** - * Failure because a deadline could not be met for a task, but future - * deadlines may still be met for the same task after a short delay. - * - * Available since NNAPI feature level 4. - */ - ANEURALNETWORKS_MISSED_DEADLINE_TRANSIENT = 10, - - /** - * Failure because a deadline could not be met for a task, and future - * deadlines will likely also not be met for the same task even after a - * short delay. - * - * Available since NNAPI feature level 4. - */ - ANEURALNETWORKS_MISSED_DEADLINE_PERSISTENT = 11, - - /** - * Failure because of a resource limitation within the driver, but future - * calls for the same task may still succeed after a short delay. - * - * Available since NNAPI feature level 4. - */ - ANEURALNETWORKS_RESOURCE_EXHAUSTED_TRANSIENT = 12, - - /** - * Failure because of a resource limitation within the driver, and future - * calls for the same task will likely also fail even after a short - * delay. - * - * Available since NNAPI feature level 4. - */ - ANEURALNETWORKS_RESOURCE_EXHAUSTED_PERSISTENT = 13, - - /** - * Failure indicating an object is in a dead state. - * - * Available since NNAPI feature level 4. - */ - ANEURALNETWORKS_DEAD_OBJECT = 14, -} ResultCode; - -/** - * For {@link ANeuralNetworksModel_setOperandValue}, values with a - * length smaller or equal to this will be immediately copied into - * the model. The size is in bytes. - * - * Available since NNAPI feature level 1. 
- */ -enum { ANEURALNETWORKS_MAX_SIZE_OF_IMMEDIATELY_COPIED_VALUES = 128 }; - -/** - * For {@link ANeuralNetworksCompilation_setCaching}, specify the size - * of the cache token required from the application. The size is in bytes. - * - * Available since NNAPI feature level 3. - */ -enum { ANEURALNETWORKS_BYTE_SIZE_OF_CACHE_TOKEN = 32 }; - -/** - * Different duration measurements. - * - * Durations are measured in nanoseconds. - * - * Available since NNAPI feature level 3. - */ -typedef enum { - // Execution time on hardware (not driver, which runs on host processor). - ANEURALNETWORKS_DURATION_ON_HARDWARE = 0, - // Execution time in driver (including time on hardware). Excludes overhead - // such as that of the runtime itself and the IPC needed for the runtime to - // communicate with the driver. - ANEURALNETWORKS_DURATION_IN_DRIVER = 1, - // Execution time on hardware, after all dependencies have been signaled. - // If no dependencies specified (for example, if the execution was scheduled other - // than with {@link ANeuralNetworksExecution_startComputeWithDependencies}), the - // reported time will be the same as ANEURALNETWORKS_DURATION_ON_HARDWARE. - // Available since NNAPI feature level 4. - ANEURALNETWORKS_FENCED_DURATION_ON_HARDWARE = 2, - // Execution time in driver, after all dependencies have been signaled. Excludes - // overhead such as that of the runtime itself and the IPC needed for the runtime - // to communicate with the driver. - // If no dependencies specified (for example, if the execution was scheduled other - // than with {@link ANeuralNetworksExecution_startComputeWithDependencies}), the - // reported time will be the same as ANEURALNETWORKS_DURATION_IN_DRIVER. - // Available since NNAPI feature level 4. - ANEURALNETWORKS_FENCED_DURATION_IN_DRIVER = 3, -} DurationCode; - -/** - * Relative execution priority. - * - * Available since NNAPI feature level 4. 
- */ -typedef enum { - ANEURALNETWORKS_PRIORITY_LOW = 90, - ANEURALNETWORKS_PRIORITY_MEDIUM = 100, - ANEURALNETWORKS_PRIORITY_HIGH = 110, - ANEURALNETWORKS_PRIORITY_DEFAULT = ANEURALNETWORKS_PRIORITY_MEDIUM, -} PriorityCode; - -/** - * ANeuralNetworksMemory is an opaque type that represents memory. - * - * This type is used to represent shared memory, memory mapped files, - * and similar memories. - * - * By using shared memory, a program can efficiently communicate to the - * runtime and drivers the tensors that define a model. See - * {@link ANeuralNetworksModel_setOperandValueFromMemory}. An application - * should typically create one shared memory object that contains every constant tensor - * needed to define a model. {@link ANeuralNetworksMemory_createFromFd} can be used to - * create shared memory from a file handle. - * {@link ANeuralNetworksMemory_createFromAHardwareBuffer} can be used to - * create shared memory from an AHardwareBuffer handle. - * - * Memory objects can also be used to specify the input and output arguments of - * an execution. See {@link ANeuralNetworksExecution_setInputFromMemory} - * and {@link ANeuralNetworksExecution_setOutputFromMemory}. - * - * When calling {@link ANeuralNetworksModel_setOperandValueFromMemory}, - * {@link ANeuralNetworksExecution_setInputFromMemory} and - * {@link ANeuralNetworksExecution_setOutputFromMemory}, each operand in the shared - * memory object must be aligned on a boundary of a byte size that is a multiple - * of the element type byte size, e.g., a tensor with - * {@link ANEURALNETWORKS_TENSOR_FLOAT32} type must be aligned on 4-byte boundary. - * - * It is the application's responsibility to ensure that there are no uses of - * the memory after calling {@link ANeuralNetworksMemory_free}. 
This includes - * any model which references this memory because of a call to - * {@link ANeuralNetworksModel_setOperandValueFromMemory}, any compilation - * created using such a model, any execution object or burst object created - * using such a compilation, or any execution which references this memory - * because of a call to {@link ANeuralNetworksExecution_setInputFromMemory} or - * {@link ANeuralNetworksExecution_setOutputFromMemory}. - * - * Available since NNAPI feature level 1. - * - * Starting at NNAPI feature level 4, the application may request creation of device native memory - * from {@link ANeuralNetworksMemoryDesc} to avoid potential memory copying and transformation - * overhead between executions. See also {@link ANeuralNetworksMemoryDesc} and - * {@link ANeuralNetworksMemory_createFromDesc}. - */ -typedef struct ANeuralNetworksMemory ANeuralNetworksMemory; - -/** - * ANeuralNetworksModel is an opaque type that contains a description of the - * mathematical operations that constitute the model. - * - * <p>Build the model by calling<ul> - * <li>{@link ANeuralNetworksModel_create}</li> - * <li>{@link ANeuralNetworksModel_addOperation}</li> - * <li>{@link ANeuralNetworksModel_addOperand}</li> - * </ul> - * - * This forms a graph in which each operation and operand is a node, a - * directed edge from an operand to an operation indicates that the - * operand is an input to the operation, and a directed edge from an - * operation to an operand indicates that the operand is an output - * from the operation. This graph must be acyclic. - * - * A model is completed by calling {@link ANeuralNetworksModel_finish}. - * A model is destroyed by calling {@link ANeuralNetworksModel_free}. - * - * <p>A model cannot be modified once {@link ANeuralNetworksModel_finish} - * has been called on it.</p> - * - * <p>It is the application's responsibility to make sure that only one thread - * modifies a model at a given time. 
It is however safe for more than one - * thread to use the model once {@link ANeuralNetworksModel_finish} has returned.</p> - * - * <p>It is also the application's responsibility to ensure that there are no - * other uses of the model after calling {@link ANeuralNetworksModel_free}. - * This includes any compilation, execution object or burst object created using - * the model.</p> - * - * Available since NNAPI feature level 1. - */ -typedef struct ANeuralNetworksModel ANeuralNetworksModel; - -/** - * ANeuralNetworksCompilation is an opaque type that can be used to compile - * a machine learning model. - * - * <p>To use:<ul> - * <li>Create a new compilation instance by calling the - * {@link ANeuralNetworksCompilation_create} function or - * {@link ANeuralNetworksCompilation_createForDevices}.</li> - * <li>Set any desired properties on the compilation (for example, - * {@link ANeuralNetworksCompilation_setPreference}).</li> - * <li>Optionally, set the caching signature and the cache directory on the - * compilation by calling {@link ANeuralNetworksCompilation_setCaching}.</li> - * <li>Complete the compilation with {@link ANeuralNetworksCompilation_finish}.</li> - * <li>Use the compilation as many times as needed - * with {@link ANeuralNetworksExecution_create} and - * {@link ANeuralNetworksBurst_create}.</li> - * <li>Destroy the compilation with {@link ANeuralNetworksCompilation_free} - * once all executions using the compilation have completed.</li></ul></p> - * - * A compilation is completed by calling {@link ANeuralNetworksCompilation_finish}. - * A compilation is destroyed by calling {@link ANeuralNetworksCompilation_free}. - * - * <p>A compilation cannot be modified once {@link ANeuralNetworksCompilation_finish} - * has been called on it.</p> - * - * <p>It is the application's responsibility to make sure that only - * one thread modifies a compilation at a given time. 
It is however - * safe for more than one thread to use the compilation once - * {@link ANeuralNetworksCompilation_finish} has returned.</p> - * - * <p>It is also the application's responsibility to ensure that there are no other - * uses of the compilation after calling {@link ANeuralNetworksCompilation_free}. - * This includes any execution object or burst object created using the compilation, - * or any memory descriptor with the compilation as part of one of the roles specified by - * {@link ANeuralNetworksMemoryDesc_addInputRole} or - * {@link ANeuralNetworksMemoryDesc_addOutputRole}.</p> - * - * Available since NNAPI feature level 1. - */ -typedef struct ANeuralNetworksCompilation ANeuralNetworksCompilation; - -/** - * ANeuralNetworksExecution is an opaque type that can be used to apply a machine - * learning model to a set of inputs. - * - * <p>To use:<ul> - * <li>Create a new execution instance by calling the - * {@link ANeuralNetworksExecution_create} function.</li> - * <li>Associate input buffers or memory regions to the model inputs with - * {@link ANeuralNetworksExecution_setInput} or - * {@link ANeuralNetworksExecution_setInputFromMemory}.</li> - * <li>Associate output buffers or memory regions to the model outputs with - * {@link ANeuralNetworksExecution_setOutput} or - * {@link ANeuralNetworksExecution_setOutputFromMemory}.</li> - * <li>Optionally, configure the execution with - * {@link ANeuralNetworksExecution_setLoopTimeout}, - * {@link ANeuralNetworksExecution_setMeasureTiming}, - * {@link ANeuralNetworksExecution_setReusable}, or - * {@link ANeuralNetworksExecution_setTimeout}. 
- * <li>Apply the model with one of the following:</li><ul> - * <li>Asynchronously with {@link ANeuralNetworksExecution_startCompute} - * or with {@link ANeuralNetworksExecution_startComputeWithDependencies}, - * waiting for the execution to complete with - * {@link ANeuralNetworksEvent_wait}.</li> - * <li>Synchronously with {@link ANeuralNetworksExecution_compute}.</li> - * <li>Synchronously as part of an execution burst with - * {@link ANeuralNetworksExecution_burstCompute}.</li></ul> - * If the execution has been marked as reusable, then you can - * apply the model more than once. - * <li>Destroy the execution with - * {@link ANeuralNetworksExecution_free}.</li></ul></p> - * - * <p>An output buffer or memory region must not overlap with any - * other output buffer or memory region, with an input buffer or - * memory region, or with an operand value in a memory object - * ({@link ANeuralNetworksModel_setOperandValueFromMemory}).</p> - * - * <p>An execution is in the preparation state after it is created by - * {@link ANeuralNetworksExecution_create}. An execution may only be modified in the preparation - * state. Scheduling a computation by calling {@link ANeuralNetworksExecution_burstCompute}, - * {@link ANeuralNetworksExecution_compute}, {@link ANeuralNetworksExecution_startCompute}, - * or {@link ANeuralNetworksExecution_startComputeWithDependencies} will change the state of - * the execution object to the computation state. When the computation completes, the state of - * the execution object will change from the computation state to the completed state. 
- * The computation is completed when {@link ANeuralNetworksExecution_compute}, - * {@link ANeuralNetworksExecution_burstCompute}, or {@link ANeuralNetworksEvent_wait} - * has returned.</p> - * - * <p>An execution can be applied to a model with - * {@link ANeuralNetworksExecution_burstCompute}, - * {@link ANeuralNetworksExecution_compute}, - * {@link ANeuralNetworksExecution_startCompute} or - * {@link ANeuralNetworksExecution_startComputeWithDependencies} only once. Create new - * executions to do new evaluations of the model.</p> - * - * <p>Starting at NNAPI feature level 5, the application may call - * {@link ANeuralNetworksExecution_setReusable} to set an execution to be reusable for multiple - * computations. The application may schedule and evaluate a computation again from the completed - * state of a reusable execution. The execution cannot be modified between computations.</p> - * - * <p>It is the application's responsibility to make sure that only one thread - * modifies an execution at a given time. It is however safe for more than one - * thread to use {@link ANeuralNetworksEvent_wait} at the same time.</p> - * - * <p>It is also the application's responsibility to ensure that the execution - * either has never been scheduled or has completed (i.e., that - * {@link ANeuralNetworksExecution_burstCompute}, - * {@link ANeuralNetworksExecution_compute}, or - * {@link ANeuralNetworksEvent_wait} has returned) before calling - * {@link ANeuralNetworksExecution_free}.</p>. 
- * - * <p>It is also the application's responsibility to ensure that there are no other - * uses of the execution after calling {@link ANeuralNetworksExecution_free}.</p> - * - * <p>It is the application's responsibility to ensure that there are no concurrent computations - * scheduled and evaluated on the same execution, either by means of - * {@link ANeuralNetworksExecution_compute} or - * {@link ANeuralNetworksExecution_burstCompute} (which are synchronous) - * in different threads, or by means of - * {@link ANeuralNetworksExecution_startCompute} or - * {@link ANeuralNetworksExecution_startComputeWithDependencies} (which are asynchronous). - * It is however safe to schedule and evaluate multiple computations on different executions - * concurrently. (Concurrent uses of {@link ANeuralNetworksExecution_burstCompute} must be on - * different burst objects.) The runtime makes no guarantee on the ordering of - * completion of executions. If it's important to the application, the - * application should enforce the ordering by ensuring that one execution - * completes before the next is scheduled (for example, by scheduling all - * executions synchronously within a single thread, or by scheduling all - * executions asynchronously and using {@link ANeuralNetworksEvent_wait} between - * calls to {@link ANeuralNetworksExecution_startCompute}); or by using - * {@link ANeuralNetworksExecution_startComputeWithDependencies} to make the execution wait for a - * list of events to be signaled before starting the actual evaluation.</p> - * - * Available since NNAPI feature level 1. - */ -typedef struct ANeuralNetworksExecution ANeuralNetworksExecution; - -/** - * Parameters for ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL operand. - */ -typedef struct ANeuralNetworksSymmPerChannelQuantParams { - /** The index of the channel dimension. */ - uint32_t channelDim; - /** The size of the scale array. Should be equal to dimension[channelDim] of the Operand. 
*/ - uint32_t scaleCount; - /** The array of scaling values for each channel. Each value must be greater than zero. */ - const float* scales; -} ANeuralNetworksSymmPerChannelQuantParams; - -/** - * ANeuralNetworksBurst is an opaque type that can be used to reduce the latency - * of a rapid sequence of executions. It will likely cause overhead if only used - * for a single execution. - * - * ANeuralNetworksBurst serves as a context object for any number of inferences - * using {@link ANeuralNetworksExecution} objects. An ANeuralNetworksBurst - * object and the {@link ANeuralNetworksExecution} objects used with it must all - * have been created from the same {@link ANeuralNetworksCompilation} object. - * - * This object is also used as a hint to drivers, providing insight to the - * lifetime of a rapid sequence of executions. For example, a driver may choose - * to increase the clock frequency of its accelerator for the lifetime of a - * burst object. - * - * <p>To use:<ul> - * <li>Create a new burst object by calling the - * {@link ANeuralNetworksBurst_create} function.</li> - * <li>For each execution:</li><ul> - * <li>Create {@link ANeuralNetworksExecution} and configure its - * properties (see {@link ANeuralNetworksExecution} for details).</li> - * <li>Apply the model synchronously with - * {@link ANeuralNetworksExecution_burstCompute}, reusing the same - * {@link ANeuralNetworksBurst} with the new - * {@link ANeuralNetworksExecution}.</li> - * <li>Use and free the {@link ANeuralNetworksExecution}.</li></ul> - * <li>Destroy the burst with - * {@link ANeuralNetworksBurst_free}.</li></ul></p> - * - * Available since NNAPI feature level 3. - */ -typedef struct ANeuralNetworksBurst ANeuralNetworksBurst; - -/** - * ANeuralNetworksOperandType describes the type of an operand. - * - * This structure is used to describe both scalars and tensors. - * - * A tensor operand type with all dimensions specified is "fully - * specified". 
Whenever possible (i.e., whenever the dimensions are - * known at model construction time), a tensor operand type should be - * (but is not required to be) fully specified, in order to enable the - * best possible performance. - * - * If a tensor operand's type is not fully specified, the dimensions - * of the operand are deduced from the operand types and values of the - * operation for which that operand is an output or from the corresponding - * {@link ANEURALNETWORKS_IF} or {@link ANEURALNETWORKS_WHILE} operation input - * operand type in the case of referenced model input operands. - * - * <p>In the following situations, a tensor operand type must be fully - * specified:<ul> - * <li>The operand has a constant value, set by - * {@link ANeuralNetworksModel_setOperandValue} (with a - * non-nullptr buffer) or - * {@link ANeuralNetworksModel_setOperandValueFromMemory}.</li> - * <li>The operand is a model input (see - * {@link ANeuralNetworksModel_identifyInputsAndOutputs}) of the main - * model within a compilation. A fully specified tensor operand type - * must either be provided to {@link ANeuralNetworksModel_addOperand}; - * or it must be provided to the corresponding - * {@link ANeuralNetworksExecution_setInput}, or - * {@link ANeuralNetworksExecution_setInputFromMemory}. - * EXCEPTION: If the input is optional and omitted - * (by passing nullptr for buffer to - * {@link ANeuralNetworksExecution_setInput}) then it need - * not have a fully specified tensor operand type.</li> - * <li>The operand is a model output (see - * {@link ANeuralNetworksModel_identifyInputsAndOutputs}) of the main - * model within a compilation and is to be used with {@link - * ANeuralNetworksExecution_startComputeWithDependencies}. 
- * A fully specified tensor operand type must either be provided - * to {@link ANeuralNetworksModel_addOperand}; or it must be - * provided to the corresponding - * {@link ANeuralNetworksExecution_setOutput}, or - * {@link ANeuralNetworksExecution_setOutputFromMemory}.</li></ul> - * - * A tensor operand type of specified rank but some number of - * unspecified dimensions is represented by setting dimensionCount to - * the rank and each unspecified dimension to 0. - * - * Available since NNAPI feature level 1. - * - * Starting at NNAPI feature level 3, a tensor operand type of unspecified rank is - * represented by setting dimensionCount to 0 and dimensions to NULL (just as if - * it were a scalar operand type). - */ -typedef struct ANeuralNetworksOperandType { - /** - * The data type, e.g ANEURALNETWORKS_FLOAT32. - */ - int32_t type; - - /** - * The number of dimensions (rank). - * - * Must be 0 for scalars. - */ - uint32_t dimensionCount; - - /** - * The dimensions of the tensor. - * - * Must be nullptr for scalars. - */ - const uint32_t* dimensions; - - /** - * The quantization scale. - * - * Must be 0 when not applicable to an operand type. - * - * See {@link OperandCode}. - */ - float scale; - - /** - * The quantization zero point. - * - * Must be 0 when not applicable to an operand type. - * - * See {@link OperandCode}. - */ - int32_t zeroPoint; -} ANeuralNetworksOperandType; - -/** - * Aliasing to {@link OperationCode}, used in function - * {@link ANeuralNetworksModel_addOperation}. - */ -typedef int32_t ANeuralNetworksOperationType; - -/** - * ANeuralNetworksEvent is an opaque type that represents an event - * that will be signaled once an execution completes. - * - * Available since NNAPI feature level 1. - */ -typedef struct ANeuralNetworksEvent ANeuralNetworksEvent; - -/** - * ANeuralNetworksDevice is an opaque type that represents a device. 
- * - * This type is used to query basic properties and supported operations of the corresponding - * device, and control which device(s) a model is to be run on. - * - * Available since NNAPI feature level 3. - */ -typedef struct ANeuralNetworksDevice ANeuralNetworksDevice; - -/** - * ANeuralNetworksMemoryDesc is an opaque type that represents a memory descriptor. - * - * A memory descriptor describes the properties of a memory object, and is used by - * {@link ANeuralNetworksMemory_createFromDesc}. - * - * To use: - * - Create a new memory descriptor by calling {@link ANeuralNetworksMemoryDesc_create}. - * - Specify all of the intended input and output roles by calling - * {@link ANeuralNetworksMemoryDesc_addInputRole} and - * {@link ANeuralNetworksMemoryDesc_addOutputRole}. - * - Optionally, specify the memory dimensions by calling - * {@link ANeuralNetworksMemoryDesc_setDimensions}. - * - Complete the memory descriptor with {@link ANeuralNetworksMemoryDesc_finish}. - * - Use the memory descriptor as many times as needed with - * {@link ANeuralNetworksMemory_createFromDesc}. - * - Destroy the memory descriptor with {@link ANeuralNetworksMemoryDesc_free}. - * - * A memory descriptor is completed by calling {@link ANeuralNetworksMemoryDesc_finish}. - * A memory descriptor is destroyed by calling {@link ANeuralNetworksMemoryDesc_free}. - * - * A memory descriptor must not be modified once {@link ANeuralNetworksMemoryDesc_finish} - * has been called on it. - * - * It is the application's responsibility to make sure that only - * one thread modifies a memory descriptor at a given time. It is however - * safe for more than one thread to use the memory descriptor once - * {@link ANeuralNetworksMemoryDesc_finish} has returned. - * - * It is also the application's responsibility to ensure that there are no other - * uses of the memory descriptor after calling {@link ANeuralNetworksMemoryDesc_free}. 
- * It is however safe to continue using a {@link ANeuralNetworksMemory} object created - * from the memory descriptor. - * - * Available since NNAPI feature level 4. - */ -typedef struct ANeuralNetworksMemoryDesc ANeuralNetworksMemoryDesc; - -__END_DECLS - -#endif // ANDROID_FRAMEWORKS_ML_NN_RUNTIME_NEURAL_NETWORKS_TYPES_H - -/** @} */
diff --git a/runtime/include/NeuralNetworksWrapper.h b/runtime/include/NeuralNetworksWrapper.h index b4e6d84..e9ac2a3 100644 --- a/runtime/include/NeuralNetworksWrapper.h +++ b/runtime/include/NeuralNetworksWrapper.h
@@ -19,21 +19,15 @@ #ifndef ANDROID_FRAMEWORKS_ML_NN_RUNTIME_NEURAL_NETWORKS_WRAPPER_H #define ANDROID_FRAMEWORKS_ML_NN_RUNTIME_NEURAL_NETWORKS_WRAPPER_H -#include <assert.h> -#include <math.h> +#include "NeuralNetworks.h" +#include <math.h> #include <algorithm> #include <optional> #include <string> #include <utility> #include <vector> -#ifdef NNTEST_SLTS -#include "SupportLibrary.h" -#else -#include "NeuralNetworks.h" -#endif - namespace android { namespace nn { namespace wrapper { @@ -53,7 +47,6 @@ TENSOR_QUANT8_SYMM_PER_CHANNEL = ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL, TENSOR_QUANT16_ASYMM = ANEURALNETWORKS_TENSOR_QUANT16_ASYMM, TENSOR_QUANT8_SYMM = ANEURALNETWORKS_TENSOR_QUANT8_SYMM, - TENSOR_QUANT8_ASYMM_SIGNED = ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED, MODEL = ANEURALNETWORKS_MODEL, }; @@ -63,13 +56,6 @@ PREFER_SUSTAINED_SPEED = ANEURALNETWORKS_PREFER_SUSTAINED_SPEED }; -enum class Duration { - ON_HARDWARE = ANEURALNETWORKS_DURATION_ON_HARDWARE, - IN_DRIVER = ANEURALNETWORKS_DURATION_IN_DRIVER, - FENCED_ON_HARDWARE = ANEURALNETWORKS_FENCED_DURATION_ON_HARDWARE, - FENCED_IN_DRIVER = ANEURALNETWORKS_FENCED_DURATION_IN_DRIVER, -}; - enum class ExecutePriority { LOW = ANEURALNETWORKS_PRIORITY_LOW, MEDIUM = ANEURALNETWORKS_PRIORITY_MEDIUM, @@ -90,9 +76,6 @@ UNAVAILABLE_DEVICE = ANEURALNETWORKS_UNAVAILABLE_DEVICE, MISSED_DEADLINE_TRANSIENT = ANEURALNETWORKS_MISSED_DEADLINE_TRANSIENT, MISSED_DEADLINE_PERSISTENT = ANEURALNETWORKS_MISSED_DEADLINE_PERSISTENT, - - // Functionality needed for this feature is not available on the current device. - FEATURE_LEVEL_TOO_LOW = 100001, }; struct SymmPerChannelQuantParams { @@ -168,50 +151,22 @@ .zeroPoint = 0, }; } - - void updateDimensions(std::vector<uint32_t> ndim) { - dimensions = ndim; - operandType.dimensions = dimensions.size() > 0 ? 
dimensions.data() : nullptr; - } }; -#ifdef NNTEST_SLTS -#define NNAPI_CALL(apiCall) mNnApi->apiCall -#else -#define NNAPI_CALL(apiCall) apiCall -#endif - class Memory { public: -#ifdef NNTEST_SLTS - // Takes ownership of a ANeuralNetworksMemory - Memory(const NnApiSupportLibrary* nnapi, ANeuralNetworksMemory* memory) - : mNnApi(nnapi), mMemory(memory) {} - - Memory(const NnApiSupportLibrary* nnapi, size_t size, int protect, int fd, size_t offset) - : mNnApi(nnapi) { -#else Memory(size_t size, int protect, int fd, size_t offset) { -#endif - mValid = NNAPI_CALL(ANeuralNetworksMemory_createFromFd( - size, protect, fd, offset, &mMemory)) == ANEURALNETWORKS_NO_ERROR; - } - -#ifdef NNTEST_SLTS - Memory(const NnApiSupportLibrary* nnapi, AHardwareBuffer* buffer) : mNnApi(nnapi) { -#else - Memory(AHardwareBuffer* buffer) { -#endif - mValid = NNAPI_CALL(ANeuralNetworksMemory_createFromAHardwareBuffer(buffer, &mMemory)) == + mValid = ANeuralNetworksMemory_createFromFd(size, protect, fd, offset, &mMemory) == ANEURALNETWORKS_NO_ERROR; } - ~Memory() { - if (mMemory) { - NNAPI_CALL(ANeuralNetworksMemory_free(mMemory)); - } + Memory(AHardwareBuffer* buffer) { + mValid = ANeuralNetworksMemory_createFromAHardwareBuffer(buffer, &mMemory) == + ANEURALNETWORKS_NO_ERROR; } + ~Memory() { ANeuralNetworksMemory_free(mMemory); } + // Disallow copy semantics to ensure the runtime object can only be freed // once. Copy semantics could be enabled if some sort of reference counting // or deep-copy system for runtime objects is added later. 
@@ -224,9 +179,7 @@ Memory(Memory&& other) { *this = std::move(other); } Memory& operator=(Memory&& other) { if (this != &other) { - if (mMemory) { - NNAPI_CALL(ANeuralNetworksMemory_free(mMemory)); - } + ANeuralNetworksMemory_free(mMemory); mMemory = other.mMemory; mValid = other.mValid; other.mMemory = nullptr; @@ -239,28 +192,17 @@ bool isValid() const { return mValid; } private: -#ifdef NNTEST_SLTS - const NnApiSupportLibrary* mNnApi = nullptr; -#endif ANeuralNetworksMemory* mMemory = nullptr; bool mValid = true; }; class Model { public: -#ifdef NNTEST_SLTS - Model(const NnApiSupportLibrary* nnapi) : mNnApi(nnapi) { -#else Model() { -#endif // TODO handle the value returned by this call - NNAPI_CALL(ANeuralNetworksModel_create(&mModel)); + ANeuralNetworksModel_create(&mModel); } - ~Model() { - if (mModel) { - NNAPI_CALL(ANeuralNetworksModel_free(mModel)); - } - } + ~Model() { ANeuralNetworksModel_free(mModel); } // Disallow copy semantics to ensure the runtime object can only be freed // once. 
Copy semantics could be enabled if some sort of reference counting @@ -274,9 +216,7 @@ Model(Model&& other) { *this = std::move(other); } Model& operator=(Model&& other) { if (this != &other) { - if (mModel) { - NNAPI_CALL(ANeuralNetworksModel_free(mModel)); - } + ANeuralNetworksModel_free(mModel); mModel = other.mModel; mNextOperandId = other.mNextOperandId; mValid = other.mValid; @@ -289,7 +229,7 @@ Result finish() { if (mValid) { - auto result = static_cast<Result>(NNAPI_CALL(ANeuralNetworksModel_finish(mModel))); + auto result = static_cast<Result>(ANeuralNetworksModel_finish(mModel)); if (result != Result::NO_ERROR) { mValid = false; } @@ -300,13 +240,13 @@ } uint32_t addOperand(const OperandType* type) { - if (NNAPI_CALL(ANeuralNetworksModel_addOperand(mModel, &(type->operandType))) != + if (ANeuralNetworksModel_addOperand(mModel, &(type->operandType)) != ANEURALNETWORKS_NO_ERROR) { mValid = false; } if (type->channelQuant) { - if (NNAPI_CALL(ANeuralNetworksModel_setOperandSymmPerChannelQuantParams( - mModel, mNextOperandId, &type->channelQuant.value().params)) != + if (ANeuralNetworksModel_setOperandSymmPerChannelQuantParams( + mModel, mNextOperandId, &type->channelQuant.value().params) != ANEURALNETWORKS_NO_ERROR) { mValid = false; } @@ -315,7 +255,7 @@ } void setOperandValue(uint32_t index, const void* buffer, size_t length) { - if (NNAPI_CALL(ANeuralNetworksModel_setOperandValue(mModel, index, buffer, length)) != + if (ANeuralNetworksModel_setOperandValue(mModel, index, buffer, length) != ANEURALNETWORKS_NO_ERROR) { mValid = false; } @@ -323,33 +263,32 @@ void setOperandValueFromMemory(uint32_t index, const Memory* memory, uint32_t offset, size_t length) { - if (NNAPI_CALL(ANeuralNetworksModel_setOperandValueFromMemory( - mModel, index, memory->get(), offset, length)) != ANEURALNETWORKS_NO_ERROR) { + if (ANeuralNetworksModel_setOperandValueFromMemory(mModel, index, memory->get(), offset, + length) != ANEURALNETWORKS_NO_ERROR) { mValid = false; } } void 
addOperation(ANeuralNetworksOperationType type, const std::vector<uint32_t>& inputs, const std::vector<uint32_t>& outputs) { - if (NNAPI_CALL(ANeuralNetworksModel_addOperation( - mModel, type, static_cast<uint32_t>(inputs.size()), inputs.data(), - static_cast<uint32_t>(outputs.size()), outputs.data())) != - ANEURALNETWORKS_NO_ERROR) { + if (ANeuralNetworksModel_addOperation(mModel, type, static_cast<uint32_t>(inputs.size()), + inputs.data(), static_cast<uint32_t>(outputs.size()), + outputs.data()) != ANEURALNETWORKS_NO_ERROR) { mValid = false; } } void identifyInputsAndOutputs(const std::vector<uint32_t>& inputs, const std::vector<uint32_t>& outputs) { - if (NNAPI_CALL(ANeuralNetworksModel_identifyInputsAndOutputs( + if (ANeuralNetworksModel_identifyInputsAndOutputs( mModel, static_cast<uint32_t>(inputs.size()), inputs.data(), - static_cast<uint32_t>(outputs.size()), outputs.data())) != - ANEURALNETWORKS_NO_ERROR) { + static_cast<uint32_t>(outputs.size()), + outputs.data()) != ANEURALNETWORKS_NO_ERROR) { mValid = false; } } void relaxComputationFloat32toFloat16(bool isRelax) { - if (NNAPI_CALL(ANeuralNetworksModel_relaxComputationFloat32toFloat16(mModel, isRelax)) == + if (ANeuralNetworksModel_relaxComputationFloat32toFloat16(mModel, isRelax) == ANEURALNETWORKS_NO_ERROR) { mRelaxed = isRelax; } @@ -359,11 +298,6 @@ bool isValid() const { return mValid; } bool isRelaxed() const { return mRelaxed; } -#ifdef NNTEST_SLTS - private: - const NnApiSupportLibrary* mNnApi = nullptr; -#endif - protected: ANeuralNetworksModel* mModel = nullptr; // We keep track of the operand ID as a convenience to the caller. 
@@ -374,22 +308,8 @@ class Event { public: -#ifdef NNTEST_SLTS - Event(const NnApiSupportLibrary* nnapi) : mNnApi(nnapi) {} - Event(const NnApiSupportLibrary* nnapi, int syncFd) : mNnApi(nnapi) { -#else Event() {} - Event(int syncFd) { -#endif - mValid = NNAPI_CALL(ANeuralNetworksEvent_createFromSyncFenceFd(syncFd, &mEvent)) == - ANEURALNETWORKS_NO_ERROR; - } - - ~Event() { - if (mEvent) { - NNAPI_CALL(ANeuralNetworksEvent_free(mEvent)); - } - } + ~Event() { ANeuralNetworksEvent_free(mEvent); } // Disallow copy semantics to ensure the runtime object can only be freed // once. Copy semantics could be enabled if some sort of reference counting @@ -403,79 +323,38 @@ Event(Event&& other) { *this = std::move(other); } Event& operator=(Event&& other) { if (this != &other) { - if (mEvent) { - NNAPI_CALL(ANeuralNetworksEvent_free(mEvent)); - } -#ifdef NNTEST_SLTS - mNnApi = other.mNnApi; -#endif + ANeuralNetworksEvent_free(mEvent); mEvent = other.mEvent; other.mEvent = nullptr; } return *this; } - Result wait() { return static_cast<Result>(NNAPI_CALL(ANeuralNetworksEvent_wait(mEvent))); } + Result wait() { return static_cast<Result>(ANeuralNetworksEvent_wait(mEvent)); } // Only for use by Execution void set(ANeuralNetworksEvent* newEvent) { - if (mEvent) { - NNAPI_CALL(ANeuralNetworksEvent_free(mEvent)); - } + ANeuralNetworksEvent_free(mEvent); mEvent = newEvent; } // Only for use by Execution ANeuralNetworksEvent* getHandle() const { return mEvent; } - Result getSyncFenceFd(int* sync_fence_fd) { - return static_cast<Result>( - NNAPI_CALL(ANeuralNetworksEvent_getSyncFenceFd(mEvent, sync_fence_fd))); - } - - bool isValid() const { return mValid; } - -#ifdef NNTEST_SLTS private: - const NnApiSupportLibrary* mNnApi = nullptr; -#endif - - private: - bool mValid = true; ANeuralNetworksEvent* mEvent = nullptr; }; class Compilation { public: -#ifdef NNTEST_SLTS - // On success, createForDevice(s) will return Result::NO_ERROR and the created compilation; - // otherwise, it will 
return the error code and Compilation object wrapping a nullptr handle. - static std::pair<Result, Compilation> createForDevice(const NnApiSupportLibrary* nnapi, - const Model* model, - const ANeuralNetworksDevice* device) { - return createForDevices(nnapi, model, {device}); - } - static std::pair<Result, Compilation> createForDevices( - const NnApiSupportLibrary* nnapi, const Model* model, - const std::vector<const ANeuralNetworksDevice*>& devices) { - ANeuralNetworksCompilation* compilation = nullptr; - const Result result = - static_cast<Result>(nnapi->ANeuralNetworksCompilation_createForDevices( - model->getHandle(), devices.empty() ? nullptr : devices.data(), - devices.size(), &compilation)); - return {result, Compilation(nnapi, compilation)}; - } -#else Compilation(const Model* model) { - int result = - NNAPI_CALL(ANeuralNetworksCompilation_create(model->getHandle(), &mCompilation)); + int result = ANeuralNetworksCompilation_create(model->getHandle(), &mCompilation); if (result != 0) { // TODO Handle the error } } -#endif - ~Compilation() { NNAPI_CALL(ANeuralNetworksCompilation_free(mCompilation)); } + ~Compilation() { ANeuralNetworksCompilation_free(mCompilation); } // Disallow copy semantics to ensure the runtime object can only be freed // once. 
Copy semantics could be enabled if some sort of reference counting @@ -489,7 +368,7 @@ Compilation(Compilation&& other) { *this = std::move(other); } Compilation& operator=(Compilation&& other) { if (this != &other) { - NNAPI_CALL(ANeuralNetworksCompilation_free(mCompilation)); + ANeuralNetworksCompilation_free(mCompilation); mCompilation = other.mCompilation; other.mCompilation = nullptr; } @@ -497,102 +376,41 @@ } Result setPreference(ExecutePreference preference) { - return static_cast<Result>(NNAPI_CALL(ANeuralNetworksCompilation_setPreference( - mCompilation, static_cast<int32_t>(preference)))); + return static_cast<Result>(ANeuralNetworksCompilation_setPreference( + mCompilation, static_cast<int32_t>(preference))); } Result setPriority(ExecutePriority priority) { - return static_cast<Result>(NNAPI_CALL(ANeuralNetworksCompilation_setPriority( - mCompilation, static_cast<int32_t>(priority)))); + return static_cast<Result>(ANeuralNetworksCompilation_setPriority( + mCompilation, static_cast<int32_t>(priority))); } Result setCaching(const std::string& cacheDir, const std::vector<uint8_t>& token) { if (token.size() != ANEURALNETWORKS_BYTE_SIZE_OF_CACHE_TOKEN) { return Result::BAD_DATA; } - return static_cast<Result>(NNAPI_CALL(ANeuralNetworksCompilation_setCaching( - mCompilation, cacheDir.c_str(), token.data()))); + return static_cast<Result>(ANeuralNetworksCompilation_setCaching( + mCompilation, cacheDir.c_str(), token.data())); } - Result finish() { - return static_cast<Result>(NNAPI_CALL(ANeuralNetworksCompilation_finish(mCompilation))); - } - - Result getPreferredMemoryAlignmentForInput(uint32_t index, uint32_t* alignment) const { - if (__builtin_available(android __NNAPI_FL5_MIN_ANDROID_API__, *)) { - return static_cast<Result>( - NNAPI_CALL(ANeuralNetworksCompilation_getPreferredMemoryAlignmentForInput( - mCompilation, index, alignment))); - } else { - return Result::FEATURE_LEVEL_TOO_LOW; - } - }; - - Result getPreferredMemoryPaddingForInput(uint32_t index, 
uint32_t* padding) const { - if (__builtin_available(android __NNAPI_FL5_MIN_ANDROID_API__, *)) { - return static_cast<Result>( - NNAPI_CALL(ANeuralNetworksCompilation_getPreferredMemoryPaddingForInput( - mCompilation, index, padding))); - } else { - return Result::FEATURE_LEVEL_TOO_LOW; - } - }; - - Result getPreferredMemoryAlignmentForOutput(uint32_t index, uint32_t* alignment) const { - if (__builtin_available(android __NNAPI_FL5_MIN_ANDROID_API__, *)) { - return static_cast<Result>( - NNAPI_CALL(ANeuralNetworksCompilation_getPreferredMemoryAlignmentForOutput( - mCompilation, index, alignment))); - } else { - return Result::FEATURE_LEVEL_TOO_LOW; - } - }; - - Result getPreferredMemoryPaddingForOutput(uint32_t index, uint32_t* padding) const { - if (__builtin_available(android __NNAPI_FL5_MIN_ANDROID_API__, *)) { - return static_cast<Result>( - NNAPI_CALL(ANeuralNetworksCompilation_getPreferredMemoryPaddingForOutput( - mCompilation, index, padding))); - } else { - return Result::FEATURE_LEVEL_TOO_LOW; - } - }; + Result finish() { return static_cast<Result>(ANeuralNetworksCompilation_finish(mCompilation)); } ANeuralNetworksCompilation* getHandle() const { return mCompilation; } -#ifdef NNTEST_SLTS - protected: - // Takes the ownership of ANeuralNetworksCompilation. 
- Compilation(const NnApiSupportLibrary* nnapi, ANeuralNetworksCompilation* compilation) - : mNnApi(nnapi), mCompilation(compilation) {} - private: - const NnApiSupportLibrary* mNnApi = nullptr; -#else - private: -#endif ANeuralNetworksCompilation* mCompilation = nullptr; }; class Execution { public: -#ifdef NNTEST_SLTS - Execution(const NnApiSupportLibrary* nnapi, const Compilation* compilation) : mNnApi(nnapi) { -#else Execution(const Compilation* compilation) { -#endif - int result = - NNAPI_CALL(ANeuralNetworksExecution_create(compilation->getHandle(), &mExecution)); + int result = ANeuralNetworksExecution_create(compilation->getHandle(), &mExecution); if (result != 0) { // TODO Handle the error } } - ~Execution() { - if (mExecution) { - NNAPI_CALL(ANeuralNetworksExecution_free(mExecution)); - } - } + ~Execution() { ANeuralNetworksExecution_free(mExecution); } // Disallow copy semantics to ensure the runtime object can only be freed // once. Copy semantics could be enabled if some sort of reference counting @@ -606,9 +424,7 @@ Execution(Execution&& other) { *this = std::move(other); } Execution& operator=(Execution&& other) { if (this != &other) { - if (mExecution) { - NNAPI_CALL(ANeuralNetworksExecution_free(mExecution)); - } + ANeuralNetworksExecution_free(mExecution); mExecution = other.mExecution; other.mExecution = nullptr; } @@ -617,51 +433,31 @@ Result setInput(uint32_t index, const void* buffer, size_t length, const ANeuralNetworksOperandType* type = nullptr) { - return static_cast<Result>(NNAPI_CALL( - ANeuralNetworksExecution_setInput(mExecution, index, type, buffer, length))); + return static_cast<Result>( + ANeuralNetworksExecution_setInput(mExecution, index, type, buffer, length)); } Result setInputFromMemory(uint32_t index, const Memory* memory, uint32_t offset, uint32_t length, const ANeuralNetworksOperandType* type = nullptr) { - return static_cast<Result>(NNAPI_CALL(ANeuralNetworksExecution_setInputFromMemory( - mExecution, index, type, 
memory->get(), offset, length))); + return static_cast<Result>(ANeuralNetworksExecution_setInputFromMemory( + mExecution, index, type, memory->get(), offset, length)); } Result setOutput(uint32_t index, void* buffer, size_t length, const ANeuralNetworksOperandType* type = nullptr) { - return static_cast<Result>(NNAPI_CALL( - ANeuralNetworksExecution_setOutput(mExecution, index, type, buffer, length))); + return static_cast<Result>( + ANeuralNetworksExecution_setOutput(mExecution, index, type, buffer, length)); } Result setOutputFromMemory(uint32_t index, const Memory* memory, uint32_t offset, uint32_t length, const ANeuralNetworksOperandType* type = nullptr) { - return static_cast<Result>(NNAPI_CALL(ANeuralNetworksExecution_setOutputFromMemory( - mExecution, index, type, memory->get(), offset, length))); + return static_cast<Result>(ANeuralNetworksExecution_setOutputFromMemory( + mExecution, index, type, memory->get(), offset, length)); } - Result enableInputAndOutputPadding(bool enable) { - if (__builtin_available(android __NNAPI_FL5_MIN_ANDROID_API__, *)) { - return static_cast<Result>(NNAPI_CALL( - ANeuralNetworksExecution_enableInputAndOutputPadding(mExecution, enable))); - } else { - return Result::FEATURE_LEVEL_TOO_LOW; - } - } - - Result setReusable(bool reusable) { - if (__builtin_available(android __NNAPI_FL5_MIN_ANDROID_API__, *)) { - return static_cast<Result>( - NNAPI_CALL(ANeuralNetworksExecution_setReusable(mExecution, reusable))); - } else { - return Result::FEATURE_LEVEL_TOO_LOW; - } - } - -#ifndef NNTEST_SLTS Result startCompute(Event* event) { ANeuralNetworksEvent* ev = nullptr; - Result result = static_cast<Result>( - NNAPI_CALL(ANeuralNetworksExecution_startCompute(mExecution, &ev))); + Result result = static_cast<Result>(ANeuralNetworksExecution_startCompute(mExecution, &ev)); event->set(ev); return result; } @@ -672,36 +468,29 @@ std::transform(dependencies.begin(), dependencies.end(), deps.begin(), [](const Event* e) { return e->getHandle(); 
}); ANeuralNetworksEvent* ev = nullptr; - Result result = static_cast<Result>( - NNAPI_CALL(ANeuralNetworksExecution_startComputeWithDependencies( - mExecution, deps.data(), deps.size(), duration, &ev))); + Result result = static_cast<Result>(ANeuralNetworksExecution_startComputeWithDependencies( + mExecution, deps.data(), deps.size(), duration, &ev)); event->set(ev); return result; } -#endif - Result compute() { - return static_cast<Result>(NNAPI_CALL(ANeuralNetworksExecution_compute(mExecution))); - } + Result compute() { return static_cast<Result>(ANeuralNetworksExecution_compute(mExecution)); } Result getOutputOperandDimensions(uint32_t index, std::vector<uint32_t>* dimensions) { uint32_t rank = 0; - Result result = static_cast<Result>(NNAPI_CALL( - ANeuralNetworksExecution_getOutputOperandRank(mExecution, index, &rank))); + Result result = static_cast<Result>( + ANeuralNetworksExecution_getOutputOperandRank(mExecution, index, &rank)); dimensions->resize(rank); if ((result != Result::NO_ERROR && result != Result::OUTPUT_INSUFFICIENT_SIZE) || rank == 0) { return result; } - result = static_cast<Result>(NNAPI_CALL(ANeuralNetworksExecution_getOutputOperandDimensions( - mExecution, index, dimensions->data()))); + result = static_cast<Result>(ANeuralNetworksExecution_getOutputOperandDimensions( + mExecution, index, dimensions->data())); return result; } private: -#ifdef NNTEST_SLTS - const NnApiSupportLibrary* mNnApi = nullptr; -#endif ANeuralNetworksExecution* mExecution = nullptr; };
diff --git a/runtime/include/NeuralNetworksWrapperExtensions.h b/runtime/include/NeuralNetworksWrapperExtensions.h index 967efef..524e8da 100644 --- a/runtime/include/NeuralNetworksWrapperExtensions.h +++ b/runtime/include/NeuralNetworksWrapperExtensions.h
@@ -17,13 +17,11 @@ #ifndef ANDROID_FRAMEWORKS_ML_NN_RUNTIME_NEURAL_NETWORKS_WRAPPER_EXTENSIONS_H #define ANDROID_FRAMEWORKS_ML_NN_RUNTIME_NEURAL_NETWORKS_WRAPPER_EXTENSIONS_H -#include <utility> -#include <variant> -#include <vector> - #include "NeuralNetworksExtensions.h" #include "NeuralNetworksWrapper.h" +#include <variant> + namespace android { namespace nn { namespace extension_wrapper {
diff --git a/runtime/libneuralnetworks.map.txt b/runtime/libneuralnetworks.map.txt index 5314634..31cabcb 100644 --- a/runtime/libneuralnetworks.map.txt +++ b/runtime/libneuralnetworks.map.txt
@@ -24,7 +24,6 @@ ANeuralNetworks_getDeviceCount; # introduced=Q ANeuralNetworks_getDevice; # introduced=Q ANeuralNetworks_getMaximumLoopTimeout; # introduced=30 - ANeuralNetworks_getRuntimeFeatureLevel; # introduced=31 ANeuralNetworksDevice_getName; # introduced=Q ANeuralNetworksDevice_getType; # introduced=Q ANeuralNetworksDevice_getVersion; # introduced=Q @@ -61,16 +60,11 @@ ANeuralNetworksCompilation_finish; ANeuralNetworksCompilation_setPriority; # introduced=30 ANeuralNetworksCompilation_setTimeout; # introduced=30 - ANeuralNetworksCompilation_getPreferredMemoryAlignmentForInput; # introduced=31 - ANeuralNetworksCompilation_getPreferredMemoryPaddingForInput; # introduced=31 - ANeuralNetworksCompilation_getPreferredMemoryAlignmentForOutput; # introduced=31 - ANeuralNetworksCompilation_getPreferredMemoryPaddingForOutput; # introduced=31 ANeuralNetworksBurst_create; # introduced=Q ANeuralNetworksBurst_free; # introduced=Q ANeuralNetworksExecution_burstCompute; # introduced=Q ANeuralNetworksExecution_compute; # introduced=Q ANeuralNetworksExecution_create; - ANeuralNetworksExecution_enableInputAndOutputPadding; # introduced=31 ANeuralNetworksExecution_free; ANeuralNetworksExecution_getDuration; # introduced=Q ANeuralNetworksExecution_setInput; @@ -78,7 +72,6 @@ ANeuralNetworksExecution_setMeasureTiming; # introduced=Q ANeuralNetworksExecution_setOutput; ANeuralNetworksExecution_setOutputFromMemory; - ANeuralNetworksExecution_setReusable; # introduced=31 ANeuralNetworksExecution_startCompute; ANeuralNetworksExecution_startComputeWithDependencies; # introduced=30 ANeuralNetworksExecution_getOutputOperandDimensions; # introduced=Q
diff --git a/runtime/packageinfo/Android.bp b/runtime/packageinfo/Android.bp index 807b4b7..c9f8e9e 100644 --- a/runtime/packageinfo/Android.bp +++ b/runtime/packageinfo/Android.bp
@@ -1,14 +1,3 @@ -package { - // See: http://go/android-license-faq - // A large-scale-change added 'default_applicable_licenses' to import - // all of the 'license_kinds' from "packages_modules_NeuralNetworks_runtime_license" - // to get the below license kinds: - // SPDX-license-identifier-Apache-2.0 - default_applicable_licenses: [ - "packages_modules_NeuralNetworks_runtime_license", - ], -} - cc_library_shared { name: "libneuralnetworks_packageinfo", defaults: ["neuralnetworks_defaults"], @@ -47,6 +36,7 @@ }, visibility: [ + "//frameworks/ml/nn:__subpackages__", "//packages/modules/NeuralNetworks:__subpackages__", ], }
diff --git a/runtime/packageinfo/PackageInfo.cpp b/runtime/packageinfo/PackageInfo.cpp index 85e29a6..b986acb 100644 --- a/runtime/packageinfo/PackageInfo.cpp +++ b/runtime/packageinfo/PackageInfo.cpp
@@ -17,11 +17,9 @@ #define LOG_TAG "PackageInfo" #include "PackageInfo.h" - #include <android-base/logging.h> #include <android/content/pm/IPackageManagerNative.h> #include <binder/IServiceManager.h> - #include <string> #include <vector>
diff --git a/runtime/test/Android.bp b/runtime/test/Android.bp index 12c5667..a0b0ad1 100644 --- a/runtime/test/Android.bp +++ b/runtime/test/Android.bp
@@ -14,18 +14,6 @@ * limitations under the License. */ -package { - // See: http://go/android-license-faq - // A large-scale-change added 'default_applicable_licenses' to import - // all of the 'license_kinds' from "packages_modules_NeuralNetworks_runtime_license" - // to get the below license kinds: - // SPDX-license-identifier-Apache-2.0 - // SPDX-license-identifier-Artistic - default_applicable_licenses: [ - "packages_modules_NeuralNetworks_runtime_license", - ], -} - cc_defaults { name: "NeuralNetworksTest_default_libs", defaults: ["neuralnetworks_defaults"], @@ -37,7 +25,6 @@ "[email protected]", "libandroid", "libbase", - "libbinder_ndk", "libcutils", "libfmq", "libhidlbase", @@ -48,16 +35,13 @@ "libutils", ], static_libs: [ - "android.hardware.common-V2-ndk_platform", - "android.hardware.graphics.common-V2-ndk_platform", - "android.hardware.neuralnetworks-V1-ndk_platform", "[email protected]", "[email protected]", "[email protected]", "[email protected]", - "libaidlcommonsupport", "libc++fs", "libneuralnetworks_generated_test_harness", + "libsync", "libtextclassifier_hash_static", ], whole_static_libs: [ @@ -84,14 +68,13 @@ "TestControlFlow.cpp", "TestFree.cpp", "TestGenerated.cpp", - "TestGpuNnapi.cpp", "TestMemory.cpp", "TestNeuralNetworksWrapper.cpp", "TestOperandExtraParams.cpp", "TestTrivialModel.cpp", "TestUnknownDimensions.cpp", "TestUnspecifiedDimensions.cpp", - "TestUpdatability.cpp", + "TestValidateModel.cpp", "TestValidateOperations.cpp", "TestValidation.cpp", @@ -104,9 +87,6 @@ static_libs: [ "libgmock", ], - shared_libs: [ - "libvulkan", - ], whole_static_libs: [ "neuralnetworks_generated_V1_0_example", "neuralnetworks_generated_V1_1_example", @@ -139,7 +119,6 @@ // // b/109953668, disable OpenMP // "TestOpenmpSettings.cpp", - "PreparedModelCallback.cpp", "TestCompilationCaching.cpp", "TestCompliance.cpp", "TestExecution.cpp", @@ -151,10 +130,13 @@ "TestPartitioning.cpp", "TestPartitioningRandom.cpp", "TestRemoveDefaultArguments.cpp", + 
"TestVersionedInterfaces.cpp", "fibonacci_extension/FibonacciDriver.cpp", "fibonacci_extension/FibonacciExtensionTest.cpp", "TestMain.cpp", + + "Bridge.cpp", ], static_libs: [ "[email protected]", @@ -167,7 +149,6 @@ "libneuralnetworks_common", "libneuralnetworks_generated_test_harness", "libneuralnetworks_static", - "neuralnetworks_test_utils", ], shared_libs: [ "libcutils", @@ -205,65 +186,33 @@ "libgmock", "libneuralnetworks_common", "libneuralnetworks_static", - "neuralnetworks_types", - "neuralnetworks_utils_hal_1_0", - "neuralnetworks_utils_hal_1_1", - "neuralnetworks_utils_hal_1_2", - "neuralnetworks_utils_hal_1_3", ], shared_libs: ["libmemunreachable"], header_libs: [ "libneuralnetworks_private_headers", ], - sanitize: { - integer_overflow: true, - }, } cc_fuzz { name: "libneuralnetworks_fuzzer", - defaults: [ - "NeuralNetworksTest_default_libs", - "libneuralnetworks_fuzzer_defaults", - ], + defaults: ["NeuralNetworksTest_default_libs"], owner: "google", fuzz_config: { cc: ["[email protected]"], - // Temporarily disabled due to b/151102177. - fuzz_on_haiku_host: false, - fuzz_on_haiku_device: false, }, srcs: [ + "android_fuzzing/Converter.cpp", "android_fuzzing/FuzzTest.cpp", + "android_fuzzing/StaticAssert.cpp", ], + corpus: ["android_fuzzing/corpus/*"], + shared_libs: ["libprotobuf-cpp-full"], static_libs: [ "libneuralnetworks_common", + "libneuralnetworks_fuzzer_proto", "libneuralnetworks_generated_test_harness", "libneuralnetworks_static", - ], -} - -cc_fuzz { - name: "libneuralnetworks_driver_fuzzer", - defaults: [ - "NeuralNetworksTest_default_libs", - "libneuralnetworks_fuzzer_defaults", - ], - owner: "google", - fuzz_config: { - cc: ["[email protected]"], - // Temporarily disabled due to b/151102177. 
- fuzz_on_haiku_host: false, - fuzz_on_haiku_device: false, - }, - srcs: [ - "android_fuzzing/DriverFuzzTest.cpp", - ], - static_libs: [ - "libSampleDriver", - "libneuralnetworks_common", - "libneuralnetworks_generated_test_harness", - "libneuralnetworks_static", + "libprotobuf-mutator", ], } @@ -374,12 +323,10 @@ "TestControlFlow.cpp", "TestFree.cpp", "TestGenerated.cpp", - "TestGpuNnapi.cpp", "TestMemory.cpp", "TestNeuralNetworksWrapper.cpp", "TestTrivialModel.cpp", "TestUnknownDimensions.cpp", - "TestUpdatability.cpp", "TestValidateModel.cpp", "TestValidateOperations.cpp", "TestValidation.cpp", @@ -409,49 +356,42 @@ "libandroid", "liblog", "libneuralnetworks", - "libvulkan", ], static_libs: [ "libbase_ndk", "libgmock_ndk", "libgtest_ndk_c++", ], - min_sdk_version: "30", sdk_version: "current", - stl: "libc++_static", -} - -cc_defaults { - name: "neuralnetworks_generated_defaults", - defaults: ["neuralnetworks_float16"], - // b/146324523, NNAPI host build capability - host_supported: false, - vendor_available: true, - static_libs: ["libneuralnetworks_generated_test_harness"], + stl: "c++_static", } cc_library_static { name: "neuralnetworks_generated_V1_0_example", - defaults: ["neuralnetworks_generated_defaults"], + defaults: ["neuralnetworks_float16"], srcs: ["generated/spec_V1_0/*.example.cpp"], + static_libs: ["libneuralnetworks_generated_test_harness"], } cc_library_static { name: "neuralnetworks_generated_V1_1_example", - defaults: ["neuralnetworks_generated_defaults"], + defaults: ["neuralnetworks_float16"], srcs: ["generated/spec_V1_1/*.example.cpp"], + static_libs: ["libneuralnetworks_generated_test_harness"], } cc_library_static { name: "neuralnetworks_generated_V1_2_example", - defaults: ["neuralnetworks_generated_defaults"], + defaults: ["neuralnetworks_float16"], srcs: ["generated/spec_V1_2/*.example.cpp"], + static_libs: ["libneuralnetworks_generated_test_harness"], } cc_library_static { name: "neuralnetworks_generated_V1_3_example", - defaults: 
["neuralnetworks_generated_defaults"], + defaults: ["neuralnetworks_float16"], srcs: ["generated/spec_V1_3/*.example.cpp"], + static_libs: ["libneuralnetworks_generated_test_harness"], } cc_library_static { @@ -460,94 +400,3 @@ srcs: ["generated/spec_V1_3_cts_only/*.example.cpp"], static_libs: ["libneuralnetworks_generated_test_harness"], } - -cc_library_static { - name: "NeuralNetworksTest_random_graph", - defaults: ["neuralnetworks_float16"], - srcs: [ - ":libneuralnetworks_generated_test_harness_for_cts", - "GeneratedTestUtils.cpp", - "TestNeuralNetworksWrapper.cpp", - "fuzzing/OperationManager.cpp", - "fuzzing/RandomGraphGenerator.cpp", - "fuzzing/RandomGraphGeneratorUtils.cpp", - "fuzzing/RandomVariable.cpp", - "fuzzing/operation_signatures/*.cpp", - ], - include_dirs: [ - "packages/modules/NeuralNetworks/common/include", - "packages/modules/NeuralNetworks/runtime/", - "packages/modules/NeuralNetworks/runtime/include/", - ], - header_libs: [ - "libneuralnetworks_generated_test_harness_headers_for_cts", - ], - export_include_dirs: [ - ".", - "fuzzing", - ], - cflags: [ - "-UNDEBUG", - "-Wall", - "-Werror", - ], - shared_libs: [ - "libandroid", - "liblog", - "libneuralnetworks", - ], - static_libs: [ - "libbase_ndk", - "libgmock_ndk", - "libgtest_ndk_c++", - ], - sanitize: { - address: true, - all_undefined: true, - }, - strip: { - keep_symbols: true, - }, - sdk_version: "current", - stl: "libc++_static", -} - -cc_test { - name: "NeuralNetworksSupportLibraryTest", - srcs: [ - "GeneratedTestUtils.cpp", - "SupportLibraryTestGenerated.cpp", - "SupportLibraryTestMain.cpp", - ], - cflags: [ - "-DNNTEST_COMPUTE_MODE", - "-DNNTEST_ONLY_PUBLIC_API", - "-DNNTEST_SLTS", - "-DNN_COMPATIBILITY_LIBRARY_BUILD", - "-Wall", - "-Werror", - ], - shared_libs: [ - "libandroid", - "liblog", - ], - static_libs: [ - "libbase_ndk", - "libgmock_ndk", - "libgtest_ndk_c++", - "libneuralnetworks_generated_test_harness", - "neuralnetworks_supportlibrary_loader", - ], - whole_static_libs: [ 
- "neuralnetworks_generated_V1_0_example", - "neuralnetworks_generated_V1_1_example", - "neuralnetworks_generated_V1_2_example", - "neuralnetworks_generated_V1_3_cts_only_example", - "neuralnetworks_generated_V1_3_example", - ], - include_dirs: [ - "packages/modules/NeuralNetworks/runtime/include/", - "packages/modules/NeuralNetworks/tools/test_generator/test_harness/include", - ], - test_suites: ["general-tests"], -}
diff --git a/runtime/test/AndroidTest.xml b/runtime/test/AndroidTest.xml index c9d1a71..2815736 100644 --- a/runtime/test/AndroidTest.xml +++ b/runtime/test/AndroidTest.xml
@@ -29,6 +29,5 @@ <test class="com.android.tradefed.testtype.GTest" > <option name="native-test-device-path" value="/data/local/tmp" /> <option name="module-name" value="NeuralNetworksTest_static" /> - <option name="native-test-timeout" value="3h" /> </test> </configuration>
diff --git a/runtime/test/Bridge.cpp b/runtime/test/Bridge.cpp new file mode 100644 index 0000000..5740256 --- /dev/null +++ b/runtime/test/Bridge.cpp
@@ -0,0 +1,36 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// There are name clashes between NeuralNetworksWrapper.h and +// HalInterfaces.h. Many tests include the former; many internal +// header files (nn/runtime/*.h) include the latter. This file +// contains a few utilities for tests to call that trampoline to the +// internal headers. + +#include "GraphDump.h" +#include "ModelBuilder.h" + +namespace android { +namespace nn { +namespace bridge_tests { + +void graphDump(const char* name, const ModelBuilder* model, std::ostream* outStream) { + ::android::nn::graphDump(name, model->makeHidlModel(), outStream); +} + +} // namespace bridge_tests +} // namespace nn +} // namespace android
diff --git a/runtime/test/Bridge.h b/runtime/test/Bridge.h new file mode 100644 index 0000000..f067df0 --- /dev/null +++ b/runtime/test/Bridge.h
@@ -0,0 +1,42 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// There are name clashes between NeuralNetworksWrapper.h and +// HalInterfaces.h. Many tests include the former; many internal +// header files (nn/runtime/*.h) include the latter. This file +// contains a few utilities for tests to call that trampoline to the +// internal headers. + +#ifndef ANDROID_FRAMEWORKS_ML_NN_RUNTIME_TEST_BRIDGE_H +#define ANDROID_FRAMEWORKS_ML_NN_RUNTIME_TEST_BRIDGE_H + +#include <iostream> + +namespace android { +namespace nn { + +class ModelBuilder; + +namespace bridge_tests { + +void graphDump(const char* name, const ModelBuilder* model, std::ostream* outStream = &std::cout); + +} // namespace bridge_tests + +} // namespace nn +} // namespace android + +#endif // ANDROID_FRAMEWORKS_ML_NN_RUNTIME_TEST_BRIDGE_H
diff --git a/runtime/test/GeneratedTestUtils.cpp b/runtime/test/GeneratedTestUtils.cpp index a615a6f..abbf79a 100644 --- a/runtime/test/GeneratedTestUtils.cpp +++ b/runtime/test/GeneratedTestUtils.cpp
@@ -26,13 +26,7 @@ #include <vector> #include "TestHarness.h" - -#ifdef NNTEST_SLTS -#include <android/hardware_buffer.h> -#include "SupportLibraryWrapper.h" -#else #include "TestNeuralNetworksWrapper.h" -#endif namespace android::nn::generated_tests { using namespace test_wrapper; @@ -55,11 +49,7 @@ // A Memory object that owns AHardwareBuffer class MemoryAHWB : public Memory { public: -#ifdef NNTEST_SLTS - static std::unique_ptr<MemoryAHWB> create(const NnApiSupportLibrary* nnapi, uint32_t size) { -#else static std::unique_ptr<MemoryAHWB> create(uint32_t size) { -#endif const uint64_t usage = AHARDWAREBUFFER_USAGE_CPU_READ_OFTEN | AHARDWAREBUFFER_USAGE_CPU_WRITE_OFTEN; AHardwareBuffer_Desc desc = { @@ -77,11 +67,7 @@ EXPECT_EQ(AHardwareBuffer_lock(ahwb, usage, -1, nullptr, &buffer), 0); EXPECT_NE(buffer, nullptr); -#ifdef NNTEST_SLTS - return std::unique_ptr<MemoryAHWB>(new MemoryAHWB(nnapi, ahwb, buffer)); -#else return std::unique_ptr<MemoryAHWB>(new MemoryAHWB(ahwb, buffer)); -#endif } ~MemoryAHWB() override { @@ -92,23 +78,13 @@ void* getPointer() const { return mBuffer; } private: -#ifdef NNTEST_SLTS - MemoryAHWB(const NnApiSupportLibrary* nnapi, AHardwareBuffer* ahwb, void* buffer) - : Memory(nnapi, ahwb, false, {}), mAhwb(ahwb), mBuffer(buffer) {} -#else MemoryAHWB(AHardwareBuffer* ahwb, void* buffer) : Memory(ahwb), mAhwb(ahwb), mBuffer(buffer) {} -#endif AHardwareBuffer* mAhwb; void* mBuffer; }; -#ifdef NNTEST_SLTS -static std::unique_ptr<MemoryAHWB> createConstantReferenceMemory(const NnApiSupportLibrary* nnapi, - const TestModel& testModel) { -#else static std::unique_ptr<MemoryAHWB> createConstantReferenceMemory(const TestModel& testModel) { -#endif uint32_t size = 0; auto processSubgraph = [&size](const TestSubgraph& subgraph) { @@ -123,11 +99,7 @@ for (const TestSubgraph& subgraph : testModel.referenced) { processSubgraph(subgraph); } -#ifdef NNTEST_SLTS - return size == 0 ? nullptr : MemoryAHWB::create(nnapi, size); -#else return size == 0 ? 
nullptr : MemoryAHWB::create(size); -#endif } static void createModelFromSubgraph(const TestSubgraph& subgraph, bool testDynamicOutputShape, @@ -158,7 +130,6 @@ CHECK_LT(refIndex, refSubgraphs.size()); const TestSubgraph& refSubgraph = refSubgraphs[refIndex]; Model* refModel = &refModels[refIndex]; - if (!refModel->isFinished()) { createModelFromSubgraph(refSubgraph, testDynamicOutputShape, refSubgraphs, memory, memoryOffset, refModel, refModels); @@ -184,29 +155,12 @@ model->identifyInputsAndOutputs(subgraph.inputIndexes, subgraph.outputIndexes); } -#ifdef NNTEST_SLTS -void createModel(const NnApiSupportLibrary* nnapi, const TestModel& testModel, - bool testDynamicOutputShape, GeneratedModel* model) { -#else void createModel(const TestModel& testModel, bool testDynamicOutputShape, GeneratedModel* model) { -#endif ASSERT_NE(nullptr, model); -#ifdef NNTEST_SLTS - std::unique_ptr<MemoryAHWB> memory = createConstantReferenceMemory(nnapi, testModel); -#else std::unique_ptr<MemoryAHWB> memory = createConstantReferenceMemory(testModel); -#endif uint32_t memoryOffset = 0; -#ifdef NNTEST_SLTS - std::vector<Model> refModels; - refModels.reserve(testModel.referenced.size()); - for (int i = 0; i < testModel.referenced.size(); ++i) { - refModels.push_back(Model(nnapi)); - } -#else std::vector<Model> refModels(testModel.referenced.size()); -#endif createModelFromSubgraph(testModel.main, testDynamicOutputShape, testModel.referenced, memory, &memoryOffset, model, refModels.data()); model->setRefModels(std::move(refModels));
diff --git a/runtime/test/GeneratedTestUtils.h b/runtime/test/GeneratedTestUtils.h index 5a3866d..1354f67 100644 --- a/runtime/test/GeneratedTestUtils.h +++ b/runtime/test/GeneratedTestUtils.h
@@ -14,8 +14,8 @@ * limitations under the License. */ -#ifndef ANDROID_PACKAGES_MODULES_NEURALNETWORKS_RUNTIME_TEST_GENERATED_TEST_UTILS_H -#define ANDROID_PACKAGES_MODULES_NEURALNETWORKS_RUNTIME_TEST_GENERATED_TEST_UTILS_H +#ifndef ANDROID_FRAMEWORKS_ML_NN_RUNTIME_TEST_GENERATED_TEST_UTILS_H +#define ANDROID_FRAMEWORKS_ML_NN_RUNTIME_TEST_GENERATED_TEST_UTILS_H #include <gtest/gtest.h> @@ -25,19 +25,10 @@ #include <vector> #include "TestHarness.h" - -#ifdef NNTEST_SLTS -#include "SupportLibraryWrapper.h" -#else #include "TestNeuralNetworksWrapper.h" -#endif namespace android::nn::generated_tests { -#ifdef NNTEST_SLTS -namespace test_wrapper = android::nn::sl_wrapper; -#endif - class GeneratedTestBase : public ::testing::TestWithParam<test_helper::TestModelManager::TestParam> { protected: @@ -54,10 +45,6 @@ // A generated NDK model. class GeneratedModel : public test_wrapper::Model { public: -#ifdef NNTEST_SLTS - GeneratedModel(const NnApiSupportLibrary* nnapi) : sl_wrapper::Model(nnapi) {} -#endif - // A helper method to simplify referenced model lifetime management. // // Usage: @@ -83,24 +70,15 @@ }; // Convert TestModel to NDK model. 
-#ifdef NNTEST_SLTS -void createModel(const NnApiSupportLibrary* nnapi, const test_helper::TestModel& testModel, - bool testDynamicOutputShape, GeneratedModel* model); -inline void createModel(const NnApiSupportLibrary* nnapi, const test_helper::TestModel& testModel, - GeneratedModel* model) { - createModel(nnapi, testModel, /*testDynamicOutputShape=*/false, model); -} -#else void createModel(const test_helper::TestModel& testModel, bool testDynamicOutputShape, GeneratedModel* model); inline void createModel(const test_helper::TestModel& testModel, GeneratedModel* model) { createModel(testModel, /*testDynamicOutputShape=*/false, model); } -#endif void createRequest(const test_helper::TestModel& testModel, test_wrapper::Execution* execution, std::vector<test_helper::TestBuffer>* outputs); } // namespace android::nn::generated_tests -#endif // ANDROID_PACKAGES_MODULES_NEURALNETWORKS_RUNTIME_TEST_GENERATED_TEST_UTILS_H +#endif // ANDROID_FRAMEWORKS_ML_NN_RUNTIME_TEST_GENERATED_TEST_UTILS_H
diff --git a/runtime/test/HalUtils.h b/runtime/test/HalUtils.h deleted file mode 100644 index 98503c3..0000000 --- a/runtime/test/HalUtils.h +++ /dev/null
@@ -1,66 +0,0 @@ -/* - * Copyright (C) 2020 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef ANDROID_FRAMEWORKS_ML_NN_RUNTIME_TEST_HAL_UTILS_H -#define ANDROID_FRAMEWORKS_ML_NN_RUNTIME_TEST_HAL_UTILS_H - -#include <HalInterfaces.h> -#include <Utils.h> -#include <nnapi/hal/1.0/Device.h> -#include <nnapi/hal/1.1/Device.h> -#include <nnapi/hal/1.2/Device.h> -#include <nnapi/hal/1.3/Device.h> -#include <utils/StrongPointer.h> - -#include <string> -#include <utility> - -namespace android::nn { - -// Creates valid V1_3::Capabilities. 
-inline V1_3::Capabilities makeCapabilities(float perf) { - const V1_0::PerformanceInfo perfInfo = {.execTime = perf, .powerUsage = perf}; - return {.relaxedFloat32toFloat16PerformanceScalar = perfInfo, - .relaxedFloat32toFloat16PerformanceTensor = perfInfo, - .operandPerformance = nonExtensionOperandPerformance<HalVersion::V1_3>(perfInfo), - .ifPerformance = perfInfo, - .whilePerformance = perfInfo}; -} - -inline SharedDevice makeSharedDevice(std::string name, sp<V1_0::IDevice> driver) { - auto handleError = [](GeneralResult<SharedDevice> result) -> SharedDevice { - if (!result.has_value()) { - LOG(ERROR) << "Failed to create Device (" << result.error().code - << "): " << result.error().message; - return nullptr; - } - return std::move(result).value(); - }; - if (auto driver13 = V1_3::IDevice::castFrom(driver).withDefault(nullptr); driver13 != nullptr) { - return handleError(V1_3::utils::Device::create(std::move(name), std::move(driver13))); - } - if (auto driver12 = V1_2::IDevice::castFrom(driver).withDefault(nullptr); driver12 != nullptr) { - return handleError(V1_2::utils::Device::create(std::move(name), std::move(driver12))); - } - if (auto driver11 = V1_1::IDevice::castFrom(driver).withDefault(nullptr); driver11 != nullptr) { - return handleError(V1_1::utils::Device::create(std::move(name), std::move(driver11))); - } - return handleError(V1_0::utils::Device::create(std::move(name), std::move(driver))); -} - -} // namespace android::nn - -#endif // ANDROID_FRAMEWORKS_ML_NN_RUNTIME_TEST_HAL_UTILS_H
diff --git a/runtime/test/PreparedModelCallback.cpp b/runtime/test/PreparedModelCallback.cpp deleted file mode 100644 index 30ed3b3..0000000 --- a/runtime/test/PreparedModelCallback.cpp +++ /dev/null
@@ -1,81 +0,0 @@ -/* - * Copyright (C) 2017 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "PreparedModelCallback.h" - -namespace android::nn { - -hardware::Return<void> PreparedModelCallback::notifyInternal( - bool deadObject, ErrorStatus errorStatus, const sp<V1_0::IPreparedModel>& preparedModel) { - { - std::lock_guard<std::mutex> hold(mMutex); - - // quick-return if object has already been notified - if (mNotified) { - return hardware::Void(); - } - - // store results and mark as notified - mDeadObject = deadObject; - mErrorStatus = errorStatus; - mPreparedModel = preparedModel; - mNotified = true; - } - - mCondition.notify_all(); - return hardware::Void(); -} - -hardware::Return<void> PreparedModelCallback::notify( - V1_0::ErrorStatus errorStatus, const sp<V1_0::IPreparedModel>& preparedModel) { - return notifyInternal(false, uncheckedConvert(errorStatus), preparedModel); -} - -hardware::Return<void> PreparedModelCallback::notify_1_2( - V1_0::ErrorStatus errorStatus, const sp<V1_2::IPreparedModel>& preparedModel) { - return notifyInternal(false, uncheckedConvert(errorStatus), preparedModel); -} - -hardware::Return<void> PreparedModelCallback::notify_1_3( - V1_3::ErrorStatus errorStatus, const sp<V1_3::IPreparedModel>& preparedModel) { - return notifyInternal(false, uncheckedConvert(errorStatus), preparedModel); -} - -void PreparedModelCallback::notifyAsDeadObject() { - notifyInternal(true, 
ErrorStatus::GENERAL_FAILURE, nullptr); -} - -void PreparedModelCallback::wait() const { - std::unique_lock<std::mutex> lock(mMutex); - mCondition.wait(lock, [this] { return mNotified; }); -} - -ErrorStatus PreparedModelCallback::getStatus() const { - wait(); - return mErrorStatus; -} - -sp<V1_0::IPreparedModel> PreparedModelCallback::getPreparedModel() const { - wait(); - return mPreparedModel; -} - -bool PreparedModelCallback::isDeadObject() const { - wait(); - return mDeadObject; -} - -} // namespace android::nn
diff --git a/runtime/test/PreparedModelCallback.h b/runtime/test/PreparedModelCallback.h deleted file mode 100644 index d3c2bfc..0000000 --- a/runtime/test/PreparedModelCallback.h +++ /dev/null
@@ -1,209 +0,0 @@ -/* - * Copyright (C) 2017 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef ANDROID_FRAMEWORKS_ML_NN_RUNTIME_TEST_PREPARED_MODEL_CALLBACK_H -#define ANDROID_FRAMEWORKS_ML_NN_RUNTIME_TEST_PREPARED_MODEL_CALLBACK_H - -#include <HalInterfaces.h> -#include <Utils.h> -#include <android-base/thread_annotations.h> -#include <nnapi/Types.h> - -#include <condition_variable> -#include <functional> -#include <mutex> -#include <thread> -#include <vector> - -/* - * The Callback classes are used internally by the NeuralNetworks runtime to - * synchronize between different threads. An asynchronous task is launched - * paired with a callback object. When a client thread requires the output being - * generated by the asynchronous task, the client thread can wait for the result - * and be blocked until it has completed. Any wait may safely be called - * concurrently, even on the same callback object. When the asynchronous task - * has finished its workload, it must immediately call "notify*". If the - * asynchronous task has failed to launch, the function that tried to launch the - * asynchronous task must immediately call "notify*". This "notify*" call - * awakens any client threads waiting on the callback object. - * - * These classes exist to enable synchronization across HIDL. 
When - * synchronization is only required in the same process, consider using - * std::future, std::mutex, std::condition_variable, or std::experimental::latch - * instead. - */ - -namespace android::nn { - -/** - * The PreparedModelCallback class is used to receive the error status of - * preparing a model as well as the prepared model from a task executing - * asynchronously with respect to the runtime. If a calling thread calls wait - * or get* on a PreparedModelCallback object and the corresponding asynchronous - * task has not finished preparing the model, the calling thread will block - * until the asynchronous task has called notify*. - * - * If the callback object is notified more than once, only the results of the - * first call to notify* are used, and the results from subsequent calls are - * discarded. - * - * This callback object is passed as an argument to IDevice::prepareModel*. - */ -class PreparedModelCallback : public V1_3::IPreparedModelCallback { - public: - /** - * IPreparedModelCallback::notify marks the callback object with the return - * status of the asynchronous model preparation along with the prepared - * model, and allows all prior and future wait calls on the - * PreparedModelCallback object to proceed. - * - * One of IPreparedModelCallback::notify, IPreparedModelCallback::notify_1_2, - * or IPreparedModelCallback::notify_1_3 must be called on a given - * PreparedModelCallback object. - * - * If the callback object is notified more than once, only the results of - * the first call to notify* are used, and the results from subsequent calls - * are discarded. 
- * - * @param status Error status returned from asynchronously preparing the - * model; will be: - * - NONE if the asynchronous preparation was successful - * - DEVICE_UNAVAILABLE if driver is offline or busy - * - GENERAL_FAILURE if there is an unspecified error - * - INVALID_ARGUMENT if the input model is invalid - * @param preparedModel Returned model that has been prepared for execution, - * nullptr if the model was unable to be prepared. - */ - hardware::Return<void> notify(V1_0::ErrorStatus status, - const sp<V1_0::IPreparedModel>& preparedModel) override; - - /** - * IPreparedModelCallback::notify_1_2 marks the callback object with the - * return status of the asynchronous model preparation along with the - * prepared model, and allows all prior and future wait calls on the - * PreparedModelCallback object to proceed. - * - * One of IPreparedModelCallback::notify, IPreparedModelCallback::notify_1_2, - * or IPreparedModelCallback::notify_1_3 must be called on a given - * PreparedModelCallback object. - * - * If the callback object is notified more than once, only the results of - * the first call to notify* are used, and the results from subsequent calls - * are discarded. - * - * @param status Error status returned from asynchronously preparing the - * model; will be: - * - NONE if the asynchronous preparation was successful - * - DEVICE_UNAVAILABLE if driver is offline or busy - * - GENERAL_FAILURE if there is an unspecified error - * - INVALID_ARGUMENT if the input model is invalid - * @param preparedModel Returned model that has been prepared for execution, - * nullptr if the model was unable to be prepared. 
- */ - hardware::Return<void> notify_1_2(V1_0::ErrorStatus status, - const sp<V1_2::IPreparedModel>& preparedModel) override; - - /** - * IPreparedModelCallback::notify_1_3 marks the callback object with the - * return status of the asynchronous model preparation along with the - * prepared model, and allows all prior and future wait calls on the - * PreparedModelCallback object to proceed. - * - * One of IPreparedModelCallback::notify, IPreparedModelCallback::notify_1_2, - * or IPreparedModelCallback::notify_1_3 must be called on a given - * PreparedModelCallback object. - * - * If the callback object is notified more than once, only the results of - * the first call to notify* are used, and the results from subsequent calls - * are discarded. - * - * @param status Error status returned from asynchronously preparing the - * model; will be: - * - NONE if the asynchronous preparation was successful - * - DEVICE_UNAVAILABLE if driver is offline or busy - * - GENERAL_FAILURE if there is an unspecified error - * - INVALID_ARGUMENT if the input model is invalid - * - MISSED_DEADLINE_* if the deadline could not be met - * - RESOURCE_EXHAUSTED_* if the task was aborted by the driver - * @param preparedModel Returned model that has been prepared for execution, - * nullptr if the model was unable to be prepared. - */ - hardware::Return<void> notify_1_3(V1_3::ErrorStatus status, - const sp<V1_3::IPreparedModel>& preparedModel) override; - - /** - * Mark the callback object as a dead object. This acts as a call to notify. - */ - void notifyAsDeadObject(); - - /** - * PreparedModelCallback::wait blocks until notify* has been called on the - * callback object. - */ - void wait() const; - - /** - * Retrieves the error status returned from the asynchronous task launched - * by IDevice::prepareModel*. If IDevice::prepareModel* has not finished - * asynchronously preparing the model, this call will block until the - * asynchronous task notifies the object. 
- * - * @return status Error status returned from asynchronously preparing the - * model; will be: - * - NONE if the asynchronous preparation was successful - * - DEVICE_UNAVAILABLE if driver is offline or busy - * - GENERAL_FAILURE if there is an unspecified error - * - INVALID_ARGUMENT if the input model is invalid - * - MISSED_DEADLINE_* if the deadline could not be met - * - RESOURCE_EXHAUSTED_* if the task was aborted by the driver - * - DEAD_OBJECT if the driver crashed without returning a result - */ - ErrorStatus getStatus() const; - - /** - * Retrieves the model that has been prepared for execution from the - * asynchronous task launched by IDevice::prepareModel*. If - * IDevice::prepareModel* has not finished asynchronously preparing the - * model, this call will block until the asynchronous task notifies the - * object. - * - * @return preparedModel Returned model that has been prepared for - * execution, nullptr if the model was unable to be prepared. - */ - sp<V1_0::IPreparedModel> getPreparedModel() const; - - /** - * Queries whether the object is dead. - * - * @return 'true' if dead, 'false' otherwise. - */ - bool isDeadObject() const; - - private: - hardware::Return<void> notifyInternal(bool deadObject, ErrorStatus errorStatus, - const sp<V1_0::IPreparedModel>& preparedModel); - - mutable std::mutex mMutex; - mutable std::condition_variable mCondition; - bool mNotified GUARDED_BY(mMutex) = false; - bool mDeadObject = false; - ErrorStatus mErrorStatus = ErrorStatus::GENERAL_FAILURE; - sp<V1_0::IPreparedModel> mPreparedModel; -}; - -} // namespace android::nn - -#endif // ANDROID_FRAMEWORKS_ML_NN_RUNTIME_TEST_PREPARED_MODEL_CALLBACK_H
diff --git a/runtime/test/SupportLibraryTestGenerated.cpp b/runtime/test/SupportLibraryTestGenerated.cpp deleted file mode 100644 index 3539ffc..0000000 --- a/runtime/test/SupportLibraryTestGenerated.cpp +++ /dev/null
@@ -1,622 +0,0 @@ -/* - * Copyright (C) 2020 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include <android-base/logging.h> -#include <android-base/properties.h> -#include <android-base/unique_fd.h> -#include <ftw.h> -#include <gtest/gtest.h> -#include <unistd.h> - -#include <algorithm> -#include <cassert> -#include <cmath> -#include <fstream> -#include <iostream> -#include <map> -#include <memory> -#include <random> -#include <set> -#include <string> -#include <thread> -#include <utility> -#include <vector> - -#include "GeneratedTestUtils.h" -#include "SupportLibraryTestUtils.h" -#include "SupportLibraryWrapper.h" - -// Systrace is not available from CTS tests due to platform layering -// constraints. We reuse the NNTEST_ONLY_PUBLIC_API flag, as that should also be -// the case for CTS (public APIs only). -#ifndef NNTEST_ONLY_PUBLIC_API -#include <Tracing.h> -#else -#define NNTRACE_FULL_RAW(...) -#define NNTRACE_APP(...) -#define NNTRACE_APP_SWITCH(...) 
-#endif - -extern std::string SUPPORT_LIBRARY_NAME; - -namespace android::nn::generated_tests { -using namespace sl_wrapper; -using namespace test_helper; - -class GeneratedTests : public GeneratedTestBase { - protected: - void SetUp() override; - void TearDown() override; - - bool shouldSkipTest(); - - ANeuralNetworksMemory* createDeviceMemoryForInput(const Compilation& compilation, - uint32_t index); - ANeuralNetworksMemory* createDeviceMemoryForOutput(const Compilation& compilation, - uint32_t index); - void computeWithDeviceMemories(const Compilation& compilation, const TestModel& testModel, - Execution* execution, Execution::ComputeMode computeMode, - Result* result, std::vector<TestBuffer>* outputs); - bool checkSupported(const Model& model, ANeuralNetworksDevice* device); - std::optional<Compilation> compileModel(const Model& model, ANeuralNetworksDevice* device); - void executeWithCompilation(const Compilation& compilation, const TestModel& testModel); - void executeOnce(const Model& model, const TestModel& testModel); - void executeMultithreadedOwnCompilation(const Model& model, const TestModel& testModel); - void executeMultithreadedSharedCompilation(const Model& model, const TestModel& testModel); - // Test driver for those generated from ml/nn/runtime/test/spec - void execute(const TestModel& testModel); - - // VNDK version of the device under test. 
- static int mVndkVersion; - - std::string mCacheDir; - std::vector<uint8_t> mToken; - bool mTestCompilationCaching = false; - bool mTestDynamicOutputShape = false; - bool mExpectFailure = false; - bool mTestQuantizationCoupling = false; - bool mTestDeviceMemory = false; - Execution::ComputeMode mComputeMode = Execution::getComputeMode(); - - std::unique_ptr<const NnApiSupportLibrary> mNnApi = - loadNnApiSupportLibrary(SUPPORT_LIBRARY_NAME); -}; - -int GeneratedTests::mVndkVersion = __ANDROID_API_FUTURE__; - -// Tag for the dynamic output shape tests -class DynamicOutputShapeTest : public GeneratedTests { - protected: - DynamicOutputShapeTest() { mTestDynamicOutputShape = true; } -}; - -// Tag for the fenced execute tests -class FencedComputeTest : public GeneratedTests {}; - -// Tag for the generated validation tests -class GeneratedValidationTests : public GeneratedTests { - protected: - GeneratedValidationTests() { mExpectFailure = true; } -}; - -class QuantizationCouplingTest : public GeneratedTests { - protected: - QuantizationCouplingTest() { mTestQuantizationCoupling = true; } -}; - -class DeviceMemoryTest : public GeneratedTests { - protected: - DeviceMemoryTest() { mTestDeviceMemory = true; } -}; - -bool GeneratedTests::checkSupported(const Model& model, ANeuralNetworksDevice* device) { - constexpr static int MAX_NUM_OPS = 256; - std::array<bool, MAX_NUM_OPS> supportedOps; - for (int i = 0; i < MAX_NUM_OPS; ++i) { - supportedOps[i] = true; - } - EXPECT_EQ(mNnApi->ANeuralNetworksModel_getSupportedOperationsForDevices( - model.getHandle(), &device, /*numDevices=*/1, supportedOps.data()), - ANEURALNETWORKS_NO_ERROR); - const bool fullySupportedModel = - std::all_of(supportedOps.begin(), supportedOps.end(), [](bool v) { return v; }); - return fullySupportedModel; -} - -static std::vector<base::unique_fd> createCacheFds(const std::vector<std::string>& files) { - std::vector<base::unique_fd> fds; - fds.reserve(files.size()); - for (const auto& file : files) { - 
auto fd = base::unique_fd(open(file.c_str(), O_RDWR | O_CREAT, S_IRUSR | S_IWUSR)); - if (fd.get() == -1) { - [] { FAIL(); }(); - return {}; - } - fds.push_back(std::move(fd)); - } - return fds; -} - -std::optional<Compilation> GeneratedTests::compileModel(const Model& model, - ANeuralNetworksDevice* device) { - NNTRACE_APP(NNTRACE_PHASE_COMPILATION, "compileModel"); - - if (mTestCompilationCaching) { - // Compile the model twice with the same token, so that compilation caching will be - // exercised if supported by the driver. - // No invalid model will be passed to this branch. - EXPECT_FALSE(mExpectFailure); - - std::string mode = ::android::base::GetProperty("debug.nn.slts.caching", "random"); - bool useSetCachingFromFds; - if (mode == "path") { - useSetCachingFromFds = false; - } else if (mode == "fds") { - useSetCachingFromFds = true; - } else if (mode == "random") { - std::string testName = ::testing::UnitTest::GetInstance()->current_test_info()->name(); - std::seed_seq seq(testName.begin(), testName.end()); - std::mt19937 gen(seq); - std::bernoulli_distribution d(0.5); - useSetCachingFromFds = d(gen); - } else { - [&mode] { - FAIL() << "System property debug.nn.slts.caching should be one of \"path\", " - "\"fds\", or \"random\"; got \"" - << mode << "\""; - }(); - return {}; - } - SCOPED_TRACE("Use setCachingFromFds = " + std::to_string(useSetCachingFromFds) + " (" + - mode + ")"); - std::cout << "\nUse setCachingFromFds = " << std::boolalpha << useSetCachingFromFds << " (" - << mode << ")" << std::endl; - - std::vector<std::string> modelCacheFilenames, dataCacheFilenames; - if (useSetCachingFromFds) { - uint32_t numModelCacheFiles, numDataCacheFiles; - EXPECT_EQ(mNnApi->SL_ANeuralNetworksDevice_getNumberOfCacheFilesNeeded( - device, &numModelCacheFiles, &numDataCacheFiles), - ANEURALNETWORKS_NO_ERROR); - for (uint32_t i = 0; i < numModelCacheFiles; i++) { - modelCacheFilenames.push_back({mCacheDir + "/model" + std::to_string(i)}); - } - for (uint32_t i = 
0; i < numDataCacheFiles; i++) { - dataCacheFilenames.push_back({mCacheDir + "/data" + std::to_string(i)}); - } - } - - auto resultCompilation1 = Compilation::createForDevice(mNnApi.get(), &model, device); - EXPECT_EQ(resultCompilation1.first, Result::NO_ERROR); - auto compilation1 = std::move(resultCompilation1.second); - if (useSetCachingFromFds) { - auto modelCacheFds = createCacheFds(modelCacheFilenames); - auto dataCacheFds = createCacheFds(dataCacheFilenames); - EXPECT_EQ(compilation1.setCachingFromFds(modelCacheFds, dataCacheFds, mToken), - Result::NO_ERROR); - } else { - EXPECT_EQ(compilation1.setCaching(mCacheDir, mToken), Result::NO_ERROR); - } - EXPECT_EQ(compilation1.finish(), Result::NO_ERROR); - - auto resultCompilation2 = Compilation::createForDevice(mNnApi.get(), &model, device); - EXPECT_EQ(resultCompilation2.first, Result::NO_ERROR); - auto compilation2 = std::move(resultCompilation2.second); - if (useSetCachingFromFds) { - auto modelCacheFds = createCacheFds(modelCacheFilenames); - auto dataCacheFds = createCacheFds(dataCacheFilenames); - EXPECT_EQ(compilation2.setCachingFromFds(modelCacheFds, dataCacheFds, mToken), - Result::NO_ERROR); - } else { - EXPECT_EQ(compilation2.setCaching(mCacheDir, mToken), Result::NO_ERROR); - } - EXPECT_EQ(compilation2.finish(), Result::NO_ERROR); - - return compilation2; - } else { - auto resultCompilation = Compilation::createForDevice(mNnApi.get(), &model, device); - EXPECT_EQ(resultCompilation.first, Result::NO_ERROR); - auto compilation = std::move(resultCompilation.second); - Result result = compilation.finish(); - - // For valid model, we check the compilation result == NO_ERROR. - // For invalid model, the driver may fail at compilation or execution, so any result code is - // permitted at this point. 
- if (mExpectFailure && result != Result::NO_ERROR) return std::nullopt; - EXPECT_EQ(result, Result::NO_ERROR); - return compilation; - } -} - -void computeWithPtrs(const TestModel& testModel, Execution* execution, - Execution::ComputeMode computeMode, Result* result, - std::vector<TestBuffer>* outputs) { - { - NNTRACE_APP(NNTRACE_PHASE_INPUTS_AND_OUTPUTS, "computeWithPtrs example"); - createRequest(testModel, execution, outputs); - } - *result = execution->compute(computeMode); -} - -ANeuralNetworksMemory* GeneratedTests::createDeviceMemoryForInput(const Compilation& compilation, - uint32_t index) { - ANeuralNetworksMemoryDesc* desc = nullptr; - EXPECT_EQ(mNnApi->ANeuralNetworksMemoryDesc_create(&desc), ANEURALNETWORKS_NO_ERROR); - EXPECT_EQ(mNnApi->ANeuralNetworksMemoryDesc_addInputRole(desc, compilation.getHandle(), index, - 1.0f), - ANEURALNETWORKS_NO_ERROR); - EXPECT_EQ(mNnApi->ANeuralNetworksMemoryDesc_finish(desc), ANEURALNETWORKS_NO_ERROR); - ANeuralNetworksMemory* memory = nullptr; - mNnApi->ANeuralNetworksMemory_createFromDesc(desc, &memory); - mNnApi->ANeuralNetworksMemoryDesc_free(desc); - return memory; -} - -ANeuralNetworksMemory* GeneratedTests::createDeviceMemoryForOutput(const Compilation& compilation, - uint32_t index) { - ANeuralNetworksMemoryDesc* desc = nullptr; - EXPECT_EQ(mNnApi->ANeuralNetworksMemoryDesc_create(&desc), ANEURALNETWORKS_NO_ERROR); - EXPECT_EQ(mNnApi->ANeuralNetworksMemoryDesc_addOutputRole(desc, compilation.getHandle(), index, - 1.0f), - ANEURALNETWORKS_NO_ERROR); - EXPECT_EQ(mNnApi->ANeuralNetworksMemoryDesc_finish(desc), ANEURALNETWORKS_NO_ERROR); - ANeuralNetworksMemory* memory = nullptr; - mNnApi->ANeuralNetworksMemory_createFromDesc(desc, &memory); - mNnApi->ANeuralNetworksMemoryDesc_free(desc); - return memory; -} - -// Set result = Result::NO_ERROR and outputs = {} if the test should be skipped. 
-void GeneratedTests::computeWithDeviceMemories(const Compilation& compilation, - const TestModel& testModel, Execution* execution, - Execution::ComputeMode computeMode, Result* result, - std::vector<TestBuffer>* outputs) { - ASSERT_NE(execution, nullptr); - ASSERT_NE(result, nullptr); - ASSERT_NE(outputs, nullptr); - outputs->clear(); - std::vector<Memory> inputMemories, outputMemories; - - { - NNTRACE_APP(NNTRACE_PHASE_INPUTS_AND_OUTPUTS, "computeWithDeviceMemories example"); - // Model inputs. - for (uint32_t i = 0; i < testModel.main.inputIndexes.size(); i++) { - SCOPED_TRACE("Input index: " + std::to_string(i)); - const auto& operand = testModel.main.operands[testModel.main.inputIndexes[i]]; - // Omitted input. - if (operand.data.size() == 0) { - ASSERT_EQ(Result::NO_ERROR, execution->setInput(i, nullptr, 0)); - continue; - } - - // Create device memory. - ANeuralNetworksMemory* memory = createDeviceMemoryForInput(compilation, i); - ASSERT_NE(memory, nullptr); - auto& wrapperMemory = inputMemories.emplace_back(Memory(mNnApi.get(), memory)); - - // Copy data from TestBuffer to device memory. - auto ashmem = TestAshmem::createFrom(mNnApi.get(), operand.data); - ASSERT_NE(ashmem, nullptr); - ASSERT_EQ(mNnApi->ANeuralNetworksMemory_copy(ashmem->get()->get(), memory), - ANEURALNETWORKS_NO_ERROR); - ASSERT_EQ(Result::NO_ERROR, execution->setInputFromMemory(i, &wrapperMemory, 0, 0)); - } - - // Model outputs. - for (uint32_t i = 0; i < testModel.main.outputIndexes.size(); i++) { - SCOPED_TRACE("Output index: " + std::to_string(i)); - ANeuralNetworksMemory* memory = createDeviceMemoryForOutput(compilation, i); - ASSERT_NE(memory, nullptr); - auto& wrapperMemory = outputMemories.emplace_back(Memory(mNnApi.get(), memory)); - ASSERT_EQ(Result::NO_ERROR, execution->setOutputFromMemory(i, &wrapperMemory, 0, 0)); - } - } - - *result = execution->compute(computeMode); - - // Copy out output results. 
- for (uint32_t i = 0; i < testModel.main.outputIndexes.size(); i++) { - SCOPED_TRACE("Output index: " + std::to_string(i)); - const auto& operand = testModel.main.operands[testModel.main.outputIndexes[i]]; - const size_t bufferSize = operand.data.size(); - auto& output = outputs->emplace_back(bufferSize); - - auto ashmem = TestAshmem::createFrom(mNnApi.get(), output); - ASSERT_NE(ashmem, nullptr); - ASSERT_EQ(mNnApi->ANeuralNetworksMemory_copy(outputMemories[i].get(), ashmem->get()->get()), - ANEURALNETWORKS_NO_ERROR); - std::copy(ashmem->dataAs<uint8_t>(), ashmem->dataAs<uint8_t>() + bufferSize, - output.getMutable<uint8_t>()); - } -} - -void GeneratedTests::executeWithCompilation(const Compilation& compilation, - const TestModel& testModel) { - NNTRACE_APP(NNTRACE_PHASE_EXECUTION, "executeWithCompilation example"); - - Execution execution(mNnApi.get(), &compilation); - Result result; - std::vector<TestBuffer> outputs; - - if (mTestDeviceMemory) { - computeWithDeviceMemories(compilation, testModel, &execution, mComputeMode, &result, - &outputs); - } else { - computeWithPtrs(testModel, &execution, mComputeMode, &result, &outputs); - } - - if (result == Result::NO_ERROR && outputs.empty()) { - return; - } - - { - NNTRACE_APP(NNTRACE_PHASE_RESULTS, "executeWithCompilation example"); - if (mExpectFailure) { - ASSERT_NE(result, Result::NO_ERROR); - return; - } else { - ASSERT_EQ(result, Result::NO_ERROR); - } - - // Check output dimensions. 
- for (uint32_t i = 0; i < testModel.main.outputIndexes.size(); i++) { - SCOPED_TRACE("Output index: " + std::to_string(i)); - const auto& output = testModel.main.operands[testModel.main.outputIndexes[i]]; - if (output.isIgnored) continue; - std::vector<uint32_t> actualDimensions; - ASSERT_EQ(Result::NO_ERROR, execution.getOutputOperandDimensions(i, &actualDimensions)); - ASSERT_EQ(output.dimensions, actualDimensions); - } - - checkResults(testModel, outputs); - } -} - -void GeneratedTests::executeOnce(const Model& model, const TestModel& testModel) { - NNTRACE_APP(NNTRACE_PHASE_OVERALL, "executeOnce"); - uint32_t numDevices = 0; - mNnApi->ANeuralNetworks_getDeviceCount(&numDevices); - bool modelSupported = false; - for (uint32_t i = 0; i < numDevices; ++i) { - ANeuralNetworksDevice* device = nullptr; - mNnApi->ANeuralNetworks_getDevice(i, &device); - const char* deviceName = nullptr; - mNnApi->ANeuralNetworksDevice_getName(device, &deviceName); - SCOPED_TRACE("Device = " + std::string(deviceName)); - std::cout << "\nDevice = " << deviceName << std::endl; - if (!checkSupported(model, device)) { - std::cout << "\nModel not supported by device " << deviceName << ". Skipping" - << std::endl; - continue; - } - modelSupported = true; - std::cout << "\nModel supported" << std::endl; - std::optional<Compilation> compilation = compileModel(model, device); - // Early return if compilation fails. The compilation result code is - // checked in compileModel. 
- if (!compilation) return; - executeWithCompilation(compilation.value(), testModel); - std::cout << "\nExecution completed" << std::endl; - } - if (!modelSupported) { - std::cout << "\nModel not supported by any device\n" - << "SKIPPED" << std::endl; - } -} - -void GeneratedTests::executeMultithreadedOwnCompilation(const Model& model, - const TestModel& testModel) { - NNTRACE_APP(NNTRACE_PHASE_OVERALL, "executeMultithreadedOwnCompilation"); - SCOPED_TRACE("MultithreadedOwnCompilation"); - std::cout << "\nMultithreadedOwnCompilation" << std::endl; - std::vector<std::thread> threads; - for (int i = 0; i < 10; i++) { - threads.push_back(std::thread([&]() { executeOnce(model, testModel); })); - } - std::for_each(threads.begin(), threads.end(), [](std::thread& t) { t.join(); }); -} - -void GeneratedTests::executeMultithreadedSharedCompilation(const Model& model, - const TestModel& testModel) { - NNTRACE_APP(NNTRACE_PHASE_OVERALL, "executeMultithreadedSharedCompilation"); - SCOPED_TRACE("MultithreadedSharedCompilation"); - std::cout << "\nMultithreadedSharedCompilation" << std::endl; - uint32_t numDevices = 0; - mNnApi->ANeuralNetworks_getDeviceCount(&numDevices); - bool modelSupported = false; - for (uint32_t i = 0; i < numDevices; ++i) { - ANeuralNetworksDevice* device = nullptr; - mNnApi->ANeuralNetworks_getDevice(i, &device); - const char* deviceName = nullptr; - mNnApi->ANeuralNetworksDevice_getName(device, &deviceName); - SCOPED_TRACE("Device = " + std::string(deviceName)); - std::cout << "\nDevice = " << deviceName << std::endl; - if (!checkSupported(model, device)) { - std::cout << "\nModel not supported by device " << deviceName << ". Skipping" - << std::endl; - continue; - } - modelSupported = true; - std::cout << "\nModel supported" << std::endl; - std::optional<Compilation> compilation = compileModel(model, device); - // Early return if compilation fails. The ompilation result code is - // checked in compileModel. 
- if (!compilation) return; - std::vector<std::thread> threads; - for (int i = 0; i < 10; i++) { - threads.push_back( - std::thread([&]() { executeWithCompilation(compilation.value(), testModel); })); - } - std::for_each(threads.begin(), threads.end(), [](std::thread& t) { t.join(); }); - std::cout << "\nExecution completed" << std::endl; - } - if (!modelSupported) { - std::cout << "\nModel not supported by any device\n" - << "SKIPPED" << std::endl; - } -} - -// Test driver for those generated from ml/nn/runtime/test/spec -void GeneratedTests::execute(const TestModel& testModel) { - NNTRACE_APP(NNTRACE_PHASE_OVERALL, "execute"); - GeneratedModel model(mNnApi.get()); - createModel(mNnApi.get(), testModel, mTestDynamicOutputShape, &model); - if (testModel.expectFailure && !model.isValid()) { - return; - } - ASSERT_EQ(model.finish(), Result::NO_ERROR); - ASSERT_TRUE(model.isValid()); - auto executeInternal = [&testModel, &model, this]() { - SCOPED_TRACE("TestCompilationCaching = " + std::to_string(mTestCompilationCaching)); - std::cout << "\nCompilationCaching = " << std::boolalpha << mTestCompilationCaching - << std::endl; -#ifndef NNTEST_MULTITHREADED - executeOnce(model, testModel); -#else // defined(NNTEST_MULTITHREADED) - executeMultithreadedOwnCompilation(model, testModel); - executeMultithreadedSharedCompilation(model, testModel); -#endif // !defined(NNTEST_MULTITHREADED) - }; - mTestCompilationCaching = false; - executeInternal(); - if (!mExpectFailure) { - mTestCompilationCaching = true; - executeInternal(); - } -} - -bool GeneratedTests::shouldSkipTest() { - // A map of {min VNDK version -> tests that should be skipped with earlier VNDK versions}. - // The listed tests are added in a later release, but exercising old APIs. They should be - // skipped if the device has a mixed build of system and vendor partitions. 
- static const std::map<int, std::set<std::string>> kMapOfMinVndkVersionToTests = { - { - __ANDROID_API_R__, - { - "add_broadcast_quant8_all_inputs_as_internal", - }, - }, - }; - for (const auto& [minVersion, names] : kMapOfMinVndkVersionToTests) { - if (mVndkVersion < minVersion && names.count(kTestName) > 0) { - return true; - } - } - return false; -} - -void GeneratedTests::SetUp() { - GeneratedTestBase::SetUp(); - - mVndkVersion = ::android::base::GetIntProperty("ro.vndk.version", __ANDROID_API_FUTURE__); - if (shouldSkipTest()) { - GTEST_SKIP(); - return; - } - - char cacheDirTemp[] = "/data/local/tmp/TestCompilationCachingXXXXXX"; - char* cacheDir = mkdtemp(cacheDirTemp); - ASSERT_NE(cacheDir, nullptr); - mCacheDir = cacheDir; - mToken = std::vector<uint8_t>(ANEURALNETWORKS_BYTE_SIZE_OF_CACHE_TOKEN, 0); -} - -void GeneratedTests::TearDown() { - mNnApi.reset(nullptr); - - if (!::testing::Test::HasFailure()) { - // TODO: Switch to std::filesystem::remove_all once libc++fs is made available in CTS. - // Remove the cache directory specified by path recursively. 
- auto callback = [](const char* child, const struct stat*, int, struct FTW*) { - return remove(child); - }; - nftw(mCacheDir.c_str(), callback, 128, FTW_DEPTH | FTW_MOUNT | FTW_PHYS); - } - GeneratedTestBase::TearDown(); -} - -#ifdef NNTEST_COMPUTE_MODE -TEST_P(GeneratedTests, Sync) { - std::cout << "\nComputeMode = SYNC" << std::endl; - mComputeMode = Execution::ComputeMode::SYNC; - execute(testModel); -} - -TEST_P(GeneratedTests, Burst) { - std::cout << "\nComputeMode = BURST" << std::endl; - mComputeMode = Execution::ComputeMode::BURST; - execute(testModel); -} -#else -TEST_P(GeneratedTests, Test) { - execute(testModel); -} -#endif - -TEST_P(DynamicOutputShapeTest, Test) { - execute(testModel); -} - -TEST_P(GeneratedValidationTests, Test) { - execute(testModel); -} - -TEST_P(QuantizationCouplingTest, Test) { - execute(convertQuant8AsymmOperandsToSigned(testModel)); -} - -TEST_P(DeviceMemoryTest, Test) { - execute(testModel); -} - -TEST_P(FencedComputeTest, Test) { - mComputeMode = Execution::ComputeMode::FENCED; - execute(testModel); -} - -INSTANTIATE_GENERATED_TEST(GeneratedTests, - [](const TestModel& testModel) { return !testModel.expectFailure; }); - -INSTANTIATE_GENERATED_TEST(DynamicOutputShapeTest, [](const TestModel& testModel) { - return !testModel.expectFailure && !testModel.hasScalarOutputs(); -}); - -INSTANTIATE_GENERATED_TEST(GeneratedValidationTests, [](const TestModel& testModel) { - return testModel.expectFailure && !testModel.isInfiniteLoopTimeoutTest(); -}); - -INSTANTIATE_GENERATED_TEST(QuantizationCouplingTest, [](const TestModel& testModel) { - return !testModel.expectFailure && testModel.main.operations.size() == 1 && - testModel.referenced.size() == 0 && testModel.hasQuant8CoupledOperands(); -}); - -INSTANTIATE_GENERATED_TEST(DeviceMemoryTest, [](const TestModel& testModel) { - return !testModel.expectFailure && - std::all_of(testModel.main.outputIndexes.begin(), testModel.main.outputIndexes.end(), - [&testModel](uint32_t index) { - 
return testModel.main.operands[index].data.size() > 0; - }); -}); - -INSTANTIATE_GENERATED_TEST(FencedComputeTest, [](const TestModel& testModel) { - return !testModel.expectFailure && - std::all_of(testModel.main.outputIndexes.begin(), testModel.main.outputIndexes.end(), - [&testModel](uint32_t index) { - return testModel.main.operands[index].data.size() > 0; - }); -}); - -} // namespace android::nn::generated_tests
diff --git a/runtime/test/SupportLibraryTestMain.cpp b/runtime/test/SupportLibraryTestMain.cpp deleted file mode 100644 index 54b60af..0000000 --- a/runtime/test/SupportLibraryTestMain.cpp +++ /dev/null
@@ -1,33 +0,0 @@ -/* - * Copyright (C) 2021 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include <gtest/gtest.h> - -#include <iostream> -#include <string> - -std::string SUPPORT_LIBRARY_NAME; - -int main(int argc, char** argv) { - testing::InitGoogleTest(&argc, argv); - if (argc < 2) { - std::cerr << "Usage: SupportLibraryTest <support_library_file_name>" << std::endl; - return -1; - } - SUPPORT_LIBRARY_NAME = argv[1]; - - return RUN_ALL_TESTS(); -}
diff --git a/runtime/test/SupportLibraryTestUtils.h b/runtime/test/SupportLibraryTestUtils.h deleted file mode 100644 index 607ddb0..0000000 --- a/runtime/test/SupportLibraryTestUtils.h +++ /dev/null
@@ -1,83 +0,0 @@ -/* - * Copyright (C) 2020 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef ANDROID_PACKAGES_MODULES_NEURALNETWORKS_RUNTIME_TEST_SUPPORT_LIBRARY_TEST_UTILS_H -#define ANDROID_PACKAGES_MODULES_NEURALNETWORKS_RUNTIME_TEST_SUPPORT_LIBRARY_TEST_UTILS_H - -#include <android-base/mapped_file.h> -#include <android-base/unique_fd.h> -#include <android/sharedmem.h> - -#include <memory> -#include <utility> - -#include "SupportLibraryWrapper.h" - -namespace android::nn { - -// Convenience class to manage the file, mapping, and memory object. -class TestAshmem { - public: - TestAshmem(::android::base::unique_fd fd, std::unique_ptr<::android::base::MappedFile> mapped, - sl_wrapper::Memory memory) - : mFd(std::move(fd)), mMapped(std::move(mapped)), mMemory(std::move(memory)) {} - - // Factory function for TestAshmem; prefer this over the raw constructor - static std::unique_ptr<TestAshmem> createFrom(const NnApiSupportLibrary* nnapi, - const test_helper::TestBuffer& buffer) { - return createFrom(nnapi, buffer.get<void>(), buffer.size()); - } - - // Factory function for TestAshmem; prefer this over the raw constructor - static std::unique_ptr<TestAshmem> createFrom(const NnApiSupportLibrary* nnapi, - const void* data, uint32_t length) { - // Create ashmem-based fd. 
- int fd = ASharedMemory_create(nullptr, length); - if (fd <= 0) return nullptr; - ::android::base::unique_fd managedFd(fd); - - // Map and populate ashmem. - auto mappedFile = - ::android::base::MappedFile::FromFd(fd, 0, length, PROT_READ | PROT_WRITE); - if (!mappedFile) return nullptr; - memcpy(mappedFile->data(), data, length); - - // Create NNAPI memory object. - sl_wrapper::Memory memory(nnapi, length, PROT_READ | PROT_WRITE, fd, 0); - if (!memory.isValid()) return nullptr; - - // Store everything in managing class. - return std::make_unique<TestAshmem>(std::move(managedFd), std::move(mappedFile), - std::move(memory)); - } - - size_t size() { return mMapped->size(); } - sl_wrapper::Memory* get() { return &mMemory; } - - template <typename Type> - Type* dataAs() { - return static_cast<Type*>(static_cast<void*>(mMapped->data())); - } - - private: - ::android::base::unique_fd mFd; - std::unique_ptr<::android::base::MappedFile> mMapped; - sl_wrapper::Memory mMemory; -}; - -} // namespace android::nn - -#endif // ANDROID_PACKAGES_MODULES_NEURALNETWORKS_RUNTIME_TEST_SUPPORT_LIBRARY_TEST_UTILS_H
diff --git a/runtime/test/TestCompilationCaching.cpp b/runtime/test/TestCompilationCaching.cpp index 588f0d9..dab33d9 100644 --- a/runtime/test/TestCompilationCaching.cpp +++ b/runtime/test/TestCompilationCaching.cpp
@@ -14,8 +14,6 @@ * limitations under the License. */ -#include <HalInterfaces.h> -#include <SampleDriver.h> #include <android-base/scopeguard.h> #include <gtest/gtest.h> @@ -27,22 +25,22 @@ #include <tuple> #include <vector> -#include "HalUtils.h" +#include "HalInterfaces.h" #include "Manager.h" +#include "SampleDriver.h" #include "TestNeuralNetworksWrapper.h" using namespace android::nn; -namespace hardware = android::hardware; -using WrapperResult = test_wrapper::Result; +using namespace hal; +using Result = test_wrapper::Result; using Type = test_wrapper::Type; -const V1_2::Timing kBadTiming = {.timeOnDevice = UINT64_MAX, .timeInDriver = UINT64_MAX}; +const Timing kBadTiming = {.timeOnDevice = UINT64_MAX, .timeInDriver = UINT64_MAX}; template <typename T> using MQDescriptorSync = ::android::hardware::MQDescriptorSync<T>; -using android::sp; namespace android::hardware::neuralnetworks::V1_0 { -::std::ostream& operator<<(::std::ostream& os, V1_3::ErrorStatus errorStatus) { +::std::ostream& operator<<(::std::ostream& os, ErrorStatus errorStatus) { return os << toString(errorStatus); } @@ -68,10 +66,10 @@ } // Whether the driver is expected to be registered because it can pass initialization. 
-bool canDeviceBeRegistered(V1_3::ErrorStatus error, uint32_t numModelCache, uint32_t numDataCache) { +bool canDeviceBeRegistered(ErrorStatus error, uint32_t numModelCache, uint32_t numDataCache) { constexpr uint32_t maxNumCacheFiles = - static_cast<uint32_t>(V1_2::Constant::MAX_NUMBER_OF_CACHE_FILES); - return error == V1_3::ErrorStatus::NONE && numModelCache <= maxNumCacheFiles && + static_cast<uint32_t>(Constant::MAX_NUMBER_OF_CACHE_FILES); + return error == ErrorStatus::NONE && numModelCache <= maxNumCacheFiles && numDataCache <= maxNumCacheFiles; } @@ -96,59 +94,55 @@ private: static constexpr size_t kCacheSize = 256; - class CachingPreparedModel : public V1_3::IPreparedModel { + class CachingPreparedModel : public IPreparedModel { public: CachingPreparedModel() = default; - hardware::Return<V1_0::ErrorStatus> execute(const V1_0::Request&, - const sp<V1_0::IExecutionCallback>&) override { + Return<V1_0::ErrorStatus> execute(const V1_0::Request&, + const sp<V1_0::IExecutionCallback>&) override { return V1_0::ErrorStatus::DEVICE_UNAVAILABLE; } - hardware::Return<V1_0::ErrorStatus> execute_1_2( - const V1_0::Request&, V1_2::MeasureTiming, - const sp<V1_2::IExecutionCallback>&) override { + Return<V1_0::ErrorStatus> execute_1_2(const V1_0::Request&, MeasureTiming, + const sp<V1_2::IExecutionCallback>&) override { return V1_0::ErrorStatus::DEVICE_UNAVAILABLE; } - hardware::Return<V1_3::ErrorStatus> execute_1_3( - const V1_3::Request&, V1_2::MeasureTiming, const V1_3::OptionalTimePoint&, - const V1_3::OptionalTimeoutDuration&, - const sp<V1_3::IExecutionCallback>&) override { + Return<V1_3::ErrorStatus> execute_1_3(const V1_3::Request&, MeasureTiming, + const OptionalTimePoint&, + const OptionalTimeoutDuration&, + const sp<V1_3::IExecutionCallback>&) override { return V1_3::ErrorStatus::DEVICE_UNAVAILABLE; } - hardware::Return<void> executeSynchronously(const V1_0::Request&, V1_2::MeasureTiming, - executeSynchronously_cb cb) override { + Return<void> 
executeSynchronously(const V1_0::Request&, MeasureTiming, + executeSynchronously_cb cb) override { cb(V1_0::ErrorStatus::DEVICE_UNAVAILABLE, {}, kBadTiming); - return hardware::Void(); + return Void(); } - hardware::Return<void> executeSynchronously_1_3(const V1_3::Request&, V1_2::MeasureTiming, - const V1_3::OptionalTimePoint&, - const V1_3::OptionalTimeoutDuration&, - executeSynchronously_1_3_cb cb) override { + Return<void> executeSynchronously_1_3(const V1_3::Request&, MeasureTiming, + const OptionalTimePoint&, + const OptionalTimeoutDuration&, + executeSynchronously_1_3_cb cb) override { cb(V1_3::ErrorStatus::DEVICE_UNAVAILABLE, {}, kBadTiming); - return hardware::Void(); + return Void(); } - hardware::Return<void> configureExecutionBurst( - const sp<V1_2::IBurstCallback>&, const MQDescriptorSync<V1_2::FmqRequestDatum>&, - const MQDescriptorSync<V1_2::FmqResultDatum>&, - configureExecutionBurst_cb cb) override { + Return<void> configureExecutionBurst(const sp<V1_2::IBurstCallback>&, + const MQDescriptorSync<V1_2::FmqRequestDatum>&, + const MQDescriptorSync<V1_2::FmqResultDatum>&, + configureExecutionBurst_cb cb) override { cb(V1_0::ErrorStatus::DEVICE_UNAVAILABLE, nullptr); - return hardware::Void(); + return Void(); } - hardware::Return<void> executeFenced(const V1_3::Request&, - const hardware::hidl_vec<hardware::hidl_handle>&, - V1_2::MeasureTiming, const V1_3::OptionalTimePoint&, - const V1_3::OptionalTimeoutDuration&, - const V1_3::OptionalTimeoutDuration&, - executeFenced_cb cb) { - cb(V1_3::ErrorStatus::DEVICE_UNAVAILABLE, hardware::hidl_handle(nullptr), nullptr); - return hardware::Void(); + Return<void> executeFenced(const hal::Request&, const hidl_vec<hidl_handle>&, MeasureTiming, + const OptionalTimePoint&, const OptionalTimeoutDuration&, + const OptionalTimeoutDuration&, executeFenced_cb cb) { + cb(ErrorStatus::DEVICE_UNAVAILABLE, hidl_handle(nullptr), nullptr); + return Void(); } }; public: - CachingDriver(std::string_view name, V1_3::ErrorStatus 
errorStatusGetNumCacheFiles, + CachingDriver(std::string_view name, ErrorStatus errorStatusGetNumCacheFiles, uint32_t numModelCache, uint32_t numDataCache, - V1_3::ErrorStatus errorStatusPrepareFromCache) + ErrorStatus errorStatusPrepareFromCache) : SampleDriver(name.data()), mErrorStatusGetNumCacheFiles(errorStatusGetNumCacheFiles), mNumModelCache(numModelCache), @@ -162,40 +156,39 @@ ~CachingDriver() override {} // Reports faster than cpu. - hardware::Return<void> getCapabilities_1_3(getCapabilities_1_3_cb cb) override { + Return<void> getCapabilities_1_3(getCapabilities_1_3_cb cb) override { android::nn::initVLogMask(); - const V1_0::PerformanceInfo kPerf = {.execTime = 0.1, .powerUsage = 0.1}; - V1_3::Capabilities capabilities = { + const PerformanceInfo kPerf = {.execTime = 0.1, .powerUsage = 0.1}; + Capabilities capabilities = { .relaxedFloat32toFloat16PerformanceScalar = kPerf, .relaxedFloat32toFloat16PerformanceTensor = kPerf, .operandPerformance = nonExtensionOperandPerformance<HalVersion::V1_3>(kPerf), .ifPerformance = kPerf, .whilePerformance = kPerf}; cb(V1_3::ErrorStatus::NONE, capabilities); - return hardware::Void(); + return Void(); } // Reports supporting all operations. - hardware::Return<void> getSupportedOperations_1_3(const V1_3::Model& model, - getSupportedOperations_1_3_cb cb) override { + Return<void> getSupportedOperations_1_3(const Model& model, + getSupportedOperations_1_3_cb cb) override { std::vector<bool> supported(model.main.operations.size(), true); cb(V1_3::ErrorStatus::NONE, supported); - return hardware::Void(); + return Void(); } // Reports according to mGetNumCacheFiles. - hardware::Return<void> getNumberOfCacheFilesNeeded(getNumberOfCacheFilesNeeded_cb cb) override { + Return<void> getNumberOfCacheFilesNeeded(getNumberOfCacheFilesNeeded_cb cb) override { cb(convertToV1_0(mErrorStatusGetNumCacheFiles), mNumModelCache, mNumDataCache); - return hardware::Void(); + return Void(); } // Generates CachingPreparedModel. 
// Writes the cache entry per mCacheXData and sets mHasCalledPrepareModel. - hardware::Return<V1_3::ErrorStatus> prepareModel_1_3( - const V1_3::Model&, V1_1::ExecutionPreference, V1_3::Priority, - const V1_3::OptionalTimePoint&, - const hardware::hidl_vec<hardware::hidl_handle>& modelCacheHandle, - const hardware::hidl_vec<hardware::hidl_handle>& dataCacheHandle, const HalCacheToken&, + Return<V1_3::ErrorStatus> prepareModel_1_3( + const Model&, ExecutionPreference, Priority, const OptionalTimePoint&, + const hidl_vec<hidl_handle>& modelCacheHandle, + const hidl_vec<hidl_handle>& dataCacheHandle, const CacheToken&, const sp<V1_3::IPreparedModelCallback>& cb) override { checkNumberOfCacheHandles(modelCacheHandle.size(), dataCacheHandle.size()); if (modelCacheHandle.size() != 0 || dataCacheHandle.size() != 0) { @@ -211,10 +204,9 @@ // Checks if the cache entry is correct, notifies error status according to // mErrorStatusPrepareFromCache, sets mHasCalledPrepareModelFromCache. - hardware::Return<V1_3::ErrorStatus> prepareModelFromCache_1_3( - const V1_3::OptionalTimePoint&, - const hardware::hidl_vec<hardware::hidl_handle>& modelCacheHandle, - const hardware::hidl_vec<hardware::hidl_handle>& dataCacheHandle, const HalCacheToken&, + Return<V1_3::ErrorStatus> prepareModelFromCache_1_3( + const OptionalTimePoint&, const hidl_vec<hidl_handle>& modelCacheHandle, + const hidl_vec<hidl_handle>& dataCacheHandle, const CacheToken&, const sp<V1_3::IPreparedModelCallback>& callback) override { readFromCache(modelCacheHandle, mModelCacheData); readFromCache(dataCacheHandle, mDataCacheData); @@ -244,8 +236,7 @@ } } - void writeToCache(const hardware::hidl_vec<hardware::hidl_handle>& handles, - const std::vector<uint8_t>& cache) { + void writeToCache(const hidl_vec<hidl_handle>& handles, const std::vector<uint8_t>& cache) { for (uint32_t i = 0; i < handles.size(); ++i) { ASSERT_EQ(handles[i]->numFds, 1); EXPECT_EQ(write(handles[i]->data[0], cache.data(), kCacheSize), @@ -253,8 
+244,7 @@ } } - void readFromCache(const hardware::hidl_vec<hardware::hidl_handle>& handles, - const std::vector<uint8_t>& expected) { + void readFromCache(const hidl_vec<hidl_handle>& handles, const std::vector<uint8_t>& expected) { for (uint32_t i = 0; i < handles.size(); ++i) { ASSERT_EQ(handles[i]->numFds, 1); std::vector<uint8_t> actual(kCacheSize); @@ -267,10 +257,10 @@ std::vector<uint8_t> mModelCacheData; std::vector<uint8_t> mDataCacheData; - const V1_3::ErrorStatus mErrorStatusGetNumCacheFiles; + const ErrorStatus mErrorStatusGetNumCacheFiles; const uint32_t mNumModelCache; const uint32_t mNumDataCache; - const V1_3::ErrorStatus mErrorStatusPrepareFromCache; + const ErrorStatus mErrorStatusPrepareFromCache; bool mHasCalledPrepareModelFromCache = false; HasCalledPrepareModel mHasCalledPrepareModel = HasCalledPrepareModel::NO; @@ -289,7 +279,7 @@ model->addOperation(ANEURALNETWORKS_ADD, {a, b, d}, {c}); model->identifyInputsAndOutputs({a, b}, {c}); ASSERT_TRUE(model->isValid()); - ASSERT_EQ(model->finish(), WrapperResult::NO_ERROR); + ASSERT_EQ(model->finish(), Result::NO_ERROR); } void getDeviceWithName(std::string_view deviceName, const ANeuralNetworksDevice** outputDevice) { @@ -317,17 +307,17 @@ // - ErrorStatus returning from getNumberOfCacheFilesNeeded // - Number of model cache files returning from getNumberOfCacheFilesNeeded // - Number of data cache files returning from getNumberOfCacheFilesNeeded -using DeviceRegistrationTestParam = std::tuple<V1_3::ErrorStatus, uint32_t, uint32_t>; +using DeviceRegistrationTestParam = std::tuple<ErrorStatus, uint32_t, uint32_t>; class DeviceRegistrationTest : public ::testing::TestWithParam<DeviceRegistrationTestParam> { protected: static constexpr std::string_view kDeviceName = "deviceTestCompilationCaching"; - const V1_3::ErrorStatus kErrorStatusGetNumCacheFiles = std::get<0>(GetParam()); + const ErrorStatus kErrorStatusGetNumCacheFiles = std::get<0>(GetParam()); const uint32_t kNumModelCache = 
std::get<1>(GetParam()); const uint32_t kNumDataCache = std::get<2>(GetParam()); const sp<CachingDriver> kDriver = new CachingDriver(kDeviceName, kErrorStatusGetNumCacheFiles, kNumModelCache, - kNumDataCache, V1_3::ErrorStatus::NONE); + kNumDataCache, ErrorStatus::NONE); }; TEST_P(DeviceRegistrationTest, CachingFailure) { @@ -335,7 +325,7 @@ return; } - DeviceManager::get()->forTest_registerDevice(makeSharedDevice(kDeviceName.data(), kDriver)); + DeviceManager::get()->forTest_registerDevice(kDeviceName.data(), kDriver); const auto cleanup = android::base::make_scope_guard( [] { DeviceManager::get()->forTest_reInitializeDeviceList(); }); @@ -354,13 +344,12 @@ // - Number of model cache files returning from getNumberOfCacheFilesNeeded // - Number of data cache files returning from getNumberOfCacheFilesNeeded // - ErrorStatus returning from prepareModelFromCache_1_3 -using CompilationCachingTestParam = std::tuple<uint32_t, uint32_t, V1_3::ErrorStatus>; +using CompilationCachingTestParam = std::tuple<uint32_t, uint32_t, ErrorStatus>; class CompilationCachingTest : public ::testing::TestWithParam<CompilationCachingTestParam> { protected: virtual void SetUp() override { - char cacheDirTemp[] = - "/data/local/tmp/AVeryLongDirectoryNameForTestCompilationCachingXXXXXX"; + char cacheDirTemp[] = "/data/local/tmp/TestCompilationCachingXXXXXX"; char* cacheDir = mkdtemp(cacheDirTemp); ASSERT_NE(cacheDir, nullptr); mCacheDir = cacheDir; @@ -374,7 +363,7 @@ } void compileModel(const sp<CachingDriver>& driver, bool withToken) { - DeviceManager::get()->forTest_registerDevice(makeSharedDevice(kDeviceName.data(), driver)); + DeviceManager::get()->forTest_registerDevice(kDeviceName.data(), driver); const auto cleanup = android::base::make_scope_guard( [] { DeviceManager::get()->forTest_reInitializeDeviceList(); }); @@ -394,35 +383,30 @@ ANEURALNETWORKS_NO_ERROR); } ASSERT_EQ(ANeuralNetworksCompilation_finish(compilation), ANEURALNETWORKS_NO_ERROR); - - // close memory - 
ANeuralNetworksCompilation_free(compilation); } void createCache() { - sp<CachingDriver> driver = - new CachingDriver(kDeviceName, V1_3::ErrorStatus::NONE, kNumModelCache, - kNumDataCache, V1_3::ErrorStatus::NONE); + sp<CachingDriver> driver = new CachingDriver(kDeviceName, ErrorStatus::NONE, kNumModelCache, + kNumDataCache, ErrorStatus::NONE); compileModel(driver, /*withToken=*/true); } static constexpr std::string_view kDeviceName = "deviceTestCompilationCaching"; const uint32_t kNumModelCache = std::get<0>(GetParam()); const uint32_t kNumDataCache = std::get<1>(GetParam()); - const V1_3::ErrorStatus kErrorStatusPrepareFromCache = std::get<2>(GetParam()); + const ErrorStatus kErrorStatusPrepareFromCache = std::get<2>(GetParam()); const bool kIsCachingSupported = isCachingSupported(kNumModelCache, kNumDataCache); test_wrapper::Model mModel; std::string mCacheDir; - const HalCacheToken kToken{}; + const CacheToken kToken{}; }; TEST_P(CompilationCachingTest, TokenProvidedAndCacheNotExist) { if (DeviceManager::get()->getUseCpuOnly()) { return; } - sp<CachingDriver> driver = - new CachingDriver(kDeviceName, V1_3::ErrorStatus::NONE, kNumModelCache, kNumDataCache, - kErrorStatusPrepareFromCache); + sp<CachingDriver> driver = new CachingDriver(kDeviceName, ErrorStatus::NONE, kNumModelCache, + kNumDataCache, kErrorStatusPrepareFromCache); compileModel(driver, /*withToken=*/true); // When cache file does not exist, the runtime should never call prepareModelFromCache_1_3. 
@@ -439,9 +423,8 @@ return; } createCache(); - sp<CachingDriver> driver = - new CachingDriver(kDeviceName, V1_3::ErrorStatus::NONE, kNumModelCache, kNumDataCache, - kErrorStatusPrepareFromCache); + sp<CachingDriver> driver = new CachingDriver(kDeviceName, ErrorStatus::NONE, kNumModelCache, + kNumDataCache, kErrorStatusPrepareFromCache); compileModel(driver, /*withToken=*/true); // When cache files exist, the runtime should call prepareModelFromCache_1_3 iff caching @@ -450,7 +433,7 @@ HasCalledPrepareModel expectHasCalledPrepareModel; if (kIsCachingSupported) { - if (kErrorStatusPrepareFromCache == V1_3::ErrorStatus::NONE) { + if (kErrorStatusPrepareFromCache == ErrorStatus::NONE) { // The runtime should not call prepareModel_1_3 iff caching supported and // prepareModelFromCache_1_3 succeeds. expectHasCalledPrepareModel = HasCalledPrepareModel::NO; @@ -470,9 +453,8 @@ if (DeviceManager::get()->getUseCpuOnly()) { return; } - sp<CachingDriver> driver = - new CachingDriver(kDeviceName, V1_3::ErrorStatus::NONE, kNumModelCache, kNumDataCache, - kErrorStatusPrepareFromCache); + sp<CachingDriver> driver = new CachingDriver(kDeviceName, ErrorStatus::NONE, kNumModelCache, + kNumDataCache, kErrorStatusPrepareFromCache); compileModel(driver, /*withToken=*/false); // When no NDK token is provided by the client, the runtime should never call @@ -482,22 +464,22 @@ } static const auto kErrorStatusGetNumCacheFilesChoices = - testing::Values(V1_3::ErrorStatus::NONE, V1_3::ErrorStatus::DEVICE_UNAVAILABLE); + testing::Values(ErrorStatus::NONE, ErrorStatus::DEVICE_UNAVAILABLE); static const auto kNumCacheChoices = - testing::Values(0ul, 1ul, static_cast<uint32_t>(V1_2::Constant::MAX_NUMBER_OF_CACHE_FILES), - static_cast<uint32_t>(V1_2::Constant::MAX_NUMBER_OF_CACHE_FILES) + 1); + testing::Values(0ul, 1ul, static_cast<uint32_t>(Constant::MAX_NUMBER_OF_CACHE_FILES), + static_cast<uint32_t>(Constant::MAX_NUMBER_OF_CACHE_FILES) + 1); static const auto kNumValidCacheChoices = - 
testing::Values(0ul, 1ul, static_cast<uint32_t>(V1_2::Constant::MAX_NUMBER_OF_CACHE_FILES)); + testing::Values(0ul, 1ul, static_cast<uint32_t>(Constant::MAX_NUMBER_OF_CACHE_FILES)); static const auto kErrorStatusPrepareFromCacheChoices = - testing::Values(V1_3::ErrorStatus::NONE, V1_3::ErrorStatus::GENERAL_FAILURE, - V1_3::ErrorStatus::DEVICE_UNAVAILABLE, V1_3::ErrorStatus::INVALID_ARGUMENT); + testing::Values(ErrorStatus::NONE, ErrorStatus::GENERAL_FAILURE, + ErrorStatus::DEVICE_UNAVAILABLE, ErrorStatus::INVALID_ARGUMENT); -INSTANTIATE_TEST_SUITE_P(TestCompilationCaching, DeviceRegistrationTest, - testing::Combine(kErrorStatusGetNumCacheFilesChoices, kNumCacheChoices, - kNumCacheChoices)); +INSTANTIATE_TEST_CASE_P(TestCompilationCaching, DeviceRegistrationTest, + testing::Combine(kErrorStatusGetNumCacheFilesChoices, kNumCacheChoices, + kNumCacheChoices)); -INSTANTIATE_TEST_SUITE_P(TestCompilationCaching, CompilationCachingTest, - testing::Combine(kNumValidCacheChoices, kNumValidCacheChoices, - kErrorStatusPrepareFromCacheChoices)); +INSTANTIATE_TEST_CASE_P(TestCompilationCaching, CompilationCachingTest, + testing::Combine(kNumValidCacheChoices, kNumValidCacheChoices, + kErrorStatusPrepareFromCacheChoices)); } // namespace
diff --git a/runtime/test/TestCompliance.cpp b/runtime/test/TestCompliance.cpp index 815ea5c..db5ab4d 100644 --- a/runtime/test/TestCompliance.cpp +++ b/runtime/test/TestCompliance.cpp
@@ -14,19 +14,19 @@ * limitations under the License. */ -#include <HalInterfaces.h> -#include <MemoryUtils.h> -#include <Utils.h> -#include <android-base/scopeguard.h> #include <gtest/gtest.h> #include "GeneratedTestUtils.h" +#include "HalInterfaces.h" #include "Memory.h" +#include "MemoryUtils.h" #include "ModelBuilder.h" #include "TestNeuralNetworksWrapper.h" +#include "Utils.h" namespace android::nn::compliance_test { +using namespace hal; using namespace test_helper; using HidlModel = V1_3::Model; using WrapperModel = test_wrapper::Model; @@ -41,7 +41,7 @@ auto modelBuilder = reinterpret_cast<const ModelBuilder*>(wrapperModel.getHandle()); EXPECT_TRUE(modelBuilder->isFinished()); EXPECT_TRUE(modelBuilder->isValid()); - return convertToV1_3(modelBuilder->makeModel()); + return modelBuilder->makeHidlModel(); } static void testAvailableSinceV1_3(const WrapperModel& wrapperModel) { @@ -72,12 +72,12 @@ ASSERT_TRUE(compliantWithV1_0(hidlModel)); } -static void testAvailableSinceV1_2(const V1_3::Request& request) { +static void testAvailableSinceV1_2(const Request& request) { ASSERT_FALSE(compliantWithV1_0(request)); ASSERT_TRUE(compliantWithV1_2(request)); } -static void testAvailableSinceV1_3(const V1_3::Request& request) { +static void testAvailableSinceV1_3(const Request& request) { ASSERT_FALSE(compliantWithV1_0(request)); ASSERT_FALSE(compliantWithV1_2(request)); } @@ -133,9 +133,6 @@ testAvailableSinceV1_2(model); } -// Hardware buffers are an Android concept, which aren't necessarily -// available on other platforms such as ChromeOS, which also build NNAPI. 
-#if defined(__ANDROID__) TEST_F(ComplianceTest, HardwareBufferModel) { const size_t memorySize = 20; AHardwareBuffer_Desc desc{ @@ -148,9 +145,6 @@ AHardwareBuffer* buffer = nullptr; ASSERT_EQ(AHardwareBuffer_allocate(&desc, &buffer), 0); - auto allocateGuard = - android::base::make_scope_guard([buffer]() { AHardwareBuffer_release(buffer); }); - test_wrapper::Memory memory(buffer); ASSERT_TRUE(memory.isValid()); @@ -166,48 +160,48 @@ ASSERT_TRUE(model.isValid()); model.finish(); testAvailableSinceV1_2(model); + + AHardwareBuffer_release(buffer); } TEST_F(ComplianceTest, HardwareBufferRequest) { const auto [n, ahwb] = MemoryRuntimeAHWB::create(1024); ASSERT_EQ(n, ANEURALNETWORKS_NO_ERROR); - V1_3::Request::MemoryPool sharedMemoryPool, - ahwbMemoryPool = convertToV1_3(ahwb->getMemoryPool()); + Request::MemoryPool sharedMemoryPool, ahwbMemoryPool = ahwb->getMemoryPool(); sharedMemoryPool.hidlMemory(allocateSharedMemory(1024)); ASSERT_TRUE(sharedMemoryPool.hidlMemory().valid()); ASSERT_TRUE(ahwbMemoryPool.hidlMemory().valid()); // AHardwareBuffer as input. - testAvailableSinceV1_2(V1_3::Request{ + testAvailableSinceV1_2(Request{ .inputs = {{.hasNoValue = false, .location = {.poolIndex = 0}, .dimensions = {}}}, .outputs = {{.hasNoValue = false, .location = {.poolIndex = 1}, .dimensions = {}}}, .pools = {ahwbMemoryPool, sharedMemoryPool}, }); // AHardwareBuffer as output. 
- testAvailableSinceV1_2(V1_3::Request{ + testAvailableSinceV1_2(Request{ .inputs = {{.hasNoValue = false, .location = {.poolIndex = 0}, .dimensions = {}}}, .outputs = {{.hasNoValue = false, .location = {.poolIndex = 1}, .dimensions = {}}}, .pools = {sharedMemoryPool, ahwbMemoryPool}, }); } -#endif TEST_F(ComplianceTest, DeviceMemory) { - V1_3::Request::MemoryPool sharedMemoryPool, deviceMemoryPool; + Request::MemoryPool sharedMemoryPool, deviceMemoryPool; sharedMemoryPool.hidlMemory(allocateSharedMemory(1024)); ASSERT_TRUE(sharedMemoryPool.hidlMemory().valid()); deviceMemoryPool.token(1); // Device memory as input. - testAvailableSinceV1_3(V1_3::Request{ + testAvailableSinceV1_3(Request{ .inputs = {{.hasNoValue = false, .location = {.poolIndex = 0}, .dimensions = {}}}, .outputs = {{.hasNoValue = false, .location = {.poolIndex = 1}, .dimensions = {}}}, .pools = {deviceMemoryPool, sharedMemoryPool}, }); // Device memory as output. - testAvailableSinceV1_3(V1_3::Request{ + testAvailableSinceV1_3(Request{ .inputs = {{.hasNoValue = false, .location = {.poolIndex = 0}, .dimensions = {}}}, .outputs = {{.hasNoValue = false, .location = {.poolIndex = 1}, .dimensions = {}}}, .pools = {sharedMemoryPool, deviceMemoryPool},
diff --git a/runtime/test/TestControlFlow.cpp b/runtime/test/TestControlFlow.cpp index 1fdfa69..dc7a099 100644 --- a/runtime/test/TestControlFlow.cpp +++ b/runtime/test/TestControlFlow.cpp
@@ -16,21 +16,17 @@ #define LOG_TAG "TestControlFlow" -#include <ControlFlow.h> #include <android-base/logging.h> #include <gtest/gtest.h> +#include "ControlFlow.h" #include "TestNeuralNetworksWrapper.h" -namespace android::nn { +namespace android { +namespace nn { namespace { -using test_wrapper::Compilation; -using test_wrapper::Execution; -using test_wrapper::Model; -using test_wrapper::OperandType; -using test_wrapper::Result; -using test_wrapper::Type; +using namespace test_wrapper; constexpr uint64_t kMillisecondsInNanosecond = 1'000'000; constexpr int32_t kNoActivation = ANEURALNETWORKS_FUSED_NONE; @@ -110,4 +106,5 @@ } } // end namespace -} // namespace android::nn +} // namespace nn +} // namespace android
diff --git a/runtime/test/TestExecution.cpp b/runtime/test/TestExecution.cpp index 4b15950..aaeae80 100644 --- a/runtime/test/TestExecution.cpp +++ b/runtime/test/TestExecution.cpp
@@ -14,11 +14,10 @@ * limitations under the License. */ -#include <HalInterfaces.h> -#include <SampleDriver.h> -#include <ValidateHal.h> #include <gtest/gtest.h> +#include <android-base/scopeguard.h> + #include <algorithm> #include <atomic> #include <cassert> @@ -28,67 +27,62 @@ #include <tuple> #include <vector> +#include "Callbacks.h" #include "CompilationBuilder.h" -#include "ExecutionBurstServer.h" -#include "ExecutionCallback.h" -#include "HalUtils.h" +#include "HalInterfaces.h" #include "Manager.h" #include "ModelBuilder.h" #include "NeuralNetworks.h" -#include "PreparedModelCallback.h" +#include "SampleDriver.h" #include "TestNeuralNetworksWrapper.h" +#include "Utils.h" +#include "ValidateHal.h" namespace android { -namespace V1_0 = ::android::hardware::neuralnetworks::V1_0; -namespace V1_1 = ::android::hardware::neuralnetworks::V1_1; -namespace V1_2 = ::android::hardware::neuralnetworks::V1_2; -namespace V1_3 = ::android::hardware::neuralnetworks::V1_3; +using namespace nn::hal; using CompilationBuilder = nn::CompilationBuilder; using Device = nn::Device; -using SharedDevice = nn::SharedDevice; using DeviceManager = nn::DeviceManager; using HidlModel = V1_3::Model; using PreparedModelCallback = nn::PreparedModelCallback; +using Result = nn::test_wrapper::Result; using SampleDriver = nn::sample_driver::SampleDriver; using WrapperCompilation = nn::test_wrapper::Compilation; using WrapperEvent = nn::test_wrapper::Event; using WrapperExecution = nn::test_wrapper::Execution; using WrapperModel = nn::test_wrapper::Model; using WrapperOperandType = nn::test_wrapper::OperandType; -using WrapperResult = nn::test_wrapper::Result; using WrapperType = nn::test_wrapper::Type; using nn::convertToV1_0; -using nn::convertToV1_3; -using nn::ErrorStatus; template <typename T> using MQDescriptorSync = hardware::MQDescriptorSync<T>; namespace { -const V1_2::Timing kBadTiming = {.timeOnDevice = UINT64_MAX, .timeInDriver = UINT64_MAX}; +const Timing kBadTiming = {.timeOnDevice = 
UINT64_MAX, .timeInDriver = UINT64_MAX}; // Wraps the latest version of IPreparedModel to allow dummying up the execution status, // and control when the execution finishes. -class TestPreparedModelLatest : public V1_3::IPreparedModel { +class TestPreparedModelLatest : public IPreparedModel { public: // If errorStatus is NONE, then execute behaves normally (and sends back // the actual execution status). Otherwise, don't bother to execute, and // just send back errorStatus (as the execution status, not the launch // status). - TestPreparedModelLatest(sp<V1_0::IPreparedModel> preparedModel, V1_3::ErrorStatus errorStatus) + TestPreparedModelLatest(sp<V1_0::IPreparedModel> preparedModel, ErrorStatus errorStatus) : mPreparedModelV1_0(preparedModel), mPreparedModelV1_2(V1_2::IPreparedModel::castFrom(preparedModel).withDefault(nullptr)), mPreparedModelV1_3(V1_3::IPreparedModel::castFrom(preparedModel).withDefault(nullptr)), mErrorStatus(errorStatus) {} - hardware::Return<V1_0::ErrorStatus> execute( - const V1_0::Request& request, const sp<V1_0::IExecutionCallback>& callback) override { + Return<V1_0::ErrorStatus> execute(const V1_0::Request& request, + const sp<V1_0::IExecutionCallback>& callback) override { CHECK(mPreparedModelV1_0 != nullptr) << "V1_0 prepared model is nullptr."; - std::thread([this, request, callback] { + std::thread([this, &request, &callback] { dummyExecution(); - if (mErrorStatus == V1_3::ErrorStatus::NONE) { + if (mErrorStatus == ErrorStatus::NONE) { // Note that we lose the actual launch status. 
(void)mPreparedModelV1_0->execute(request, callback); } else { @@ -98,17 +92,16 @@ return V1_0::ErrorStatus::NONE; } - hardware::Return<V1_0::ErrorStatus> execute_1_2( - const V1_0::Request& request, V1_2::MeasureTiming measure, - const sp<V1_2::IExecutionCallback>& callback) override { + Return<V1_0::ErrorStatus> execute_1_2(const V1_0::Request& request, MeasureTiming measure, + const sp<V1_2::IExecutionCallback>& callback) override { CHECK(mPreparedModelV1_2 != nullptr) << "V1_2 prepared model is nullptr."; - std::thread([this, request, measure, callback] { + std::thread([this, &request, measure, &callback] { dummyExecution(); - if (mErrorStatus == V1_3::ErrorStatus::NONE) { + if (mErrorStatus == ErrorStatus::NONE) { // Note that we lose the actual launch status. (void)mPreparedModelV1_2->execute_1_2(request, measure, callback); - } else if (mErrorStatus == V1_3::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) { - V1_2::OutputShape shape = {.dimensions = {1}, .isSufficient = false}; + } else if (mErrorStatus == ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) { + OutputShape shape = {.dimensions = {1}, .isSufficient = false}; callback->notify_1_2(convertToV1_0(mErrorStatus), {shape}, kBadTiming); } else { callback->notify_1_2(convertToV1_0(mErrorStatus), {}, kBadTiming); @@ -117,20 +110,19 @@ return V1_0::ErrorStatus::NONE; } - hardware::Return<V1_3::ErrorStatus> execute_1_3( - const V1_3::Request& request, V1_2::MeasureTiming measure, - const V1_3::OptionalTimePoint& deadline, - const V1_3::OptionalTimeoutDuration& loopTimeoutDuration, - const sp<V1_3::IExecutionCallback>& callback) override { + Return<V1_3::ErrorStatus> execute_1_3(const V1_3::Request& request, MeasureTiming measure, + const OptionalTimePoint& deadline, + const OptionalTimeoutDuration& loopTimeoutDuration, + const sp<V1_3::IExecutionCallback>& callback) override { CHECK(mPreparedModelV1_3 != nullptr) << "V1_3 prepared model is nullptr."; - std::thread([this, request, measure, deadline, loopTimeoutDuration, 
callback] { + std::thread([this, &request, measure, &deadline, &loopTimeoutDuration, &callback] { dummyExecution(); - if (mErrorStatus == V1_3::ErrorStatus::NONE) { + if (mErrorStatus == ErrorStatus::NONE) { // Note that we lose the actual launch status. (void)mPreparedModelV1_3->execute_1_3(request, measure, deadline, loopTimeoutDuration, callback); - } else if (mErrorStatus == V1_3::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) { - V1_2::OutputShape shape = {.dimensions = {1}, .isSufficient = false}; + } else if (mErrorStatus == ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) { + OutputShape shape = {.dimensions = {1}, .isSufficient = false}; callback->notify_1_3(mErrorStatus, {shape}, kBadTiming); } else { callback->notify_1_3(mErrorStatus, {}, kBadTiming); @@ -139,62 +131,53 @@ return V1_3::ErrorStatus::NONE; } - hardware::Return<void> executeSynchronously(const V1_0::Request& request, - V1_2::MeasureTiming measure, - executeSynchronously_cb cb) override { + Return<void> executeSynchronously(const V1_0::Request& request, MeasureTiming measure, + executeSynchronously_cb cb) override { CHECK(mPreparedModelV1_2 != nullptr) << "V1_2 prepared model is nullptr."; dummyExecution(); - if (mErrorStatus == V1_3::ErrorStatus::NONE) { + if (mErrorStatus == ErrorStatus::NONE) { return mPreparedModelV1_2->executeSynchronously(request, measure, cb); - } else if (mErrorStatus == V1_3::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) { - V1_2::OutputShape shape = {.dimensions = {1}, .isSufficient = false}; + } else if (mErrorStatus == ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) { + OutputShape shape = {.dimensions = {1}, .isSufficient = false}; cb(convertToV1_0(mErrorStatus), {shape}, kBadTiming); - return hardware::Void(); + return Void(); } else { cb(convertToV1_0(mErrorStatus), {}, kBadTiming); - return hardware::Void(); + return Void(); } } - hardware::Return<void> executeSynchronously_1_3( - const V1_3::Request& request, V1_2::MeasureTiming measure, - const V1_3::OptionalTimePoint& deadline, - const 
V1_3::OptionalTimeoutDuration& loopTimeoutDuration, - executeSynchronously_1_3_cb cb) override { + Return<void> executeSynchronously_1_3(const V1_3::Request& request, MeasureTiming measure, + const OptionalTimePoint& deadline, + const OptionalTimeoutDuration& loopTimeoutDuration, + executeSynchronously_1_3_cb cb) override { CHECK(mPreparedModelV1_3 != nullptr) << "V1_3 prepared model is nullptr."; dummyExecution(); - if (mErrorStatus == V1_3::ErrorStatus::NONE) { + if (mErrorStatus == ErrorStatus::NONE) { return mPreparedModelV1_3->executeSynchronously_1_3(request, measure, deadline, loopTimeoutDuration, cb); - } else if (mErrorStatus == V1_3::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) { - V1_2::OutputShape shape = {.dimensions = {1}, .isSufficient = false}; + } else if (mErrorStatus == ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) { + OutputShape shape = {.dimensions = {1}, .isSufficient = false}; cb(mErrorStatus, {shape}, kBadTiming); - return hardware::Void(); + return Void(); } else { cb(mErrorStatus, {}, kBadTiming); - return hardware::Void(); + return Void(); } } - // ExecutionBurstServer::create has an overload that will use - // IPreparedModel::executeSynchronously(), so we can rely on that, rather - // than having to implement ExecutionBurstServer::IExecutorWithCache. - hardware::Return<void> configureExecutionBurst( + Return<void> configureExecutionBurst( const sp<V1_2::IBurstCallback>& callback, const MQDescriptorSync<V1_2::FmqRequestDatum>& requestChannel, const MQDescriptorSync<V1_2::FmqResultDatum>& resultChannel, configureExecutionBurst_cb cb) override { CHECK(mPreparedModelV1_2 != nullptr) << "V1_2 prepared model is nullptr."; - if (mErrorStatus == V1_3::ErrorStatus::NONE) { - const sp<V1_2::IBurstContext> burst = - nn::ExecutionBurstServer::create(callback, requestChannel, resultChannel, this); - - cb(burst == nullptr ? 
V1_0::ErrorStatus::GENERAL_FAILURE : V1_0::ErrorStatus::NONE, - burst); - return hardware::Void(); + if (mErrorStatus == ErrorStatus::NONE) { + return mPreparedModelV1_2->configureExecutionBurst(callback, requestChannel, + resultChannel, cb); } else { cb(convertToV1_0(mErrorStatus), nullptr); - return hardware::Void(); + return Void(); } } @@ -203,27 +186,25 @@ // SampleDriver is written with that in mind. Therefore, this // implementation is synchronous also. If the SampleDriver is updated to // return real sync fence, this must be updated. - hardware::Return<void> executeFenced(const V1_3::Request& request, - const hardware::hidl_vec<hardware::hidl_handle>& waitFor, - V1_2::MeasureTiming measure, - const V1_3::OptionalTimePoint& deadline, - const V1_3::OptionalTimeoutDuration& loopTimeoutDuration, - const V1_3::OptionalTimeoutDuration& duration, - executeFenced_cb cb) override { + Return<void> executeFenced(const V1_3::Request& request, const hidl_vec<hidl_handle>& waitFor, + MeasureTiming measure, const OptionalTimePoint& deadline, + const OptionalTimeoutDuration& loopTimeoutDuration, + const OptionalTimeoutDuration& duration, + executeFenced_cb cb) override { CHECK(mPreparedModelV1_3 != nullptr) << "V1_3 prepared model is nullptr."; - CHECK(mErrorStatus != V1_3::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) + CHECK(mErrorStatus != ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) << "executeFenced does not support dynamic output shape"; dummyExecution(); - if (mErrorStatus == V1_3::ErrorStatus::NONE) { + if (mErrorStatus == ErrorStatus::NONE) { return mPreparedModelV1_3->executeFenced(request, waitFor, measure, deadline, loopTimeoutDuration, duration, cb); } else { // Due to the limitations of the SampleDriver, all failures look // like launch failures. If the SampleDriver is updated to return // real sync fences, this must be updated. 
- cb(mErrorStatus, hardware::hidl_handle(nullptr), nullptr); + cb(mErrorStatus, hidl_handle(nullptr), nullptr); } - return hardware::Void(); + return Void(); } // We can place the TestPreparedModelLatest system in a "pause" mode where @@ -232,20 +213,13 @@ static void pauseExecutions(bool v) { mPauseExecutions.store(v); } // This function is only guaranteed to work in the following pattern: - // Consider thread A as primary thread - // - thread A: pauseExecutions(true); - // - thread A: launch execution (as thread B) - // - thread A: waitForExecutionToBegin(), block until call to dummyExecution by - // thread B makes mExecutionsInFlight nonzero - // - thread B: dummyExecution(), which makes mExecutionsInFlight nonzero and blocks - // until thread A calls pauseExecutions(false) - // - thread A: waitForExecutionToBegin() returns - // - thread A: pauseExecutions(false), allowing dummyExecution() on thread B to continue - // - thread B: dummyExecution() zeroes mExecutionsInFlight and returns - // - thread B: thread exits + // - pauseExecutions(true); + // - // launch execution + // - // thread A: waitForExecutionToBegin() + // - // thread B: pauseExecutions(false); static void waitForExecutionToBegin() { CHECK(mPauseExecutions.load()); - while (mExecutionsInFlight.load() == 0) { + while (mExecutionsInFlight.load()) { } } @@ -253,7 +227,7 @@ const sp<V1_0::IPreparedModel> mPreparedModelV1_0; const sp<V1_2::IPreparedModel> mPreparedModelV1_2; const sp<V1_3::IPreparedModel> mPreparedModelV1_3; - V1_3::ErrorStatus mErrorStatus; + ErrorStatus mErrorStatus; static std::atomic<bool> mPauseExecutions; static std::atomic<unsigned int> mExecutionsInFlight; @@ -273,27 +247,25 @@ // Like TestPreparedModelLatest, but implementing 1.2 class TestPreparedModel12 : public V1_2::IPreparedModel { public: - TestPreparedModel12(sp<V1_0::IPreparedModel> preparedModel, V1_3::ErrorStatus errorStatus) + TestPreparedModel12(sp<V1_0::IPreparedModel> preparedModel, ErrorStatus errorStatus) : 
mLatestPreparedModel(new TestPreparedModelLatest(preparedModel, errorStatus)) {} - hardware::Return<V1_0::ErrorStatus> execute( - const V1_0::Request& request, const sp<V1_0::IExecutionCallback>& callback) override { + Return<V1_0::ErrorStatus> execute(const V1_0::Request& request, + const sp<V1_0::IExecutionCallback>& callback) override { return mLatestPreparedModel->execute(request, callback); } - hardware::Return<V1_0::ErrorStatus> execute_1_2( - const V1_0::Request& request, V1_2::MeasureTiming measure, - const sp<V1_2::IExecutionCallback>& callback) override { + Return<V1_0::ErrorStatus> execute_1_2(const V1_0::Request& request, MeasureTiming measure, + const sp<V1_2::IExecutionCallback>& callback) override { return mLatestPreparedModel->execute_1_2(request, measure, callback); } - hardware::Return<void> executeSynchronously(const V1_0::Request& request, - V1_2::MeasureTiming measure, - executeSynchronously_cb cb) override { + Return<void> executeSynchronously(const V1_0::Request& request, MeasureTiming measure, + executeSynchronously_cb cb) override { return mLatestPreparedModel->executeSynchronously(request, measure, cb); } - hardware::Return<void> configureExecutionBurst( + Return<void> configureExecutionBurst( const sp<V1_2::IBurstCallback>& callback, const MQDescriptorSync<V1_2::FmqRequestDatum>& requestChannel, const MQDescriptorSync<V1_2::FmqResultDatum>& resultChannel, @@ -303,22 +275,22 @@ } private: - const sp<V1_3::IPreparedModel> mLatestPreparedModel; + const sp<IPreparedModel> mLatestPreparedModel; }; // Like TestPreparedModelLatest, but implementing 1.0 class TestPreparedModel10 : public V1_0::IPreparedModel { public: - TestPreparedModel10(sp<V1_0::IPreparedModel> preparedModel, V1_3::ErrorStatus errorStatus) + TestPreparedModel10(sp<V1_0::IPreparedModel> preparedModel, ErrorStatus errorStatus) : mLatestPreparedModel(new TestPreparedModelLatest(preparedModel, errorStatus)) {} - hardware::Return<V1_0::ErrorStatus> execute( - const V1_0::Request& 
request, const sp<V1_0::IExecutionCallback>& callback) override { + Return<V1_0::ErrorStatus> execute(const V1_0::Request& request, + const sp<V1_0::IExecutionCallback>& callback) override { return mLatestPreparedModel->execute(request, callback); } private: - const sp<V1_3::IPreparedModel> mLatestPreparedModel; + const sp<IPreparedModel> mLatestPreparedModel; }; // Behaves like SampleDriver, except that it produces wrapped IPreparedModel. @@ -330,13 +302,13 @@ // status). Otherwise, don't bother to execute, and just send // back errorStatus (as the execution status, not the launch // status). - TestDriver13(const std::string& name, V1_3::ErrorStatus errorStatus) + TestDriver13(const std::string& name, ErrorStatus errorStatus) : SampleDriver(name.c_str()), mErrorStatus(errorStatus) {} - hardware::Return<void> getCapabilities_1_3(getCapabilities_1_3_cb _hidl_cb) override { + Return<void> getCapabilities_1_3(getCapabilities_1_3_cb _hidl_cb) override { android::nn::initVLogMask(); - const V1_0::PerformanceInfo kPerf = {.execTime = 0.75f, .powerUsage = 0.75f}; - V1_3::Capabilities capabilities = { + const PerformanceInfo kPerf = {.execTime = 0.75f, .powerUsage = 0.75f}; + Capabilities capabilities = { .relaxedFloat32toFloat16PerformanceScalar = kPerf, .relaxedFloat32toFloat16PerformanceTensor = kPerf, .operandPerformance = @@ -344,43 +316,41 @@ .ifPerformance = kPerf, .whilePerformance = kPerf}; _hidl_cb(V1_3::ErrorStatus::NONE, capabilities); - return hardware::Void(); + return Void(); } - hardware::Return<void> getSupportedOperations_1_3(const HidlModel& model, - getSupportedOperations_1_3_cb cb) override { + Return<void> getSupportedOperations_1_3(const HidlModel& model, + getSupportedOperations_1_3_cb cb) override { if (nn::validateModel(model)) { std::vector<bool> supported(model.main.operations.size(), true); cb(V1_3::ErrorStatus::NONE, supported); } else { cb(V1_3::ErrorStatus::INVALID_ARGUMENT, {}); } - return hardware::Void(); + return Void(); } - 
hardware::Return<V1_3::ErrorStatus> prepareModel_1_3( - const HidlModel& model, V1_1::ExecutionPreference preference, V1_3::Priority priority, - const V1_3::OptionalTimePoint& deadline, - const hardware::hidl_vec<hardware::hidl_handle>& modelCache, - const hardware::hidl_vec<hardware::hidl_handle>& dataCache, - const nn::HalCacheToken& token, + Return<V1_3::ErrorStatus> prepareModel_1_3( + const HidlModel& model, ExecutionPreference preference, Priority priority, + const OptionalTimePoint& deadline, const hidl_vec<hidl_handle>& modelCache, + const hidl_vec<hidl_handle>& dataCache, const CacheToken& token, const sp<V1_3::IPreparedModelCallback>& actualCallback) override { sp<PreparedModelCallback> localCallback = new PreparedModelCallback; - hardware::Return<V1_3::ErrorStatus> prepareModelReturn = SampleDriver::prepareModel_1_3( + Return<V1_3::ErrorStatus> prepareModelReturn = SampleDriver::prepareModel_1_3( model, preference, priority, deadline, modelCache, dataCache, token, localCallback); if (!prepareModelReturn.isOkUnchecked()) { return prepareModelReturn; } - if (prepareModelReturn != V1_3::ErrorStatus::NONE) { + if (prepareModelReturn != ErrorStatus::NONE) { actualCallback->notify_1_3( - convertToV1_3(localCallback->getStatus()), + localCallback->getStatus(), V1_3::IPreparedModel::castFrom(localCallback->getPreparedModel())); return prepareModelReturn; } localCallback->wait(); if (localCallback->getStatus() != ErrorStatus::NONE) { actualCallback->notify_1_3( - convertToV1_3(localCallback->getStatus()), + localCallback->getStatus(), V1_3::IPreparedModel::castFrom(localCallback->getPreparedModel())); } else { actualCallback->notify_1_3( @@ -390,14 +360,13 @@ return prepareModelReturn; } - hardware::Return<V1_0::ErrorStatus> prepareModel_1_2( - const V1_2::Model& model, V1_1::ExecutionPreference preference, - const hardware::hidl_vec<hardware::hidl_handle>& modelCache, - const hardware::hidl_vec<hardware::hidl_handle>& dataCache, - const nn::HalCacheToken& token, 
+ Return<V1_0::ErrorStatus> prepareModel_1_2( + const V1_2::Model& model, ExecutionPreference preference, + const hidl_vec<hidl_handle>& modelCache, const hidl_vec<hidl_handle>& dataCache, + const CacheToken& token, const sp<V1_2::IPreparedModelCallback>& actualCallback) override { sp<PreparedModelCallback> localCallback = new PreparedModelCallback; - hardware::Return<V1_0::ErrorStatus> prepareModelReturn = SampleDriver::prepareModel_1_2( + Return<V1_0::ErrorStatus> prepareModelReturn = SampleDriver::prepareModel_1_2( model, preference, modelCache, dataCache, token, localCallback); if (!prepareModelReturn.isOkUnchecked()) { return prepareModelReturn; @@ -421,11 +390,11 @@ return prepareModelReturn; } - hardware::Return<V1_0::ErrorStatus> prepareModel_1_1( - const V1_1::Model& model, V1_1::ExecutionPreference preference, + Return<V1_0::ErrorStatus> prepareModel_1_1( + const V1_1::Model& model, ExecutionPreference preference, const sp<V1_0::IPreparedModelCallback>& actualCallback) override { sp<PreparedModelCallback> localCallback = new PreparedModelCallback; - hardware::Return<V1_0::ErrorStatus> prepareModelReturn = + Return<V1_0::ErrorStatus> prepareModelReturn = SampleDriver::prepareModel_1_1(model, preference, localCallback); if (!prepareModelReturn.isOkUnchecked()) { return prepareModelReturn; @@ -447,79 +416,75 @@ return prepareModelReturn; } - hardware::Return<V1_0::ErrorStatus> prepareModel( + Return<V1_0::ErrorStatus> prepareModel( const V1_0::Model& model, const sp<V1_0::IPreparedModelCallback>& actualCallback) override { - return prepareModel_1_1(nn::convertToV1_1(model), - V1_1::ExecutionPreference::FAST_SINGLE_ANSWER, actualCallback); + return prepareModel_1_1(nn::convertToV1_1(model), ExecutionPreference::FAST_SINGLE_ANSWER, + actualCallback); } private: - V1_3::ErrorStatus mErrorStatus; + ErrorStatus mErrorStatus; }; // Like TestDriver, but implementing 1.2 class TestDriver12 : public V1_2::IDevice { public: - TestDriver12(const std::string& name, 
V1_3::ErrorStatus errorStatus) + TestDriver12(const std::string& name, ErrorStatus errorStatus) : mLatestDriver(new TestDriver13(name, errorStatus)) {} - hardware::Return<void> getCapabilities_1_2(getCapabilities_1_2_cb _hidl_cb) override { + Return<void> getCapabilities_1_2(getCapabilities_1_2_cb _hidl_cb) override { return mLatestDriver->getCapabilities_1_2(_hidl_cb); } - hardware::Return<void> getCapabilities_1_1(getCapabilities_1_1_cb _hidl_cb) override { + Return<void> getCapabilities_1_1(getCapabilities_1_1_cb _hidl_cb) override { return mLatestDriver->getCapabilities_1_1(_hidl_cb); } - hardware::Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override { + Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override { return mLatestDriver->getCapabilities(_hidl_cb); } - hardware::Return<void> getSupportedOperations_1_2( - const V1_2::Model& model, getSupportedOperations_1_2_cb _hidl_cb) override { + Return<void> getSupportedOperations_1_2(const V1_2::Model& model, + getSupportedOperations_1_2_cb _hidl_cb) override { return mLatestDriver->getSupportedOperations_1_2(model, _hidl_cb); } - hardware::Return<void> getSupportedOperations_1_1( - const V1_1::Model& model, getSupportedOperations_1_1_cb _hidl_cb) override { + Return<void> getSupportedOperations_1_1(const V1_1::Model& model, + getSupportedOperations_1_1_cb _hidl_cb) override { return mLatestDriver->getSupportedOperations_1_1(model, _hidl_cb); } - hardware::Return<void> getSupportedOperations(const V1_0::Model& model, - getSupportedOperations_cb _hidl_cb) override { + Return<void> getSupportedOperations(const V1_0::Model& model, + getSupportedOperations_cb _hidl_cb) override { return mLatestDriver->getSupportedOperations(model, _hidl_cb); } - hardware::Return<V1_0::ErrorStatus> prepareModel_1_2( - const V1_2::Model& model, V1_1::ExecutionPreference preference, - const hardware::hidl_vec<hardware::hidl_handle>& modelCache, - const hardware::hidl_vec<hardware::hidl_handle>& dataCache, - 
const nn::HalCacheToken& token, + Return<V1_0::ErrorStatus> prepareModel_1_2( + const V1_2::Model& model, ExecutionPreference preference, + const hidl_vec<hidl_handle>& modelCache, const hidl_vec<hidl_handle>& dataCache, + const CacheToken& token, const sp<V1_2::IPreparedModelCallback>& actualCallback) override { return mLatestDriver->prepareModel_1_2(model, preference, modelCache, dataCache, token, actualCallback); } - hardware::Return<V1_0::ErrorStatus> prepareModel_1_1( - const V1_1::Model& model, V1_1::ExecutionPreference preference, + Return<V1_0::ErrorStatus> prepareModel_1_1( + const V1_1::Model& model, ExecutionPreference preference, const sp<V1_0::IPreparedModelCallback>& actualCallback) override { return mLatestDriver->prepareModel_1_1(model, preference, actualCallback); } - hardware::Return<V1_0::ErrorStatus> prepareModel( + Return<V1_0::ErrorStatus> prepareModel( const V1_0::Model& model, const sp<V1_0::IPreparedModelCallback>& actualCallback) override { return mLatestDriver->prepareModel(model, actualCallback); } - hardware::Return<V1_0::DeviceStatus> getStatus() override { return mLatestDriver->getStatus(); } - hardware::Return<void> getVersionString(getVersionString_cb _hidl_cb) override { + Return<DeviceStatus> getStatus() override { return mLatestDriver->getStatus(); } + Return<void> getVersionString(getVersionString_cb _hidl_cb) override { return mLatestDriver->getVersionString(_hidl_cb); } - hardware::Return<void> getType(getType_cb _hidl_cb) override { - return mLatestDriver->getType(_hidl_cb); - } - hardware::Return<void> getSupportedExtensions(getSupportedExtensions_cb _hidl_cb) { + Return<void> getType(getType_cb _hidl_cb) override { return mLatestDriver->getType(_hidl_cb); } + Return<void> getSupportedExtensions(getSupportedExtensions_cb _hidl_cb) { return mLatestDriver->getSupportedExtensions(_hidl_cb); } - hardware::Return<void> getNumberOfCacheFilesNeeded(getNumberOfCacheFilesNeeded_cb _hidl_cb) { + Return<void> 
getNumberOfCacheFilesNeeded(getNumberOfCacheFilesNeeded_cb _hidl_cb) { return mLatestDriver->getNumberOfCacheFilesNeeded(_hidl_cb); } - hardware::Return<V1_0::ErrorStatus> prepareModelFromCache( - const hardware::hidl_vec<hardware::hidl_handle>& modelCache, - const hardware::hidl_vec<hardware::hidl_handle>& dataCache, - const nn::HalCacheToken& token, const sp<V1_2::IPreparedModelCallback>& callback) { + Return<V1_0::ErrorStatus> prepareModelFromCache( + const hidl_vec<hidl_handle>& modelCache, const hidl_vec<hidl_handle>& dataCache, + const CacheToken& token, const sp<V1_2::IPreparedModelCallback>& callback) { return mLatestDriver->prepareModelFromCache(modelCache, dataCache, token, callback); } @@ -530,29 +495,29 @@ // Like TestDriver, but implementing 1.1 class TestDriver11 : public V1_1::IDevice { public: - TestDriver11(const std::string& name, V1_3::ErrorStatus errorStatus) + TestDriver11(const std::string& name, ErrorStatus errorStatus) : mLatestDriver(new TestDriver13(name, errorStatus)) {} - hardware::Return<void> getCapabilities_1_1(getCapabilities_1_1_cb _hidl_cb) override { + Return<void> getCapabilities_1_1(getCapabilities_1_1_cb _hidl_cb) override { return mLatestDriver->getCapabilities_1_1(_hidl_cb); } - hardware::Return<void> getSupportedOperations_1_1( - const V1_1::Model& model, getSupportedOperations_1_1_cb _hidl_cb) override { + Return<void> getSupportedOperations_1_1(const V1_1::Model& model, + getSupportedOperations_1_1_cb _hidl_cb) override { return mLatestDriver->getSupportedOperations_1_1(model, _hidl_cb); } - hardware::Return<V1_0::ErrorStatus> prepareModel_1_1( - const V1_1::Model& model, V1_1::ExecutionPreference preference, + Return<V1_0::ErrorStatus> prepareModel_1_1( + const V1_1::Model& model, ExecutionPreference preference, const sp<V1_0::IPreparedModelCallback>& actualCallback) override { return mLatestDriver->prepareModel_1_1(model, preference, actualCallback); } - hardware::Return<V1_0::DeviceStatus> getStatus() override { return 
mLatestDriver->getStatus(); } - hardware::Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override { + Return<DeviceStatus> getStatus() override { return mLatestDriver->getStatus(); } + Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override { return mLatestDriver->getCapabilities(_hidl_cb); } - hardware::Return<void> getSupportedOperations(const V1_0::Model& model, - getSupportedOperations_cb _hidl_cb) override { + Return<void> getSupportedOperations(const V1_0::Model& model, + getSupportedOperations_cb _hidl_cb) override { return mLatestDriver->getSupportedOperations(model, _hidl_cb); } - hardware::Return<V1_0::ErrorStatus> prepareModel( + Return<V1_0::ErrorStatus> prepareModel( const V1_0::Model& model, const sp<V1_0::IPreparedModelCallback>& actualCallback) override { return mLatestDriver->prepareModel(model, actualCallback); @@ -565,21 +530,21 @@ // Like TestDriver, but implementing 1.0 class TestDriver10 : public V1_0::IDevice { public: - TestDriver10(const std::string& name, V1_3::ErrorStatus errorStatus) + TestDriver10(const std::string& name, ErrorStatus errorStatus) : mLatestDriver(new TestDriver13(name, errorStatus)) {} - hardware::Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override { + Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override { return mLatestDriver->getCapabilities(_hidl_cb); } - hardware::Return<void> getSupportedOperations(const V1_0::Model& model, - getSupportedOperations_cb _hidl_cb) override { + Return<void> getSupportedOperations(const V1_0::Model& model, + getSupportedOperations_cb _hidl_cb) override { return mLatestDriver->getSupportedOperations(model, _hidl_cb); } - hardware::Return<V1_0::ErrorStatus> prepareModel( + Return<V1_0::ErrorStatus> prepareModel( const V1_0::Model& model, const sp<V1_0::IPreparedModelCallback>& actualCallback) override { return mLatestDriver->prepareModel(model, actualCallback); } - hardware::Return<V1_0::DeviceStatus> getStatus() override { return 
mLatestDriver->getStatus(); } + Return<DeviceStatus> getStatus() override { return mLatestDriver->getStatus(); } private: const sp<V1_3::IDevice> mLatestDriver; @@ -597,10 +562,10 @@ // Otherwise, don't bother to execute, and just send back // errorStatus (as the execution status, not the launch status). TestCompilation(const WrapperModel* model, const std::string& deviceName, - V1_3::ErrorStatus errorStatus) { + ErrorStatus errorStatus) { std::vector<std::shared_ptr<Device>> devices; auto device = DeviceManager::forTest_makeDriverDevice( - nn::makeSharedDevice(deviceName, new DriverClass(deviceName, errorStatus))); + deviceName, new DriverClass(deviceName, errorStatus)); devices.push_back(device); nn::ModelBuilder* m = reinterpret_cast<nn::ModelBuilder*>(model->getHandle()); @@ -611,7 +576,7 @@ // fall back to CPU. (If we allow CPU fallback, then when our // TestDriver reports an execution failure, we'll re-execute // on CPU, and will not see the failure.) - c->forTest_setPartitioning(DeviceManager::kPartitioningWithoutFallback); + c->setPartitioning(DeviceManager::kPartitioningWithoutFallback); mCompilation = reinterpret_cast<ANeuralNetworksCompilation*>(c); } }; @@ -650,7 +615,7 @@ template <class DriverClass> class ExecutionTestTemplate - : public ::testing::TestWithParam<std::tuple<V1_3::ErrorStatus, WrapperResult, bool>> { + : public ::testing::TestWithParam<std::tuple<ErrorStatus, Result, bool>> { public: ExecutionTestTemplate() : kName(toString(std::get<0>(GetParam()))), @@ -659,8 +624,8 @@ kUseIntrospectionAPI(std::get<2>(GetParam())), mModel(makeModel()) { if (kUseIntrospectionAPI) { - DeviceManager::get()->forTest_registerDevice( - nn::makeSharedDevice(kName, new DriverClass(kName.c_str(), kForceErrorStatus))); + DeviceManager::get()->forTest_registerDevice(kName.c_str(), + new DriverClass(kName, kForceErrorStatus)); mCompilation = TestIntrospectionCompilation(&mModel, kName); } else { mCompilation = TestCompilation<DriverClass>(&mModel, kName, 
kForceErrorStatus); @@ -669,8 +634,7 @@ protected: // Unit test method - // Set "reusable" to true to test reusable execution; Otherwise, test non-reusable execution. - void TestWait(bool reusable); + void TestWait(); virtual void TearDown() { // Reinitialize the device list since Introspection API path altered it. @@ -679,13 +643,6 @@ } } - void getDimensionsWhileRunning(WrapperExecution& execution) { - TestPreparedModelLatest::waitForExecutionToBegin(); - // Cannot query dimensions while execution is running - std::vector<uint32_t> dimensions; - EXPECT_EQ(execution.getOutputOperandDimensions(0, &dimensions), WrapperResult::BAD_STATE); - } - const std::string kName; // Allow dummying up the error status for execution. If @@ -693,11 +650,11 @@ // sends back the actual execution status). Otherwise, don't // bother to execute, and just send back kForceErrorStatus (as the // execution status, not the launch status). - const V1_3::ErrorStatus kForceErrorStatus; + const ErrorStatus kForceErrorStatus; - // What result do we expect from the execution? (The WrapperResult + // What result do we expect from the execution? (The Result // equivalent of kForceErrorStatus.) - const WrapperResult kExpectResult; + const Result kExpectResult; // Whether mCompilation is created via Introspection API or not. 
const bool kUseIntrospectionAPI; @@ -708,10 +665,8 @@ void setInputOutput(WrapperExecution* execution) { mInputBuffer = kInputBuffer; mOutputBuffer = kOutputBufferInitial; - ASSERT_EQ(execution->setInput(0, &mInputBuffer, sizeof(mInputBuffer)), - WrapperResult::NO_ERROR); - ASSERT_EQ(execution->setOutput(0, &mOutputBuffer, sizeof(mOutputBuffer)), - WrapperResult::NO_ERROR); + ASSERT_EQ(execution->setInput(0, &mInputBuffer, sizeof(mInputBuffer)), Result::NO_ERROR); + ASSERT_EQ(execution->setOutput(0, &mOutputBuffer, sizeof(mOutputBuffer)), Result::NO_ERROR); } const float kInputBuffer = 3.14; @@ -730,228 +685,189 @@ uint32_t output = model.addOperand(&tensorType); model.addOperation(ANEURALNETWORKS_FLOOR, {input}, {output}); model.identifyInputsAndOutputs({input}, {output}); - assert(model.finish() == WrapperResult::NO_ERROR); + assert(model.finish() == Result::NO_ERROR); return model; } }; -void computeHelper(bool reusable, const std::function<void()>& compute) { - { - SCOPED_TRACE(reusable ? "first time reusable" : "non-reusable"); - compute(); - } - if (reusable) { - SCOPED_TRACE("second time reusable"); - compute(); - } -} - template <class DriverClass> -void ExecutionTestTemplate<DriverClass>::TestWait(bool reusable) { +void ExecutionTestTemplate<DriverClass>::TestWait() { SCOPED_TRACE(kName); // Skip Introspection API tests when CPU only flag is forced on. 
if (kUseIntrospectionAPI && DeviceManager::get()->getUseCpuOnly()) { GTEST_SKIP(); } - ASSERT_EQ(mCompilation.finish(), WrapperResult::NO_ERROR); + ASSERT_EQ(mCompilation.finish(), Result::NO_ERROR); + + const auto getDimensionsWhileRunning = [](WrapperExecution& execution) { + TestPreparedModelLatest::waitForExecutionToBegin(); + // Cannot query dimensions while execution is running + std::vector<uint32_t> dimensions; + EXPECT_EQ(execution.getOutputOperandDimensions(0, &dimensions), Result::BAD_STATE); + }; { SCOPED_TRACE("startCompute"); WrapperExecution execution(&mCompilation); - ASSERT_EQ(execution.setReusable(reusable), WrapperResult::NO_ERROR); ASSERT_NO_FATAL_FAILURE(setInputOutput(&execution)); - const auto compute = [this, &execution] { - TestPreparedModelLatest::pauseExecutions(true); - WrapperEvent event; - ASSERT_EQ(execution.startCompute(&event), WrapperResult::NO_ERROR); - getDimensionsWhileRunning(execution); - TestPreparedModelLatest::pauseExecutions(false); - ASSERT_EQ(event.wait(), kExpectResult); - if (kExpectResult == WrapperResult::NO_ERROR) { - ASSERT_EQ(mOutputBuffer, kOutputBufferExpected); - } - std::vector<uint32_t> dimensions; - if (kExpectResult == WrapperResult::NO_ERROR || - kExpectResult == WrapperResult::OUTPUT_INSUFFICIENT_SIZE) { - // Only one output operand, hardcoded as index 0. 
- ASSERT_EQ(execution.getOutputOperandDimensions(0, &dimensions), kExpectResult); - ASSERT_EQ(dimensions, kOutputDimensionsExpected); - } else { - ASSERT_EQ(execution.getOutputOperandDimensions(0, &dimensions), - WrapperResult::BAD_STATE); - } - }; - computeHelper(reusable, compute); + TestPreparedModelLatest::pauseExecutions(true); + WrapperEvent event; + ASSERT_EQ(execution.startCompute(&event), Result::NO_ERROR); + getDimensionsWhileRunning(execution); + TestPreparedModelLatest::pauseExecutions(false); + ASSERT_EQ(event.wait(), kExpectResult); + if (kExpectResult == Result::NO_ERROR) { + ASSERT_EQ(mOutputBuffer, kOutputBufferExpected); + } + std::vector<uint32_t> dimensions; + if (kExpectResult == Result::NO_ERROR || + kExpectResult == Result::OUTPUT_INSUFFICIENT_SIZE) { + // Only one output operand, hardcoded as index 0. + ASSERT_EQ(execution.getOutputOperandDimensions(0, &dimensions), kExpectResult); + ASSERT_EQ(dimensions, kOutputDimensionsExpected); + } else { + ASSERT_EQ(execution.getOutputOperandDimensions(0, &dimensions), Result::BAD_STATE); + } } { SCOPED_TRACE("compute"); WrapperExecution execution(&mCompilation); - ASSERT_EQ(execution.setReusable(reusable), WrapperResult::NO_ERROR); ASSERT_NO_FATAL_FAILURE(setInputOutput(&execution)); - const auto compute = [this, &execution] { - TestPreparedModelLatest::pauseExecutions(true); - std::thread run([this, &execution] { EXPECT_EQ(execution.compute(), kExpectResult); }); - getDimensionsWhileRunning(execution); - TestPreparedModelLatest::pauseExecutions(false); - run.join(); - if (kExpectResult == WrapperResult::NO_ERROR) { - ASSERT_EQ(mOutputBuffer, kOutputBufferExpected); - } - std::vector<uint32_t> dimensions; - if (kExpectResult == WrapperResult::NO_ERROR || - kExpectResult == WrapperResult::OUTPUT_INSUFFICIENT_SIZE) { - // Only one output operand, hardcoded as index 0. 
- ASSERT_EQ(execution.getOutputOperandDimensions(0, &dimensions), kExpectResult); - ASSERT_EQ(dimensions, kOutputDimensionsExpected); - } else { - ASSERT_EQ(execution.getOutputOperandDimensions(0, &dimensions), - WrapperResult::BAD_STATE); - } - }; - computeHelper(reusable, compute); + TestPreparedModelLatest::pauseExecutions(true); + std::thread run([this, &execution] { EXPECT_EQ(execution.compute(), kExpectResult); }); + getDimensionsWhileRunning(execution); + TestPreparedModelLatest::pauseExecutions(false); + run.join(); + if (kExpectResult == Result::NO_ERROR) { + ASSERT_EQ(mOutputBuffer, kOutputBufferExpected); + } + std::vector<uint32_t> dimensions; + if (kExpectResult == Result::NO_ERROR || + kExpectResult == Result::OUTPUT_INSUFFICIENT_SIZE) { + // Only one output operand, hardcoded as index 0. + ASSERT_EQ(execution.getOutputOperandDimensions(0, &dimensions), kExpectResult); + ASSERT_EQ(dimensions, kOutputDimensionsExpected); + } else { + ASSERT_EQ(execution.getOutputOperandDimensions(0, &dimensions), Result::BAD_STATE); + } } { SCOPED_TRACE("burstCompute"); // TODO: If a burst API is added to nn::test_wrapper (e.g., - // Execution::burstCompute()), then use that, rather than - // Execution::compute(WrapperExecution::ComputeMode::BURST). + // Execution::burstCompute()), then use that, rather than using + // Execution::setComputeMode() to make Execution::compute() use burst + // functionality. 
+ + auto oldComputeMode = + WrapperExecution::setComputeMode(WrapperExecution::ComputeMode::BURST); + base::ScopeGuard restore( + [oldComputeMode] { WrapperExecution::setComputeMode(oldComputeMode); }); WrapperExecution execution(&mCompilation); - ASSERT_EQ(execution.setReusable(reusable), WrapperResult::NO_ERROR); ASSERT_NO_FATAL_FAILURE(setInputOutput(&execution)); - const auto compute = [this, &execution] { - TestPreparedModelLatest::pauseExecutions(true); - std::thread run([this, &execution] { - EXPECT_EQ(execution.compute(WrapperExecution::ComputeMode::BURST), kExpectResult); - }); - getDimensionsWhileRunning(execution); - TestPreparedModelLatest::pauseExecutions(false); - run.join(); - if (kExpectResult == WrapperResult::NO_ERROR) { - ASSERT_EQ(mOutputBuffer, kOutputBufferExpected); - } - std::vector<uint32_t> dimensions; - if (kExpectResult == WrapperResult::NO_ERROR || - kExpectResult == WrapperResult::OUTPUT_INSUFFICIENT_SIZE) { - // Only one output operand, hardcoded as index 0. - ASSERT_EQ(execution.getOutputOperandDimensions(0, &dimensions), kExpectResult); - ASSERT_EQ(dimensions, kOutputDimensionsExpected); - } else { - ASSERT_EQ(execution.getOutputOperandDimensions(0, &dimensions), - WrapperResult::BAD_STATE); - } - }; - computeHelper(reusable, compute); + TestPreparedModelLatest::pauseExecutions(true); + std::thread run([this, &execution] { EXPECT_EQ(execution.compute(), kExpectResult); }); + getDimensionsWhileRunning(execution); + TestPreparedModelLatest::pauseExecutions(false); + run.join(); + if (kExpectResult == Result::NO_ERROR) { + ASSERT_EQ(mOutputBuffer, kOutputBufferExpected); + } + std::vector<uint32_t> dimensions; + if (kExpectResult == Result::NO_ERROR || + kExpectResult == Result::OUTPUT_INSUFFICIENT_SIZE) { + // Only one output operand, hardcoded as index 0. 
+ ASSERT_EQ(execution.getOutputOperandDimensions(0, &dimensions), kExpectResult); + ASSERT_EQ(dimensions, kOutputDimensionsExpected); + } else { + ASSERT_EQ(execution.getOutputOperandDimensions(0, &dimensions), Result::BAD_STATE); + } } - if (kExpectResult != WrapperResult::OUTPUT_INSUFFICIENT_SIZE) { + if (kExpectResult != Result::OUTPUT_INSUFFICIENT_SIZE) { // computeWithDependencies doesn't support OUTPUT_INSUFFICIENT_SIZE SCOPED_TRACE("computeWithDependencies"); WrapperExecution execution(&mCompilation); - ASSERT_EQ(execution.setReusable(reusable), WrapperResult::NO_ERROR); ASSERT_NO_FATAL_FAILURE(setInputOutput(&execution)); + TestPreparedModelLatest::pauseExecutions(true); - const auto compute = [this, &execution] { - TestPreparedModelLatest::pauseExecutions(true); - - WrapperEvent event; - // Note, due to the limitation of SampleDriver implementation, the call is synchronous. - // If the SampleDriver is updated to return real sync fence, this must be updated. - std::thread run([this, &execution, &event] { - EXPECT_EQ(execution.startComputeWithDependencies({}, 0, &event), kExpectResult); - }); - getDimensionsWhileRunning(execution); - TestPreparedModelLatest::pauseExecutions(false); - run.join(); - if (kExpectResult == WrapperResult::NO_ERROR) { - ASSERT_EQ(event.wait(), kExpectResult); - ASSERT_EQ(mOutputBuffer, kOutputBufferExpected); - } else { - ASSERT_EQ(event.wait(), WrapperResult::UNEXPECTED_NULL); - } - std::vector<uint32_t> dimensions; - if (kExpectResult == WrapperResult::NO_ERROR) { - // Only one output operand, hardcoded as index 0. - ASSERT_EQ(execution.getOutputOperandDimensions(0, &dimensions), kExpectResult); - ASSERT_EQ(dimensions, kOutputDimensionsExpected); - } else { - ASSERT_EQ(execution.getOutputOperandDimensions(0, &dimensions), - WrapperResult::BAD_STATE); - } - }; - computeHelper(reusable, compute); + WrapperEvent event; + // Note, due to the limitation of SampleDriver implementation, the call is synchronous. 
+ // If the SampleDriver is updated to return real sync fence, this must be updated. + std::thread run([this, &execution, &event] { + EXPECT_EQ(execution.startComputeWithDependencies({}, 0, &event), kExpectResult); + }); + getDimensionsWhileRunning(execution); + TestPreparedModelLatest::pauseExecutions(false); + run.join(); + if (kExpectResult == Result::NO_ERROR) { + ASSERT_EQ(event.wait(), kExpectResult); + ASSERT_EQ(mOutputBuffer, kOutputBufferExpected); + } else { + ASSERT_EQ(event.wait(), Result::UNEXPECTED_NULL); + } + std::vector<uint32_t> dimensions; + if (kExpectResult == Result::NO_ERROR) { + // Only one output operand, hardcoded as index 0. + ASSERT_EQ(execution.getOutputOperandDimensions(0, &dimensions), kExpectResult); + ASSERT_EQ(dimensions, kOutputDimensionsExpected); + } else { + ASSERT_EQ(execution.getOutputOperandDimensions(0, &dimensions), Result::BAD_STATE); + } } } auto kTestValues = ::testing::Values( - std::make_tuple(V1_3::ErrorStatus::NONE, WrapperResult::NO_ERROR, + std::make_tuple(ErrorStatus::NONE, Result::NO_ERROR, /* kUseIntrospectionAPI */ false), + std::make_tuple(ErrorStatus::DEVICE_UNAVAILABLE, Result::UNAVAILABLE_DEVICE, /* kUseIntrospectionAPI */ false), - std::make_tuple(V1_3::ErrorStatus::DEVICE_UNAVAILABLE, WrapperResult::UNAVAILABLE_DEVICE, + std::make_tuple(ErrorStatus::GENERAL_FAILURE, Result::OP_FAILED, /* kUseIntrospectionAPI */ false), - std::make_tuple(V1_3::ErrorStatus::GENERAL_FAILURE, WrapperResult::OP_FAILED, + std::make_tuple(ErrorStatus::OUTPUT_INSUFFICIENT_SIZE, Result::OUTPUT_INSUFFICIENT_SIZE, /* kUseIntrospectionAPI */ false), - std::make_tuple(V1_3::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE, - WrapperResult::OUTPUT_INSUFFICIENT_SIZE, - /* kUseIntrospectionAPI */ false), - std::make_tuple(V1_3::ErrorStatus::INVALID_ARGUMENT, WrapperResult::BAD_DATA, + std::make_tuple(ErrorStatus::INVALID_ARGUMENT, Result::BAD_DATA, /* kUseIntrospectionAPI */ false)); class ExecutionTest13 : public 
ExecutionTestTemplate<TestDriver13> {}; TEST_P(ExecutionTest13, Wait) { - TestWait(/*reusable=*/false); + TestWait(); } -TEST_P(ExecutionTest13, WaitReusable) { - TestWait(/*reusable=*/true); -} -INSTANTIATE_TEST_SUITE_P(Flavor, ExecutionTest13, kTestValues); +INSTANTIATE_TEST_CASE_P(Flavor, ExecutionTest13, kTestValues); class ExecutionTest12 : public ExecutionTestTemplate<TestDriver12> {}; TEST_P(ExecutionTest12, Wait) { - TestWait(/*reusable=*/false); + TestWait(); } -TEST_P(ExecutionTest12, WaitReusable) { - TestWait(/*reusable=*/true); -} -INSTANTIATE_TEST_SUITE_P(Flavor, ExecutionTest12, kTestValues); +INSTANTIATE_TEST_CASE_P(Flavor, ExecutionTest12, kTestValues); class ExecutionTest11 : public ExecutionTestTemplate<TestDriver11> {}; TEST_P(ExecutionTest11, Wait) { - if (kForceErrorStatus == V1_3::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) return; - TestWait(/*reusable=*/false); + if (kForceErrorStatus == ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) return; + TestWait(); } -TEST_P(ExecutionTest11, WaitReusable) { - if (kForceErrorStatus == V1_3::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) return; - TestWait(/*reusable=*/true); -} -INSTANTIATE_TEST_SUITE_P(Flavor, ExecutionTest11, kTestValues); +INSTANTIATE_TEST_CASE_P(Flavor, ExecutionTest11, kTestValues); class ExecutionTest10 : public ExecutionTestTemplate<TestDriver10> {}; TEST_P(ExecutionTest10, Wait) { - if (kForceErrorStatus == V1_3::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) return; - TestWait(/*reusable=*/false); + if (kForceErrorStatus == ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) return; + TestWait(); } -TEST_P(ExecutionTest10, WaitReusable) { - if (kForceErrorStatus == V1_3::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) return; - TestWait(/*reusable=*/true); -} -INSTANTIATE_TEST_SUITE_P(Flavor, ExecutionTest10, kTestValues); +INSTANTIATE_TEST_CASE_P(Flavor, ExecutionTest10, kTestValues); auto kIntrospectionTestValues = ::testing::Values( - std::make_tuple(V1_3::ErrorStatus::NONE, WrapperResult::NO_ERROR, + 
std::make_tuple(ErrorStatus::NONE, Result::NO_ERROR, /* kUseIntrospectionAPI */ true), + std::make_tuple(ErrorStatus::DEVICE_UNAVAILABLE, Result::UNAVAILABLE_DEVICE, /* kUseIntrospectionAPI */ true), - std::make_tuple(V1_3::ErrorStatus::DEVICE_UNAVAILABLE, WrapperResult::UNAVAILABLE_DEVICE, + std::make_tuple(ErrorStatus::GENERAL_FAILURE, Result::OP_FAILED, /* kUseIntrospectionAPI */ true), - std::make_tuple(V1_3::ErrorStatus::GENERAL_FAILURE, WrapperResult::OP_FAILED, + std::make_tuple(ErrorStatus::OUTPUT_INSUFFICIENT_SIZE, Result::OUTPUT_INSUFFICIENT_SIZE, /* kUseIntrospectionAPI */ true), - std::make_tuple(V1_3::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE, - WrapperResult::OUTPUT_INSUFFICIENT_SIZE, - /* kUseIntrospectionAPI */ true), - std::make_tuple(V1_3::ErrorStatus::INVALID_ARGUMENT, WrapperResult::BAD_DATA, + std::make_tuple(ErrorStatus::INVALID_ARGUMENT, Result::BAD_DATA, /* kUseIntrospectionAPI */ true)); -INSTANTIATE_TEST_SUITE_P(IntrospectionFlavor, ExecutionTest13, kIntrospectionTestValues); +INSTANTIATE_TEST_CASE_P(IntrospectionFlavor, ExecutionTest13, kIntrospectionTestValues); } // namespace } // namespace android
diff --git a/runtime/test/TestExtensions.cpp b/runtime/test/TestExtensions.cpp index 2cfcf72..4955520 100644 --- a/runtime/test/TestExtensions.cpp +++ b/runtime/test/TestExtensions.cpp
@@ -14,18 +14,16 @@ * limitations under the License. */ -#include <HalInterfaces.h> -#include <SampleDriver.h> #include <gtest/gtest.h> #include <string> #include <vector> -#include "AppInfoFetcher.h" -#include "HalUtils.h" +#include "HalInterfaces.h" #include "Manager.h" #include "NeuralNetworks.h" #include "NeuralNetworksExtensions.h" +#include "SampleDriver.h" #include "TypeManager.h" namespace { @@ -34,9 +32,7 @@ using SampleDriver = ::android::nn::sample_driver::SampleDriver; using TypeManager = ::android::nn::TypeManager; -namespace hardware = ::android::hardware; -namespace V1_0 = ::android::hardware::neuralnetworks::V1_0; -namespace V1_3 = ::android::hardware::neuralnetworks::V1_3; +using namespace android::nn::hal; const char* kTestDriverName = "extensions-test-driver"; const char* kTestExtension1 = "vendor.test.one"; @@ -48,24 +44,23 @@ TestDriver() : SampleDriver(kTestDriverName) {} ~TestDriver() override {} - hardware::Return<void> getSupportedExtensions(getSupportedExtensions_cb cb) override { + Return<void> getSupportedExtensions(getSupportedExtensions_cb cb) override { cb(V1_0::ErrorStatus::NONE, { {.name = kTestExtension1}, {.name = kTestExtension2}, {.name = kTestExtension3}, }); - return hardware::Void(); + return Void(); } - hardware::Return<void> getCapabilities_1_3(getCapabilities_1_3_cb cb) override { - cb(V1_3::ErrorStatus::NONE, ::android::nn::makeCapabilities(1.0)); - return hardware::Void(); + Return<void> getCapabilities_1_3(getCapabilities_1_3_cb cb) override { + cb(V1_3::ErrorStatus::NONE, {/* Dummy zero-filled capabilities. 
*/}); + return Void(); } - hardware::Return<void> getSupportedOperations_1_3(const V1_3::Model&, - getSupportedOperations_1_3_cb) override { + Return<void> getSupportedOperations_1_3(const Model&, getSupportedOperations_1_3_cb) override { CHECK(false) << "not implemented"; - return hardware::Void(); + return Void(); } }; @@ -77,8 +72,7 @@ GTEST_SKIP(); } - DeviceManager::get()->forTest_registerDevice( - android::nn::makeSharedDevice(kTestDriverName, new TestDriver())); + DeviceManager::get()->forTest_registerDevice(kTestDriverName, new TestDriver()); // Discover extensions provided by registered devices. TypeManager::get()->forTest_reset(); mDevice = getDeviceByName(kTestDriverName); @@ -131,7 +125,8 @@ std::vector<std::string> allowlist = {"/data/foo", "/vendor/foo", "/odm/foo", "/product/foo", "/system/allowlisted", "/foobar/foo"}; - auto native_info = [&](const std::string& binaryPath) -> android::nn::AppInfoFetcher::AppInfo { + auto native_info = + [&](const std::string& binaryPath) -> android::nn::TypeManager::AppPackageInfo { return {.binaryPath = binaryPath, .appPackageName = "", .appIsSystemApp = false, @@ -175,30 +170,22 @@ EXPECT_TRUE(TypeManager::isExtensionsUseAllowed(native_info("/product/foo"), /* useOnProductImageEnabled = */ true, allowlist)); - - // Allowlist for vendor/data partiion is not present on Android S. - // Before S, checks below will fail. On S and later they will succeed. 
- bool disableProductAllowlist = android_get_device_api_level() >= __ANDROID_API_S__; - // Non-allowlisted /product binary, product enabled - EXPECT_EQ(TypeManager::isExtensionsUseAllowed(native_info("/product/foo_not_allowlisted"), - /* useOnProductImageEnabled = */ true, allowlist), - disableProductAllowlist); + EXPECT_FALSE(TypeManager::isExtensionsUseAllowed(native_info("/product/foo_not_allowlisted"), + /* useOnProductImageEnabled = */ true, + allowlist)); // Non-allowlisted /odm binary - EXPECT_EQ( - TypeManager::isExtensionsUseAllowed(native_info("/odm/foo_not_allowlisted"), - /* useOnProductImageEnabled = */ false, allowlist), - disableProductAllowlist); + EXPECT_FALSE(TypeManager::isExtensionsUseAllowed(native_info("/odm/foo_not_allowlisted"), + /* useOnProductImageEnabled = */ false, + allowlist)); // Non-allowlisted /vendor binary - EXPECT_EQ( - TypeManager::isExtensionsUseAllowed(native_info("/vendor/foo_not_allowlisted"), - /* useOnProductImageEnabled = */ false, allowlist), - disableProductAllowlist); + EXPECT_FALSE(TypeManager::isExtensionsUseAllowed(native_info("/vendor/foo_not_allowlisted"), + /* useOnProductImageEnabled = */ false, + allowlist)); // Non-allowlisted /data binary - EXPECT_EQ( - TypeManager::isExtensionsUseAllowed(native_info("/data/foo_not_allowlisted"), - /* useOnProductImageEnabled = */ false, allowlist), - disableProductAllowlist); + EXPECT_FALSE(TypeManager::isExtensionsUseAllowed(native_info("/data/foo_not_allowlisted"), + /* useOnProductImageEnabled = */ false, + allowlist)); } TEST_F(ExtensionsTest, TestAllowedApps) { @@ -211,10 +198,6 @@ std::vector<std::string> allowlist = {"com.foo"}; - // Allowlist for vendor/data partiion is not present on Android S. - // Before S, checks below will fail. On S and later they will succeed. 
- bool disableProductAllowlist = android_get_device_api_level() >= __ANDROID_API_S__; - auto test_app_process = [&](const std::string& binary) { // /data app EXPECT_TRUE(TypeManager::isExtensionsUseAllowed({.binaryPath = binary, @@ -262,34 +245,31 @@ allowlist)); // /product app, enabled, package name not on allowlist - EXPECT_EQ(TypeManager::isExtensionsUseAllowed({.binaryPath = binary, - .appPackageName = package_non_allowlisted, - .appIsSystemApp = true, - .appIsOnVendorImage = false, - .appIsOnProductImage = true}, - /* useOnProductImageEnabled = */ true, - allowlist), - disableProductAllowlist); + EXPECT_FALSE(TypeManager::isExtensionsUseAllowed({.binaryPath = binary, + .appPackageName = package_non_allowlisted, + .appIsSystemApp = true, + .appIsOnVendorImage = false, + .appIsOnProductImage = true}, + /* useOnProductImageEnabled = */ true, + allowlist)); // /data app, package name not on allowlist - EXPECT_EQ(TypeManager::isExtensionsUseAllowed({.binaryPath = binary, - .appPackageName = package_non_allowlisted, - .appIsSystemApp = false, - .appIsOnVendorImage = false, - .appIsOnProductImage = false}, - /* useOnProductImageEnabled = */ false, - allowlist), - disableProductAllowlist); + EXPECT_FALSE(TypeManager::isExtensionsUseAllowed({.binaryPath = binary, + .appPackageName = package_non_allowlisted, + .appIsSystemApp = false, + .appIsOnVendorImage = false, + .appIsOnProductImage = false}, + /* useOnProductImageEnabled = */ false, + allowlist)); // /vendor || /odm app, package name not on allowlist - EXPECT_EQ(TypeManager::isExtensionsUseAllowed({.binaryPath = binary, - .appPackageName = package_non_allowlisted, - .appIsSystemApp = true, - .appIsOnVendorImage = true, - .appIsOnProductImage = false}, - /* useOnProductImageEnabled = */ false, - allowlist), - disableProductAllowlist); + EXPECT_FALSE(TypeManager::isExtensionsUseAllowed({.binaryPath = binary, + .appPackageName = package_non_allowlisted, + .appIsSystemApp = true, + .appIsOnVendorImage = true, + 
.appIsOnProductImage = false}, + /* useOnProductImageEnabled = */ false, + allowlist)); }; test_app_process(app_process64); test_app_process(app_process32);
diff --git a/runtime/test/TestFailingDriver.cpp b/runtime/test/TestFailingDriver.cpp index 2c76cf3..7d41ace 100644 --- a/runtime/test/TestFailingDriver.cpp +++ b/runtime/test/TestFailingDriver.cpp
@@ -14,22 +14,21 @@ * limitations under the License. */ -#include <SampleDriverPartial.h> #include <gtest/gtest.h> -#include <algorithm> #include <memory> #include <vector> #include "CompilationBuilder.h" #include "ExecutionPlan.h" -#include "HalUtils.h" #include "Manager.h" +#include "SampleDriverPartial.h" #include "TestNeuralNetworksWrapper.h" namespace android::nn { namespace { +using namespace hal; using sample_driver::SampleDriverPartial; using Result = test_wrapper::Result; using WrapperOperandType = test_wrapper::OperandType; @@ -51,18 +50,20 @@ // EmptyOperationResolver causes execution to fail. FailingTestDriver() : SampleDriverPartial(kTestDriverName, &mEmptyOperationResolver) {} - hardware::Return<void> getCapabilities_1_3(getCapabilities_1_3_cb cb) override { - cb(V1_3::ErrorStatus::NONE, makeCapabilities(0.1)); // Faster than CPU. - return hardware::Void(); + Return<void> getCapabilities_1_3(getCapabilities_1_3_cb cb) override { + cb(V1_3::ErrorStatus::NONE, + {.operandPerformance = {{.type = OperandType::TENSOR_FLOAT32, + .info = {.execTime = 0.1, // Faster than CPU. 
+ .powerUsage = 0.1}}}}); + return Void(); } private: - std::vector<bool> getSupportedOperationsImpl(const V1_3::Model& model) const override { + std::vector<bool> getSupportedOperationsImpl(const Model& model) const override { std::vector<bool> supported(model.main.operations.size()); - std::transform(model.main.operations.begin(), model.main.operations.end(), - supported.begin(), [](const V1_3::Operation& operation) { - return operation.type == V1_3::OperationType::SQRT; - }); + std::transform( + model.main.operations.begin(), model.main.operations.end(), supported.begin(), + [](const Operation& operation) { return operation.type == OperationType::SQRT; }); return supported; } @@ -76,8 +77,8 @@ !DeviceManager::partitioningAllowsFallback(deviceManager->getPartitioning())) { GTEST_SKIP(); } - mTestDevice = DeviceManager::forTest_makeDriverDevice( - makeSharedDevice(kTestDriverName, new FailingTestDriver())); + mTestDevice = + DeviceManager::forTest_makeDriverDevice(kTestDriverName, new FailingTestDriver()); deviceManager->forTest_setDevices({ mTestDevice, DeviceManager::getCpuDevice(),
diff --git a/runtime/test/TestFree.cpp b/runtime/test/TestFree.cpp index 519e9a7..dedf55e 100644 --- a/runtime/test/TestFree.cpp +++ b/runtime/test/TestFree.cpp
@@ -19,12 +19,12 @@ // Limitation: It doesn't set various combinations of properties on objects before // freeing those objects. +#include "NeuralNetworks.h" + #include <gtest/gtest.h> #include <vector> -#include "NeuralNetworks.h" - namespace { ANeuralNetworksModel* createUnfinishedModel() {
diff --git a/runtime/test/TestGenerated.cpp b/runtime/test/TestGenerated.cpp index bc57f33..70b0e6f 100644 --- a/runtime/test/TestGenerated.cpp +++ b/runtime/test/TestGenerated.cpp
@@ -33,7 +33,6 @@ #include <utility> #include <vector> -#include "AndroidVersionUtil.h" #include "GeneratedTestUtils.h" #include "TestHarness.h" #include "TestNeuralNetworksWrapper.h" @@ -43,7 +42,7 @@ // constraints. We reuse the NNTEST_ONLY_PUBLIC_API flag, as that should also be // the case for CTS (public APIs only). #ifndef NNTEST_ONLY_PUBLIC_API -#include <Tracing.h> +#include "Tracing.h" #else #define NNTRACE_FULL_RAW(...) #define NNTRACE_APP(...) @@ -66,8 +65,6 @@ bool shouldSkipTest(); std::optional<Compilation> compileModel(const Model& model); - void executeInternal(const Compilation& compilation, const TestModel& testModel, - bool testReusableExecution); void executeWithCompilation(const Compilation& compilation, const TestModel& testModel); void executeOnce(const Model& model, const TestModel& testModel); void executeMultithreadedOwnCompilation(const Model& model, const TestModel& testModel); @@ -85,8 +82,6 @@ bool mExpectFailure = false; bool mTestQuantizationCoupling = false; bool mTestDeviceMemory = false; - bool mTestReusableExecution = true; - Execution::ComputeMode mComputeMode = Execution::getComputeMode(); }; int GeneratedTests::mVndkVersion = __ANDROID_API_FUTURE__; @@ -108,13 +103,7 @@ class QuantizationCouplingTest : public GeneratedTests { protected: - QuantizationCouplingTest() { - mTestQuantizationCoupling = true; - // QuantizationCouplingTest is intended for verifying if a driver supports ASYMM quant8, it - // must support SYMM quant8. All the models in QuantizationCouplingTest will also be - // executed in other test suites, so there is no need to test reusable execution again. 
- mTestReusableExecution = false; - } + QuantizationCouplingTest() { mTestQuantizationCoupling = true; } }; class DeviceMemoryTest : public GeneratedTests { @@ -149,6 +138,15 @@ } } +static void computeWithPtrs(const TestModel& testModel, Execution* execution, Result* result, + std::vector<TestBuffer>* outputs) { + { + NNTRACE_APP(NNTRACE_PHASE_INPUTS_AND_OUTPUTS, "computeWithPtrs example"); + createRequest(testModel, execution, outputs); + } + *result = execution->compute(); +} + static ANeuralNetworksMemory* createDeviceMemoryForInput(const Compilation& compilation, uint32_t index) { ANeuralNetworksMemoryDesc* desc = nullptr; @@ -157,7 +155,7 @@ ANEURALNETWORKS_NO_ERROR); EXPECT_EQ(ANeuralNetworksMemoryDesc_finish(desc), ANEURALNETWORKS_NO_ERROR); ANeuralNetworksMemory* memory = nullptr; - EXPECT_EQ(ANeuralNetworksMemory_createFromDesc(desc, &memory), ANEURALNETWORKS_NO_ERROR); + ANeuralNetworksMemory_createFromDesc(desc, &memory); ANeuralNetworksMemoryDesc_free(desc); return memory; } @@ -170,58 +168,57 @@ ANEURALNETWORKS_NO_ERROR); EXPECT_EQ(ANeuralNetworksMemoryDesc_finish(desc), ANEURALNETWORKS_NO_ERROR); ANeuralNetworksMemory* memory = nullptr; - EXPECT_EQ(ANeuralNetworksMemory_createFromDesc(desc, &memory), ANEURALNETWORKS_NO_ERROR); + ANeuralNetworksMemory_createFromDesc(desc, &memory); ANeuralNetworksMemoryDesc_free(desc); return memory; } -static void createRequestWithDeviceMemories(const Compilation& compilation, - const TestModel& testModel, Execution* execution, - std::vector<Memory>* inputMemories, - std::vector<Memory>* outputMemories) { +// Set result = Result::NO_ERROR and outputs = {} if the test should be skipped. 
+static void computeWithDeviceMemories(const Compilation& compilation, const TestModel& testModel, + Execution* execution, Result* result, + std::vector<TestBuffer>* outputs) { ASSERT_NE(execution, nullptr); - ASSERT_NE(inputMemories, nullptr); - ASSERT_NE(outputMemories, nullptr); + ASSERT_NE(result, nullptr); + ASSERT_NE(outputs, nullptr); + outputs->clear(); + std::vector<Memory> inputMemories, outputMemories; - // Model inputs. - for (uint32_t i = 0; i < testModel.main.inputIndexes.size(); i++) { - SCOPED_TRACE("Input index: " + std::to_string(i)); - const auto& operand = testModel.main.operands[testModel.main.inputIndexes[i]]; - // Omitted input. - if (operand.data.size() == 0) { - ASSERT_EQ(Result::NO_ERROR, execution->setInput(i, nullptr, 0)); - continue; + { + NNTRACE_APP(NNTRACE_PHASE_INPUTS_AND_OUTPUTS, "computeWithDeviceMemories example"); + // Model inputs. + for (uint32_t i = 0; i < testModel.main.inputIndexes.size(); i++) { + SCOPED_TRACE("Input index: " + std::to_string(i)); + const auto& operand = testModel.main.operands[testModel.main.inputIndexes[i]]; + // Omitted input. + if (operand.data.size() == 0) { + ASSERT_EQ(Result::NO_ERROR, execution->setInput(i, nullptr, 0)); + continue; + } + + // Create device memory. + ANeuralNetworksMemory* memory = createDeviceMemoryForInput(compilation, i); + ASSERT_NE(memory, nullptr); + auto& wrapperMemory = inputMemories.emplace_back(memory); + + // Copy data from TestBuffer to device memory. + auto ashmem = TestAshmem::createFrom(operand.data); + ASSERT_NE(ashmem, nullptr); + ASSERT_EQ(ANeuralNetworksMemory_copy(ashmem->get()->get(), memory), + ANEURALNETWORKS_NO_ERROR); + ASSERT_EQ(Result::NO_ERROR, execution->setInputFromMemory(i, &wrapperMemory, 0, 0)); } - // Create device memory. - ANeuralNetworksMemory* memory = createDeviceMemoryForInput(compilation, i); - ASSERT_NE(memory, nullptr); - auto& wrapperMemory = inputMemories->emplace_back(memory); - - // Copy data from TestBuffer to device memory. 
- auto ashmem = TestAshmem::createFrom(operand.data); - ASSERT_NE(ashmem, nullptr); - ASSERT_EQ(ANeuralNetworksMemory_copy(ashmem->get()->get(), memory), - ANEURALNETWORKS_NO_ERROR); - ASSERT_EQ(Result::NO_ERROR, execution->setInputFromMemory(i, &wrapperMemory, 0, 0)); + // Model outputs. + for (uint32_t i = 0; i < testModel.main.outputIndexes.size(); i++) { + SCOPED_TRACE("Output index: " + std::to_string(i)); + ANeuralNetworksMemory* memory = createDeviceMemoryForOutput(compilation, i); + ASSERT_NE(memory, nullptr); + auto& wrapperMemory = outputMemories.emplace_back(memory); + ASSERT_EQ(Result::NO_ERROR, execution->setOutputFromMemory(i, &wrapperMemory, 0, 0)); + } } - // Model outputs. - for (uint32_t i = 0; i < testModel.main.outputIndexes.size(); i++) { - SCOPED_TRACE("Output index: " + std::to_string(i)); - ANeuralNetworksMemory* memory = createDeviceMemoryForOutput(compilation, i); - ASSERT_NE(memory, nullptr); - auto& wrapperMemory = outputMemories->emplace_back(memory); - ASSERT_EQ(Result::NO_ERROR, execution->setOutputFromMemory(i, &wrapperMemory, 0, 0)); - } -} - -static void copyResultsFromDeviceMemories(const TestModel& testModel, - const std::vector<Memory>& outputMemories, - std::vector<TestBuffer>* outputs) { - ASSERT_NE(outputs, nullptr); - ASSERT_EQ(testModel.main.outputIndexes.size(), outputMemories.size()); - outputs->clear(); + *result = execution->compute(); // Copy out output results. 
for (uint32_t i = 0; i < testModel.main.outputIndexes.size(); i++) { @@ -239,102 +236,43 @@ } } -void GeneratedTests::executeInternal(const Compilation& compilation, const TestModel& testModel, - bool testReusableExecution) { - NNTRACE_APP(NNTRACE_PHASE_EXECUTION, "executeInternal example"); - - Execution execution(&compilation); - if (__builtin_available(android __NNAPI_FL5_MIN_ANDROID_API__, *)) { - execution.setReusable(testReusableExecution); - } - - std::vector<TestBuffer> outputs; - std::vector<Memory> inputMemories, outputMemories; - - if (mTestDeviceMemory) { - createRequestWithDeviceMemories(compilation, testModel, &execution, &inputMemories, - &outputMemories); - } else { - createRequest(testModel, &execution, &outputs); - } - - const auto computeAndCheckResults = [this, &testModel, &execution, &outputs, &outputMemories] { - Result result = execution.compute(mComputeMode); - if (mTestDeviceMemory) { - copyResultsFromDeviceMemories(testModel, outputMemories, &outputs); - } - - if (result == Result::NO_ERROR && outputs.empty()) { - return; - } - - { - NNTRACE_APP(NNTRACE_PHASE_RESULTS, "executeInternal example"); - if (mExpectFailure) { - ASSERT_NE(result, Result::NO_ERROR); - return; - } else { - ASSERT_EQ(result, Result::NO_ERROR); - } - - // Check output dimensions. 
- for (uint32_t i = 0; i < testModel.main.outputIndexes.size(); i++) { - SCOPED_TRACE("Output index: " + std::to_string(i)); - const auto& output = testModel.main.operands[testModel.main.outputIndexes[i]]; - if (output.isIgnored) continue; - std::vector<uint32_t> actualDimensions; - ASSERT_EQ(Result::NO_ERROR, - execution.getOutputOperandDimensions(i, &actualDimensions)); - ASSERT_EQ(output.dimensions, actualDimensions); - } - - checkResults(testModel, outputs); - } - }; - - computeAndCheckResults(); - if (testReusableExecution) { - computeAndCheckResults(); - } -} - void GeneratedTests::executeWithCompilation(const Compilation& compilation, const TestModel& testModel) { - // Single-time and reusable executions have different code paths, so test both. - executeInternal(compilation, testModel, /*testReusableExecution=*/false); - if (__builtin_available(android __NNAPI_FL5_MIN_ANDROID_API__, *)) { - if (mTestReusableExecution) { - executeInternal(compilation, testModel, /*testReusableExecution=*/true); - } + NNTRACE_APP(NNTRACE_PHASE_EXECUTION, "executeWithCompilation example"); + + Execution execution(&compilation); + Result result; + std::vector<TestBuffer> outputs; + + if (mTestDeviceMemory) { + computeWithDeviceMemories(compilation, testModel, &execution, &result, &outputs); + } else { + computeWithPtrs(testModel, &execution, &result, &outputs); } -} -static bool isPowerOfTwo(uint32_t x) { - return x > 0 && ((x & (x - 1)) == 0); -} + if (result == Result::NO_ERROR && outputs.empty()) { + return; + } -static void validateCompilationMemoryPreferences(const Compilation& compilation, - const TestModel& testModel) { - if (__builtin_available(android __NNAPI_FL5_MIN_ANDROID_API__, *)) { - for (uint32_t i = 0; i < testModel.main.inputIndexes.size(); i++) { - SCOPED_TRACE("Input index: " + std::to_string(i)); - uint32_t alignment = 0, padding = 0; - ASSERT_EQ(compilation.getPreferredMemoryAlignmentForInput(i, &alignment), - Result::NO_ERROR); - 
ASSERT_EQ(compilation.getPreferredMemoryPaddingForInput(i, &padding), Result::NO_ERROR); - EXPECT_TRUE(isPowerOfTwo(alignment)) << "alignment: " << alignment; - EXPECT_TRUE(isPowerOfTwo(padding)) << "padding: " << padding; + { + NNTRACE_APP(NNTRACE_PHASE_RESULTS, "executeWithCompilation example"); + if (mExpectFailure) { + ASSERT_NE(result, Result::NO_ERROR); + return; + } else { + ASSERT_EQ(result, Result::NO_ERROR); } + + // Check output dimensions. for (uint32_t i = 0; i < testModel.main.outputIndexes.size(); i++) { - SCOPED_TRACE("Output index: " + std::to_string(i)); - uint32_t alignment = 0, padding = 0; - ASSERT_EQ(compilation.getPreferredMemoryAlignmentForOutput(i, &alignment), - Result::NO_ERROR); - ASSERT_EQ(compilation.getPreferredMemoryPaddingForOutput(i, &padding), - Result::NO_ERROR); - EXPECT_TRUE(isPowerOfTwo(alignment)) << "alignment: " << alignment; - EXPECT_TRUE(isPowerOfTwo(padding)) << "padding: " << padding; + const auto& output = testModel.main.operands[testModel.main.outputIndexes[i]]; + if (output.isIgnored) continue; + std::vector<uint32_t> actualDimensions; + ASSERT_EQ(Result::NO_ERROR, execution.getOutputOperandDimensions(i, &actualDimensions)); + ASSERT_EQ(output.dimensions, actualDimensions); } + + checkResults(testModel, outputs); } } @@ -343,7 +281,6 @@ std::optional<Compilation> compilation = compileModel(model); // Early return if compilation fails. The compilation result code is checked in compileModel. 
if (!compilation) return; - validateCompilationMemoryPreferences(compilation.value(), testModel); executeWithCompilation(compilation.value(), testModel); } @@ -450,18 +387,21 @@ #ifdef NNTEST_COMPUTE_MODE TEST_P(GeneratedTests, Sync) { - mComputeMode = Execution::ComputeMode::SYNC; + const auto oldComputeMode = Execution::setComputeMode(Execution::ComputeMode::SYNC); execute(testModel); + Execution::setComputeMode(oldComputeMode); } TEST_P(GeneratedTests, Async) { - mComputeMode = Execution::ComputeMode::ASYNC; + const auto oldComputeMode = Execution::setComputeMode(Execution::ComputeMode::ASYNC); execute(testModel); + Execution::setComputeMode(oldComputeMode); } TEST_P(GeneratedTests, Burst) { - mComputeMode = Execution::ComputeMode::BURST; + const auto oldComputeMode = Execution::setComputeMode(Execution::ComputeMode::BURST); execute(testModel); + Execution::setComputeMode(oldComputeMode); } #else TEST_P(GeneratedTests, Test) { @@ -486,8 +426,9 @@ } TEST_P(FencedComputeTest, Test) { - mComputeMode = Execution::ComputeMode::FENCED; + const auto oldComputeMode = Execution::setComputeMode(Execution::ComputeMode::FENCED); execute(testModel); + Execution::setComputeMode(oldComputeMode); } INSTANTIATE_GENERATED_TEST(GeneratedTests,
diff --git a/runtime/test/TestGpuNnapi.cpp b/runtime/test/TestGpuNnapi.cpp deleted file mode 100644 index 333d867..0000000 --- a/runtime/test/TestGpuNnapi.cpp +++ /dev/null
@@ -1,1034 +0,0 @@ -/* - * Copyright (C) 2021 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include <android-base/logging.h> -#include <android-base/unique_fd.h> -#include <android/hardware_buffer.h> -#include <gtest/gtest.h> -#include <vulkan/vulkan.h> -#include <vulkan/vulkan_android.h> - -#include <algorithm> -#include <cmath> -#include <cstring> -#include <memory> -#include <string> -#include <utility> -#include <vector> - -#include "TestNeuralNetworksWrapper.h" - -#ifndef NNTEST_ONLY_PUBLIC_API -#include "Manager.h" -#endif - -namespace android::nn { -namespace { - -using Type = test_wrapper::Type; -using OperandType = test_wrapper::OperandType; -using Result = test_wrapper::Result; - -constexpr uint32_t kOperandSizeX = 256; -constexpr uint32_t kOperandSizeY = 256; -constexpr uint32_t kOperandLength = kOperandSizeX * kOperandSizeY; -constexpr uint32_t kNumberOfIterationsToTest = 100; -constexpr uint32_t kMaxNumberOfPrintedErrors = 10; - -// This file implements a test suite that exercises a GPU -> NNAPI pipeline using AHardwareBuffer -// and sync fence. One pass of the pipeline involves the following three stages: -// -// - GPU: Invoke the compute shader to clear the all elements in the output buffer to value "1" -// of the corresponding element type. 
Because GPU may not be able to natively support -// float16/int8/uint8 data types, we pack each data type into a 4-byte chunk as uint32_t -// and pass to the shader. E.g., float16 will be packed as 0x3c003c00 -- float16 value -// of "1" (0x3c00) repeated twice. The compute shader will use this 4-byte chunk to clear -// the data in the output buffer (see CLEAR_DATA in the compute shader code). -// -// The GPU workload will output directly to an AHardwareBuffer and export an Android sync -// fence. -// -// - NNAPI: Execute a broadcast ADD operation -// -// output = ADD(input, const, act) -// -// where "input" and "output" are of size [kOperandSizeY, kOperandSizeX], "const" and -// "act" are model constant operands, "const" is of size [1] and value "1" of the -// corresponding element type, "act" = 0. The ADD operation will increment each element -// in the input tensor by 1. -// -// The NNAPI executor takes the GPU output AHardwareBuffer as its input memory, -// and directly outputs to another AHardwareBuffer. We use startComputeWithDependencies -// to wait on the sync fence from the GPU workload. If supported, the NNAPI executor will -// emit a sync fence; Otherwise, it will wait until the workload is finished. -// -// - Check: Verify that each element in the resulting tensor is 1 + 1 = 2. -// -// We use introspection API to run the pipeline with each individual driver. Because this test is -// added in NNAPI feature level 5, we will exclude devices with a lower feature level. We expect -// that if the driver successfully prepares the model, it should finish execution without an error. -// -// The pipeline is tested with four data types: float32, float16, quant8_asymm, and -// quant8_asymm_signed. These data types are chosen to make sure that a driver is likely to -// support at least one of the data types. -// -// For each configuration, we run the pipeline for kNumberOfIterationsToTest iterations. 
- -const std::vector<uint32_t> kComputeShader = -#include "shaders/TestGpuNnapi.comp.spv.inl" - ; - -// The expected element value in the final NNAPI output AHardwareBuffer. -constexpr uint32_t kExpectedResultInInt = 2; - -// Helper templates for information related to a primary tensor data type. Only four specializations -// exists for this template: Type::TENSOR_FLOAT32, Type::TENSOR_FLOAT16, Type::TENSOR_QUANT8_ASYMM, -// and Type::TENSOR_QUANT8_ASYMM_SIGNED. Each specialization corresponds to a primary data type for -// the testing pipeline. -// -// Each template specialization defines the following fields: -// - ElementType: The corresponding C++ type. Use sizeof(ElementType) to get the element size. -// - kIsQuantized: Whether the data type is a quantized type or not. -// - kClearData: The CLEAR_DATA used in the compute shader. -// - kTolerance: The absolute tolerance used to check the computation result. -template <Type dataType> -struct TestTypeHelper; -template <> -struct TestTypeHelper<Type::TENSOR_FLOAT32> { - using ElementType = float; - static constexpr bool kIsQuantized = false; - // One float32 of value (1.0) packed into uint32_t - static constexpr uint32_t kClearData = 0x3f800000; - static constexpr double kTolerance = 1e-6; -}; -template <> -struct TestTypeHelper<Type::TENSOR_FLOAT16> { - using ElementType = _Float16; - static constexpr bool kIsQuantized = false; - // Two float16 of value (1.0) packed into uint32_t - static constexpr uint32_t kClearData = 0x3c003c00; - static constexpr double kTolerance = 1e-3; -}; -template <> -struct TestTypeHelper<Type::TENSOR_QUANT8_ASYMM> { - using ElementType = uint8_t; - static constexpr bool kIsQuantized = true; - // Four uint8_t of value (1) packed into uint32_t - static constexpr uint32_t kClearData = 0x01010101; - static constexpr double kTolerance = 0; -}; -template <> -struct TestTypeHelper<Type::TENSOR_QUANT8_ASYMM_SIGNED> { - using ElementType = int8_t; - static constexpr bool kIsQuantized = true; - 
// Four int8_t of value (1) packed into uint32_t - static constexpr uint32_t kClearData = 0x01010101; - static constexpr double kTolerance = 0; -}; - -bool isExtensionSupported(const std::vector<VkExtensionProperties>& supportedExtensions, - const char* requestedExtension) { - return std::any_of(supportedExtensions.begin(), supportedExtensions.end(), - [requestedExtension](const auto& extension) { - return strcmp(extension.extensionName, requestedExtension) == 0; - }); -} - -// Records the workgroup size and the group counts of dispatching the compute shader. -struct DispatchSize { - uint32_t workgroupSize; - uint32_t groupCountX; - uint32_t groupCountY; -}; - -// Choose an appropriate dispatch size. We are using a square workgroup size. -template <Type dataType> -DispatchSize chooseDispatchSize(const VkPhysicalDeviceLimits& limits) { - // Compute the number of invocations along each dimension. - const uint32_t elementSize = sizeof(typename TestTypeHelper<dataType>::ElementType); - const uint32_t numberOfElementsPerInvocation = sizeof(uint32_t) / elementSize; - const uint32_t workgroupInvocationsX = kOperandSizeX / numberOfElementsPerInvocation; - const uint32_t workgroupInvocationsY = kOperandSizeY; - - // Make sure the workgroup size does not exceed the number of invocations along the X and Y - // dimensions. - uint32_t workgroupSize = std::min(workgroupInvocationsX, workgroupInvocationsY); - - // Make sure the workgroup size does not exceed the device limit along the X and Y dimensions. - workgroupSize = std::min<uint32_t>(workgroupSize, limits.maxComputeWorkGroupSize[0]); - workgroupSize = std::min<uint32_t>(workgroupSize, limits.maxComputeWorkGroupSize[1]); - - // Make sure the total number of invocations does not exceed the device limit. - uint32_t maxSquareWorkGroupSize = - static_cast<uint32_t>(std::sqrt(limits.maxComputeWorkGroupInvocations)); - workgroupSize = std::min(workgroupSize, maxSquareWorkGroupSize); - - // Round down to a power of 2. 
This is to make sure workgroupInvocationsX and - // workgroupInvocationsY are divisible by the workgroup size so that we don't need to apply - // bound check in the shader. - uint32_t power = static_cast<uint32_t>(std::log2(static_cast<float>(workgroupSize))); - workgroupSize = 1u << power; - CHECK(workgroupInvocationsX % workgroupSize == 0); - CHECK(workgroupInvocationsY % workgroupSize == 0); - - return { - .workgroupSize = workgroupSize, - .groupCountX = workgroupInvocationsX / workgroupSize, - .groupCountY = workgroupInvocationsY / workgroupSize, - }; -} - -// Find the first memory index that satisfies the requirements -// See VkAndroidHardwareBufferPropertiesANDROID::memoryTypeBits for the semantics of -// "memoryTypeBitsRequirement" -std::optional<uint32_t> findMemoryType(const VkPhysicalDeviceMemoryProperties& properties, - uint32_t memoryTypeBitsRequirement, - VkDeviceSize sizeRequirement) { - for (uint32_t memoryIndex = 0; memoryIndex < VK_MAX_MEMORY_TYPES; ++memoryIndex) { - const uint32_t memoryTypeBits = (1 << memoryIndex); - const bool isRequiredMemoryType = memoryTypeBitsRequirement & memoryTypeBits; - const uint32_t heapIndex = properties.memoryTypes[memoryIndex].heapIndex; - const bool isLargeEnough = properties.memoryHeaps[heapIndex].size >= sizeRequirement; - if (isRequiredMemoryType && isLargeEnough) return memoryIndex; - } - - // failed to find memory type. 
- return std::nullopt; -} - -void addBufferTransitionBarrier(VkCommandBuffer commandBuffer, VkBuffer buffer, - VkPipelineStageFlags srcStageMask, - VkPipelineStageFlags dstStageMask, VkAccessFlags srcAccessMask, - VkAccessFlags dstAccessMask, uint32_t srcQueue, uint32_t dstQueue) { - const VkBufferMemoryBarrier bufferBarrier = { - .sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, - .pNext = nullptr, - .srcAccessMask = srcAccessMask, - .dstAccessMask = dstAccessMask, - .srcQueueFamilyIndex = srcQueue, - .dstQueueFamilyIndex = dstQueue, - .buffer = buffer, - .offset = 0, - .size = VK_WHOLE_SIZE, - }; - vkCmdPipelineBarrier(commandBuffer, srcStageMask, dstStageMask, 0, 0, nullptr, 1, - &bufferBarrier, 0, nullptr); -} - -void allocateBlobAhwb(uint32_t size, uint64_t usage, AHardwareBuffer** outAhwb) { - AHardwareBuffer_Desc desc = { - .width = size, - .height = 1u, - .layers = 1u, - .format = AHARDWAREBUFFER_FORMAT_BLOB, - .usage = usage, - }; - if (AHardwareBuffer_allocate(&desc, outAhwb) != 0) { - GTEST_SKIP() << "Device failed to allocate Android hardware buffer"; - } -} - -using NameAndDevice = std::pair<const char*, const ANeuralNetworksDevice*>; - -void getNnapiDevices(std::vector<NameAndDevice>* outDevices) { - // Get the number of available NNAPI devices - uint32_t numDevices = 0; - ASSERT_EQ(ANeuralNetworks_getDeviceCount(&numDevices), ANEURALNETWORKS_NO_ERROR); - - std::vector<NameAndDevice> devices; - for (uint32_t i = 0; i < numDevices; i++) { - // Get device - ANeuralNetworksDevice* device; - ASSERT_EQ(ANeuralNetworks_getDevice(/*devIndex=*/i, &device), ANEURALNETWORKS_NO_ERROR); - - // Get device name - const char* deviceName = nullptr; - ASSERT_EQ(ANeuralNetworksDevice_getName(device, &deviceName), ANEURALNETWORKS_NO_ERROR); - - // Check device feature level. This test is added in NNAPI feature level 5, so skip if the - // device is of a lower feature level. 
- int64_t featureLevel; - ASSERT_EQ(ANeuralNetworksDevice_getFeatureLevel(device, &featureLevel), - ANEURALNETWORKS_NO_ERROR); - if (featureLevel < ANEURALNETWORKS_FEATURE_LEVEL_5) { - continue; - } - - devices.emplace_back(deviceName, device); - } - *outDevices = std::move(devices); -} - -std::vector<NameAndDevice> getNnapiDevices() { - std::vector<NameAndDevice> devices; - getNnapiDevices(&devices); - return devices; -} - -std::string printGpuNnapiTest(const testing::TestParamInfo<NameAndDevice>& info) { - std::string name = info.param.first; - // gtest test names must only contain alphanumeric characters - std::replace_if( - name.begin(), name.end(), [](char c) { return !std::isalnum(c); }, '_'); - return name; -} - -template <Type dataType> -class VulkanComputePipeline { - public: - // Returns the created object on success, or nullptr on failure. - static std::unique_ptr<VulkanComputePipeline> create(AHardwareBuffer* output) { - auto pipeline = std::make_unique<VulkanComputePipeline>(); - pipeline->initialize(output); - return pipeline->mIsValid ? 
std::move(pipeline) : nullptr; - } - - ~VulkanComputePipeline() { - if (mDevice != VK_NULL_HANDLE) { - vkDestroyFence(mDevice, mFence, nullptr); - vkDestroyPipeline(mDevice, mPipeline, nullptr); - vkDestroyDescriptorSetLayout(mDevice, mDescriptorSetLayout, nullptr); - vkDestroyPipelineLayout(mDevice, mPipelineLayout, nullptr); - vkFreeMemory(mDevice, mOutputBufferMemory, nullptr); - vkDestroyBuffer(mDevice, mOutputBuffer, nullptr); - vkDestroyShaderModule(mDevice, mShaderModule, nullptr); - vkDestroyCommandPool(mDevice, mCommandPool, nullptr); - vkDestroyDescriptorPool(mDevice, mDescriptorPool, nullptr); - } - vkDestroyDevice(mDevice, nullptr); - vkDestroyInstance(mInstance, nullptr); - } - - // Returns {success, sync_fd} - std::pair<bool, base::unique_fd> run() { - bool success = false; - base::unique_fd outSyncFd; - runInternal(&success, &outSyncFd); - return {success, std::move(outSyncFd)}; - } - - private: - void initialize(AHardwareBuffer* output) { - // Create instance - const VkApplicationInfo applicationDesc = { - .sType = VK_STRUCTURE_TYPE_APPLICATION_INFO, - .pApplicationName = "TestGpuNnapi", - .applicationVersion = VK_MAKE_VERSION(1, 0, 0), - .apiVersion = VK_API_VERSION_1_1, - }; - const VkInstanceCreateInfo instanceDesc = { - .sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO, - .pApplicationInfo = &applicationDesc, - .enabledLayerCount = 0, - .ppEnabledLayerNames = nullptr, - .enabledExtensionCount = 0, - .ppEnabledExtensionNames = nullptr, - }; - ASSERT_EQ(vkCreateInstance(&instanceDesc, nullptr, &mInstance), VK_SUCCESS); - - // Enumerate physical devices - uint32_t numberOfDevices = 0; - ASSERT_EQ(vkEnumeratePhysicalDevices(mInstance, &numberOfDevices, nullptr), VK_SUCCESS); - std::vector<VkPhysicalDevice> physicalDevices(numberOfDevices); - ASSERT_EQ(vkEnumeratePhysicalDevices(mInstance, &numberOfDevices, physicalDevices.data()), - VK_SUCCESS); - - // Pick the first device with a compute queue - for (const auto& physicalDevice : physicalDevices) { - 
uint32_t numberOfQueueFamilies = 0; - vkGetPhysicalDeviceQueueFamilyProperties(physicalDevice, &numberOfQueueFamilies, - nullptr); - std::vector<VkQueueFamilyProperties> queueFamilies(numberOfQueueFamilies); - vkGetPhysicalDeviceQueueFamilyProperties(physicalDevice, &numberOfQueueFamilies, - queueFamilies.data()); - - uint32_t pickedQueueFamilyIndex = 0; - bool hasComputeQueue = false; - for (uint32_t i = 0; i < queueFamilies.size(); i++) { - if (queueFamilies[i].queueFlags & VK_QUEUE_COMPUTE_BIT) { - pickedQueueFamilyIndex = i; - hasComputeQueue = true; - break; - } - } - if (!hasComputeQueue) continue; - mPhysicalDevice = physicalDevice; - mQueueFamilyIndex = pickedQueueFamilyIndex; - break; - } - if (mPhysicalDevice == VK_NULL_HANDLE) { - GTEST_SKIP() << "No device can handle a compute queue"; - } - - // Get physical device properties - vkGetPhysicalDeviceProperties(mPhysicalDevice, &mPhysicalDeviceProperties); - vkGetPhysicalDeviceMemoryProperties(mPhysicalDevice, &mPhysicalDeviceMemoryProperties); - - // Check physical device version - if (mPhysicalDeviceProperties.apiVersion < VK_API_VERSION_1_1) { - GTEST_SKIP() << "Device API version too low"; - } - - // Check if the physical device is able to handle the compute work - const auto dispatchSize = chooseDispatchSize<dataType>(mPhysicalDeviceProperties.limits); - if (mPhysicalDeviceProperties.limits.maxComputeWorkGroupCount[0] < - dispatchSize.groupCountX) { - GTEST_SKIP() << "Device cannot handle " << dispatchSize.groupCountX - << " workgroups for the X dimension"; - } - if (mPhysicalDeviceProperties.limits.maxComputeWorkGroupCount[1] < - dispatchSize.groupCountY) { - GTEST_SKIP() << "Device cannot handle " << dispatchSize.groupCountY - << " workgroups for the Y dimension"; - } - - // Enumerate device extensions - uint32_t numberOfExtensions = 0; - ASSERT_EQ(vkEnumerateDeviceExtensionProperties(mPhysicalDevice, nullptr, - &numberOfExtensions, nullptr), - VK_SUCCESS); - std::vector<VkExtensionProperties> 
extensions(numberOfExtensions); - ASSERT_EQ(vkEnumerateDeviceExtensionProperties(mPhysicalDevice, nullptr, - &numberOfExtensions, extensions.data()), - VK_SUCCESS); - - // Required device extensions - std::vector<const char*> requiredDeviceExtensions = { - // The following extensions are required to import an AHardwareBuffer to Vulkan - VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME, - VK_EXT_QUEUE_FAMILY_FOREIGN_EXTENSION_NAME, - VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME, - VK_KHR_BIND_MEMORY_2_EXTENSION_NAME, - VK_KHR_EXTERNAL_MEMORY_EXTENSION_NAME, - // The following extensions are required to export a sync fence - VK_KHR_EXTERNAL_FENCE_FD_EXTENSION_NAME, - VK_KHR_MAINTENANCE1_EXTENSION_NAME, - }; - for (const char* requiredDeviceExtension : requiredDeviceExtensions) { - if (!isExtensionSupported(extensions, requiredDeviceExtension)) { - GTEST_SKIP() << "Device extension " << requiredDeviceExtension - << " is not supported"; - } - } - - // Check external memory properties - const VkPhysicalDeviceExternalBufferInfo externalBufferInfo = { - .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_BUFFER_INFO, - .pNext = nullptr, - .flags = 0u, - .usage = VK_BUFFER_USAGE_STORAGE_BUFFER_BIT, - .handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID, - }; - VkExternalBufferProperties externalBufferProperties; - vkGetPhysicalDeviceExternalBufferProperties(mPhysicalDevice, &externalBufferInfo, - &externalBufferProperties); - if (!(externalBufferProperties.externalMemoryProperties.externalMemoryFeatures & - VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT)) { - GTEST_SKIP() << "Device is not able to import Android hardware buffer"; - } - ASSERT_FALSE(externalBufferProperties.externalMemoryProperties.externalMemoryFeatures & - VK_EXTERNAL_MEMORY_FEATURE_DEDICATED_ONLY_BIT); - - // Check external fence properties - const VkPhysicalDeviceExternalFenceInfo externalFenceInfo = { - .sType = 
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_FENCE_INFO, - .pNext = nullptr, - .handleType = VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT, - }; - VkExternalFenceProperties externalFenceProperties; - vkGetPhysicalDeviceExternalFenceProperties(mPhysicalDevice, &externalFenceInfo, - &externalFenceProperties); - if (!(externalFenceProperties.externalFenceFeatures & - VK_EXTERNAL_FENCE_FEATURE_EXPORTABLE_BIT)) { - GTEST_SKIP() << "Device is not able to export Android sync fence FD"; - } - - // Create logical device - const float queuePriority = 1.0f; - const VkDeviceQueueCreateInfo queueDesc = { - .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, - .queueFamilyIndex = mQueueFamilyIndex, - .queueCount = 1, - .pQueuePriorities = &queuePriority, - }; - const VkDeviceCreateInfo deviceDesc = { - .sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO, - .queueCreateInfoCount = 1, - .pQueueCreateInfos = &queueDesc, - .enabledExtensionCount = static_cast<uint32_t>(requiredDeviceExtensions.size()), - .ppEnabledExtensionNames = requiredDeviceExtensions.data(), - .pEnabledFeatures = nullptr, - }; - ASSERT_EQ(vkCreateDevice(mPhysicalDevice, &deviceDesc, nullptr, &mDevice), VK_SUCCESS); - vkGetDeviceQueue(mDevice, mQueueFamilyIndex, 0, &mQueue); - - // Get extension function pointers - mPfnVkGetFenceFdKHR = reinterpret_cast<PFN_vkGetFenceFdKHR>( - vkGetDeviceProcAddr(mDevice, "vkGetFenceFdKHR")); - ASSERT_NE(mPfnVkGetFenceFdKHR, nullptr); - - // Create descriptor pool - const std::vector<VkDescriptorPoolSize> descriptorPoolSizes = { - { - .type = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, - .descriptorCount = 1, - }, - }; - const VkDescriptorPoolCreateInfo descriptorPoolCreateInfo = { - .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO, - .maxSets = 1, - .poolSizeCount = static_cast<uint32_t>(descriptorPoolSizes.size()), - .pPoolSizes = descriptorPoolSizes.data(), - }; - ASSERT_EQ(vkCreateDescriptorPool(mDevice, &descriptorPoolCreateInfo, nullptr, - &mDescriptorPool), - VK_SUCCESS); - - // Create 
descriptor set layout - const std::vector<VkDescriptorSetLayoutBinding> descriptorsetLayoutBinding = { - { - .binding = 0, // output buffer - .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, - .descriptorCount = 1, - .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT, - }, - - }; - const VkDescriptorSetLayoutCreateInfo descriptorsetLayoutDesc = { - .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO, - .bindingCount = static_cast<uint32_t>(descriptorsetLayoutBinding.size()), - .pBindings = descriptorsetLayoutBinding.data(), - }; - ASSERT_EQ(vkCreateDescriptorSetLayout(mDevice, &descriptorsetLayoutDesc, nullptr, - &mDescriptorSetLayout), - VK_SUCCESS); - - // Allocate descriptor set - const VkDescriptorSetAllocateInfo descriptorSetAllocateInfo = { - .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO, - .descriptorPool = mDescriptorPool, - .descriptorSetCount = 1, - .pSetLayouts = &mDescriptorSetLayout, - }; - ASSERT_EQ(vkAllocateDescriptorSets(mDevice, &descriptorSetAllocateInfo, &mDescriptorSet), - VK_SUCCESS); - - // Check the output AHardwareBuffer format and usage bits - AHardwareBuffer_Desc desc; - AHardwareBuffer_describe(output, &desc); - ASSERT_EQ(desc.format, AHARDWAREBUFFER_FORMAT_BLOB); - ASSERT_TRUE(desc.usage & AHARDWAREBUFFER_USAGE_GPU_DATA_BUFFER); - - // Get AHardwareBuffer properties - VkAndroidHardwareBufferPropertiesANDROID properties = { - .sType = VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_PROPERTIES_ANDROID, - .pNext = nullptr, - }; - ASSERT_EQ(vkGetAndroidHardwareBufferPropertiesANDROID(mDevice, output, &properties), - VK_SUCCESS); - - // Create the output buffer with AHardwareBuffer memory - const VkExternalMemoryBufferCreateInfo externalMemoryBufferCreateInfo = { - .sType = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO, - .pNext = nullptr, - .handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID, - }; - const VkBufferCreateInfo bufferCreateInfo = { - .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, - 
.pNext = &externalMemoryBufferCreateInfo, - .flags = 0u, - .size = desc.width, - .usage = VK_BUFFER_USAGE_STORAGE_BUFFER_BIT, - .sharingMode = VK_SHARING_MODE_EXCLUSIVE, - .queueFamilyIndexCount = 0u, - .pQueueFamilyIndices = nullptr, - }; - ASSERT_EQ(vkCreateBuffer(mDevice, &bufferCreateInfo, nullptr, &mOutputBuffer), VK_SUCCESS); - - // Find a proper memory type - const auto maybeMemoryTypeIndex = - findMemoryType(mPhysicalDeviceMemoryProperties, properties.memoryTypeBits, - properties.allocationSize); - if (!maybeMemoryTypeIndex.has_value()) { - GTEST_SKIP() << "None of the memory type is suitable for allocation"; - } - - // Import the AHardwareBuffer memory - const VkImportAndroidHardwareBufferInfoANDROID importMemoryAllocateInfo = { - .sType = VK_STRUCTURE_TYPE_IMPORT_ANDROID_HARDWARE_BUFFER_INFO_ANDROID, - .pNext = nullptr, - .buffer = output, - }; - const VkMemoryAllocateInfo memoryAllocInfo = { - .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO, - .pNext = &importMemoryAllocateInfo, - .allocationSize = properties.allocationSize, - .memoryTypeIndex = maybeMemoryTypeIndex.value(), - }; - const auto allocationResult = - vkAllocateMemory(mDevice, &memoryAllocInfo, nullptr, &mOutputBufferMemory); - // Memory allocation may fail if the size exceeds the upper limit of a single allocation - // that the platform supports - if (allocationResult == VK_ERROR_OUT_OF_DEVICE_MEMORY) { - GTEST_SKIP() << "Unable to allocate device memory of " << properties.allocationSize - << " bytes"; - } - ASSERT_EQ(allocationResult, VK_SUCCESS); - - // Bind the memory with the buffer - ASSERT_EQ(vkBindBufferMemory(mDevice, mOutputBuffer, mOutputBufferMemory, 0), VK_SUCCESS); - - // Update the descriptor sets - const VkDescriptorBufferInfo outputBufferDesc = { - .buffer = mOutputBuffer, - .offset = 0, - .range = VK_WHOLE_SIZE, - }; - const std::vector<VkWriteDescriptorSet> writeDst = { - { - .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET, - .pNext = nullptr, - .dstSet = mDescriptorSet, - 
.dstBinding = 0, // output buffer - .dstArrayElement = 0, - .descriptorCount = 1, - .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, - .pImageInfo = nullptr, - .pBufferInfo = &outputBufferDesc, - .pTexelBufferView = nullptr, - }, - }; - vkUpdateDescriptorSets(mDevice, writeDst.size(), writeDst.data(), 0, nullptr); - - // Create shader module - const VkShaderModuleCreateInfo shaderDesc = { - .sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO, - .flags = 0, - .codeSize = kComputeShader.size() * sizeof(uint32_t), - .pCode = kComputeShader.data(), - }; - ASSERT_EQ(vkCreateShaderModule(mDevice, &shaderDesc, nullptr, &mShaderModule), VK_SUCCESS); - - // Create pipeline layout - const VkPipelineLayoutCreateInfo layoutDesc = { - .sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO, - .setLayoutCount = 1, - .pSetLayouts = &mDescriptorSetLayout, - .pushConstantRangeCount = 0, - .pPushConstantRanges = nullptr, - }; - ASSERT_EQ(vkCreatePipelineLayout(mDevice, &layoutDesc, nullptr, &mPipelineLayout), - VK_SUCCESS); - - // Create compute pipeline - const uint32_t specializationData[] = { - dispatchSize.workgroupSize, // local_size_x - dispatchSize.workgroupSize, // local_size_y - TestTypeHelper<dataType>::kClearData, // CLEAR_DATA - }; - const std::vector<VkSpecializationMapEntry> specializationMap = { - // {constantID, offset, size} - {0, 0 * sizeof(uint32_t), sizeof(uint32_t)}, - {1, 1 * sizeof(uint32_t), sizeof(uint32_t)}, - {2, 2 * sizeof(uint32_t), sizeof(uint32_t)}, - }; - const VkSpecializationInfo specializationInfo = { - .mapEntryCount = static_cast<uint32_t>(specializationMap.size()), - .pMapEntries = specializationMap.data(), - .dataSize = sizeof(specializationData), - .pData = specializationData, - }; - const VkComputePipelineCreateInfo pipelineDesc = { - .sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO, - .stage = - { - .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, - .stage = VK_SHADER_STAGE_COMPUTE_BIT, - .module = mShaderModule, - 
.pName = "main", - .pSpecializationInfo = &specializationInfo, - }, - .layout = mPipelineLayout, - }; - ASSERT_EQ(vkCreateComputePipelines(mDevice, VK_NULL_HANDLE, 1, &pipelineDesc, nullptr, - &mPipeline), - VK_SUCCESS); - - // Create command pool - const VkCommandPoolCreateInfo cmdpoolDesc = { - .sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO, - .flags = 0u, - .queueFamilyIndex = mQueueFamilyIndex, - }; - ASSERT_EQ(vkCreateCommandPool(mDevice, &cmdpoolDesc, nullptr, &mCommandPool), VK_SUCCESS); - - // Create a command buffer - const VkCommandBufferAllocateInfo cmdBufferCreateInfo = { - .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO, - .pNext = nullptr, - .commandPool = mCommandPool, - .level = VK_COMMAND_BUFFER_LEVEL_PRIMARY, - .commandBufferCount = 1, - }; - ASSERT_EQ(vkAllocateCommandBuffers(mDevice, &cmdBufferCreateInfo, &mCommandBuffer), - VK_SUCCESS); - - // Record command buffer - const VkCommandBufferBeginInfo commandBufferBeginInfo = { - .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO, - .pNext = nullptr, - .flags = 0, - .pInheritanceInfo = nullptr, - }; - ASSERT_EQ(vkBeginCommandBuffer(mCommandBuffer, &commandBufferBeginInfo), VK_SUCCESS); - - // Buffer barrier to acquire the ownership of the output buffer - addBufferTransitionBarrier(mCommandBuffer, mOutputBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, - VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0, - VK_ACCESS_SHADER_WRITE_BIT, VK_QUEUE_FAMILY_FOREIGN_EXT, - mQueueFamilyIndex); - - // Setup resources - vkCmdBindPipeline(mCommandBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, mPipeline); - vkCmdBindDescriptorSets(mCommandBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, mPipelineLayout, 0, - 1, &mDescriptorSet, 0, nullptr); - - // Dispatch compute - vkCmdDispatch(mCommandBuffer, dispatchSize.groupCountX, dispatchSize.groupCountY, 1); - - // Buffer barrier to release the ownership of the output buffer - addBufferTransitionBarrier(mCommandBuffer, mOutputBuffer, - VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, - 
VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, VK_ACCESS_SHADER_WRITE_BIT, - 0, mQueueFamilyIndex, VK_QUEUE_FAMILY_FOREIGN_EXT); - - // Finish recording the command buffer - ASSERT_EQ(vkEndCommandBuffer(mCommandBuffer), VK_SUCCESS); - - // Create fence - const VkExportFenceCreateInfo exportFenceCreateInfo = { - .sType = VK_STRUCTURE_TYPE_EXPORT_FENCE_CREATE_INFO, - .pNext = nullptr, - .handleTypes = VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT, - }; - const VkFenceCreateInfo fenceCreateInfo = { - .sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO, - .pNext = &exportFenceCreateInfo, - .flags = 0, - }; - ASSERT_EQ(vkCreateFence(mDevice, &fenceCreateInfo, nullptr, &mFence), VK_SUCCESS); - - mIsValid = true; - } - - void runInternal(bool* outSuccess, base::unique_fd* outSyncFd) { - *outSuccess = false; - - // Submit to queue - const VkSubmitInfo submitInfo = { - .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO, - .waitSemaphoreCount = 0, - .pWaitSemaphores = nullptr, - .pWaitDstStageMask = nullptr, - .commandBufferCount = 1, - .pCommandBuffers = &mCommandBuffer, - .signalSemaphoreCount = 0, - .pSignalSemaphores = nullptr, - }; - ASSERT_EQ(vkResetFences(mDevice, 1, &mFence), VK_SUCCESS); - ASSERT_EQ(vkQueueSubmit(mQueue, 1, &submitInfo, mFence), VK_SUCCESS); - - // Export a Android sync fence FD - int syncFd = -1; - const VkFenceGetFdInfoKHR fenceGetFdInfo = { - .sType = VK_STRUCTURE_TYPE_FENCE_GET_FD_INFO_KHR, - .pNext = nullptr, - .fence = mFence, - .handleType = VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT, - }; - ASSERT_EQ(mPfnVkGetFenceFdKHR(mDevice, &fenceGetFdInfo, &syncFd), VK_SUCCESS); - *outSyncFd = base::unique_fd(syncFd); - - *outSuccess = true; - } - - // Instance - VkInstance mInstance = VK_NULL_HANDLE; - - // Physical device and queue family - VkPhysicalDevice mPhysicalDevice = VK_NULL_HANDLE; - VkPhysicalDeviceProperties mPhysicalDeviceProperties{}; - VkPhysicalDeviceMemoryProperties mPhysicalDeviceMemoryProperties{}; - uint32_t mQueueFamilyIndex = 0; - - // Logical device and queue - 
VkDevice mDevice = VK_NULL_HANDLE; - VkQueue mQueue = VK_NULL_HANDLE; - - // Extension functions - PFN_vkGetFenceFdKHR mPfnVkGetFenceFdKHR = nullptr; - - // Resource descriptors - VkDescriptorPool mDescriptorPool = VK_NULL_HANDLE; - VkDescriptorSetLayout mDescriptorSetLayout = VK_NULL_HANDLE; - VkDescriptorSet mDescriptorSet = VK_NULL_HANDLE; - - // Output buffer - VkBuffer mOutputBuffer = VK_NULL_HANDLE; - VkDeviceMemory mOutputBufferMemory = VK_NULL_HANDLE; - - // Compute pipeline - VkShaderModule mShaderModule = VK_NULL_HANDLE; - VkPipelineLayout mPipelineLayout = VK_NULL_HANDLE; - VkPipeline mPipeline = VK_NULL_HANDLE; - - // Command buffer - VkCommandPool mCommandPool = VK_NULL_HANDLE; - VkCommandBuffer mCommandBuffer = VK_NULL_HANDLE; - VkFence mFence = VK_NULL_HANDLE; - - bool mIsValid = false; -}; - -template <Type dataType> -class NnapiExecutor { - public: - // Returns the created object on success, or nullptr on failure. - static std::unique_ptr<NnapiExecutor> create(const ANeuralNetworksDevice* device, - AHardwareBuffer* input, AHardwareBuffer* output) { - auto nnapi = std::make_unique<NnapiExecutor>(input, output); - nnapi->initialize(device); - return nnapi->mIsValid ? std::move(nnapi) : nullptr; - } - - // Prefer NnapiExecutor::create - NnapiExecutor(AHardwareBuffer* input, AHardwareBuffer* output) - : mInputMemory(input), mOutputMemory(output) {} - - // Returns {success, sync_fd} - std::pair<bool, base::unique_fd> run(const base::unique_fd& inSyncFd) { - bool success = false; - base::unique_fd outSyncFd; - runInternal(inSyncFd, &success, &outSyncFd); - return {success, std::move(outSyncFd)}; - } - - private: - using ElementType = typename TestTypeHelper<dataType>::ElementType; - - void initialize(const ANeuralNetworksDevice* device) { - ASSERT_TRUE(mInputMemory.isValid()); - ASSERT_TRUE(mOutputMemory.isValid()); - - // Model input - const float scale = TestTypeHelper<dataType>::kIsQuantized ? 
1.0f : 0.0f; - const OperandType tensorType(dataType, {kOperandSizeY, kOperandSizeX}, scale, - /*zeroPoint=*/0); - uint32_t inputTensor = mModel.addOperand(&tensorType); - - // Constant tensor - const OperandType constTensorType(dataType, {1}, scale, /*zeroPoint=*/0); - const ElementType constTensorData = static_cast<ElementType>(1); - uint32_t constTensor = - mModel.addConstantOperand<ElementType>(&constTensorType, constTensorData); - - // Activation (NONE) - const OperandType activationType(Type::INT32, {}); - uint32_t activationScalar = mModel.addConstantOperand<int32_t>(&activationType, 0); - - // Model output - uint32_t outputTensor = mModel.addOperand(&tensorType); - - // Model operation - mModel.addOperation(ANEURALNETWORKS_ADD, {inputTensor, constTensor, activationScalar}, - {outputTensor}); - - // Finish model - mModel.identifyInputsAndOutputs({inputTensor}, {outputTensor}); - mModel.relaxComputationFloat32toFloat16(/*isRelax=*/true); - ASSERT_TRUE(mModel.isValid()); - ASSERT_EQ(mModel.finish(), Result::NO_ERROR); - - // Create compilation for the target device - Result result; - std::tie(result, mCompilation) = - test_wrapper::Compilation::createForDevice(&mModel, device); - ASSERT_EQ(result, Result::NO_ERROR); - - // Finish the compilation - result = mCompilation.finish(); - if (result != Result::NO_ERROR) { - GTEST_SKIP() << "Model is not supported by the device"; - } - - mIsValid = true; - } - - void runInternal(const base::unique_fd& inSyncFd, bool* outSuccess, - base::unique_fd* outSyncFd) { - *outSuccess = false; - - // Setup execution - mExecution = std::make_unique<test_wrapper::Execution>(&mCompilation); - ASSERT_EQ(mExecution->setInputFromMemory(/*index=*/0, &mInputMemory, /*offset=*/0, - kOperandLength * sizeof(ElementType)), - Result::NO_ERROR); - ASSERT_EQ(mExecution->setOutputFromMemory(/*index=*/0, &mOutputMemory, /*offset=*/0, - kOperandLength * sizeof(ElementType)), - Result::NO_ERROR); - - // Setup dependencies - std::vector<const 
test_wrapper::Event*> dependencies; - test_wrapper::Event start; - // The sync fence from Vulkan may not be valid if GPU workload has already finished - // prior to exporting the fence. - if (inSyncFd.ok()) { - start = test_wrapper::Event(inSyncFd.get()); - ASSERT_TRUE(start.isValid()); - dependencies = {&start}; - } - - // Fenced compute - test_wrapper::Event finished; - mExecution->startComputeWithDependencies(dependencies, /*infinite timeout*/ 0, &finished); - - // Get the output sync fence if supported; Otherwise, wait until the execution is finished - int syncFd = -1; - finished.getSyncFenceFd(&syncFd); - if (syncFd == -1) { - ASSERT_EQ(finished.wait(), Result::NO_ERROR); - } - *outSyncFd = base::unique_fd(syncFd); - *outSuccess = true; - } - - test_wrapper::Model mModel; - test_wrapper::Compilation mCompilation; - std::unique_ptr<test_wrapper::Execution> mExecution; - test_wrapper::Memory mInputMemory, mOutputMemory; - bool mIsValid = false; -}; - -class GpuNnapiTest : public testing::TestWithParam<NameAndDevice> { - protected: - void TearDown() override { - if (mGpuOutput) { - AHardwareBuffer_release(mGpuOutput); - } - if (mNnapiOutput) { - AHardwareBuffer_release(mNnapiOutput); - } - } - - template <Type dataType> - void runTest() { -#ifndef NNTEST_ONLY_PUBLIC_API - if (DeviceManager::get()->getUseCpuOnly()) { - GTEST_SKIP(); - } -#endif - - // Allocate hardware buffers for GPU and NNAPI outputs - const size_t size = kOperandLength * sizeof(typename TestTypeHelper<dataType>::ElementType); - allocateBlobAhwb( - size, AHARDWAREBUFFER_USAGE_GPU_DATA_BUFFER | AHARDWAREBUFFER_USAGE_CPU_READ_OFTEN, - &mGpuOutput); - allocateBlobAhwb( - size, AHARDWAREBUFFER_USAGE_CPU_READ_OFTEN | AHARDWAREBUFFER_USAGE_CPU_WRITE_OFTEN, - &mNnapiOutput); - if (mGpuOutput == nullptr || mNnapiOutput == nullptr) return; - - // Create Vulkan compute pipeline - auto vulkan = VulkanComputePipeline<dataType>::create(mGpuOutput); - if (vulkan == nullptr) return; - - // Create NNAPI 
executor - auto nnapi = NnapiExecutor<dataType>::create(kDevice, mGpuOutput, mNnapiOutput); - if (nnapi == nullptr) return; - - // Run the test repeatly for kNumberOfIterationsToTest iterations - for (uint32_t i = 0; i < kNumberOfIterationsToTest; i++) { - auto [gpuSuccess, gpuSyncFd] = vulkan->run(); - ASSERT_TRUE(gpuSuccess); - - auto [nnapiSuccess, nnapiSyncFd] = nnapi->run(gpuSyncFd); - ASSERT_TRUE(nnapiSuccess); - - const double tolerance = TestTypeHelper<dataType>::kTolerance; - checkResults<dataType>(std::move(nnapiSyncFd), tolerance); - } - } - - template <Type dataType> - void checkResults(base::unique_fd syncFd, double tolerance) { - using ElementType = typename TestTypeHelper<dataType>::ElementType; - - // Lock the buffer with the sync fence - // AHardwareBuffer_lock will take the ownership and close the sync fence even on errors - void* data; - ASSERT_EQ(AHardwareBuffer_lock(mNnapiOutput, AHARDWAREBUFFER_USAGE_CPU_READ_OFTEN, - syncFd.release(), /*rect=*/nullptr, &data), - 0); - - // Compare the actual results with the expect value - uint32_t numberOfErrors = 0; - const ElementType expected = static_cast<ElementType>(kExpectedResultInInt); - for (uint32_t i = 0; i < kOperandLength; i++) { - const ElementType actual = reinterpret_cast<ElementType*>(data)[i]; - - // We expect the absolute difference in double is within the tolerance. 
- const double expected_f64 = static_cast<double>(expected); - const double actual_f64 = static_cast<double>(actual); - const double diff = std::abs(expected_f64 - actual_f64); - if (diff > tolerance) { - // Print at most kMaxNumberOfPrintedErrors errors by EXPECT_EQ - if (numberOfErrors < kMaxNumberOfPrintedErrors) { - EXPECT_NEAR(actual_f64, expected_f64, tolerance) - << "When comparing element [" << kOperandLength / kOperandSizeX << ", " - << kOperandLength % kOperandSizeX << "]"; - } - numberOfErrors++; - } - } - EXPECT_EQ(numberOfErrors, 0u); - ASSERT_EQ(AHardwareBuffer_unlock(mNnapiOutput, /*fence=*/nullptr), 0); - } - - // The NNAPI device under test - const ANeuralNetworksDevice* kDevice = GetParam().second; - - AHardwareBuffer* mGpuOutput = nullptr; - AHardwareBuffer* mNnapiOutput = nullptr; -}; - -TEST_P(GpuNnapiTest, Float32) { - runTest<Type::TENSOR_FLOAT32>(); -} -TEST_P(GpuNnapiTest, Float16) { - runTest<Type::TENSOR_FLOAT16>(); -} -TEST_P(GpuNnapiTest, Quant8Asymm) { - runTest<Type::TENSOR_QUANT8_ASYMM>(); -} -TEST_P(GpuNnapiTest, Quant8AsymmSigned) { - runTest<Type::TENSOR_QUANT8_ASYMM_SIGNED>(); -} - -INSTANTIATE_TEST_SUITE_P(TestGpuNnapi, GpuNnapiTest, testing::ValuesIn(getNnapiDevices()), - printGpuNnapiTest); - -} // namespace -} // namespace android::nn
diff --git a/runtime/test/TestIntrospectionControl.cpp b/runtime/test/TestIntrospectionControl.cpp index 9125371..913e6ed 100644 --- a/runtime/test/TestIntrospectionControl.cpp +++ b/runtime/test/TestIntrospectionControl.cpp
@@ -14,13 +14,8 @@ * limitations under the License. */ -#include <ExecutionBurstServer.h> -#include <HalInterfaces.h> -#include <SampleDriver.h> -#include <ValidateHal.h> #include <gtest/gtest.h> -#include <algorithm> #include <chrono> #include <iterator> #include <map> @@ -33,19 +28,20 @@ #include <vector> #include "CompilationBuilder.h" -#include "HalUtils.h" +#include "ExecutionBurstServer.h" +#include "HalInterfaces.h" #include "Manager.h" #include "NeuralNetworks.h" #include "NeuralNetworksOEM.h" +#include "SampleDriver.h" #include "TestNeuralNetworksWrapper.h" +#include "Utils.h" +#include "ValidateHal.h" namespace { using namespace ::android; -namespace V1_0 = ::android::hardware::neuralnetworks::V1_0; -namespace V1_1 = ::android::hardware::neuralnetworks::V1_1; -namespace V1_2 = ::android::hardware::neuralnetworks::V1_2; -namespace V1_3 = ::android::hardware::neuralnetworks::V1_3; +using namespace nn::hal; using CompilationBuilder = nn::CompilationBuilder; using Device = nn::Device; @@ -53,6 +49,7 @@ using ExecutePreference = nn::test_wrapper::ExecutePreference; using ExecutionBurstServer = nn::ExecutionBurstServer; using HidlModel = V1_3::Model; +using PreparedModelCallback = nn::PreparedModelCallback; using Result = nn::test_wrapper::Result; using SampleDriver = nn::sample_driver::SampleDriver; using SamplePreparedModel = nn::sample_driver::SamplePreparedModel; @@ -66,42 +63,40 @@ template <typename T> using MQDescriptorSync = hardware::MQDescriptorSync<T>; -constexpr V1_2::Timing kBadTiming = {.timeOnDevice = UINT64_MAX, .timeInDriver = UINT64_MAX}; -constexpr V1_2::Timing kGoodUnfencedTiming = {.timeOnDevice = 123, .timeInDriver = 456}; -constexpr V1_2::Timing kGoodFencedTiming = {.timeOnDevice = 23, .timeInDriver = 56}; +constexpr Timing kBadTiming = {.timeOnDevice = UINT64_MAX, .timeInDriver = UINT64_MAX}; +constexpr Timing kGoodUnfencedTiming = {.timeOnDevice = 123, .timeInDriver = 456}; +constexpr Timing kGoodFencedTiming = {.timeOnDevice = 23, 
.timeInDriver = 56}; // This is an IDevice for testing purposes. The test driver has customized // getCapabilities_1_3 and getSupportedOperations_1_3. class TestDriver : public SampleDriver { public: - TestDriver(const char* name, V1_3::Capabilities capabilities, - const std::vector<bool>& supportedOps) + TestDriver(const char* name, Capabilities capabilities, const std::vector<bool>& supportedOps) : SampleDriver(name), mCapabilities(capabilities), mSupportedOps(supportedOps) {} ~TestDriver() override {} - hardware::Return<void> getCapabilities_1_3(getCapabilities_1_3_cb cb) override { + Return<void> getCapabilities_1_3(getCapabilities_1_3_cb cb) override { cb(V1_3::ErrorStatus::NONE, mCapabilities); - return hardware::Void(); + return Void(); } - hardware::Return<void> getSupportedOperations_1_3(const V1_3::Model& model, - getSupportedOperations_1_3_cb cb) override { + Return<void> getSupportedOperations_1_3(const Model& model, + getSupportedOperations_1_3_cb cb) override { if (!android::nn::validateModel(model)) { cb(V1_3::ErrorStatus::INVALID_ARGUMENT, std::vector<bool>()); - return hardware::Void(); + return Void(); } const size_t count = model.main.operations.size(); std::vector<bool> supported(count); - std::transform(model.main.operations.begin(), model.main.operations.end(), - supported.begin(), [this](V1_3::Operation op) { - return mSupportedOps[static_cast<int32_t>(op.type)]; - }); + std::transform( + model.main.operations.begin(), model.main.operations.end(), supported.begin(), + [this](Operation op) { return mSupportedOps[static_cast<int32_t>(op.type)]; }); cb(V1_3::ErrorStatus::NONE, supported); - return hardware::Void(); + return Void(); } private: - V1_3::Capabilities mCapabilities; + Capabilities mCapabilities; std::vector<bool> mSupportedOps; }; @@ -124,7 +119,7 @@ struct DeviceSpecification { DeviceSpecification(const std::string& name, float perf, std::vector<bool>& supportedOps) : mName(name), mSupportedOps(supportedOps) { - 
V1_0::PerformanceInfo perfInfo = {.execTime = perf, .powerUsage = perf}; + PerformanceInfo perfInfo = {.execTime = perf, .powerUsage = perf}; mCapabilities = { .relaxedFloat32toFloat16PerformanceScalar = perfInfo, .relaxedFloat32toFloat16PerformanceTensor = perfInfo, @@ -134,17 +129,17 @@ .whilePerformance = perfInfo}; } std::string mName; - V1_3::Capabilities mCapabilities; + Capabilities mCapabilities; std::vector<bool> mSupportedOps; }; // From a vector of DeviceSpecification, register new Devices. void registerDevices(std::vector<DeviceSpecification> specifications) { for (const auto& specification : specifications) { - DeviceManager::get()->forTest_registerDevice(nn::makeSharedDevice( + DeviceManager::get()->forTest_registerDevice( specification.mName.c_str(), new TestDriver(specification.mName.c_str(), specification.mCapabilities, - specification.mSupportedOps))); + specification.mSupportedOps)); } } @@ -388,14 +383,14 @@ // Returns (unfenced timing, fenced timing). // Not for PASS_CPU. -std::pair<V1_2::Timing, V1_2::Timing> getExpectedTiming(Success s, bool fencedExecution) { +std::pair<Timing, Timing> getExpectedTiming(Success s, bool fencedExecution) { CHECK_NE(s, Success::PASS_CPU); if (!hasBit(s, Success::PASS_BIT)) { return {kBadTiming, kBadTiming}; } - std::pair<V1_2::Timing, V1_2::Timing> result; + std::pair<Timing, Timing> result; result.first.timeOnDevice = hasBit(s, Success::PASS_UNFENCED_DEVICE_BIT) ? kGoodUnfencedTiming.timeOnDevice : UINT64_MAX; @@ -416,17 +411,17 @@ } // For these tests we don't care about actually running an inference -- we -// just want to placeholder up execution status and timing results, and control +// just want to dummy up execution status and timing results, and control // when the execution finishes. 
class TestPreparedModelLatest : public SamplePreparedModel { public: TestPreparedModelLatest(const HidlModel& model, const SampleDriver* driver, Success success) - : SamplePreparedModel(model, driver, V1_1::ExecutionPreference::FAST_SINGLE_ANSWER, uid_t{}, - nn::kDefaultPriority13), + : SamplePreparedModel(model, driver, ExecutionPreference::FAST_SINGLE_ANSWER, uid_t{}, + kDefaultPriority), mSuccess(success) {} - hardware::Return<V1_0::ErrorStatus> execute( - const V1_0::Request&, const sp<V1_0::IExecutionCallback>& callback) override { + Return<V1_0::ErrorStatus> execute(const V1_0::Request&, + const sp<V1_0::IExecutionCallback>& callback) override { switch (mSuccess) { case Success::PASS_NEITHER: std::thread([callback] { @@ -450,10 +445,9 @@ } } - hardware::Return<V1_0::ErrorStatus> execute_1_2( - const V1_0::Request&, V1_2::MeasureTiming measure, - const sp<V1_2::IExecutionCallback>& callback) override { - EXPECT_EQ(measure, V1_2::MeasureTiming::YES); + Return<V1_0::ErrorStatus> execute_1_2(const V1_0::Request&, MeasureTiming measure, + const sp<V1_2::IExecutionCallback>& callback) override { + EXPECT_EQ(measure, MeasureTiming::YES); switch (mSuccess) { case Success::PASS_NEITHER: case Success::PASS_DEVICE: @@ -481,18 +475,17 @@ } } - hardware::Return<V1_3::ErrorStatus> execute_1_3( - const V1_3::Request&, V1_2::MeasureTiming measure, const V1_3::OptionalTimePoint&, - const V1_3::OptionalTimeoutDuration&, - const sp<V1_3::IExecutionCallback>& callback) override { - // Use a placeholder V1_0::Request because execute_1_2 ignores request entirely. + Return<V1_3::ErrorStatus> execute_1_3(const V1_3::Request&, MeasureTiming measure, + const OptionalTimePoint&, const OptionalTimeoutDuration&, + const sp<V1_3::IExecutionCallback>& callback) override { + // Use a dummy V1_0::Request because execute_1_2 ignores request entirely. 
const V1_0::ErrorStatus status = execute_1_2(V1_0::Request{}, measure, callback); return convertToV1_3(status); } - hardware::Return<void> executeSynchronously(const V1_0::Request&, V1_2::MeasureTiming measure, - executeSynchronously_cb cb) override { - EXPECT_EQ(measure, V1_2::MeasureTiming::YES); + Return<void> executeSynchronously(const V1_0::Request&, MeasureTiming measure, + executeSynchronously_cb cb) override { + EXPECT_EQ(measure, MeasureTiming::YES); switch (mSuccess) { case Success::PASS_NEITHER: case Success::PASS_DEVICE: @@ -500,7 +493,7 @@ case Success::PASS_BOTH: dummyExecution(); cb(V1_0::ErrorStatus::NONE, {}, getExpectedTiming(mSuccess, false).first); - return hardware::Void(); + return Void(); case Success::FAIL_WAIT: // While this is a synchronous execution method, the NNAPI // runtime may call it even for asynchronous execution, so we @@ -510,32 +503,29 @@ case Success::FAIL_LAUNCH: dummyExecution(); cb(V1_0::ErrorStatus::GENERAL_FAILURE, {}, kBadTiming); - return hardware::Void(); + return Void(); default: ADD_FAILURE() << "Unexpected Success kind"; cb(V1_0::ErrorStatus::GENERAL_FAILURE, {}, kBadTiming); - return hardware::Void(); + return Void(); } } - hardware::Return<void> executeSynchronously_1_3(const V1_3::Request&, - V1_2::MeasureTiming measure, - const V1_3::OptionalTimePoint&, - const V1_3::OptionalTimeoutDuration&, - executeSynchronously_1_3_cb cb) override { + Return<void> executeSynchronously_1_3(const V1_3::Request&, MeasureTiming measure, + const OptionalTimePoint&, const OptionalTimeoutDuration&, + executeSynchronously_1_3_cb cb) override { const auto wrappedCb = [&cb](V1_0::ErrorStatus status, - const hardware::hidl_vec<V1_2::OutputShape>& outputShapes, - V1_2::Timing timing) { + const hidl_vec<OutputShape>& outputShapes, Timing timing) { cb(convertToV1_3(status), outputShapes, timing); }; - // Use a placeholder V1_0::Request because executeSynchronously ignores request entirely. 
+ // Use a dummy V1_0::Request because executeSynchronously ignores request entirely. return executeSynchronously(V1_0::Request{}, measure, wrappedCb); } // ExecutionBurstServer::create has an overload that will use // IPreparedModel::executeSynchronously(), so we can rely on that, rather // than having to implement ExecutionBurstServer::IExecutorWithCache. - hardware::Return<void> configureExecutionBurst( + Return<void> configureExecutionBurst( const sp<V1_2::IBurstCallback>& callback, const MQDescriptorSync<V1_2::FmqRequestDatum>& requestChannel, const MQDescriptorSync<V1_2::FmqResultDatum>& resultChannel, @@ -544,26 +534,21 @@ callback, requestChannel, resultChannel, this, std::chrono::microseconds{0}); cb(burst == nullptr ? V1_0::ErrorStatus::GENERAL_FAILURE : V1_0::ErrorStatus::NONE, burst); - return hardware::Void(); + return Void(); } - hardware::Return<void> executeFenced(const V1_3::Request&, - const hardware::hidl_vec<hardware::hidl_handle>&, - V1_2::MeasureTiming measure, - const V1_3::OptionalTimePoint&, - const V1_3::OptionalTimeoutDuration&, - const V1_3::OptionalTimeoutDuration&, - executeFenced_cb callback) override { - EXPECT_EQ(measure, V1_2::MeasureTiming::YES); + Return<void> executeFenced(const Request&, const hidl_vec<hidl_handle>&, MeasureTiming measure, + const OptionalTimePoint&, const OptionalTimeoutDuration&, + const OptionalTimeoutDuration&, executeFenced_cb callback) override { + EXPECT_EQ(measure, MeasureTiming::YES); if (hasBit(mSuccess, Success::PASS_BIT)) { dummyExecution(); const auto expectedTiming = getExpectedTiming(mSuccess, true); sp<SampleFencedExecutionCallback> fencedExecutionCallback = new SampleFencedExecutionCallback(expectedTiming.first, expectedTiming.second, V1_3::ErrorStatus::NONE); - callback(V1_3::ErrorStatus::NONE, hardware::hidl_handle(nullptr), - fencedExecutionCallback); - return hardware::Void(); + callback(V1_3::ErrorStatus::NONE, hidl_handle(nullptr), fencedExecutionCallback); + return Void(); } switch 
(mSuccess) { case Success::FAIL_WAIT: @@ -574,12 +559,11 @@ FALLTHROUGH_INTENDED; case Success::FAIL_LAUNCH: dummyExecution(); - callback(V1_3::ErrorStatus::GENERAL_FAILURE, hardware::hidl_handle(nullptr), - nullptr); - return hardware::Void(); + callback(V1_3::ErrorStatus::GENERAL_FAILURE, hidl_handle(nullptr), nullptr); + return Void(); default: ADD_FAILURE() << "Unexpected Success kind"; - return hardware::Void(); + return Void(); } } @@ -589,17 +573,10 @@ static void pauseExecutions(bool v) { mPauseExecutions.store(v); } // This function is only guaranteed to work in the following pattern: - // Consider thread A as primary thread - // - thread A: pauseExecutions(true); - // - thread A: launch execution (as thread B) - // - thread A: waitForExecutionToBegin(), block until call to dummyExecution by - // thread B makes mExecutionsInFlight nonzero - // - thread B: dummyExecution(), which makes mExecutionsInFlight nonzero and blocks - // until thread A calls pauseExecutions(false) - // - thread A: waitForExecutionToBegin() returns - // - thread A: pauseExecutions(false), allowing dummyExecution() on thread B to continue - // - thread B: dummyExecution() zeroes mExecutionsInFlight and returns - // - thread B: thread exits + // - pauseExecutions(true); + // - // launch execution + // - // thread A: waitForExecutionToBegin() + // - // thread B: pauseExecutions(false); static void waitForExecutionToBegin() { CHECK(mPauseExecutions.load()); while (mExecutionsInFlight.load() == 0) { @@ -630,24 +607,22 @@ TestPreparedModel12(const HidlModel& model, const SampleDriver* driver, Success success) : mLatestPreparedModel(new TestPreparedModelLatest(model, driver, success)) {} - hardware::Return<V1_0::ErrorStatus> execute( - const V1_0::Request& request, const sp<V1_0::IExecutionCallback>& callback) override { + Return<V1_0::ErrorStatus> execute(const V1_0::Request& request, + const sp<V1_0::IExecutionCallback>& callback) override { return mLatestPreparedModel->execute(request, 
callback); } - hardware::Return<V1_0::ErrorStatus> execute_1_2( - const V1_0::Request& request, V1_2::MeasureTiming measure, - const sp<V1_2::IExecutionCallback>& callback) override { + Return<V1_0::ErrorStatus> execute_1_2(const V1_0::Request& request, MeasureTiming measure, + const sp<V1_2::IExecutionCallback>& callback) override { return mLatestPreparedModel->execute_1_2(request, measure, callback); } - hardware::Return<void> executeSynchronously(const V1_0::Request& request, - V1_2::MeasureTiming measure, - executeSynchronously_cb cb) override { + Return<void> executeSynchronously(const V1_0::Request& request, MeasureTiming measure, + executeSynchronously_cb cb) override { return mLatestPreparedModel->executeSynchronously(request, measure, cb); } - hardware::Return<void> configureExecutionBurst( + Return<void> configureExecutionBurst( const sp<V1_2::IBurstCallback>& callback, const MQDescriptorSync<V1_2::FmqRequestDatum>& requestChannel, const MQDescriptorSync<V1_2::FmqResultDatum>& resultChannel, @@ -657,7 +632,7 @@ } private: - const sp<V1_3::IPreparedModel> mLatestPreparedModel; + const sp<IPreparedModel> mLatestPreparedModel; }; // Like TestPreparedModelLatest, but implementing 1.0 @@ -666,13 +641,13 @@ TestPreparedModel10(const HidlModel& model, const SampleDriver* driver, Success success) : mLatestPreparedModel(new TestPreparedModelLatest(model, driver, success)) {} - hardware::Return<V1_0::ErrorStatus> execute( - const V1_0::Request& request, const sp<V1_0::IExecutionCallback>& callback) override { + Return<V1_0::ErrorStatus> execute(const V1_0::Request& request, + const sp<V1_0::IExecutionCallback>& callback) override { return mLatestPreparedModel->execute(request, callback); } private: - const sp<V1_3::IPreparedModel> mLatestPreparedModel; + const sp<IPreparedModel> mLatestPreparedModel; }; // Behaves like SampleDriver, except that it produces customized IPrepareModel. 
@@ -681,26 +656,31 @@ TestDriver13(const std::string& name, Success success) : SampleDriver(name.c_str()), mSuccess(success) {} - hardware::Return<void> getCapabilities_1_3(getCapabilities_1_3_cb _hidl_cb) override { + Return<void> getCapabilities_1_3(getCapabilities_1_3_cb _hidl_cb) override { android::nn::initVLogMask(); - V1_3::Capabilities capabilities = nn::makeCapabilities(0.75f); + const PerformanceInfo kPerf = {.execTime = 0.75f, .powerUsage = 0.75f}; + Capabilities capabilities = { + .relaxedFloat32toFloat16PerformanceScalar = kPerf, + .relaxedFloat32toFloat16PerformanceTensor = kPerf, + .operandPerformance = + nn::nonExtensionOperandPerformance<nn::HalVersion::V1_3>(kPerf)}; _hidl_cb(V1_3::ErrorStatus::NONE, capabilities); - return hardware::Void(); + return Void(); } - hardware::Return<void> getSupportedOperations_1_3(const HidlModel& model, - getSupportedOperations_1_3_cb cb) override { + Return<void> getSupportedOperations_1_3(const HidlModel& model, + getSupportedOperations_1_3_cb cb) override { if (nn::validateModel(model)) { std::vector<bool> supported(model.main.operations.size(), true); cb(V1_3::ErrorStatus::NONE, supported); } else { cb(V1_3::ErrorStatus::INVALID_ARGUMENT, {}); } - return hardware::Void(); + return Void(); } - hardware::Return<void> getSupportedOperations_1_2(const V1_2::Model& model, - getSupportedOperations_1_2_cb cb) override { + Return<void> getSupportedOperations_1_2(const V1_2::Model& model, + getSupportedOperations_1_2_cb cb) override { if (nn::validateModel(model)) { std::vector<bool> supported(model.operations.size(), true); cb(V1_0::ErrorStatus::NONE, supported); @@ -708,41 +688,39 @@ std::vector<bool> supported; cb(V1_0::ErrorStatus::INVALID_ARGUMENT, supported); } - return hardware::Void(); + return Void(); } - hardware::Return<V1_3::ErrorStatus> prepareModel_1_3( - const HidlModel& model, V1_1::ExecutionPreference, V1_3::Priority, - const V1_3::OptionalTimePoint&, const hardware::hidl_vec<hardware::hidl_handle>&, - 
const hardware::hidl_vec<hardware::hidl_handle>&, const nn::HalCacheToken&, + Return<V1_3::ErrorStatus> prepareModel_1_3( + const HidlModel& model, ExecutionPreference, Priority, const OptionalTimePoint&, + const hidl_vec<hidl_handle>&, const hidl_vec<hidl_handle>&, const CacheToken&, const sp<V1_3::IPreparedModelCallback>& callback) override { callback->notify_1_3(V1_3::ErrorStatus::NONE, new TestPreparedModel13(model, this, mSuccess)); return V1_3::ErrorStatus::NONE; } - hardware::Return<V1_0::ErrorStatus> prepareModel_1_2( - const V1_2::Model& model, V1_1::ExecutionPreference, - const hardware::hidl_vec<hardware::hidl_handle>&, - const hardware::hidl_vec<hardware::hidl_handle>&, const nn::HalCacheToken&, + Return<V1_0::ErrorStatus> prepareModel_1_2( + const V1_2::Model& model, ExecutionPreference, const hidl_vec<hidl_handle>&, + const hidl_vec<hidl_handle>&, const CacheToken&, const sp<V1_2::IPreparedModelCallback>& callback) override { callback->notify_1_2(V1_0::ErrorStatus::NONE, new TestPreparedModel12(nn::convertToV1_3(model), this, mSuccess)); return V1_0::ErrorStatus::NONE; } - hardware::Return<V1_0::ErrorStatus> prepareModel_1_1( - const V1_1::Model& model, V1_1::ExecutionPreference, + Return<V1_0::ErrorStatus> prepareModel_1_1( + const V1_1::Model& model, ExecutionPreference, const sp<V1_0::IPreparedModelCallback>& callback) override { callback->notify(V1_0::ErrorStatus::NONE, new TestPreparedModel10(nn::convertToV1_3(model), this, mSuccess)); return V1_0::ErrorStatus::NONE; } - hardware::Return<V1_0::ErrorStatus> prepareModel( + Return<V1_0::ErrorStatus> prepareModel( const V1_0::Model& model, const sp<V1_0::IPreparedModelCallback>& callback) override { - return prepareModel_1_1(nn::convertToV1_1(model), - V1_1::ExecutionPreference::FAST_SINGLE_ANSWER, callback); + return prepareModel_1_1(nn::convertToV1_1(model), ExecutionPreference::FAST_SINGLE_ANSWER, + callback); } private: @@ -754,27 +732,27 @@ public: TestDriver11(const std::string& name, Success 
success) : mLatestDriver(new TestDriver13(name, success)) {} - hardware::Return<void> getCapabilities_1_1(getCapabilities_1_1_cb _hidl_cb) override { + Return<void> getCapabilities_1_1(getCapabilities_1_1_cb _hidl_cb) override { return mLatestDriver->getCapabilities_1_1(_hidl_cb); } - hardware::Return<void> getSupportedOperations_1_1( - const V1_1::Model& model, getSupportedOperations_1_1_cb _hidl_cb) override { + Return<void> getSupportedOperations_1_1(const V1_1::Model& model, + getSupportedOperations_1_1_cb _hidl_cb) override { return mLatestDriver->getSupportedOperations_1_1(model, _hidl_cb); } - hardware::Return<V1_0::ErrorStatus> prepareModel_1_1( - const V1_1::Model& model, V1_1::ExecutionPreference preference, + Return<V1_0::ErrorStatus> prepareModel_1_1( + const V1_1::Model& model, ExecutionPreference preference, const sp<V1_0::IPreparedModelCallback>& actualCallback) override { return mLatestDriver->prepareModel_1_1(model, preference, actualCallback); } - hardware::Return<V1_0::DeviceStatus> getStatus() override { return mLatestDriver->getStatus(); } - hardware::Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override { + Return<DeviceStatus> getStatus() override { return mLatestDriver->getStatus(); } + Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override { return mLatestDriver->getCapabilities(_hidl_cb); } - hardware::Return<void> getSupportedOperations(const V1_0::Model& model, - getSupportedOperations_cb _hidl_cb) override { + Return<void> getSupportedOperations(const V1_0::Model& model, + getSupportedOperations_cb _hidl_cb) override { return mLatestDriver->getSupportedOperations(model, _hidl_cb); } - hardware::Return<V1_0::ErrorStatus> prepareModel( + Return<V1_0::ErrorStatus> prepareModel( const V1_0::Model& model, const sp<V1_0::IPreparedModelCallback>& actualCallback) override { return mLatestDriver->prepareModel(model, actualCallback); @@ -856,15 +834,13 @@ } case DriverKind::OLD: { static const char name[] = "old"; - 
DeviceManager::get()->forTest_registerDevice( - nn::makeSharedDevice(name, new TestDriver11(name, kSuccess))); + DeviceManager::get()->forTest_registerDevice(name, new TestDriver11(name, kSuccess)); ASSERT_TRUE(selectDeviceByName(name)); break; } case DriverKind::NEW: { static const char name[] = "new"; - DeviceManager::get()->forTest_registerDevice( - nn::makeSharedDevice(name, new TestDriver13(name, kSuccess))); + DeviceManager::get()->forTest_registerDevice(name, new TestDriver13(name, kSuccess)); ASSERT_TRUE(selectDeviceByName(name)); break; } @@ -1128,8 +1104,8 @@ std::make_tuple(DriverKind::NEW, Success::FAIL_LAUNCH, Compute::FENCED)); -INSTANTIATE_TEST_SUITE_P(Unfenced, TimingTest, kTimingTestUnfencedValues); -INSTANTIATE_TEST_SUITE_P(Fenced, TimingTest, kTimingTestFencedValues); +INSTANTIATE_TEST_CASE_P(Unfenced, TimingTest, kTimingTestUnfencedValues); +INSTANTIATE_TEST_CASE_P(Fenced, TimingTest, kTimingTestFencedValues); } // namespace timing_tests @@ -1175,8 +1151,7 @@ using namespace test_drivers; static const char name[] = "driver11"; - DeviceManager::get()->forTest_registerDevice( - nn::makeSharedDevice(name, new TestDriver11(name, Success::PASS_BOTH))); + DeviceManager::get()->forTest_registerDevice(name, new TestDriver11(name, Success::PASS_BOTH)); ASSERT_TRUE(selectDeviceByName(name)); createAddMaxModel(&mModel, false); @@ -1192,8 +1167,7 @@ using namespace test_drivers; static const char name[] = "driver11"; - DeviceManager::get()->forTest_registerDevice( - nn::makeSharedDevice(name, new TestDriver11(name, Success::PASS_BOTH))); + DeviceManager::get()->forTest_registerDevice(name, new TestDriver11(name, Success::PASS_BOTH)); ASSERT_TRUE(selectDeviceByName(name)); createAddMaxModel(&mModel, true); @@ -1231,181 +1205,6 @@ ASSERT_TRUE(model->isValid()); } -TEST_F(IntrospectionControlTest, SlicingFullySupported) { - // This is needed before we have the CPU fallback path being treated as a Device. 
- if (DeviceManager::get()->getUseCpuOnly()) { - GTEST_SKIP(); - } - - using namespace test_drivers; - - static const char name[] = "driver11"; - DeviceManager::get()->forTest_registerDevice( - nn::makeSharedDevice(name, new TestDriver11(name, Success::PASS_BOTH))); - ASSERT_TRUE(selectDeviceByName(name)); - - createAddMulModel(&mModel, false); - EXPECT_TRUE(isSupportedOpListExpected({true, true})); -} - -void createCondModel(WrapperModel* model, bool dynamicRank) { - const auto dimensions = dynamicRank ? std::vector<uint32_t>{} : std::vector<uint32_t>{1}; - WrapperOperandType floatType(WrapperType::TENSOR_FLOAT32, dimensions); - WrapperOperandType boolType(WrapperType::TENSOR_BOOL8, {1}); - // Phase 1, operands - auto op1 = model->addOperand(&floatType); - auto op2 = model->addOperand(&boolType); - // Phase 2, operations - model->addOperation(ANEURALNETWORKS_LESS, {op1, op1}, {op2}); - // Phase 3, inputs and outputs - model->identifyInputsAndOutputs({op1}, {op2}); - model->finish(); -} - -void addReluOperation(WrapperModel* model, std::vector<uint32_t>* modelInputIndexes, - std::vector<uint32_t>* modelOutputIndexes, bool dynamicRank) { - const auto dimensions = dynamicRank ? 
std::vector<uint32_t>{} : std::vector<uint32_t>{1}; - WrapperOperandType type(WrapperType::TENSOR_FLOAT32, dimensions); - // Phase 1, operands - auto op1 = model->addOperand(&type); - auto op2 = model->addOperand(&type); - // Phase 2, operations - model->addOperation(ANEURALNETWORKS_RELU, {op1}, {op2}); - // Phase 3, inputs and outputs - modelInputIndexes->push_back(op1); - modelOutputIndexes->push_back(op2); -} - -void createReluModel(WrapperModel* model, bool dynamicRank) { - std::vector<uint32_t> modelInputIndexes, modelOutputIndexes; - addReluOperation(model, &modelInputIndexes, &modelOutputIndexes, dynamicRank); - model->identifyInputsAndOutputs(modelInputIndexes, modelOutputIndexes); - model->finish(); -} - -void addWhileOperation(std::vector<WrapperModel>* extraModels, WrapperModel* mainModel, - std::vector<uint32_t>* modelInputIndexes, - std::vector<uint32_t>* modelOutputIndexes, bool dynamicRank) { - const auto dimensions = dynamicRank ? std::vector<uint32_t>{} : std::vector<uint32_t>{1}; - WrapperOperandType floatType(WrapperType::TENSOR_FLOAT32, dimensions); - WrapperOperandType modelType(WrapperType::MODEL, {}); - - extraModels->emplace_back(); - extraModels->emplace_back(); - WrapperModel* condModel = &extraModels->at(extraModels->size() - 2); - WrapperModel* bodyModel = &extraModels->at(extraModels->size() - 1); - createCondModel(condModel, dynamicRank); - createReluModel(bodyModel, dynamicRank); - ASSERT_TRUE(condModel->isValid()); - ASSERT_TRUE(bodyModel->isValid()); - - // Phase 1, operands - const uint32_t op1 = mainModel->addOperand(&modelType); - const uint32_t op2 = mainModel->addOperand(&modelType); - const uint32_t op3 = mainModel->addOperand(&floatType); - const uint32_t op4 = mainModel->addOperand(&floatType); - mainModel->setOperandValueFromModel(op1, condModel); - mainModel->setOperandValueFromModel(op2, bodyModel); - // Phase 2, operations - mainModel->addOperation(ANEURALNETWORKS_WHILE, {op1, op2, op3}, {op4}); - // Phase 3, inputs and 
outputs - modelInputIndexes->push_back(op3); - modelOutputIndexes->push_back(op4); -} - -void createReluStaticWhileModel(std::vector<WrapperModel>* extraModels, WrapperModel* mainModel) { - std::vector<uint32_t> modelInputIndexes, modelOutputIndexes; - - // Operation supported in Android API level 27 - addReluOperation(mainModel, &modelInputIndexes, &modelOutputIndexes, /*dynamicRank=*/false); - // Operation supported in Android API level 30 - addWhileOperation(extraModels, mainModel, &modelInputIndexes, &modelOutputIndexes, - /*dynamicRank=*/false); - - mainModel->identifyInputsAndOutputs(modelInputIndexes, modelOutputIndexes); - mainModel->finish(); - ASSERT_TRUE(mainModel->isValid()); -} - -TEST_F(IntrospectionControlTest, ControlFlowNotSupported) { - // This is needed before we have the CPU fallback path being treated as a Device. - if (DeviceManager::get()->getUseCpuOnly()) { - GTEST_SKIP(); - } - - using namespace test_drivers; - - static const char name[] = "driver11"; - DeviceManager::get()->forTest_registerDevice( - nn::makeSharedDevice(name, new TestDriver11(name, Success::PASS_BOTH))); - ASSERT_TRUE(selectDeviceByName(name)); - - std::vector<WrapperModel> extraModels; - createReluStaticWhileModel(&extraModels, &mModel); - EXPECT_TRUE(isSupportedOpListExpected({true, false})); - - // Clear mModel early because it may reference `extraModels`. - mModel = WrapperModel{}; -} - -TEST_F(IntrospectionControlTest, ControlFlowSupported) { - // This is needed before we have the CPU fallback path being treated as a Device. 
- if (DeviceManager::get()->getUseCpuOnly()) { - GTEST_SKIP(); - } - - using namespace test_drivers; - - static const char name[] = "driver13"; - DeviceManager::get()->forTest_registerDevice( - nn::makeSharedDevice(name, new TestDriver13(name, Success::PASS_BOTH))); - ASSERT_TRUE(selectDeviceByName(name)); - - std::vector<WrapperModel> extraModels; - createReluStaticWhileModel(&extraModels, &mModel); - EXPECT_TRUE(isSupportedOpListExpected({true, true})); - - // Clear mModel early because it may reference `extraModels`. - mModel = WrapperModel{}; -} - -void createStaticWhileDynamicWhileModel(std::vector<WrapperModel>* extraModels, - WrapperModel* mainModel) { - std::vector<uint32_t> modelInputIndexes, modelOutputIndexes; - - // Operation supported in Android API level 30 - addWhileOperation(extraModels, mainModel, &modelInputIndexes, &modelOutputIndexes, - /*dynamicRank=*/false); - // Operation supported only by NNAPI runtime - addWhileOperation(extraModels, mainModel, &modelInputIndexes, &modelOutputIndexes, - /*dynamicRank=*/true); - - mainModel->identifyInputsAndOutputs(modelInputIndexes, modelOutputIndexes); - mainModel->finish(); - ASSERT_TRUE(mainModel->isValid()); -} - -TEST_F(IntrospectionControlTest, ControlFlowFailedToSlice) { - // This is needed before we have the CPU fallback path being treated as a Device. - if (DeviceManager::get()->getUseCpuOnly()) { - GTEST_SKIP(); - } - - using namespace test_drivers; - - static const char name[] = "driver13"; - DeviceManager::get()->forTest_registerDevice( - nn::makeSharedDevice(name, new TestDriver13(name, Success::PASS_BOTH))); - ASSERT_TRUE(selectDeviceByName(name)); - - std::vector<WrapperModel> extraModels; - createStaticWhileDynamicWhileModel(&extraModels, &mModel); - EXPECT_TRUE(isSupportedOpListExpected({false, false})); - - // Clear mModel early because it may reference `extraModels`. - mModel = WrapperModel{}; -} - // TODO(miaowang): add a test to make sure ANNCompilation_create() has CPU // fallback. 
// This test verifies that a device that could only handle ADD would correctly report that an
diff --git a/runtime/test/TestMain.cpp b/runtime/test/TestMain.cpp index 016c6e3..a8ce555 100644 --- a/runtime/test/TestMain.cpp +++ b/runtime/test/TestMain.cpp
@@ -16,6 +16,14 @@ #define LOG_TAG "NeuralNetworksTest" +#include "LogTestCaseToLogcat.h" +#include "TestNeuralNetworksWrapper.h" + +#ifndef NNTEST_ONLY_PUBLIC_API +#include "Manager.h" +#include "Utils.h" +#endif + #include <android-base/logging.h> #include <gtest/gtest.h> @@ -24,15 +32,6 @@ #include <sstream> #include <string> -#include "LogTestCaseToLogcat.h" -#include "TestNeuralNetworksWrapper.h" - -#ifndef NNTEST_ONLY_PUBLIC_API -#include <Utils.h> - -#include "Manager.h" -#endif - namespace { using namespace android::nn::test_wrapper; @@ -51,20 +50,27 @@ // non-public DeviceManager::setUseCpuOnly(); we assume the setting is always // false, and if we are asked to set it to true, we return 0 ("success") without // running tests. -static int test(bool useCpuOnly, Execution::ComputeMode computeMode) { +// +// EXCEPTION: If NNTEST_ONLY_PUBLIC_API is defined, then we cannot call +// non-public DeviceManager::setSyncExecHal(); we assume the setting is always +// true, and if we are asked to set it to false, we return 0 ("success") without +// running tests. +static int test(bool useCpuOnly, Execution::ComputeMode computeMode, bool allowSyncExecHal = true) { // NOTE: The test mapping configuration (packages/modules/NeuralNetworks/TEST_MAPPING) uses - // the value of 4 to only run pass 2 of the test, corresponding to - // "useCpuOnly = 0, computeMode = ComputeMode::ASYNC". + // the value of 1024 to only run pass 10 of the test, corresponding to + // "useCpuOnly = 0, computeMode = ComputeMode::ASYNC, allowSyncExecHal = 1". // If you change the bit representation here, also make the corresponding // change to the TEST_MAPPING file to run the equivalent pass of the test. 
- uint32_t passIndex = (useCpuOnly << 0) + (static_cast<uint32_t>(computeMode) << 1); + uint32_t passIndex = + (useCpuOnly << 0) + (static_cast<uint32_t>(computeMode) << 1) + (allowSyncExecHal << 3); #ifdef NNTEST_ONLY_PUBLIC_API - if (useCpuOnly) { + if (useCpuOnly || !allowSyncExecHal) { return 0; } #else android::nn::DeviceManager::get()->setUseCpuOnly(useCpuOnly); + android::nn::DeviceManager::get()->setSyncExecHal(allowSyncExecHal); #endif Execution::setComputeMode(computeMode); @@ -85,7 +91,7 @@ std::stringstream stream; stream << "useCpuOnly = " << useCpuOnly << ", computeMode = " << computeModeText() - << " // pass " << passIndex; + << ", allowSyncExecHal = " << allowSyncExecHal << " // pass " << passIndex; const std::string message = stream.str(); LOG(INFO) << message; std::cout << "[**********] " << message << std::endl; @@ -129,11 +135,19 @@ test(/*useCpuOnly=*/true, Execution::ComputeMode::ASYNC) | test(/*useCpuOnly=*/true, Execution::ComputeMode::SYNC); + // Now try disabling use of synchronous execution HAL. + // + // Whether or not the use of synchronous execution HAL is enabled should make no + // difference when useCpuOnly = true; we already ran test(true, *, true) above, + // so there's no reason to run test(true, *, false) now. + n |= test(/*useCpuOnly=*/false, Execution::ComputeMode::ASYNC, /*allowSyncExecHal=*/false) | + test(/*useCpuOnly=*/false, Execution::ComputeMode::SYNC, /*allowSyncExecHal=*/false); + // Now try execution using a burst. // // The burst path is off by default in these tests. This is the first case - // where it is turned on. "useCpuOnly" is irrelevant here because the burst - // path is separate. + // where it is turned on. Both "useCpuOnly" and "allowSyncExecHal" are + // irrelevant here because the burst path is separate from both. n |= test(/*useCpuOnly=*/false, Execution::ComputeMode::BURST); return n;
diff --git a/runtime/test/TestMemory.cpp b/runtime/test/TestMemory.cpp index 11b1442..122bde2 100644 --- a/runtime/test/TestMemory.cpp +++ b/runtime/test/TestMemory.cpp
@@ -16,14 +16,13 @@ #include "TestMemory.h" -#include <android-base/scopeguard.h> +#include "TestNeuralNetworksWrapper.h" + #include <gtest/gtest.h> #include <sys/mman.h> #include <sys/types.h> #include <unistd.h> -#include "TestNeuralNetworksWrapper.h" - using WrapperCompilation = ::android::nn::test_wrapper::Compilation; using WrapperExecution = ::android::nn::test_wrapper::Execution; using WrapperMemory = ::android::nn::test_wrapper::Memory; @@ -91,9 +90,6 @@ unlink(path); } -// Hardware buffers are an Android concept, which aren't necessarily -// available on other platforms such as ChromeOS, which also build NNAPI. -#if defined(__ANDROID__) TEST_F(MemoryTest, TestAHardwareBuffer) { const uint32_t offsetForMatrix2 = 20; const uint32_t offsetForMatrix3 = 200; @@ -107,8 +103,6 @@ }; AHardwareBuffer* buffer = nullptr; ASSERT_EQ(AHardwareBuffer_allocate(&desc, &buffer), 0); - auto allocateGuard = - android::base::make_scope_guard([buffer]() { AHardwareBuffer_release(buffer); }); void* bufferPtr = nullptr; ASSERT_EQ(AHardwareBuffer_lock(buffer, desc.usage, -1, NULL, &bufferPtr), 0); @@ -149,7 +143,8 @@ ASSERT_EQ(execution2.setOutput(0, actual, sizeof(Matrix3x4)), WrapperResult::NO_ERROR); ASSERT_EQ(execution2.compute(), WrapperResult::NO_ERROR); ASSERT_EQ(CompareMatrices(expected3, actual), 0); -} -#endif + AHardwareBuffer_release(buffer); + buffer = nullptr; +} } // end namespace
diff --git a/runtime/test/TestMemoryDomain.cpp b/runtime/test/TestMemoryDomain.cpp index 0f462bc..4a9c394 100644 --- a/runtime/test/TestMemoryDomain.cpp +++ b/runtime/test/TestMemoryDomain.cpp
@@ -14,9 +14,6 @@ * limitations under the License. */ -#include <HalInterfaces.h> -#include <SampleDriver.h> -#include <SampleDriverFull.h> #include <android/hardware/neuralnetworks/1.2/ADevice.h> #include <gtest/gtest.h> @@ -28,30 +25,29 @@ #include <utility> #include <vector> -#include "HalUtils.h" +#include "HalInterfaces.h" #include "Manager.h" #include "Memory.h" +#include "SampleDriver.h" +#include "SampleDriverFull.h" #include "TestNeuralNetworksWrapper.h" #include "TestUtils.h" using namespace android::nn; -namespace hardware = android::hardware; -using WrapperResult = test_wrapper::Result; +using namespace hal; +using Result = test_wrapper::Result; using Type = test_wrapper::Type; -using android::sp; -using android::nn::isAhwbBlob; namespace { // A buffer for test that does nothing. -class TestBuffer : public V1_3::IBuffer { +class TestBuffer : public IBuffer { public: - hardware::Return<V1_3::ErrorStatus> copyTo(const hardware::hidl_memory&) override { - return V1_3::ErrorStatus::DEVICE_UNAVAILABLE; + Return<ErrorStatus> copyTo(const hidl_memory&) override { + return ErrorStatus::DEVICE_UNAVAILABLE; } - hardware::Return<V1_3::ErrorStatus> copyFrom(const hardware::hidl_memory&, - const hardware::hidl_vec<uint32_t>&) override { - return V1_3::ErrorStatus::DEVICE_UNAVAILABLE; + Return<ErrorStatus> copyFrom(const hidl_memory&, const hidl_vec<uint32_t>&) override { + return ErrorStatus::DEVICE_UNAVAILABLE; } }; @@ -77,67 +73,64 @@ class TestDriverLatest : public sample_driver::SampleDriver { public: - TestDriverLatest(const char* name, std::set<V1_3::OperationType> supportedOperations, + TestDriverLatest(const char* name, std::set<OperationType> supportedOperations, AllocateReturn allocateReturn) : SampleDriver(name), kSupportedOperations(std::move(supportedOperations)), kAllocateReturn(allocateReturn) {} - hardware::Return<void> getCapabilities_1_3(getCapabilities_1_3_cb cb) override { + Return<void> getCapabilities_1_3(getCapabilities_1_3_cb cb) override { 
android::nn::initVLogMask(); // Faster than cpu. - const V1_0::PerformanceInfo kPerf = {.execTime = 0.1, .powerUsage = 0.1}; - const V1_3::Capabilities capabilities = { + const PerformanceInfo kPerf = {.execTime = 0.1, .powerUsage = 0.1}; + const Capabilities capabilities = { .relaxedFloat32toFloat16PerformanceScalar = kPerf, .relaxedFloat32toFloat16PerformanceTensor = kPerf, .operandPerformance = nonExtensionOperandPerformance<HalVersion::V1_3>(kPerf), .ifPerformance = kPerf, .whilePerformance = kPerf}; - cb(V1_3::ErrorStatus::NONE, capabilities); - return hardware::Void(); + cb(ErrorStatus::NONE, capabilities); + return Void(); } - hardware::Return<void> getSupportedOperations_1_3(const V1_3::Model& model, - getSupportedOperations_1_3_cb cb) override { + Return<void> getSupportedOperations_1_3(const Model& model, + getSupportedOperations_1_3_cb cb) override { // The tests will never use a referenced model. CHECK(model.referenced.size() == 0); std::vector<bool> supported(model.main.operations.size(), false); - std::transform(model.main.operations.begin(), model.main.operations.end(), - supported.begin(), [this](const V1_3::Operation& op) { - return kSupportedOperations.count(op.type) > 0; - }); - cb(V1_3::ErrorStatus::NONE, supported); - return hardware::Void(); + std::transform( + model.main.operations.begin(), model.main.operations.end(), supported.begin(), + [this](const Operation& op) { return kSupportedOperations.count(op.type) > 0; }); + cb(ErrorStatus::NONE, supported); + return Void(); } - hardware::Return<void> allocate(const V1_3::BufferDesc&, - const hardware::hidl_vec<sp<V1_3::IPreparedModel>>&, - const hardware::hidl_vec<V1_3::BufferRole>&, - const hardware::hidl_vec<V1_3::BufferRole>&, - allocate_cb cb) override { + Return<void> allocate(const BufferDesc&, const hidl_vec<sp<IPreparedModel>>&, + const hidl_vec<BufferRole>&, const hidl_vec<BufferRole>&, + allocate_cb cb) override { switch (kAllocateReturn) { case AllocateReturn::OK: - 
cb(V1_3::ErrorStatus::NONE, new TestBuffer(), mValidBufferToken++); - return hardware::Void(); + cb(ErrorStatus::NONE, new TestBuffer(), mValidBufferToken++); + return Void(); case AllocateReturn::BAD_IBUFFER: - cb(V1_3::ErrorStatus::NONE, nullptr, mValidBufferToken++); - return hardware::Void(); + cb(ErrorStatus::NONE, nullptr, mValidBufferToken++); + return Void(); case AllocateReturn::BAD_TOKEN: - cb(V1_3::ErrorStatus::NONE, new TestBuffer(), 0); - return hardware::Void(); + cb(ErrorStatus::NONE, new TestBuffer(), 0); + return Void(); case AllocateReturn::BAD_STATUS: - cb(V1_3::ErrorStatus::GENERAL_FAILURE, new TestBuffer(), mValidBufferToken++); - return hardware::Void(); + cb(ErrorStatus::GENERAL_FAILURE, new TestBuffer(), mValidBufferToken++); + return Void(); case AllocateReturn::NOT_SUPPORTED: - cb(V1_3::ErrorStatus::GENERAL_FAILURE, nullptr, 0); - return hardware::Void(); + cb(ErrorStatus::GENERAL_FAILURE, nullptr, 0); + return Void(); } LOG(FATAL) << "Invalid AllocateReturn code " << static_cast<int>(kAllocateReturn); - return hardware::Void(); + return Void(); } private: - const std::set<V1_3::OperationType> kSupportedOperations; + const std::set<OperationType> kSupportedOperations; const AllocateReturn kAllocateReturn; uint32_t mValidBufferToken = 1; }; @@ -150,24 +143,26 @@ // +--- SUB ---> temp ---+ // input2 ---+ // -void createTestModel(test_wrapper::Model* model) { +test_wrapper::Model createTestModel() { + test_wrapper::Model model; test_wrapper::OperandType tensorTypeFullySpecified(Type::TENSOR_FLOAT32, {1}); test_wrapper::OperandType tensorTypeDynamicShape(Type::TENSOR_FLOAT32, {0}); test_wrapper::OperandType actType(Type::INT32, {}); - uint32_t input0 = model->addOperand(&tensorTypeFullySpecified); - uint32_t input1 = model->addOperand(&tensorTypeFullySpecified); - uint32_t input2 = model->addOperand(&tensorTypeFullySpecified); - uint32_t temp = model->addOperand(&tensorTypeFullySpecified); - uint32_t output0 = 
model->addOperand(&tensorTypeFullySpecified); - uint32_t output1 = model->addOperand(&tensorTypeDynamicShape); - uint32_t act = model->addOperand(&actType); + uint32_t input0 = model.addOperand(&tensorTypeFullySpecified); + uint32_t input1 = model.addOperand(&tensorTypeFullySpecified); + uint32_t input2 = model.addOperand(&tensorTypeFullySpecified); + uint32_t temp = model.addOperand(&tensorTypeFullySpecified); + uint32_t output0 = model.addOperand(&tensorTypeFullySpecified); + uint32_t output1 = model.addOperand(&tensorTypeDynamicShape); + uint32_t act = model.addOperand(&actType); int32_t activation = 0; - model->setOperandValue(act, &activation, sizeof(int32_t)); - model->addOperation(ANEURALNETWORKS_ADD, {input0, input1, act}, {output0}); - model->addOperation(ANEURALNETWORKS_SUB, {input1, input2, act}, {temp}); - model->addOperation(ANEURALNETWORKS_MUL, {output0, temp, act}, {output1}); - model->identifyInputsAndOutputs({input0, input1, input2}, {output0, output1}); - EXPECT_EQ(model->finish(), WrapperResult::NO_ERROR); + model.setOperandValue(act, &activation, sizeof(int32_t)); + model.addOperation(ANEURALNETWORKS_ADD, {input0, input1, act}, {output0}); + model.addOperation(ANEURALNETWORKS_SUB, {input1, input2, act}, {temp}); + model.addOperation(ANEURALNETWORKS_MUL, {output0, temp, act}, {output1}); + model.identifyInputsAndOutputs({input0, input1, input2}, {output0, output1}); + EXPECT_EQ(model.finish(), Result::NO_ERROR); + return model; } class MemoryDomainTestBase : public ::testing::Test { @@ -177,7 +172,6 @@ if (DeviceManager::get()->getUseCpuOnly()) { GTEST_SKIP(); } - createTestModel(&mModel); // Clear the device list. 
DeviceManager::get()->forTest_setDevices({}); } @@ -206,14 +200,14 @@ std::vector<const ANeuralNetworksDevice*> devices(deviceNames.size()); std::transform(deviceNames.begin(), deviceNames.end(), devices.begin(), [&deviceMap](const std::string& name) { return deviceMap.at(name); }); - WrapperResult result; + Result result; std::tie(result, compilation) = - test_wrapper::Compilation::createForDevices(&mModel, devices); - EXPECT_EQ(result, WrapperResult::NO_ERROR); + test_wrapper::Compilation::createForDevices(&kModel, devices); + EXPECT_EQ(result, Result::NO_ERROR); } else { - compilation = test_wrapper::Compilation(&mModel); + compilation = test_wrapper::Compilation(&kModel); } - EXPECT_EQ(compilation.finish(), WrapperResult::NO_ERROR); + EXPECT_EQ(compilation.finish(), Result::NO_ERROR); return compilation; } @@ -239,9 +233,11 @@ return {n, test_wrapper::Memory(memory)}; } - test_wrapper::Model mModel; + static const test_wrapper::Model kModel; }; +const test_wrapper::Model MemoryDomainTestBase::kModel = createTestModel(); + // Test memory domain with the following parameters // - If true, use a V1_2 driver, otherwise, use the latest version; // - If true, compile with explicit device list, otherwise, compile in the default way; @@ -252,20 +248,18 @@ public ::testing::WithParamInterface<MemoryDomainTestParam> { protected: // If kUseV1_2Driver, allocateReturn must be AllocateReturn::NOT_SUPPORTED. 
- void createAndRegisterDriver(const char* name, - std::set<V1_3::OperationType> supportedOperations, + void createAndRegisterDriver(const char* name, std::set<OperationType> supportedOperations, AllocateReturn allocateReturn) { + sp<V1_0::IDevice> driver; if (kUseV1_2Driver) { CHECK(allocateReturn == AllocateReturn::NOT_SUPPORTED); const sp<TestDriverLatest> testDriver = new TestDriverLatest(name, supportedOperations, AllocateReturn::NOT_SUPPORTED); - DeviceManager::get()->forTest_registerDevice( - makeSharedDevice(name, new V1_2::ADevice(testDriver))); + driver = new V1_2::ADevice(testDriver); } else { - DeviceManager::get()->forTest_registerDevice(makeSharedDevice( - name, - new TestDriverLatest(name, std::move(supportedOperations), allocateReturn))); + driver = new TestDriverLatest(name, std::move(supportedOperations), allocateReturn); } + DeviceManager::get()->forTest_registerDevice(name, driver); } // If not kCompileWithExplicitDeviceList, the input argument "deviceNames" is ignored. @@ -282,16 +276,11 @@ const AllocateReturn kAllocateReturn = std::get<2>(GetParam()); }; -bool isAshmem(const SharedMemory& memory) { - return memory != nullptr && std::holds_alternative<Memory::Ashmem>(memory->handle); -} - // Test device memory allocation on a compilation with only a single partition. TEST_P(MemoryDomainTest, SinglePartition) { - createAndRegisterDriver( - "test_driver", - {V1_3::OperationType::ADD, V1_3::OperationType::SUB, V1_3::OperationType::MUL}, - kAllocateReturn); + createAndRegisterDriver("test_driver", + {OperationType::ADD, OperationType::SUB, OperationType::MUL}, + kAllocateReturn); auto compilation = createCompilation({"test_driver"}); ASSERT_NE(compilation.getHandle(), nullptr); @@ -299,7 +288,7 @@ if (kAllocateReturn == AllocateReturn::OK) { // The memory should be backed by the IBuffer returned from the driver. 
ASSERT_EQ(n, ANEURALNETWORKS_NO_ERROR); - const RuntimeMemory* m = reinterpret_cast<const RuntimeMemory*>(memory.get()); + const Memory* m = reinterpret_cast<const Memory*>(memory.get()); ASSERT_NE(m, nullptr); EXPECT_NE(m->getIBuffer(), nullptr); } else { @@ -309,15 +298,15 @@ } else { // The memory should fallback to ashmem or blob ahwb based on the driver version. ASSERT_EQ(n, ANEURALNETWORKS_NO_ERROR); - const RuntimeMemory* m = reinterpret_cast<const RuntimeMemory*>(memory.get()); + const Memory* m = reinterpret_cast<const Memory*>(memory.get()); ASSERT_NE(m, nullptr); EXPECT_EQ(m->getIBuffer(), nullptr); - const auto& memory = m->getMemory(); - EXPECT_TRUE(validate(memory).ok()); + const auto& hidlMemory = m->getHidlMemory(); + EXPECT_TRUE(hidlMemory.valid()); if (kUseV1_2Driver) { - EXPECT_TRUE(isAshmem(memory)); + EXPECT_EQ(hidlMemory.name(), "ashmem"); } else { - EXPECT_TRUE(isAhwbBlob(memory)); + EXPECT_EQ(hidlMemory.name(), "hardware_buffer_blob"); } } } @@ -325,9 +314,9 @@ // Test device memory allocation on a compilation with multiple partitions. TEST_P(MemoryDomainTest, MultiplePartitions) { - createAndRegisterDriver("test_driver_add", {V1_3::OperationType::ADD}, kAllocateReturn); - createAndRegisterDriver("test_driver_sub", {V1_3::OperationType::SUB}, kAllocateReturn); - createAndRegisterDriver("test_driver_mul", {V1_3::OperationType::MUL}, kAllocateReturn); + createAndRegisterDriver("test_driver_add", {OperationType::ADD}, kAllocateReturn); + createAndRegisterDriver("test_driver_sub", {OperationType::SUB}, kAllocateReturn); + createAndRegisterDriver("test_driver_mul", {OperationType::MUL}, kAllocateReturn); auto compilation = createCompilation({"test_driver_add", "test_driver_sub", "test_driver_mul"}); ASSERT_NE(compilation.getHandle(), nullptr); @@ -337,7 +326,7 @@ if (kAllocateReturn == AllocateReturn::OK) { // The memory should be backed by the IBuffer returned from the driver. 
ASSERT_EQ(n, ANEURALNETWORKS_NO_ERROR); - const RuntimeMemory* m = reinterpret_cast<const RuntimeMemory*>(memory.get()); + const Memory* m = reinterpret_cast<const Memory*>(memory.get()); ASSERT_NE(m, nullptr); EXPECT_NE(m->getIBuffer(), nullptr); } else { @@ -347,15 +336,15 @@ } else { // The memory should fallback to ashmem or blob ahwb based on the driver version. ASSERT_EQ(n, ANEURALNETWORKS_NO_ERROR); - const RuntimeMemory* m = reinterpret_cast<const RuntimeMemory*>(memory.get()); + const Memory* m = reinterpret_cast<const Memory*>(memory.get()); ASSERT_NE(m, nullptr); EXPECT_EQ(m->getIBuffer(), nullptr); - const auto& memory = m->getMemory(); - EXPECT_TRUE(validate(memory).ok()); + const auto& hidlMemory = m->getHidlMemory(); + EXPECT_TRUE(hidlMemory.valid()); if (kUseV1_2Driver) { - EXPECT_TRUE(isAshmem(memory)); + EXPECT_EQ(hidlMemory.name(), "ashmem"); } else { - EXPECT_TRUE(isAhwbBlob(memory)); + EXPECT_EQ(hidlMemory.name(), "hardware_buffer_blob"); } } } @@ -371,15 +360,15 @@ } else { // The memory should fallback to ashmem or blob ahwb based on the driver version. ASSERT_EQ(n, ANEURALNETWORKS_NO_ERROR); - const RuntimeMemory* m = reinterpret_cast<const RuntimeMemory*>(memory.get()); + const Memory* m = reinterpret_cast<const Memory*>(memory.get()); ASSERT_NE(m, nullptr); EXPECT_EQ(m->getIBuffer(), nullptr); - const auto& memory = m->getMemory(); - EXPECT_TRUE(validate(memory).ok()); + const auto& hidlMemory = m->getHidlMemory(); + EXPECT_TRUE(hidlMemory.valid()); if (kUseV1_2Driver) { - EXPECT_TRUE(isAshmem(memory)); + EXPECT_EQ(hidlMemory.name(), "ashmem"); } else { - EXPECT_TRUE(isAhwbBlob(memory)); + EXPECT_EQ(hidlMemory.name(), "hardware_buffer_blob"); } } } @@ -394,15 +383,15 @@ } else { // The memory should fallback to ashmem or blob ahwb based on the driver version. 
ASSERT_EQ(n, ANEURALNETWORKS_NO_ERROR); - const RuntimeMemory* m = reinterpret_cast<const RuntimeMemory*>(memory.get()); + const Memory* m = reinterpret_cast<const Memory*>(memory.get()); ASSERT_NE(m, nullptr); EXPECT_EQ(m->getIBuffer(), nullptr); - const auto& memory = m->getMemory(); - EXPECT_TRUE(validate(memory).ok()); + const auto& hidlMemory = m->getHidlMemory(); + EXPECT_TRUE(hidlMemory.valid()); if (kUseV1_2Driver) { - EXPECT_TRUE(isAshmem(memory)); + EXPECT_EQ(hidlMemory.name(), "ashmem"); } else { - EXPECT_TRUE(isAhwbBlob(memory)); + EXPECT_EQ(hidlMemory.name(), "hardware_buffer_blob"); } } } @@ -410,10 +399,9 @@ // Test device memory allocation with dynamic shape. TEST_P(MemoryDomainTest, DynamicShape) { - createAndRegisterDriver( - "test_driver", - {V1_3::OperationType::ADD, V1_3::OperationType::SUB, V1_3::OperationType::MUL}, - kAllocateReturn); + createAndRegisterDriver("test_driver", + {OperationType::ADD, OperationType::SUB, OperationType::MUL}, + kAllocateReturn); auto compilation = createCompilation({"test_driver"}); ASSERT_NE(compilation.getHandle(), nullptr); @@ -421,7 +409,7 @@ if (kAllocateReturn == AllocateReturn::OK) { // The memory should be backed by the IBuffer returned from the driver. ASSERT_EQ(n, ANEURALNETWORKS_NO_ERROR); - const RuntimeMemory* m = reinterpret_cast<const RuntimeMemory*>(memory.get()); + const Memory* m = reinterpret_cast<const Memory*>(memory.get()); ASSERT_NE(m, nullptr); EXPECT_NE(m->getIBuffer(), nullptr); } else { @@ -434,25 +422,19 @@ testing::Values(AllocateReturn::OK, AllocateReturn::BAD_TOKEN, AllocateReturn::BAD_IBUFFER, AllocateReturn::BAD_STATUS, AllocateReturn::NOT_SUPPORTED); -INSTANTIATE_TEST_SUITE_P(DeviceVersionV1_2, MemoryDomainTest, - testing::Combine(testing::Values(true), testing::Bool(), - testing::Values(AllocateReturn::NOT_SUPPORTED))); - -// Hardware buffers are an Android concept, which aren't necessarily -// available on other platforms such as ChromeOS, which also build NNAPI. 
-// When using the latest driver, memory is allocated via hardware buffers, -// which will fail on non-android platforms. -#if defined(__ANDROID__) -INSTANTIATE_TEST_SUITE_P(DeviceVersionLatest, MemoryDomainTest, - testing::Combine(testing::Values(false), testing::Bool(), - kAllocateReturnChoices)); +INSTANTIATE_TEST_CASE_P(DeviceVersionLatest, MemoryDomainTest, + testing::Combine(testing::Values(false), testing::Bool(), + kAllocateReturnChoices)); +INSTANTIATE_TEST_CASE_P(DeviceVersionV1_2, MemoryDomainTest, + testing::Combine(testing::Values(true), testing::Bool(), + testing::Values(AllocateReturn::NOT_SUPPORTED))); class MemoryDomainCopyTest : public MemoryDomainTestBase {}; TEST_F(MemoryDomainCopyTest, MemoryCopyTest) { - DeviceManager::get()->forTest_registerDevice(makeSharedDevice( - "test_driver", new sample_driver::SampleDriverFull( - "test_driver", {.execTime = 0.1f, .powerUsage = 0.1f}))); + sp<sample_driver::SampleDriverFull> driver(new sample_driver::SampleDriverFull( + "test_driver", {.execTime = 0.1f, .powerUsage = 0.1f})); + DeviceManager::get()->forTest_registerDevice("test_driver", driver); auto compilation = createCompilation({"test_driver"}); ASSERT_NE(compilation.getHandle(), nullptr); @@ -478,6 +460,5 @@ EXPECT_EQ(ashmem2->dataAs<float>()[0], initValue1); } -#endif } // namespace
diff --git a/runtime/test/TestMemoryInternal.cpp b/runtime/test/TestMemoryInternal.cpp index 67e282b..5657912 100644 --- a/runtime/test/TestMemoryInternal.cpp +++ b/runtime/test/TestMemoryInternal.cpp
@@ -18,17 +18,18 @@ // files, including NN API HIDL definitions. // It is not part of CTS. +#include "TestMemory.h" + +#include "Manager.h" +#include "Memory.h" +#include "TestNeuralNetworksWrapper.h" + #include <android/sharedmem.h> #include <gtest/gtest.h> #include <fstream> #include <string> -#include "Manager.h" -#include "Memory.h" -#include "TestMemory.h" -#include "TestNeuralNetworksWrapper.h" - using WrapperCompilation = ::android::nn::test_wrapper::Compilation; using WrapperExecution = ::android::nn::test_wrapper::Execution; using WrapperMemory = ::android::nn::test_wrapper::Memory;
diff --git a/runtime/test/TestNeuralNetworksWrapper.h b/runtime/test/TestNeuralNetworksWrapper.h index f02c75b..ae40121 100644 --- a/runtime/test/TestNeuralNetworksWrapper.h +++ b/runtime/test/TestNeuralNetworksWrapper.h
@@ -33,10 +33,6 @@ #include "NeuralNetworksWrapper.h" #include "NeuralNetworksWrapperExtensions.h" -#ifndef __NNAPI_FL5_MIN_ANDROID_API__ -#define __NNAPI_FL5_MIN_ANDROID_API__ __ANDROID_API_FUTURE__ -#endif - namespace android { namespace nn { namespace test_wrapper { @@ -312,46 +308,6 @@ Result finish() { return static_cast<Result>(ANeuralNetworksCompilation_finish(mCompilation)); } - Result getPreferredMemoryAlignmentForInput(uint32_t index, uint32_t* alignment) const { - if (__builtin_available(android __NNAPI_FL5_MIN_ANDROID_API__, *)) { - return static_cast<Result>( - NNAPI_CALL(ANeuralNetworksCompilation_getPreferredMemoryAlignmentForInput( - mCompilation, index, alignment))); - } else { - return Result::FEATURE_LEVEL_TOO_LOW; - } - }; - - Result getPreferredMemoryPaddingForInput(uint32_t index, uint32_t* padding) const { - if (__builtin_available(android __NNAPI_FL5_MIN_ANDROID_API__, *)) { - return static_cast<Result>( - NNAPI_CALL(ANeuralNetworksCompilation_getPreferredMemoryPaddingForInput( - mCompilation, index, padding))); - } else { - return Result::FEATURE_LEVEL_TOO_LOW; - } - }; - - Result getPreferredMemoryAlignmentForOutput(uint32_t index, uint32_t* alignment) const { - if (__builtin_available(android __NNAPI_FL5_MIN_ANDROID_API__, *)) { - return static_cast<Result>( - NNAPI_CALL(ANeuralNetworksCompilation_getPreferredMemoryAlignmentForOutput( - mCompilation, index, alignment))); - } else { - return Result::FEATURE_LEVEL_TOO_LOW; - } - }; - - Result getPreferredMemoryPaddingForOutput(uint32_t index, uint32_t* padding) const { - if (__builtin_available(android __NNAPI_FL5_MIN_ANDROID_API__, *)) { - return static_cast<Result>( - NNAPI_CALL(ANeuralNetworksCompilation_getPreferredMemoryPaddingForOutput( - mCompilation, index, padding))); - } else { - return Result::FEATURE_LEVEL_TOO_LOW; - } - }; - ANeuralNetworksCompilation* getHandle() const { return mCompilation; } protected: @@ -434,24 +390,6 @@ return 
static_cast<Result>(ANeuralNetworksExecution_setLoopTimeout(mExecution, duration)); } - Result enableInputAndOutputPadding(bool enable) { - if (__builtin_available(android __NNAPI_FL5_MIN_ANDROID_API__, *)) { - return static_cast<Result>( - ANeuralNetworksExecution_enableInputAndOutputPadding(mExecution, enable)); - } else { - return Result::FEATURE_LEVEL_TOO_LOW; - } - } - - Result setReusable(bool reusable) { - if (__builtin_available(android __NNAPI_FL5_MIN_ANDROID_API__, *)) { - return static_cast<Result>( - NNAPI_CALL(ANeuralNetworksExecution_setReusable(mExecution, reusable))); - } else { - return Result::FEATURE_LEVEL_TOO_LOW; - } - } - Result startCompute(Event* event) { ANeuralNetworksEvent* ev = nullptr; Result result = static_cast<Result>(ANeuralNetworksExecution_startCompute(mExecution, &ev)); @@ -471,23 +409,8 @@ return result; } - // By default, compute() uses the synchronous API. Either an argument or - // setComputeMode() can be used to change the behavior of compute() to - // either: - // - use the asynchronous or fenced API and then wait for computation to complete - // or - // - use the burst API - // Returns the previous ComputeMode. - enum class ComputeMode { SYNC, ASYNC, BURST, FENCED }; - static ComputeMode setComputeMode(ComputeMode mode) { - ComputeMode oldComputeMode = mComputeMode; - mComputeMode = mode; - return oldComputeMode; - } - static ComputeMode getComputeMode() { return mComputeMode; } - - Result compute(ComputeMode computeMode = mComputeMode) { - switch (computeMode) { + Result compute() { + switch (mComputeMode) { case ComputeMode::SYNC: { return static_cast<Result>(ANeuralNetworksExecution_compute(mExecution)); } @@ -532,6 +455,19 @@ return Result::BAD_DATA; } + // By default, compute() uses the synchronous API. 
setComputeMode() can be + // used to change the behavior of compute() to either: + // - use the asynchronous API and then wait for computation to complete + // or + // - use the burst API + // Returns the previous ComputeMode. + enum class ComputeMode { SYNC, ASYNC, BURST, FENCED }; + static ComputeMode setComputeMode(ComputeMode mode) { + ComputeMode oldComputeMode = mComputeMode; + mComputeMode = mode; + return oldComputeMode; + } + Result getOutputOperandDimensions(uint32_t index, std::vector<uint32_t>* dimensions) { uint32_t rank = 0; Result result = static_cast<Result>(
diff --git a/runtime/test/TestOpenmpSettings.cpp b/runtime/test/TestOpenmpSettings.cpp index 0720745..af50c63 100644 --- a/runtime/test/TestOpenmpSettings.cpp +++ b/runtime/test/TestOpenmpSettings.cpp
@@ -14,11 +14,11 @@ * limitations under the License. */ -#include <CpuExecutor.h> +#include "CpuExecutor.h" + #include <gtest/gtest.h> #include <omp.h> #include <unistd.h> - #include <algorithm> #include <memory> #include <random>
diff --git a/runtime/test/TestOperandExtraParams.cpp b/runtime/test/TestOperandExtraParams.cpp index 3f7f969..ce7f589 100644 --- a/runtime/test/TestOperandExtraParams.cpp +++ b/runtime/test/TestOperandExtraParams.cpp
@@ -14,16 +14,15 @@ * limitations under the License. */ -#include <gtest/gtest.h> - #include "NeuralNetworks.h" #include "NeuralNetworksOEM.h" #include "NeuralNetworksWrapper.h" - #ifndef NNTEST_ONLY_PUBLIC_API -#include <Utils.h> +#include "Utils.h" #endif +#include <gtest/gtest.h> + namespace { using namespace android::nn::wrapper;
diff --git a/runtime/test/TestPartitioning.cpp b/runtime/test/TestPartitioning.cpp index 6d239b2..1a2ea21 100644 --- a/runtime/test/TestPartitioning.cpp +++ b/runtime/test/TestPartitioning.cpp
@@ -14,36 +14,32 @@ * limitations under the License. */ -#include <ControlFlow.h> -#include <HalInterfaces.h> -#include <SampleDriver.h> -#include <Utils.h> -#include <ValidateHal.h> #include <gtest/gtest.h> #include <algorithm> #include <filesystem> #include <functional> -#include <iostream> #include <map> #include <memory> -#include <numeric> #include <queue> #include <set> #include <string> -#include <tuple> #include <type_traits> #include <utility> #include <vector> #include "CompilationBuilder.h" +#include "ControlFlow.h" #include "ExecutionPlan.h" -#include "HalUtils.h" +#include "HalInterfaces.h" #include "Manager.h" #include "ModelBuilder.h" #include "NeuralNetworks.h" #include "NeuralNetworksOEM.h" +#include "SampleDriver.h" #include "TestNeuralNetworksWrapper.h" +#include "Utils.h" +#include "ValidateHal.h" // Uncomment the following line to generate some debugging output that // may be useful when analyzing failures: @@ -148,53 +144,51 @@ namespace { -namespace hardware = android::hardware; -namespace V1_0 = ::android::hardware::neuralnetworks::V1_0; -namespace V1_1 = ::android::hardware::neuralnetworks::V1_1; -namespace V1_2 = ::android::hardware::neuralnetworks::V1_2; -namespace V1_3 = ::android::hardware::neuralnetworks::V1_3; +using namespace android::nn::hal; using CompilationBuilder = ::android::nn::CompilationBuilder; +using Deadline = ::android::nn::Deadline; using Device = ::android::nn::Device; using DeviceManager = ::android::nn::DeviceManager; using ExecutePreference = ::android::nn::test_wrapper::ExecutePreference; using ExecutePriority = ::android::nn::test_wrapper::ExecutePriority; using ExecutionPlan = ::android::nn::ExecutionPlan; using ExecutionStep = ::android::nn::ExecutionStep; -using HalCacheToken = ::android::nn::HalCacheToken; using HalVersion = ::android::nn::HalVersion; using HidlModel = V1_3::Model; -using IOType = ::android::nn::IOType; using LogicalStep = ::android::nn::LogicalStep; using ModelBuilder = 
::android::nn::ModelBuilder; -using Operand = ::android::nn::Operand; -using Operation = ::android::nn::Operation; -using OptionalTimePoint = ::android::nn::OptionalTimePoint; using Result = ::android::nn::test_wrapper::Result; using SampleDriver = ::android::nn::sample_driver::SampleDriver; -using SharedDevice = ::android::nn::SharedDevice; -using SourceOperandIndex = ::android::nn::SourceOperandIndex; -using StepRole = ::android::nn::StepRole; using WrapperCompilation = ::android::nn::test_wrapper::Compilation; -using WrapperExecution = ::android::nn::test_wrapper::Execution; using WrapperModel = ::android::nn::test_wrapper::Model; using WrapperOperandType = ::android::nn::test_wrapper::OperandType; using WrapperSymmPerChannelQuantParams = ::android::nn::test_wrapper::SymmPerChannelQuantParams; using WrapperType = ::android::nn::test_wrapper::Type; -using android::sp; -void update(V1_3::Capabilities* capabilities, V1_3::OperandType type, float perf) { - V1_0::PerformanceInfo perfInfo = {.execTime = perf, .powerUsage = perf}; +template <typename T> +using MQDescriptorSync = ::android::hardware::MQDescriptorSync<T>; + +constexpr Timing kBadTiming = {.timeOnDevice = UINT64_MAX, .timeInDriver = UINT64_MAX}; + +Capabilities makeCapabilities(float perf) { + PerformanceInfo perfInfo = {.execTime = perf, .powerUsage = perf}; + return {.relaxedFloat32toFloat16PerformanceScalar = perfInfo, + .relaxedFloat32toFloat16PerformanceTensor = perfInfo, + .operandPerformance = + ::android::nn::nonExtensionOperandPerformance<HalVersion::V1_3>(perfInfo), + .ifPerformance = perfInfo, + .whilePerformance = perfInfo}; +}; + +void update(Capabilities* capabilities, OperandType type, float perf) { + PerformanceInfo perfInfo = {.execTime = perf, .powerUsage = perf}; ::android::nn::update(&capabilities->operandPerformance, type, perfInfo); } -float lookupExecTime(const V1_3::Capabilities& capabilities, V1_3::OperandType type) { +float lookupExecTime(const Capabilities& capabilities, 
OperandType type) { return ::android::nn::lookup(capabilities.operandPerformance, type).execTime; } -HalVersion min(HalVersion a, HalVersion b) { - return int32_t(a) < int32_t(b) ? a : b; -} - const uint32_t kNumFuseCodes = 4; const uint32_t kBadOperation = ~0; @@ -223,16 +217,16 @@ const uint32_t kFirstEncodingV1_3 = kFirstEncodingHARD_SWISH; const uint32_t kLastEncodingV1_3 = kFirstEncodingHARD_SWISH; -const std::map<V1_3::OperationType, uint32_t> operationToFirstEncoding = { - {V1_3::OperationType::ADD, kFirstEncodingADD}, - {V1_3::OperationType::MUL, kFirstEncodingMUL}, - {V1_3::OperationType::DIV, kFirstEncodingDIV}, - {V1_3::OperationType::SUB, kFirstEncodingSUB}, - {V1_3::OperationType::MAXIMUM, kFirstEncodingMAXIMUM}, - {V1_3::OperationType::MINIMUM, kFirstEncodingMINIMUM}, - {V1_3::OperationType::POW, kFirstEncodingPOW}, - {V1_3::OperationType::PRELU, kFirstEncodingPRELU}, - {V1_3::OperationType::HARD_SWISH, kFirstEncodingHARD_SWISH}, +const std::map<OperationType, uint32_t> operationToFirstEncoding = { + {OperationType::ADD, kFirstEncodingADD}, + {OperationType::MUL, kFirstEncodingMUL}, + {OperationType::DIV, kFirstEncodingDIV}, + {OperationType::SUB, kFirstEncodingSUB}, + {OperationType::MAXIMUM, kFirstEncodingMAXIMUM}, + {OperationType::MINIMUM, kFirstEncodingMINIMUM}, + {OperationType::POW, kFirstEncodingPOW}, + {OperationType::PRELU, kFirstEncodingPRELU}, + {OperationType::HARD_SWISH, kFirstEncodingHARD_SWISH}, }; // Sorted in reverse order (std::greater) so that we can use map::lower_bound to @@ -253,20 +247,20 @@ // Look up the operation with the specified index in a graph, and return the // operation encoding; or, if for some reason this is not one of the encoded // operations, then return kBadOperation. 
-uint32_t lookupOperation(std::function<const V1_3::Operation&(uint32_t)> getOperation, - std::function<const V1_3::Operand&(uint32_t)> getOperand, +uint32_t lookupOperation(std::function<const Operation&(uint32_t)> getOperation, + std::function<const Operand&(uint32_t)> getOperand, std::function<const uint8_t*(uint32_t)> getValue, uint32_t operationIndex) { - const V1_3::Operation& operation = getOperation(operationIndex); + const Operation& operation = getOperation(operationIndex); switch (operation.type) { - case V1_3::OperationType::ADD: - case V1_3::OperationType::MUL: - case V1_3::OperationType::DIV: - case V1_3::OperationType::SUB: { + case OperationType::ADD: + case OperationType::MUL: + case OperationType::DIV: + case OperationType::SUB: { // input2 is the fused activation function - const V1_3::Operand& input2 = getOperand(operation.inputs[2]); - if ((input2.type == V1_3::OperandType::INT32) && - (input2.lifetime == V1_3::OperandLifeTime::CONSTANT_COPY)) { + const Operand& input2 = getOperand(operation.inputs[2]); + if ((input2.type == OperandType::INT32) && + (input2.lifetime == OperandLifeTime::CONSTANT_COPY)) { int32_t value; CHECK_EQ(sizeof(value), input2.location.length); memcpy(&value, getValue(input2.location.offset), input2.location.length); @@ -285,15 +279,11 @@ return kBadOperation; } -uint32_t lookupOperation(const HidlModel& model, const V1_3::Subgraph& subgraph, +uint32_t lookupOperation(const HidlModel& model, const Subgraph& subgraph, uint32_t operationIndex) { return lookupOperation( - [&subgraph](uint32_t index) -> const V1_3::Operation& { - return subgraph.operations[index]; - }, - [&subgraph](uint32_t index) -> const V1_3::Operand& { - return subgraph.operands[index]; - }, + [&subgraph](uint32_t index) -> const Operation& { return subgraph.operations[index]; }, + [&subgraph](uint32_t index) -> const Operand& { return subgraph.operands[index]; }, [&model](uint32_t offset) { return &model.operandValues[offset]; }, operationIndex); } @@ 
-301,11 +291,12 @@ // This is a debugging utility function void dump(const char* name, const ModelBuilder* model) { const HidlModel hidlModel = model->makeHidlModel(); - std::cout << name << ": " << hidlModel << std::endl; - std::cout << "inputs: " << hidlModel.main.inputIndexes << std::endl; - std::cout << "outputs: " << hidlModel.main.outputIndexes << std::endl; + std::cout << name << ": " << toString(hidlModel) << std::endl; + std::cout << "inputs: " << toString(hidlModel.main.inputIndexes) << std::endl; + std::cout << "outputs: " << toString(hidlModel.main.outputIndexes) << std::endl; for (size_t i = 0, e = hidlModel.main.operations.size(); i < e; i++) { - std::cout << "operation[" << i << "]: " << hidlModel.main.operations[i] << std::endl; + std::cout << "operation[" << i << "]: " << toString(hidlModel.main.operations[i]) + << std::endl; } } #endif @@ -318,6 +309,52 @@ // operation kind K corresponds to the bit (1 << K). The other operations are // represented by a set of OperationType. class PartitioningDriver : public SampleDriver { + private: + // Dummy class -- a prepared model must not be nullptr. 
+ class PartitioningPreparedModel : public IPreparedModel { + public: + Return<V1_0::ErrorStatus> execute(const V1_0::Request&, + const sp<V1_0::IExecutionCallback>&) override { + return V1_0::ErrorStatus::DEVICE_UNAVAILABLE; + } + Return<V1_0::ErrorStatus> execute_1_2(const V1_0::Request&, MeasureTiming, + const sp<V1_2::IExecutionCallback>&) override { + return V1_0::ErrorStatus::DEVICE_UNAVAILABLE; + } + Return<V1_3::ErrorStatus> execute_1_3(const V1_3::Request&, MeasureTiming, + const OptionalTimePoint&, + const OptionalTimeoutDuration&, + const sp<V1_3::IExecutionCallback>&) override { + return V1_3::ErrorStatus::DEVICE_UNAVAILABLE; + } + Return<void> executeSynchronously(const V1_0::Request&, MeasureTiming, + executeSynchronously_cb cb) override { + cb(V1_0::ErrorStatus::DEVICE_UNAVAILABLE, {}, kBadTiming); + return Void(); + } + Return<void> executeSynchronously_1_3(const V1_3::Request&, MeasureTiming, + const OptionalTimePoint&, + const OptionalTimeoutDuration&, + executeSynchronously_1_3_cb cb) override { + cb(V1_3::ErrorStatus::DEVICE_UNAVAILABLE, {}, kBadTiming); + return Void(); + } + Return<void> configureExecutionBurst( + const sp<V1_2::IBurstCallback>& /*callback*/, + const MQDescriptorSync<V1_2::FmqRequestDatum>& /*requestChannel*/, + const MQDescriptorSync<V1_2::FmqResultDatum>& /*resultChannel*/, + configureExecutionBurst_cb cb) override { + cb(V1_0::ErrorStatus::DEVICE_UNAVAILABLE, nullptr); + return Void(); + } + Return<void> executeFenced(const Request&, const hidl_vec<hidl_handle>&, MeasureTiming, + const OptionalTimePoint&, const OptionalTimeoutDuration&, + const OptionalTimeoutDuration&, executeFenced_cb cb) { + cb(ErrorStatus::DEVICE_UNAVAILABLE, hidl_handle(nullptr), nullptr); + return Void(); + } + }; + public: enum OEM { OEMNo, // rejected by getSupportedOperations and prepareModel @@ -325,122 +362,94 @@ OEMYes, // accepted by getSupportedOperations and prepareModel }; - PartitioningDriver(const char* name, const char* version, 
V1_3::Capabilities capabilities, + PartitioningDriver(const char* name, const char* version, Capabilities capabilities, uint32_t operationMask, OEM oem = OEMNo, - std::set<V1_3::OperationType> operationTypes = {}) + std::set<OperationType> operationTypes = {}) : SampleDriver(name), mVersionString(version), mCapabilities(capabilities), mOperationMask(operationMask), mOEM(oem), mOperationTypes(std::move(operationTypes)) { - CHECK_EQ(mOperationTypes.count(V1_3::OperationType::OEM_OPERATION), size_t(0)); - if (operationMask) { - std::for_each(mOperationTypes.begin(), mOperationTypes.end(), - [](V1_3::OperationType type) { - CHECK_EQ(operationToFirstEncoding.count(type), size_t(0)); - }); - } + CHECK_EQ(mOperationTypes.count(OperationType::OEM_OPERATION), size_t(0)); + std::for_each(mOperationTypes.begin(), mOperationTypes.end(), [](OperationType type) { + CHECK_EQ(operationToFirstEncoding.count(type), size_t(0)); + }); } ~PartitioningDriver() override {} - hardware::Return<void> getVersionString(getVersionString_cb cb) override { + Return<void> getVersionString(getVersionString_cb cb) override { cb(V1_0::ErrorStatus::NONE, mVersionString); - return hardware::Void(); + return Void(); } - hardware::Return<V1_3::ErrorStatus> prepareModel_1_3( - const V1_3::Model& model, V1_1::ExecutionPreference preference, V1_3::Priority priority, - const V1_3::OptionalTimePoint& deadline, - const hardware::hidl_vec<hardware::hidl_handle>& modelCache, - const hardware::hidl_vec<hardware::hidl_handle>& dataCache, const HalCacheToken& token, - const sp<V1_3::IPreparedModelCallback>& callback) override { - if (mOEM == OEMIndecisive) { + Return<V1_3::ErrorStatus> prepareModel_1_3( + const Model& model, ExecutionPreference, Priority, const OptionalTimePoint&, + const hidl_vec<hidl_handle>&, const hidl_vec<hidl_handle>&, const CacheToken&, + const sp<V1_3::IPreparedModelCallback>& cb) override { + V1_3::ErrorStatus status = V1_3::ErrorStatus::NONE; + if (mOEM != OEMYes) { for (const auto& 
operation : model.main.operations) { - if (operation.type == V1_3::OperationType::OEM_OPERATION) { - callback->notify_1_3(V1_3::ErrorStatus::INVALID_ARGUMENT, nullptr); - return V1_3::ErrorStatus::INVALID_ARGUMENT; + if (operation.type == OperationType::OEM_OPERATION) { + status = V1_3::ErrorStatus::INVALID_ARGUMENT; + break; } } } - - // NOTE: We verify that all operations in the model are supported. - V1_3::ErrorStatus outStatus = V1_3::ErrorStatus::INVALID_ARGUMENT; - auto ret = getSupportedOperations_1_3( - model, [&outStatus](V1_3::ErrorStatus inStatus, - const hardware::hidl_vec<bool>& supportedOperations) { - if (inStatus == V1_3::ErrorStatus::NONE) { - if (std::all_of(supportedOperations.begin(), supportedOperations.end(), - [](bool v) { return v; })) { - outStatus = V1_3::ErrorStatus::NONE; - } - } - }); - if (ret.isOk() && (outStatus == V1_3::ErrorStatus::NONE)) { - return SampleDriver::prepareModel_1_3(model, preference, priority, deadline, modelCache, - dataCache, token, callback); - } else { - callback->notify_1_3(V1_3::ErrorStatus::INVALID_ARGUMENT, nullptr); - return V1_3::ErrorStatus::INVALID_ARGUMENT; - } + cb->notify_1_3(status, new PartitioningPreparedModel); + return status; } - hardware::Return<V1_0::DeviceStatus> getStatus() override { - return V1_0::DeviceStatus::AVAILABLE; - } + Return<DeviceStatus> getStatus() override { return DeviceStatus::AVAILABLE; } - hardware::Return<void> getCapabilities_1_3(getCapabilities_1_3_cb cb) override { + Return<void> getCapabilities_1_3(getCapabilities_1_3_cb cb) override { cb(V1_3::ErrorStatus::NONE, mCapabilities); - return hardware::Void(); + return Void(); } - hardware::Return<void> getSupportedOperations_1_3(const V1_3::Model& model, - getSupportedOperations_1_3_cb cb) override { + Return<void> getSupportedOperations_1_3(const Model& model, + getSupportedOperations_1_3_cb cb) override { if (!android::nn::validateModel(model)) { cb(V1_3::ErrorStatus::INVALID_ARGUMENT, std::vector<bool>()); - return 
hardware::Void(); + return Void(); } cb(V1_3::ErrorStatus::NONE, getSupportedOperationsForSubgraph(model, model.main)); - return hardware::Void(); + return Void(); } - hardware::Return<void> getNumberOfCacheFilesNeeded(getNumberOfCacheFilesNeeded_cb cb) override { + Return<void> getNumberOfCacheFilesNeeded(getNumberOfCacheFilesNeeded_cb cb) override { cb(V1_0::ErrorStatus::NONE, /*numModelCache=*/1, /*numDataCache=*/1); - return hardware::Void(); + return Void(); + } + + Return<V1_0::ErrorStatus> prepareModelFromCache( + const hidl_vec<hidl_handle>&, const hidl_vec<hidl_handle>&, const CacheToken&, + const sp<V1_2::IPreparedModelCallback>& callback) override { + callback->notify_1_2(V1_0::ErrorStatus::NONE, new PartitioningPreparedModel); + return V1_0::ErrorStatus::NONE; } private: - std::vector<bool> getSupportedOperationsForSubgraph(const V1_3::Model& model, - const V1_3::Subgraph& subgraph) { - CHECK(&subgraph == &model.main || - std::find_if(model.referenced.begin(), model.referenced.end(), - [&subgraph](const V1_3::Subgraph& refSubgraph) { - return &subgraph == &refSubgraph; - }) != model.referenced.end()); + std::vector<bool> getSupportedOperationsForSubgraph(const Model& model, + const Subgraph& subgraph) { auto supportsEntireSubgraph = [this, &model, &subgraph](uint32_t refSubgraphOperandIndex) { - CHECK_LT(refSubgraphOperandIndex, subgraph.operands.size()); - const V1_3::Operand& refSubgraphOperand = subgraph.operands[refSubgraphOperandIndex]; - CHECK(refSubgraphOperand.lifetime == V1_3::OperandLifeTime::SUBGRAPH); - CHECK_LT(refSubgraphOperand.location.offset, model.referenced.size()); - const V1_3::Subgraph& refSubgraph = - model.referenced[refSubgraphOperand.location.offset]; + const Operand& refSubgraphOperand = subgraph.operands[refSubgraphOperandIndex]; + const Subgraph& refSubgraph = model.referenced[refSubgraphOperand.location.offset]; std::vector<bool> supported = getSupportedOperationsForSubgraph(model, refSubgraph); return 
std::all_of(supported.begin(), supported.end(), [](bool x) { return x; }); }; const size_t count = subgraph.operations.size(); std::vector<bool> supported(count); for (size_t i = 0; i < count; i++) { - const V1_3::Operation& operation = subgraph.operations[i]; + const Operation operation = subgraph.operations[i]; if (mOperationTypes.count(operation.type)) { - if (operation.type == V1_3::OperationType::IF) { + if (operation.type == OperationType::IF) { namespace op = android::nn::operation_if; - CHECK_GE(operation.inputs.size(), op::kFirstInput); supported[i] = supportsEntireSubgraph(operation.inputs[op::kThenModelOperand]) && supportsEntireSubgraph(operation.inputs[op::kElseModelOperand]); - } else if (operation.type == V1_3::OperationType::WHILE) { + } else if (operation.type == OperationType::WHILE) { namespace op = android::nn::operation_while; - CHECK_GE(operation.inputs.size(), op::kFirstInput); supported[i] = supportsEntireSubgraph(operation.inputs[op::kCondModelOperand]) && supportsEntireSubgraph(operation.inputs[op::kBodyModelOperand]); @@ -449,7 +458,7 @@ } continue; } - if (operation.type == V1_3::OperationType::OEM_OPERATION) { + if (operation.type == OperationType::OEM_OPERATION) { supported[i] = (mOEM != OEMNo); continue; } @@ -464,75 +473,72 @@ } std::string mVersionString; - V1_3::Capabilities mCapabilities; + Capabilities mCapabilities; uint32_t mOperationMask; OEM mOEM; - std::set<V1_3::OperationType> mOperationTypes; + std::set<OperationType> mOperationTypes; }; // Like PartitioningDriver, but implementing 1.2 class PartitioningDriverV1_2 : public V1_2::IDevice { public: - PartitioningDriverV1_2(const char* name, const char* version, V1_3::Capabilities capabilities, + PartitioningDriverV1_2(const char* name, const char* version, Capabilities capabilities, uint32_t operationMask, PartitioningDriver::OEM oem = PartitioningDriver::OEMNo, - std::set<V1_3::OperationType> operationTypes = {}) + std::set<OperationType> operationTypes = {}) : 
mLatestDriver(new PartitioningDriver(name, version, capabilities, operationMask, oem, operationTypes)) {} - hardware::Return<void> getCapabilities_1_2(getCapabilities_1_2_cb _hidl_cb) override { + Return<void> getCapabilities_1_2(getCapabilities_1_2_cb _hidl_cb) override { return mLatestDriver->getCapabilities_1_2(_hidl_cb); } - hardware::Return<void> getSupportedOperations_1_2( - const V1_2::Model& model, getSupportedOperations_1_2_cb _hidl_cb) override { + Return<void> getSupportedOperations_1_2(const V1_2::Model& model, + getSupportedOperations_1_2_cb _hidl_cb) override { return mLatestDriver->getSupportedOperations_1_2(model, _hidl_cb); } - hardware::Return<V1_0::ErrorStatus> prepareModel_1_2( - const V1_2::Model& model, V1_1::ExecutionPreference preference, - const hardware::hidl_vec<hardware::hidl_handle>& modelCache, - const hardware::hidl_vec<hardware::hidl_handle>& dataCache, const HalCacheToken& token, + Return<V1_0::ErrorStatus> prepareModel_1_2( + const V1_2::Model& model, ExecutionPreference preference, + const hidl_vec<hidl_handle>& modelCache, const hidl_vec<hidl_handle>& dataCache, + const CacheToken& token, const sp<V1_2::IPreparedModelCallback>& actualCallback) override { return mLatestDriver->prepareModel_1_2(model, preference, modelCache, dataCache, token, actualCallback); } - hardware::Return<void> getVersionString(getVersionString_cb _hidl_cb) override { + Return<void> getVersionString(getVersionString_cb _hidl_cb) override { return mLatestDriver->getVersionString(_hidl_cb); } - hardware::Return<void> getType(getType_cb _hidl_cb) override { - return mLatestDriver->getType(_hidl_cb); - } - hardware::Return<void> getSupportedExtensions(getSupportedExtensions_cb _hidl_cb) { + Return<void> getType(getType_cb _hidl_cb) override { return mLatestDriver->getType(_hidl_cb); } + Return<void> getSupportedExtensions(getSupportedExtensions_cb _hidl_cb) { return mLatestDriver->getSupportedExtensions(_hidl_cb); } - hardware::Return<void> 
getNumberOfCacheFilesNeeded(getNumberOfCacheFilesNeeded_cb _hidl_cb) { + Return<void> getNumberOfCacheFilesNeeded(getNumberOfCacheFilesNeeded_cb _hidl_cb) { return mLatestDriver->getNumberOfCacheFilesNeeded(_hidl_cb); } - hardware::Return<V1_0::ErrorStatus> prepareModelFromCache( - const hardware::hidl_vec<hardware::hidl_handle>& modelCache, - const hardware::hidl_vec<hardware::hidl_handle>& dataCache, const HalCacheToken& token, - const sp<V1_2::IPreparedModelCallback>& callback) { + Return<V1_0::ErrorStatus> prepareModelFromCache( + const hidl_vec<hidl_handle>& modelCache, const hidl_vec<hidl_handle>& dataCache, + const CacheToken& token, const sp<V1_2::IPreparedModelCallback>& callback) { return mLatestDriver->prepareModelFromCache(modelCache, dataCache, token, callback); } - hardware::Return<void> getCapabilities_1_1(getCapabilities_1_1_cb _hidl_cb) override { + Return<void> getCapabilities_1_1(getCapabilities_1_1_cb _hidl_cb) override { return mLatestDriver->getCapabilities_1_1(_hidl_cb); } - hardware::Return<void> getSupportedOperations_1_1( - const V1_1::Model& model, getSupportedOperations_1_1_cb _hidl_cb) override { + Return<void> getSupportedOperations_1_1(const V1_1::Model& model, + getSupportedOperations_1_1_cb _hidl_cb) override { return mLatestDriver->getSupportedOperations_1_1(model, _hidl_cb); } - hardware::Return<V1_0::ErrorStatus> prepareModel_1_1( - const V1_1::Model& model, V1_1::ExecutionPreference preference, + Return<V1_0::ErrorStatus> prepareModel_1_1( + const V1_1::Model& model, ExecutionPreference preference, const sp<V1_0::IPreparedModelCallback>& actualCallback) override { return mLatestDriver->prepareModel_1_1(model, preference, actualCallback); } - hardware::Return<V1_0::DeviceStatus> getStatus() override { return mLatestDriver->getStatus(); } - hardware::Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override { + Return<DeviceStatus> getStatus() override { return mLatestDriver->getStatus(); } + Return<void> 
getCapabilities(getCapabilities_cb _hidl_cb) override { return mLatestDriver->getCapabilities(_hidl_cb); } - hardware::Return<void> getSupportedOperations(const V1_0::Model& model, - getSupportedOperations_cb _hidl_cb) override { + Return<void> getSupportedOperations(const V1_0::Model& model, + getSupportedOperations_cb _hidl_cb) override { return mLatestDriver->getSupportedOperations(model, _hidl_cb); } - hardware::Return<V1_0::ErrorStatus> prepareModel( + Return<V1_0::ErrorStatus> prepareModel( const V1_0::Model& model, const sp<V1_0::IPreparedModelCallback>& actualCallback) override { return mLatestDriver->prepareModel(model, actualCallback); @@ -545,33 +551,33 @@ // Like PartitioningDriver, but implementing 1.1 class PartitioningDriverV1_1 : public V1_1::IDevice { public: - PartitioningDriverV1_1(const char* name, const char* version, V1_3::Capabilities capabilities, + PartitioningDriverV1_1(const char* name, const char* version, Capabilities capabilities, uint32_t operationMask, PartitioningDriver::OEM oem = PartitioningDriver::OEMNo, - std::set<V1_3::OperationType> operationTypes = {}) + std::set<OperationType> operationTypes = {}) : mLatestDriver(new PartitioningDriver(name, version, capabilities, operationMask, oem, operationTypes)) {} - hardware::Return<void> getCapabilities_1_1(getCapabilities_1_1_cb _hidl_cb) override { + Return<void> getCapabilities_1_1(getCapabilities_1_1_cb _hidl_cb) override { return mLatestDriver->getCapabilities_1_1(_hidl_cb); } - hardware::Return<void> getSupportedOperations_1_1( - const V1_1::Model& model, getSupportedOperations_1_1_cb _hidl_cb) override { + Return<void> getSupportedOperations_1_1(const V1_1::Model& model, + getSupportedOperations_1_1_cb _hidl_cb) override { return mLatestDriver->getSupportedOperations_1_1(model, _hidl_cb); } - hardware::Return<V1_0::ErrorStatus> prepareModel_1_1( - const V1_1::Model& model, V1_1::ExecutionPreference preference, + Return<V1_0::ErrorStatus> prepareModel_1_1( + const V1_1::Model& 
model, ExecutionPreference preference, const sp<V1_0::IPreparedModelCallback>& actualCallback) override { return mLatestDriver->prepareModel_1_1(model, preference, actualCallback); } - hardware::Return<V1_0::DeviceStatus> getStatus() override { return mLatestDriver->getStatus(); } - hardware::Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override { + Return<DeviceStatus> getStatus() override { return mLatestDriver->getStatus(); } + Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override { return mLatestDriver->getCapabilities(_hidl_cb); } - hardware::Return<void> getSupportedOperations(const V1_0::Model& model, - getSupportedOperations_cb _hidl_cb) override { + Return<void> getSupportedOperations(const V1_0::Model& model, + getSupportedOperations_cb _hidl_cb) override { return mLatestDriver->getSupportedOperations(model, _hidl_cb); } - hardware::Return<V1_0::ErrorStatus> prepareModel( + Return<V1_0::ErrorStatus> prepareModel( const V1_0::Model& model, const sp<V1_0::IPreparedModelCallback>& actualCallback) override { return mLatestDriver->prepareModel(model, actualCallback); @@ -584,89 +590,30 @@ // Like PartitioningDriver, but implementing 1.0 class PartitioningDriverV1_0 : public V1_0::IDevice { public: - PartitioningDriverV1_0(const char* name, const char* version, V1_3::Capabilities capabilities, + PartitioningDriverV1_0(const char* name, const char* version, Capabilities capabilities, uint32_t operationMask, PartitioningDriver::OEM oem = PartitioningDriver::OEMNo, - std::set<V1_3::OperationType> operationTypes = {}) + std::set<OperationType> operationTypes = {}) : mLatestDriver(new PartitioningDriver(name, version, capabilities, operationMask, oem, operationTypes)) {} - hardware::Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override { + Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override { return mLatestDriver->getCapabilities(_hidl_cb); } - hardware::Return<void> getSupportedOperations(const V1_0::Model& 
model, - getSupportedOperations_cb _hidl_cb) override { + Return<void> getSupportedOperations(const V1_0::Model& model, + getSupportedOperations_cb _hidl_cb) override { return mLatestDriver->getSupportedOperations(model, _hidl_cb); } - hardware::Return<V1_0::ErrorStatus> prepareModel( + Return<V1_0::ErrorStatus> prepareModel( const V1_0::Model& model, const sp<V1_0::IPreparedModelCallback>& actualCallback) override { return mLatestDriver->prepareModel(model, actualCallback); } - hardware::Return<V1_0::DeviceStatus> getStatus() override { return mLatestDriver->getStatus(); } + Return<DeviceStatus> getStatus() override { return mLatestDriver->getStatus(); } private: const sp<V1_3::IDevice> mLatestDriver; }; -enum class Dimensioned { - NO, // either a scalar, or a tensor of either unspecified rank (usually) - // or specified rank but with no specified dimensions (where - // specifically stated) - RANK_1, // tensor of shape { 0 } -- i.e., rank 1, unspecified dimensions - RANK_2, // tensor of shape { 0, 0 } -- i.e., rank 2, unspecified dimensions - YES_1, // tensor of shape { 1 } - YES_2, // tensor of shape { 2 } - YES_4, // tensor of shape { 4 } - YES = YES_1 -}; - -std::vector<uint32_t> dimensions(Dimensioned dimensioned) { - switch (dimensioned) { - default: - EXPECT_TRUE(false) << "Unknown value"; - FALLTHROUGH_INTENDED; - case Dimensioned::NO: - return {}; - case Dimensioned::RANK_1: - return {0}; - case Dimensioned::RANK_2: - return {0, 0}; - case Dimensioned::YES_1: - return {1}; - case Dimensioned::YES_2: - return {2}; - case Dimensioned::YES_4: - return {4}; - } -} - -// "dimensioned" must be a fully specified kind -uint32_t numberOfElements(Dimensioned dimensioned) { - auto dims = dimensions(dimensioned); - uint32_t result = std::reduce(dims.begin(), dims.end(), 1u, std::multiplies<>()); - CHECK_GT(result, 0u); - return result; -} - -std::string toString(Dimensioned dimensioned) { - switch (dimensioned) { - default: - return "<Unknown value>"; - case 
Dimensioned::NO: - return "NO"; - case Dimensioned::RANK_1: - return "RANK_1"; - case Dimensioned::RANK_2: - return "RANK_2"; - case Dimensioned::YES_1: - return "YES_1"; - case Dimensioned::YES_2: - return "YES_2"; - case Dimensioned::YES_4: - return "YES_4"; - } -} - // This class adds some simple abstractions and utilities on top of // WrapperModel. For example, it provides methods that work in terms of // operation kind (0..7); and because we care about graph topology rather than @@ -679,20 +626,11 @@ using WrapperModel::identifyInputsAndOutputs; using WrapperModel::isValid; using WrapperModel::relaxComputationFloat32toFloat16; - using WrapperModel::setOperandValue; + + enum class Dimensioned { NO, YES }; // Create a tensor operand of the specified type, and return the // corresponding operand index. - uint32_t addIntOperand(Dimensioned dimensioned = Dimensioned::YES) { - return addOperand(WrapperType::TENSOR_INT32, dimensioned); - } - uint32_t addIntScalarOperand(std::optional<int> v = std::nullopt) { - uint32_t opnd = addOperand(WrapperType::INT32); - if (v.has_value()) { - setOperandValue(opnd, &v.value()); - } - return opnd; - } uint32_t addFloatOperand(Dimensioned dimensioned = Dimensioned::YES) { return addOperand(WrapperType::TENSOR_FLOAT32, dimensioned); } @@ -702,20 +640,18 @@ uint32_t addBooleanOperand(Dimensioned dimensioned = Dimensioned::YES) { return addOperand(WrapperType::TENSOR_BOOL8, dimensioned); } - uint32_t addFloatZeroOperand(Dimensioned dimensioned = Dimensioned::YES) { - uint32_t opnd = addFloatOperand(dimensioned); - std::vector<float> values(numberOfElements(dimensioned), 0.0f); - uint32_t size = values.size() * sizeof(float); - // Make sure the values are immediately copied so that it is safe to free the buffer after - // the setOperandValue call - CHECK_LE(size, ANEURALNETWORKS_MAX_SIZE_OF_IMMEDIATELY_COPIED_VALUES); - setOperandValue(opnd, values.data(), size); - return opnd; - } // Create an operand of the specified type, and 
return the corresponding // operand index. uint32_t addOperand(WrapperType wrapperType, Dimensioned dimensioned = Dimensioned::YES) { + auto dimensions = [dimensioned]() -> std::vector<uint32_t> { + if (dimensioned == Dimensioned::YES) { + return {1}; + } else { + return {}; + } + }; + switch (static_cast<int>(wrapperType)) { case ANEURALNETWORKS_BOOL: case ANEURALNETWORKS_FLOAT16: @@ -723,26 +659,30 @@ case ANEURALNETWORKS_INT32: case ANEURALNETWORKS_UINT32: case ANEURALNETWORKS_MODEL: - case ANEURALNETWORKS_OEM_SCALAR: + case ANEURALNETWORKS_OEM_SCALAR: { return addOperand(WrapperOperandType{wrapperType, {}}); + } case ANEURALNETWORKS_TENSOR_BOOL8: case ANEURALNETWORKS_TENSOR_FLOAT16: case ANEURALNETWORKS_TENSOR_FLOAT32: - case ANEURALNETWORKS_TENSOR_OEM_BYTE: - return addOperand(WrapperOperandType{wrapperType, dimensions(dimensioned)}); + case ANEURALNETWORKS_TENSOR_OEM_BYTE: { + return addOperand(WrapperOperandType{wrapperType, dimensions()}); + } case ANEURALNETWORKS_TENSOR_INT32: case ANEURALNETWORKS_TENSOR_QUANT8_ASYMM: case ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED: case ANEURALNETWORKS_TENSOR_QUANT8_SYMM: case ANEURALNETWORKS_TENSOR_QUANT16_ASYMM: - case ANEURALNETWORKS_TENSOR_QUANT16_SYMM: - return addOperand(WrapperOperandType{wrapperType, dimensions(dimensioned), 1.0f}); + case ANEURALNETWORKS_TENSOR_QUANT16_SYMM: { + return addOperand(WrapperOperandType{wrapperType, dimensions(), 1.0f}); + } - case ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL: - return addOperand(WrapperOperandType{wrapperType, dimensions(dimensioned), + case ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL: { + return addOperand(WrapperOperandType{wrapperType, dimensions(), WrapperSymmPerChannelQuantParams({1.0f}, 0)}); + } default: ADD_FAILURE() << "Unexpected type " << static_cast<uint32_t>(wrapperType); @@ -845,7 +785,7 @@ // Run the partitioning algorithm to create an ExecutionPlan. 
int partitionTheWork(const std::vector<std::shared_ptr<Device>>& devices, ExecutePreference preference, ExecutePriority priority, - const OptionalTimePoint& deadline, ExecutionPlan* plan) { + const std::optional<Deadline>& deadline, ExecutionPlan* plan) { return reinterpret_cast<ModelBuilder*>(getHandle()) ->partitionTheWork(devices, static_cast<uint32_t>(preference), static_cast<int32_t>(priority), deadline, plan); @@ -912,21 +852,11 @@ // Create an operand of the same type as the specified operand, // and return the operand index of the new operand. - // - // If a tensor, the new operand will have the same rank as the specified - // operand. If dimensioned == Dimensioned::NO, then all dimensions of a new - // tensor operand will be unspecified. If dimensioned != Dimensioned::NO, - // then all dimensions of a new tensor operand will have the implied value - // (e.g., YES_1 means each dimension will have the value "1"). uint32_t addOperandOfSameType(uint32_t operand, Dimensioned dimensioned = Dimensioned::YES) { WrapperOperandType type = mWrapperOperandType.at(operand); - - const auto d = dimensions(dimensioned); - EXPECT_TRUE(d.size() <= 1); for (auto& dimension : type.dimensions) { - dimension = (dimensioned == Dimensioned::NO ? 0 : d[0]); + dimension = (dimensioned == Dimensioned::YES); } - mWrapperOperandType.push_back(type); return WrapperModel::addOperand(&type); } @@ -948,13 +878,7 @@ } Result setPartitioning(uint32_t partitioning) { - return static_cast<Result>(builder()->forTest_setPartitioning(partitioning)); - } - - // Simulate recoverable partitioning failure. 
- Result failPartitioning() { - return static_cast<Result>( - builder()->forTest_failPartitioning(static_cast<int>(Result::OP_FAILED))); + return static_cast<Result>(builder()->setPartitioning(partitioning)); } using WrapperCompilation::finish; @@ -992,21 +916,15 @@ class PartitioningTest : public ::testing::Test { protected: - using DynamicTemporariesType = decltype(ExecutionPlan().forTest_flatGetDynamicTemporaries()); using RemapVectorType = ExecutionStep::RemapVectorType; using StepModelOutputSetType = ExecutionStep::StepModelOutputSetType; - // Used for PartitioningTest::checkExecutionPlanSteps. - static constexpr const char* kIfStep = "IF"; - static constexpr const char* kWhileStep = "WHILE"; - static constexpr const char* kGotoStep = "GOTO"; - virtual void SetUp() {} // From a vector of DeviceSpecification, create a vector of // Devices. struct DeviceSpecification { - DeviceSpecification(const std::string& name, const V1_3::Capabilities& capabilities, + DeviceSpecification(const std::string& name, const Capabilities& capabilities, uint32_t operationMask, PartitioningDriver::OEM oem = PartitioningDriver::OEMNo) : mName(name), @@ -1016,38 +934,30 @@ mOEM(oem) {} DeviceSpecification(const std::string& name, float perf, uint32_t operationMask, PartitioningDriver::OEM oem = PartitioningDriver::OEMNo, - HalVersion halVersion = HalVersion::LATEST, - std::set<V1_3::OperationType> operationTypes = {}) - : DeviceSpecification(name, perf, perf, operationMask, oem, halVersion, - operationTypes) {} + std::set<OperationType> operationTypes = {}) + : DeviceSpecification(name, perf, perf, operationMask, oem, operationTypes) {} DeviceSpecification(const std::string& name, float perf, float perfRelaxed, uint32_t operationMask, PartitioningDriver::OEM oem = PartitioningDriver::OEMNo, - HalVersion halVersion = HalVersion::LATEST, - std::set<V1_3::OperationType> operationTypes = {}) + std::set<OperationType> operationTypes = {}) : DeviceSpecification(name, kVersionString, perf, 
perfRelaxed, operationMask, oem, - halVersion, operationTypes) {} + operationTypes) {} DeviceSpecification(const std::string& name, const std::string& version, float perf, uint32_t operationMask, PartitioningDriver::OEM oem = PartitioningDriver::OEMNo, - HalVersion halVersion = HalVersion::LATEST, - std::set<V1_3::OperationType> operationTypes = {}) - : DeviceSpecification(name, version, perf, perf, operationMask, oem, halVersion, - operationTypes) {} + std::set<OperationType> operationTypes = {}) + : DeviceSpecification(name, version, perf, perf, operationMask, oem, operationTypes) {} DeviceSpecification(const std::string& name, const std::string& version, float perf, float perfRelaxed, uint32_t operationMask, PartitioningDriver::OEM oem = PartitioningDriver::OEMNo, - HalVersion halVersion = HalVersion::LATEST, - std::set<V1_3::OperationType> operationTypes = {}) + std::set<OperationType> operationTypes = {}) : mName(name), mVersionString(version), - mHalVersion(halVersion), mOperationMask(operationMask), mOEM(oem), mOperationTypes(std::move(operationTypes)) { - V1_0::PerformanceInfo perfInfo = {.execTime = perf, .powerUsage = perf}; - V1_0::PerformanceInfo perfRelaxedInfo = {.execTime = perfRelaxed, - .powerUsage = perfRelaxed}; + PerformanceInfo perfInfo = {.execTime = perf, .powerUsage = perf}; + PerformanceInfo perfRelaxedInfo = {.execTime = perfRelaxed, .powerUsage = perfRelaxed}; mCapabilities = { .relaxedFloat32toFloat16PerformanceScalar = perfRelaxedInfo, .relaxedFloat32toFloat16PerformanceTensor = perfRelaxedInfo, @@ -1069,11 +979,11 @@ std::string mName; std::string mVersionString; - V1_3::Capabilities mCapabilities; + Capabilities mCapabilities; HalVersion mHalVersion = HalVersion::LATEST; uint32_t mOperationMask; PartitioningDriver::OEM mOEM = PartitioningDriver::OEMNo; - std::set<V1_3::OperationType> mOperationTypes; + std::set<OperationType> mOperationTypes; static constexpr char kVersionString[] = "JUST_AN_EXAMPLE"; @@ -1127,96 +1037,42 @@ 
std::vector<DeviceSpecification> specifications) { std::vector<std::shared_ptr<Device>> devices; for (const auto& specification : specifications) { - SharedDevice device = nullptr; + V1_0::IDevice* halDriver = nullptr; switch (specification.mHalVersion) { case HalVersion::V1_3: - device = android::nn::makeSharedDevice( - specification.mName, - new PartitioningDriver(specification.mName.c_str(), - specification.mVersionString.c_str(), - specification.mCapabilities, - specification.mOperationMask, specification.mOEM, - specification.mOperationTypes)); + halDriver = new PartitioningDriver( + specification.mName.c_str(), specification.mVersionString.c_str(), + specification.mCapabilities, specification.mOperationMask, + specification.mOEM, specification.mOperationTypes); break; case HalVersion::V1_2: - device = android::nn::makeSharedDevice( - specification.mName, - new PartitioningDriverV1_2( - specification.mName.c_str(), - specification.mVersionString.c_str(), - specification.mCapabilities, specification.mOperationMask, - specification.mOEM, specification.mOperationTypes)); + halDriver = new PartitioningDriverV1_2( + specification.mName.c_str(), specification.mVersionString.c_str(), + specification.mCapabilities, specification.mOperationMask, + specification.mOEM, specification.mOperationTypes); break; case HalVersion::V1_1: - device = android::nn::makeSharedDevice( - specification.mName, - new PartitioningDriverV1_1( - specification.mName.c_str(), - specification.mVersionString.c_str(), - specification.mCapabilities, specification.mOperationMask, - specification.mOEM, specification.mOperationTypes)); + halDriver = new PartitioningDriverV1_1( + specification.mName.c_str(), specification.mVersionString.c_str(), + specification.mCapabilities, specification.mOperationMask, + specification.mOEM, specification.mOperationTypes); break; case HalVersion::V1_0: - device = android::nn::makeSharedDevice( - specification.mName, - new PartitioningDriverV1_0( - 
specification.mName.c_str(), - specification.mVersionString.c_str(), - specification.mCapabilities, specification.mOperationMask, - specification.mOEM, specification.mOperationTypes)); + halDriver = new PartitioningDriverV1_0( + specification.mName.c_str(), specification.mVersionString.c_str(), + specification.mCapabilities, specification.mOperationMask, + specification.mOEM, specification.mOperationTypes); break; default: ADD_FAILURE() << "Unexpected"; } - auto driverDevice = DeviceManager::forTest_makeDriverDevice(device); - devices.push_back(std::move(driverDevice)); + auto device = DeviceManager::forTest_makeDriverDevice(specification.mName, halDriver); + devices.push_back(device); } devices.push_back(DeviceManager::getCpuDevice()); return devices; } - static std::string stepsToString(const std::vector<std::string>& steps) { - std::stringstream ss; - ss << "[ "; - for (const auto& step : steps) { - ss << step << " "; - } - ss << "]"; - return ss.str(); - } - - // Checks the type of each logical step in an execution plan. - // Each entry of "expected" is either: kIfStep for IfStep, kWhileStep for WhileStep, - // kGotoStep for GotoStep, or the device name for ExecutionStep. 
- void checkExecutionPlanSteps(const ExecutionPlan& plan, - const std::vector<std::string>& expected) { - ASSERT_GT(expected.size(), 0u); - - std::vector<std::string> actual; - if (expected.size() == 1) { - ASSERT_EQ(plan.forTest_getKind(), ExecutionPlan::Kind::SIMPLE); - actual.emplace_back(plan.forTest_simpleGetDevice()->getName()); - } else { - ASSERT_EQ(plan.forTest_getKind(), ExecutionPlan::Kind::COMPOUND); - const auto& steps = plan.forTest_compoundGetSteps(); - for (const auto& step : steps) { - if (step->isIf()) { - actual.emplace_back(kIfStep); - } else if (step->isWhile()) { - actual.emplace_back(kWhileStep); - } else if (step->isGoto()) { - actual.emplace_back(kGotoStep); - } else if (step->isExecution()) { - actual.emplace_back(step->executionStep()->getDevice()->getName()); - } else { - ASSERT_FALSE(true) << "Unknown LogicalStep"; - } - } - } - ASSERT_TRUE(actual == expected) - << "expected: " << stepsToString(expected) << ", actual: " << stepsToString(actual); - } - /*-- Graph comparision ----------------------------------------------------------------*/ // An operand with certain values for its lifetime does not have a @@ -1256,7 +1112,7 @@ // actual definitions ASSERT_LT(model->operationCount(), kPseudoDefiningOperationBase); for (uint32_t i = 0, e = model->operationCount(); i < e; i++) { - const V1_3::Operation& operation = android::nn::convertToV1_3(model->getOperation(i)); + const Operation& operation = model->getOperation(i); for (uint32_t output : operation.outputs) { (*defMap)[output] = i; } @@ -1268,12 +1124,12 @@ } // look for NO_VALUE and CONSTANT_COPY for (uint32_t i = 0, e = model->operandCount(); i < e; i++) { - const V1_3::Operand& operand = android::nn::convertToV1_3(model->getOperand(i)); + const Operand& operand = model->getOperand(i); switch (operand.lifetime) { - case V1_3::OperandLifeTime::NO_VALUE: + case OperandLifeTime::NO_VALUE: (*defMap)[i] = kPseudoDefiningOperationNoValue; break; - case V1_3::OperandLifeTime::CONSTANT_COPY: 
{ + case OperandLifeTime::CONSTANT_COPY: { ASSERT_EQ(operand.location.length, sizeof(uint32_t)); uint32_t value; memcpy(&value, model->getPointerToOperandValue(operand.location.offset), @@ -1282,9 +1138,9 @@ (*defMap)[i] = kPseudoDefiningOperationConstantCopy0 + value; break; } - case V1_3::OperandLifeTime::TEMPORARY_VARIABLE: - case V1_3::OperandLifeTime::SUBGRAPH_INPUT: - case V1_3::OperandLifeTime::SUBGRAPH_OUTPUT: + case OperandLifeTime::TEMPORARY_VARIABLE: + case OperandLifeTime::SUBGRAPH_INPUT: + case OperandLifeTime::SUBGRAPH_OUTPUT: // already handled break; default: @@ -1292,7 +1148,7 @@ break; } } - // validity check + // sanity check ASSERT_EQ(model->operandCount(), defMap->size()); } @@ -1326,6 +1182,7 @@ bool compare(const Operand& operandA, const Operand& operandB) { if (operandA.type != operandB.type || operandA.dimensions != operandB.dimensions || + operandA.numberOfConsumers != operandB.numberOfConsumers || operandA.scale != operandB.scale || operandA.zeroPoint != operandB.zeroPoint) { return false; } @@ -1403,12 +1260,6 @@ uint32_t outputA = modelA->getOutputOperandIndex(i); uint32_t outputB = modelB->getOutputOperandIndex(i); if (!compare(modelA->getOperand(outputA), modelB->getOperand(outputB))) { -#ifdef VERBOSE - std::cout << "modelA.output[" << i << "] = operand[" << outputA - << "] = " << toString(modelA->getOperand(outputA)) << std::endl; - std::cout << "modelB.output[" << i << "] = operand[" << outputB - << "] = " << toString(modelB->getOperand(outputB)) << std::endl; -#endif RETURN_FALSE(); } equivalentOperandsAToB[outputA] = outputB; @@ -1486,12 +1337,6 @@ } // We haven't identified an equivalent operand for inputA. 
if (!compare(modelA->getOperand(inputA), modelB->getOperand(inputB))) { -#ifdef VERBOSE - std::cout << "modelA.input[" << i << "] = operand[" << inputA - << "] = " << toString(modelA->getOperand(inputA)) << std::endl; - std::cout << "modelB.input[" << i << "] = operand[" << inputB - << "] = " << toString(modelB->getOperand(inputB)) << std::endl; -#endif RETURN_FALSE(); } equivalentOperandsAToB[inputA] = inputB; @@ -1499,7 +1344,7 @@ } } - // Validity check + // Sanity check if (modelA->operandCount() != defsA.size() || modelA->operandCount() != defsB.size() || modelA->operandCount() != equivalentOperandsAToB.size() || modelA->operationCount() + pseudoDefinitionCount != equivalentOperationsAToB.size()) { @@ -1537,8 +1382,7 @@ std::shared_ptr<Device> device, const RemapVectorType& modelInputs, const RemapVectorType& modelOutputs, const RemapVectorType& tempsAsStepModelInputs, const StepModelOutputSetType& tempsAsStepModelOutputs, - const RemapVectorType& outputsAsStepModelInputs, - const std::set<uint32_t>& modelOutputsThatAreDownstreamInputs) { + const RemapVectorType& outputsAsStepModelInputs) { ASSERT_TRUE(logicalStep->isExecution()); const ExecutionStep* step = logicalStep->executionStep(); std::map<uint32_t, uint32_t> inputsAndOutputsModelToStep; @@ -1556,8 +1400,6 @@ ASSERT_TRUE(compareRemapVectors(inputsAndOutputsModelToStep, step->getOutputsAsStepModelInputs(), outputsAsStepModelInputs)); - ASSERT_TRUE(modelOutputsThatAreDownstreamInputs == - step->getModelOutputsThatAreDownstreamInputs()); } private: @@ -1603,7 +1445,6 @@ ASSERT_EQ(model.partitionTheWork(devicesA, ExecutePreference::PREFER_LOW_POWER, ExecutePriority::DEFAULT, {}, &planA), ANEURALNETWORKS_NO_ERROR); - EXPECT_TRUE(planA.forTest_flatGetDynamicTemporaries().empty()); ASSERT_EQ(planA.forTest_getKind(), ExecutionPlan::Kind::SIMPLE); ASSERT_NE(planA.forTest_simpleGetDevice().get(), nullptr); ASSERT_EQ(planA.forTest_simpleGetDevice()->getName(), "good"); @@ -1616,7 +1457,6 @@ 
ASSERT_EQ(model.partitionTheWork(devicesC, ExecutePreference::PREFER_LOW_POWER, ExecutePriority::DEFAULT, {}, &planC), ANEURALNETWORKS_NO_ERROR); - EXPECT_TRUE(planC.forTest_flatGetDynamicTemporaries().empty()); ASSERT_EQ(planC.forTest_getKind(), ExecutionPlan::Kind::SIMPLE); ASSERT_EQ(planC.forTest_simpleGetDevice(), DeviceManager::getCpuDevice()); @@ -1629,7 +1469,6 @@ ASSERT_EQ(model.partitionTheWork(devicesB, ExecutePreference::PREFER_LOW_POWER, ExecutePriority::DEFAULT, {}, &planB), ANEURALNETWORKS_NO_ERROR); - EXPECT_TRUE(planB.forTest_flatGetDynamicTemporaries().empty()); ASSERT_EQ(planB.forTest_getKind(), ExecutionPlan::Kind::COMPOUND); const auto& stepsB = planB.forTest_compoundGetSteps(); ASSERT_EQ(stepsB.size(), size_t(2)); @@ -1649,8 +1488,7 @@ RemapVectorType{}, // modelOutputs RemapVectorType{}, // tempsAsStepModelInputs StepModelOutputSetType{{opnd2, b0Opnd2}}, // tempsAsStepModelOutputs - RemapVectorType{}, // outputsAsStepModelInputs - {})); // modelOutputsThatAreDownstreamInputs + RemapVectorType{})); // outputsAsStepModelInputs; } { // Build a model to compare against the step model from stepsB[1]. 
@@ -1672,8 +1510,7 @@ RemapVectorType{{opnd4, b1Opnd4}}, // modelOutputs RemapVectorType{{opnd2, b1Opnd2}}, // tempsAsStepModelInputs StepModelOutputSetType{}, // tempsAsStepModelOutputs - RemapVectorType{}, // outputsAsStepModelInputs - {})); // modelOutputsThatAreDownstreamInputs + RemapVectorType{})); // outputsAsStepModelInputs } } @@ -1701,7 +1538,6 @@ ASSERT_EQ(model.partitionTheWork(devicesA, ExecutePreference::PREFER_LOW_POWER, ExecutePriority::DEFAULT, {}, &planA), ANEURALNETWORKS_NO_ERROR); - EXPECT_TRUE(planA.forTest_flatGetDynamicTemporaries().empty()); ASSERT_EQ(planA.forTest_getKind(), ExecutionPlan::Kind::SIMPLE); ASSERT_NE(planA.forTest_simpleGetDevice().get(), nullptr); ASSERT_EQ(planA.forTest_simpleGetDevice()->getName(), "V1_3"); @@ -1716,7 +1552,6 @@ ASSERT_EQ(model.partitionTheWork(devicesB, ExecutePreference::PREFER_LOW_POWER, ExecutePriority::DEFAULT, {}, &planB), ANEURALNETWORKS_NO_ERROR); - EXPECT_TRUE(planB.forTest_flatGetDynamicTemporaries().empty()); ASSERT_EQ(planB.forTest_getKind(), ExecutionPlan::Kind::COMPOUND); const auto& stepsB = planB.forTest_compoundGetSteps(); ASSERT_EQ(stepsB.size(), size_t(4)); @@ -1736,8 +1571,7 @@ RemapVectorType{{opnd4, b0Opnd2}}, // modelOutputs RemapVectorType{}, // tempsAsStepModelInputs StepModelOutputSetType{}, // tempsAsStepModelOutputs - RemapVectorType{}, // outputsAsStepModelInputs - {})); // modelOutputsThatAreDownstreamInputs + RemapVectorType{})); // outputsAsStepModelInputs } { // Build a model to compare against the step model from stepsB[1]. @@ -1750,16 +1584,13 @@ modelB1.finish(); ASSERT_TRUE(modelB1.isValid()); - // Note that this is also an important test that we can detect - // modelOutputsThatAreDownstreamInputs. 
ASSERT_NO_FATAL_FAILURE( compare(stepsB[1], &modelB1, devicesB[0], RemapVectorType{{opnd0, b1Opnd0}, {opnd1, b1Opnd1}}, // modelInputs RemapVectorType{{opnd2, b1Opnd2}}, // modelOutputs RemapVectorType{}, // tempsAsStepModelInputs StepModelOutputSetType{{opnd3, b1Opnd3}}, // tempsAsStepModelOutputs - RemapVectorType{}, // outputsAsStepModelInputs - {0u})); // modelOutputsThatAreDownstreamInputs + RemapVectorType{})); // outputsAsStepModelInputs } { // Build a model to compare against the step model from stepsB[2]. @@ -1776,10 +1607,9 @@ ASSERT_NO_FATAL_FAILURE( compare(stepsB[2], &modelB2, devicesB[3], RemapVectorType{}, // modelInputs RemapVectorType{{opnd6, b2Opnd1}}, // modelOutputs - RemapVectorType{}, // tempsAsStepModelInputs - StepModelOutputSetType{}, // tempsAsStepModelOutputs - RemapVectorType{{opnd2, b2Opnd0}}, // outputsAsStepModelInputs - {})); // modelOutputsThatAreDownstreamInputs + RemapVectorType{}, // tempsAsStepModelInputs + StepModelOutputSetType{}, // tempsAsStepModelOutputs + RemapVectorType{{opnd2, b2Opnd0}})); // outputsAsStepModelInputs } { // Build a model to compare against the step model from stepsB[3]. 
@@ -1800,10 +1630,9 @@ ASSERT_NO_FATAL_FAILURE( compare(stepsB[3], &modelB3, devicesB[2], RemapVectorType{}, // modelInputs RemapVectorType{{opnd5, b3Opnd2}}, // modelOutputs - RemapVectorType{{opnd3, b3Opnd1}}, // tempsAsStepModelInputs - StepModelOutputSetType{}, // tempsAsStepModelOutputs - RemapVectorType{{opnd2, b3Opnd0}}, // outputsAsStepModelInputs - {})); // modelOutputsThatAreDownstreamInputs + RemapVectorType{{opnd3, b3Opnd1}}, // tempsAsStepModelInputs + StepModelOutputSetType{}, // tempsAsStepModelOutputs + RemapVectorType{{opnd2, b3Opnd0}})); // outputsAsStepModelInputs } // TODO: Make sure this still works when we have multiple devices @@ -1831,7 +1660,6 @@ ASSERT_EQ(model.partitionTheWork(devices, ExecutePreference::PREFER_LOW_POWER, ExecutePriority::DEFAULT, {}, &plan), ANEURALNETWORKS_NO_ERROR); - EXPECT_TRUE(plan.forTest_flatGetDynamicTemporaries().empty()); ASSERT_EQ(plan.forTest_getKind(), ExecutionPlan::Kind::SIMPLE); ASSERT_NE(plan.forTest_simpleGetDevice().get(), nullptr); ASSERT_EQ(plan.forTest_simpleGetDevice()->getName(), "V1_3"); @@ -1871,7 +1699,6 @@ ASSERT_EQ(model.partitionTheWork(devices, ExecutePreference::PREFER_LOW_POWER, ExecutePriority::DEFAULT, {}, &plan), ANEURALNETWORKS_NO_ERROR); - EXPECT_TRUE(plan.forTest_flatGetDynamicTemporaries().empty()); ASSERT_EQ(plan.forTest_getKind(), ExecutionPlan::Kind::COMPOUND); const auto& steps = plan.forTest_compoundGetSteps(); ASSERT_EQ(steps.size(), size_t(3)); @@ -1895,8 +1722,7 @@ RemapVectorType{}, // tempsAsStepModelInputs StepModelOutputSetType{{opnd2, m0Opnd2}, {opnd3, m0Opnd3}}, // tempsAsStepModelOutputs - RemapVectorType{}, // outputsAsStepModelInputs - {})); // modelOutputsThatAreDownstreamInputs + RemapVectorType{})); // outputsAsStepModelInputs } { const auto& step1 = steps[1]; @@ -1918,8 +1744,7 @@ RemapVectorType{{opnd4, m1Opnd4}}, // modelOutputs RemapVectorType{{opnd3, m1Opnd3}, {opnd2, m1Opnd2}}, // tempsAsStepModelInputs StepModelOutputSetType{{opnd5, m1Opnd5}}, // 
tempsAsStepModelOutputs - RemapVectorType{}, // outputsAsStepModelInputs - {})); // modelOutputsThatAreDownstreamInputs + RemapVectorType{})); // outputsAsStepModelInputs } { const auto& step2 = steps[2]; @@ -1940,8 +1765,7 @@ RemapVectorType{{opnd8, m2Opnd8}}, // modelOutputs RemapVectorType{{opnd3, m2Opnd3}, {opnd5, m2Opnd5}}, // tempsAsStepModelInputs StepModelOutputSetType{}, // tempsAsStepModelOutputs - RemapVectorType{}, // outputsAsStepModelInputs - {})); // modelOutputsThatAreDownstreamInputs + RemapVectorType{})); // outputsAsStepModelInputs } } @@ -1949,13 +1773,18 @@ PartitioningModel model; uint32_t opnd0 = model.addFloatOperand(); uint32_t opnd1 = model.addFloatOperand(); - uint32_t opnd2 = model.addOperation2To1V1_0(0, opnd0, opnd1, Dimensioned::NO); + uint32_t opnd2 = + model.addOperation2To1V1_0(0, opnd0, opnd1, PartitioningModel::Dimensioned::NO); uint32_t opnd3 = model.addFloatOperand(); uint32_t opnd4 = model.addOperation2To1V1_0(1, opnd2, opnd3); model.identifyInputsAndOutputs({opnd0, opnd1, opnd3}, {opnd4}); model.finish(); ASSERT_TRUE(model.isValid()); + // We expect that we cannot successfully partition, because we + // have an intermediate operand (opnd2) without dimensions, and + // this is not currently handled. + // One device that can and should execute operation 0. const auto devices = makeDevices({{"hw", 0.5, (1 << 0)}}); @@ -1965,31 +1794,32 @@ // didn't actually do any partitioning. PartitioningCompilation cPNo(&model, devices); ASSERT_EQ(cPNo.setPartitioning(DeviceManager::kPartitioningNo), Result::NO_ERROR); - ASSERT_EQ(cPNo.failPartitioning(), Result::NO_ERROR); ASSERT_EQ(cPNo.finish(), Result::NO_ERROR); ASSERT_EQ(cPNo.getExecutionPlan().forTest_getKind(), ExecutionPlan::Kind::SIMPLE); ASSERT_EQ(cPNo.getExecutionPlan().forTest_simpleGetDevice(), DeviceManager::getCpuDevice()); - // Test kPartitioningWithFallback. 
We should attempt partitioning, simulate - // a recoverable failure, then fallback to CPU with a SIMPLE plan, and - // finally return success. No need to compare the original model to the - // model from the plan -- we didn't actually do any partitioning. + // Test kPartitioningWithFallback. We should attempt + // partitioning, reach the end of the partitioning process (so we + // have an unsuccessful execution plan), discover the dimensionless + // intermediate operand, then fallback to CPU with a SIMPLE plan, and + // finally return success. + // No need to compare the original model to the model from the plan -- we + // didn't actually do any partitioning. PartitioningCompilation cPWithFallback(&model, devices); ASSERT_EQ(cPWithFallback.setPartitioning(DeviceManager::kPartitioningWithFallback), Result::NO_ERROR); - ASSERT_EQ(cPWithFallback.failPartitioning(), Result::NO_ERROR); ASSERT_EQ(cPWithFallback.finish(), Result::NO_ERROR); ASSERT_EQ(cPWithFallback.getExecutionPlan().forTest_getKind(), ExecutionPlan::Kind::SIMPLE); ASSERT_EQ(cPWithFallback.getExecutionPlan().forTest_simpleGetDevice(), DeviceManager::getCpuDevice()); - // Test kPartitioningWithoutFallback. We should attempt partitioning, - // simulate a recoverable failure, and fail. + // Test kPartitioningWithoutFallback. We should attempt + // partitioning, and fail. 
PartitioningCompilation cPWithoutFallback(&model, devices); ASSERT_EQ(cPWithoutFallback.setPartitioning(DeviceManager::kPartitioningWithoutFallback), Result::NO_ERROR); - ASSERT_EQ(cPWithoutFallback.failPartitioning(), Result::NO_ERROR); ASSERT_EQ(cPWithoutFallback.finish(), Result::OP_FAILED); + ASSERT_TRUE(cPWithoutFallback.getExecutionPlan().forTest_hasStepModelOutputsOfUnknownSize()); ASSERT_EQ(cPWithoutFallback.getExecutionPlan().forTest_getKind(), ExecutionPlan::Kind::ERROR); } @@ -2015,7 +1845,6 @@ ASSERT_EQ(model.partitionTheWork(devices, ExecutePreference::PREFER_LOW_POWER, ExecutePriority::DEFAULT, {}, &plan), ANEURALNETWORKS_NO_ERROR); - EXPECT_TRUE(plan.forTest_flatGetDynamicTemporaries().empty()); ASSERT_EQ(plan.forTest_getKind(), ExecutionPlan::Kind::COMPOUND); const auto& steps = plan.forTest_compoundGetSteps(); ASSERT_EQ(steps.size(), size_t(2)); @@ -2034,8 +1863,7 @@ RemapVectorType{{opnd2, m0Opnd2}}, // modelOutputs RemapVectorType{}, // tempsAsStepModelInputs StepModelOutputSetType{}, // tempsAsStepModelOutputs - RemapVectorType{}, // outputsAsStepModelInputs - {0u})); // modelOutputsThatAreDownstreamInputs + RemapVectorType{})); // outputsAsStepModelInputs } { // Build a model to compare against the step model from steps[1]. 
@@ -2050,9 +1878,8 @@ compare(steps[1], &model1, devices[1], RemapVectorType{}, // modelInputs RemapVectorType{{opnd3, m1Opnd3}}, // modelOutputs RemapVectorType{}, // tempsAsStepModelInputs - StepModelOutputSetType{}, // tempsAsStepModelOutputs - RemapVectorType{{opnd2, m1Opnd2}}, // outputsAsStepModelInputs - {})); // modelOutputsThatAreDownstreamInputs + StepModelOutputSetType{}, // tempsAsStepModelOutputs + RemapVectorType{{opnd2, m1Opnd2}})); // outputsAsStepModelInputs } } @@ -2119,7 +1946,6 @@ ASSERT_EQ(model.partitionTheWork(devices, ExecutePreference::PREFER_LOW_POWER, ExecutePriority::DEFAULT, {}, &plan), ANEURALNETWORKS_NO_ERROR); - EXPECT_TRUE(plan.forTest_flatGetDynamicTemporaries().empty()); ASSERT_EQ(plan.forTest_getKind(), ExecutionPlan::Kind::SIMPLE); ASSERT_EQ(plan.forTest_simpleGetDevice()->getName(), expectDevice); }; @@ -2139,8 +1965,8 @@ // WrapperOperandType is the NeuralNetworksWrapper.h representation of a // full operand type (WrapperType plus dimensions plus other attributes). - auto TestType = [](V1_3::OperandType operandType) { - if (operandType == V1_3::OperandType::SUBGRAPH) { + auto TestType = [](OperandType operandType) { + if (operandType == OperandType::SUBGRAPH) { // SUBGRAPH capabilities are handled differently. 
return; } @@ -2155,11 +1981,11 @@ model.finish(); ASSERT_TRUE(model.isValid()); - const V1_3::Capabilities baseCapabilities = ::android::nn::makeCapabilities(0.5); + const Capabilities baseCapabilities = makeCapabilities(0.5); { // better than base - V1_3::Capabilities goodCapabilities = baseCapabilities; + Capabilities goodCapabilities = baseCapabilities; update(&goodCapabilities, operandType, 0.25); const auto devices = @@ -2173,14 +1999,13 @@ ASSERT_EQ(model.partitionTheWork(devices, ExecutePreference::PREFER_LOW_POWER, ExecutePriority::DEFAULT, {}, &plan), ANEURALNETWORKS_NO_ERROR); - EXPECT_TRUE(plan.forTest_flatGetDynamicTemporaries().empty()); ASSERT_EQ(plan.forTest_getKind(), ExecutionPlan::Kind::SIMPLE); ASSERT_EQ(plan.forTest_simpleGetDevice()->getName(), "good"); } { // worse than base - V1_3::Capabilities badCapabilities = baseCapabilities; + Capabilities badCapabilities = baseCapabilities; update(&badCapabilities, operandType, 0.75); const auto devices = makeDevices({{"base", baseCapabilities, ~0U, PartitioningDriver::OEMYes}, @@ -2193,417 +2018,21 @@ ASSERT_EQ(model.partitionTheWork(devices, ExecutePreference::PREFER_LOW_POWER, ExecutePriority::DEFAULT, {}, &plan), ANEURALNETWORKS_NO_ERROR); - EXPECT_TRUE(plan.forTest_flatGetDynamicTemporaries().empty()); ASSERT_EQ(plan.forTest_getKind(), ExecutionPlan::Kind::SIMPLE); ASSERT_EQ(plan.forTest_simpleGetDevice()->getName(), "base"); } }; - for (uint32_t type = static_cast<uint32_t>(V1_3::OperandTypeRange::FUNDAMENTAL_MIN); - type <= static_cast<uint32_t>(V1_3::OperandTypeRange::FUNDAMENTAL_MAX); ++type) { - TestType(static_cast<V1_3::OperandType>(type)); + for (uint32_t type = static_cast<uint32_t>(OperandTypeRange::FUNDAMENTAL_MIN); + type <= static_cast<uint32_t>(OperandTypeRange::FUNDAMENTAL_MAX); ++type) { + TestType(static_cast<OperandType>(type)); } - for (uint32_t type = static_cast<uint32_t>(V1_3::OperandTypeRange::OEM_MIN); - type <= static_cast<uint32_t>(V1_3::OperandTypeRange::OEM_MAX); ++type) 
{ - TestType(static_cast<V1_3::OperandType>(type)); + for (uint32_t type = static_cast<uint32_t>(OperandTypeRange::OEM_MIN); + type <= static_cast<uint32_t>(OperandTypeRange::OEM_MAX); ++type) { + TestType(static_cast<OperandType>(type)); } } -TEST_F(PartitioningTest, ZeroInputStepModel) { - PartitioningModel model; - const uint32_t opnd0 = model.addFloatZeroOperand(); - const uint32_t opnd1 = model.addOperation1To1V1_3(0, opnd0); - const uint32_t opnd2 = model.addFloatOperand(); - const uint32_t opnd3 = model.addOperation2To1V1_0(1, opnd1, opnd2); - model.identifyInputsAndOutputs({opnd2}, {opnd3}); - ASSERT_EQ(model.finish(), Result::NO_ERROR); - - // This will result in 2 partitions: deviceA handles op0, deviceB handles op1. - // The partition for deviceA does not have any model input, and should result in full CPU - // fallback. - const auto devices = makeDevices({{"deviceA", 0.8, ~0U}, {"deviceB", 0.5, 1 << 1}}); - PartitioningCompilation compilation(&model, devices); - ASSERT_EQ(compilation.finish(), Result::NO_ERROR); - const auto& cpuDeviceName = DeviceManager::getCpuDevice()->getName(); - checkExecutionPlanSteps(compilation.getExecutionPlan(), {cpuDeviceName}); -} - -TEST_F(PartitioningTest, ZeroOutputStepModel) { - PartitioningModel model; - const uint32_t opnd0 = model.addFloatOperand(); - const uint32_t opnd1 = model.addOperation1To1V1_3(0, opnd0); - const uint32_t opnd2 = model.addFloatOperand(); - model.addOperation2To1V1_0(1, opnd1, opnd2); - model.identifyInputsAndOutputs({opnd0, opnd2}, {opnd1}); - ASSERT_EQ(model.finish(), Result::NO_ERROR); - - // This will result in 2 partitions: deviceA handles op0, deviceB handles op1. - // The partition for deviceB does not have any model output, and should result in full CPU - // fallback. 
- const auto devices = makeDevices({{"deviceA", 0.8, ~0U}, {"deviceB", 0.5, 1 << 1}}); - PartitioningCompilation compilation(&model, devices); - ASSERT_EQ(compilation.finish(), Result::NO_ERROR); - const auto& cpuDeviceName = DeviceManager::getCpuDevice()->getName(); - checkExecutionPlanSteps(compilation.getExecutionPlan(), {cpuDeviceName}); -} - -// Test dynamic temporaries and related parts of the partitioning implementation. -// -// opnd0 = model input // tensor to pad -// opnd1 = model input // padding -// opnd2 = PAD(opnd1, opnd0) // model output -// opnd3 = PAD(opnd1, opnd0) -// opnd4 = ADD(opnd2, opnd3, FUSED_NONE) // model output -class DynamicTemporariesTest : public PartitioningTest { - protected: - // Call these functions in sequence in order to perform the test. - // Call to declareOutputDimensions() can be omitted (see the default values below). - // Call to declareHalVersions() can be omitted (defaults to HalVersion::LATEST). - void declareOutputDimensions(bool opnd2ModelAndPartitionOutputSpecified, - bool opnd3PartitionOutputSpecified, - bool opnd4ModelOutputSpecified); - void declareHalVersions(HalVersion padDeviceVersion, HalVersion addDeviceVersion); - void makeModelAndValidate(); - void compileModelAndComparePlan(bool noFallback = true); - void executeCompilationAndCompareOutput(bool opnd2ModelOutputBigEnough, - bool opnd4ModelOutputBigEnough); - - // set by declareOutputDimensions() - bool mOpnd2ModelAndPartitionOutputSpecified = false; - bool mOpnd3PartitionOutputSpecified = false; - bool mOpnd4ModelOutputSpecified = false; - - // set by declareHalVersions() - HalVersion mPadDeviceVersion = HalVersion::LATEST; - HalVersion mAddDeviceVersion = HalVersion::LATEST; - HalVersion mMinDeviceVersion = HalVersion::LATEST; // minimum of the other two device versions - - // created by makeModelAndValidate() - std::optional<PartitioningModel> mModel; - std::vector<uint32_t> mOpnds; - - // created by compileModelAndComparePlan(); - 
std::optional<PartitioningCompilation> mCompilation; - - static bool supportsOutputOfUnknownRank(HalVersion version) { - return version >= HalVersion::V1_2; - } - - static Dimensioned dimensionedOutput(HalVersion version, bool specified) { - return specified ? Dimensioned::YES_4 - : supportsOutputOfUnknownRank(version) ? Dimensioned::NO - : Dimensioned::RANK_1; - } -}; - -void DynamicTemporariesTest::declareOutputDimensions(bool opnd2ModelAndPartitionOutputSpecified, - bool opnd3PartitionOutputSpecified, - bool opnd4ModelOutputSpecified) { - ASSERT_FALSE(mModel.has_value()); - mOpnd2ModelAndPartitionOutputSpecified = opnd2ModelAndPartitionOutputSpecified; - mOpnd3PartitionOutputSpecified = opnd3PartitionOutputSpecified; - mOpnd4ModelOutputSpecified = opnd4ModelOutputSpecified; -} - -void DynamicTemporariesTest::declareHalVersions(HalVersion padDeviceVersion, - HalVersion addDeviceVersion) { - ASSERT_FALSE(mModel.has_value()); - mPadDeviceVersion = padDeviceVersion; - mAddDeviceVersion = addDeviceVersion; - mMinDeviceVersion = min(padDeviceVersion, addDeviceVersion); -} - -void DynamicTemporariesTest::makeModelAndValidate() { - ASSERT_FALSE(mModel.has_value()); - mModel = PartitioningModel(); - - uint32_t opndActivation = mModel->addIntScalarOperand(ANEURALNETWORKS_FUSED_NONE); - - uint32_t opnd0 = mModel->addFloatOperand(Dimensioned::YES_2); // tensor to pad - uint32_t opnd1 = mModel->addIntOperand(Dimensioned::RANK_2); // paddings - uint32_t opnd2 = mModel->addExplicitOperationXTo1( - ANEURALNETWORKS_PAD, {opnd0, opnd1}, WrapperType::TENSOR_FLOAT32, - dimensionedOutput(mMinDeviceVersion, mOpnd2ModelAndPartitionOutputSpecified)); - uint32_t opnd3 = mModel->addExplicitOperationXTo1( - ANEURALNETWORKS_PAD, {opnd0, opnd1}, WrapperType::TENSOR_FLOAT32, - dimensionedOutput(mMinDeviceVersion, mOpnd3PartitionOutputSpecified)); - uint32_t opnd4 = mModel->addExplicitOperationXTo1( - ANEURALNETWORKS_ADD, {opnd2, opnd3, opndActivation}, WrapperType::TENSOR_FLOAT32, - 
dimensionedOutput(mMinDeviceVersion, mOpnd4ModelOutputSpecified)); - mModel->identifyInputsAndOutputs({opnd0, opnd1}, {opnd2, opnd4}); - mModel->finish(); - ASSERT_TRUE(mModel->isValid()); - - mOpnds = {opnd0, opnd1, opnd2, opnd3, opnd4}; -} - -void DynamicTemporariesTest::compileModelAndComparePlan(bool noFallback) { - ASSERT_TRUE(mModel.has_value()); - ASSERT_TRUE(!mCompilation.has_value()); - - auto devices = makeDevices({{"pad", - 0.9, - 0U, - PartitioningDriver::OEMNo, - mPadDeviceVersion, - {V1_3::OperationType::PAD}}, - {"add", - 0.9, - 0U, - PartitioningDriver::OEMNo, - mAddDeviceVersion, - {V1_3::OperationType::ADD}}}); - - mCompilation = PartitioningCompilation(&mModel.value(), devices); - ASSERT_EQ(mCompilation->setPartitioning(DeviceManager::kPartitioningWithoutFallback), - Result::NO_ERROR); - if (noFallback) { - ASSERT_EQ(mCompilation->finish(), Result::NO_ERROR); - const ExecutionPlan& planA = mCompilation->getExecutionPlan(); - EXPECT_TRUE(planA.forTest_flatGetDynamicTemporaries() == - (mOpnd3PartitionOutputSpecified ? DynamicTemporariesType{} - : DynamicTemporariesType{mOpnds[3]})); - ASSERT_EQ(planA.forTest_getKind(), ExecutionPlan::Kind::COMPOUND); - const auto& stepsA = planA.forTest_compoundGetSteps(); - ASSERT_EQ(stepsA.size(), size_t(2)); - { - // Build a model to compare against the step model from stepsA[0]. 
- PartitioningModel modelA0; - uint32_t a0Opnd0 = modelA0.addFloatOperand(Dimensioned::YES_2); - uint32_t a0Opnd1 = modelA0.addIntOperand(Dimensioned::RANK_2); - uint32_t a0Opnd2 = modelA0.addExplicitOperationXTo1( - ANEURALNETWORKS_PAD, {a0Opnd0, a0Opnd1}, WrapperType::TENSOR_FLOAT32, - dimensionedOutput(mMinDeviceVersion, mOpnd3PartitionOutputSpecified)); - uint32_t a0Opnd3 = modelA0.addExplicitOperationXTo1( - ANEURALNETWORKS_PAD, {a0Opnd0, a0Opnd1}, WrapperType::TENSOR_FLOAT32, - dimensionedOutput(mMinDeviceVersion, mOpnd2ModelAndPartitionOutputSpecified)); - modelA0.identifyInputsAndOutputs({a0Opnd0, a0Opnd1}, {a0Opnd3, a0Opnd2}); - modelA0.finish(); - ASSERT_TRUE(modelA0.isValid()); - - ASSERT_NO_FATAL_FAILURE(compare( - stepsA[0], &modelA0, devices[0], - RemapVectorType{{mOpnds[0], a0Opnd0}, {mOpnds[1], a0Opnd1}}, // modelInputs - RemapVectorType{{mOpnds[2], a0Opnd3}}, // modelOutputs - RemapVectorType{}, // tempsAsStepModelInputs - StepModelOutputSetType{{mOpnds[3], a0Opnd2}}, // tempsAsStepModelOutputs - RemapVectorType{}, // outputsAsStepModelInputs - {0u})); // modelOutputsThatAreDownstreamInputs - } - { - // Build a model to compare against the step model from stepsA[1]. 
- PartitioningModel modelA1; - uint32_t a1Opnd2 = modelA1.addFloatOperand( - dimensionedOutput(mMinDeviceVersion, mOpnd2ModelAndPartitionOutputSpecified)); - uint32_t a1Opnd3 = modelA1.addFloatOperand( - dimensionedOutput(mMinDeviceVersion, mOpnd3PartitionOutputSpecified)); - uint32_t a1Opnd4 = modelA1.addOperation2To1V1_0( - 0, a1Opnd2, a1Opnd3, - dimensionedOutput(mMinDeviceVersion, mOpnd4ModelOutputSpecified)); - modelA1.identifyInputsAndOutputs({a1Opnd3, a1Opnd2}, {a1Opnd4}); - modelA1.finish(); - ASSERT_TRUE(modelA1.isValid()); - - ASSERT_NO_FATAL_FAILURE( - compare(stepsA[1], &modelA1, devices[1], RemapVectorType{}, // modelInputs - RemapVectorType{{mOpnds[4], a1Opnd4}}, // modelOutputs - RemapVectorType{{mOpnds[3], a1Opnd3}}, // tempsAsStepModelInputs - StepModelOutputSetType{}, // tempsAsStepModelOutputs - RemapVectorType{{mOpnds[2], a1Opnd2}}, // outputsAsStepModelInputs - {})); // modelOutputsThatAreDownstreamInputs - } - } else { - ASSERT_EQ(mCompilation->finish(), Result::OP_FAILED); - // Try again, expecting fallback. 
- mCompilation = PartitioningCompilation(&mModel.value(), devices); - ASSERT_EQ(mCompilation->setPartitioning(DeviceManager::kPartitioningWithFallback), - Result::NO_ERROR); - ASSERT_EQ(mCompilation->finish(), Result::NO_ERROR); - ASSERT_EQ(mCompilation->getExecutionPlan().forTest_getKind(), ExecutionPlan::Kind::SIMPLE); - ASSERT_EQ(mCompilation->getExecutionPlan().forTest_simpleGetDevice(), - DeviceManager::getCpuDevice()); - } -} - -void DynamicTemporariesTest::executeCompilationAndCompareOutput(bool opnd2ModelOutputBigEnough, - bool opnd4ModelOutputBigEnough) { - ASSERT_TRUE(opnd2ModelOutputBigEnough || !mOpnd2ModelAndPartitionOutputSpecified); - ASSERT_TRUE(opnd4ModelOutputBigEnough || !mOpnd4ModelOutputSpecified); - - ASSERT_TRUE(mCompilation.has_value()); - WrapperExecution e(&mCompilation.value()); - - WrapperOperandType padTensorValueType(WrapperType::TENSOR_FLOAT32, {2}); - const float padTensorValue[] = {3.0f, 5.0f}; - e.setInput(0, &padTensorValue, &padTensorValueType.operandType); - - WrapperOperandType paddingsType(WrapperType::TENSOR_INT32, {1, 2}); - const int paddings[1][2] = {{1, 1}}; - e.setInput(1, &paddings, &paddingsType.operandType); - - auto setOutput = [&e](uint32_t index, float* buffer, bool bigEnough, bool specified, - HalVersion version) { - const uint32_t elts = bigEnough ? 4 : 3; - std::fill(buffer, buffer + elts, -1.0f); - using DimsType = std::vector<uint32_t>; - WrapperOperandType outputType( - WrapperType::TENSOR_FLOAT32, - specified ? DimsType{elts} - : supportsOutputOfUnknownRank(version) ? 
DimsType{} : DimsType{0}); - e.setOutput(index, buffer, elts * sizeof(float), &outputType.operandType); - }; - float opnd2ModelOutput[4], opnd4ModelOutput[4]; - setOutput(0, opnd2ModelOutput, opnd2ModelOutputBigEnough, - mOpnd2ModelAndPartitionOutputSpecified, mPadDeviceVersion); - setOutput(1, opnd4ModelOutput, opnd4ModelOutputBigEnough, mOpnd4ModelOutputSpecified, - mAddDeviceVersion); - - const Result expectResult = opnd2ModelOutputBigEnough && opnd4ModelOutputBigEnough - ? Result::NO_ERROR - : Result::OUTPUT_INSUFFICIENT_SIZE; - ASSERT_EQ(e.compute(), expectResult); - if (expectResult == Result::NO_ERROR) { - float expected[4] = {0.0f, padTensorValue[0], padTensorValue[1], 0.0f}; - ASSERT_TRUE(std::equal(std::begin(opnd2ModelOutput), std::end(opnd2ModelOutput), - std::begin(expected))); - for (auto& elt : expected) { - elt *= 2; - } - ASSERT_TRUE(std::equal(std::begin(opnd4ModelOutput), std::end(opnd4ModelOutput), - std::begin(expected))); - } -} - -TEST_F(DynamicTemporariesTest, ModelOutputsSufficientSize) { - // The purpose of this test is to confirm that the partitioner and the - // runtime can handle a model output of unspecified dimensions but - // sufficient size that is written by one partition and read by another. - - ASSERT_NO_FATAL_FAILURE(declareOutputDimensions(/*opnd2ModelAndPartitionOutputSpecified=*/false, - /*opnd3PartitionOutputSpecified=*/true, - /*opnd4ModelOutputSpecified=*/false)); - ASSERT_NO_FATAL_FAILURE(makeModelAndValidate()); - ASSERT_NO_FATAL_FAILURE(compileModelAndComparePlan()); - ASSERT_NO_FATAL_FAILURE(executeCompilationAndCompareOutput(true, true)); -} - -// TODO(b/174851714): Fix the partitioner and re-enable this test. -TEST_F(DynamicTemporariesTest, DISABLED_ModelOutputsSufficientSize_V1_1) { - // The purpose of this test is to confirm that the partitioner and the - // runtime can handle a model output of unspecified dimensions but - // sufficient size that is written by one partition and read by another. 
- // Regression test for http://b/174851714. - - ASSERT_NO_FATAL_FAILURE(declareOutputDimensions(/*opnd2ModelAndPartitionOutputSpecified=*/false, - /*opnd3PartitionOutputSpecified=*/true, - /*opnd4ModelOutputSpecified=*/false)); - ASSERT_NO_FATAL_FAILURE(declareHalVersions(/*padDeviceVersion=*/HalVersion::V1_1, - /*addDeviceVersion=*/HalVersion::V1_1)); - ASSERT_NO_FATAL_FAILURE(makeModelAndValidate()); - ASSERT_NO_FATAL_FAILURE(compileModelAndComparePlan()); - ASSERT_NO_FATAL_FAILURE(executeCompilationAndCompareOutput(true, true)); -} - -TEST_F(DynamicTemporariesTest, DynamicTemporariesUnspecifiedOutputs) { - // The purpose of this test is to confirm that the partitioner can produce - // dynamic temporaries and that the runtime can handle them properly. Note - // that all model outputs are of unspecified dimensions but sufficient size. - - ASSERT_NO_FATAL_FAILURE(makeModelAndValidate()); - ASSERT_NO_FATAL_FAILURE(compileModelAndComparePlan()); - ASSERT_NO_FATAL_FAILURE(executeCompilationAndCompareOutput(true, true)); -} - -TEST_F(DynamicTemporariesTest, DynamicTemporariesSpecifiedOutputs) { - // The purpose of this test is to confirm that the partitioner can produce - // dynamic temporaries and that the runtime can handle them properly. Note - // that all model outputs are of specified dimensions. - - ASSERT_NO_FATAL_FAILURE(declareOutputDimensions(/*opnd2ModelAndPartitionOutputSpecified=*/true, - /*opnd3PartitionOutputSpecified=*/false, - /*opnd4ModelOutputSpecified=*/true)); - ASSERT_NO_FATAL_FAILURE(makeModelAndValidate()); - ASSERT_NO_FATAL_FAILURE(compileModelAndComparePlan()); - ASSERT_NO_FATAL_FAILURE(executeCompilationAndCompareOutput(true, true)); -} - -TEST_F(DynamicTemporariesTest, DynamicTemporariesSpecifiedOutputs_V1_2) { - // The purpose of this test is to confirm that the partitioner can produce - // dynamic temporaries and that the runtime can handle them properly. Note - // that all model outputs are of specified dimensions. 
- // Regression test for http://b/174851714. - - ASSERT_NO_FATAL_FAILURE(declareOutputDimensions(/*opnd2ModelAndPartitionOutputSpecified=*/true, - /*opnd3PartitionOutputSpecified=*/false, - /*opnd4ModelOutputSpecified=*/true)); - ASSERT_NO_FATAL_FAILURE(declareHalVersions(/*padDeviceVersion=*/HalVersion::V1_2, - /*addDeviceVersion=*/HalVersion::V1_2)); - ASSERT_NO_FATAL_FAILURE(makeModelAndValidate()); - ASSERT_NO_FATAL_FAILURE(compileModelAndComparePlan()); - ASSERT_NO_FATAL_FAILURE(executeCompilationAndCompareOutput(true, true)); -} - -TEST_F(DynamicTemporariesTest, DynamicTemporariesSpecifiedOutputs_V1_1) { - // The purpose of this test is to confirm that the partitioner cannot produce - // dynamic temporaries for V1_1 but instead does whole-model CPU fallback. Note - // that all model outputs are of specified dimensions. - // Regression test for http://b/174851714. - - ASSERT_NO_FATAL_FAILURE(declareOutputDimensions(/*opnd2ModelAndPartitionOutputSpecified=*/true, - /*opnd3PartitionOutputSpecified=*/false, - /*opnd4ModelOutputSpecified=*/true)); - ASSERT_NO_FATAL_FAILURE(declareHalVersions(/*padDeviceVersion=*/HalVersion::V1_1, - /*addDeviceVersion=*/HalVersion::V1_1)); - ASSERT_NO_FATAL_FAILURE(makeModelAndValidate()); - ASSERT_NO_FATAL_FAILURE(compileModelAndComparePlan(false)); - ASSERT_NO_FATAL_FAILURE(executeCompilationAndCompareOutput(true, true)); -} - -TEST_F(DynamicTemporariesTest, ModelOutputsInsufficientSizeWithDynamicTemporary) { - // The purpose of this test is to confirm that the runtime can detect a - // model output of insufficient size in the presence of a dynamic temporary. 
- - ASSERT_NO_FATAL_FAILURE(makeModelAndValidate()); - ASSERT_NO_FATAL_FAILURE(compileModelAndComparePlan()); - ASSERT_NO_FATAL_FAILURE(executeCompilationAndCompareOutput(false, false)); -} - -TEST_F(DynamicTemporariesTest, ModelOutputsInsufficientSizeWithoutDynamicTemporary) { - // The purpose of this test is to confirm that the runtime can detect a - // model output of insufficient size in the absence of a dynamic temporary. - - ASSERT_NO_FATAL_FAILURE(declareOutputDimensions(/*opnd2ModelAndPartitionOutputSpecified=*/false, - /*opnd3PartitionOutputSpecified=*/true, - /*opnd4ModelOutputSpecified=*/false)); - ASSERT_NO_FATAL_FAILURE(makeModelAndValidate()); - ASSERT_NO_FATAL_FAILURE(compileModelAndComparePlan()); - ASSERT_NO_FATAL_FAILURE(executeCompilationAndCompareOutput(false, false)); -} - -TEST_F(DynamicTemporariesTest, ModelOutput2InsufficientSizeWithoutDynamicTemporary) { - // The purpose of this test is to confirm that the runtime can detect a - // model output of insufficient size in the absence of a dynamic temporary. - - ASSERT_NO_FATAL_FAILURE(declareOutputDimensions(/*opnd2ModelAndPartitionOutputSpecified=*/false, - /*opnd3PartitionOutputSpecified=*/true, - /*opnd4ModelOutputSpecified=*/false)); - ASSERT_NO_FATAL_FAILURE(makeModelAndValidate()); - ASSERT_NO_FATAL_FAILURE(compileModelAndComparePlan()); - ASSERT_NO_FATAL_FAILURE(executeCompilationAndCompareOutput(false, true)); -} - -TEST_F(DynamicTemporariesTest, ModelOutput4InsufficientSizeWithoutDynamicTemporary) { - // The purpose of this test is to confirm that the runtime can detect a - // model output of insufficient size in the absence of a dynamic temporary. 
- - ASSERT_NO_FATAL_FAILURE(declareOutputDimensions(/*opnd2ModelAndPartitionOutputSpecified=*/false, - /*opnd3PartitionOutputSpecified=*/true, - /*opnd4ModelOutputSpecified=*/false)); - ASSERT_NO_FATAL_FAILURE(makeModelAndValidate()); - ASSERT_NO_FATAL_FAILURE(compileModelAndComparePlan()); - ASSERT_NO_FATAL_FAILURE(executeCompilationAndCompareOutput(true, false)); -} - // Test token rehashing during the compilation step. class CacheTest : public PartitioningTest { protected: @@ -3081,57 +2510,57 @@ TEST_F(PerfTest, Lookup) { // Derive an arbitrary (but reproducible) performance value from an OperandType. // We'll use this to ensure that we can save and then recover a type's performance. - auto typePerf = [](V1_3::OperandType type) { return float(static_cast<uint32_t>(type)); }; + auto typePerf = [](OperandType type) { return float(static_cast<uint32_t>(type)); }; - V1_3::Capabilities capabilities = ::android::nn::makeCapabilities(-1.0f); + Capabilities capabilities = makeCapabilities(-1.0f); - for (uint32_t type = static_cast<uint32_t>(V1_3::OperandTypeRange::FUNDAMENTAL_MIN); - type <= static_cast<uint32_t>(V1_3::OperandTypeRange::FUNDAMENTAL_MAX); ++type) { - V1_3::OperandType operandType = static_cast<V1_3::OperandType>(type); + for (uint32_t type = static_cast<uint32_t>(OperandTypeRange::FUNDAMENTAL_MIN); + type <= static_cast<uint32_t>(OperandTypeRange::FUNDAMENTAL_MAX); ++type) { + OperandType operandType = static_cast<OperandType>(type); update(&capabilities, operandType, typePerf(operandType)); } - for (uint32_t type = static_cast<uint32_t>(V1_3::OperandTypeRange::OEM_MIN); - type <= static_cast<uint32_t>(V1_3::OperandTypeRange::OEM_MAX); ++type) { - V1_3::OperandType operandType = static_cast<V1_3::OperandType>(type); + for (uint32_t type = static_cast<uint32_t>(OperandTypeRange::OEM_MIN); + type <= static_cast<uint32_t>(OperandTypeRange::OEM_MAX); ++type) { + OperandType operandType = static_cast<OperandType>(type); update(&capabilities, operandType, 
typePerf(operandType)); } // Make sure lookup retrieves the values stored by update - for (uint32_t type = static_cast<uint32_t>(V1_3::OperandTypeRange::FUNDAMENTAL_MIN); - type <= static_cast<uint32_t>(V1_3::OperandTypeRange::FUNDAMENTAL_MAX); ++type) { - V1_3::OperandType operandType = static_cast<V1_3::OperandType>(type); - if (operandType == V1_3::OperandType::SUBGRAPH) { + for (uint32_t type = static_cast<uint32_t>(OperandTypeRange::FUNDAMENTAL_MIN); + type <= static_cast<uint32_t>(OperandTypeRange::FUNDAMENTAL_MAX); ++type) { + OperandType operandType = static_cast<OperandType>(type); + if (operandType == OperandType::SUBGRAPH) { // SUBGRAPH capabilities are handled differently. continue; } SCOPED_TRACE(toString(operandType)); EXPECT_EQ(lookupExecTime(capabilities, operandType), typePerf(operandType)); } - for (uint32_t type = static_cast<uint32_t>(V1_3::OperandTypeRange::OEM_MIN); - type <= static_cast<uint32_t>(V1_3::OperandTypeRange::OEM_MAX); ++type) { - V1_3::OperandType operandType = static_cast<V1_3::OperandType>(type); + for (uint32_t type = static_cast<uint32_t>(OperandTypeRange::OEM_MIN); + type <= static_cast<uint32_t>(OperandTypeRange::OEM_MAX); ++type) { + OperandType operandType = static_cast<OperandType>(type); SCOPED_TRACE(toString(operandType)); EXPECT_EQ(lookupExecTime(capabilities, operandType), typePerf(operandType)); } // Check the behavior of a missing type - V1_3::OperandType operandType = static_cast<V1_3::OperandType>( - static_cast<uint32_t>(V1_3::OperandTypeRange::BASE_MAX) + 1); + OperandType operandType = + static_cast<OperandType>(static_cast<uint32_t>(OperandTypeRange::BASE_MAX) + 1); EXPECT_EQ(lookupExecTime(capabilities, operandType), FLT_MAX); } class ControlFlowPartitioningTest : public PartitioningTest { protected: // opnd0 --> +-----+ - // | op0 | --> opnd2 + // | ADD | --> opnd2 // opnd1 --> +-----+ - std::unique_ptr<PartitioningModel> createBranchOrBodyModel(Dimensioned dimensioned) { + std::unique_ptr<PartitioningModel> 
createBranchOrBodyModel() { auto model = std::make_unique<PartitioningModel>(); - const uint32_t opnd0 = model->addFloatOperand(dimensioned); - const uint32_t opnd1 = model->addFloatOperand(dimensioned); - const uint32_t opnd2 = model->addOperation2To1V1_0(0, opnd0, opnd1, dimensioned); + const uint32_t opnd0 = model->addFloatOperand(); + const uint32_t opnd1 = model->addFloatOperand(); + const uint32_t opnd2 = model->addOperation2To1V1_0(0, opnd0, opnd1); model->identifyInputsAndOutputs({opnd0, opnd1}, {opnd2}); model->finish(); EXPECT_TRUE(model->isValid()); @@ -3141,10 +2570,10 @@ // opnd0 --> +-------+ // | EQUAL | --> opnd2 // opnd1 --> +-------+ - std::unique_ptr<PartitioningModel> createCondModel(Dimensioned dimensioned) { + std::unique_ptr<PartitioningModel> createCondModel() { auto model = std::make_unique<PartitioningModel>(); - const uint32_t opnd0 = model->addFloatOperand(dimensioned); - const uint32_t opnd1 = model->addFloatOperand(dimensioned); + const uint32_t opnd0 = model->addFloatOperand(); + const uint32_t opnd1 = model->addFloatOperand(); const uint32_t opnd2 = model->addExplicitOperationXTo1( ANEURALNETWORKS_EQUAL, {opnd0, opnd1}, WrapperType::TENSOR_BOOL8); model->identifyInputsAndOutputs({opnd0, opnd1}, {opnd2}); @@ -3157,17 +2586,17 @@ // opnd1 --> | IF | --> opnd3 // opnd2 --> +----+ std::vector<std::unique_ptr<PartitioningModel>> createIfModel( - Dimensioned dimensionedMain = Dimensioned::YES, - Dimensioned dimensionedThen = Dimensioned::YES, - Dimensioned dimensionedElse = Dimensioned::YES) { - auto thenModel = createBranchOrBodyModel(dimensionedThen); - auto elseModel = createBranchOrBodyModel(dimensionedElse); + bool firstOuterInputUnknownSize = false) { + auto thenModel = createBranchOrBodyModel(); + auto elseModel = createBranchOrBodyModel(); auto mainModel = std::make_unique<PartitioningModel>(); const uint32_t opnd0 = mainModel->addBooleanOperand(); - const uint32_t opnd1 = mainModel->addFloatOperand(dimensionedMain); - const 
uint32_t opnd2 = mainModel->addFloatOperand(dimensionedMain); - const uint32_t opnd3 = mainModel->addFloatOperand(dimensionedMain); + const uint32_t opnd1 = mainModel->addFloatOperand( + firstOuterInputUnknownSize ? PartitioningModel::Dimensioned::NO + : PartitioningModel::Dimensioned::YES); + const uint32_t opnd2 = mainModel->addFloatOperand(); + const uint32_t opnd3 = mainModel->addFloatOperand(); mainModel->addIfOperation(opnd0, *thenModel, *elseModel, {opnd1, opnd2}, {opnd3}); mainModel->identifyInputsAndOutputs({opnd0, opnd1, opnd2}, {opnd3}); mainModel->finish(); @@ -3184,16 +2613,16 @@ // | WHILE | --> opnd2 // opnd1 --> +-------+ std::vector<std::unique_ptr<PartitioningModel>> createWhileModel( - Dimensioned dimensionedMain = Dimensioned::YES, - Dimensioned dimensionedCond = Dimensioned::YES, - Dimensioned dimensionedBody = Dimensioned::YES) { - auto condModel = createCondModel(dimensionedCond); - auto bodyModel = createBranchOrBodyModel(dimensionedBody); + bool firstOuterInputUnknownSize = false) { + auto condModel = createCondModel(); + auto bodyModel = createBranchOrBodyModel(); auto mainModel = std::make_unique<PartitioningModel>(); - const uint32_t opnd0 = mainModel->addFloatOperand(dimensionedMain); - const uint32_t opnd1 = mainModel->addFloatOperand(dimensionedMain); - const uint32_t opnd2 = mainModel->addFloatOperand(dimensionedMain); + const uint32_t opnd0 = mainModel->addFloatOperand( + firstOuterInputUnknownSize ? 
PartitioningModel::Dimensioned::NO + : PartitioningModel::Dimensioned::YES); + const uint32_t opnd1 = mainModel->addFloatOperand(); + const uint32_t opnd2 = mainModel->addFloatOperand(); mainModel->addWhileOperation(*condModel, *bodyModel, {opnd0, opnd1}, {opnd2}); mainModel->identifyInputsAndOutputs({opnd0, opnd1}, {opnd2}); mainModel->finish(); @@ -3205,11 +2634,6 @@ models.push_back(std::move(bodyModel)); return std::move(models); } - - void testIfUnknownSize(Dimensioned dimensionedMain, Dimensioned dimensionedThen, - Dimensioned dimensionedElse); - void testWhileUnknownSize(Dimensioned dimensionedMain, Dimensioned dimensionedThen, - Dimensioned dimensionedElse); }; TEST_F(ControlFlowPartitioningTest, IF_Interpreted) { @@ -3222,7 +2646,15 @@ ASSERT_EQ(models[0]->partitionTheWork(devices, ExecutePreference::PREFER_LOW_POWER, ExecutePriority::DEFAULT, {}, &plan), ANEURALNETWORKS_NO_ERROR); - checkExecutionPlanSteps(plan, {kIfStep, "V1_0", kGotoStep, "V1_0"}); + ASSERT_EQ(plan.forTest_getKind(), ExecutionPlan::Kind::COMPOUND); + const auto& steps = plan.forTest_compoundGetSteps(); + ASSERT_EQ(steps.size(), size_t(4)); + ASSERT_TRUE(steps[0]->isIf()); + ASSERT_TRUE(steps[1]->isExecution()); + ASSERT_TRUE(steps[2]->isGoto()); + ASSERT_TRUE(steps[3]->isExecution()); + ASSERT_EQ(steps[1]->executionStep()->getDevice()->getName(), "V1_0"); + ASSERT_EQ(steps[3]->executionStep()->getDevice()->getName(), "V1_0"); } TEST_F(ControlFlowPartitioningTest, WHILE_Interpreted) { @@ -3236,26 +2668,32 @@ ASSERT_EQ(models[0]->partitionTheWork(devices, ExecutePreference::PREFER_LOW_POWER, ExecutePriority::DEFAULT, {}, &plan), ANEURALNETWORKS_NO_ERROR); - const auto& cpuDeviceName = DeviceManager::getCpuDevice()->getName(); - checkExecutionPlanSteps(plan, {kWhileStep, cpuDeviceName, kGotoStep, "V1_0", kGotoStep}); + ASSERT_EQ(plan.forTest_getKind(), ExecutionPlan::Kind::COMPOUND); + const auto& steps = plan.forTest_compoundGetSteps(); + ASSERT_EQ(steps.size(), size_t(5)); + 
ASSERT_TRUE(steps[0]->isWhile()); + ASSERT_TRUE(steps[1]->isExecution()); + ASSERT_TRUE(steps[2]->isGoto()); + ASSERT_TRUE(steps[3]->isExecution()); + ASSERT_TRUE(steps[4]->isGoto()); + ASSERT_EQ(steps[1]->executionStep()->getDevice()->getName(), + DeviceManager::getCpuDevice()->getName()); + ASSERT_EQ(steps[3]->executionStep()->getDevice()->getName(), "V1_0"); } TEST_F(ControlFlowPartitioningTest, IF_SimplePlan) { const auto models = createIfModel(); // The device supports all operations. - const auto devices = makeDevices({{"ALL", - 0.9, - ~0U, - PartitioningDriver::OEMNo, - HalVersion::LATEST, - {V1_3::OperationType::IF}}}); + const auto devices = + makeDevices({{"ALL", 0.9, ~0U, PartitioningDriver::OEMNo, {OperationType::IF}}}); ExecutionPlan plan; ASSERT_EQ(models[0]->partitionTheWork(devices, ExecutePreference::PREFER_LOW_POWER, ExecutePriority::DEFAULT, {}, &plan), ANEURALNETWORKS_NO_ERROR); - checkExecutionPlanSteps(plan, {"ALL"}); + ASSERT_EQ(plan.forTest_getKind(), ExecutionPlan::Kind::SIMPLE); + ASSERT_EQ(plan.forTest_simpleGetDevice()->getName(), "ALL"); } TEST_F(ControlFlowPartitioningTest, WHILE_SimplePlan) { @@ -3266,69 +2704,35 @@ 0.9, ~0U, PartitioningDriver::OEMNo, - HalVersion::LATEST, - {V1_3::OperationType::WHILE, V1_3::OperationType::EQUAL}}}); + {OperationType::WHILE, OperationType::EQUAL}}}); ExecutionPlan plan; ASSERT_EQ(models[0]->partitionTheWork(devices, ExecutePreference::PREFER_LOW_POWER, ExecutePriority::DEFAULT, {}, &plan), ANEURALNETWORKS_NO_ERROR); - checkExecutionPlanSteps(plan, {"ALL"}); + ASSERT_EQ(plan.forTest_getKind(), ExecutionPlan::Kind::SIMPLE); + ASSERT_EQ(plan.forTest_simpleGetDevice()->getName(), "ALL"); } -void ControlFlowPartitioningTest::testIfUnknownSize(Dimensioned dimensionedMain, - Dimensioned dimensionedThen, - Dimensioned dimensionedElse) { - if (dimensionedMain != Dimensioned::NO && dimensionedThen != Dimensioned::NO && - dimensionedElse != Dimensioned::NO) { - // No unknown size. 
- return; - } - - const auto models = createIfModel(dimensionedMain, dimensionedThen, dimensionedElse); +TEST_F(ControlFlowPartitioningTest, IF_UnknownSize) { + const auto models = createIfModel(/*firstOuterInputUnknownSize=*/true); // The device supports all operations but the partitioner ignores its IF // support due to http://b/159076604#comment5. - const auto devices = makeDevices({{"ALL", - 0.9, - ~0U, - PartitioningDriver::OEMNo, - HalVersion::LATEST, - {V1_3::OperationType::IF}}}); + const auto devices = + makeDevices({{"ALL", 0.9, ~0U, PartitioningDriver::OEMNo, {OperationType::IF}}}); ExecutionPlan plan; ASSERT_EQ(models[0]->partitionTheWork(devices, ExecutePreference::PREFER_LOW_POWER, ExecutePriority::DEFAULT, {}, &plan), ANEURALNETWORKS_NO_ERROR); // The control flow interpreter does not support unknown size (b/132458982). - checkExecutionPlanSteps(plan, {DeviceManager::getCpuDevice()->getName()}); + ASSERT_EQ(plan.forTest_getKind(), ExecutionPlan::Kind::SIMPLE); + ASSERT_EQ(plan.forTest_simpleGetDevice()->getName(), DeviceManager::getCpuDevice()->getName()); } -TEST_F(ControlFlowPartitioningTest, IF_UnknownSize) { - const std::vector<Dimensioned> configurations = {Dimensioned::NO, Dimensioned::YES}; - for (Dimensioned dimensionedMain : configurations) { - SCOPED_TRACE(testing::Message() << "dimensionedMain: " << toString(dimensionedMain)); - for (Dimensioned dimensionedThen : configurations) { - SCOPED_TRACE(testing::Message() << "dimensionedThen: " << toString(dimensionedThen)); - for (Dimensioned dimensionedElse : configurations) { - SCOPED_TRACE(testing::Message() - << "dimensionedElse: " << toString(dimensionedElse)); - testIfUnknownSize(dimensionedMain, dimensionedThen, dimensionedElse); - } - } - } -} - -void ControlFlowPartitioningTest::testWhileUnknownSize(Dimensioned dimensionedMain, - Dimensioned dimensionedCond, - Dimensioned dimensionedBody) { - if (dimensionedMain != Dimensioned::NO && dimensionedCond != Dimensioned::NO && - 
dimensionedBody != Dimensioned::NO) { - // No unknown size. - return; - } - - const auto models = createWhileModel(dimensionedMain, dimensionedCond, dimensionedBody); +TEST_F(ControlFlowPartitioningTest, WHILE_UnknownSize) { + const auto models = createWhileModel(/*firstOuterInputUnknownSize=*/true); // The device supports all operations but the partitioner ignores its WHILE // support due to http://b/159076604#comment5. @@ -3336,365 +2740,15 @@ 0.9, ~0U, PartitioningDriver::OEMNo, - HalVersion::LATEST, - {V1_3::OperationType::WHILE, V1_3::OperationType::EQUAL}}}); + {OperationType::WHILE, OperationType::EQUAL}}}); ExecutionPlan plan; ASSERT_EQ(models[0]->partitionTheWork(devices, ExecutePreference::PREFER_LOW_POWER, ExecutePriority::DEFAULT, {}, &plan), ANEURALNETWORKS_NO_ERROR); // The control flow interpreter does not support unknown size (b/132458982). - checkExecutionPlanSteps(plan, {DeviceManager::getCpuDevice()->getName()}); -} - -TEST_F(ControlFlowPartitioningTest, WHILE_UnknownSize) { - const std::vector<Dimensioned> configurations = {Dimensioned::NO, Dimensioned::YES}; - for (Dimensioned dimensionedMain : configurations) { - SCOPED_TRACE(testing::Message() << "dimensionedMain: " << toString(dimensionedMain)); - for (Dimensioned dimensionedCond : configurations) { - SCOPED_TRACE(testing::Message() << "dimensionedCond: " << toString(dimensionedCond)); - for (Dimensioned dimensionedBody : configurations) { - SCOPED_TRACE(testing::Message() - << "dimensionedBody: " << toString(dimensionedBody)); - testWhileUnknownSize(dimensionedMain, dimensionedCond, dimensionedBody); - } - } - } -} - -// Test the memory step role analysis of the partitioning implementation. 
-class MemoryStepRoleTest : public PartitioningTest { - protected: - // A tuple of {device_name, input/output} - using TestStepRole = std::tuple<std::string, IOType>; - - void SetUp() override { - PartitioningTest::SetUp(); - mModel = std::make_unique<PartitioningModel>(); - } - - static std::string toString(SourceOperandIndex index) { - return "{" + std::to_string(index.first) + ", " + std::to_string(index.second) + "}"; - } - - static std::string toString(const std::set<TestStepRole>& roles) { - std::stringstream ss; - ss << "[ "; - for (const auto& [deviceName, type] : roles) { - ss << "{" << deviceName << ", " << (type == IOType::INPUT ? "INPUT" : "OUTPUT") << "} "; - } - ss << "]"; - return ss.str(); - } - - void finishAndPartitionModelForDevices(const std::vector<std::shared_ptr<Device>>& devices) { - mModel->finish(); - ASSERT_TRUE(mModel->isValid()); - ASSERT_EQ(mModel->partitionTheWork(devices, ExecutePreference::PREFER_LOW_POWER, - ExecutePriority::DEFAULT, {}, &mPlan), - ANEURALNETWORKS_NO_ERROR); - } - - void checkStepRolesOfInput(uint32_t index, const std::set<TestStepRole>& expected) const { - SCOPED_TRACE("Input: " + std::to_string(index)); - std::set<TestStepRole> actual; - mPlan.forEachStepRoleOfInput( - index, [&actual](const auto* preparedModel, IOType type, uint32_t) { - actual.emplace(preparedModel->getDevice()->getName(), type); - }); - EXPECT_TRUE(expected == actual) - << "expected: " << toString(expected) << ", actual: " << toString(actual); - } - - void checkStepRolesOfOutput(uint32_t index, const std::set<TestStepRole>& expected) const { - SCOPED_TRACE("Output: " + std::to_string(index)); - std::set<TestStepRole> actual; - mPlan.forEachStepRoleOfOutput( - index, [&actual](const auto* preparedModel, IOType type, uint32_t) { - actual.emplace(preparedModel->getDevice()->getName(), type); - }); - EXPECT_TRUE(expected == actual) - << "expected: " << toString(expected) << ", actual: " << toString(actual); - } - - void 
checkStepRolesOfSourceOperand(SourceOperandIndex index, - const std::set<TestStepRole>& expected) const { - SCOPED_TRACE("SourceOperandIndex: " + toString(index)); - std::set<TestStepRole> actual; - mPlan.forTest_compoundForEachStepRoleOfSourceOperand( - index, [&actual](const auto* preparedModel, IOType type, uint32_t) { - actual.emplace(preparedModel->getDevice()->getName(), type); - }); - EXPECT_TRUE(expected == actual) - << "expected: " << toString(expected) << ", actual: " << toString(actual); - } - - std::unique_ptr<PartitioningModel> mModel; - ExecutionPlan mPlan; -}; - -// Test a graph with 3 operations, each operation in a separate partition: -// opnd2 = OP0(opnd0, opnd1) -// opnd4 = OP1(opnd1, opnd3) -// opnd5 = OP2(opnd2, opnd4) -TEST_F(MemoryStepRoleTest, NoControlFlow) { - const uint32_t opnd0 = mModel->addFloatOperand(); - const uint32_t opnd1 = mModel->addFloatOperand(); - const uint32_t opnd2 = mModel->addOperation2To1V1_0(0, opnd0, opnd1); - const uint32_t opnd3 = mModel->addFloatOperand(); - const uint32_t opnd4 = mModel->addOperation2To1V1_0(1, opnd1, opnd3); - const uint32_t opnd5 = mModel->addOperation2To1V1_0(2, opnd2, opnd4); - mModel->identifyInputsAndOutputs({opnd0, opnd1, opnd3}, {opnd2, opnd5}); - - // This will result in 3 partitions: - // deviceA handles op0, deviceB handles op1, deviceC handles op2. - const auto devices = makeDevices( - {{"deviceA", 0.8, ~0U}, {"deviceB", 0.5, 1 << 1}, {"deviceC", 0.5, 1 << 2}}); - finishAndPartitionModelForDevices(devices); - checkExecutionPlanSteps(mPlan, {"deviceB", "deviceA", "deviceC"}); - - // Check the step roles of the main model inputs and outputs: - // - // input0 and input2 are each exclusive for a single partition. - checkStepRolesOfInput(0, {{"deviceA", IOType::INPUT}}); - checkStepRolesOfInput(2, {{"deviceB", IOType::INPUT}}); - // input1 is shared by two operations in different partitions. 
- checkStepRolesOfInput(1, {{"deviceA", IOType::INPUT}, {"deviceB", IOType::INPUT}}); - // output0 is a model output that is a downstream input. - checkStepRolesOfOutput(0, {{"deviceA", IOType::OUTPUT}, {"deviceC", IOType::INPUT}}); - // output1 is only used in a single partition. - checkStepRolesOfOutput(1, {{"deviceC", IOType::OUTPUT}}); - - // Check the step roles of the partition boundary temporaries that we will allocate memory on - // behalf of (see ExecutionPlan::makeController for the allocation logic): - // - // opnd4 is a partition boundary temporary. - checkStepRolesOfSourceOperand({0, opnd4}, - {{"deviceB", IOType::OUTPUT}, {"deviceC", IOType::INPUT}}); -} - -// Test a graph with an interpreted IF operation. -TEST_F(MemoryStepRoleTest, InterpretedIf) { - auto thenModel = std::make_unique<PartitioningModel>(); - const uint32_t thenOpnd0 = thenModel->addFloatOperand(); - const uint32_t thenOpnd1 = thenModel->addFloatOperand(); - const uint32_t thenOpnd2 = thenModel->addOperation2To1V1_0(0, thenOpnd0, thenOpnd1); - thenModel->identifyInputsAndOutputs({thenOpnd0, thenOpnd1}, {thenOpnd2}); - thenModel->finish(); - EXPECT_TRUE(thenModel->isValid()); - - auto elseModel = std::make_unique<PartitioningModel>(); - const uint32_t elseOpnd0 = elseModel->addFloatOperand(); - const uint32_t elseOpnd1 = elseModel->addFloatOperand(); - const uint32_t elseOpnd2 = elseModel->addOperation2To1V1_0(1, elseOpnd0, elseOpnd1); - elseModel->identifyInputsAndOutputs({elseOpnd0, elseOpnd1}, {elseOpnd2}); - elseModel->finish(); - EXPECT_TRUE(elseModel->isValid()); - - const uint32_t mainOpnd0 = mModel->addBooleanOperand(); - const uint32_t mainOpnd1 = mModel->addFloatOperand(); - const uint32_t mainOpnd2 = mModel->addFloatOperand(); - const uint32_t mainOpnd3 = mModel->addFloatOperand(); - mModel->addIfOperation(mainOpnd0, *thenModel, *elseModel, {mainOpnd1, mainOpnd2}, {mainOpnd3}); - mModel->identifyInputsAndOutputs({mainOpnd0, mainOpnd1, mainOpnd2}, {mainOpnd3}); - - // deviceA 
handles op0, deviceB handles op1. - const auto devices = makeDevices({{"deviceA", 0.8, ~0U}, {"deviceB", 0.5, 1 << 1}}); - finishAndPartitionModelForDevices(devices); - checkExecutionPlanSteps(mPlan, {kIfStep, "deviceA", kGotoStep, "deviceB"}); - - // Check the step roles of the main model inputs and outputs: - // - // input0 is a condition operand of the interpreted IF that will only be read by the runtime. - checkStepRolesOfInput(0, {}); - // input1 and input2 are outer inputs of the interpreted IF. The memories may be directly used - // by the input operands of the then and else model. - checkStepRolesOfInput(1, {{"deviceA", IOType::INPUT}, {"deviceB", IOType::INPUT}}); - checkStepRolesOfInput(2, {{"deviceA", IOType::INPUT}, {"deviceB", IOType::INPUT}}); - // output0 is the outer output of the interpreted IF. The memory may be directly - // used by the output operands of the then and else model. - checkStepRolesOfOutput(0, {{"deviceA", IOType::OUTPUT}, {"deviceB", IOType::OUTPUT}}); - - // There is no partition boundary temporary in this model that we will allocate memory on - // behalf of (see ExecutionPlan::makeController for the allocation logic). -} - -// Test a graph with an interpreted WHILE operation. 
-TEST_F(MemoryStepRoleTest, InterpretedWhile) { - // Condition model: - // condOpnd3 = OP0(condOpnd0, condOpnd1) - // condOpnd4 = EQUAL(condOpnd2, condOpnd3) - auto condModel = std::make_unique<PartitioningModel>(); - const uint32_t condOpnd0 = condModel->addFloatOperand(); - const uint32_t condOpnd1 = condModel->addFloatOperand(); - const uint32_t condOpnd2 = condModel->addFloatOperand(); - const uint32_t condOpnd3 = condModel->addOperation2To1V1_0(0, condOpnd0, condOpnd1); - const uint32_t condOpnd4 = condModel->addExplicitOperationXTo1( - ANEURALNETWORKS_EQUAL, {condOpnd2, condOpnd3}, WrapperType::TENSOR_BOOL8); - condModel->identifyInputsAndOutputs({condOpnd0, condOpnd1, condOpnd2}, {condOpnd4}); - condModel->finish(); - EXPECT_TRUE(condModel->isValid()); - - // Body model: - // bodyOpnd3 = OP1(bodyOpnd0, bodyOpnd1) - // bodyOpnd4 = OP1(bodyOpnd0, bodyOpnd2) - auto bodyModel = std::make_unique<PartitioningModel>(); - const uint32_t bodyOpnd0 = bodyModel->addFloatOperand(); - const uint32_t bodyOpnd1 = bodyModel->addFloatOperand(); - const uint32_t bodyOpnd2 = bodyModel->addFloatOperand(); - const uint32_t bodyOpnd3 = bodyModel->addOperation2To1V1_0(1, bodyOpnd0, bodyOpnd1); - const uint32_t bodyOpnd4 = bodyModel->addOperation2To1V1_0(1, bodyOpnd0, bodyOpnd2); - bodyModel->identifyInputsAndOutputs({bodyOpnd0, bodyOpnd1, bodyOpnd2}, {bodyOpnd3, bodyOpnd4}); - bodyModel->finish(); - EXPECT_TRUE(bodyModel->isValid()); - - const uint32_t mainOpnd0 = mModel->addFloatOperand(); - const uint32_t mainOpnd1 = mModel->addFloatOperand(); - const uint32_t mainOpnd2 = mModel->addFloatOperand(); - const uint32_t mainOpnd3 = mModel->addFloatOperand(); - mModel->addWhileOperation(*condModel, *bodyModel, {mainOpnd0, mainOpnd1, mainOpnd2}, - {mainOpnd3}); - mModel->identifyInputsAndOutputs({mainOpnd0, mainOpnd1, mainOpnd2}, {mainOpnd3}); - - // deviceA handles the cond model, deviceB handles the body model. 
- const auto devices = makeDevices({{"deviceA", - 0.8, - ~0U, - PartitioningDriver::OEMNo, - HalVersion::LATEST, - {V1_3::OperationType::EQUAL}}, - {"deviceB", 0.5, 1 << 1}}); - finishAndPartitionModelForDevices(devices); - checkExecutionPlanSteps(mPlan, {kWhileStep, "deviceA", kGotoStep, "deviceB", kGotoStep}); - - // The subgraph indexes of the condition and body models of the WHILE operation. - const uint32_t condModelIndex = 1; - const uint32_t bodyModelIndex = 2; - - // Check the step roles of the main model inputs and outputs: - // - // input0 (input-output), input1 (state-only), and input2 (input-only) are outer inputs of the - // interpreted WHILE. The memories may be directly used by the input operands of the condition - // and body models. - checkStepRolesOfInput(0, {{"deviceA", IOType::INPUT}, {"deviceB", IOType::INPUT}}); - checkStepRolesOfInput(1, {{"deviceA", IOType::INPUT}, {"deviceB", IOType::INPUT}}); - checkStepRolesOfInput(2, {{"deviceA", IOType::INPUT}, {"deviceB", IOType::INPUT}}); - // output0 is an outer output of the interpreted WHILE that will only be written by the runtime. - checkStepRolesOfOutput(0, {}); - - // Check the step roles of the partition boundary temporaries that we will allocate memory on - // behalf of (see ExecutionPlan::makeController for the allocation logic): - // - // condOpnd4 is output of the interpreted WHILE condition model. - checkStepRolesOfSourceOperand({condModelIndex, condOpnd4}, {{"deviceA", IOType::OUTPUT}}); - // bodyOpnd3 (input-output) and bodyOpnd4 (state-only) are outputs of the interpreted WHILE body - // model. The memories may be directly used by the input operands of the condition and body - // models. 
- checkStepRolesOfSourceOperand( - {bodyModelIndex, bodyOpnd3}, - {{"deviceA", IOType::INPUT}, {"deviceB", IOType::INPUT}, {"deviceB", IOType::OUTPUT}}); - checkStepRolesOfSourceOperand( - {bodyModelIndex, bodyOpnd4}, - {{"deviceA", IOType::INPUT}, {"deviceB", IOType::INPUT}, {"deviceB", IOType::OUTPUT}}); -} - -// Test a graph with nested interpreted control flow operations: a WHILE operation with IF operation -// in the body model. -TEST_F(MemoryStepRoleTest, NestedInterpretedControlFlow) { - auto condModel = std::make_unique<PartitioningModel>(); - const uint32_t condOpnd0 = condModel->addFloatOperand(); - const uint32_t condOpnd1 = condModel->addFloatOperand(); - const uint32_t condOpnd2 = condModel->addBooleanOperand(); - const uint32_t condOpnd3 = condModel->addExplicitOperationXTo1( - ANEURALNETWORKS_EQUAL, {condOpnd0, condOpnd1}, WrapperType::TENSOR_BOOL8); - condModel->identifyInputsAndOutputs({condOpnd0, condOpnd1, condOpnd2}, {condOpnd3}); - condModel->finish(); - EXPECT_TRUE(condModel->isValid()); - - auto thenModel = std::make_unique<PartitioningModel>(); - const uint32_t thenOpnd0 = thenModel->addFloatOperand(); - const uint32_t thenOpnd1 = thenModel->addFloatOperand(); - const uint32_t thenOpnd2 = thenModel->addOperation2To1V1_0(0, thenOpnd0, thenOpnd1); - thenModel->identifyInputsAndOutputs({thenOpnd0, thenOpnd1}, {thenOpnd2}); - thenModel->finish(); - EXPECT_TRUE(thenModel->isValid()); - - auto elseModel = std::make_unique<PartitioningModel>(); - const uint32_t elseOpnd0 = elseModel->addFloatOperand(); - const uint32_t elseOpnd1 = elseModel->addFloatOperand(); - const uint32_t elseOpnd2 = elseModel->addOperation2To1V1_0(1, elseOpnd0, elseOpnd1); - elseModel->identifyInputsAndOutputs({elseOpnd0, elseOpnd1}, {elseOpnd2}); - elseModel->finish(); - EXPECT_TRUE(elseModel->isValid()); - - auto bodyModel = std::make_unique<PartitioningModel>(); - const uint32_t bodyOpnd0 = bodyModel->addFloatOperand(); - const uint32_t bodyOpnd1 = 
bodyModel->addFloatOperand(); - const uint32_t bodyOpnd2 = bodyModel->addBooleanOperand(); - const uint32_t bodyOpnd3 = bodyModel->addFloatOperand(); - bodyModel->addIfOperation(bodyOpnd2, *thenModel, *elseModel, {bodyOpnd0, bodyOpnd1}, - {bodyOpnd3}); - bodyModel->identifyInputsAndOutputs({bodyOpnd0, bodyOpnd1, bodyOpnd2}, {bodyOpnd3}); - bodyModel->finish(); - EXPECT_TRUE(bodyModel->isValid()); - - const uint32_t mainOpnd0 = mModel->addFloatOperand(); - const uint32_t mainOpnd1 = mModel->addFloatOperand(); - const uint32_t mainOpnd2 = mModel->addBooleanOperand(); - const uint32_t mainOpnd3 = mModel->addFloatOperand(); - mModel->addWhileOperation(*condModel, *bodyModel, {mainOpnd0, mainOpnd1, mainOpnd2}, - {mainOpnd3}); - mModel->identifyInputsAndOutputs({mainOpnd0, mainOpnd1, mainOpnd2}, {mainOpnd3}); - - // deviceA handles the cond model, deviceB handles the then model, - // deviceC handles the else model. - const auto devices = makeDevices({{"deviceA", - 0.8, - ~0U, - PartitioningDriver::OEMNo, - HalVersion::LATEST, - {V1_3::OperationType::EQUAL}}, - {"deviceB", 0.5, 1 << 0}, - {"deviceC", 0.5, 1 << 1}}); - finishAndPartitionModelForDevices(devices); - checkExecutionPlanSteps(mPlan, {kWhileStep, "deviceA", kGotoStep, kIfStep, "deviceB", kGotoStep, - "deviceC", kGotoStep}); - - // The subgraph indexes of the condition and body models of the WHILE operation. - const uint32_t condModelIndex = 1; - const uint32_t bodyModelIndex = 2; - - // Check the step roles of the main model inputs and outputs: - // - // input0 and input1 are outer inputs of the interpreted WHILE. The memories may be directly - // used by the input operands of the condition and body models, and then be directly used by the - // input operands of the then and else model of the interpreted IF in the body model. 
- checkStepRolesOfInput( - 0, - {{"deviceA", IOType::INPUT}, {"deviceB", IOType::INPUT}, {"deviceC", IOType::INPUT}}); - checkStepRolesOfInput( - 1, - {{"deviceA", IOType::INPUT}, {"deviceB", IOType::INPUT}, {"deviceC", IOType::INPUT}}); - // input2 is also an outer input of the interpreted WHILE. The memory has no step role in the - // condition model. In the body model, the memory will be used by the condition operand of the - // interpreted IF that will only be read by the runtime. - checkStepRolesOfInput(2, {}); - // output0 is an outer output of the interpreted WHILE that will only be written by the runtime. - checkStepRolesOfOutput(0, {}); - - // Check the step roles of the partition boundary temporaries that we will allocate memory on - // behalf of (see ExecutionPlan::makeController for the allocation logic): - // - // condOpnd2 is output of the interpreted WHILE condition model. - checkStepRolesOfSourceOperand({condModelIndex, condOpnd3}, {{"deviceA", IOType::OUTPUT}}); - // bodyOpnd3 is output of the interpreted WHILE body model. The memories may be directly used by - // the input operands of the condition and body models, and then be directly used by the - // input operands of the then and else model of the interpreted IF in the body model. - checkStepRolesOfSourceOperand({bodyModelIndex, bodyOpnd3}, {{"deviceA", IOType::INPUT}, - {"deviceB", IOType::INPUT}, - {"deviceB", IOType::OUTPUT}, - {"deviceC", IOType::INPUT}, - {"deviceC", IOType::OUTPUT}}); + ASSERT_EQ(plan.forTest_getKind(), ExecutionPlan::Kind::SIMPLE); + ASSERT_EQ(plan.forTest_simpleGetDevice()->getName(), DeviceManager::getCpuDevice()->getName()); } } // namespace
diff --git a/runtime/test/TestPartitioningRandom.cpp b/runtime/test/TestPartitioningRandom.cpp index 3a68694..ad90071 100644 --- a/runtime/test/TestPartitioningRandom.cpp +++ b/runtime/test/TestPartitioningRandom.cpp
@@ -14,13 +14,7 @@ * limitations under the License. */ -#include <HalInterfaces.h> -#include <SampleDriver.h> -#include <ValidateHal.h> #include <android-base/logging.h> -#include <android/hardware/neuralnetworks/1.0/ADevice.h> -#include <android/hardware/neuralnetworks/1.1/ADevice.h> -#include <android/hardware/neuralnetworks/1.2/ADevice.h> #include <gtest/gtest.h> #include <unistd.h> @@ -38,23 +32,20 @@ #include <vector> #include "CompilationBuilder.h" -#include "HalUtils.h" +#include "HalInterfaces.h" #include "Manager.h" #include "ModelBuilder.h" #include "NeuralNetworks.h" +#include "SampleDriver.h" #include "TestNeuralNetworksWrapper.h" +#include "Utils.h" +#include "ValidateHal.h" // Uncomment the following line to generate some debugging output that // may be useful when analyzing failures: // // #define VERBOSE VERBOSE -// Uncomment the following line to generate some debugging output that -// may be useful to determine test coverage for support of dynamic -// temporaries (http://b/132458982): -// -// #define TRACE_DYNTEMP TRACE_DYNTEMP - // We randomly generate tests (model + input data) at runtime, and verify // that we get the same results whether we do partitioned compilation/execution // or non partitioned compilation/execution. 
We perform a test as follows: @@ -98,20 +89,13 @@ namespace android { -namespace V1_0 = ::android::hardware::neuralnetworks::V1_0; -namespace V1_1 = ::android::hardware::neuralnetworks::V1_1; -namespace V1_2 = ::android::hardware::neuralnetworks::V1_2; -namespace V1_3 = ::android::hardware::neuralnetworks::V1_3; +using namespace nn::hal; using CompilationBuilder = nn::CompilationBuilder; -using DeviceManager = nn::DeviceManager; using Device = nn::Device; -using SharedDevice = nn::SharedDevice; +using DeviceManager = nn::DeviceManager; using ExecutionPlan = nn::ExecutionPlan; -using ExecutionStep = nn::ExecutionStep; -using HalCacheToken = nn::HalCacheToken; using HalVersion = nn::HalVersion; using HidlModel = V1_3::Model; -using LogicalStep = nn::LogicalStep; using ModelBuilder = nn::ModelBuilder; using Result = nn::test_wrapper::Result; using SampleDriver = nn::sample_driver::SampleDriver; @@ -236,7 +220,7 @@ using WrapperCompilation::finish; Result setPartitioning(uint32_t partitioning) { - return static_cast<Result>(builder()->forTest_setPartitioning(partitioning)); + return static_cast<Result>(builder()->setPartitioning(partitioning)); } const ExecutionPlan& getExecutionPlan() const { return builder()->forTest_getExecutionPlan(); } @@ -324,7 +308,7 @@ // region offset within memory, region length. std::vector<std::tuple<unsigned, uint32_t, uint32_t>> mRegions; - // For validity checking. + // For sanity checking. 
bool mLayoutDone = false; }; @@ -345,11 +329,11 @@ public: RandomPartitioningTest() : mRandNumEng(GetParam() /* seed */), mRandNumUnitDist(0.0, 1.0) {} - static Signature getSignature(const HidlModel& model, const V1_3::Operation& operation); + static Signature getSignature(const HidlModel& model, const Operation& operation); protected: - static SharedDevice makeTestDriver(HalVersion version, const char* name, - std::set<Signature> signatures); + static V1_0::IDevice* makeTestDriver(HalVersion version, const char* name, + std::set<Signature> signatures); static HalVersion getMinHalVersion(ANeuralNetworksOperationType type); @@ -510,8 +494,7 @@ return kOperationToVersion.at(type); } -Signature RandomPartitioningTest::getSignature(const HidlModel& model, - const V1_3::Operation& operation) { +Signature RandomPartitioningTest::getSignature(const HidlModel& model, const Operation& operation) { static const auto kOperationToActivation = [] { std::map<ANeuralNetworksOperationType, int> result; for (const auto& pattern : kOperationPatterns) { @@ -527,10 +510,9 @@ return Signature(operationType, -1); } - const V1_3::Operand& operand = - model.main.operands[operation.inputs[activationFunctionInputIndex]]; - CHECK(operand.lifetime == V1_3::OperandLifeTime::CONSTANT_COPY); - CHECK(operand.type == V1_3::OperandType::INT32); + const Operand& operand = model.main.operands[operation.inputs[activationFunctionInputIndex]]; + CHECK(operand.lifetime == OperandLifeTime::CONSTANT_COPY); + CHECK(operand.type == OperandType::INT32); int32_t value; memcpy(&value, &model.operandValues[operand.location.offset], operand.location.length); return Signature(operationType, value); @@ -558,21 +540,21 @@ TestDriver(const char* name, std::set<Signature> signatures) : SampleDriver(name), mSignatures(std::move(signatures)) {} - hardware::Return<void> getCapabilities_1_3(getCapabilities_1_3_cb _hidl_cb) override { + Return<void> getCapabilities_1_3(getCapabilities_1_3_cb _hidl_cb) override { 
android::nn::initVLogMask(); - const V1_0::PerformanceInfo kPerf = {.execTime = 0.75f, .powerUsage = 0.75f}; - V1_3::Capabilities capabilities = { + const PerformanceInfo kPerf = {.execTime = 0.75f, .powerUsage = 0.75f}; + Capabilities capabilities = { .relaxedFloat32toFloat16PerformanceScalar = kPerf, .relaxedFloat32toFloat16PerformanceTensor = kPerf, .operandPerformance = nn::nonExtensionOperandPerformance<HalVersion::V1_3>(kPerf), .ifPerformance = kPerf, .whilePerformance = kPerf}; _hidl_cb(V1_3::ErrorStatus::NONE, capabilities); - return hardware::Void(); + return Void(); } - hardware::Return<void> getSupportedOperations_1_3(const HidlModel& model, - getSupportedOperations_1_3_cb cb) override { + Return<void> getSupportedOperations_1_3(const HidlModel& model, + getSupportedOperations_1_3_cb cb) override { if (nn::validateModel(model)) { const size_t count = model.main.operations.size(); std::vector<bool> supported(count); @@ -584,20 +566,19 @@ } else { cb(V1_3::ErrorStatus::INVALID_ARGUMENT, {}); } - return hardware::Void(); + return Void(); } - hardware::Return<V1_3::ErrorStatus> prepareModel_1_3( - const HidlModel& model, V1_1::ExecutionPreference preference, V1_3::Priority priority, - const V1_3::OptionalTimePoint& deadline, - const hardware::hidl_vec<hardware::hidl_handle>& modelCache, - const hardware::hidl_vec<hardware::hidl_handle>& dataCache, const HalCacheToken& token, + Return<V1_3::ErrorStatus> prepareModel_1_3( + const HidlModel& model, ExecutionPreference preference, Priority priority, + const OptionalTimePoint& deadline, const hidl_vec<hidl_handle>& modelCache, + const hidl_vec<hidl_handle>& dataCache, const CacheToken& token, const sp<V1_3::IPreparedModelCallback>& callback) override { // NOTE: We verify that all operations in the model are supported. 
V1_3::ErrorStatus outStatus = V1_3::ErrorStatus::INVALID_ARGUMENT; auto ret = getSupportedOperations_1_3( model, [&outStatus](V1_3::ErrorStatus inStatus, - const hardware::hidl_vec<bool>& supportedOperations) { + const hidl_vec<bool>& supportedOperations) { if (inStatus == V1_3::ErrorStatus::NONE) { if (std::all_of(supportedOperations.begin(), supportedOperations.end(), [](bool v) { return v; })) { @@ -618,43 +599,148 @@ const std::set<Signature> mSignatures; }; -class TestDriverV1_2 : public V1_2::ADevice { +// Like TestDriver, but implementing 1.2 +class TestDriverV1_2 : public V1_2::IDevice { public: TestDriverV1_2(const char* name, std::set<Signature> signatures) - : V1_2::ADevice(new TestDriver(name, std::move(signatures))) {} + : mLatestDriver(new TestDriver(name, std::move(signatures))) {} + Return<void> getCapabilities_1_2(getCapabilities_1_2_cb _hidl_cb) override { + return mLatestDriver->getCapabilities_1_2(_hidl_cb); + } + Return<void> getSupportedOperations_1_2(const V1_2::Model& model, + getSupportedOperations_1_2_cb _hidl_cb) override { + return mLatestDriver->getSupportedOperations_1_2(model, _hidl_cb); + } + Return<V1_0::ErrorStatus> prepareModel_1_2( + const V1_2::Model& model, ExecutionPreference preference, + const hidl_vec<hidl_handle>& modelCache, const hidl_vec<hidl_handle>& dataCache, + const CacheToken& token, + const sp<V1_2::IPreparedModelCallback>& actualCallback) override { + return mLatestDriver->prepareModel_1_2(model, preference, modelCache, dataCache, token, + actualCallback); + } + Return<void> getVersionString(getVersionString_cb _hidl_cb) override { + return mLatestDriver->getVersionString(_hidl_cb); + } + Return<void> getType(getType_cb _hidl_cb) override { return mLatestDriver->getType(_hidl_cb); } + Return<void> getSupportedExtensions(getSupportedExtensions_cb _hidl_cb) { + return mLatestDriver->getSupportedExtensions(_hidl_cb); + } + Return<void> getNumberOfCacheFilesNeeded(getNumberOfCacheFilesNeeded_cb _hidl_cb) { + return 
mLatestDriver->getNumberOfCacheFilesNeeded(_hidl_cb); + } + Return<V1_0::ErrorStatus> prepareModelFromCache( + const hidl_vec<hidl_handle>& modelCache, const hidl_vec<hidl_handle>& dataCache, + const CacheToken& token, const sp<V1_2::IPreparedModelCallback>& callback) { + return mLatestDriver->prepareModelFromCache(modelCache, dataCache, token, callback); + } + Return<void> getCapabilities_1_1(getCapabilities_1_1_cb _hidl_cb) override { + return mLatestDriver->getCapabilities_1_1(_hidl_cb); + } + Return<void> getSupportedOperations_1_1(const V1_1::Model& model, + getSupportedOperations_1_1_cb _hidl_cb) override { + return mLatestDriver->getSupportedOperations_1_1(model, _hidl_cb); + } + Return<V1_0::ErrorStatus> prepareModel_1_1( + const V1_1::Model& model, ExecutionPreference preference, + const sp<V1_0::IPreparedModelCallback>& actualCallback) override { + return mLatestDriver->prepareModel_1_1(model, preference, actualCallback); + } + Return<DeviceStatus> getStatus() override { return mLatestDriver->getStatus(); } + Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override { + return mLatestDriver->getCapabilities(_hidl_cb); + } + Return<void> getSupportedOperations(const V1_0::Model& model, + getSupportedOperations_cb _hidl_cb) override { + return mLatestDriver->getSupportedOperations(model, _hidl_cb); + } + Return<V1_0::ErrorStatus> prepareModel( + const V1_0::Model& model, + const sp<V1_0::IPreparedModelCallback>& actualCallback) override { + return mLatestDriver->prepareModel(model, actualCallback); + } + + private: + const sp<V1_3::IDevice> mLatestDriver; }; -class TestDriverV1_1 : public V1_1::ADevice { +// Like TestDriver, but implementing 1.1 +class TestDriverV1_1 : public V1_1::IDevice { public: TestDriverV1_1(const char* name, std::set<Signature> signatures) - : V1_1::ADevice(new TestDriver(name, std::move(signatures))) {} + : mLatestDriver(new TestDriver(name, std::move(signatures))) {} + Return<void> 
getCapabilities_1_1(getCapabilities_1_1_cb _hidl_cb) override { + return mLatestDriver->getCapabilities_1_1(_hidl_cb); + } + Return<void> getSupportedOperations_1_1(const V1_1::Model& model, + getSupportedOperations_1_1_cb _hidl_cb) override { + return mLatestDriver->getSupportedOperations_1_1(model, _hidl_cb); + } + Return<V1_0::ErrorStatus> prepareModel_1_1( + const V1_1::Model& model, ExecutionPreference preference, + const sp<V1_0::IPreparedModelCallback>& actualCallback) override { + return mLatestDriver->prepareModel_1_1(model, preference, actualCallback); + } + Return<DeviceStatus> getStatus() override { return mLatestDriver->getStatus(); } + Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override { + return mLatestDriver->getCapabilities(_hidl_cb); + } + Return<void> getSupportedOperations(const V1_0::Model& model, + getSupportedOperations_cb _hidl_cb) override { + return mLatestDriver->getSupportedOperations(model, _hidl_cb); + } + Return<V1_0::ErrorStatus> prepareModel( + const V1_0::Model& model, + const sp<V1_0::IPreparedModelCallback>& actualCallback) override { + return mLatestDriver->prepareModel(model, actualCallback); + } + + private: + const sp<V1_3::IDevice> mLatestDriver; }; -class TestDriverV1_0 : public V1_0::ADevice { +// Like TestDriver, but implementing 1.0 +class TestDriverV1_0 : public V1_0::IDevice { public: TestDriverV1_0(const char* name, std::set<Signature> signatures) - : V1_0::ADevice(new TestDriver(name, std::move(signatures))) {} + : mLatestDriver(new TestDriver(name, std::move(signatures))) {} + Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override { + return mLatestDriver->getCapabilities(_hidl_cb); + } + Return<void> getSupportedOperations(const V1_0::Model& model, + getSupportedOperations_cb _hidl_cb) override { + return mLatestDriver->getSupportedOperations(model, _hidl_cb); + } + Return<V1_0::ErrorStatus> prepareModel( + const V1_0::Model& model, + const sp<V1_0::IPreparedModelCallback>& 
actualCallback) override { + return mLatestDriver->prepareModel(model, actualCallback); + } + Return<DeviceStatus> getStatus() override { return mLatestDriver->getStatus(); } + + private: + const sp<V1_3::IDevice> mLatestDriver; }; -SharedDevice RandomPartitioningTest::makeTestDriver(HalVersion version, const char* name, - std::set<Signature> signatures) { +V1_0::IDevice* RandomPartitioningTest::makeTestDriver(HalVersion version, const char* name, + std::set<Signature> signatures) { switch (version) { case HalVersion::V1_0: - return nn::makeSharedDevice(name, new TestDriverV1_0(name, std::move(signatures))); + return new TestDriverV1_0(name, std::move(signatures)); case HalVersion::V1_1: - return nn::makeSharedDevice(name, new TestDriverV1_1(name, std::move(signatures))); + return new TestDriverV1_1(name, std::move(signatures)); case HalVersion::V1_2: - return nn::makeSharedDevice(name, new TestDriverV1_2(name, std::move(signatures))); + return new TestDriverV1_2(name, std::move(signatures)); case HalVersion::V1_3: - return nn::makeSharedDevice(name, new TestDriver(name, std::move(signatures))); + return new TestDriver(name, std::move(signatures)); default: ADD_FAILURE() << "Unexpected HalVersion " << static_cast<int32_t>(version); return nullptr; } } -INSTANTIATE_TEST_SUITE_P(Seed, RandomPartitioningTest, - ::testing::Range(kFirstSeed, kFirstSeed + kNumTestCases)); +INSTANTIATE_TEST_CASE_P(Seed, RandomPartitioningTest, + ::testing::Range(kFirstSeed, kFirstSeed + kNumTestCases)); TEST_P(RandomPartitioningTest, Test) { LOG(INFO) << "RandomPartitioningTest: GetParam() = " << GetParam(); @@ -665,14 +751,7 @@ const unsigned problemSize = 1 + randUInt(kMaxProblemSize); const WrapperOperandType problemType(WrapperType::TENSOR_FLOAT32, {problemSize, problemSize}); - const WrapperOperandType unknownDimensionsTypes[] = { - {WrapperType::TENSOR_FLOAT32, {}}, - {WrapperType::TENSOR_FLOAT32, {0, 0}}, - {WrapperType::TENSOR_FLOAT32, {0, problemSize}}, - 
{WrapperType::TENSOR_FLOAT32, {problemSize, 0}}, - }; - const unsigned kUnknownDimensionsTypesCount = - sizeof(unknownDimensionsTypes) / sizeof(unknownDimensionsTypes[0]); + const WrapperOperandType unknownDimensionsType(WrapperType::TENSOR_FLOAT32, {0, 0}); static const WrapperOperandType activationFunctionType(WrapperType::INT32, {}); @@ -693,21 +772,10 @@ // joining disjoint subgraphs rather than by forcing a root. const bool forceCommonRoot = (randFrac() < 0.75); - auto computeMode = WrapperExecution::getComputeMode(); - // We check randFrac() independent of compute mode, because we don't want - // the random number sequence to change depending on compute mode: Compute - // mode should only affect how we perform the inference, not how we build the - // Model, the Compilation, or the Execution. - if (randFrac() < 0.5 && computeMode == WrapperExecution::ComputeMode::ASYNC) { - computeMode = WrapperExecution::ComputeMode::FENCED; - } - TestModel model; std::vector<uint32_t> modelInputs; std::vector<uint32_t> modelOutputs; - std::set<uint32_t> operandsWithUnknownDimensions; - // Each region in weights is a problem-sized 2-D TENSOR_FLOAT32. TestMemories weights; @@ -735,10 +803,9 @@ // operations). unsigned rootOperationCount = 0; - // Track whether we added operands with unknown dimensions. In this case, + // Track if we added operands with unknown dimensions. In this case, // partitioned compilation will fail if such an operand is read in a - // different partition than it is written, and the partition that does the - // writing is scheduled on a pre-HAL 1.2 (pre-Android Q) device. + // different partition than it is written. bool hasUnknownDimensions = false; // Generate operations. 
@@ -928,30 +995,19 @@ // OUTPUTS ///////////////////////////////////////////////////////////////////////////////// std::vector<uint32_t> operationOutputs(operationPattern.mNumOutputs); - std::generate( - operationOutputs.begin(), operationOutputs.end(), - [&operandsWithUnknownDimensions, &model, &problemType, &unknownDimensionsTypes, - &hasUnknownDimensions, allowUnknownDimensions, this] { - // Before the fix for http://b/132458982, 3% unknowns causes - // ~35% of partitionings to fail. After the fix, 3% - // unknowns causes ~3% of partitionings to fail. (This is - // determined by removing the fallback code and noting the - // number of failures.) - if (allowUnknownDimensions && randFrac() < 0.03) { - hasUnknownDimensions = true; - uint32_t opndIdx = model.addOperand( - &unknownDimensionsTypes[randUInt(kUnknownDimensionsTypesCount)]); - operandsWithUnknownDimensions.insert(opndIdx); - return opndIdx; - } else { - return model.addOperand(&problemType); - } - }); - - // TODO(b/174851714): Fix the partitioner and re-enable these tests. - if (!operandsWithUnknownDimensions.empty()) { - GTEST_SKIP() << "Skipping due to b/174851714"; - } + std::generate(operationOutputs.begin(), operationOutputs.end(), + [&model, &problemType, &unknownDimensionsType, &hasUnknownDimensions, + allowUnknownDimensions, this] { + // 3% unknowns causes ~35% of partitionings to fail + // (determined by commenting out the fallback code, + // running tests and noting number of failures). 
+ if (allowUnknownDimensions && randFrac() < 0.03) { + hasUnknownDimensions = true; + return model.addOperand(&unknownDimensionsType); + } else { + return model.addOperand(&problemType); + } + }); // OPERATION /////////////////////////////////////////////////////////////////////////////// @@ -1034,21 +1090,6 @@ const auto& outputs = model.getOperationOutputs(randUInt(model.operationCount())); modelOutputs.push_back(outputs[randUInt(outputs.size())]); } - if (computeMode == WrapperExecution::ComputeMode::FENCED) { - if (std::any_of(modelOutputs.begin(), modelOutputs.end(), - [&operandsWithUnknownDimensions](uint32_t opndIdx) { - return operandsWithUnknownDimensions.count(opndIdx) != 0; - })) { - // Workaround for http://b/162980246: Fenced execution is documented - // as requiring model outputs to have fully specified dimensions, - // either from Model or from Execution, but its implementation - // requires this to come from Model. This test only guarantees that - // they have fully specified dimensions from Execution. So in the - // case of a Model where some output does not have fully specified - // dimensions, perform asynchronous execution instead. - computeMode = WrapperExecution::ComputeMode::ASYNC; - } - } model.identifyInputsAndOutputs(modelInputs, modelOutputs); #ifdef VERBOSE @@ -1110,119 +1151,43 @@ << std::endl; #endif auto device = DeviceManager::forTest_makeDriverDevice( - makeTestDriver(actualHalVersion, name.c_str(), signaturesForThisDriver)); + name, makeTestDriver(actualHalVersion, name.c_str(), signaturesForThisDriver)); devices.push_back(device); } // CPU fallback device devices.push_back(DeviceManager::getCpuDevice()); // Partitioned compilation. - // - // If a test case has both (a) unknown intermediate operand sizes and - // (b) partitions scheduled on pre-HAL 1.2 (pre-Android Q) devices, fallback - // is needed if the non-fallback partitioning fails. 
- // - // The issue is that prior to HAL 1.2, an output operand must have a known - // size provided either in the Model or in the Request; and in the case of - // partitioning, an intermediate operand of the original model that becomes - // an output operand of a partition won't have a known size provided in the - // Request. - // - // If a test case has a step model with no inputs or no outputs, fallback is needed. - // This is because our HAL specification requires a model to have at least one - // input and one output. - // - // If a fallback is needed, we retry the compilation with a fallback and require - // the fallback to succeed. Otherwise, we require the partitioning to succeed - // without CPU fallback. + // For test cases without unknown intermediate operand sizes we require the + // partitioning to succeed without CPU fallback. With unknown sizes we + // retry with a fallback if the non-fallback partitioning fails and require + // the fallback to succeed. TestCompilation cNoFallback(&model, devices); TestCompilation cWithFallback(&model, devices); + TestCompilation* c2 = nullptr; ASSERT_EQ(cNoFallback.setPartitioning(DeviceManager::kPartitioningWithoutFallback), Result::NO_ERROR); auto compilationResult = cNoFallback.finish(); - const bool fallbackNeededForDynamicTemporaries = - compilationResult == Result::OP_FAILED && hasUnknownDimensions && - cNoFallback.getExecutionPlan().hasDynamicTemporaries() && - std::any_of(devices.begin(), devices.end(), [](const std::shared_ptr<Device>& device) { - return device->getFeatureLevel() < nn::kHalVersionV1_2ToApi.featureLevel; - }); - const bool fallbackNeededForStepModelWithNoInputsOrNoOutputs = - cNoFallback.getExecutionPlan().forTest_hasStepModelWithNoInputsOrNoOutputs(); - const bool fallbackNeeded = fallbackNeededForDynamicTemporaries || - fallbackNeededForStepModelWithNoInputsOrNoOutputs; - if (fallbackNeeded) { - ASSERT_EQ(compilationResult, Result::OP_FAILED); - + if (hasUnknownDimensions && 
compilationResult == Result::OP_FAILED && + cNoFallback.getExecutionPlan().forTest_hasStepModelOutputsOfUnknownSize()) { ASSERT_EQ(cWithFallback.setPartitioning(DeviceManager::kPartitioningWithFallback), Result::NO_ERROR); - compilationResult = cWithFallback.finish(); - ASSERT_EQ(compilationResult, Result::NO_ERROR); + ASSERT_EQ(cWithFallback.finish(), Result::NO_ERROR); ASSERT_EQ(cWithFallback.getExecutionPlan().forTest_getKind(), ExecutionPlan::Kind::SIMPLE); ASSERT_EQ(cWithFallback.getExecutionPlan().forTest_simpleGetDevice(), DeviceManager::getCpuDevice()); + c2 = &cWithFallback; } else { ASSERT_EQ(compilationResult, Result::NO_ERROR); - - const ExecutionPlan& plan = cNoFallback.getExecutionPlan(); - if (signaturesForDriver.size() == 1) { - ASSERT_EQ(plan.forTest_getKind(), ExecutionPlan::Kind::SIMPLE); - ASSERT_TRUE(plan.forTest_simpleGetDevice() != DeviceManager::getCpuDevice()); - } else { - ASSERT_EQ(plan.forTest_getKind(), ExecutionPlan::Kind::COMPOUND); - auto stepToDeviceId = [](const std::shared_ptr<LogicalStep>& step) { - return step->executionStep()->getDevice(); - }; - std::set<decltype(stepToDeviceId(plan.forTest_compoundGetSteps()[0]))> deviceSet; - for (const auto& step : plan.forTest_compoundGetSteps()) { - deviceSet.insert(stepToDeviceId(step)); - } - // TODO(b/178517567): Figure out why we sometimes have 1 more - // signature than we have devices -- this means that we've scheduled - // one or more operations onto the CPU fallback device, which is not - // something we ever expect to do. - ASSERT_TRUE(deviceSet.size() == signaturesForDriver.size() || - deviceSet.size() == signaturesForDriver.size() + 1); - } + c2 = &cNoFallback; } - TestCompilation& c2 = (fallbackNeeded ? 
cWithFallback : cNoFallback); -#ifdef TRACE_DYNTEMP - { - const ExecutionPlan& plan = c2.getExecutionPlan(); - const size_t dynamicTemporaryCount = plan.forTest_flatGetDynamicTemporaries().size(); - std::cout << "TRACE_DYNTEMP: dynamic temporary count = " << dynamicTemporaryCount - << std::endl; - if (plan.forTest_getKind() == ExecutionPlan::Kind::COMPOUND) { - size_t stepsWithModelOutputsThatAreDownstreamInputs = 0; - size_t countOfModelOutputsThatAreDownstreamInputs = 0; - for (const auto& step : plan.forTest_compoundGetSteps()) { - if (const size_t count = step->executionStep() - ->getModelOutputsThatAreDownstreamInputs() - .size()) { - ++stepsWithModelOutputsThatAreDownstreamInputs; - countOfModelOutputsThatAreDownstreamInputs += count; - } - } - if (countOfModelOutputsThatAreDownstreamInputs != 0) { - std::cout << "TRACE_DYNTEMP: model outputs that are downstream inputs: " - << countOfModelOutputsThatAreDownstreamInputs << " / " - << modelOutputs.size() << ", over " - << stepsWithModelOutputsThatAreDownstreamInputs << " / " - << plan.forTest_compoundGetSteps().size() << " steps" << std::endl; - EXPECT_LE(countOfModelOutputsThatAreDownstreamInputs, modelOutputs.size()); - } - } else { - EXPECT_EQ(dynamicTemporaryCount, size_t(0)) - << "Only COMPOUND plan should have dynamic temporaries"; - } - } -#endif #ifdef VERBOSE { std::cout << "signatures = " << signatures.size() << ", devices = " << devices.size() << std::endl; // TODO: When dumping steps, include non-ExecutionSteps. 
- const ExecutionPlan& plan = c2.getExecutionPlan(); + const ExecutionPlan& plan = c2->getExecutionPlan(); switch (plan.forTest_getKind()) { case ExecutionPlan::Kind::SIMPLE: std::cout << "plan: simple" << std::endl; @@ -1256,10 +1221,10 @@ #endif // For execution: - // - create golden inputs (one long vector) and golden output value - // - golden inputs will be copied to actual inputs before each + // - create master inputs (one long vector) and master output value + // - master inputs will be copied to actual inputs before each // of the two executions - // - golden output will be used to fill actual outputs before each + // - master output will be used to fill actual outputs before each // of the two executions // - create actual inputs and outputs // - first execution (non-partitioned) @@ -1278,15 +1243,15 @@ // versus partitioned execution. Similarly, execution behavior // should not be dependent on the outputs; but we'll initialize the // outputs anyway. - std::vector<float> goldenInputs(problemSize * problemSize * model.inputCount()); - std::generate(goldenInputs.begin(), goldenInputs.end(), [this] { return randFrac(); }); + std::vector<float> masterInputs(problemSize * problemSize * model.inputCount()); + std::generate(masterInputs.begin(), masterInputs.end(), [this] { return randFrac(); }); #ifdef VERBOSE { std::cout << "flat inputs = "; - dump(goldenInputs.begin(), goldenInputs.end()); + dump(masterInputs.begin(), masterInputs.end()); } #endif - const float goldenOutput = randFrac(); + const float masterOutput = randFrac(); // Create the memory for the actual inputs and outputs. struct InputOutputDescriptor { @@ -1336,21 +1301,21 @@ // Function to set up actual inputs and outputs (initializing them // and telling the WrapperExecution about them). 
- auto prepareForExecution = [&model, &ioDescriptors, &ioMemories, &goldenInputs, &goldenOutput, + auto prepareForExecution = [&model, &ioDescriptors, &ioMemories, &masterInputs, &masterOutput, problemSize, &problemType](WrapperExecution* e) { uint32_t inputIndex = 0, outputIndex = 0; for (auto& desc : ioDescriptors) { if (desc.getLocation() == InputOutputDescriptor::VECTOR) { if (desc.mKind == InputOutputDescriptor::INPUT) { const size_t inputOffset = inputIndex * problemSize * problemSize; - std::copy(goldenInputs.begin() + inputOffset, - goldenInputs.begin() + inputOffset + problemSize * problemSize, + std::copy(masterInputs.begin() + inputOffset, + masterInputs.begin() + inputOffset + problemSize * problemSize, desc.mVector.begin()); e->setInput(inputIndex++, desc.mVector.data(), desc.mVector.size() * sizeof(float)); } else { std::fill(desc.mVector.begin(), - desc.mVector.begin() + problemSize * problemSize, goldenOutput); + desc.mVector.begin() + problemSize * problemSize, masterOutput); e->setOutput(outputIndex++, desc.mVector.data(), desc.mVector.size() * sizeof(float), &problemType.operandType); } @@ -1362,12 +1327,12 @@ CHECK(length == problemSize * problemSize * sizeof(float)); if (desc.mKind == InputOutputDescriptor::INPUT) { const size_t inputOffset = inputIndex * problemSize * problemSize; - std::copy(goldenInputs.begin() + inputOffset, - goldenInputs.begin() + inputOffset + problemSize * problemSize, + std::copy(masterInputs.begin() + inputOffset, + masterInputs.begin() + inputOffset + problemSize * problemSize, region); e->setInputFromMemory(inputIndex++, memory, offset, length); } else { - std::fill(region, region + problemSize * problemSize, goldenOutput); + std::fill(region, region + problemSize * problemSize, masterOutput); e->setOutputFromMemory(outputIndex++, memory, offset, length, &problemType.operandType); } @@ -1380,7 +1345,7 @@ // Non-partitioned execution. 
WrapperExecution e(&c); ASSERT_NO_FATAL_FAILURE(prepareForExecution(&e)); - ASSERT_EQ(e.compute(computeMode), Result::NO_ERROR); + ASSERT_EQ(e.compute(), Result::NO_ERROR); // Copy the outputs of the non-partitioned execution to a save area. std::vector<float> nonPartitionedOutputs(problemSize * problemSize * model.outputCount()); @@ -1411,9 +1376,9 @@ } // Partitioned execution. - WrapperExecution e2(&c2); + WrapperExecution e2(c2); ASSERT_NO_FATAL_FAILURE(prepareForExecution(&e2)); - ASSERT_EQ(e2.compute(computeMode), Result::NO_ERROR); + ASSERT_EQ(e2.compute(), Result::NO_ERROR); // Compare the outputs of the partitioned execution to the save // area containing the outpus of the non-partitioned execution.
diff --git a/runtime/test/TestRemoveDefaultArguments.cpp b/runtime/test/TestRemoveDefaultArguments.cpp index efe3497..ac4ad7d 100644 --- a/runtime/test/TestRemoveDefaultArguments.cpp +++ b/runtime/test/TestRemoveDefaultArguments.cpp
@@ -14,8 +14,6 @@ * limitations under the License. */ -#include <SampleDriverPartial.h> -#include <Utils.h> #include <gtest/gtest.h> #include <algorithm> @@ -25,9 +23,10 @@ #include <vector> #include "GeneratedTestUtils.h" -#include "HalUtils.h" #include "Manager.h" +#include "SampleDriverPartial.h" #include "TestNeuralNetworksWrapper.h" +#include "Utils.h" namespace generated_tests::avg_pool_v1_2 { const test_helper::TestModel& get_test_model_nhwc(); @@ -99,6 +98,7 @@ namespace android::nn { namespace { +using namespace hal; using sample_driver::SampleDriverPartial; using Result = test_wrapper::Result; using WrapperOperandType = test_wrapper::OperandType; @@ -113,18 +113,18 @@ public: TestDriver() : SampleDriverPartial(kTestDriverName) {} - hardware::Return<void> getCapabilities_1_3(getCapabilities_1_3_cb cb) override { - cb(V1_3::ErrorStatus::NONE, makeCapabilities(1.0)); - return hardware::Void(); + Return<void> getCapabilities_1_3(getCapabilities_1_3_cb cb) override { + cb(V1_3::ErrorStatus::NONE, {/* Dummy zero-filled capabilities. 
*/}); + return Void(); } void setSupportedInputCount(uint32_t count) { mSupportedInputCount = count; } private: - std::vector<bool> getSupportedOperationsImpl(const V1_3::Model& model) const override { + std::vector<bool> getSupportedOperationsImpl(const Model& model) const override { std::vector<bool> supported(model.main.operations.size()); std::transform(model.main.operations.begin(), model.main.operations.end(), - supported.begin(), [this](const V1_3::Operation& operation) { + supported.begin(), [this](const Operation& operation) { SCOPED_TRACE("operation = " + toString(operation.type)); EXPECT_EQ(operation.inputs.size(), mSupportedInputCount); return operation.inputs.size() == mSupportedInputCount; @@ -142,8 +142,7 @@ GTEST_SKIP(); } mTestDriver = new TestDriver(); - DeviceManager::get()->forTest_registerDevice( - makeSharedDevice(kTestDriverName, mTestDriver)); + DeviceManager::get()->forTest_registerDevice(kTestDriverName, mTestDriver); mTestDevice = getDeviceByName(kTestDriverName); ASSERT_NE(mTestDevice, nullptr); }
diff --git a/runtime/test/TestTrivialModel.cpp b/runtime/test/TestTrivialModel.cpp index 83e8aaa..836e61d 100644 --- a/runtime/test/TestTrivialModel.cpp +++ b/runtime/test/TestTrivialModel.cpp
@@ -14,11 +14,11 @@ * limitations under the License. */ -#include <android-base/scopeguard.h> -#include <gtest/gtest.h> - #include "TestNeuralNetworksWrapper.h" +//#include <android-base/logging.h> +#include <gtest/gtest.h> + using namespace android::nn::test_wrapper; namespace { @@ -32,10 +32,6 @@ protected: virtual void SetUp() {} -#if defined(__ANDROID__) - void testAddTwoWithHardwareBufferInput(uint64_t additionalAhwbUsage); -#endif - const Matrix3x4 matrix1 = {{1.f, 2.f, 3.f, 4.f}, {5.f, 6.f, 7.f, 8.f}, {9.f, 10.f, 11.f, 12.f}}; const Matrix3x4 matrix2 = {{100.f, 200.f, 300.f, 400.f}, {500.f, 600.f, 700.f, 800.f}, @@ -126,27 +122,19 @@ ASSERT_EQ(CompareMatrices(expected2, actual), 0); } -// Hardware buffers are an Android concept, which aren't necessarily -// available on other platforms such as ChromeOS, which also build NNAPI. -#if defined(__ANDROID__) -void TrivialTest::testAddTwoWithHardwareBufferInput(uint64_t additionalAhwbUsage) { +TEST_F(TrivialTest, AddTwoWithHardwareBufferInput) { Model modelAdd2; CreateAddTwoTensorModel(&modelAdd2); - const uint64_t cpuUsage = - AHARDWAREBUFFER_USAGE_CPU_READ_OFTEN | AHARDWAREBUFFER_USAGE_CPU_WRITE_OFTEN; AHardwareBuffer_Desc desc{ .width = sizeof(matrix1), .height = 1, .layers = 1, .format = AHARDWAREBUFFER_FORMAT_BLOB, - .usage = cpuUsage | additionalAhwbUsage, + .usage = AHARDWAREBUFFER_USAGE_CPU_READ_OFTEN | AHARDWAREBUFFER_USAGE_CPU_WRITE_OFTEN, }; AHardwareBuffer* matrix1Buffer = nullptr; ASSERT_EQ(AHardwareBuffer_allocate(&desc, &matrix1Buffer), 0); - auto allocateGuard = android::base::make_scope_guard( - [matrix1Buffer]() { AHardwareBuffer_release(matrix1Buffer); }); - Memory matrix1Memory(matrix1Buffer); ASSERT_TRUE(matrix1Memory.isValid()); @@ -163,7 +151,7 @@ // Set the value for matrix1Buffer. 
void* bufferPtr = nullptr; - ASSERT_EQ(AHardwareBuffer_lock(matrix1Buffer, cpuUsage, -1, NULL, &bufferPtr), 0); + ASSERT_EQ(AHardwareBuffer_lock(matrix1Buffer, desc.usage, -1, NULL, &bufferPtr), 0); memcpy((uint8_t*)bufferPtr, matrix1, sizeof(matrix1)); int synFenceFd = -1; ASSERT_EQ(AHardwareBuffer_unlock(matrix1Buffer, &synFenceFd), 0); @@ -187,17 +175,9 @@ } ASSERT_EQ(CompareMatrices(expected2, actual), 0); + AHardwareBuffer_release(matrix1Buffer); } -TEST_F(TrivialTest, AddTwoWithHardwareBufferInput) { - testAddTwoWithHardwareBufferInput(/* no additional usage */ 0u); -} - -TEST_F(TrivialTest, AddTwoWithHardwareBufferInputWithGPUUsage) { - testAddTwoWithHardwareBufferInput(AHARDWAREBUFFER_USAGE_GPU_DATA_BUFFER); -} -#endif - TEST_F(TrivialTest, AddThree) { Model modelAdd3; CreateAddThreeTensorModel(&modelAdd3, matrix3);
diff --git a/runtime/test/TestUnknownDimensions.cpp b/runtime/test/TestUnknownDimensions.cpp index f6a0d4d..1c7eed0 100644 --- a/runtime/test/TestUnknownDimensions.cpp +++ b/runtime/test/TestUnknownDimensions.cpp
@@ -77,16 +77,12 @@ const std::vector<DimensionKind>& seconds); auto ioValues = Combine(ioDimensionValues, ioDimensionValues); auto constantValues = Combine(constantDimensionValues, constantDimensionValues); -std::vector<Execution::ComputeMode> computeModes = { - Execution::ComputeMode::SYNC, - Execution::ComputeMode::FENCED}; class UnknownDimensionsTest : public ::testing::TestWithParam<OperandParams> { protected: template <class T, Type TensorType> void TestOne(const OperandParams& paramsForInput0, const OperandParams& paramsForInput1, - const OperandParams& paramsForConst, const OperandParams& paramsForOutput, - Execution::ComputeMode computeMode); + const OperandParams& paramsForConst, const OperandParams& paramsForOutput); template <class T, Type TensorType> void TestAll(); @@ -166,8 +162,7 @@ void UnknownDimensionsTest::TestOne(const OperandParams& paramsForInput0, const OperandParams& paramsForInput1, const OperandParams& paramsForConst, - const OperandParams& paramsForOutput, - Execution::ComputeMode computeMode) { + const OperandParams& paramsForOutput) { typedef T IntendedMatrix[INTENDED_SIZE][INTENDED_SIZE]; static const IntendedMatrix ones = {{1, 1, 1}, {1, 1, 1}, {1, 1, 1}}; static const IntendedMatrix twos = {{2, 2, 2}, {2, 2, 2}, {2, 2, 2}}; @@ -298,7 +293,7 @@ Result::NO_ERROR); if (allAreIntendedSizeAtExecution) { - ASSERT_EQ(execution.compute(computeMode), Result::NO_ERROR); + ASSERT_EQ(execution.compute(), Result::NO_ERROR); } else { // There is no contract (yet) for specific errors in NeuralNetworks.h, // so we just assert on not being successful. 
@@ -329,10 +324,8 @@ for (auto paramsForInput1 : ioValues) { for (auto paramsForConst : constantValues) { for (auto paramsForOutput : ioValues) { - for (auto computeMode : computeModes) { - TestOne<T, TensorType>(paramsForInput0, paramsForInput1, paramsForConst, - paramsForOutput, computeMode); - } + TestOne<T, TensorType>(paramsForInput0, paramsForInput1, paramsForConst, + paramsForOutput); } } } @@ -350,6 +343,6 @@ TestAll<_Float16, Type::TENSOR_FLOAT16>(); } -INSTANTIATE_TEST_SUITE_P(UnknownCombinationsTest, UnknownDimensionsTest, - ::testing::ValuesIn(ioValues)); +INSTANTIATE_TEST_CASE_P(UnknownCombinationsTest, UnknownDimensionsTest, + ::testing::ValuesIn(ioValues)); } // end namespace
diff --git a/runtime/test/TestUnspecifiedDimensions.cpp b/runtime/test/TestUnspecifiedDimensions.cpp index 7779439..ff8210a 100644 --- a/runtime/test/TestUnspecifiedDimensions.cpp +++ b/runtime/test/TestUnspecifiedDimensions.cpp
@@ -14,18 +14,15 @@ * limitations under the License. */ +#include "TestNeuralNetworksWrapper.h" + +#include <sys/mman.h> +#include <tuple> +#include <vector> + #include <android-base/macros.h> #include <android/sharedmem.h> #include <gtest/gtest.h> -#include <sys/mman.h> - -#include <memory> -#include <string> -#include <tuple> -#include <utility> -#include <vector> - -#include "TestNeuralNetworksWrapper.h" using namespace android::nn::test_wrapper; @@ -89,27 +86,26 @@ enum class BufferSize { LESS, EQUAL, MORE }; // only used for output buffer size enum class OperandLocation { BUFFER, MEMORY }; // where the operand reside enum class InOutType { INPUT, OUTPUT }; // parameter for setInOut() - // Whether input/output padding is implicitly disabled, enabled, or explicitly disabled - enum class PaddingEnabled { DEFAULT, ENABLED, DISABLED }; class SharedMemoryForTest { public: - SharedMemoryForTest() : memory(nullptr), fd(-1), buffer(nullptr) {} + SharedMemoryForTest() : memory(nullptr), fd(-1), buffer(nullptr), length(0) {} ~SharedMemoryForTest() { if (buffer != nullptr) { - munmap(buffer, kLength); + munmap(buffer, length); } if (fd > -1) { close(fd); } } void initialize(size_t size, const void* data) { - fd = ASharedMemory_create(nullptr, kLength); + length = size; + fd = ASharedMemory_create(nullptr, size); ASSERT_GT(fd, -1); - buffer = (uint8_t*)mmap(nullptr, kLength, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0); + buffer = (uint8_t*)mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0); ASSERT_NE(buffer, nullptr); memcpy(buffer, data, size); - memory = std::make_shared<Memory>(kLength, PROT_READ | PROT_WRITE, fd, 0); + memory = std::make_shared<Memory>(size, PROT_READ | PROT_WRITE, fd, 0); ASSERT_TRUE(memory->isValid()); } const Memory* getMemory() const { return memory.get(); } @@ -120,8 +116,7 @@ std::shared_ptr<Memory> memory; int fd; uint8_t* buffer; - // Always allocate an ashmem of 64 bytes. This is large enough for all use cases. 
- static constexpr size_t kLength = 64; + size_t length; }; std::string toString(SpecificationLevel level) { @@ -163,19 +158,6 @@ } } - std::string toString(PaddingEnabled enabled) { - switch (enabled) { - case PaddingEnabled::DEFAULT: - return "DEFAULT"; - case PaddingEnabled::ENABLED: - return "ENABLED"; - case PaddingEnabled::DISABLED: - return "DISABLED"; - default: - return "UNKNOWN"; - } - } - protected: virtual void SetUp() { uint32_t modelIndex, executionIndex; @@ -185,9 +167,6 @@ executionIndex = kIndex0_Execution; mBadIndexChoices = {kIndexCount, modelIndex, executionIndex}; mOperandLocationChoices = {OperandLocation::BUFFER, OperandLocation::MEMORY}; - mBufferSizeChoices = {BufferSize::LESS, BufferSize::EQUAL, BufferSize::MORE}; - mEnablePaddingChoices = {PaddingEnabled::DEFAULT, PaddingEnabled::ENABLED, - PaddingEnabled::DISABLED}; break; case UnspecifiedOperand::CONST_MANDATORY: modelIndex = kIndex1_Model; @@ -220,8 +199,6 @@ mBadIndexChoices = {kIndexCount, modelIndex, executionIndex}; mOperandLocationChoices = {OperandLocation::BUFFER, OperandLocation::MEMORY}; mBufferSizeChoices = {BufferSize::LESS, BufferSize::EQUAL, BufferSize::MORE}; - mEnablePaddingChoices = {PaddingEnabled::DEFAULT, PaddingEnabled::ENABLED, - PaddingEnabled::DISABLED}; break; default: break; @@ -363,20 +340,13 @@ // Phase 3: Set Execution Input/Output Execution execution(&compilation); - // Enable padding - if (mEnablePadding == PaddingEnabled::ENABLED) { - ASSERT_EQ(execution.enableInputAndOutputPadding(true), Result::NO_ERROR); - } else if (mEnablePadding == PaddingEnabled::DISABLED) { - ASSERT_EQ(execution.enableInputAndOutputPadding(false), Result::NO_ERROR); - } - // Set input0 Result result; T bufferOp0[6] = {1, 2, 3, 4, 5, 6}; SharedMemoryForTest memoryOp0; memoryOp0.initialize(sizeof(bufferOp0), bufferOp0); result = setInOut<T>(&execution, kIndex0_Execution, 0, {kValueA, valueB}, bufferOp0, - &memoryOp0, InOutType::INPUT, mBufferSize); + &memoryOp0, InOutType::INPUT); 
ASSERT_EQ(result, expectSetInput0()); if (result != Result::NO_ERROR) continue; @@ -393,7 +363,7 @@ SharedMemoryForTest memoryOp4; memoryOp4.initialize(sizeof(bufferOp4), bufferOp4); result = setInOut<T>(&execution, kIndex4_Execution, 0, {valueB, kValueA}, bufferOp4, - &memoryOp4, InOutType::OUTPUT, mBufferSize); + &memoryOp4, InOutType::OUTPUT, mOutputBufferSize); ASSERT_EQ(result, expectSetOutput0()); if (result != Result::NO_ERROR) continue; @@ -434,8 +404,6 @@ // - the provided type is not fully specified // - the provided type does not agree with the type set at model construction time // - no type is provided and the type is not fully specified at model construction time - // - the buffer size (length) is less than needed - // - the buffer size (length) is more than needed and padding is not enabled Result expectSetInput0() { const auto kLevel0_Model = mSpecificationLevels[kIndex0_Model]; const auto kLevel0_Execution = mSpecificationLevels[kIndex0_Execution]; @@ -448,28 +416,11 @@ kLevel0_Model != SpecificationLevel::UNSPECIFIED_RANK) { return Result::BAD_DATA; } - if (mBufferSize == BufferSize::LESS) { - return Result::BAD_DATA; - } - if (mEnablePadding != PaddingEnabled::ENABLED && mBufferSize == BufferSize::MORE) { - return Result::BAD_DATA; - } break; case SpecificationLevel::UNSPECIFIED_TYPE: if (kLevel0_Model == SpecificationLevel::UNSPECIFIED_DIM || - kLevel0_Model == SpecificationLevel::UNSPECIFIED_RANK) { - return Result::BAD_DATA; - } - if (mBufferSize == BufferSize::LESS) { - return Result::BAD_DATA; - } - if (mEnablePadding != PaddingEnabled::ENABLED && mBufferSize == BufferSize::MORE) { - return Result::BAD_DATA; - } - // This is the case when the dimension is incorrectly specified in the model. - // With incorrect dimension, the needed size is 2 * 3 = 6 data type size. - // BufferSize::EQUAL (2 * 2 = 4 data type size) cannot provide enough length. 
- if (mBadIndex == kIndex0_Model && mBufferSize == BufferSize::EQUAL) { + kLevel0_Model == SpecificationLevel::UNSPECIFIED_RANK || + mBadIndex == kIndex0_Model) { return Result::BAD_DATA; } break; @@ -504,8 +455,7 @@ // Expect BAD_DATA on output0 for the following cases // - the provided type is less detailed as the type set at model construction time // - the provided type does not agree with the type set at model construction time - // - the buffer size (length) is less than needed - // - the buffer size (length) is more than needed and padding is not enabled + // - the buffer size does not agree with a fully specified type Result expectSetOutput0() { const auto kLevel4_Model = mSpecificationLevels[kIndex4_Model]; const auto kLevel4_Execution = mSpecificationLevels[kIndex4_Execution]; @@ -523,32 +473,17 @@ } break; case SpecificationLevel::FULLY_SPECIFIED: - if ((mBadIndex == kIndex4_Model || mBadIndex == kIndex4_Execution) && - kLevel4_Model != SpecificationLevel::UNSPECIFIED_RANK) { - return Result::BAD_DATA; - } - if (mBufferSize == BufferSize::LESS) { - return Result::BAD_DATA; - } - if (mEnablePadding != PaddingEnabled::ENABLED && mBufferSize == BufferSize::MORE) { + if (((mBadIndex == kIndex4_Model || mBadIndex == kIndex4_Execution) && + kLevel4_Model != SpecificationLevel::UNSPECIFIED_RANK) || + mOutputBufferSize != BufferSize::EQUAL) { return Result::BAD_DATA; } break; case SpecificationLevel::UNSPECIFIED_TYPE: - if (kLevel4_Model == SpecificationLevel::FULLY_SPECIFIED) { - if (mBufferSize == BufferSize::LESS) { - return Result::BAD_DATA; - } - if (mEnablePadding != PaddingEnabled::ENABLED && - mBufferSize == BufferSize::MORE) { - return Result::BAD_DATA; - } - // This is the case when the dimension is incorrectly specified in the model. - // With incorrect dimension, the needed size is 2 * 3 = 6 data type size. - // BufferSize::EQUAL (2 * 2 = 4 data type size) cannot provide enough length. 
- if (mBadIndex == kIndex4_Model && mBufferSize == BufferSize::EQUAL) { - return Result::BAD_DATA; - } + if (kLevel4_Model == SpecificationLevel::FULLY_SPECIFIED && + (mOutputBufferSize != BufferSize::EQUAL || mBadIndex == kIndex4_Model || + mBadIndex == kIndex4_Execution)) { + return Result::BAD_DATA; } break; default: @@ -563,7 +498,7 @@ Result expectCompute() { if (mBadIndex < 8) { return Result::OP_FAILED; - } else if (mBufferSize == BufferSize::LESS) { + } else if (mOutputBufferSize == BufferSize::LESS) { return Result::OUTPUT_INSUFFICIENT_SIZE; } return Result::NO_ERROR; @@ -572,8 +507,7 @@ // Iterate over combinations of // - mBadIndexChoices: which operand has incorrect dimension // - mOperandLocationChoices: where the operand reside, buffer or shared memory - // - mBufferSizeChoices: whether the provided buffer/memory size is sufficient - // - mEnablePaddingChoices: whether input/output memory padding is enabled + // - mBufferSizeChoices: whether the provided output buffer/memory size is sufficient template <typename T, Type TensorType> void TestAll() { SCOPED_TRACE("Model: " + toString(kSpecificationLevelModel)); @@ -581,8 +515,8 @@ mOperandTypes = {TensorType, TensorType, TensorType, Type::TENSOR_INT32, TensorType, TensorType, Type::TENSOR_INT32, TensorType}; for (const auto kBadIndex : mBadIndexChoices) { - mBadIndex = kBadIndex; SCOPED_TRACE("Bad Index: " + std::to_string(mBadIndex)); + mBadIndex = kBadIndex; if (mBadIndex < 8 && (mSpecificationLevels[mBadIndex] == SpecificationLevel::UNSPECIFIED_RANK || mSpecificationLevels[mBadIndex] == SpecificationLevel::UNSPECIFIED_TYPE)) { @@ -591,14 +525,10 @@ for (const auto kOperandLocation : mOperandLocationChoices) { mOperandLocation = kOperandLocation; SCOPED_TRACE("Operand Location: " + toString(mOperandLocation)); - for (const auto kBufferSize : mBufferSizeChoices) { - mBufferSize = kBufferSize; - SCOPED_TRACE("Buffer Size: " + toString(mBufferSize)); - for (const auto kEnablePadding : 
mEnablePaddingChoices) { - mEnablePadding = kEnablePadding; - SCOPED_TRACE("Enable Padding: " + toString(mEnablePadding)); - TestOne<T, TensorType>(); - } + for (const auto kOutputBufferSize : mBufferSizeChoices) { + mOutputBufferSize = kOutputBufferSize; + SCOPED_TRACE("Output Buffer Size: " + toString(mOutputBufferSize)); + TestOne<T, TensorType>(); } } } @@ -616,12 +546,10 @@ std::vector<uint32_t> mBadIndexChoices; std::vector<OperandLocation> mOperandLocationChoices; std::vector<BufferSize> mBufferSizeChoices = {BufferSize::EQUAL}; - std::vector<PaddingEnabled> mEnablePaddingChoices = {PaddingEnabled::DEFAULT}; uint32_t mBadIndex; OperandLocation mOperandLocation; - BufferSize mBufferSize; - PaddingEnabled mEnablePadding; + BufferSize mOutputBufferSize; }; TEST_P(UnspecifiedDimensionsTest, Float32) { @@ -644,31 +572,31 @@ SpecificationLevel::UNSPECIFIED_RANK, SpecificationLevel::UNSPECIFIED_TYPE); static const auto kFullySpecified = testing::Values(SpecificationLevel::FULLY_SPECIFIED); -INSTANTIATE_TEST_SUITE_P(ModelInputTest, UnspecifiedDimensionsTest, - testing::Combine(testing::Values(UnspecifiedOperand::INPUT_MANDATORY), - kAllSpecificationLevelsModel, - kAllSpecificationLevelsExecution)); +INSTANTIATE_TEST_CASE_P(ModelInputTest, UnspecifiedDimensionsTest, + testing::Combine(testing::Values(UnspecifiedOperand::INPUT_MANDATORY), + kAllSpecificationLevelsModel, + kAllSpecificationLevelsExecution)); -INSTANTIATE_TEST_SUITE_P(ConstantParameterTest, UnspecifiedDimensionsTest, - testing::Combine(testing::Values(UnspecifiedOperand::CONST_MANDATORY), - kAllSpecificationLevelsModel, kFullySpecified)); +INSTANTIATE_TEST_CASE_P(ConstantParameterTest, UnspecifiedDimensionsTest, + testing::Combine(testing::Values(UnspecifiedOperand::CONST_MANDATORY), + kAllSpecificationLevelsModel, kFullySpecified)); -INSTANTIATE_TEST_SUITE_P(TemporaryVariableTest, UnspecifiedDimensionsTest, - testing::Combine(testing::Values(UnspecifiedOperand::TEMPORARY_VARIABLE), - 
kAllSpecificationLevelsModel, kFullySpecified)); +INSTANTIATE_TEST_CASE_P(TemporaryVariableTest, UnspecifiedDimensionsTest, + testing::Combine(testing::Values(UnspecifiedOperand::TEMPORARY_VARIABLE), + kAllSpecificationLevelsModel, kFullySpecified)); -INSTANTIATE_TEST_SUITE_P(OptionalConstantTest, UnspecifiedDimensionsTest, - testing::Combine(testing::Values(UnspecifiedOperand::CONST_OPTIONAL), - kAllSpecificationLevelsModel, kFullySpecified)); +INSTANTIATE_TEST_CASE_P(OptionalConstantTest, UnspecifiedDimensionsTest, + testing::Combine(testing::Values(UnspecifiedOperand::CONST_OPTIONAL), + kAllSpecificationLevelsModel, kFullySpecified)); -INSTANTIATE_TEST_SUITE_P(OptionalInputTest, UnspecifiedDimensionsTest, - testing::Combine(testing::Values(UnspecifiedOperand::INPUT_OPTIONAL), - kAllSpecificationLevelsModel, - kAllSpecificationLevelsExecution)); +INSTANTIATE_TEST_CASE_P(OptionalInputTest, UnspecifiedDimensionsTest, + testing::Combine(testing::Values(UnspecifiedOperand::INPUT_OPTIONAL), + kAllSpecificationLevelsModel, + kAllSpecificationLevelsExecution)); -INSTANTIATE_TEST_SUITE_P(ModelOutputTest, UnspecifiedDimensionsTest, - testing::Combine(testing::Values(UnspecifiedOperand::OUTPUT), - kAllSpecificationLevelsModel, - kAllSpecificationLevelsExecution)); +INSTANTIATE_TEST_CASE_P(ModelOutputTest, UnspecifiedDimensionsTest, + testing::Combine(testing::Values(UnspecifiedOperand::OUTPUT), + kAllSpecificationLevelsModel, + kAllSpecificationLevelsExecution)); } // end namespace
diff --git a/runtime/test/TestUpdatability.cpp b/runtime/test/TestUpdatability.cpp deleted file mode 100644 index bd5b517..0000000 --- a/runtime/test/TestUpdatability.cpp +++ /dev/null
@@ -1,49 +0,0 @@ -/* - * Copyright (C) 2021 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include <gtest/gtest.h> - -#include "NeuralNetworks.h" - -class UpdatabilityTest : public ::testing::Test {}; - -TEST_F(UpdatabilityTest, GetFeatureLevel) { - if (__builtin_available(android __NNAPI_FL5_MIN_ANDROID_API__, *)) { - EXPECT_GE(ANeuralNetworks_getRuntimeFeatureLevel(), ANEURALNETWORKS_FEATURE_LEVEL_5); - } else { - GTEST_SKIP(); - } -} - -TEST_F(UpdatabilityTest, DeviceFeatureLevelLowerOrEqualToRuntimeFeatureLevel) { - if (__builtin_available(android __NNAPI_FL5_MIN_ANDROID_API__, *)) { - const int64_t runtimeFeatureLevel = ANeuralNetworks_getRuntimeFeatureLevel(); - uint32_t numDevices = 0; - EXPECT_EQ(ANeuralNetworks_getDeviceCount(&numDevices), ANEURALNETWORKS_NO_ERROR); - for (uint32_t i = 0; i < numDevices; i++) { - SCOPED_TRACE(i); - int64_t featureLevel; - ANeuralNetworksDevice* device; - EXPECT_EQ(ANeuralNetworks_getDevice(i, &device), ANEURALNETWORKS_NO_ERROR); - EXPECT_EQ(ANeuralNetworksDevice_getFeatureLevel(device, &featureLevel), - ANEURALNETWORKS_NO_ERROR); - - EXPECT_LE(featureLevel, runtimeFeatureLevel); - } - } else { - GTEST_SKIP(); - } -}
diff --git a/runtime/test/TestValidateOperations.cpp b/runtime/test/TestValidateOperations.cpp index 846b788..f8d376e 100644 --- a/runtime/test/TestValidateOperations.cpp +++ b/runtime/test/TestValidateOperations.cpp
@@ -15,6 +15,7 @@ */ #include <gmock/gmock.h> +#include <gtest/gtest-death-test.h> #include <gtest/gtest.h> #include <algorithm> @@ -207,8 +208,7 @@ private: std::vector<OperandTypeWithExtraParams> ModifyForRank( - const std::vector<OperandTypeWithExtraParams>& operandsTypeWithParams, - uint32_t newRank) { + const std::vector<OperandTypeWithExtraParams>& operandsTypeWithParams, uint_t newRank) { std::vector<OperandTypeWithExtraParams> result; std::transform(operandsTypeWithParams.cbegin(), operandsTypeWithParams.cend(), std::back_inserter(result), @@ -219,7 +219,7 @@ } OperandTypeWithExtraParams ModifyForRank( - const OperandTypeWithExtraParams& operandTypeWithParams, uint32_t newRank) { + const OperandTypeWithExtraParams& operandTypeWithParams, uint_t newRank) { if (operandTypeWithParams.operandType.dimensionCount == newRank) { return operandTypeWithParams; } @@ -734,7 +734,7 @@ .dimensions = opDimensions, }}; EXPECT_DEATH(TensorRankConstraint::Exactly(3).MutationsWithValidRank({operand}), - ".*(A|a)ssertion.+failed.*"); + ".*assertion.+failed.*"); }; TEST(TensorRankConstraint, ExactlyWillReturnTwoInvalidMutationsWithLowerAndHigherRank) {
diff --git a/runtime/test/TestValidation.cpp b/runtime/test/TestValidation.cpp index 49ab15b..181d399 100644 --- a/runtime/test/TestValidation.cpp +++ b/runtime/test/TestValidation.cpp
@@ -16,9 +16,6 @@ #include <android-base/logging.h> #include <android-base/scopeguard.h> -// android/log.h contains __INTRODUCED_IN() macro and must be included before -// sharedmem.h -#include <android/log.h> #include <android/sharedmem.h> #include <gtest/gtest.h> #include <sys/mman.h> @@ -31,7 +28,6 @@ #include <utility> #include <vector> -#include "AndroidVersionUtil.h" #include "NeuralNetworks.h" #include "NeuralNetworksOEM.h" @@ -336,10 +332,8 @@ addScalarOperand(model); addTensorOperand(model, dimensionsUnspecified); - ASSERT_EQ(ANeuralNetworksModel_setOperandValue(model, 1, &constData, sizeof(float)), - ANEURALNETWORKS_NO_ERROR); - ASSERT_EQ(ANeuralNetworksModel_setOperandValue(model, 2, &actData, sizeof(uint32_t)), - ANEURALNETWORKS_NO_ERROR); + ANeuralNetworksModel_setOperandValue(model, 1, &constData, sizeof(float)); + ANeuralNetworksModel_setOperandValue(model, 2, &actData, sizeof(uint32_t)); uint32_t inList[] = {0, 1, 2}, outList[] = {3}; ASSERT_EQ(ANeuralNetworksModel_addOperation(model, ANEURALNETWORKS_ADD, 3, inList, 1, @@ -584,7 +578,7 @@ EXPECT_EQ(ANeuralNetworksModel_setOperandValue(mModel, 0, nullptr, sizeof(buffer)), ANEURALNETWORKS_UNEXPECTED_NULL); - // This should fail, because buffer is not the size of a float32. + // This should fail, since buffer is not the size of a float32. EXPECT_EQ(ANeuralNetworksModel_setOperandValue(mModel, 0, buffer, sizeof(buffer)), ANEURALNETWORKS_BAD_DATA); @@ -622,11 +616,11 @@ EXPECT_EQ(ANeuralNetworksModel_setOperandValueFromMemory(mModel, 0, nullptr, 0, sizeof(float)), ANEURALNETWORKS_UNEXPECTED_NULL); - // This should fail, because the operand does not exist. + // This should fail, since the operand does not exist. EXPECT_EQ(ANeuralNetworksModel_setOperandValueFromMemory(mModel, -1, memory, 0, sizeof(float)), ANEURALNETWORKS_BAD_DATA); - // This should fail, because memory is not the size of a float32. + // This should fail, since memory is not the size of a float32. 
EXPECT_EQ(ANeuralNetworksModel_setOperandValueFromMemory(mModel, 0, memory, 0, memorySize), ANEURALNETWORKS_BAD_DATA); @@ -634,12 +628,12 @@ EXPECT_EQ(ANeuralNetworksModel_setOperandValueFromMemory(mModel, 1, memory, 0, sizeof(float)), ANEURALNETWORKS_BAD_DATA); - // This should fail, because offset is larger than memorySize. + // This should fail, since offset is larger than memorySize. EXPECT_EQ(ANeuralNetworksModel_setOperandValueFromMemory(mModel, 0, memory, memorySize + 1, sizeof(float)), ANEURALNETWORKS_BAD_DATA); - // This should fail, because requested size is larger than the memory. + // This should fail, since requested size is larger than the memory. EXPECT_EQ(ANeuralNetworksModel_setOperandValueFromMemory(mModel, 0, memory, memorySize - 3, sizeof(float)), ANEURALNETWORKS_BAD_DATA); @@ -650,7 +644,6 @@ ANEURALNETWORKS_BAD_STATE); // close memory - ANeuralNetworksMemory_free(memory); close(memoryFd); } @@ -678,12 +671,10 @@ EXPECT_EQ(ANeuralNetworksMemory_createFromAHardwareBuffer(buffer, &memory), ANEURALNETWORKS_NO_ERROR); - // This should fail, because non-BLOB AHardwareBuffer is not allowed. + // This should fail, since non-BLOB AHardwareBuffer is not allowed. EXPECT_EQ(ANeuralNetworksModel_setOperandValueFromMemory(mModel, 0, memory, 0, sizeof(uint8_t)), ANEURALNETWORKS_BAD_DATA); - // close memory - ANeuralNetworksMemory_free(memory); AHardwareBuffer_release(buffer); } @@ -709,18 +700,16 @@ EXPECT_EQ(ANeuralNetworksMemory_createFromAHardwareBuffer(buffer, &memory), ANEURALNETWORKS_NO_ERROR); - // This should fail, because offset is larger than memorySize. + // This should fail, since offset is larger than memorySize. EXPECT_EQ(ANeuralNetworksModel_setOperandValueFromMemory(mModel, 0, memory, memorySize + 1, sizeof(float)), ANEURALNETWORKS_BAD_DATA); - // This should fail, because requested size is larger than the memory. + // This should fail, since requested size is larger than the memory. 
EXPECT_EQ(ANeuralNetworksModel_setOperandValueFromMemory(mModel, 0, memory, memorySize - 3, sizeof(float)), ANEURALNETWORKS_BAD_DATA); - // close memory - ANeuralNetworksMemory_free(memory); AHardwareBuffer_release(buffer); } @@ -760,7 +749,7 @@ EXPECT_EQ(ANeuralNetworksModel_setOperandValueFromModel(mModel, 0, nullptr), ANEURALNETWORKS_UNEXPECTED_NULL); - // This should fail, because the operand does not exist. + // This should fail, since the operand does not exist. EXPECT_EQ(ANeuralNetworksModel_setOperandValueFromModel(mModel, -1, valueModel), ANEURALNETWORKS_BAD_DATA); @@ -1146,71 +1135,6 @@ ANEURALNETWORKS_BAD_DATA); } -TEST_F(ValidationTestCompilation, GetPreferredMemoryAlignmentAndPadding) { - if (__builtin_available(android __NNAPI_FL5_MIN_ANDROID_API__, *)) { - uint32_t result; - - // The following calls should fail, because the compilation has not been finished. - EXPECT_EQ(ANeuralNetworksCompilation_getPreferredMemoryAlignmentForInput(mCompilation, 0, - &result), - ANEURALNETWORKS_BAD_STATE); - EXPECT_EQ(ANeuralNetworksCompilation_getPreferredMemoryPaddingForInput(mCompilation, 0, - &result), - ANEURALNETWORKS_BAD_STATE); - EXPECT_EQ(ANeuralNetworksCompilation_getPreferredMemoryAlignmentForOutput(mCompilation, 0, - &result), - ANEURALNETWORKS_BAD_STATE); - EXPECT_EQ(ANeuralNetworksCompilation_getPreferredMemoryPaddingForOutput(mCompilation, 0, - &result), - ANEURALNETWORKS_BAD_STATE); - - EXPECT_EQ(ANeuralNetworksCompilation_finish(mCompilation), ANEURALNETWORKS_NO_ERROR); - - // The following calls should fail because of unexpected nullptr. 
- EXPECT_EQ( - ANeuralNetworksCompilation_getPreferredMemoryAlignmentForInput(nullptr, 0, &result), - ANEURALNETWORKS_UNEXPECTED_NULL); - EXPECT_EQ(ANeuralNetworksCompilation_getPreferredMemoryAlignmentForInput(mCompilation, 0, - nullptr), - ANEURALNETWORKS_UNEXPECTED_NULL); - EXPECT_EQ(ANeuralNetworksCompilation_getPreferredMemoryPaddingForInput(nullptr, 0, &result), - ANEURALNETWORKS_UNEXPECTED_NULL); - EXPECT_EQ(ANeuralNetworksCompilation_getPreferredMemoryPaddingForInput(mCompilation, 0, - nullptr), - ANEURALNETWORKS_UNEXPECTED_NULL); - EXPECT_EQ(ANeuralNetworksCompilation_getPreferredMemoryAlignmentForOutput(nullptr, 0, - &result), - ANEURALNETWORKS_UNEXPECTED_NULL); - EXPECT_EQ(ANeuralNetworksCompilation_getPreferredMemoryAlignmentForOutput(mCompilation, 0, - nullptr), - ANEURALNETWORKS_UNEXPECTED_NULL); - EXPECT_EQ( - ANeuralNetworksCompilation_getPreferredMemoryPaddingForOutput(nullptr, 0, &result), - ANEURALNETWORKS_UNEXPECTED_NULL); - EXPECT_EQ(ANeuralNetworksCompilation_getPreferredMemoryPaddingForOutput(mCompilation, 0, - nullptr), - ANEURALNETWORKS_UNEXPECTED_NULL); - - // The following calls should fail, because the index is out of range. 
- const uint32_t invalidIndex = 1000; - EXPECT_EQ(ANeuralNetworksCompilation_getPreferredMemoryAlignmentForInput( - mCompilation, invalidIndex, &result), - ANEURALNETWORKS_BAD_DATA); - EXPECT_EQ(ANeuralNetworksCompilation_getPreferredMemoryPaddingForInput( - mCompilation, invalidIndex, &result), - ANEURALNETWORKS_BAD_DATA); - EXPECT_EQ(ANeuralNetworksCompilation_getPreferredMemoryAlignmentForOutput( - mCompilation, invalidIndex, &result), - ANEURALNETWORKS_BAD_DATA); - EXPECT_EQ(ANeuralNetworksCompilation_getPreferredMemoryPaddingForOutput( - mCompilation, invalidIndex, &result), - ANEURALNETWORKS_BAD_DATA); - - } else { - GTEST_SKIP(); - } -} - // Also see TEST_F(ValidationTestCompilationForDevices_1, CreateExecution) TEST_F(ValidationTestCompilation, CreateExecution) { ANeuralNetworksExecution* execution = nullptr; @@ -1265,9 +1189,6 @@ EXPECT_EQ(ANeuralNetworksExecution_setMeasureTiming(execution, false), ANEURALNETWORKS_BAD_DATA); EXPECT_EQ(ANeuralNetworksExecution_setMeasureTiming(execution, true), ANEURALNETWORKS_BAD_DATA); - - // close memory - ANeuralNetworksExecution_free(execution); } // Also see TEST_F(ValidationTestCompilationForDevices_1, ExecutionTiming) @@ -1277,301 +1198,129 @@ enum class ExecutionType : uint32_t { ASYNC, SYNC, BURST, FENCED }; for (auto executionType : {ExecutionType::ASYNC, ExecutionType::SYNC, ExecutionType::BURST, ExecutionType::FENCED}) { - for (bool explicitlyDisableReusablility : {false, true}) { - SCOPED_TRACE(static_cast<uint32_t>(executionType)); - SCOPED_TRACE(explicitlyDisableReusablility); + SCOPED_TRACE(static_cast<uint32_t>(executionType)); - ANeuralNetworksExecution* execution; - ASSERT_EQ(ANeuralNetworksExecution_create(mCompilation, &execution), - ANEURALNETWORKS_NO_ERROR); + ANeuralNetworksExecution* execution; + ASSERT_EQ(ANeuralNetworksExecution_create(mCompilation, &execution), + ANEURALNETWORKS_NO_ERROR); - if (explicitlyDisableReusablility) { - if (__builtin_available(android __NNAPI_FL5_MIN_ANDROID_API__, 
*)) { - ASSERT_EQ(ANeuralNetworksExecution_setReusable(execution, false), - ANEURALNETWORKS_NO_ERROR); - } else { - ANeuralNetworksExecution_free(execution); - continue; - } - } + float in0[] = {0.0f, 0.0f}, in1[] = {1.0f, 1.0f}, out0[2]; + int in2 = 0; + ASSERT_EQ(ANeuralNetworksExecution_setInput(execution, 0, nullptr, &in0, sizeof(in0)), + ANEURALNETWORKS_NO_ERROR); + ASSERT_EQ(ANeuralNetworksExecution_setInput(execution, 1, nullptr, &in1, sizeof(in1)), + ANEURALNETWORKS_NO_ERROR); + ASSERT_EQ(ANeuralNetworksExecution_setInput(execution, 2, nullptr, &in2, sizeof(in2)), + ANEURALNETWORKS_NO_ERROR); + ASSERT_EQ(ANeuralNetworksExecution_setOutput(execution, 0, nullptr, &out0, sizeof(out0)), + ANEURALNETWORKS_NO_ERROR); + + const size_t memorySize = std::max(sizeof(in0), sizeof(out0)); + int memoryFd = ASharedMemory_create("nnMemory", memorySize); + ASSERT_GT(memoryFd, 0); + ANeuralNetworksMemory* memory; + EXPECT_EQ(ANeuralNetworksMemory_createFromFd(memorySize, PROT_READ | PROT_WRITE, memoryFd, + 0, &memory), + ANEURALNETWORKS_NO_ERROR); + + auto testTooLate = [this, execution, &in0, &out0, memory] { + // Try a bunch of things that are impermissible if the execution has started. + + // Set loop timeout. + ASSERT_EQ(ANeuralNetworksExecution_setLoopTimeout(execution, kShortWaitInNanoseconds), + ANEURALNETWORKS_BAD_STATE); // Set inputs and outputs. 
- float in0[] = {0.0f, 0.0f}, in1[] = {1.0f, 1.0f}, out0[2]; - int in2 = 0; ASSERT_EQ(ANeuralNetworksExecution_setInput(execution, 0, nullptr, &in0, sizeof(in0)), - ANEURALNETWORKS_NO_ERROR); - ASSERT_EQ(ANeuralNetworksExecution_setInput(execution, 1, nullptr, &in1, sizeof(in1)), - ANEURALNETWORKS_NO_ERROR); - ASSERT_EQ(ANeuralNetworksExecution_setInput(execution, 2, nullptr, &in2, sizeof(in2)), - ANEURALNETWORKS_NO_ERROR); + ANEURALNETWORKS_BAD_STATE); ASSERT_EQ( ANeuralNetworksExecution_setOutput(execution, 0, nullptr, &out0, sizeof(out0)), - ANEURALNETWORKS_NO_ERROR); + ANEURALNETWORKS_BAD_STATE); + ASSERT_EQ(ANeuralNetworksExecution_setInputFromMemory(execution, 0, nullptr, memory, 0, + sizeof(in0)), + ANEURALNETWORKS_BAD_STATE); + ASSERT_EQ(ANeuralNetworksExecution_setOutputFromMemory(execution, 0, nullptr, memory, 0, + sizeof(out0)), + ANEURALNETWORKS_BAD_STATE); - const size_t memorySize = std::max(sizeof(in0), sizeof(out0)); - int memoryFd = ASharedMemory_create("nnMemory", memorySize); - ASSERT_GT(memoryFd, 0); - ANeuralNetworksMemory* memory; - EXPECT_EQ(ANeuralNetworksMemory_createFromFd(memorySize, PROT_READ | PROT_WRITE, - memoryFd, 0, &memory), - ANEURALNETWORKS_NO_ERROR); - - auto testTooLate = [this, execution, &in0, &out0, memory] { - // Try a bunch of things that are impermissible if the execution has started. - - // Set loop timeout. - ASSERT_EQ( - ANeuralNetworksExecution_setLoopTimeout(execution, kShortWaitInNanoseconds), - ANEURALNETWORKS_BAD_STATE); - - // Enable/Disable input and output padding. - if (__builtin_available(android __NNAPI_FL5_MIN_ANDROID_API__, *)) { - ASSERT_EQ(ANeuralNetworksExecution_enableInputAndOutputPadding(execution, true), - ANEURALNETWORKS_BAD_STATE); - ASSERT_EQ( - ANeuralNetworksExecution_enableInputAndOutputPadding(execution, false), - ANEURALNETWORKS_BAD_STATE); - } - - // Set inputs and outputs. 
- ASSERT_EQ( - ANeuralNetworksExecution_setInput(execution, 0, nullptr, &in0, sizeof(in0)), - ANEURALNETWORKS_BAD_STATE); - ASSERT_EQ(ANeuralNetworksExecution_setOutput(execution, 0, nullptr, &out0, - sizeof(out0)), + // Reuse for asynchronous execution. + { + ANeuralNetworksEvent* event; + ASSERT_EQ(ANeuralNetworksExecution_startCompute(execution, &event), ANEURALNETWORKS_BAD_STATE); - ASSERT_EQ(ANeuralNetworksExecution_setInputFromMemory(execution, 0, nullptr, memory, - 0, sizeof(in0)), - ANEURALNETWORKS_BAD_STATE); - ASSERT_EQ(ANeuralNetworksExecution_setOutputFromMemory(execution, 0, nullptr, - memory, 0, sizeof(out0)), - ANEURALNETWORKS_BAD_STATE); - - // Set reusable. - if (__builtin_available(android __NNAPI_FL5_MIN_ANDROID_API__, *)) { - ASSERT_EQ(ANeuralNetworksExecution_setReusable(execution, true), - ANEURALNETWORKS_BAD_STATE); - ASSERT_EQ(ANeuralNetworksExecution_setReusable(execution, false), - ANEURALNETWORKS_BAD_STATE); - } - - // Reuse for asynchronous execution. - { - ANeuralNetworksEvent* event; - ASSERT_EQ(ANeuralNetworksExecution_startCompute(execution, &event), - ANEURALNETWORKS_BAD_STATE); - } - - // Reuse for synchronous execution. - ASSERT_EQ(ANeuralNetworksExecution_compute(execution), ANEURALNETWORKS_BAD_STATE); - - // Reuse for burst execution. - { - ANeuralNetworksBurst* burst; - ASSERT_EQ(ANeuralNetworksBurst_create(mCompilation, &burst), - ANEURALNETWORKS_NO_ERROR); - ASSERT_EQ(ANeuralNetworksExecution_burstCompute(execution, burst), - ANEURALNETWORKS_BAD_STATE); - ANeuralNetworksBurst_free(burst); - } - - // Reuse for fenced execution. - { - ANeuralNetworksEvent* event; - ASSERT_EQ(ANeuralNetworksExecution_startComputeWithDependencies( - execution, nullptr, 0, 0, &event), - ANEURALNETWORKS_BAD_STATE); - } - }; - - // Compute. 
- switch (executionType) { - case ExecutionType::ASYNC: { - ANeuralNetworksEvent* event; - ASSERT_EQ(ANeuralNetworksExecution_startCompute(execution, &event), - ANEURALNETWORKS_NO_ERROR); - testTooLate(); - ASSERT_EQ(ANeuralNetworksEvent_wait(event), ANEURALNETWORKS_NO_ERROR); - testTooLate(); - ANeuralNetworksEvent_free(event); - break; - } - case ExecutionType::SYNC: { - ASSERT_EQ(ANeuralNetworksExecution_compute(execution), - ANEURALNETWORKS_NO_ERROR); - testTooLate(); - break; - } - case ExecutionType::BURST: { - ANeuralNetworksBurst* burst; - ASSERT_EQ(ANeuralNetworksBurst_create(mCompilation, &burst), - ANEURALNETWORKS_NO_ERROR); - ASSERT_EQ(ANeuralNetworksExecution_burstCompute(execution, burst), - ANEURALNETWORKS_NO_ERROR); - testTooLate(); - ANeuralNetworksBurst_free(burst); - break; - } - case ExecutionType::FENCED: { - ANeuralNetworksEvent* event; - ASSERT_EQ(ANeuralNetworksExecution_startComputeWithDependencies( - execution, nullptr, 0, 0, &event), - ANEURALNETWORKS_NO_ERROR); - testTooLate(); - ASSERT_EQ(ANeuralNetworksEvent_wait(event), ANEURALNETWORKS_NO_ERROR); - testTooLate(); - ANeuralNetworksEvent_free(event); - break; - } - default: - FAIL() << "Unreachable"; } - // close memory - ANeuralNetworksExecution_free(execution); - ANeuralNetworksMemory_free(memory); - close(memoryFd); - } - } -} + // Reuse for synchronous execution. + ASSERT_EQ(ANeuralNetworksExecution_compute(execution), ANEURALNETWORKS_BAD_STATE); -static void testConcurrentExecution(bool reusable, ANeuralNetworksCompilation* compilation) { - ASSERT_EQ(ANeuralNetworksCompilation_finish(compilation), ANEURALNETWORKS_NO_ERROR); + // Reuse for burst execution. 
+ { + ANeuralNetworksBurst* burst; + ASSERT_EQ(ANeuralNetworksBurst_create(mCompilation, &burst), + ANEURALNETWORKS_NO_ERROR); + ASSERT_EQ(ANeuralNetworksExecution_burstCompute(execution, burst), + ANEURALNETWORKS_BAD_STATE); + ANeuralNetworksBurst_free(burst); + } - enum class ExecutionType : uint32_t { ASYNC, SYNC, BURST, FENCED }; - const auto compute = [compilation](ExecutionType executionType, - ANeuralNetworksExecution* execution) -> int { + // Reuse for fenced execution. + { + ANeuralNetworksEvent* event; + ASSERT_EQ(ANeuralNetworksExecution_startComputeWithDependencies(execution, nullptr, + 0, 0, &event), + ANEURALNETWORKS_BAD_STATE); + } + }; + + // Compute. switch (executionType) { case ExecutionType::ASYNC: { ANeuralNetworksEvent* event; - int result = ANeuralNetworksExecution_startCompute(execution, &event); - if (result == ANEURALNETWORKS_NO_ERROR) { - result = ANeuralNetworksEvent_wait(event); - } + ASSERT_EQ(ANeuralNetworksExecution_startCompute(execution, &event), + ANEURALNETWORKS_NO_ERROR); + testTooLate(); + ASSERT_EQ(ANeuralNetworksEvent_wait(event), ANEURALNETWORKS_NO_ERROR); + testTooLate(); ANeuralNetworksEvent_free(event); - return result; + break; } case ExecutionType::SYNC: { - return ANeuralNetworksExecution_compute(execution); + ASSERT_EQ(ANeuralNetworksExecution_compute(execution), ANEURALNETWORKS_NO_ERROR); + testTooLate(); + break; } case ExecutionType::BURST: { ANeuralNetworksBurst* burst; - int result = ANeuralNetworksBurst_create(compilation, &burst); - if (result == ANEURALNETWORKS_NO_ERROR) { - result = ANeuralNetworksExecution_burstCompute(execution, burst); - } + ASSERT_EQ(ANeuralNetworksBurst_create(mCompilation, &burst), + ANEURALNETWORKS_NO_ERROR); + ASSERT_EQ(ANeuralNetworksExecution_burstCompute(execution, burst), + ANEURALNETWORKS_NO_ERROR); + testTooLate(); ANeuralNetworksBurst_free(burst); - return result; + break; } case ExecutionType::FENCED: { ANeuralNetworksEvent* event; - int result = 
ANeuralNetworksExecution_startComputeWithDependencies( - execution, nullptr, 0, 0, &event); - if (result == ANEURALNETWORKS_NO_ERROR) { - result = ANeuralNetworksEvent_wait(event); - } - ANeuralNetworksEvent_free(event); - return result; - } - } - }; - - const std::vector<ExecutionType> kExecutionTypes = { - ExecutionType::ASYNC, ExecutionType::SYNC, ExecutionType::BURST, ExecutionType::FENCED}; - for (auto executionType1 : kExecutionTypes) { - for (auto executionType2 : kExecutionTypes) { - SCOPED_TRACE(static_cast<uint32_t>(executionType1)); - SCOPED_TRACE(static_cast<uint32_t>(executionType2)); - - ANeuralNetworksExecution* execution; - ASSERT_EQ(ANeuralNetworksExecution_create(compilation, &execution), - ANEURALNETWORKS_NO_ERROR); - - float in0[] = {0.0f, 0.0f}, in1[] = {1.0f, 1.0f}, out0[2]; - int in2 = 0; - ASSERT_EQ(ANeuralNetworksExecution_setInput(execution, 0, nullptr, &in0, sizeof(in0)), - ANEURALNETWORKS_NO_ERROR); - ASSERT_EQ(ANeuralNetworksExecution_setInput(execution, 1, nullptr, &in1, sizeof(in1)), - ANEURALNETWORKS_NO_ERROR); - ASSERT_EQ(ANeuralNetworksExecution_setInput(execution, 2, nullptr, &in2, sizeof(in2)), - ANEURALNETWORKS_NO_ERROR); - ASSERT_EQ( - ANeuralNetworksExecution_setOutput(execution, 0, nullptr, &out0, sizeof(out0)), - ANEURALNETWORKS_NO_ERROR); - if (__builtin_available(android __NNAPI_FL5_MIN_ANDROID_API__, *)) { - ASSERT_EQ(ANeuralNetworksExecution_setReusable(execution, reusable), + ASSERT_EQ(ANeuralNetworksExecution_startComputeWithDependencies(execution, nullptr, + 0, 0, &event), ANEURALNETWORKS_NO_ERROR); - } else { - if (reusable) { - ANeuralNetworksExecution_free(execution); - return; - } + testTooLate(); + ASSERT_EQ(ANeuralNetworksEvent_wait(event), ANEURALNETWORKS_NO_ERROR); + testTooLate(); + ANeuralNetworksEvent_free(event); + break; } - - // Compute on the same execution concurrently. 
- auto first = std::async(std::launch::async, [compute, executionType1, execution] { - return compute(executionType1, execution); - }); - auto second = std::async(std::launch::async, [compute, executionType2, execution] { - return compute(executionType2, execution); - }); - const int result1 = first.get(); - const int result2 = second.get(); - - // At least one result must be ANEURALNETWORKS_NO_ERROR. One may return - // ANEURALNETWORKS_BAD_STATE if the other is already executing. - EXPECT_TRUE(result1 == ANEURALNETWORKS_BAD_STATE || - result1 == ANEURALNETWORKS_NO_ERROR); - EXPECT_TRUE(result2 == ANEURALNETWORKS_BAD_STATE || - result2 == ANEURALNETWORKS_NO_ERROR); - EXPECT_TRUE(result1 == ANEURALNETWORKS_NO_ERROR || result2 == ANEURALNETWORKS_NO_ERROR); - - // If the execution is not reusable, one result must be ANEURALNETWORKS_BAD_STATE. - if (!reusable) { - EXPECT_TRUE(result1 == ANEURALNETWORKS_BAD_STATE || - result2 == ANEURALNETWORKS_BAD_STATE); - } - - ANeuralNetworksExecution_free(execution); + default: + FAIL() << "Unreachable"; } } } -// Also see TEST_F(ValidationTestBurst, BurstComputeConcurrent) -TEST_F(ValidationTestCompilation, ReusableExecutionConcurrent) { - testConcurrentExecution(/*reusable=*/true, mCompilation); -} -TEST_F(ValidationTestCompilation, NonReusableExecutionConcurrent) { - testConcurrentExecution(/*reusable=*/false, mCompilation); -} - TEST_F(ValidationTestExecution, SetLoopTimeout) { EXPECT_EQ(ANeuralNetworksExecution_setLoopTimeout(nullptr, kShortWaitInNanoseconds), ANEURALNETWORKS_UNEXPECTED_NULL); } -TEST_F(ValidationTestExecution, EnableInputAndOutputPadding) { - if (__builtin_available(android __NNAPI_FL5_MIN_ANDROID_API__, *)) { - EXPECT_EQ(ANeuralNetworksExecution_enableInputAndOutputPadding(nullptr, true), - ANEURALNETWORKS_UNEXPECTED_NULL); - EXPECT_EQ(ANeuralNetworksExecution_enableInputAndOutputPadding(nullptr, false), - ANEURALNETWORKS_UNEXPECTED_NULL); - } else { - GTEST_SKIP(); - } -} - -TEST_F(ValidationTestExecution, 
ExecutionSetReusable) { - if (__builtin_available(android __NNAPI_FL5_MIN_ANDROID_API__, *)) { - EXPECT_EQ(ANeuralNetworksExecution_setReusable(nullptr, true), - ANEURALNETWORKS_UNEXPECTED_NULL); - EXPECT_EQ(ANeuralNetworksExecution_setReusable(nullptr, false), - ANEURALNETWORKS_UNEXPECTED_NULL); - } else { - GTEST_SKIP(); - } -} - TEST_F(ValidationTestExecution, SetInput) { char buffer[20]; EXPECT_EQ(ANeuralNetworksExecution_setInput(nullptr, 0, nullptr, buffer, sizeof(float)), @@ -1579,7 +1328,7 @@ EXPECT_EQ(ANeuralNetworksExecution_setInput(mExecution, 0, nullptr, nullptr, sizeof(float)), ANEURALNETWORKS_UNEXPECTED_NULL); - // This should fail, because memory is not the size of a float32. + // This should fail, since memory is not the size of a float32. EXPECT_EQ(ANeuralNetworksExecution_setInput(mExecution, 0, nullptr, buffer, 20), ANEURALNETWORKS_BAD_DATA); @@ -1591,7 +1340,7 @@ EXPECT_EQ(ANeuralNetworksExecution_setInput(mExecution, -1, nullptr, buffer, sizeof(float)), ANEURALNETWORKS_BAD_DATA); - // These should fail, because the tensor types are invalid. + // These should fail, since the tensor types are invalid. EXPECT_EQ(ANeuralNetworksExecution_setInput(mExecution, 0, &kInvalidTensorType1, buffer, sizeof(float)), ANEURALNETWORKS_BAD_DATA); @@ -1606,21 +1355,6 @@ ANEURALNETWORKS_BAD_STATE); } -TEST_F(ValidationTestExecution, SetInputEnablePadding) { - if (__builtin_available(android __NNAPI_FL5_MIN_ANDROID_API__, *)) { - EXPECT_EQ(ANeuralNetworksExecution_enableInputAndOutputPadding(mExecution, true), - ANEURALNETWORKS_NO_ERROR); - - // This should fail, because length is less than the size of a float32. 
- char buffer[20]; - EXPECT_EQ(ANeuralNetworksExecution_setInput(mExecution, 0, nullptr, buffer, - sizeof(float) - 1), - ANEURALNETWORKS_BAD_DATA); - } else { - GTEST_SKIP(); - } -} - TEST_F(ValidationTestExecution, SetOutput) { char buffer[20]; EXPECT_EQ(ANeuralNetworksExecution_setOutput(nullptr, 0, nullptr, buffer, sizeof(float)), @@ -1628,7 +1362,7 @@ EXPECT_EQ(ANeuralNetworksExecution_setOutput(mExecution, 0, nullptr, nullptr, sizeof(float)), ANEURALNETWORKS_UNEXPECTED_NULL); - // This should fail, because memory is not the size of a float32. + // This should fail, since memory is not the size of a float32. EXPECT_EQ(ANeuralNetworksExecution_setOutput(mExecution, 0, nullptr, buffer, 20), ANEURALNETWORKS_BAD_DATA); @@ -1640,7 +1374,7 @@ EXPECT_EQ(ANeuralNetworksExecution_setOutput(mExecution, -1, nullptr, buffer, sizeof(float)), ANEURALNETWORKS_BAD_DATA); - // These should fail, because the tensor types are invalid. + // These should fail, since the tensor types are invalid. EXPECT_EQ(ANeuralNetworksExecution_setOutput(mExecution, 0, &kInvalidTensorType1, buffer, sizeof(float)), ANEURALNETWORKS_BAD_DATA); @@ -1655,21 +1389,6 @@ ANEURALNETWORKS_BAD_STATE); } -TEST_F(ValidationTestExecution, SetOutputEnablePadding) { - if (__builtin_available(android __NNAPI_FL5_MIN_ANDROID_API__, *)) { - EXPECT_EQ(ANeuralNetworksExecution_enableInputAndOutputPadding(mExecution, true), - ANEURALNETWORKS_NO_ERROR); - - // This should fail, because length is less than the size of a float32. - char buffer[20]; - EXPECT_EQ(ANeuralNetworksExecution_setOutput(mExecution, 0, nullptr, buffer, - sizeof(float) - 1), - ANEURALNETWORKS_BAD_DATA); - } else { - GTEST_SKIP(); - } -} - TEST_F(ValidationTestExecution, SetInputFromMemory) { const size_t memorySize = 20; int memoryFd = ASharedMemory_create("nnMemory", memorySize); @@ -1687,32 +1406,32 @@ sizeof(float)), ANEURALNETWORKS_UNEXPECTED_NULL); - // This should fail, because the operand does not exist. 
+ // This should fail, since the operand does not exist. EXPECT_EQ(ANeuralNetworksExecution_setInputFromMemory(mExecution, 999, nullptr, memory, 0, sizeof(float)), ANEURALNETWORKS_BAD_DATA); - // This should fail, because the operand does not exist. + // This should fail, since the operand does not exist. EXPECT_EQ(ANeuralNetworksExecution_setInputFromMemory(mExecution, -1, nullptr, memory, 0, sizeof(float)), ANEURALNETWORKS_BAD_DATA); - // This should fail, because memory is not the size of a float32. + // This should fail, since memory is not the size of a float32. EXPECT_EQ(ANeuralNetworksExecution_setInputFromMemory(mExecution, 0, nullptr, memory, 0, memorySize), ANEURALNETWORKS_BAD_DATA); - // This should fail, because offset is larger than memorySize. + // This should fail, since offset is larger than memorySize. EXPECT_EQ(ANeuralNetworksExecution_setInputFromMemory(mExecution, 0, nullptr, memory, memorySize + 1, sizeof(float)), ANEURALNETWORKS_BAD_DATA); - // This should fail, because requested size is larger than the memory. + // This should fail, since requested size is larger than the memory. EXPECT_EQ(ANeuralNetworksExecution_setInputFromMemory(mExecution, 0, nullptr, memory, memorySize - 3, sizeof(float)), ANEURALNETWORKS_BAD_DATA); - // These should fail, because the tensor types are invalid. + // These should fail, since the tensor types are invalid. 
EXPECT_EQ(ANeuralNetworksExecution_setInputFromMemory(mExecution, 0, &kInvalidTensorType1, memory, 0, sizeof(float)), ANEURALNETWORKS_BAD_DATA); @@ -1730,37 +1449,9 @@ ANEURALNETWORKS_BAD_STATE); // close memory - ANeuralNetworksMemory_free(memory); close(memoryFd); } -TEST_F(ValidationTestExecution, SetInputFromMemoryEnablePadding) { - if (__builtin_available(android __NNAPI_FL5_MIN_ANDROID_API__, *)) { - const size_t memorySize = 20; - int memoryFd = ASharedMemory_create("nnMemory", memorySize); - ASSERT_GT(memoryFd, 0); - - ANeuralNetworksMemory* memory; - EXPECT_EQ(ANeuralNetworksMemory_createFromFd(memorySize, PROT_READ | PROT_WRITE, memoryFd, - 0, &memory), - ANEURALNETWORKS_NO_ERROR); - - EXPECT_EQ(ANeuralNetworksExecution_enableInputAndOutputPadding(mExecution, true), - ANEURALNETWORKS_NO_ERROR); - - // This should fail, because length is less than the size of a float32. - EXPECT_EQ(ANeuralNetworksExecution_setInputFromMemory(mExecution, 0, nullptr, memory, 0, - sizeof(float) - 1), - ANEURALNETWORKS_BAD_DATA); - - // close memory - ANeuralNetworksMemory_free(memory); - close(memoryFd); - } else { - GTEST_SKIP(); - } -} - TEST_F(ValidationTestExecution, SetInputFromAHardwareBufferBlob) { const size_t memorySize = 20; @@ -1779,21 +1470,21 @@ EXPECT_EQ(ANeuralNetworksMemory_createFromAHardwareBuffer(buffer, &memory), ANEURALNETWORKS_NO_ERROR); - // This should fail, because memory is not the size of a float32. + // This should fail, since memory is not the size of a float32. EXPECT_EQ(ANeuralNetworksExecution_setInputFromMemory(mExecution, 0, nullptr, memory, 0, memorySize), ANEURALNETWORKS_BAD_DATA); - // This should fail, because offset is larger than memorySize. + // This should fail, since offset is larger than memorySize. EXPECT_EQ(ANeuralNetworksExecution_setInputFromMemory(mExecution, 0, nullptr, memory, memorySize + 1, sizeof(float)), ANEURALNETWORKS_BAD_DATA); - // This should fail, because requested size is larger than the memory. 
+ // This should fail, since requested size is larger than the memory. EXPECT_EQ(ANeuralNetworksExecution_setInputFromMemory(mExecution, 0, nullptr, memory, memorySize - 3, sizeof(float)), ANEURALNETWORKS_BAD_DATA); - // These should fail, because the tensor types are invalid. + // These should fail, since the tensor types are invalid. EXPECT_EQ(ANeuralNetworksExecution_setInputFromMemory(mExecution, 0, &kInvalidTensorType1, memory, 0, sizeof(float)), ANEURALNETWORKS_BAD_DATA); @@ -1801,47 +1492,9 @@ memory, 0, sizeof(float)), ANEURALNETWORKS_BAD_DATA); - // close memory - ANeuralNetworksMemory_free(memory); AHardwareBuffer_release(buffer); } -TEST_F(ValidationTestExecution, SetInputFromAHardwareBufferBlobEnablePadding) { - if (__builtin_available(android __NNAPI_FL5_MIN_ANDROID_API__, *)) { - const size_t memorySize = 20; - - AHardwareBuffer_Desc desc{ - .width = memorySize, - .height = 1, - .layers = 1, - .format = AHARDWAREBUFFER_FORMAT_BLOB, - .usage = AHARDWAREBUFFER_USAGE_CPU_READ_OFTEN | - AHARDWAREBUFFER_USAGE_CPU_WRITE_OFTEN, - }; - - AHardwareBuffer* buffer = nullptr; - ASSERT_EQ(AHardwareBuffer_allocate(&desc, &buffer), 0); - - ANeuralNetworksMemory* memory; - EXPECT_EQ(ANeuralNetworksMemory_createFromAHardwareBuffer(buffer, &memory), - ANEURALNETWORKS_NO_ERROR); - - EXPECT_EQ(ANeuralNetworksExecution_enableInputAndOutputPadding(mExecution, true), - ANEURALNETWORKS_NO_ERROR); - - // This should fail, because length is less than the size of a float32. 
- EXPECT_EQ(ANeuralNetworksExecution_setInputFromMemory(mExecution, 0, nullptr, memory, 0, - sizeof(float) - 1), - ANEURALNETWORKS_BAD_DATA); - - // close memory - ANeuralNetworksMemory_free(memory); - AHardwareBuffer_release(buffer); - } else { - GTEST_SKIP(); - } -} - TEST_F(ValidationTestExecution, SetOutputFromMemory) { ANeuralNetworksExecution* execution; EXPECT_EQ(ANeuralNetworksExecution_create(mCompilation, &execution), ANEURALNETWORKS_NO_ERROR); @@ -1862,32 +1515,32 @@ sizeof(float)), ANEURALNETWORKS_UNEXPECTED_NULL); - // This should fail, because the operand does not exist. + // This should fail, since the operand does not exist. EXPECT_EQ(ANeuralNetworksExecution_setOutputFromMemory(execution, 999, nullptr, memory, 0, sizeof(float)), ANEURALNETWORKS_BAD_DATA); - // This should fail, because the operand does not exist. + // This should fail, since the operand does not exist. EXPECT_EQ(ANeuralNetworksExecution_setOutputFromMemory(execution, -1, nullptr, memory, 0, sizeof(float)), ANEURALNETWORKS_BAD_DATA); - // This should fail, because memory is not the size of a float32. + // This should fail, since memory is not the size of a float32. EXPECT_EQ(ANeuralNetworksExecution_setOutputFromMemory(execution, 0, nullptr, memory, 0, memorySize), ANEURALNETWORKS_BAD_DATA); - // This should fail, because offset is larger than memorySize. + // This should fail, since offset is larger than memorySize. EXPECT_EQ(ANeuralNetworksExecution_setOutputFromMemory(execution, 0, nullptr, memory, memorySize + 1, sizeof(float)), ANEURALNETWORKS_BAD_DATA); - // This should fail, because requested size is larger than the memory. + // This should fail, since requested size is larger than the memory. EXPECT_EQ(ANeuralNetworksExecution_setOutputFromMemory(execution, 0, nullptr, memory, memorySize - 3, sizeof(float)), ANEURALNETWORKS_BAD_DATA); - // These should fail, because the tensor types are invalid. + // These should fail, since the tensor types are invalid. 
EXPECT_EQ(ANeuralNetworksExecution_setOutputFromMemory(execution, 0, &kInvalidTensorType1, memory, 0, sizeof(float)), ANEURALNETWORKS_BAD_DATA); @@ -1905,43 +1558,9 @@ ANEURALNETWORKS_BAD_STATE); // close memory - ANeuralNetworksMemory_free(memory); - ANeuralNetworksExecution_free(execution); close(memoryFd); } -TEST_F(ValidationTestExecution, SetOutputFromMemoryEnablePadding) { - if (__builtin_available(android __NNAPI_FL5_MIN_ANDROID_API__, *)) { - ANeuralNetworksExecution* execution; - EXPECT_EQ(ANeuralNetworksExecution_create(mCompilation, &execution), - ANEURALNETWORKS_NO_ERROR); - - const size_t memorySize = 20; - int memoryFd = ASharedMemory_create("nnMemory", memorySize); - ASSERT_GT(memoryFd, 0); - - ANeuralNetworksMemory* memory; - EXPECT_EQ(ANeuralNetworksMemory_createFromFd(memorySize, PROT_READ | PROT_WRITE, memoryFd, - 0, &memory), - ANEURALNETWORKS_NO_ERROR); - - EXPECT_EQ(ANeuralNetworksExecution_enableInputAndOutputPadding(mExecution, true), - ANEURALNETWORKS_NO_ERROR); - - // This should fail, because length is less than the size of a float32. - EXPECT_EQ(ANeuralNetworksExecution_setOutputFromMemory(execution, 0, nullptr, memory, 0, - sizeof(float) - 1), - ANEURALNETWORKS_BAD_DATA); - - // close memory - ANeuralNetworksMemory_free(memory); - ANeuralNetworksExecution_free(execution); - close(memoryFd); - } else { - GTEST_SKIP(); - } -} - TEST_F(ValidationTestExecution, SetOutputFromAHardwareBufferBlob) { const size_t memorySize = 20; @@ -1960,22 +1579,22 @@ EXPECT_EQ(ANeuralNetworksMemory_createFromAHardwareBuffer(buffer, &memory), ANEURALNETWORKS_NO_ERROR); - // This should fail, because memory is not the size of a float32. + // This should fail, since memory is not the size of a float32. EXPECT_EQ(ANeuralNetworksExecution_setOutputFromMemory(mExecution, 0, nullptr, memory, 0, memorySize), ANEURALNETWORKS_BAD_DATA); - // This should fail, because offset is larger than memorySize. + // This should fail, since offset is larger than memorySize. 
EXPECT_EQ(ANeuralNetworksExecution_setOutputFromMemory(mExecution, 0, nullptr, memory, memorySize + 1, sizeof(float)), ANEURALNETWORKS_BAD_DATA); - // This should fail, because requested size is larger than the memory. + // This should fail, since requested size is larger than the memory. EXPECT_EQ(ANeuralNetworksExecution_setOutputFromMemory(mExecution, 0, nullptr, memory, memorySize - 3, sizeof(float)), ANEURALNETWORKS_BAD_DATA); - // These should fail, because the tensor types are invalid. + // These should fail, since the tensor types are invalid. EXPECT_EQ(ANeuralNetworksExecution_setOutputFromMemory(mExecution, 0, &kInvalidTensorType1, memory, 0, sizeof(float)), ANEURALNETWORKS_BAD_DATA); @@ -1983,104 +1602,9 @@ memory, 0, sizeof(float)), ANEURALNETWORKS_BAD_DATA); - // close memory - ANeuralNetworksMemory_free(memory); AHardwareBuffer_release(buffer); } -TEST_F(ValidationTestExecution, SetOutputFromAHardwareBufferBlobEnablePadding) { - if (__builtin_available(android __NNAPI_FL5_MIN_ANDROID_API__, *)) { - const size_t memorySize = 20; - - AHardwareBuffer_Desc desc{ - .width = memorySize, - .height = 1, - .layers = 1, - .format = AHARDWAREBUFFER_FORMAT_BLOB, - .usage = AHARDWAREBUFFER_USAGE_CPU_READ_OFTEN | - AHARDWAREBUFFER_USAGE_CPU_WRITE_OFTEN, - }; - - AHardwareBuffer* buffer = nullptr; - ASSERT_EQ(AHardwareBuffer_allocate(&desc, &buffer), 0); - - ANeuralNetworksMemory* memory; - EXPECT_EQ(ANeuralNetworksMemory_createFromAHardwareBuffer(buffer, &memory), - ANEURALNETWORKS_NO_ERROR); - - EXPECT_EQ(ANeuralNetworksExecution_enableInputAndOutputPadding(mExecution, true), - ANEURALNETWORKS_NO_ERROR); - - // This should fail, because length is less than the size of a float32. 
- EXPECT_EQ(ANeuralNetworksExecution_setOutputFromMemory(mExecution, 0, nullptr, memory, 0, - sizeof(float) - 1), - ANEURALNETWORKS_BAD_DATA); - - // close memory - ANeuralNetworksMemory_free(memory); - AHardwareBuffer_release(buffer); - } else { - GTEST_SKIP(); - } -} - -TEST_F(ValidationTestExecution, EnablePaddingAfterSetInputOutput) { - if (__builtin_available(android __NNAPI_FL5_MIN_ANDROID_API__, *)) { - ANeuralNetworksExecution* execution; - char buffer[20]; - const size_t memorySize = 20; - int memoryFd = ASharedMemory_create("nnMemory", memorySize); - ASSERT_GT(memoryFd, 0); - - ANeuralNetworksMemory* memory; - EXPECT_EQ(ANeuralNetworksMemory_createFromFd(memorySize, PROT_READ | PROT_WRITE, memoryFd, - 0, &memory), - ANEURALNETWORKS_NO_ERROR); - - // Enable padding after setInput. - EXPECT_EQ(ANeuralNetworksExecution_create(mCompilation, &execution), - ANEURALNETWORKS_NO_ERROR); - EXPECT_EQ(ANeuralNetworksExecution_setInput(execution, 0, nullptr, buffer, 8), - ANEURALNETWORKS_NO_ERROR); - EXPECT_EQ(ANeuralNetworksExecution_enableInputAndOutputPadding(execution, true), - ANEURALNETWORKS_BAD_STATE); - ANeuralNetworksExecution_free(execution); - - // Enable padding after setInputFromMemory. - EXPECT_EQ(ANeuralNetworksExecution_create(mCompilation, &execution), - ANEURALNETWORKS_NO_ERROR); - EXPECT_EQ(ANeuralNetworksExecution_setInputFromMemory(execution, 0, nullptr, memory, 0, 8), - ANEURALNETWORKS_NO_ERROR); - EXPECT_EQ(ANeuralNetworksExecution_enableInputAndOutputPadding(execution, true), - ANEURALNETWORKS_BAD_STATE); - ANeuralNetworksExecution_free(execution); - - // Enable padding after setOutput. 
- EXPECT_EQ(ANeuralNetworksExecution_create(mCompilation, &execution), - ANEURALNETWORKS_NO_ERROR); - EXPECT_EQ(ANeuralNetworksExecution_setOutput(execution, 0, nullptr, buffer, 8), - ANEURALNETWORKS_NO_ERROR); - EXPECT_EQ(ANeuralNetworksExecution_enableInputAndOutputPadding(execution, true), - ANEURALNETWORKS_BAD_STATE); - ANeuralNetworksExecution_free(execution); - - // Enable padding after setOutputFromMemory. - EXPECT_EQ(ANeuralNetworksExecution_create(mCompilation, &execution), - ANEURALNETWORKS_NO_ERROR); - EXPECT_EQ(ANeuralNetworksExecution_setOutputFromMemory(execution, 0, nullptr, memory, 0, 8), - ANEURALNETWORKS_NO_ERROR); - EXPECT_EQ(ANeuralNetworksExecution_enableInputAndOutputPadding(execution, true), - ANEURALNETWORKS_BAD_STATE); - ANeuralNetworksExecution_free(execution); - - // close memory - ANeuralNetworksMemory_free(memory); - close(memoryFd); - } else { - GTEST_SKIP(); - } -} - TEST_F(ValidationTestExecutionDeviceMemory, SetInputFromMemory) { ANeuralNetworksMemoryDesc* desc; ASSERT_EQ(ANeuralNetworksMemoryDesc_create(&desc), ANEURALNETWORKS_NO_ERROR); @@ -2249,9 +1773,6 @@ ANEURALNETWORKS_UNEXPECTED_NULL); EXPECT_EQ(ANeuralNetworksExecution_startCompute(execution, nullptr), ANEURALNETWORKS_UNEXPECTED_NULL); - - // close memory - ANeuralNetworksExecution_free(execution); } TEST_F(ValidationTestExecution, EventWait) { @@ -2266,36 +1787,9 @@ } TEST_F(ValidationTest, EventGetSyncFenceFd) { - int syncFd = -100; - EXPECT_EQ(ANeuralNetworksEvent_getSyncFenceFd(nullptr, &syncFd), + int sync_fd = -1; + EXPECT_EQ(ANeuralNetworksEvent_getSyncFenceFd(nullptr, &sync_fd), ANEURALNETWORKS_UNEXPECTED_NULL); - EXPECT_EQ(syncFd, -1); -} - -TEST_F(ValidationTestExecution, EventGetSyncFenceFdFromStartCompute) { - // Create a valid execution and event first. 
- ANeuralNetworksExecution* execution; - EXPECT_EQ(ANeuralNetworksExecution_create(mCompilation, &execution), ANEURALNETWORKS_NO_ERROR); - float input0[] = {1.0f, 1.0f}, input1[] = {2.0f, 2.0f}, output0[2]; - int32_t input2[] = {0}; - EXPECT_EQ(ANeuralNetworksExecution_setInput(execution, 0, nullptr, input0, sizeof(input0)), - ANEURALNETWORKS_NO_ERROR); - EXPECT_EQ(ANeuralNetworksExecution_setInput(execution, 1, nullptr, input1, sizeof(input1)), - ANEURALNETWORKS_NO_ERROR); - EXPECT_EQ(ANeuralNetworksExecution_setInput(execution, 2, nullptr, input2, sizeof(input2)), - ANEURALNETWORKS_NO_ERROR); - EXPECT_EQ(ANeuralNetworksExecution_setOutput(execution, 0, nullptr, output0, sizeof(output0)), - ANEURALNETWORKS_NO_ERROR); - ANeuralNetworksEvent* event = nullptr; - EXPECT_EQ(ANeuralNetworksExecution_startCompute(execution, &event), ANEURALNETWORKS_NO_ERROR); - - // The event from startCompute is not backed by sync fence. - int syncFd = -100; - EXPECT_EQ(ANeuralNetworksEvent_getSyncFenceFd(event, &syncFd), ANEURALNETWORKS_BAD_DATA); - EXPECT_EQ(syncFd, -1); - - ANeuralNetworksEvent_free(event); - ANeuralNetworksExecution_free(execution); } TEST_F(ValidationTestExecution, FencedExecution) { @@ -2320,18 +1814,6 @@ EXPECT_EQ(ANeuralNetworksEvent_getSyncFenceFd(event1, nullptr), ANEURALNETWORKS_UNEXPECTED_NULL); - // The event from startComputeWithDependencie may or may not be backed by a sync fence depending - // on the driver implementation. - int syncFd = -100; - int getSyncFdResult = ANeuralNetworksEvent_getSyncFenceFd(event1, &syncFd); - if (getSyncFdResult == ANEURALNETWORKS_NO_ERROR) { - EXPECT_GE(syncFd, 0); - close(syncFd); - } else { - EXPECT_EQ(getSyncFdResult, ANEURALNETWORKS_BAD_DATA); - EXPECT_EQ(syncFd, -1); - } - // The subsequent execution will wait for the first execution to finish. 
ANeuralNetworksExecution* execution2; ANeuralNetworksEvent* event2 = nullptr; @@ -2371,7 +1853,7 @@ ANEURALNETWORKS_NO_ERROR); uint32_t rank, dims[4], expectedRank = 1, expectedDims = 2; - // This should fail, because the execution has not yet started to compute. + // This should fail, since the execution has not yet started to compute. EXPECT_EQ(ANeuralNetworksExecution_getOutputOperandRank(execution, 0, &rank), ANEURALNETWORKS_BAD_STATE); EXPECT_EQ(ANeuralNetworksExecution_getOutputOperandDimensions(execution, 0, dims), @@ -2381,7 +1863,7 @@ EXPECT_EQ(ANeuralNetworksExecution_startCompute(execution, &event), ANEURALNETWORKS_NO_ERROR); EXPECT_EQ(ANeuralNetworksEvent_wait(event), ANEURALNETWORKS_NO_ERROR); - // This should fail, because unexpected nullptr. + // This should fail, since unexpected nullptr. EXPECT_EQ(ANeuralNetworksExecution_getOutputOperandRank(nullptr, 0, &rank), ANEURALNETWORKS_UNEXPECTED_NULL); EXPECT_EQ(ANeuralNetworksExecution_getOutputOperandDimensions(nullptr, 0, dims), @@ -2391,7 +1873,7 @@ EXPECT_EQ(ANeuralNetworksExecution_getOutputOperandDimensions(execution, 0, nullptr), ANEURALNETWORKS_UNEXPECTED_NULL); - // This should fail, because the operand does not exist. + // This should fail, since the operand does not exist. EXPECT_EQ(ANeuralNetworksExecution_getOutputOperandRank(execution, -1, &rank), ANEURALNETWORKS_BAD_DATA); EXPECT_EQ(ANeuralNetworksExecution_getOutputOperandRank(execution, 999, &rank), @@ -2407,10 +1889,6 @@ ANEURALNETWORKS_NO_ERROR); EXPECT_EQ(rank, expectedRank); EXPECT_EQ(dims[0], expectedDims); - - // close memory - ANeuralNetworksEvent_free(event); - ANeuralNetworksExecution_free(execution); } // Regression test for b/146044137. 
@@ -2524,10 +2002,6 @@ ANeuralNetworksBurst* burst; EXPECT_EQ(ANeuralNetworksBurst_create(compilation, &burst), ANEURALNETWORKS_BAD_STATE); - - // close memory - ANeuralNetworksBurst_free(burst); - ANeuralNetworksCompilation_free(compilation); } TEST_F(ValidationTestBurst, BurstComputeDifferentCompilations) { @@ -3009,9 +2483,6 @@ EXPECT_EQ(ANeuralNetworksExecution_setMeasureTiming(execution, false), ANEURALNETWORKS_BAD_DATA); EXPECT_EQ(ANeuralNetworksExecution_setMeasureTiming(execution, true), ANEURALNETWORKS_BAD_DATA); - - // close memory - ANeuralNetworksExecution_free(execution); } class ValidationTestInvalidCompilation : public ValidationTestModel { @@ -3063,29 +2534,6 @@ ANeuralNetworksCompilation* mInvalidCompilation = nullptr; }; -TEST_F(ValidationTestInvalidCompilation, GetPreferredMemoryAlignmentAndPadding) { - if (__builtin_available(android __NNAPI_FL5_MIN_ANDROID_API__, *)) { - if (!mInvalidCompilation) { - return; - } - uint32_t result; - EXPECT_EQ(ANeuralNetworksCompilation_getPreferredMemoryAlignmentForInput( - mInvalidCompilation, 0, &result), - ANEURALNETWORKS_BAD_STATE); - EXPECT_EQ(ANeuralNetworksCompilation_getPreferredMemoryPaddingForInput(mInvalidCompilation, - 0, &result), - ANEURALNETWORKS_BAD_STATE); - EXPECT_EQ(ANeuralNetworksCompilation_getPreferredMemoryAlignmentForOutput( - mInvalidCompilation, 0, &result), - ANEURALNETWORKS_BAD_STATE); - EXPECT_EQ(ANeuralNetworksCompilation_getPreferredMemoryPaddingForOutput(mInvalidCompilation, - 0, &result), - ANEURALNETWORKS_BAD_STATE); - } else { - GTEST_SKIP(); - } -} - TEST_F(ValidationTestInvalidCompilation, CreateExecution) { if (!mInvalidCompilation) { return; @@ -3260,9 +2708,6 @@ } } } - - // close memory - ANeuralNetworksExecution_free(execution); } }
diff --git a/runtime/test/TestVersionedInterfaces.cpp b/runtime/test/TestVersionedInterfaces.cpp new file mode 100644 index 0000000..6d1306d --- /dev/null +++ b/runtime/test/TestVersionedInterfaces.cpp
@@ -0,0 +1,2916 @@ +/* + * Copyright (C) 2020 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include <android-base/logging.h> +#include <android/hardware/neuralnetworks/1.0/ADevice.h> +#include <android/hardware/neuralnetworks/1.1/ADevice.h> +#include <android/hardware/neuralnetworks/1.2/ADevice.h> +#include <android/hardware/neuralnetworks/1.3/ADevice.h> +#include <gmock/gmock.h> +#include <gtest/gtest.h> +#include <hidl/Status.h> +#include <utils/Errors.h> + +#include <limits> +#include <memory> +#include <utility> +#include <vector> + +#include "HalInterfaces.h" +#include "MemoryUtils.h" +#include "MetaModel.h" +#include "VersionedInterfaces.h" + +namespace android::nn { +namespace { + +using namespace hal; +using testing::_; +using testing::Invoke; +using testing::InvokeWithoutArgs; +using testing::MockFunction; +using MockDeviceFactory = MockFunction<sp<V1_0::IDevice>(bool blocking)>; + +constexpr uint32_t kNoCacheFilesNeeded = 0; +constexpr uint32_t kMaxNumberOfCacheFiles = + static_cast<uint32_t>(Constant::MAX_NUMBER_OF_CACHE_FILES); +constexpr Timing kNoTiming = {.timeOnDevice = std::numeric_limits<uint64_t>::max(), + .timeInDriver = std::numeric_limits<uint64_t>::max()}; + +template <typename... Args> +auto makeCallbackReturn(Args&&... 
args) { + return [argPack = std::make_tuple(std::forward<Args>(args)...)](const auto& cb) { + std::apply(cb, argPack); + return Void(); + }; +}; + +class MockDevice : public IDevice { + public: + static sp<MockDevice> create() { + const sp<MockDevice> mockDevice = new MockDevice(); + + const auto linkToDeathRet_ret = []() -> Return<bool> { return true; }; + const auto getCapabilities_ret = + makeCallbackReturn(V1_0::ErrorStatus::NONE, V1_0::Capabilities{}); + const auto getCapabilities_1_1_ret = + makeCallbackReturn(V1_0::ErrorStatus::NONE, V1_1::Capabilities{}); + const auto getVersionString_ret = + makeCallbackReturn(V1_0::ErrorStatus::NONE, "Google-MockV1"); + const auto getType_ret = makeCallbackReturn(V1_0::ErrorStatus::NONE, DeviceType::OTHER); + const auto getCapabilities_1_2_ret = + makeCallbackReturn(V1_0::ErrorStatus::NONE, V1_2::Capabilities{}); + const auto getSupportedExtensions_ret = + makeCallbackReturn(V1_0::ErrorStatus::NONE, hidl_vec<Extension>{}); + const auto getNumberOfCacheFilesNeeded_ret = makeCallbackReturn( + V1_0::ErrorStatus::NONE, kMaxNumberOfCacheFiles, kMaxNumberOfCacheFiles); + const auto getCapabilities_1_3_ret = + makeCallbackReturn(V1_3::ErrorStatus::NONE, V1_3::Capabilities{}); + + ON_CALL(*mockDevice, linkToDeathRet()).WillByDefault(Invoke(linkToDeathRet_ret)); + ON_CALL(*mockDevice, getCapabilities(_)).WillByDefault(Invoke(getCapabilities_ret)); + ON_CALL(*mockDevice, getCapabilities_1_1(_)).WillByDefault(Invoke(getCapabilities_1_1_ret)); + ON_CALL(*mockDevice, getVersionString(_)).WillByDefault(Invoke(getVersionString_ret)); + ON_CALL(*mockDevice, getType(_)).WillByDefault(Invoke(getType_ret)); + ON_CALL(*mockDevice, getCapabilities_1_2(_)).WillByDefault(Invoke(getCapabilities_1_2_ret)); + ON_CALL(*mockDevice, getSupportedExtensions(_)) + .WillByDefault(Invoke(getSupportedExtensions_ret)); + ON_CALL(*mockDevice, getNumberOfCacheFilesNeeded(_)) + .WillByDefault(Invoke(getNumberOfCacheFilesNeeded_ret)); + ON_CALL(*mockDevice, 
getCapabilities_1_3(_)).WillByDefault(Invoke(getCapabilities_1_3_ret)); + + // These EXPECT_CALL(...).Times(testing::AnyNumber()) calls are to + // suppress warnings on the uninteresting methods calls. + EXPECT_CALL(*mockDevice, linkToDeathRet()).Times(testing::AnyNumber()); + EXPECT_CALL(*mockDevice, getCapabilities(_)).Times(testing::AnyNumber()); + EXPECT_CALL(*mockDevice, getCapabilities_1_1(_)).Times(testing::AnyNumber()); + EXPECT_CALL(*mockDevice, getVersionString(_)).Times(testing::AnyNumber()); + EXPECT_CALL(*mockDevice, getType(_)).Times(testing::AnyNumber()); + EXPECT_CALL(*mockDevice, getCapabilities_1_2(_)).Times(testing::AnyNumber()); + EXPECT_CALL(*mockDevice, getSupportedExtensions(_)).Times(testing::AnyNumber()); + EXPECT_CALL(*mockDevice, getNumberOfCacheFilesNeeded(_)).Times(testing::AnyNumber()); + EXPECT_CALL(*mockDevice, getCapabilities_1_3(_)).Times(testing::AnyNumber()); + + return mockDevice; + } + + // IBase methods below. + Return<bool> linkToDeath(const sp<hidl_death_recipient>& recipient, + uint64_t /*cookie*/) override { + mDeathRecipient = recipient; + return linkToDeathRet(); + } + MOCK_METHOD(Return<void>, ping, (), (override)); + + // V1_0 methods below. + MOCK_METHOD(Return<void>, getCapabilities, (getCapabilities_cb cb), (override)); + MOCK_METHOD(Return<void>, getSupportedOperations, + (const V1_0::Model& model, getSupportedOperations_cb cb), (override)); + MOCK_METHOD(Return<V1_0::ErrorStatus>, prepareModel, + (const V1_0::Model& model, const sp<V1_0::IPreparedModelCallback>& callback), + (override)); + MOCK_METHOD(Return<DeviceStatus>, getStatus, (), (override)); + + // V1_1 methods below. 
+ MOCK_METHOD(Return<void>, getCapabilities_1_1, (getCapabilities_1_1_cb cb), (override)); + MOCK_METHOD(Return<void>, getSupportedOperations_1_1, + (const V1_1::Model& model, getSupportedOperations_1_1_cb cb), (override)); + MOCK_METHOD(Return<V1_0::ErrorStatus>, prepareModel_1_1, + (const V1_1::Model& model, ExecutionPreference preference, + const sp<V1_0::IPreparedModelCallback>& callback), + (override)); + + // V1_2 methods below. + MOCK_METHOD(Return<void>, getVersionString, (getVersionString_cb cb), (override)); + MOCK_METHOD(Return<void>, getType, (getType_cb cb), (override)); + MOCK_METHOD(Return<void>, getCapabilities_1_2, (getCapabilities_1_2_cb cb), (override)); + MOCK_METHOD(Return<void>, getSupportedExtensions, (getSupportedExtensions_cb cb), (override)); + MOCK_METHOD(Return<void>, getSupportedOperations_1_2, + (const V1_2::Model& model, getSupportedOperations_1_2_cb cb), (override)); + MOCK_METHOD(Return<void>, getNumberOfCacheFilesNeeded, (getNumberOfCacheFilesNeeded_cb cb), + (override)); + MOCK_METHOD(Return<V1_0::ErrorStatus>, prepareModel_1_2, + (const V1_2::Model& model, ExecutionPreference preference, + const hidl_vec<hidl_handle>& modelCache, const hidl_vec<hidl_handle>& dataCache, + const CacheToken& token, const sp<V1_2::IPreparedModelCallback>& callback), + (override)); + MOCK_METHOD(Return<V1_0::ErrorStatus>, prepareModelFromCache, + (const hidl_vec<hidl_handle>& modelCache, const hidl_vec<hidl_handle>& dataCache, + const CacheToken& token, const sp<V1_2::IPreparedModelCallback>& callback), + (override)); + + // V1_3 methods below. 
+ MOCK_METHOD(Return<void>, getCapabilities_1_3, (getCapabilities_1_3_cb cb), (override)); + MOCK_METHOD(Return<void>, getSupportedOperations_1_3, + (const V1_3::Model& model, getSupportedOperations_1_3_cb cb), (override)); + MOCK_METHOD(Return<V1_3::ErrorStatus>, prepareModel_1_3, + (const V1_3::Model& model, ExecutionPreference preference, Priority priority, + const OptionalTimePoint& deadline, const hidl_vec<hidl_handle>& modelCache, + const hidl_vec<hidl_handle>& dataCache, const CacheToken& token, + const sp<V1_3::IPreparedModelCallback>& callback), + (override)); + MOCK_METHOD(Return<V1_3::ErrorStatus>, prepareModelFromCache_1_3, + (const OptionalTimePoint& deadline, const hidl_vec<hidl_handle>& modelCache, + const hidl_vec<hidl_handle>& dataCache, const CacheToken& token, + const sp<V1_3::IPreparedModelCallback>& callback), + (override)); + MOCK_METHOD(Return<void>, allocate, + (const BufferDesc& desc, const hidl_vec<sp<V1_3::IPreparedModel>>& preparedModels, + const hidl_vec<BufferRole>& inputRoles, const hidl_vec<BufferRole>& outputRoles, + allocate_cb cb), + (override)); + + // Helper methods. + MOCK_METHOD(Return<bool>, linkToDeathRet, ()); + void simulateCrash() { + ASSERT_NE(nullptr, mDeathRecipient.get()); + + // Currently, the VersionedInterfaces code will not use the `cookie` or + // `who` arguments, so we pass in 0 and nullptr for these arguments + // instead. Normally, they are used by the hidl_death_recipient to + // determine which object is dead. However, the VersionedInterfaces + // code only pairs a single death recipient with a single HIDL + // interface object, so these arguments are redundant. + mDeathRecipient->serviceDied(0, nullptr); + } + + private: + // Members. 
+ sp<hidl_death_recipient> mDeathRecipient; +}; + +class MockPreparedModel : public IPreparedModel { + public: + static sp<MockPreparedModel> create() { + const sp<MockPreparedModel> mockPreparedModel = new MockPreparedModel(); + + const auto linkToDeathRet_ret = []() -> Return<bool> { return true; }; + ON_CALL(*mockPreparedModel, linkToDeathRet()).WillByDefault(Invoke(linkToDeathRet_ret)); + + // This EXPECT_CALL(...).Times(testing::AnyNumber()) calls are to + // suppress warnings on the uninteresting methods calls. + EXPECT_CALL(*mockPreparedModel, linkToDeathRet()).Times(testing::AnyNumber()); + + return mockPreparedModel; + } + + // IBase methods below. + Return<bool> linkToDeath(const sp<hidl_death_recipient>& recipient, + uint64_t /*cookie*/) override { + mDeathRecipient = recipient; + return linkToDeathRet(); + } + MOCK_METHOD(Return<void>, ping, (), (override)); + + // V1_0 methods below. + MOCK_METHOD(Return<V1_0::ErrorStatus>, execute, + (const V1_0::Request& request, const sp<V1_0::IExecutionCallback>& callback), + (override)); + + // V1_2 methods below. + MOCK_METHOD(Return<V1_0::ErrorStatus>, execute_1_2, + (const V1_0::Request& request, MeasureTiming measure, + const sp<V1_2::IExecutionCallback>& callback), + (override)); + MOCK_METHOD(Return<void>, executeSynchronously, + (const V1_0::Request& request, MeasureTiming measure, executeSynchronously_cb cb), + (override)); + MOCK_METHOD(Return<void>, configureExecutionBurst, + (const sp<V1_2::IBurstCallback>& callback, + const hardware::MQDescriptorSync<V1_2::FmqRequestDatum>& requestChannel, + const hardware::MQDescriptorSync<V1_2::FmqResultDatum>& resultChannel, + configureExecutionBurst_cb cb), + (override)); + + // V1_3 methods below. 
+ MOCK_METHOD(Return<ErrorStatus>, execute_1_3, + (const V1_3::Request& request, MeasureTiming measure, + const OptionalTimePoint& deadline, + const OptionalTimeoutDuration& loopTimeoutDuration, + const sp<IExecutionCallback>& callback), + (override)); + MOCK_METHOD(Return<void>, executeSynchronously_1_3, + (const V1_3::Request& request, MeasureTiming measure, + const OptionalTimePoint& deadline, + const OptionalTimeoutDuration& loopTimeoutDuration, + executeSynchronously_1_3_cb cb), + (override)); + MOCK_METHOD(Return<void>, executeFenced, + (const V1_3::Request& request, const hidl_vec<hidl_handle>& waitFor, + MeasureTiming measure, const OptionalTimePoint& deadline, + const OptionalTimeoutDuration& loopTimeoutDuration, + const OptionalTimeoutDuration& duration, executeFenced_cb cb), + (override)); + + // Helper methods. + MOCK_METHOD(Return<bool>, linkToDeathRet, ()); + void simulateCrash() { + ASSERT_NE(nullptr, mDeathRecipient.get()); + + // Currently, the VersionedInterfaces code will not use the `cookie` or + // `who` arguments, so we pass in 0 and nullptr for these arguments + // instead. Normally, they are used by the hidl_death_recipient to + // determine which object is dead. However, the VersionedInterfaces + // code only pairs a single death recipient with a single HIDL + // interface object, so these arguments are redundant. + mDeathRecipient->serviceDied(0, nullptr); + } + + private: + // Members. + sp<hidl_death_recipient> mDeathRecipient; +}; + +class MockBurstContext : public V1_2::IBurstContext { + public: + // V1_2 methods below. + MOCK_METHOD(Return<void>, freeMemory, (int32_t slot), (override)); +}; + +class MockFencedExecutionCallback : public IFencedExecutionCallback { + public: + // V1_3 methods below. + MOCK_METHOD(Return<void>, getExecutionInfo, (getExecutionInfo_cb cb), (override)); +}; + +class MockBuffer : public IBuffer { + public: + // V1_3 methods below. 
+ MOCK_METHOD(Return<ErrorStatus>, copyTo, (const hidl_memory& dst), (override)); + MOCK_METHOD(Return<ErrorStatus>, copyFrom, + (const hidl_memory& src, const hidl_vec<uint32_t>& dimensions), (override)); +}; + +enum class Version { V1_0, V1_1, V1_2, V1_3, MOCK }; + +sp<V1_0::IDevice> adaptAs(const sp<MockDevice>& mockDevice, Version version) { + switch (version) { + case Version::V1_0: + return new V1_0::ADevice(mockDevice); + case Version::V1_1: + return new V1_1::ADevice(mockDevice); + case Version::V1_2: + return new V1_2::ADevice(mockDevice); + case Version::V1_3: + return new V1_3::ADevice(mockDevice); + case Version::MOCK: + return mockDevice; + } + LOG(FATAL) << "unrecognized version: " << static_cast<int>(version); + return nullptr; +} + +auto makePreparedModelReturn(V1_0::ErrorStatus launchStatus, V1_0::ErrorStatus returnStatus, + const sp<MockPreparedModel>& preparedModel) { + return [launchStatus, returnStatus, preparedModel]( + const V1_0::Model& /*model*/, + const sp<V1_0::IPreparedModelCallback>& cb) -> Return<V1_0::ErrorStatus> { + cb->notify(returnStatus, preparedModel).isOk(); + return launchStatus; + }; +} +auto makePreparedModel_1_1Return(V1_0::ErrorStatus launchStatus, V1_0::ErrorStatus returnStatus, + const sp<MockPreparedModel>& preparedModel) { + return [launchStatus, returnStatus, preparedModel]( + const V1_1::Model& /*model*/, ExecutionPreference /*preference*/, + const sp<V1_0::IPreparedModelCallback>& cb) -> Return<V1_0::ErrorStatus> { + cb->notify(returnStatus, preparedModel).isOk(); + return launchStatus; + }; +} +auto makePreparedModel_1_2Return(V1_0::ErrorStatus launchStatus, V1_0::ErrorStatus returnStatus, + const sp<MockPreparedModel>& preparedModel) { + return [launchStatus, returnStatus, preparedModel]( + const V1_2::Model& /*model*/, ExecutionPreference /*preference*/, + const auto& /*modelCache*/, const auto& /*dataCache*/, const auto& /*token*/, + const sp<V1_2::IPreparedModelCallback>& cb) -> Return<V1_0::ErrorStatus> { + 
cb->notify_1_2(returnStatus, preparedModel).isOk(); + return launchStatus; + }; +} +auto makePreparedModel_1_3Return(V1_3::ErrorStatus launchStatus, V1_3::ErrorStatus returnStatus, + const sp<MockPreparedModel>& preparedModel) { + return [launchStatus, returnStatus, preparedModel]( + const V1_3::Model& /*model*/, ExecutionPreference /*preference*/, + Priority /*priority*/, const OptionalTimePoint& /*deadline*/, + const hidl_vec<hidl_handle>& /*modelCache*/, + const hidl_vec<hidl_handle>& /*dataCache*/, const CacheToken& /*token*/, + const sp<V1_3::IPreparedModelCallback>& cb) -> Return<V1_3::ErrorStatus> { + cb->notify_1_3(returnStatus, preparedModel).isOk(); + return launchStatus; + }; +} + +auto makeExecuteReturn(V1_0::ErrorStatus launchStatus, V1_0::ErrorStatus returnStatus) { + return [launchStatus, returnStatus]( + const V1_0::Request& /*request*/, + const sp<V1_0::IExecutionCallback>& cb) -> Return<V1_0::ErrorStatus> { + cb->notify(returnStatus); + return launchStatus; + }; +} +auto makeExecute_1_2Return(V1_0::ErrorStatus launchStatus, V1_0::ErrorStatus returnStatus, + const std::vector<OutputShape>& outputShapes, const Timing& timing) { + return [launchStatus, returnStatus, outputShapes, timing]( + const V1_0::Request& /*request*/, MeasureTiming /*measureTiming*/, + const sp<V1_2::IExecutionCallback>& cb) -> Return<V1_0::ErrorStatus> { + cb->notify_1_2(returnStatus, outputShapes, timing); + return launchStatus; + }; +} +auto makeExecute_1_3Return(V1_3::ErrorStatus launchStatus, V1_3::ErrorStatus returnStatus, + const std::vector<OutputShape>& outputShapes, const Timing& timing) { + return [launchStatus, returnStatus, outputShapes, timing]( + const V1_3::Request& /*request*/, MeasureTiming /*measureTiming*/, + const OptionalTimePoint& /*deadline*/, + const OptionalTimeoutDuration& /*loopTimeoutDuration*/, + const sp<V1_3::IExecutionCallback>& cb) -> Return<V1_3::ErrorStatus> { + cb->notify_1_3(returnStatus, outputShapes, timing); + return launchStatus; + }; 
+} +auto makeExecuteSynchronouslyReturn(V1_0::ErrorStatus status, + const std::vector<OutputShape>& outputShapes, + const Timing& timing) { + return [status, outputShapes, timing](const V1_0::Request& /*request*/, + MeasureTiming /*measureTiming*/, + const V1_2::IPreparedModel::executeSynchronously_cb& cb) { + cb(status, outputShapes, timing); + return Void(); + }; +} +auto makeExecuteSynchronously_1_3Return(V1_3::ErrorStatus status, + const std::vector<OutputShape>& outputShapes, + const Timing& timing) { + return [status, outputShapes, timing]( + const V1_3::Request& /*request*/, MeasureTiming /*measureTiming*/, + const OptionalTimePoint& /*deadline*/, + const OptionalTimeoutDuration& /*loopTimeoutDuration*/, + const V1_3::IPreparedModel::executeSynchronously_1_3_cb& cb) { + cb(status, outputShapes, timing); + return Void(); + }; +} +auto makeConfigureExecutionBurst(V1_0::ErrorStatus status, + const sp<MockBurstContext>& burstContext) { + return [status, burstContext]( + const sp<V1_2::IBurstCallback>& /*callback*/, + const hardware::MQDescriptorSync<V1_2::FmqRequestDatum>& /*requestChannel*/, + const hardware::MQDescriptorSync<V1_2::FmqResultDatum>& /*resultChannel*/, + V1_2::IPreparedModel::configureExecutionBurst_cb cb) { + cb(status, burstContext); + return Void(); + }; +} +auto makeExecuteFencedReturn(V1_3::ErrorStatus status, const hidl_handle& syncFence, + const sp<IFencedExecutionCallback>& dispatchCallback) { + return [status, syncFence, dispatchCallback]( + const V1_3::Request& /*request*/, const hidl_vec<hidl_handle>& /*waitFor*/, + MeasureTiming /*measure*/, const OptionalTimePoint& /*deadline*/, + const OptionalTimeoutDuration& /*loopTimeoutDuration*/, + const OptionalTimeoutDuration& /*duration*/, + V1_3::IPreparedModel::executeFenced_cb cb) { + cb(status, syncFence, dispatchCallback); + return Void(); + }; +} + +// TODO: The "setupInitializationExpectation*" calls below re-specify the +// number of expected times each initialization method is 
called. Because +// this was originally set to `testing::AnyNumber()` when the object was +// created, do these calls act as no-ops, do they override the previous +// expectations, or are both expectations still active? + +void setupInitializationExpectationsV1_0(const sp<MockDevice>& mockDevice) { + EXPECT_CALL(*mockDevice, getCapabilities_1_1(_)).Times(0); + EXPECT_CALL(*mockDevice, getCapabilities_1_2(_)).Times(0); + EXPECT_CALL(*mockDevice, getCapabilities_1_3(_)).Times(0); + EXPECT_CALL(*mockDevice, getVersionString(_)).Times(0); + EXPECT_CALL(*mockDevice, getType(_)).Times(0); + EXPECT_CALL(*mockDevice, getSupportedExtensions(_)).Times(0); + EXPECT_CALL(*mockDevice, getNumberOfCacheFilesNeeded(_)).Times(0); +} + +void setupInitializationExpectationsV1_1(const sp<MockDevice>& mockDevice) { + EXPECT_CALL(*mockDevice, getCapabilities(_)).Times(0); + EXPECT_CALL(*mockDevice, getCapabilities_1_2(_)).Times(0); + EXPECT_CALL(*mockDevice, getCapabilities_1_3(_)).Times(0); + EXPECT_CALL(*mockDevice, getVersionString(_)).Times(0); + EXPECT_CALL(*mockDevice, getType(_)).Times(0); + EXPECT_CALL(*mockDevice, getSupportedExtensions(_)).Times(0); + EXPECT_CALL(*mockDevice, getNumberOfCacheFilesNeeded(_)).Times(0); +} + +void setupInitializationExpectationsV1_2(const sp<MockDevice>& mockDevice) { + EXPECT_CALL(*mockDevice, getCapabilities(_)).Times(0); + EXPECT_CALL(*mockDevice, getCapabilities_1_1(_)).Times(0); + EXPECT_CALL(*mockDevice, getCapabilities_1_3(_)).Times(0); +} + +void setupInitializationExpectationsV1_3(const sp<MockDevice>& mockDevice) { + EXPECT_CALL(*mockDevice, getCapabilities(_)).Times(0); + EXPECT_CALL(*mockDevice, getCapabilities_1_1(_)).Times(0); + EXPECT_CALL(*mockDevice, getCapabilities_1_2(_)).Times(0); +} + +void setupInitializationExpectations(const sp<MockDevice>& mockDevice, Version version) { + switch (version) { + case Version::V1_0: + setupInitializationExpectationsV1_0(mockDevice); + return; + case Version::V1_1: + 
setupInitializationExpectationsV1_1(mockDevice); + return; + case Version::V1_2: + setupInitializationExpectationsV1_2(mockDevice); + return; + case Version::V1_3: + setupInitializationExpectationsV1_3(mockDevice); + return; + case Version::MOCK: + setupInitializationExpectationsV1_3(mockDevice); + return; + } + LOG(FATAL) << "unrecognized version: " << static_cast<int>(version); +} + +void setupSuccessfulInitializationExpectations(const sp<MockDevice>& mockDevice, Version version) { + EXPECT_CALL(*mockDevice, linkToDeathRet()).Times(testing::AnyNumber()); + + const int numCallsForV1_0 = (version == Version::V1_0 ? 1 : 0); + EXPECT_CALL(*mockDevice, getCapabilities(_)).Times(numCallsForV1_0); + + const int numCallsForV1_1 = (version == Version::V1_1 ? 1 : 0); + EXPECT_CALL(*mockDevice, getCapabilities_1_1(_)).Times(numCallsForV1_1); + + const int numCallsForV1_2 = (version == Version::V1_2 ? 1 : 0); + EXPECT_CALL(*mockDevice, getCapabilities_1_2(_)).Times(numCallsForV1_2); + + const int numCallsForAtLeastV1_3 = (version >= Version::V1_3 ? 1 : 0); + EXPECT_CALL(*mockDevice, getCapabilities_1_3(_)).Times(numCallsForAtLeastV1_3); + + const int numCallsForAtLeastV1_2 = (version >= Version::V1_2 ? 
1 : 0); + EXPECT_CALL(*mockDevice, getVersionString(_)).Times(numCallsForAtLeastV1_2); + EXPECT_CALL(*mockDevice, getType(_)).Times(numCallsForAtLeastV1_2); + EXPECT_CALL(*mockDevice, getSupportedExtensions(_)).Times(numCallsForAtLeastV1_2); + EXPECT_CALL(*mockDevice, getNumberOfCacheFilesNeeded(_)).Times(numCallsForAtLeastV1_2); +} + +std::shared_ptr<VersionedIDevice> makeVersionedIDeviceFrom(const sp<MockDevice>& mockDevice, + MockDeviceFactory* mockDeviceFactory, + Version version) { + setupInitializationExpectations(mockDevice, version); + const auto device = adaptAs(mockDevice, version); + ON_CALL(*mockDeviceFactory, Call(_)).WillByDefault(testing::Return(device)); + EXPECT_CALL(*mockDeviceFactory, Call(/*blocking=*/true)).Times(testing::AtLeast(1)); + const DeviceFactory makeDevice = mockDeviceFactory->AsStdFunction(); + return VersionedIDevice::create("MockDevice", makeDevice); +} + +std::shared_ptr<VersionedIDevice> makeVersionedIDeviceSuccessfulInitializationFrom( + const sp<MockDevice>& device, MockDeviceFactory* mockDeviceFactory, Version version) { + setupSuccessfulInitializationExpectations(device, version); + return makeVersionedIDeviceFrom(device, mockDeviceFactory, version); +} + +std::function<hardware::Status()> makeTransportFailure(status_t status) { + return [status] { return hardware::Status::fromStatusT(status); }; +} + +const auto makeGeneralTransportFailure = makeTransportFailure(NO_MEMORY); +const auto makeDeadObjectFailure = makeTransportFailure(DEAD_OBJECT); + +class VersionedIDeviceTest : public testing::Test { + protected: + const sp<MockDevice> kMockDevice = MockDevice::create(); + const std::unique_ptr<MockDeviceFactory> kMockMakeDevice = + std::make_unique<MockDeviceFactory>(); +}; + +class VersionedIDeviceInitializationTest : public VersionedIDeviceTest {}; + +template <Version version> +class VersionedIDeviceInitializedTest : public VersionedIDeviceTest { + protected: + void SetUp() override { + VersionedIDeviceTest::SetUp(); + 
ASSERT_NE(nullptr, kDevice.get()); + } + + const std::shared_ptr<VersionedIDevice> kDevice = + makeVersionedIDeviceSuccessfulInitializationFrom(kMockDevice, kMockMakeDevice.get(), + version); +}; + +class VersionedIDeviceV1_0Test : public VersionedIDeviceInitializedTest<Version::V1_0> {}; +class VersionedIDeviceV1_1Test : public VersionedIDeviceInitializedTest<Version::V1_1> {}; +class VersionedIDeviceV1_2Test : public VersionedIDeviceInitializedTest<Version::V1_2> {}; +class VersionedIDeviceV1_3Test : public VersionedIDeviceInitializedTest<Version::V1_3> {}; +class VersionedIDeviceMockTest : public VersionedIDeviceInitializedTest<Version::MOCK> {}; + +// Simulate initialization/link error + +TEST_F(VersionedIDeviceInitializationTest, creationFailure) { + // setup failure + EXPECT_CALL(*kMockMakeDevice, Call(_)).Times(1).WillOnce(testing::Return(nullptr)); + const DeviceFactory makeDevice = kMockMakeDevice->AsStdFunction(); + + // run test + const auto device = VersionedIDevice::create("MockDevice", makeDevice); + + // verify failure + EXPECT_EQ(nullptr, device.get()); +} + +TEST_F(VersionedIDeviceInitializationTest, linkToDeathTransportFailure) { + // setup failure + EXPECT_CALL(*kMockDevice, linkToDeathRet()) + .Times(1) + .WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure)); + EXPECT_CALL(*kMockMakeDevice, Call(_)).Times(1).WillOnce(testing::Return(kMockDevice)); + const DeviceFactory makeDevice = kMockMakeDevice->AsStdFunction(); + + // run test + const auto device = VersionedIDevice::create("MockDevice", makeDevice); + + // verify failure + EXPECT_EQ(nullptr, device.get()); +} + +TEST_F(VersionedIDeviceInitializationTest, linkToDeathReturnError) { + // setup failure + const auto ret = []() -> Return<bool> { return false; }; + EXPECT_CALL(*kMockMakeDevice, Call(_)).Times(1).WillOnce(testing::Return(kMockDevice)); + EXPECT_CALL(*kMockDevice, linkToDeathRet()).Times(1).WillOnce(InvokeWithoutArgs(ret)); + const DeviceFactory makeDevice = 
kMockMakeDevice->AsStdFunction(); + + // run test + const auto device = VersionedIDevice::create("MockDevice", makeDevice); + + // verify failure + EXPECT_EQ(nullptr, device.get()); +} + +TEST_F(VersionedIDeviceInitializationTest, getCapabilitiesFailure) { + // setup failure + const auto ret = makeCallbackReturn(V1_0::ErrorStatus::GENERAL_FAILURE, V1_0::Capabilities{}); + EXPECT_CALL(*kMockDevice, getCapabilities(_)).Times(1).WillOnce(Invoke(ret)); + + // run test + const auto device = makeVersionedIDeviceFrom(kMockDevice, kMockMakeDevice.get(), Version::V1_0); + + // verify failure + EXPECT_EQ(nullptr, device.get()); +} + +TEST_F(VersionedIDeviceInitializationTest, getCapabilities_1_1Failure) { + // setup failure + const auto ret = makeCallbackReturn(V1_0::ErrorStatus::GENERAL_FAILURE, V1_1::Capabilities{}); + EXPECT_CALL(*kMockDevice, getCapabilities_1_1(_)).Times(1).WillOnce(Invoke(ret)); + + // run test + const auto device = makeVersionedIDeviceFrom(kMockDevice, kMockMakeDevice.get(), Version::V1_1); + + // verify failure + EXPECT_EQ(nullptr, device.get()); +} + +TEST_F(VersionedIDeviceInitializationTest, getCapabilities_1_2Failure) { + // setup failure + const auto ret = makeCallbackReturn(V1_0::ErrorStatus::GENERAL_FAILURE, V1_2::Capabilities{}); + EXPECT_CALL(*kMockDevice, getCapabilities_1_2(_)).Times(1).WillOnce(Invoke(ret)); + + // run test + const auto device = makeVersionedIDeviceFrom(kMockDevice, kMockMakeDevice.get(), Version::V1_2); + + // verify failure + EXPECT_EQ(nullptr, device.get()); +} + +TEST_F(VersionedIDeviceInitializationTest, getCapabilities_1_3Failure) { + // setup failure + const auto ret = makeCallbackReturn(V1_3::ErrorStatus::GENERAL_FAILURE, V1_3::Capabilities{}); + EXPECT_CALL(*kMockDevice, getCapabilities_1_3(_)).Times(1).WillOnce(Invoke(ret)); + + // run test + const auto device = makeVersionedIDeviceFrom(kMockDevice, kMockMakeDevice.get(), Version::V1_3); + + // verify failure + EXPECT_EQ(nullptr, device.get()); +} + 
+TEST_F(VersionedIDeviceInitializationTest, getVersionStringFailure) { + // setup failure + const auto ret = makeCallbackReturn(V1_0::ErrorStatus::GENERAL_FAILURE, ""); + EXPECT_CALL(*kMockDevice, getVersionString(_)).Times(1).WillOnce(Invoke(ret)); + + // run test + const auto device = makeVersionedIDeviceFrom(kMockDevice, kMockMakeDevice.get(), Version::V1_2); + + // verify failure + EXPECT_EQ(nullptr, device.get()); +} + +TEST_F(VersionedIDeviceInitializationTest, getTypeFailure) { + // setup failure + const auto ret = makeCallbackReturn(V1_0::ErrorStatus::GENERAL_FAILURE, DeviceType::OTHER); + EXPECT_CALL(*kMockDevice, getType(_)).Times(1).WillOnce(Invoke(ret)); + + // run test + const auto device = makeVersionedIDeviceFrom(kMockDevice, kMockMakeDevice.get(), Version::V1_2); + + // verify failure + EXPECT_EQ(nullptr, device.get()); +} + +TEST_F(VersionedIDeviceInitializationTest, getSupportedExtensionsFailure) { + // setup failure + const auto ret = makeCallbackReturn(V1_0::ErrorStatus::GENERAL_FAILURE, hidl_vec<Extension>{}); + EXPECT_CALL(*kMockDevice, getSupportedExtensions(_)).Times(1).WillOnce(Invoke(ret)); + + // run test + const auto device = makeVersionedIDeviceFrom(kMockDevice, kMockMakeDevice.get(), Version::V1_2); + + // verify failure + EXPECT_EQ(nullptr, device.get()); +} + +TEST_F(VersionedIDeviceInitializationTest, getNumberOfCacheFilesNeededFailure) { + // setup failure + const auto ret = makeCallbackReturn(V1_0::ErrorStatus::GENERAL_FAILURE, kMaxNumberOfCacheFiles, + kMaxNumberOfCacheFiles); + EXPECT_CALL(*kMockDevice, getNumberOfCacheFilesNeeded(_)).Times(1).WillOnce(Invoke(ret)); + + // run test + const auto device = makeVersionedIDeviceFrom(kMockDevice, kMockMakeDevice.get(), Version::V1_2); + + // verify failure + EXPECT_EQ(nullptr, device.get()); +} + +TEST_F(VersionedIDeviceInitializationTest, dataCacheFilesExceedsSpecifiedMax) { + // setup failure + const auto ret = makeCallbackReturn(V1_0::ErrorStatus::NONE, kMaxNumberOfCacheFiles + 1, 
+ kMaxNumberOfCacheFiles); + EXPECT_CALL(*kMockDevice, getNumberOfCacheFilesNeeded(_)).Times(1).WillOnce(Invoke(ret)); + + // run test + const auto device = makeVersionedIDeviceFrom(kMockDevice, kMockMakeDevice.get(), Version::V1_2); + + // verify failure + EXPECT_EQ(nullptr, device.get()); +} + +TEST_F(VersionedIDeviceInitializationTest, modelCacheFilesExceedsSpecifiedMax) { + // setup failure + const auto ret = makeCallbackReturn(V1_0::ErrorStatus::NONE, kMaxNumberOfCacheFiles, + kMaxNumberOfCacheFiles + 1); + EXPECT_CALL(*kMockDevice, getNumberOfCacheFilesNeeded(_)).Times(1).WillOnce(Invoke(ret)); + + // run test + const auto device = makeVersionedIDeviceFrom(kMockDevice, kMockMakeDevice.get(), Version::V1_2); + + // verify failure + EXPECT_EQ(nullptr, device.get()); +} + +TEST_F(VersionedIDeviceInitializationTest, getCapabilitiesTransportFailure) { + // setup failure + EXPECT_CALL(*kMockDevice, getCapabilities(_)) + .Times(1) + .WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure)); + + // run test + const auto device = makeVersionedIDeviceFrom(kMockDevice, kMockMakeDevice.get(), Version::V1_0); + + // verify failure + EXPECT_EQ(nullptr, device.get()); +} + +TEST_F(VersionedIDeviceInitializationTest, getCapabilities_1_1TransportFailure) { + // setup failure + EXPECT_CALL(*kMockDevice, getCapabilities_1_1(_)) + .Times(1) + .WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure)); + + // run test + const auto device = makeVersionedIDeviceFrom(kMockDevice, kMockMakeDevice.get(), Version::V1_1); + + // verify failure + EXPECT_EQ(nullptr, device.get()); +} + +TEST_F(VersionedIDeviceInitializationTest, getCapabilities_1_2TransportFailure) { + // setup failure + EXPECT_CALL(*kMockDevice, getCapabilities_1_2(_)) + .Times(1) + .WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure)); + + // run test + const auto device = makeVersionedIDeviceFrom(kMockDevice, kMockMakeDevice.get(), Version::V1_2); + + // verify failure + EXPECT_EQ(nullptr, device.get()); +} + 
+TEST_F(VersionedIDeviceInitializationTest, getCapabilities_1_3TransportFailure) { + // setup failure + EXPECT_CALL(*kMockDevice, getCapabilities_1_3(_)) + .Times(1) + .WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure)); + + // run test + const auto device = makeVersionedIDeviceFrom(kMockDevice, kMockMakeDevice.get(), Version::V1_3); + + // verify failure + EXPECT_EQ(nullptr, device.get()); +} + +TEST_F(VersionedIDeviceInitializationTest, getVersionStringTransportFailure) { + // setup failure + EXPECT_CALL(*kMockDevice, getVersionString(_)) + .Times(1) + .WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure)); + + // run test + const auto device = makeVersionedIDeviceFrom(kMockDevice, kMockMakeDevice.get(), Version::V1_2); + + // verify failure + EXPECT_EQ(nullptr, device.get()); +} + +TEST_F(VersionedIDeviceInitializationTest, getTypeTransportFailure) { + // setup failure + EXPECT_CALL(*kMockDevice, getType(_)) + .Times(1) + .WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure)); + + // run test + const auto device = makeVersionedIDeviceFrom(kMockDevice, kMockMakeDevice.get(), Version::V1_2); + + // verify failure + EXPECT_EQ(nullptr, device.get()); +} + +TEST_F(VersionedIDeviceInitializationTest, getSupportedExtensionsTransportFailure) { + // setup failure + EXPECT_CALL(*kMockDevice, getSupportedExtensions(_)) + .Times(1) + .WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure)); + + // run test + const auto device = makeVersionedIDeviceFrom(kMockDevice, kMockMakeDevice.get(), Version::V1_2); + + // verify failure + EXPECT_EQ(nullptr, device.get()); +} + +TEST_F(VersionedIDeviceInitializationTest, getNumberOfCacheFilesNeededTransportFailure) { + // setup failure + EXPECT_CALL(*kMockDevice, getNumberOfCacheFilesNeeded(_)) + .Times(1) + .WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure)); + + // run test + const auto device = makeVersionedIDeviceFrom(kMockDevice, kMockMakeDevice.get(), Version::V1_2); + + // verify failure + EXPECT_EQ(nullptr, 
device.get()); +} + +// Ensure device has cached metadata + +TEST_F(VersionedIDeviceV1_0Test, getCapabilities) { + // run test + const auto capabilities = kDevice->getCapabilities(); + const auto cached = kDevice->getCapabilities(); + + // verify success + EXPECT_EQ(PerformanceInfo{}, capabilities.relaxedFloat32toFloat16PerformanceScalar); + EXPECT_EQ(PerformanceInfo{}, capabilities.relaxedFloat32toFloat16PerformanceTensor); + EXPECT_LT(0u, capabilities.operandPerformance.size()); + EXPECT_EQ(cached, capabilities); +} + +TEST_F(VersionedIDeviceV1_1Test, getCapabilities) { + // run test + const auto capabilities = kDevice->getCapabilities(); + const auto cached = kDevice->getCapabilities(); + + // verify success + EXPECT_EQ(PerformanceInfo{}, capabilities.relaxedFloat32toFloat16PerformanceScalar); + EXPECT_EQ(PerformanceInfo{}, capabilities.relaxedFloat32toFloat16PerformanceTensor); + EXPECT_LT(0u, capabilities.operandPerformance.size()); + EXPECT_EQ(cached, capabilities); +} + +TEST_F(VersionedIDeviceV1_2Test, getCapabilities) { + // run test + const auto capabilities = kDevice->getCapabilities(); + const auto cached = kDevice->getCapabilities(); + + // verify success + EXPECT_EQ(PerformanceInfo{}, capabilities.relaxedFloat32toFloat16PerformanceScalar); + EXPECT_EQ(PerformanceInfo{}, capabilities.relaxedFloat32toFloat16PerformanceTensor); + EXPECT_EQ(0u, capabilities.operandPerformance.size()); + EXPECT_EQ(cached, capabilities); +} + +TEST_F(VersionedIDeviceV1_3Test, getCapabilities) { + // run test + const auto capabilities = kDevice->getCapabilities(); + const auto cached = kDevice->getCapabilities(); + + // verify success + EXPECT_EQ(PerformanceInfo{}, capabilities.relaxedFloat32toFloat16PerformanceScalar); + EXPECT_EQ(PerformanceInfo{}, capabilities.relaxedFloat32toFloat16PerformanceTensor); + EXPECT_EQ(0u, capabilities.operandPerformance.size()); + EXPECT_EQ(cached, capabilities); +} + +TEST_F(VersionedIDeviceV1_0Test, getVersionString) { + // run test + const 
auto versionString = kDevice->getVersionString(); + const auto cached = kDevice->getVersionString(); + + // verify success + EXPECT_EQ("UNKNOWN", versionString); + EXPECT_EQ(cached, versionString); +} + +TEST_F(VersionedIDeviceV1_1Test, getVersionString) { + // run test + const auto versionString = kDevice->getVersionString(); + const auto cached = kDevice->getVersionString(); + + // verify success + EXPECT_EQ("UNKNOWN", versionString); + EXPECT_EQ(cached, versionString); +} + +TEST_F(VersionedIDeviceV1_2Test, getVersionString) { + // run test + const auto versionString = kDevice->getVersionString(); + const auto cached = kDevice->getVersionString(); + + // verify success + EXPECT_EQ("Google-MockV1", versionString); + EXPECT_EQ(cached, versionString); +} + +TEST_F(VersionedIDeviceV1_3Test, getVersionString) { + // run test + const auto versionString = kDevice->getVersionString(); + const auto cached = kDevice->getVersionString(); + + // verify success + EXPECT_EQ("Google-MockV1", versionString); + EXPECT_EQ(cached, versionString); +} + +TEST_F(VersionedIDeviceV1_0Test, getType) { + // run test + const auto type = kDevice->getType(); + const auto cached = kDevice->getType(); + + // verify success + EXPECT_EQ(ANEURALNETWORKS_DEVICE_UNKNOWN, type); + EXPECT_EQ(cached, type); +} + +TEST_F(VersionedIDeviceV1_1Test, getType) { + // run test + const auto type = kDevice->getType(); + const auto cached = kDevice->getType(); + + // verify success + EXPECT_EQ(ANEURALNETWORKS_DEVICE_UNKNOWN, type); + EXPECT_EQ(cached, type); +} + +TEST_F(VersionedIDeviceV1_2Test, getType) { + // run test + const auto type = kDevice->getType(); + const auto cached = kDevice->getType(); + + // verify success + EXPECT_EQ(ANEURALNETWORKS_DEVICE_OTHER, type); + EXPECT_EQ(cached, type); +} + +TEST_F(VersionedIDeviceV1_3Test, getType) { + // run test + const auto type = kDevice->getType(); + const auto cached = kDevice->getType(); + + // verify success + EXPECT_EQ(ANEURALNETWORKS_DEVICE_OTHER, type); 
+ EXPECT_EQ(cached, type); +} + +TEST_F(VersionedIDeviceV1_0Test, getSupportedExtensions) { + // run test + const auto supportedExtensions = kDevice->getSupportedExtensions(); + const auto cached = kDevice->getSupportedExtensions(); + + // verify success + EXPECT_EQ(0u, supportedExtensions.size()); + EXPECT_EQ(cached, supportedExtensions); +} + +TEST_F(VersionedIDeviceV1_1Test, getSupportedExtensions) { + // run test + const auto supportedExtensions = kDevice->getSupportedExtensions(); + const auto cached = kDevice->getSupportedExtensions(); + + // verify success + EXPECT_EQ(0u, supportedExtensions.size()); + EXPECT_EQ(cached, supportedExtensions); +} + +TEST_F(VersionedIDeviceV1_2Test, getSupportedExtensions) { + // run test + const auto supportedExtensions = kDevice->getSupportedExtensions(); + const auto cached = kDevice->getSupportedExtensions(); + + // verify success + EXPECT_EQ(0u, supportedExtensions.size()); + EXPECT_EQ(cached, supportedExtensions); +} + +TEST_F(VersionedIDeviceV1_3Test, getSupportedExtensions) { + // run test + const auto supportedExtensions = kDevice->getSupportedExtensions(); + const auto cached = kDevice->getSupportedExtensions(); + + // verify success + EXPECT_EQ(0u, supportedExtensions.size()); + EXPECT_EQ(cached, supportedExtensions); +} + +TEST_F(VersionedIDeviceV1_0Test, getNumberOfCacheFilesNeeded) { + // run test + const auto [dataCacheFilesNeeded, modelCacheFilesNeeded] = + kDevice->getNumberOfCacheFilesNeeded(); + const auto [cachedDataCacheFilesNeeded, cachedModelCacheFilesNeeded] = + kDevice->getNumberOfCacheFilesNeeded(); + + // verify success + EXPECT_EQ(kNoCacheFilesNeeded, dataCacheFilesNeeded); + EXPECT_EQ(kNoCacheFilesNeeded, modelCacheFilesNeeded); + EXPECT_EQ(cachedDataCacheFilesNeeded, dataCacheFilesNeeded); + EXPECT_EQ(cachedModelCacheFilesNeeded, modelCacheFilesNeeded); +} + +TEST_F(VersionedIDeviceV1_1Test, getNumberOfCacheFilesNeeded) { + // run test + const auto [dataCacheFilesNeeded, modelCacheFilesNeeded] = + 
kDevice->getNumberOfCacheFilesNeeded(); + const auto [cachedDataCacheFilesNeeded, cachedModelCacheFilesNeeded] = + kDevice->getNumberOfCacheFilesNeeded(); + + // verify success + EXPECT_EQ(kNoCacheFilesNeeded, dataCacheFilesNeeded); + EXPECT_EQ(kNoCacheFilesNeeded, modelCacheFilesNeeded); + EXPECT_EQ(cachedDataCacheFilesNeeded, dataCacheFilesNeeded); + EXPECT_EQ(cachedModelCacheFilesNeeded, modelCacheFilesNeeded); +} + +TEST_F(VersionedIDeviceV1_2Test, getNumberOfCacheFilesNeeded) { + // run test + const auto [dataCacheFilesNeeded, modelCacheFilesNeeded] = + kDevice->getNumberOfCacheFilesNeeded(); + const auto [cachedDataCacheFilesNeeded, cachedModelCacheFilesNeeded] = + kDevice->getNumberOfCacheFilesNeeded(); + + // verify success + EXPECT_EQ(kMaxNumberOfCacheFiles, dataCacheFilesNeeded); + EXPECT_EQ(kMaxNumberOfCacheFiles, modelCacheFilesNeeded); + EXPECT_EQ(cachedDataCacheFilesNeeded, dataCacheFilesNeeded); + EXPECT_EQ(cachedModelCacheFilesNeeded, modelCacheFilesNeeded); +} + +TEST_F(VersionedIDeviceV1_3Test, getNumberOfCacheFilesNeeded) { + // run test + const auto [dataCacheFilesNeeded, modelCacheFilesNeeded] = + kDevice->getNumberOfCacheFilesNeeded(); + const auto [cachedDataCacheFilesNeeded, cachedModelCacheFilesNeeded] = + kDevice->getNumberOfCacheFilesNeeded(); + + // verify success + EXPECT_EQ(kMaxNumberOfCacheFiles, dataCacheFilesNeeded); + EXPECT_EQ(kMaxNumberOfCacheFiles, modelCacheFilesNeeded); + EXPECT_EQ(cachedDataCacheFilesNeeded, dataCacheFilesNeeded); + EXPECT_EQ(cachedModelCacheFilesNeeded, modelCacheFilesNeeded); +} + +TEST_F(VersionedIDeviceV1_0Test, getFeatureLevel) { + // run test + const auto featureLevel = kDevice->getFeatureLevel(); + const auto cached = kDevice->getFeatureLevel(); + + // verify success + constexpr int64_t expectedFeatureLevel = __ANDROID_API_O_MR1__; + EXPECT_EQ(expectedFeatureLevel, featureLevel); + EXPECT_EQ(cached, featureLevel); +} + +TEST_F(VersionedIDeviceV1_1Test, getFeatureLevel) { + // run test + const auto 
featureLevel = kDevice->getFeatureLevel(); + const auto cached = kDevice->getFeatureLevel(); + + // verify success + constexpr int64_t expectedFeatureLevel = __ANDROID_API_P__; + EXPECT_EQ(expectedFeatureLevel, featureLevel); + EXPECT_EQ(cached, featureLevel); +} + +TEST_F(VersionedIDeviceV1_2Test, getFeatureLevel) { + // run test + const auto featureLevel = kDevice->getFeatureLevel(); + const auto cached = kDevice->getFeatureLevel(); + + // verify success + constexpr int64_t expectedFeatureLevel = __ANDROID_API_Q__; + EXPECT_EQ(expectedFeatureLevel, featureLevel); + EXPECT_EQ(cached, featureLevel); +} + +TEST_F(VersionedIDeviceV1_3Test, getFeatureLevel) { + // run test + const auto featureLevel = kDevice->getFeatureLevel(); + const auto cached = kDevice->getFeatureLevel(); + + // verify success + constexpr int64_t expectedFeatureLevel = __ANDROID_API_R__; + EXPECT_EQ(expectedFeatureLevel, featureLevel); + EXPECT_EQ(cached, featureLevel); +} + +// Simulate successful test + +TEST_F(VersionedIDeviceV1_0Test, getSupportedOperations) { + // setup call + const auto ret = [](const auto& /*model*/, const auto cb) { + cb(V1_0::ErrorStatus::NONE, {}); + return Void(); + }; + EXPECT_CALL(*kMockDevice, getSupportedOperations(_, _)).Times(1).WillOnce(Invoke(ret)); + + // run test + const auto metaModel = MetaModel({}, /*strictSlicing=*/true); + const auto [resultCode, supportedOperations] = kDevice->getSupportedOperations(metaModel); + + // verify success + EXPECT_EQ(V1_3::ErrorStatus::NONE, resultCode); + EXPECT_EQ(0u, supportedOperations.size()); +} + +TEST_F(VersionedIDeviceV1_1Test, getSupportedOperations) { + // setup call + const auto ret = [](const auto& /*model*/, const auto cb) { + cb(V1_0::ErrorStatus::NONE, {}); + return Void(); + }; + EXPECT_CALL(*kMockDevice, getSupportedOperations_1_1(_, _)).Times(1).WillOnce(Invoke(ret)); + + // run test + const auto metaModel = MetaModel({}, /*strictSlicing=*/true); + const auto [resultCode, supportedOperations] = 
kDevice->getSupportedOperations(metaModel); + + // verify success + EXPECT_EQ(V1_3::ErrorStatus::NONE, resultCode); + EXPECT_EQ(0u, supportedOperations.size()); +} + +TEST_F(VersionedIDeviceV1_2Test, getSupportedOperations) { + // setup call + const auto ret = [](const auto& /*model*/, const auto cb) { + cb(V1_0::ErrorStatus::NONE, {}); + return Void(); + }; + EXPECT_CALL(*kMockDevice, getSupportedOperations_1_2(_, _)).Times(1).WillOnce(Invoke(ret)); + + // run test + const auto metaModel = MetaModel({}, /*strictSlicing=*/true); + const auto [resultCode, supportedOperations] = kDevice->getSupportedOperations(metaModel); + + // verify success + EXPECT_EQ(V1_3::ErrorStatus::NONE, resultCode); + EXPECT_EQ(0u, supportedOperations.size()); +} + +TEST_F(VersionedIDeviceV1_3Test, getSupportedOperations) { + // setup call + const auto ret = [](const auto& /*model*/, const auto cb) { + cb(V1_3::ErrorStatus::NONE, {}); + return Void(); + }; + EXPECT_CALL(*kMockDevice, getSupportedOperations_1_3(_, _)).Times(1).WillOnce(Invoke(ret)); + + // run test + const auto metaModel = MetaModel({}, /*strictSlicing=*/true); + const auto [resultCode, supportedOperations] = kDevice->getSupportedOperations(metaModel); + + // verify success + EXPECT_EQ(V1_3::ErrorStatus::NONE, resultCode); + EXPECT_EQ(0u, supportedOperations.size()); +} + +TEST_F(VersionedIDeviceV1_0Test, prepareModel) { + // setup call + const sp<MockPreparedModel> mockPreparedModel = MockPreparedModel::create(); + const auto ret = makePreparedModelReturn(V1_0::ErrorStatus::NONE, V1_0::ErrorStatus::NONE, + mockPreparedModel); + EXPECT_CALL(*kMockDevice, prepareModel(_, _)).Times(1).WillOnce(Invoke(ret)); + + // run test + const ModelFactory makeModel = [] { return V1_3::Model{}; }; + const auto [resultCode, preparedModel] = kDevice->prepareModel(makeModel, {}, {}, {}, {}, {}); + + // verify success + EXPECT_EQ(ANEURALNETWORKS_NO_ERROR, resultCode); + EXPECT_NE(nullptr, preparedModel.get()); +} + 
+TEST_F(VersionedIDeviceV1_1Test, prepareModel) { + // setup call + const sp<MockPreparedModel> mockPreparedModel = MockPreparedModel::create(); + const auto ret = makePreparedModel_1_1Return(V1_0::ErrorStatus::NONE, V1_0::ErrorStatus::NONE, + mockPreparedModel); + EXPECT_CALL(*kMockDevice, prepareModel_1_1(_, _, _)).Times(1).WillOnce(Invoke(ret)); + + // run test + const ModelFactory makeModel = [] { return V1_3::Model{}; }; + const auto [resultCode, preparedModel] = kDevice->prepareModel(makeModel, {}, {}, {}, {}, {}); + + // verify success + EXPECT_EQ(ANEURALNETWORKS_NO_ERROR, resultCode); + EXPECT_NE(nullptr, preparedModel.get()); +} + +TEST_F(VersionedIDeviceV1_2Test, prepareModel) { + // setup call + const sp<MockPreparedModel> mockPreparedModel = MockPreparedModel::create(); + const auto ret = makePreparedModel_1_2Return(V1_0::ErrorStatus::NONE, V1_0::ErrorStatus::NONE, + mockPreparedModel); + EXPECT_CALL(*kMockDevice, prepareModel_1_2(_, _, _, _, _, _)).Times(1).WillOnce(Invoke(ret)); + + // run test + const ModelFactory makeModel = [] { return V1_3::Model{}; }; + const auto [resultCode, preparedModel] = kDevice->prepareModel(makeModel, {}, {}, {}, {}, {}); + + // verify success + EXPECT_EQ(ANEURALNETWORKS_NO_ERROR, resultCode); + EXPECT_NE(nullptr, preparedModel.get()); +} + +TEST_F(VersionedIDeviceV1_3Test, prepareModel) { + // setup call + const sp<MockPreparedModel> mockPreparedModel = MockPreparedModel::create(); + const auto ret = makePreparedModel_1_3Return(V1_3::ErrorStatus::NONE, V1_3::ErrorStatus::NONE, + mockPreparedModel); + EXPECT_CALL(*kMockDevice, prepareModel_1_3(_, _, _, _, _, _, _, _)) + .Times(1) + .WillOnce(Invoke(ret)); + + // run test + const ModelFactory makeModel = [] { return V1_3::Model{}; }; + const auto [resultCode, preparedModel] = kDevice->prepareModel(makeModel, {}, {}, {}, {}, {}); + + // verify success + EXPECT_EQ(ANEURALNETWORKS_NO_ERROR, resultCode); + EXPECT_NE(nullptr, preparedModel.get()); +} + 
+TEST_F(VersionedIDeviceV1_0Test, allocate) { + // run test + const auto [status, buffer, token] = kDevice->allocate({}, {}, {}, {}); + + // verify success + EXPECT_EQ(V1_3::ErrorStatus::GENERAL_FAILURE, status); + EXPECT_EQ(nullptr, buffer.get()); + EXPECT_EQ(0u, token); +} + +TEST_F(VersionedIDeviceV1_1Test, allocate) { + // run test + const auto [status, buffer, token] = kDevice->allocate({}, {}, {}, {}); + + // verify success + EXPECT_EQ(V1_3::ErrorStatus::GENERAL_FAILURE, status); + EXPECT_EQ(nullptr, buffer.get()); + EXPECT_EQ(0u, token); +} + +TEST_F(VersionedIDeviceV1_2Test, allocate) { + // run test + const auto [status, buffer, token] = kDevice->allocate({}, {}, {}, {}); + + // verify success + EXPECT_EQ(V1_3::ErrorStatus::GENERAL_FAILURE, status); + EXPECT_EQ(nullptr, buffer.get()); + EXPECT_EQ(0u, token); +} + +TEST_F(VersionedIDeviceV1_3Test, allocate) { + // setup call + const sp<MockBuffer> mockBuffer = new MockBuffer(); + constexpr uint32_t mockToken = 1; + const auto ret = [mockBuffer](const BufferDesc& /*desc*/, + const hidl_vec<sp<V1_3::IPreparedModel>>& /*preparedModels*/, + const hidl_vec<BufferRole>& /*inputRoles*/, + const hidl_vec<BufferRole>& /*outputRoles*/, + V1_3::IDevice::allocate_cb cb) -> Return<void> { + cb(V1_3::ErrorStatus::NONE, mockBuffer, mockToken); + return Void(); + }; + EXPECT_CALL(*kMockDevice, allocate(_, _, _, _, _)).Times(1).WillOnce(Invoke(ret)); + + // run test + const auto [status, buffer, token] = kDevice->allocate({}, {}, {}, {}); + + // verify success + EXPECT_EQ(V1_3::ErrorStatus::NONE, status); + EXPECT_NE(nullptr, buffer.get()); + EXPECT_NE(0u, token); +} + +TEST_F(VersionedIDeviceMockTest, wait) { + // setup call + const auto ret = []() -> Return<void> { return {}; }; + EXPECT_CALL(*kMockDevice, ping()).Times(1).WillOnce(Invoke(ret)); + + // run test + const auto resultCode = kDevice->wait(); + + // verify success + EXPECT_EQ(ANEURALNETWORKS_NO_ERROR, resultCode); +} + +// Simulate general failure + 
+TEST_F(VersionedIDeviceV1_0Test, getSupportedOperationsFailure) { + // setup failure + const auto ret = [](const auto& /*model*/, const auto cb) { + cb(V1_0::ErrorStatus::GENERAL_FAILURE, {}); + return Void(); + }; + EXPECT_CALL(*kMockDevice, getSupportedOperations(_, _)).Times(1).WillOnce(Invoke(ret)); + + // run test + const auto metaModel = MetaModel({}, /*strictSlicing=*/true); + const auto [resultCode, supportedOperations] = kDevice->getSupportedOperations(metaModel); + + // verify failure + EXPECT_EQ(V1_3::ErrorStatus::GENERAL_FAILURE, resultCode); + EXPECT_EQ(0u, supportedOperations.size()); +} + +TEST_F(VersionedIDeviceV1_1Test, getSupportedOperationsFailure) { + // setup failure + const auto ret = [](const auto& /*model*/, const auto cb) { + cb(V1_0::ErrorStatus::GENERAL_FAILURE, {}); + return Void(); + }; + EXPECT_CALL(*kMockDevice, getSupportedOperations_1_1(_, _)).Times(1).WillOnce(Invoke(ret)); + + // run test + const auto metaModel = MetaModel({}, /*strictSlicing=*/true); + const auto [resultCode, supportedOperations] = kDevice->getSupportedOperations(metaModel); + + // verify failure + EXPECT_EQ(V1_3::ErrorStatus::GENERAL_FAILURE, resultCode); + EXPECT_EQ(0u, supportedOperations.size()); +} + +TEST_F(VersionedIDeviceV1_2Test, getSupportedOperationsFailure) { + // setup failure + const auto ret = [](const auto& /*model*/, const auto cb) { + cb(V1_0::ErrorStatus::GENERAL_FAILURE, {}); + return Void(); + }; + EXPECT_CALL(*kMockDevice, getSupportedOperations_1_2(_, _)).Times(1).WillOnce(Invoke(ret)); + + // run test + const auto metaModel = MetaModel({}, /*strictSlicing=*/true); + const auto [resultCode, supportedOperations] = kDevice->getSupportedOperations(metaModel); + + // verify failure + EXPECT_EQ(V1_3::ErrorStatus::GENERAL_FAILURE, resultCode); + EXPECT_EQ(0u, supportedOperations.size()); +} + +TEST_F(VersionedIDeviceV1_3Test, getSupportedOperationsFailure) { + // setup failure + const auto ret = [](const auto& /*model*/, const auto cb) { + 
cb(V1_3::ErrorStatus::GENERAL_FAILURE, {}); + return Void(); + }; + EXPECT_CALL(*kMockDevice, getSupportedOperations_1_3(_, _)).Times(1).WillOnce(Invoke(ret)); + + // run test + const auto metaModel = MetaModel({}, /*strictSlicing=*/true); + const auto [resultCode, supportedOperations] = kDevice->getSupportedOperations(metaModel); + + // verify failure + EXPECT_EQ(V1_3::ErrorStatus::GENERAL_FAILURE, resultCode); + EXPECT_EQ(0u, supportedOperations.size()); +} + +TEST_F(VersionedIDeviceV1_0Test, prepareModelLaunchFailure) { + // setup failure + const sp<MockPreparedModel> mockPreparedModel = MockPreparedModel::create(); + const auto ret = makePreparedModelReturn(V1_0::ErrorStatus::GENERAL_FAILURE, + V1_0::ErrorStatus::NONE, mockPreparedModel); + EXPECT_CALL(*kMockDevice, prepareModel(_, _)).Times(1).WillOnce(Invoke(ret)); + + // run test + const ModelFactory makeModel = [] { return V1_3::Model{}; }; + const auto [resultCode, preparedModel] = kDevice->prepareModel(makeModel, {}, {}, {}, {}, {}); + + // verify failure + EXPECT_EQ(ANEURALNETWORKS_OP_FAILED, resultCode); + EXPECT_EQ(nullptr, preparedModel.get()); +} + +TEST_F(VersionedIDeviceV1_1Test, prepareModelLaunchFailure) { + // setup failure + const sp<MockPreparedModel> mockPreparedModel = MockPreparedModel::create(); + const auto ret = makePreparedModel_1_1Return(V1_0::ErrorStatus::GENERAL_FAILURE, + V1_0::ErrorStatus::NONE, mockPreparedModel); + EXPECT_CALL(*kMockDevice, prepareModel_1_1(_, _, _)).Times(1).WillOnce(Invoke(ret)); + + // run test + const ModelFactory makeModel = [] { return V1_3::Model{}; }; + const auto [resultCode, preparedModel] = kDevice->prepareModel(makeModel, {}, {}, {}, {}, {}); + + // verify failure + EXPECT_EQ(ANEURALNETWORKS_OP_FAILED, resultCode); + EXPECT_EQ(nullptr, preparedModel.get()); +} + +TEST_F(VersionedIDeviceV1_2Test, prepareModelLaunchFailure) { + // setup failure + const sp<MockPreparedModel> mockPreparedModel = MockPreparedModel::create(); + const auto ret = 
makePreparedModel_1_2Return(V1_0::ErrorStatus::GENERAL_FAILURE, + V1_0::ErrorStatus::NONE, mockPreparedModel); + EXPECT_CALL(*kMockDevice, prepareModel_1_2(_, _, _, _, _, _)).Times(1).WillOnce(Invoke(ret)); + + // run test + const ModelFactory makeModel = [] { return V1_3::Model{}; }; + const auto [resultCode, preparedModel] = kDevice->prepareModel(makeModel, {}, {}, {}, {}, {}); + + // verify failure + EXPECT_EQ(ANEURALNETWORKS_OP_FAILED, resultCode); + EXPECT_EQ(nullptr, preparedModel.get()); +} + +TEST_F(VersionedIDeviceV1_3Test, prepareModelLaunchFailure) { + // setup failure + const sp<MockPreparedModel> mockPreparedModel = MockPreparedModel::create(); + const auto ret = makePreparedModel_1_3Return(V1_3::ErrorStatus::GENERAL_FAILURE, + V1_3::ErrorStatus::NONE, mockPreparedModel); + EXPECT_CALL(*kMockDevice, prepareModel_1_3(_, _, _, _, _, _, _, _)) + .Times(1) + .WillOnce(Invoke(ret)); + + // run test + const ModelFactory makeModel = [] { return V1_3::Model{}; }; + const auto [resultCode, preparedModel] = kDevice->prepareModel(makeModel, {}, {}, {}, {}, {}); + + // verify failure + EXPECT_EQ(ANEURALNETWORKS_OP_FAILED, resultCode); + EXPECT_EQ(nullptr, preparedModel.get()); +} + +TEST_F(VersionedIDeviceV1_0Test, prepareModelReturnFailure) { + // setup failure + const sp<MockPreparedModel> mockPreparedModel = MockPreparedModel::create(); + const auto ret = makePreparedModelReturn(V1_0::ErrorStatus::NONE, + V1_0::ErrorStatus::GENERAL_FAILURE, mockPreparedModel); + EXPECT_CALL(*kMockDevice, prepareModel(_, _)).Times(1).WillOnce(Invoke(ret)); + + // run test + const ModelFactory makeModel = [] { return V1_3::Model{}; }; + const auto [resultCode, preparedModel] = kDevice->prepareModel(makeModel, {}, {}, {}, {}, {}); + + // verify failure + EXPECT_EQ(ANEURALNETWORKS_OP_FAILED, resultCode); + EXPECT_EQ(nullptr, preparedModel.get()); +} + +TEST_F(VersionedIDeviceV1_1Test, prepareModelReturnFailure) { + // setup failure + const sp<MockPreparedModel> mockPreparedModel = 
MockPreparedModel::create(); + const auto ret = makePreparedModel_1_1Return( + V1_0::ErrorStatus::NONE, V1_0::ErrorStatus::GENERAL_FAILURE, mockPreparedModel); + EXPECT_CALL(*kMockDevice, prepareModel_1_1(_, _, _)).Times(1).WillOnce(Invoke(ret)); + + // run test + const ModelFactory makeModel = [] { return V1_3::Model{}; }; + const auto [resultCode, preparedModel] = kDevice->prepareModel(makeModel, {}, {}, {}, {}, {}); + + // verify failure + EXPECT_EQ(ANEURALNETWORKS_OP_FAILED, resultCode); + EXPECT_EQ(nullptr, preparedModel.get()); +} + +TEST_F(VersionedIDeviceV1_2Test, prepareModelReturnFailure) { + // setup failure + const sp<MockPreparedModel> mockPreparedModel = MockPreparedModel::create(); + const auto ret = makePreparedModel_1_2Return( + V1_0::ErrorStatus::NONE, V1_0::ErrorStatus::GENERAL_FAILURE, mockPreparedModel); + EXPECT_CALL(*kMockDevice, prepareModel_1_2(_, _, _, _, _, _)).Times(1).WillOnce(Invoke(ret)); + + // run test + const ModelFactory makeModel = [] { return V1_3::Model{}; }; + const auto [resultCode, preparedModel] = kDevice->prepareModel(makeModel, {}, {}, {}, {}, {}); + + // verify failure + EXPECT_EQ(ANEURALNETWORKS_OP_FAILED, resultCode); + EXPECT_EQ(nullptr, preparedModel.get()); +} + +TEST_F(VersionedIDeviceV1_3Test, prepareModelReturnFailure) { + // setup failure + const sp<MockPreparedModel> mockPreparedModel = MockPreparedModel::create(); + const auto ret = makePreparedModel_1_3Return( + V1_3::ErrorStatus::NONE, V1_3::ErrorStatus::GENERAL_FAILURE, mockPreparedModel); + EXPECT_CALL(*kMockDevice, prepareModel_1_3(_, _, _, _, _, _, _, _)) + .Times(1) + .WillOnce(Invoke(ret)); + + // run test + const ModelFactory makeModel = [] { return V1_3::Model{}; }; + const auto [resultCode, preparedModel] = kDevice->prepareModel(makeModel, {}, {}, {}, {}, {}); + + // verify failure + EXPECT_EQ(ANEURALNETWORKS_OP_FAILED, resultCode); + EXPECT_EQ(nullptr, preparedModel.get()); +} + +TEST_F(VersionedIDeviceV1_0Test, prepareModelNullptrError) { + // 
setup failure + const sp<MockPreparedModel> mockPreparedModel = nullptr; + const auto ret = makePreparedModelReturn(V1_0::ErrorStatus::NONE, V1_0::ErrorStatus::NONE, + mockPreparedModel); + EXPECT_CALL(*kMockDevice, prepareModel(_, _)).Times(1).WillOnce(Invoke(ret)); + + // run test + const ModelFactory makeModel = [] { return V1_3::Model{}; }; + const auto [resultCode, preparedModel] = kDevice->prepareModel(makeModel, {}, {}, {}, {}, {}); + + // verify failure + EXPECT_EQ(ANEURALNETWORKS_OP_FAILED, resultCode); + EXPECT_EQ(nullptr, preparedModel.get()); +} + +TEST_F(VersionedIDeviceV1_1Test, prepareModelNullptrError) { + // setup failure + const sp<MockPreparedModel> mockPreparedModel = nullptr; + const auto ret = makePreparedModel_1_1Return(V1_0::ErrorStatus::NONE, V1_0::ErrorStatus::NONE, + mockPreparedModel); + EXPECT_CALL(*kMockDevice, prepareModel_1_1(_, _, _)).Times(1).WillOnce(Invoke(ret)); + + // run test + const ModelFactory makeModel = [] { return V1_3::Model{}; }; + const auto [resultCode, preparedModel] = kDevice->prepareModel(makeModel, {}, {}, {}, {}, {}); + + // verify failure + EXPECT_EQ(ANEURALNETWORKS_OP_FAILED, resultCode); + EXPECT_EQ(nullptr, preparedModel.get()); +} + +TEST_F(VersionedIDeviceV1_2Test, prepareModelNullptrError) { + // setup failure + const sp<MockPreparedModel> mockPreparedModel = nullptr; + const auto ret = makePreparedModel_1_2Return(V1_0::ErrorStatus::NONE, V1_0::ErrorStatus::NONE, + mockPreparedModel); + EXPECT_CALL(*kMockDevice, prepareModel_1_2(_, _, _, _, _, _)).Times(1).WillOnce(Invoke(ret)); + + // run test + const ModelFactory makeModel = [] { return V1_3::Model{}; }; + const auto [resultCode, preparedModel] = kDevice->prepareModel(makeModel, {}, {}, {}, {}, {}); + + // verify failure + EXPECT_EQ(ANEURALNETWORKS_OP_FAILED, resultCode); + EXPECT_EQ(nullptr, preparedModel.get()); +} + +TEST_F(VersionedIDeviceV1_3Test, prepareModelNullptrError) { + // setup failure + const sp<MockPreparedModel> mockPreparedModel = 
nullptr; + const auto ret = makePreparedModel_1_3Return(V1_3::ErrorStatus::NONE, V1_3::ErrorStatus::NONE, + mockPreparedModel); + EXPECT_CALL(*kMockDevice, prepareModel_1_3(_, _, _, _, _, _, _, _)) + .Times(1) + .WillOnce(Invoke(ret)); + + // run test + const ModelFactory makeModel = [] { return V1_3::Model{}; }; + const auto [resultCode, preparedModel] = kDevice->prepareModel(makeModel, {}, {}, {}, {}, {}); + + // verify failure + EXPECT_EQ(ANEURALNETWORKS_OP_FAILED, resultCode); + EXPECT_EQ(nullptr, preparedModel.get()); +} + +TEST_F(VersionedIDeviceV1_3Test, allocateFailure) { + // setup failure + const auto ret = [](const BufferDesc& /*desc*/, + const hidl_vec<sp<V1_3::IPreparedModel>>& /*preparedModels*/, + const hidl_vec<BufferRole>& /*inputRoles*/, + const hidl_vec<BufferRole>& /*outputRoles*/, + V1_3::IDevice::allocate_cb cb) -> Return<void> { + cb(V1_3::ErrorStatus::GENERAL_FAILURE, nullptr, 0); + return Void(); + }; + EXPECT_CALL(*kMockDevice, allocate(_, _, _, _, _)).Times(1).WillOnce(Invoke(ret)); + + // run test + const auto [status, buffer, token] = kDevice->allocate({}, {}, {}, {}); + + // verify failure + EXPECT_EQ(V1_3::ErrorStatus::GENERAL_FAILURE, status); + EXPECT_EQ(nullptr, buffer.get()); + EXPECT_EQ(0u, token); +} + +// Simulate transport failure + +TEST_F(VersionedIDeviceV1_0Test, getSupportedOperationsTransportFailure) { + // setup failure + EXPECT_CALL(*kMockDevice, getSupportedOperations(_, _)) + .Times(1) + .WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure)); + + // run test + const auto metaModel = MetaModel({}, /*strictSlicing=*/true); + const auto [resultCode, supportedOperations] = kDevice->getSupportedOperations(metaModel); + + // verify failure + EXPECT_EQ(V1_3::ErrorStatus::GENERAL_FAILURE, resultCode); + EXPECT_EQ(0u, supportedOperations.size()); +} + +TEST_F(VersionedIDeviceV1_1Test, getSupportedOperationsTransportFailure) { + // setup failure + EXPECT_CALL(*kMockDevice, getSupportedOperations_1_1(_, _)) + .Times(1) + 
.WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure)); + + // run test + const auto metaModel = MetaModel({}, /*strictSlicing=*/true); + const auto [resultCode, supportedOperations] = kDevice->getSupportedOperations(metaModel); + + // verify failure + EXPECT_EQ(V1_3::ErrorStatus::GENERAL_FAILURE, resultCode); + EXPECT_EQ(0u, supportedOperations.size()); +} + +TEST_F(VersionedIDeviceV1_2Test, getSupportedOperationsTransportFailure) { + // setup failure + EXPECT_CALL(*kMockDevice, getSupportedOperations_1_2(_, _)) + .Times(1) + .WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure)); + + // run test + const auto metaModel = MetaModel({}, /*strictSlicing=*/true); + const auto [resultCode, supportedOperations] = kDevice->getSupportedOperations(metaModel); + + // verify failure + EXPECT_EQ(V1_3::ErrorStatus::GENERAL_FAILURE, resultCode); + EXPECT_EQ(0u, supportedOperations.size()); +} + +TEST_F(VersionedIDeviceV1_3Test, getSupportedOperationsTransportFailure) { + // setup failure + EXPECT_CALL(*kMockDevice, getSupportedOperations_1_3(_, _)) + .Times(1) + .WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure)); + + // run test + const auto metaModel = MetaModel({}, /*strictSlicing=*/true); + const auto [resultCode, supportedOperations] = kDevice->getSupportedOperations(metaModel); + + // verify failure + EXPECT_EQ(V1_3::ErrorStatus::GENERAL_FAILURE, resultCode); + EXPECT_EQ(0u, supportedOperations.size()); +} + +TEST_F(VersionedIDeviceV1_0Test, prepareModelTransportFailure) { + // setup failure + EXPECT_CALL(*kMockDevice, prepareModel(_, _)) + .Times(1) + .WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure)); + + // run test + const ModelFactory makeModel = [] { return V1_3::Model{}; }; + const auto [resultCode, preparedModel] = kDevice->prepareModel(makeModel, {}, {}, {}, {}, {}); + + // verify failure + EXPECT_EQ(ANEURALNETWORKS_OP_FAILED, resultCode); + EXPECT_EQ(nullptr, preparedModel.get()); +} + +TEST_F(VersionedIDeviceV1_1Test, 
prepareModelTransportFailure) { + // setup failure + EXPECT_CALL(*kMockDevice, prepareModel_1_1(_, _, _)) + .Times(1) + .WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure)); + + // run test + const ModelFactory makeModel = [] { return V1_3::Model{}; }; + const auto [resultCode, preparedModel] = kDevice->prepareModel(makeModel, {}, {}, {}, {}, {}); + + // verify failure + EXPECT_EQ(ANEURALNETWORKS_OP_FAILED, resultCode); + EXPECT_EQ(nullptr, preparedModel.get()); +} + +TEST_F(VersionedIDeviceV1_2Test, prepareModelTransportFailure) { + // setup failure + EXPECT_CALL(*kMockDevice, prepareModel_1_2(_, _, _, _, _, _)) + .Times(1) + .WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure)); + + // run test + const ModelFactory makeModel = [] { return V1_3::Model{}; }; + const auto [resultCode, preparedModel] = kDevice->prepareModel(makeModel, {}, {}, {}, {}, {}); + + // verify failure + EXPECT_EQ(ANEURALNETWORKS_OP_FAILED, resultCode); + EXPECT_EQ(nullptr, preparedModel.get()); +} + +TEST_F(VersionedIDeviceV1_3Test, prepareModelTransportFailure) { + // setup failure + EXPECT_CALL(*kMockDevice, prepareModel_1_3(_, _, _, _, _, _, _, _)) + .Times(1) + .WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure)); + + // run test + const ModelFactory makeModel = [] { return V1_3::Model{}; }; + const auto [resultCode, preparedModel] = kDevice->prepareModel(makeModel, {}, {}, {}, {}, {}); + + // verify failure + EXPECT_EQ(ANEURALNETWORKS_OP_FAILED, resultCode); + EXPECT_EQ(nullptr, preparedModel.get()); +} + +TEST_F(VersionedIDeviceV1_3Test, allocateTransportFailure) { + // setup failure + EXPECT_CALL(*kMockDevice, allocate(_, _, _, _, _)) + .Times(1) + .WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure)); + + // run test + const auto [status, buffer, token] = kDevice->allocate({}, {}, {}, {}); + + // verify failure + EXPECT_EQ(V1_3::ErrorStatus::GENERAL_FAILURE, status); + EXPECT_EQ(nullptr, buffer.get()); + EXPECT_EQ(0u, token); +} + +TEST_F(VersionedIDeviceMockTest, 
waitTransportFailure) { + // setup call + EXPECT_CALL(*kMockDevice, ping()) + .Times(1) + .WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure)); + + // run test + const auto resultCode = kDevice->wait(); + + // verify success + EXPECT_EQ(ANEURALNETWORKS_OP_FAILED, resultCode); +} + +// Simulate service crash + +// TODO: enable this test once b/154183300 is fixed. +TEST_F(VersionedIDeviceMockTest, DISABLED_prepareModelRecoverCrash) { + // setup original device calls + EXPECT_CALL(*kMockDevice, prepareModel_1_3(_, _, _, _, _, _, _, _)) + .Times(1) + .WillOnce(InvokeWithoutArgs(makeDeadObjectFailure)); + EXPECT_CALL(*kMockDevice, ping()).Times(1).WillOnce(InvokeWithoutArgs(makeDeadObjectFailure)); + + // setup recovery call + const sp<MockDevice> mockRecoveredDevice = MockDevice::create(); + EXPECT_CALL(*kMockMakeDevice, Call(/*blocking=*/false)) + .Times(1) + .WillOnce(testing::Return(mockRecoveredDevice)); + + // setup recovered device calls + const sp<MockPreparedModel> mockPreparedModel = MockPreparedModel::create(); + const auto ret = makePreparedModel_1_3Return(V1_3::ErrorStatus::NONE, V1_3::ErrorStatus::NONE, + mockPreparedModel); + EXPECT_CALL(*mockRecoveredDevice, linkToDeathRet()).Times(1); + EXPECT_CALL(*mockRecoveredDevice, prepareModel_1_3(_, _, _, _, _, _, _, _)) + .Times(1) + .WillOnce(Invoke(ret)); + + // run test + const ModelFactory makeModel = [] { return V1_3::Model{}; }; + const auto [resultCode, preparedModel] = kDevice->prepareModel(makeModel, {}, {}, {}, {}, {}); + + // verify success + EXPECT_EQ(ANEURALNETWORKS_NO_ERROR, resultCode); + EXPECT_NE(nullptr, preparedModel.get()); +} + +TEST_F(VersionedIDeviceMockTest, prepareModelFullCrash) { + // setup failure + EXPECT_CALL(*kMockDevice, prepareModel_1_3(_, _, _, _, _, _, _, _)) + .Times(1) + .WillRepeatedly(InvokeWithoutArgs(makeDeadObjectFailure)); + EXPECT_CALL(*kMockDevice, ping()) + .Times(1) + .WillRepeatedly(InvokeWithoutArgs(makeDeadObjectFailure)); + EXPECT_CALL(*kMockMakeDevice, 
Call(/*blocking=*/false)) + .Times(1) + .WillOnce(testing::Return(nullptr)); + + // run test + const ModelFactory makeModel = [] { return V1_3::Model{}; }; + const auto [resultCode, preparedModel] = kDevice->prepareModel(makeModel, {}, {}, {}, {}, {}); + + // verify failure + EXPECT_EQ(ANEURALNETWORKS_DEAD_OBJECT, resultCode); + EXPECT_EQ(nullptr, preparedModel.get()); +} + +TEST_F(VersionedIDeviceMockTest, prepareModelAsyncCrash) { + // setup failure + const auto ret = [this]() -> Return<V1_3::ErrorStatus> { + kMockDevice->simulateCrash(); + return V1_3::ErrorStatus::NONE; + }; + EXPECT_CALL(*kMockDevice, prepareModel_1_3(_, _, _, _, _, _, _, _)) + .Times(1) + .WillOnce(InvokeWithoutArgs(ret)); + + // run test + const ModelFactory makeModel = [] { return V1_3::Model{}; }; + const auto [resultCode, preparedModel] = kDevice->prepareModel(makeModel, {}, {}, {}, {}, {}); + + // verify failure + EXPECT_EQ(ANEURALNETWORKS_DEAD_OBJECT, resultCode); + EXPECT_EQ(nullptr, preparedModel.get()); +} + +TEST_F(VersionedIDeviceMockTest, waitCrash) { + // setup failure + EXPECT_CALL(*kMockDevice, ping()) + .Times(1) + .WillRepeatedly(InvokeWithoutArgs(makeDeadObjectFailure)); + EXPECT_CALL(*kMockMakeDevice, Call(/*blocking=*/true)) + .Times(1) + .WillOnce(testing::Return(nullptr)); + + // run test + const auto resultCode = kDevice->wait(); + + // verify failure + EXPECT_EQ(ANEURALNETWORKS_OP_FAILED, resultCode); +} + +TEST_F(VersionedIDeviceMockTest, waitRecoverCrash) { + // setup original device calls + EXPECT_CALL(*kMockDevice, ping()).Times(1).WillOnce(InvokeWithoutArgs(makeDeadObjectFailure)); + + // setup recovery call + const sp<MockDevice> mockRecoveredDevice = MockDevice::create(); + EXPECT_CALL(*kMockMakeDevice, Call(/*blocking=*/true)) + .Times(1) + .WillOnce(testing::Return(mockRecoveredDevice)); + + // setup recovered device calls + const auto ret = []() -> Return<bool> { return true; }; + EXPECT_CALL(*mockRecoveredDevice, 
linkToDeathRet()).Times(1).WillOnce(Invoke(ret)); + + // run test + const auto resultCode = kDevice->wait(); + + // verify success + EXPECT_EQ(ANEURALNETWORKS_NO_ERROR, resultCode); +} + +TEST_F(VersionedIDeviceMockTest, waitFailedRecoverCrash) { + // setup original device calls + EXPECT_CALL(*kMockDevice, ping()).Times(1).WillOnce(InvokeWithoutArgs(makeDeadObjectFailure)); + + // setup recovery call + const sp<MockDevice> mockRecoveredDevice = MockDevice::create(); + EXPECT_CALL(*kMockMakeDevice, Call(/*blocking=*/true)) + .Times(1) + .WillOnce(testing::Return(mockRecoveredDevice)); + + // setup recovered device calls + EXPECT_CALL(*mockRecoveredDevice, linkToDeathRet()) + .Times(1) + .WillOnce(makeGeneralTransportFailure); + + // run test + const auto resultCode = kDevice->wait(); + + // verify failure + EXPECT_EQ(ANEURALNETWORKS_OP_FAILED, resultCode); +} + +// Harness for VersionedIPreparedModel failures. + +class VersionedIPreparedModelInitializationTest : public VersionedIDeviceMockTest { + protected: + const sp<MockPreparedModel> kMockPreparedModel = MockPreparedModel::create(); +}; + +std::shared_ptr<VersionedIPreparedModel> makeVersionedIPreparedModelSuccessfulInitializationFrom( + const sp<MockDevice>& mockDevice, const sp<MockPreparedModel>& mockPreparedModel, + const VersionedIDevice& device) { + const auto retV1_0 = makePreparedModelReturn(V1_0::ErrorStatus::NONE, V1_0::ErrorStatus::NONE, + mockPreparedModel); + const auto retV1_1 = makePreparedModel_1_1Return(V1_0::ErrorStatus::NONE, + V1_0::ErrorStatus::NONE, mockPreparedModel); + const auto retV1_2 = makePreparedModel_1_2Return(V1_0::ErrorStatus::NONE, + V1_0::ErrorStatus::NONE, mockPreparedModel); + const auto retV1_3 = makePreparedModel_1_3Return(V1_3::ErrorStatus::NONE, + V1_3::ErrorStatus::NONE, mockPreparedModel); + + ON_CALL(*mockDevice, prepareModel(_, _)).WillByDefault(Invoke(retV1_0)); + ON_CALL(*mockDevice, prepareModel_1_1(_, _, _)).WillByDefault(Invoke(retV1_1)); + ON_CALL(*mockDevice, 
prepareModel_1_2(_, _, _, _, _, _)).WillByDefault(Invoke(retV1_2)); + ON_CALL(*mockDevice, prepareModel_1_3(_, _, _, _, _, _, _, _)).WillByDefault(Invoke(retV1_3)); + + EXPECT_CALL(*mockDevice, prepareModel(_, _)).Times(testing::AnyNumber()); + EXPECT_CALL(*mockDevice, prepareModel_1_1(_, _, _)).Times(testing::AnyNumber()); + EXPECT_CALL(*mockDevice, prepareModel_1_2(_, _, _, _, _, _)).Times(testing::AnyNumber()); + EXPECT_CALL(*mockDevice, prepareModel_1_3(_, _, _, _, _, _, _, _)).Times(testing::AnyNumber()); + + const ModelFactory makeModel = [] { return V1_3::Model{}; }; + const auto [resultCode, preparedModel] = device.prepareModel(makeModel, {}, {}, {}, {}, {}); + + CHECK_EQ(ANEURALNETWORKS_NO_ERROR, resultCode); + CHECK(preparedModel != nullptr); + + return preparedModel; +} + +template <Version version> +class VersionedIPreparedModelTest : public VersionedIDeviceInitializedTest<version> { + using Base = VersionedIDeviceInitializedTest<version>; + + protected: + void SetUp() override { + VersionedIDeviceInitializedTest<version>::SetUp(); + ASSERT_NE(nullptr, kPreparedModel.get()); + } + + const sp<MockPreparedModel> kMockPreparedModel = MockPreparedModel::create(); + const std::shared_ptr<VersionedIPreparedModel> kPreparedModel = + makeVersionedIPreparedModelSuccessfulInitializationFrom( + Base::kMockDevice, kMockPreparedModel, *Base::kDevice); +}; + +class VersionedIPreparedModelV1_0Test : public VersionedIPreparedModelTest<Version::V1_0> {}; +class VersionedIPreparedModelV1_1Test : public VersionedIPreparedModelTest<Version::V1_1> {}; +class VersionedIPreparedModelV1_2Test : public VersionedIPreparedModelTest<Version::V1_2> {}; +class VersionedIPreparedModelV1_3Test : public VersionedIPreparedModelTest<Version::V1_3> {}; +class VersionedIPreparedModelMockTest : public VersionedIPreparedModelTest<Version::MOCK> {}; + +// Simulate initialization/link error + +TEST_F(VersionedIPreparedModelInitializationTest, linkToDeathTransportFailure) { + // setup failure + 
EXPECT_CALL(*kMockPreparedModel, linkToDeathRet()) + .Times(1) + .WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure)); + const auto ret = makePreparedModel_1_3Return(V1_3::ErrorStatus::NONE, V1_3::ErrorStatus::NONE, + kMockPreparedModel); + EXPECT_CALL(*kMockDevice, prepareModel_1_3(_, _, _, _, _, _, _, _)) + .Times(1) + .WillOnce(Invoke(ret)); + + // run test + const ModelFactory makeModel = [] { return V1_3::Model{}; }; + const auto [resultCode, preparedModel] = kDevice->prepareModel(makeModel, {}, {}, {}, {}, {}); + + // verify failure + EXPECT_EQ(ANEURALNETWORKS_OP_FAILED, resultCode); + EXPECT_EQ(nullptr, preparedModel.get()); +} + +TEST_F(VersionedIPreparedModelInitializationTest, linkToDeathDeadObject) { + // setup failure + EXPECT_CALL(*kMockPreparedModel, linkToDeathRet()) + .Times(1) + .WillOnce(InvokeWithoutArgs(makeDeadObjectFailure)); + const auto ret = makePreparedModel_1_3Return(V1_3::ErrorStatus::NONE, V1_3::ErrorStatus::NONE, + kMockPreparedModel); + EXPECT_CALL(*kMockDevice, prepareModel_1_3(_, _, _, _, _, _, _, _)) + .Times(1) + .WillOnce(Invoke(ret)); + + // run test + const ModelFactory makeModel = [] { return V1_3::Model{}; }; + const auto [resultCode, preparedModel] = kDevice->prepareModel(makeModel, {}, {}, {}, {}, {}); + + // verify failure + EXPECT_EQ(ANEURALNETWORKS_DEAD_OBJECT, resultCode); + EXPECT_EQ(nullptr, preparedModel.get()); +} + +TEST_F(VersionedIPreparedModelInitializationTest, linkToDeathReturnError) { + // setup failure + EXPECT_CALL(*kMockPreparedModel, linkToDeathRet()) + .Times(1) + .WillOnce(InvokeWithoutArgs([]() -> Return<bool> { return false; })); + const auto ret = makePreparedModel_1_3Return(V1_3::ErrorStatus::NONE, V1_3::ErrorStatus::NONE, + kMockPreparedModel); + EXPECT_CALL(*kMockDevice, prepareModel_1_3(_, _, _, _, _, _, _, _)) + .Times(1) + .WillOnce(Invoke(ret)); + + // run test + const ModelFactory makeModel = [] { return V1_3::Model{}; }; + const auto [resultCode, preparedModel] = 
kDevice->prepareModel(makeModel, {}, {}, {}, {}, {}); + + // verify failure + EXPECT_EQ(ANEURALNETWORKS_OP_FAILED, resultCode); + EXPECT_EQ(nullptr, preparedModel.get()); +} + +// Simulate successful test + +TEST_F(VersionedIPreparedModelV1_0Test, executeAsync) { + // setup call + const auto ret = makeExecuteReturn(V1_0::ErrorStatus::NONE, V1_0::ErrorStatus::NONE); + EXPECT_CALL(*kMockPreparedModel, execute(_, _)).Times(1).WillOnce(Invoke(ret)); + + // run test + const auto [resultCode, outputShapes, timing] = + kPreparedModel->execute({}, {}, {}, {}, /*preferSynchronous=*/false); + + // verify success + EXPECT_EQ(ANEURALNETWORKS_NO_ERROR, resultCode); + EXPECT_EQ(0u, outputShapes.size()); + EXPECT_EQ(kNoTiming, timing); +} + +TEST_F(VersionedIPreparedModelV1_1Test, executeAsync) { + // setup call + const auto ret = makeExecuteReturn(V1_0::ErrorStatus::NONE, V1_0::ErrorStatus::NONE); + EXPECT_CALL(*kMockPreparedModel, execute(_, _)).Times(1).WillOnce(Invoke(ret)); + + // run test + const auto [resultCode, outputShapes, timing] = + kPreparedModel->execute({}, {}, {}, {}, /*preferSynchronous=*/false); + + // verify success + EXPECT_EQ(ANEURALNETWORKS_NO_ERROR, resultCode); + EXPECT_EQ(0u, outputShapes.size()); + EXPECT_EQ(kNoTiming, timing); +} + +TEST_F(VersionedIPreparedModelV1_2Test, executeAsync) { + // setup call + const auto ret = + makeExecute_1_2Return(V1_0::ErrorStatus::NONE, V1_0::ErrorStatus::NONE, {}, kNoTiming); + EXPECT_CALL(*kMockPreparedModel, execute_1_2(_, _, _)).Times(1).WillOnce(Invoke(ret)); + + // run test + const auto [resultCode, outputShapes, timing] = + kPreparedModel->execute({}, {}, {}, {}, /*preferSynchronous=*/false); + + // verify success + EXPECT_EQ(ANEURALNETWORKS_NO_ERROR, resultCode); + EXPECT_EQ(0u, outputShapes.size()); + EXPECT_EQ(kNoTiming, timing); +} + +TEST_F(VersionedIPreparedModelV1_3Test, executeAsync) { + // setup call + const auto ret = + makeExecute_1_3Return(V1_3::ErrorStatus::NONE, V1_3::ErrorStatus::NONE, {}, 
kNoTiming); + EXPECT_CALL(*kMockPreparedModel, execute_1_3(_, _, _, _, _)).Times(1).WillOnce(Invoke(ret)); + + // run test + const auto [resultCode, outputShapes, timing] = + kPreparedModel->execute({}, {}, {}, {}, /*preferSynchronous=*/false); + + // verify success + EXPECT_EQ(ANEURALNETWORKS_NO_ERROR, resultCode); + EXPECT_EQ(0u, outputShapes.size()); + EXPECT_EQ(kNoTiming, timing); +} + +TEST_F(VersionedIPreparedModelV1_0Test, executePreferSync) { + // setup call + const auto ret = makeExecuteReturn(V1_0::ErrorStatus::NONE, V1_0::ErrorStatus::NONE); + EXPECT_CALL(*kMockPreparedModel, execute(_, _)).Times(1).WillOnce(Invoke(ret)); + + // run test + const auto [resultCode, outputShapes, timing] = + kPreparedModel->execute({}, {}, {}, {}, /*preferSynchronous=*/true); + + // verify success + EXPECT_EQ(ANEURALNETWORKS_NO_ERROR, resultCode); + EXPECT_EQ(0u, outputShapes.size()); + EXPECT_EQ(kNoTiming, timing); +} + +TEST_F(VersionedIPreparedModelV1_1Test, executePreferSync) { + // setup call + const auto ret = makeExecuteReturn(V1_0::ErrorStatus::NONE, V1_0::ErrorStatus::NONE); + EXPECT_CALL(*kMockPreparedModel, execute(_, _)).Times(1).WillOnce(Invoke(ret)); + + // run test + const auto [resultCode, outputShapes, timing] = + kPreparedModel->execute({}, {}, {}, {}, /*preferSynchronous=*/true); + + // verify success + EXPECT_EQ(ANEURALNETWORKS_NO_ERROR, resultCode); + EXPECT_EQ(0u, outputShapes.size()); + EXPECT_EQ(kNoTiming, timing); +} + +TEST_F(VersionedIPreparedModelV1_2Test, executePreferSync) { + // setup call + const auto ret = makeExecuteSynchronouslyReturn(V1_0::ErrorStatus::NONE, {}, kNoTiming); + EXPECT_CALL(*kMockPreparedModel, executeSynchronously(_, _, _)).Times(1).WillOnce(Invoke(ret)); + + // run test + const auto [resultCode, outputShapes, timing] = + kPreparedModel->execute({}, {}, {}, {}, /*preferSynchronous=*/true); + + // verify success + EXPECT_EQ(ANEURALNETWORKS_NO_ERROR, resultCode); + EXPECT_EQ(0u, outputShapes.size()); + EXPECT_EQ(kNoTiming, 
timing); +} + +TEST_F(VersionedIPreparedModelV1_3Test, executePreferSync) { + // setup call + const auto ret = makeExecuteSynchronously_1_3Return(V1_3::ErrorStatus::NONE, {}, kNoTiming); + EXPECT_CALL(*kMockPreparedModel, executeSynchronously_1_3(_, _, _, _, _)) + .Times(1) + .WillOnce(Invoke(ret)); + + // run test + const auto [resultCode, outputShapes, timing] = + kPreparedModel->execute({}, {}, {}, {}, /*preferSynchronous=*/true); + + // verify success + EXPECT_EQ(ANEURALNETWORKS_NO_ERROR, resultCode); + EXPECT_EQ(0u, outputShapes.size()); + EXPECT_EQ(kNoTiming, timing); +} + +TEST_F(VersionedIPreparedModelV1_0Test, executeFenced) { + // setup call + const auto ret = makeExecuteReturn(V1_0::ErrorStatus::NONE, V1_0::ErrorStatus::NONE); + EXPECT_CALL(*kMockPreparedModel, execute(_, _)).Times(1).WillOnce(Invoke(ret)); + + // run test + const auto [resultCode, syncFence, dispatchCallback, timing] = + kPreparedModel->executeFenced({}, {}, {}, {}, {}, {}); + + // verify success + EXPECT_EQ(ANEURALNETWORKS_NO_ERROR, resultCode); + EXPECT_EQ(nullptr, syncFence.getNativeHandle()); + EXPECT_EQ(nullptr, dispatchCallback.get()); + EXPECT_EQ(kNoTiming, timing); +} + +TEST_F(VersionedIPreparedModelV1_1Test, executeFenced) { + // setup call + const auto ret = makeExecuteReturn(V1_0::ErrorStatus::NONE, V1_0::ErrorStatus::NONE); + EXPECT_CALL(*kMockPreparedModel, execute(_, _)).Times(1).WillOnce(Invoke(ret)); + + // run test + const auto [resultCode, syncFence, dispatchCallback, timing] = + kPreparedModel->executeFenced({}, {}, {}, {}, {}, {}); + + // verify success + EXPECT_EQ(ANEURALNETWORKS_NO_ERROR, resultCode); + EXPECT_EQ(nullptr, syncFence.getNativeHandle()); + EXPECT_EQ(nullptr, dispatchCallback.get()); + EXPECT_EQ(kNoTiming, timing); +} + +TEST_F(VersionedIPreparedModelV1_2Test, executeFenced) { + // setup call + const auto ret = makeExecuteSynchronouslyReturn(V1_0::ErrorStatus::NONE, {}, kNoTiming); + EXPECT_CALL(*kMockPreparedModel, executeSynchronously(_, _, 
_)).Times(1).WillOnce(Invoke(ret)); + + // run test + const auto [resultCode, syncFence, dispatchCallback, timing] = + kPreparedModel->executeFenced({}, {}, {}, {}, {}, {}); + + // verify success + EXPECT_EQ(ANEURALNETWORKS_NO_ERROR, resultCode); + EXPECT_EQ(nullptr, syncFence.getNativeHandle()); + EXPECT_EQ(nullptr, dispatchCallback.get()); + EXPECT_EQ(kNoTiming, timing); +} + +TEST_F(VersionedIPreparedModelV1_3Test, executeFenced) { + // setup call + auto memory = allocateSharedMemory(4); + hidl_handle fakeSyncFence(memory.handle()); + const sp<IFencedExecutionCallback> callback = new MockFencedExecutionCallback(); + const auto ret = makeExecuteFencedReturn(V1_3::ErrorStatus::NONE, fakeSyncFence, callback); + EXPECT_CALL(*kMockPreparedModel, executeFenced(_, _, _, _, _, _, _)) + .Times(1) + .WillOnce(Invoke(ret)); + + // run test + const auto [resultCode, syncFence, dispatchCallback, timing] = + kPreparedModel->executeFenced({}, {}, {}, {}, {}, {}); + + // verify success + EXPECT_EQ(ANEURALNETWORKS_NO_ERROR, resultCode); + EXPECT_NE(nullptr, syncFence.getNativeHandle()); + EXPECT_NE(nullptr, dispatchCallback.get()); + EXPECT_EQ(kNoTiming, timing); +} + +TEST_F(VersionedIPreparedModelV1_0Test, configureExecutionBurst) { + // run test + const auto executionBurstController = + kPreparedModel->configureExecutionBurst(/*preferPowerOverLatency=*/false); + + // verify success + EXPECT_EQ(nullptr, executionBurstController); +} + +TEST_F(VersionedIPreparedModelV1_1Test, configureExecutionBurst) { + // run test + const auto executionBurstController = + kPreparedModel->configureExecutionBurst(/*preferPowerOverLatency=*/false); + + // verify success + EXPECT_EQ(nullptr, executionBurstController); +} + +TEST_F(VersionedIPreparedModelV1_2Test, configureExecutionBurst) { + // setup call + const sp<MockBurstContext> burstContext = new MockBurstContext(); + const auto ret = makeConfigureExecutionBurst(V1_0::ErrorStatus::NONE, burstContext); + EXPECT_CALL(*kMockPreparedModel, 
configureExecutionBurst(_, _, _, _)) + .Times(1) + .WillOnce(Invoke(ret)); + + // run test + const auto executionBurstController = + kPreparedModel->configureExecutionBurst(/*preferPowerOverLatency=*/false); + + // verify success + EXPECT_NE(nullptr, executionBurstController); +} + +TEST_F(VersionedIPreparedModelV1_3Test, configureExecutionBurst) { + // setup call + const sp<MockBurstContext> burstContext = new MockBurstContext(); + const auto ret = makeConfigureExecutionBurst(V1_0::ErrorStatus::NONE, burstContext); + EXPECT_CALL(*kMockPreparedModel, configureExecutionBurst(_, _, _, _)) + .Times(1) + .WillOnce(Invoke(ret)); + + // run test + const auto executionBurstController = + kPreparedModel->configureExecutionBurst(/*preferPowerOverLatency=*/false); + + // verify success + EXPECT_NE(nullptr, executionBurstController); +} + +// Simulate general failure + +TEST_F(VersionedIPreparedModelV1_0Test, executeAsyncLaunchFailure) { + // setup failure + const auto ret = makeExecuteReturn(V1_0::ErrorStatus::GENERAL_FAILURE, V1_0::ErrorStatus::NONE); + EXPECT_CALL(*kMockPreparedModel, execute(_, _)).Times(1).WillOnce(Invoke(ret)); + + // run test + const auto [resultCode, outputShapes, timing] = + kPreparedModel->execute({}, {}, {}, {}, /*preferSynchronous=*/false); + + // verify failure + EXPECT_EQ(ANEURALNETWORKS_OP_FAILED, resultCode); + EXPECT_EQ(0u, outputShapes.size()); + EXPECT_EQ(kNoTiming, timing); +} + +TEST_F(VersionedIPreparedModelV1_1Test, executeAsyncLaunchFailure) { + // setup failure + const auto ret = makeExecuteReturn(V1_0::ErrorStatus::GENERAL_FAILURE, V1_0::ErrorStatus::NONE); + EXPECT_CALL(*kMockPreparedModel, execute(_, _)).Times(1).WillOnce(Invoke(ret)); + + // run test + const auto [resultCode, outputShapes, timing] = + kPreparedModel->execute({}, {}, {}, {}, /*preferSynchronous=*/false); + + // verify failure + EXPECT_EQ(ANEURALNETWORKS_OP_FAILED, resultCode); + EXPECT_EQ(0u, outputShapes.size()); + EXPECT_EQ(kNoTiming, timing); +} + 
+TEST_F(VersionedIPreparedModelV1_2Test, executeAsyncLaunchFailure) { + // setup failure + const auto ret = makeExecute_1_2Return(V1_0::ErrorStatus::GENERAL_FAILURE, + V1_0::ErrorStatus::NONE, {}, kNoTiming); + EXPECT_CALL(*kMockPreparedModel, execute_1_2(_, _, _)).Times(1).WillOnce(Invoke(ret)); + + // run test + const auto [resultCode, outputShapes, timing] = + kPreparedModel->execute({}, {}, {}, {}, /*preferSynchronous=*/false); + + // verify failure + EXPECT_EQ(ANEURALNETWORKS_OP_FAILED, resultCode); + EXPECT_EQ(0u, outputShapes.size()); + EXPECT_EQ(kNoTiming, timing); +} + +TEST_F(VersionedIPreparedModelV1_3Test, executeAsyncLaunchFailure) { + // setup failure + const auto ret = makeExecute_1_3Return(V1_3::ErrorStatus::GENERAL_FAILURE, + V1_3::ErrorStatus::NONE, {}, kNoTiming); + EXPECT_CALL(*kMockPreparedModel, execute_1_3(_, _, _, _, _)).Times(1).WillOnce(Invoke(ret)); + + // run test + const auto [resultCode, outputShapes, timing] = + kPreparedModel->execute({}, {}, {}, {}, /*preferSynchronous=*/false); + + // verify failure + EXPECT_EQ(ANEURALNETWORKS_OP_FAILED, resultCode); + EXPECT_EQ(0u, outputShapes.size()); + EXPECT_EQ(kNoTiming, timing); +} + +TEST_F(VersionedIPreparedModelV1_0Test, executeAsyncReturnFailure) { + // setup failure + const auto ret = makeExecuteReturn(V1_0::ErrorStatus::NONE, V1_0::ErrorStatus::GENERAL_FAILURE); + EXPECT_CALL(*kMockPreparedModel, execute(_, _)).Times(1).WillOnce(Invoke(ret)); + + // run test + const auto [resultCode, outputShapes, timing] = + kPreparedModel->execute({}, {}, {}, {}, /*preferSynchronous=*/false); + + // verify failure + EXPECT_EQ(ANEURALNETWORKS_OP_FAILED, resultCode); + EXPECT_EQ(0u, outputShapes.size()); + EXPECT_EQ(kNoTiming, timing); +} + +TEST_F(VersionedIPreparedModelV1_1Test, executeAsyncReturnFailure) { + // setup failure + const auto ret = makeExecuteReturn(V1_0::ErrorStatus::NONE, V1_0::ErrorStatus::GENERAL_FAILURE); + EXPECT_CALL(*kMockPreparedModel, execute(_, 
_)).Times(1).WillOnce(Invoke(ret)); + + // run test + const auto [resultCode, outputShapes, timing] = + kPreparedModel->execute({}, {}, {}, {}, /*preferSynchronous=*/false); + + // verify failure + EXPECT_EQ(ANEURALNETWORKS_OP_FAILED, resultCode); + EXPECT_EQ(0u, outputShapes.size()); + EXPECT_EQ(kNoTiming, timing); +} + +TEST_F(VersionedIPreparedModelV1_2Test, executeAsyncReturnFailure) { + // setup failure + const auto ret = makeExecute_1_2Return(V1_0::ErrorStatus::NONE, + V1_0::ErrorStatus::GENERAL_FAILURE, {}, kNoTiming); + EXPECT_CALL(*kMockPreparedModel, execute_1_2(_, _, _)).Times(1).WillOnce(Invoke(ret)); + + // run test + const auto [resultCode, outputShapes, timing] = + kPreparedModel->execute({}, {}, {}, {}, /*preferSynchronous=*/false); + + // verify failure + EXPECT_EQ(ANEURALNETWORKS_OP_FAILED, resultCode); + EXPECT_EQ(0u, outputShapes.size()); + EXPECT_EQ(kNoTiming, timing); +} + +TEST_F(VersionedIPreparedModelV1_3Test, executeAsyncReturnFailure) { + // setup failure + const auto ret = makeExecute_1_3Return(V1_3::ErrorStatus::NONE, + V1_3::ErrorStatus::GENERAL_FAILURE, {}, kNoTiming); + EXPECT_CALL(*kMockPreparedModel, execute_1_3(_, _, _, _, _)).Times(1).WillOnce(Invoke(ret)); + + // run test + const auto [resultCode, outputShapes, timing] = + kPreparedModel->execute({}, {}, {}, {}, /*preferSynchronous=*/false); + + // verify failure + EXPECT_EQ(ANEURALNETWORKS_OP_FAILED, resultCode); + EXPECT_EQ(0u, outputShapes.size()); + EXPECT_EQ(kNoTiming, timing); +} + +TEST_F(VersionedIPreparedModelV1_0Test, executePreferSyncFailure) { + // setup failure + const auto ret = makeExecuteReturn(V1_0::ErrorStatus::GENERAL_FAILURE, + V1_0::ErrorStatus::GENERAL_FAILURE); + EXPECT_CALL(*kMockPreparedModel, execute(_, _)).Times(1).WillOnce(Invoke(ret)); + + // run test + const auto [resultCode, outputShapes, timing] = + kPreparedModel->execute({}, {}, {}, {}, /*preferSynchronous=*/true); + + // verify failure + EXPECT_EQ(ANEURALNETWORKS_OP_FAILED, resultCode); + 
EXPECT_EQ(0u, outputShapes.size()); + EXPECT_EQ(kNoTiming, timing); +} + +TEST_F(VersionedIPreparedModelV1_1Test, executePreferSyncFailure) { + // setup failure + const auto ret = makeExecuteReturn(V1_0::ErrorStatus::GENERAL_FAILURE, + V1_0::ErrorStatus::GENERAL_FAILURE); + EXPECT_CALL(*kMockPreparedModel, execute(_, _)).Times(1).WillOnce(Invoke(ret)); + + // run test + const auto [resultCode, outputShapes, timing] = + kPreparedModel->execute({}, {}, {}, {}, /*preferSynchronous=*/true); + + // verify failure + EXPECT_EQ(ANEURALNETWORKS_OP_FAILED, resultCode); + EXPECT_EQ(0u, outputShapes.size()); + EXPECT_EQ(kNoTiming, timing); +} + +TEST_F(VersionedIPreparedModelV1_2Test, executePreferSyncFailure) { + // setup failure + const auto ret = + makeExecuteSynchronouslyReturn(V1_0::ErrorStatus::GENERAL_FAILURE, {}, kNoTiming); + EXPECT_CALL(*kMockPreparedModel, executeSynchronously(_, _, _)).Times(1).WillOnce(Invoke(ret)); + + // run test + const auto [resultCode, outputShapes, timing] = + kPreparedModel->execute({}, {}, {}, {}, /*preferSynchronous=*/true); + + // verify failure + EXPECT_EQ(ANEURALNETWORKS_OP_FAILED, resultCode); + EXPECT_EQ(0u, outputShapes.size()); + EXPECT_EQ(kNoTiming, timing); +} + +TEST_F(VersionedIPreparedModelV1_3Test, executePreferSyncFailure) { + // setup failure + const auto ret = + makeExecuteSynchronously_1_3Return(V1_3::ErrorStatus::GENERAL_FAILURE, {}, kNoTiming); + EXPECT_CALL(*kMockPreparedModel, executeSynchronously_1_3(_, _, _, _, _)) + .Times(1) + .WillOnce(Invoke(ret)); + + // run test + const auto [resultCode, outputShapes, timing] = + kPreparedModel->execute({}, {}, {}, {}, /*preferSynchronous=*/true); + + // verify failure + EXPECT_EQ(ANEURALNETWORKS_OP_FAILED, resultCode); + EXPECT_EQ(0u, outputShapes.size()); + EXPECT_EQ(kNoTiming, timing); +} + +TEST_F(VersionedIPreparedModelV1_0Test, executeFencedFailure) { + // setup failure + const auto ret = makeExecuteReturn(V1_0::ErrorStatus::GENERAL_FAILURE, + 
V1_0::ErrorStatus::GENERAL_FAILURE); + EXPECT_CALL(*kMockPreparedModel, execute(_, _)).Times(1).WillOnce(Invoke(ret)); + + // run test + const auto [resultCode, syncFence, dispatchCallback, timing] = + kPreparedModel->executeFenced({}, {}, {}, {}, {}, {}); + + // verify failure + EXPECT_EQ(ANEURALNETWORKS_OP_FAILED, resultCode); + EXPECT_EQ(nullptr, syncFence.getNativeHandle()); + EXPECT_EQ(nullptr, dispatchCallback.get()); + EXPECT_EQ(kNoTiming, timing); +} + +TEST_F(VersionedIPreparedModelV1_1Test, executeFencedFailure) { + // setup failure + const auto ret = makeExecuteReturn(V1_0::ErrorStatus::GENERAL_FAILURE, + V1_0::ErrorStatus::GENERAL_FAILURE); + EXPECT_CALL(*kMockPreparedModel, execute(_, _)).Times(1).WillOnce(Invoke(ret)); + + // run test + const auto [resultCode, syncFence, dispatchCallback, timing] = + kPreparedModel->executeFenced({}, {}, {}, {}, {}, {}); + + // verify failure + EXPECT_EQ(ANEURALNETWORKS_OP_FAILED, resultCode); + EXPECT_EQ(nullptr, syncFence.getNativeHandle()); + EXPECT_EQ(nullptr, dispatchCallback.get()); + EXPECT_EQ(kNoTiming, timing); +} + +TEST_F(VersionedIPreparedModelV1_2Test, executeFencedFailure) { + // setup failure + const auto ret = + makeExecuteSynchronouslyReturn(V1_0::ErrorStatus::GENERAL_FAILURE, {}, kNoTiming); + EXPECT_CALL(*kMockPreparedModel, executeSynchronously(_, _, _)).Times(1).WillOnce(Invoke(ret)); + + // run test + const auto [resultCode, syncFence, dispatchCallback, timing] = + kPreparedModel->executeFenced({}, {}, {}, {}, {}, {}); + + // verify failure + EXPECT_EQ(ANEURALNETWORKS_OP_FAILED, resultCode); + EXPECT_EQ(nullptr, syncFence.getNativeHandle()); + EXPECT_EQ(nullptr, dispatchCallback.get()); + EXPECT_EQ(kNoTiming, timing); +} + +TEST_F(VersionedIPreparedModelV1_3Test, executeFencedFailure) { + // setup failure + auto memory = allocateSharedMemory(4); + hidl_handle fakeSyncFence(memory.handle()); + const sp<IFencedExecutionCallback> callback = new MockFencedExecutionCallback(); + const auto ret = + 
makeExecuteFencedReturn(V1_3::ErrorStatus::GENERAL_FAILURE, fakeSyncFence, callback); + EXPECT_CALL(*kMockPreparedModel, executeFenced(_, _, _, _, _, _, _)) + .Times(1) + .WillOnce(Invoke(ret)); + + // run test + const auto [resultCode, syncFence, dispatchCallback, timing] = + kPreparedModel->executeFenced({}, {}, {}, {}, {}, {}); + + // verify failure + EXPECT_EQ(ANEURALNETWORKS_OP_FAILED, resultCode); + EXPECT_EQ(nullptr, syncFence.getNativeHandle()); + EXPECT_EQ(nullptr, dispatchCallback.get()); + EXPECT_EQ(kNoTiming, timing); +} + +TEST_F(VersionedIPreparedModelV1_2Test, configureExecutionBurstFailure) { + // setup failure + const sp<MockBurstContext> burstContext = new MockBurstContext(); + const auto ret = makeConfigureExecutionBurst(V1_0::ErrorStatus::GENERAL_FAILURE, burstContext); + EXPECT_CALL(*kMockPreparedModel, configureExecutionBurst(_, _, _, _)) + .Times(1) + .WillOnce(Invoke(ret)); + + // run test + const auto executionBurstController = + kPreparedModel->configureExecutionBurst(/*preferPowerOverLatency=*/false); + + // verify failure + EXPECT_EQ(nullptr, executionBurstController); +} + +TEST_F(VersionedIPreparedModelV1_3Test, configureExecutionBurstFailure) { + // setup failure + const sp<MockBurstContext> burstContext = new MockBurstContext(); + const auto ret = makeConfigureExecutionBurst(V1_0::ErrorStatus::GENERAL_FAILURE, burstContext); + EXPECT_CALL(*kMockPreparedModel, configureExecutionBurst(_, _, _, _)) + .Times(1) + .WillOnce(Invoke(ret)); + + // run test + const auto executionBurstController = + kPreparedModel->configureExecutionBurst(/*preferPowerOverLatency=*/false); + + // verify failure + EXPECT_EQ(nullptr, executionBurstController); +} + +TEST_F(VersionedIPreparedModelV1_2Test, configureExecutionBurstNullptrError) { + // setup failure + const auto ret = makeConfigureExecutionBurst(V1_0::ErrorStatus::NONE, nullptr); + EXPECT_CALL(*kMockPreparedModel, configureExecutionBurst(_, _, _, _)) + .Times(1) + .WillOnce(Invoke(ret)); + + // run 
test + const auto executionBurstController = + kPreparedModel->configureExecutionBurst(/*preferPowerOverLatency=*/false); + + // verify failure + EXPECT_EQ(nullptr, executionBurstController); +} + +TEST_F(VersionedIPreparedModelV1_3Test, configureExecutionBurstNullptrError) { + // setup failure + const auto ret = makeConfigureExecutionBurst(V1_0::ErrorStatus::NONE, nullptr); + EXPECT_CALL(*kMockPreparedModel, configureExecutionBurst(_, _, _, _)) + .Times(1) + .WillOnce(Invoke(ret)); + + // run test + const auto executionBurstController = + kPreparedModel->configureExecutionBurst(/*preferPowerOverLatency=*/false); + + // verify failure + EXPECT_EQ(nullptr, executionBurstController); +} + +// Simulate transport failure + +TEST_F(VersionedIPreparedModelV1_0Test, executeAsyncTransportFailure) { + // setup failure + EXPECT_CALL(*kMockPreparedModel, execute(_, _)) + .Times(1) + .WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure)); + + // run test + const auto [resultCode, outputShapes, timing] = + kPreparedModel->execute({}, {}, {}, {}, /*preferSynchronous=*/false); + + // verify failure + EXPECT_EQ(ANEURALNETWORKS_OP_FAILED, resultCode); + EXPECT_EQ(0u, outputShapes.size()); + EXPECT_EQ(kNoTiming, timing); +} + +TEST_F(VersionedIPreparedModelV1_1Test, executeAsyncTransportFailure) { + // setup failure + EXPECT_CALL(*kMockPreparedModel, execute(_, _)) + .Times(1) + .WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure)); + + // run test + const auto [resultCode, outputShapes, timing] = + kPreparedModel->execute({}, {}, {}, {}, /*preferSynchronous=*/false); + + // verify failure + EXPECT_EQ(ANEURALNETWORKS_OP_FAILED, resultCode); + EXPECT_EQ(0u, outputShapes.size()); + EXPECT_EQ(kNoTiming, timing); +} + +TEST_F(VersionedIPreparedModelV1_2Test, executeAsyncTransportFailure) { + // setup failure + EXPECT_CALL(*kMockPreparedModel, execute_1_2(_, _, _)) + .Times(1) + .WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure)); + + // run test + const auto [resultCode, 
outputShapes, timing] = + kPreparedModel->execute({}, {}, {}, {}, /*preferSynchronous=*/false); + + // verify failure + EXPECT_EQ(ANEURALNETWORKS_OP_FAILED, resultCode); + EXPECT_EQ(0u, outputShapes.size()); + EXPECT_EQ(kNoTiming, timing); +} + +TEST_F(VersionedIPreparedModelV1_3Test, executeAsyncTransportFailure) { + // setup failure + EXPECT_CALL(*kMockPreparedModel, execute_1_3(_, _, _, _, _)) + .Times(1) + .WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure)); + + // run test + const auto [resultCode, outputShapes, timing] = + kPreparedModel->execute({}, {}, {}, {}, /*preferSynchronous=*/false); + + // verify failure + EXPECT_EQ(ANEURALNETWORKS_OP_FAILED, resultCode); + EXPECT_EQ(0u, outputShapes.size()); + EXPECT_EQ(kNoTiming, timing); +} + +TEST_F(VersionedIPreparedModelV1_0Test, executePreferSyncTransportFailure) { + // setup failure + EXPECT_CALL(*kMockPreparedModel, execute(_, _)) + .Times(1) + .WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure)); + + // run test + const auto [resultCode, outputShapes, timing] = + kPreparedModel->execute({}, {}, {}, {}, /*preferSynchronous=*/true); + + // verify failure + EXPECT_EQ(ANEURALNETWORKS_OP_FAILED, resultCode); + EXPECT_EQ(0u, outputShapes.size()); + EXPECT_EQ(kNoTiming, timing); +} + +TEST_F(VersionedIPreparedModelV1_1Test, executePreferSyncTransportFailure) { + // setup failure + EXPECT_CALL(*kMockPreparedModel, execute(_, _)) + .Times(1) + .WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure)); + + // run test + const auto [resultCode, outputShapes, timing] = + kPreparedModel->execute({}, {}, {}, {}, /*preferSynchronous=*/true); + + // verify failure + EXPECT_EQ(ANEURALNETWORKS_OP_FAILED, resultCode); + EXPECT_EQ(0u, outputShapes.size()); + EXPECT_EQ(kNoTiming, timing); +} + +TEST_F(VersionedIPreparedModelV1_2Test, executePreferSyncTransportFailure) { + // setup failure + EXPECT_CALL(*kMockPreparedModel, executeSynchronously(_, _, _)) + .Times(1) + 
.WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure)); + + // run test + const auto [resultCode, outputShapes, timing] = + kPreparedModel->execute({}, {}, {}, {}, /*preferSynchronous=*/true); + + // verify failure + EXPECT_EQ(ANEURALNETWORKS_OP_FAILED, resultCode); + EXPECT_EQ(0u, outputShapes.size()); + EXPECT_EQ(kNoTiming, timing); +} + +TEST_F(VersionedIPreparedModelV1_3Test, executePreferSyncTransportFailure) { + // setup failure + EXPECT_CALL(*kMockPreparedModel, executeSynchronously_1_3(_, _, _, _, _)) + .Times(1) + .WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure)); + + // run test + const auto [resultCode, outputShapes, timing] = + kPreparedModel->execute({}, {}, {}, {}, /*preferSynchronous=*/true); + + // verify failure + EXPECT_EQ(ANEURALNETWORKS_OP_FAILED, resultCode); + EXPECT_EQ(0u, outputShapes.size()); + EXPECT_EQ(kNoTiming, timing); +} + +TEST_F(VersionedIPreparedModelV1_0Test, executeFencedTransportFailure) { + // setup failure + EXPECT_CALL(*kMockPreparedModel, execute(_, _)) + .Times(1) + .WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure)); + + // run test + const auto [resultCode, syncFence, dispatchCallback, timing] = + kPreparedModel->executeFenced({}, {}, {}, {}, {}, {}); + + // verify failure + EXPECT_EQ(ANEURALNETWORKS_OP_FAILED, resultCode); + EXPECT_EQ(nullptr, syncFence.getNativeHandle()); + EXPECT_EQ(nullptr, dispatchCallback.get()); + EXPECT_EQ(kNoTiming, timing); +} + +TEST_F(VersionedIPreparedModelV1_1Test, executeFencedTransportFailure) { + // setup failure + EXPECT_CALL(*kMockPreparedModel, execute(_, _)) + .Times(1) + .WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure)); + + // run test + const auto [resultCode, syncFence, dispatchCallback, timing] = + kPreparedModel->executeFenced({}, {}, {}, {}, {}, {}); + + // verify failure + EXPECT_EQ(ANEURALNETWORKS_OP_FAILED, resultCode); + EXPECT_EQ(nullptr, syncFence.getNativeHandle()); + EXPECT_EQ(nullptr, dispatchCallback.get()); + EXPECT_EQ(kNoTiming, timing); +} 
+ +TEST_F(VersionedIPreparedModelV1_2Test, executeFencedTransportFailure) { + // setup failure + EXPECT_CALL(*kMockPreparedModel, executeSynchronously(_, _, _)) + .Times(1) + .WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure)); + + // run test + const auto [resultCode, syncFence, dispatchCallback, timing] = + kPreparedModel->executeFenced({}, {}, {}, {}, {}, {}); + + // verify failure + EXPECT_EQ(ANEURALNETWORKS_OP_FAILED, resultCode); + EXPECT_EQ(nullptr, syncFence.getNativeHandle()); + EXPECT_EQ(nullptr, dispatchCallback.get()); + EXPECT_EQ(kNoTiming, timing); +} + +TEST_F(VersionedIPreparedModelV1_3Test, executeFencedTransportFailure) { + // setup failure + EXPECT_CALL(*kMockPreparedModel, executeFenced(_, _, _, _, _, _, _)) + .Times(1) + .WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure)); + + // run test + const auto [resultCode, syncFence, dispatchCallback, timing] = + kPreparedModel->executeFenced({}, {}, {}, {}, {}, {}); + + // verify failure + EXPECT_EQ(ANEURALNETWORKS_OP_FAILED, resultCode); + EXPECT_EQ(nullptr, syncFence.getNativeHandle()); + EXPECT_EQ(nullptr, dispatchCallback.get()); + EXPECT_EQ(kNoTiming, timing); +} + +TEST_F(VersionedIPreparedModelV1_2Test, configureExecutionBurstTransportFailure) { + // setup failure + EXPECT_CALL(*kMockPreparedModel, configureExecutionBurst(_, _, _, _)) + .Times(1) + .WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure)); + + // run test + const auto executionBurstController = + kPreparedModel->configureExecutionBurst(/*preferPowerOverLatency=*/false); + + // verify failure + EXPECT_EQ(nullptr, executionBurstController); +} + +TEST_F(VersionedIPreparedModelV1_3Test, configureExecutionBurstTransportFailure) { + // setup failure + EXPECT_CALL(*kMockPreparedModel, configureExecutionBurst(_, _, _, _)) + .Times(1) + .WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure)); + + // run test + const auto executionBurstController = + kPreparedModel->configureExecutionBurst(/*preferPowerOverLatency=*/false); 
+ + // verify failure + EXPECT_EQ(nullptr, executionBurstController); +} + +// Simulate service crash + +TEST_F(VersionedIPreparedModelV1_0Test, executeAsyncLaunchCrash) { + // setup failure + EXPECT_CALL(*kMockPreparedModel, execute(_, _)) + .Times(1) + .WillOnce(InvokeWithoutArgs(makeDeadObjectFailure)); + + // run test + const auto [resultCode, outputShapes, timing] = + kPreparedModel->execute({}, {}, {}, {}, /*preferSynchronous=*/false); + + // verify failure + EXPECT_EQ(ANEURALNETWORKS_DEAD_OBJECT, resultCode); + EXPECT_EQ(0u, outputShapes.size()); + EXPECT_EQ(kNoTiming, timing); +} + +TEST_F(VersionedIPreparedModelV1_1Test, executeAsyncLaunchCrash) { + // setup failure + EXPECT_CALL(*kMockPreparedModel, execute(_, _)) + .Times(1) + .WillOnce(InvokeWithoutArgs(makeDeadObjectFailure)); + + // run test + const auto [resultCode, outputShapes, timing] = + kPreparedModel->execute({}, {}, {}, {}, /*preferSynchronous=*/false); + + // verify failure + EXPECT_EQ(ANEURALNETWORKS_DEAD_OBJECT, resultCode); + EXPECT_EQ(0u, outputShapes.size()); + EXPECT_EQ(kNoTiming, timing); +} + +TEST_F(VersionedIPreparedModelV1_2Test, executeAsyncLaunchCrash) { + // setup failure + EXPECT_CALL(*kMockPreparedModel, execute_1_2(_, _, _)) + .Times(1) + .WillOnce(InvokeWithoutArgs(makeDeadObjectFailure)); + + // run test + const auto [resultCode, outputShapes, timing] = + kPreparedModel->execute({}, {}, {}, {}, /*preferSynchronous=*/false); + + // verify failure + EXPECT_EQ(ANEURALNETWORKS_DEAD_OBJECT, resultCode); + EXPECT_EQ(0u, outputShapes.size()); + EXPECT_EQ(kNoTiming, timing); +} + +TEST_F(VersionedIPreparedModelV1_3Test, executeAsyncLaunchCrash) { + // setup failure + EXPECT_CALL(*kMockPreparedModel, execute_1_3(_, _, _, _, _)) + .Times(1) + .WillOnce(InvokeWithoutArgs(makeDeadObjectFailure)); + + // run test + const auto [resultCode, outputShapes, timing] = + kPreparedModel->execute({}, {}, {}, {}, /*preferSynchronous=*/false); + + // verify failure + 
EXPECT_EQ(ANEURALNETWORKS_DEAD_OBJECT, resultCode); + EXPECT_EQ(0u, outputShapes.size()); + EXPECT_EQ(kNoTiming, timing); +} + +TEST_F(VersionedIPreparedModelV1_2Test, executePreferSyncCrash) { + // setup failure + EXPECT_CALL(*kMockPreparedModel, executeSynchronously(_, _, _)) + .Times(1) + .WillOnce(InvokeWithoutArgs(makeDeadObjectFailure)); + + // run test + const auto [resultCode, outputShapes, timing] = + kPreparedModel->execute({}, {}, {}, {}, /*preferSynchronous=*/true); + + // verify failure + EXPECT_EQ(ANEURALNETWORKS_DEAD_OBJECT, resultCode); + EXPECT_EQ(0u, outputShapes.size()); + EXPECT_EQ(kNoTiming, timing); +} + +TEST_F(VersionedIPreparedModelV1_3Test, executePreferSyncCrash) { + // setup failure + EXPECT_CALL(*kMockPreparedModel, executeSynchronously_1_3(_, _, _, _, _)) + .Times(1) + .WillOnce(InvokeWithoutArgs(makeDeadObjectFailure)); + + // run test + const auto [resultCode, outputShapes, timing] = + kPreparedModel->execute({}, {}, {}, {}, /*preferSynchronous=*/true); + + // verify failure + EXPECT_EQ(ANEURALNETWORKS_DEAD_OBJECT, resultCode); + EXPECT_EQ(0u, outputShapes.size()); + EXPECT_EQ(kNoTiming, timing); +} + +TEST_F(VersionedIPreparedModelMockTest, executeAsyncReturnCrash) { + // setup failure + const auto ret = [this]() -> Return<V1_3::ErrorStatus> { + kMockPreparedModel->simulateCrash(); + return V1_3::ErrorStatus::NONE; + }; + EXPECT_CALL(*kMockPreparedModel, execute_1_3(_, _, _, _, _)) + .Times(1) + .WillOnce(InvokeWithoutArgs(ret)); + + // run test + const auto [resultCode, outputShapes, timing] = + kPreparedModel->execute({}, {}, {}, {}, /*preferSynchronous=*/false); + + // verify failure + EXPECT_EQ(ANEURALNETWORKS_DEAD_OBJECT, resultCode); + EXPECT_EQ(0u, outputShapes.size()); + EXPECT_EQ(kNoTiming, timing); +} + +} // namespace +} // namespace android::nn
diff --git a/runtime/test/TestWrapper.cpp b/runtime/test/TestWrapper.cpp index d75f677..1ab8f95 100644 --- a/runtime/test/TestWrapper.cpp +++ b/runtime/test/TestWrapper.cpp
@@ -14,10 +14,10 @@ * limitations under the License. */ -#include <gtest/gtest.h> - #include "NeuralNetworksWrapper.h" +#include <gtest/gtest.h> + using namespace ::android::nn::wrapper; // This file tests certain aspects of the interfaces from NeuralNetworksWrapper.h.
diff --git a/runtime/test/android_fuzzing/Android.bp b/runtime/test/android_fuzzing/Android.bp index 7ce1386..84e69f0 100644 --- a/runtime/test/android_fuzzing/Android.bp +++ b/runtime/test/android_fuzzing/Android.bp
@@ -14,17 +14,6 @@ * limitations under the License. */ -package { - // See: http://go/android-license-faq - // A large-scale-change added 'default_applicable_licenses' to import - // all of the 'license_kinds' from "packages_modules_NeuralNetworks_runtime_license" - // to get the below license kinds: - // SPDX-license-identifier-Apache-2.0 - default_applicable_licenses: [ - "packages_modules_NeuralNetworks_runtime_license", - ], -} - cc_library_static { name: "libneuralnetworks_fuzzer_proto", owner: "google", @@ -36,44 +25,6 @@ shared_libs: ["libprotobuf-cpp-full"], } -cc_library_static { - name: "libneuralnetworks_fuzzer_harness", - owner: "google", - srcs: [ - "Converter.cpp", - "FuzzHarness.cpp", - "StaticAssert.cpp", - ], - shared_libs: [ - "[email protected]", - "libhidlmemory", - "libnativewindow", - "libprotobuf-cpp-full", - ], - static_libs: [ - "libneuralnetworks_common", - "libneuralnetworks_generated_test_harness", - "libneuralnetworks_static", - ], - whole_static_libs: [ - "libneuralnetworks_fuzzer_proto", - "libprotobuf-mutator", - "neuralnetworks_types", - ], -} - -filegroup { - name: "libneuralnetworks_fuzzer_corpus", - srcs: ["corpus/*"], -} - -cc_defaults { - name: "libneuralnetworks_fuzzer_defaults", - shared_libs: ["libprotobuf-cpp-full"], - static_libs: ["libneuralnetworks_fuzzer_harness"], - corpus: [":libneuralnetworks_fuzzer_corpus"], -} - // The following host binary is disabled because the genrule that uses this // binary (below) is disabled and because this binary is 300MB large currently. cc_binary_host {
diff --git a/runtime/test/android_fuzzing/Converter.cpp b/runtime/test/android_fuzzing/Converter.cpp index 4733b3b..6eb9da6 100644 --- a/runtime/test/android_fuzzing/Converter.cpp +++ b/runtime/test/android_fuzzing/Converter.cpp
@@ -16,9 +16,6 @@ #include "Converter.h" -#include <android-base/logging.h> -#include <nnapi/TypeUtils.h> - #include <algorithm> #include <random> #include <utility> @@ -28,62 +25,60 @@ namespace { using namespace test_helper; +using namespace android_nn_fuzz; constexpr uint32_t kMaxSize = 65536; -TestOperandType convert(android_nn_fuzz::OperandType type) { +TestOperandType convert(OperandType type) { return static_cast<TestOperandType>(type); } -TestOperationType convert(android_nn_fuzz::OperationType type) { +TestOperationType convert(OperationType type) { return static_cast<TestOperationType>(type); } -TestOperandLifeTime convert(android_nn_fuzz::OperandLifeTime lifetime) { +TestOperandLifeTime convert(OperandLifeTime lifetime) { return static_cast<TestOperandLifeTime>(lifetime); } -std::vector<float> convert(const android_nn_fuzz::Scales& scales) { +std::vector<float> convert(const Scales& scales) { const auto& repeatedScale = scales.scale(); return std::vector<float>(repeatedScale.begin(), repeatedScale.end()); } -TestSymmPerChannelQuantParams convert(const android_nn_fuzz::SymmPerChannelQuantParams& params) { +TestSymmPerChannelQuantParams convert(const SymmPerChannelQuantParams& params) { std::vector<float> scales = convert(params.scales()); const uint32_t channelDim = params.channel_dim(); return {.scales = std::move(scales), .channelDim = channelDim}; } -std::vector<uint32_t> convert(const android_nn_fuzz::Dimensions& dimensions) { +std::vector<uint32_t> convert(const Dimensions& dimensions) { const auto& repeatedDimension = dimensions.dimension(); return std::vector<uint32_t>(repeatedDimension.begin(), repeatedDimension.end()); } -TestBuffer convert(size_t size, const android_nn_fuzz::Buffer& buffer) { - if (size == 0) { +TestBuffer convert(bool makeEmpty, const Buffer& buffer) { + if (makeEmpty) { return TestBuffer(); } const uint32_t randomSeed = buffer.random_seed(); std::default_random_engine generator{randomSeed}; - return 
TestBuffer::createRandom(size % kMaxSize, &generator); + std::uniform_int_distribution<uint32_t> dist{0, kMaxSize}; + const uint32_t size = dist(generator); + return TestBuffer::createFromRng<uint32_t>(size, &generator); } -TestOperand convert(const android_nn_fuzz::Operand& operand) { +TestOperand convert(const Operand& operand) { const TestOperandType type = convert(operand.type()); std::vector<uint32_t> dimensions = convert(operand.dimensions()); const float scale = operand.scale(); const int32_t zeroPoint = operand.zero_point(); const TestOperandLifeTime lifetime = convert(operand.lifetime()); auto channelQuant = convert(operand.channel_quant()); - const bool isIgnored = false; - const auto opType = static_cast<OperandType>(type); - const size_t size = getNonExtensionSize(opType, dimensions).value_or(0); const bool makeEmpty = (lifetime == TestOperandLifeTime::NO_VALUE || lifetime == TestOperandLifeTime::TEMPORARY_VARIABLE); - const size_t bufferSize = makeEmpty ? 0 : size; - TestBuffer data = convert(bufferSize, operand.data()); - + TestBuffer data = convert(makeEmpty, operand.data()); return {.type = type, .dimensions = std::move(dimensions), .numberOfConsumers = 0, @@ -95,7 +90,7 @@ .data = std::move(data)}; } -std::vector<TestOperand> convert(const android_nn_fuzz::Operands& operands) { +std::vector<TestOperand> convert(const Operands& operands) { std::vector<TestOperand> testOperands; testOperands.reserve(operands.operand_size()); const auto& repeatedOperand = operands.operand(); @@ -104,19 +99,19 @@ return testOperands; } -std::vector<uint32_t> convert(const android_nn_fuzz::Indexes& indexes) { +std::vector<uint32_t> convert(const Indexes& indexes) { const auto& repeatedIndex = indexes.index(); return std::vector<uint32_t>(repeatedIndex.begin(), repeatedIndex.end()); } -TestOperation convert(const android_nn_fuzz::Operation& operation) { +TestOperation convert(const Operation& operation) { const TestOperationType type = convert(operation.type()); 
std::vector<uint32_t> inputs = convert(operation.inputs()); std::vector<uint32_t> outputs = convert(operation.outputs()); return {.type = type, .inputs = std::move(inputs), .outputs = std::move(outputs)}; } -std::vector<TestOperation> convert(const android_nn_fuzz::Operations& operations) { +std::vector<TestOperation> convert(const Operations& operations) { std::vector<TestOperation> testOperations; testOperations.reserve(operations.operation_size()); const auto& repeatedOperation = operations.operation(); @@ -126,30 +121,12 @@ return testOperations; } -void calculateNumberOfConsumers(const std::vector<TestOperation>& operations, - std::vector<TestOperand>* operands) { - CHECK(operands != nullptr); - const auto addConsumer = [operands](uint32_t operand) { - if (operand < operands->size()) { - operands->at(operand).numberOfConsumers++; - } - }; - const auto addAllConsumers = [&addConsumer](const TestOperation& operation) { - std::for_each(operation.inputs.begin(), operation.inputs.end(), addConsumer); - }; - std::for_each(operations.begin(), operations.end(), addAllConsumers); -} - -TestModel convert(const android_nn_fuzz::Model& model) { +TestModel convert(const Model& model) { std::vector<TestOperand> operands = convert(model.operands()); std::vector<TestOperation> operations = convert(model.operations()); std::vector<uint32_t> inputIndexes = convert(model.input_indexes()); std::vector<uint32_t> outputIndexes = convert(model.output_indexes()); const bool isRelaxed = model.is_relaxed(); - - // Calculate number of consumers. - calculateNumberOfConsumers(operations, &operands); - return {.main = {.operands = std::move(operands), .operations = std::move(operations), .inputIndexes = std::move(inputIndexes), @@ -159,7 +136,7 @@ } // anonymous namespace -TestModel convertToTestModel(const android_nn_fuzz::Test& model) { +TestModel convertToTestModel(const Test& model) { return convert(model.model()); }
diff --git a/runtime/test/android_fuzzing/DriverFuzzTest.cpp b/runtime/test/android_fuzzing/DriverFuzzTest.cpp deleted file mode 100644 index 16df014..0000000 --- a/runtime/test/android_fuzzing/DriverFuzzTest.cpp +++ /dev/null
@@ -1,324 +0,0 @@ -/* - * Copyright (C) 2020 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include <MemoryUtils.h> -#include <SampleDriverFull.h> -#include <Utils.h> -#include <android-base/logging.h> -#include <android/hardware/neuralnetworks/1.3/IDevice.h> -#include <android/hardware/neuralnetworks/1.3/IPreparedModel.h> -#include <android/hardware/neuralnetworks/1.3/types.h> - -#include <algorithm> -#include <cstdlib> -#include <limits> -#include <optional> -#include <utility> -#include <vector> - -#include "TestHarness.h" - -namespace { - -using ::android::hidl::memory::V1_0::IMemory; -using ::test_helper::TestModel; -using namespace test_helper; -using namespace android; -using namespace android::hardware; -namespace V1_0 = neuralnetworks::V1_0; -namespace V1_1 = neuralnetworks::V1_1; -namespace V1_2 = neuralnetworks::V1_2; -namespace V1_3 = neuralnetworks::V1_3; -using V1_0::DataLocation; - -sp<V1_3::IDevice> getDevice() { - /** - * TODO: INSERT CUSTOM DEVICE HERE - */ - static const sp<V1_3::IDevice> device = new nn::sample_driver::SampleDriverFull( - "example-driver", V1_0::PerformanceInfo{.execTime = 1.0f, .powerUsage = 1.0f}); - return device; -} - -V1_3::Subgraph createSubgraph(const TestSubgraph& testSubgraph, uint32_t* constCopySize, - std::vector<const TestBuffer*>* constCopies, uint32_t* constRefSize, - std::vector<const TestBuffer*>* constReferences) { - CHECK(constCopySize != nullptr); - 
CHECK(constCopies != nullptr); - CHECK(constRefSize != nullptr); - CHECK(constReferences != nullptr); - - // Operands. - hidl_vec<V1_3::Operand> operands(testSubgraph.operands.size()); - for (uint32_t i = 0; i < testSubgraph.operands.size(); i++) { - const auto& op = testSubgraph.operands[i]; - - DataLocation loc = {}; - if (op.lifetime == TestOperandLifeTime::CONSTANT_COPY) { - loc = { - .poolIndex = 0, - .offset = *constCopySize, - .length = static_cast<uint32_t>(op.data.size()), - }; - constCopies->push_back(&op.data); - *constCopySize += op.data.alignedSize(); - } else if (op.lifetime == TestOperandLifeTime::CONSTANT_REFERENCE) { - loc = { - .poolIndex = 0, - .offset = *constRefSize, - .length = static_cast<uint32_t>(op.data.size()), - }; - constReferences->push_back(&op.data); - *constRefSize += op.data.alignedSize(); - } else if (op.lifetime == TestOperandLifeTime::SUBGRAPH) { - loc = { - .poolIndex = 0, - .offset = *op.data.get<uint32_t>(), - .length = 0, - }; - } - - V1_2::Operand::ExtraParams extraParams; - if (op.type == TestOperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL) { - extraParams.channelQuant(V1_2::SymmPerChannelQuantParams{ - .scales = op.channelQuant.scales, .channelDim = op.channelQuant.channelDim}); - } - - operands[i] = {.type = static_cast<V1_3::OperandType>(op.type), - .dimensions = op.dimensions, - .numberOfConsumers = op.numberOfConsumers, - .scale = op.scale, - .zeroPoint = op.zeroPoint, - .lifetime = static_cast<V1_3::OperandLifeTime>(op.lifetime), - .location = loc, - .extraParams = std::move(extraParams)}; - } - - // Operations. 
- hidl_vec<V1_3::Operation> operations(testSubgraph.operations.size()); - std::transform(testSubgraph.operations.begin(), testSubgraph.operations.end(), - operations.begin(), [](const TestOperation& op) -> V1_3::Operation { - return {.type = static_cast<V1_3::OperationType>(op.type), - .inputs = op.inputs, - .outputs = op.outputs}; - }); - - return {.operands = std::move(operands), - .operations = std::move(operations), - .inputIndexes = testSubgraph.inputIndexes, - .outputIndexes = testSubgraph.outputIndexes}; -} - -void copyTestBuffers(const std::vector<const TestBuffer*>& buffers, uint8_t* output) { - uint32_t offset = 0; - for (const TestBuffer* buffer : buffers) { - const uint8_t* begin = buffer->get<uint8_t>(); - const uint8_t* end = begin + buffer->size(); - std::copy(begin, end, output + offset); - offset += buffer->alignedSize(); - } -} - -V1_3::Model createModel(const TestModel& testModel) { - uint32_t constCopySize = 0; - uint32_t constRefSize = 0; - std::vector<const TestBuffer*> constCopies; - std::vector<const TestBuffer*> constReferences; - - V1_3::Subgraph mainSubgraph = createSubgraph(testModel.main, &constCopySize, &constCopies, - &constRefSize, &constReferences); - hidl_vec<V1_3::Subgraph> refSubgraphs(testModel.referenced.size()); - std::transform(testModel.referenced.begin(), testModel.referenced.end(), refSubgraphs.begin(), - [&constCopySize, &constCopies, &constRefSize, - &constReferences](const TestSubgraph& testSubgraph) { - return createSubgraph(testSubgraph, &constCopySize, &constCopies, - &constRefSize, &constReferences); - }); - - // Constant copies. - hidl_vec<uint8_t> operandValues(constCopySize); - copyTestBuffers(constCopies, operandValues.data()); - - // Shared memory. 
- std::vector<hidl_memory> pools = {}; - if (constRefSize > 0) { - pools.push_back(nn::allocateSharedMemory(constRefSize)); - CHECK_NE(pools.back().size(), 0u); - - // load data - sp<IMemory> mappedMemory = mapMemory(pools[0]); - CHECK(mappedMemory.get() != nullptr); - uint8_t* mappedPtr = - reinterpret_cast<uint8_t*>(static_cast<void*>(mappedMemory->getPointer())); - CHECK(mappedPtr != nullptr); - - copyTestBuffers(constReferences, mappedPtr); - } - - return {.main = std::move(mainSubgraph), - .referenced = std::move(refSubgraphs), - .operandValues = std::move(operandValues), - .pools = pools, - .relaxComputationFloat32toFloat16 = testModel.isRelaxed}; -} - -V1_3::Request createRequest(const TestModel& testModel) { - static constexpr uint32_t kInputPoolIndex = 0; - static constexpr uint32_t kOutputPoolIndex = 1; - - // Model inputs. - hidl_vec<V1_0::RequestArgument> inputs(testModel.main.inputIndexes.size()); - size_t inputSize = 0; - for (uint32_t i = 0; i < testModel.main.inputIndexes.size(); i++) { - const auto& op = testModel.main.operands[testModel.main.inputIndexes[i]]; - if (op.data.size() == 0) { - // Omitted input. - inputs[i] = {.hasNoValue = true}; - } else { - DataLocation loc = {.poolIndex = kInputPoolIndex, - .offset = static_cast<uint32_t>(inputSize), - .length = static_cast<uint32_t>(op.data.size())}; - inputSize += op.data.alignedSize(); - inputs[i] = {.hasNoValue = false, .location = loc, .dimensions = {}}; - } - } - - // Model outputs. - hidl_vec<V1_0::RequestArgument> outputs(testModel.main.outputIndexes.size()); - size_t outputSize = 0; - for (uint32_t i = 0; i < testModel.main.outputIndexes.size(); i++) { - const auto& op = testModel.main.operands[testModel.main.outputIndexes[i]]; - - // In the case of zero-sized output, we should at least provide a one-byte buffer. - // This is because zero-sized tensors are only supported internally to the driver, or - // reported in output shapes. 
It is illegal for the client to pre-specify a zero-sized - // tensor as model output. Otherwise, we will have two semantic conflicts: - // - "Zero dimension" conflicts with "unspecified dimension". - // - "Omitted operand buffer" conflicts with "zero-sized operand buffer". - size_t bufferSize = std::max<size_t>(op.data.size(), 1); - - DataLocation loc = {.poolIndex = kOutputPoolIndex, - .offset = static_cast<uint32_t>(outputSize), - .length = static_cast<uint32_t>(bufferSize)}; - outputSize += op.data.size() == 0 ? TestBuffer::kAlignment : op.data.alignedSize(); - outputs[i] = {.hasNoValue = false, .location = loc, .dimensions = {}}; - } - - // Allocate memory pools. - inputSize = std::max<size_t>(inputSize, 1); - auto inputMemory = nn::allocateSharedMemory(inputSize); - CHECK(inputMemory.valid()); - outputSize = std::max<size_t>(outputSize, 1); - auto outputMemory = nn::allocateSharedMemory(outputSize); - CHECK(outputMemory.valid()); - hidl_vec<V1_3::Request::MemoryPool> pools(2); - pools[kInputPoolIndex].hidlMemory(inputMemory); - pools[kOutputPoolIndex].hidlMemory(outputMemory); - - // Map input memory pool. - const auto mappedInput = mapMemory(inputMemory); - CHECK(mappedInput.get() != nullptr); - uint8_t* const inputPtr = static_cast<uint8_t*>(static_cast<void*>(mappedInput->getPointer())); - CHECK(inputPtr != nullptr); - - // Copy input data to the memory pool. 
- for (uint32_t i = 0; i < testModel.main.inputIndexes.size(); i++) { - const auto& op = testModel.main.operands[testModel.main.inputIndexes[i]]; - if (op.data.size() > 0) { - const uint8_t* begin = op.data.get<uint8_t>(); - const uint8_t* end = begin + op.data.size(); - std::copy(begin, end, inputPtr + inputs[i].location.offset); - } - } - - return {.inputs = std::move(inputs), .outputs = std::move(outputs), .pools = std::move(pools)}; -} - -class PreparedModelCallback : public V1_3::IPreparedModelCallback { - public: - Return<void> notify(V1_0::ErrorStatus /*status*/, - const sp<V1_0::IPreparedModel>& /*preparedModel*/) override { - LOG(FATAL) << "not implemented"; - return Void(); - } - Return<void> notify_1_2(V1_0::ErrorStatus /*status*/, - const sp<V1_2::IPreparedModel>& /*preparedModel*/) override { - LOG(FATAL) << "not implemented"; - return Void(); - } - Return<void> notify_1_3(V1_3::ErrorStatus status, - const sp<V1_3::IPreparedModel>& preparedModel) override { - const sp<V1_3::IPreparedModel> result = - (status == V1_3::ErrorStatus::NONE ? 
preparedModel : nullptr); - { - std::lock_guard guard(mMutex); - if (mResults.has_value()) return Void(); - mResults.emplace(result); - } - mCondition.notify_all(); - return Void(); - } - - const sp<V1_3::IPreparedModel>& getResults() const { - { - std::unique_lock lock(mMutex); - mCondition.wait(lock, [this] { return mResults.has_value(); }); - } - return mResults.value(); - } - - private: - mutable std::mutex mMutex; - mutable std::condition_variable mCondition; - std::optional<const sp<V1_3::IPreparedModel>> mResults; -}; - -sp<V1_3::IPreparedModel> prepareModel(const sp<V1_3::IDevice>& device, const V1_3::Model& model) { - const sp<PreparedModelCallback> callback = new PreparedModelCallback(); - device->prepareModel_1_3(model, V1_1::ExecutionPreference::FAST_SINGLE_ANSWER, - V1_3::Priority::MEDIUM, {}, {}, {}, {}, callback); - return callback->getResults(); -} - -void execute(const sp<V1_3::IPreparedModel>& preparedModel, const V1_3::Request& request) { - const auto cb = [](V1_3::ErrorStatus /*status*/, - const hidl_vec<V1_2::OutputShape>& /*outputShapes*/, - V1_2::Timing /*timing*/) { - // TODO: CHECK VTS requirements? - }; - preparedModel->executeSynchronously_1_3(request, V1_2::MeasureTiming::YES, {}, {}, cb); -} - -} // anonymous namespace - -void nnapiFuzzTest(const TestModel& testModel) { - // Set up device. - const auto device = getDevice(); - CHECK(device != nullptr); - - // Set up model. - const auto model = createModel(testModel); - - // Attempt to prepare the model. - const auto preparedModel = prepareModel(device, model); - if (preparedModel == nullptr) return; - - // Set up request. - const auto request = createRequest(testModel); - - // Perform execution. - execute(preparedModel, request); -}
diff --git a/runtime/test/android_fuzzing/FuzzHarness.cpp b/runtime/test/android_fuzzing/FuzzHarness.cpp deleted file mode 100644 index e6aef56..0000000 --- a/runtime/test/android_fuzzing/FuzzHarness.cpp +++ /dev/null
@@ -1,54 +0,0 @@ -/* - * Copyright (C) 2020 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include <LegacyUtils.h> -#include <nnapi/TypeUtils.h> -#include <src/libfuzzer/libfuzzer_macro.h> - -#include <algorithm> - -#include "Converter.h" -#include "Model.pb.h" -#include "TestHarness.h" - -// Fuzz test logic. This function will either run to completion and return, or crash. -extern void nnapiFuzzTest(const ::test_helper::TestModel& testModel); - -namespace { - -using ::android::nn::getNonExtensionSize; -using ::android::nn::OperandType; -using ::android::nn::fuzz::convertToTestModel; -using ::test_helper::TestModel; -using ::test_helper::TestOperand; - -bool operandOverflows(const TestOperand& operand) { - const auto operandType = static_cast<OperandType>(operand.type); - return getNonExtensionSize(operandType, operand.dimensions).has_value(); -} - -bool shouldSkip(const TestModel& model) { - return std::any_of(model.main.operands.begin(), model.main.operands.end(), operandOverflows); -} - -} // namespace - -DEFINE_PROTO_FUZZER(const ::android_nn_fuzz::Test& model) { - const TestModel testModel = convertToTestModel(model); - if (!shouldSkip(testModel)) { - nnapiFuzzTest(testModel); - } -}
diff --git a/runtime/test/android_fuzzing/FuzzTest.cpp b/runtime/test/android_fuzzing/FuzzTest.cpp index a6fc108..ebf3d3a 100644 --- a/runtime/test/android_fuzzing/FuzzTest.cpp +++ b/runtime/test/android_fuzzing/FuzzTest.cpp
@@ -20,11 +20,16 @@ #include <optional> #include <utility> +#include "Converter.h" +#include "Model.pb.h" #include "NeuralNetworksWrapper.h" #include "TestHarness.h" +#include "src/libfuzzer/libfuzzer_macro.h" namespace { +using ::android::nn::fuzz::convertToTestModel; +using ::android_nn_fuzz::Test; using ::test_helper::TestModel; using namespace ::android::nn::wrapper; using namespace test_helper; @@ -125,9 +130,7 @@ return execution; } -} // anonymous namespace - -void nnapiFuzzTest(const TestModel& testModel) { +void runTest(const TestModel& testModel) { // set up model auto model = CreateModel(testModel); if (!model.has_value()) { @@ -149,3 +152,10 @@ // perform execution execution->compute(); } + +} // anonymous namespace + +DEFINE_PROTO_FUZZER(const Test& model) { + const TestModel testModel = convertToTestModel(model); + runTest(testModel); +}
diff --git a/runtime/test/android_fuzzing/GenerateCorpus.cpp b/runtime/test/android_fuzzing/GenerateCorpus.cpp index 783b660..2f72b9d 100644 --- a/runtime/test/android_fuzzing/GenerateCorpus.cpp +++ b/runtime/test/android_fuzzing/GenerateCorpus.cpp
@@ -41,8 +41,8 @@ return static_cast<OperationType>(type); } -Operand::LifeTime convert(TestOperandLifeTime lifetime) { - return static_cast<Operand::LifeTime>(lifetime); +OperandLifeTime convert(TestOperandLifeTime lifetime) { + return static_cast<OperandLifeTime>(lifetime); } Scales convert(const std::vector<float>& scales) {
diff --git a/runtime/test/android_fuzzing/README.md b/runtime/test/android_fuzzing/README.md deleted file mode 100644 index c143f79..0000000 --- a/runtime/test/android_fuzzing/README.md +++ /dev/null
@@ -1,310 +0,0 @@ -# Background - -This document seeks to be a crash-course and cheat-sheet for running the NNAPI -fuzz tests. - -The purpose of fuzz testing is to find crashes, assertions, memory violations, -or general undefined behavior in the code under test due to factors such as -unexpected inputs. For NNAPI fuzz testing, Android uses tests based on -`libFuzzer`, which are efficient at fuzzing because they use line coverage of -previous test cases to generate new random inputs. For example, `libFuzzer` -favors test cases that run on uncovered lines of code. This greatly reduces the -amount of time tests take to find problematic code. - -Currently, there are two NNAPI fuzz test targets: `libneuralnetworks_fuzzer` -which tests at the NNAPI NDK layer (testing libneuralnetworks as a static -library) and `libneuralnetworks_driver_fuzzer` which tests an in-process driver -at the NNAPI HAL layer (the sample driver, unless the test is modified to do -otherwise). To simplify development of future tests, this directory also -defines an NNAPI fuzzing test harness and packages it in a blueprint default -`libneuralnetworks_fuzzer_defaults`. - -Useful background reading and reference documents: -* libFuzzer overview: http://llvm.org/docs/LibFuzzer.html -* Android-specific libFuzzer documentation: - https://source.android.com/devices/tech/debug/libfuzzer -* Android Security Testing (sanitizers, fuzzer, etc.): - https://source.android.com/devices/tech/debug/fuzz-sanitize -* Sanitizer flags: - https://github.com/google/sanitizers/wiki/SanitizerCommonFlags -* Address Sanitizer flags: - https://github.com/google/sanitizers/wiki/AddressSanitizerFlags -* libprotobuf-mutator: - https://github.com/google/libprotobuf-mutator#libprotobuf-mutator - -# Setting up the test - -## Developing an NNAPI fuzz test - -### Creating a new fuzz test using `libneuralnetworks_fuzzer_defaults` - -To create a new fuzz test: -1. 
Create code that implements the function - `void nnapiFuzzTest(const TestModel& testModel)` (examples: [1][1], [2][2]) -2. Create a blueprint `cc_fuzz` target that includes - `libneuralnetworks_fuzzer_defaults` as a default (examples: [1][3], [2][4]) - -### Modifying `libneuralnetworks_driver_fuzzer` to test custom driver - -Alter the `libneuralnetworks_driver_fuzzer` code locally to test your own -driver. In the section `“TODO: INSERT CUSTOM DEVICE HERE”`, replace -`“new nn::sample_driver::SampleDriverFull(…);”` ([link][5]) with your own -driver. - -This code employs an in-process driver (as opposed to retrieving it on the -device via `IDevice::getService(...))` for three reasons. First, the test runs -faster because it does not need to communicate with the driver via IPC because -the driver is created in the same process. Second, it ensures that the -`libFuzzer` can use the coverage from the driver to guide the test -appropriately, as everything is built as one unit. Finally, whenever a crash -occurs, only one stacktrace needs to be analyzed to debug the problem. - -The current version of the test assumes a 1.3 driver and uses the methods -`IDevice::prepareModel_1_3` and `IDevice::executeSynchronously_1_3` -([link][6]). Change the test locally to test different methods or different -driver versions. - -## Preparing a device - -Because the test is self-contained, you should be able to just use a regular -device image without any modifications. The next section -[Building and uploading fuzz test](#building-and-uploading-fuzz-test) describes -how to build the test binary itself. If you need to have the entire image -fuzzed (for example, if you want to sanitize a shared library), you can build a -sanitized image with one of the following two sequences of commands depending -on your needs: - -### You can build a pre-configured sanitized device image with: -```bash -$ . 
build/envsetup.sh -$ lunch <sanitized_target> # e.g., <TARGET_PRODUCT>_hwasan-userdebug -$ mma -j -``` - -### Alternatively, you can build other (read: non-sanitized) targets with the following command: -```bash -$ . build/envsetup.sh -$ lunch <non-sanitized_target> # e.g., <TARGET_PRODUCT>-userdebug -$ SANITIZE_TARGET=hwaddress mma -j -``` - -## Building and uploading fuzz test - -For simplicity and clarity, the rest of the code here will use the following -environment variables: -``` -$ FUZZER_NAME=libneuralnetworks_driver_fuzzer -$ FUZZER_TARGET_ARCH=$(get_build_var TARGET_ARCH) -$ FUZZER_TARGET_DIR=/data/fuzz/$FUZZER_TARGET_ARCH/$FUZZER_NAME -$ FUZZER_TARGET=$FUZZER_TARGET_DIR/$FUZZER_NAME -``` - -When using a sanitized lunch target, build the fuzz test with the following -command: -```bash -$ m $FUZZER_NAME -j -``` - -When building with a non-sanitized lunch target, build the fuzz test with the -following command: -```bash -$ SANITIZE_TARGET=hwaddress m $FUZZER_NAME -j -``` - -Note that the above commands use `hwaddress` sanitization, but other sanitizers -can be used in place of or in addition to `hwaddress`. More command options for -building with other sanitizers can be found [here][7], and they are explained -more in depth in the Android background reading [here][8]. 
- -Once the test is built, it can be pushed to the device via: -```bash -$ adb root -$ adb sync data -$ adb shell mkdir -p $FUZZER_TARGET_DIR/dump -``` - -The directory `$FUZZER_TARGET_DIR/` is now as follows: -* `$FUZZER_NAME` -- fuzz test binary -* `corpus/` -- directory for reference/example “good” test cases, used to speed - up fuzz tests -* `dump/` -- sandbox directory used by the fuzz test; this can be ignored -* `crash-*` -- any future problematic test cases will be dumped to the directory - -# Running the test - -## Running the full fuzz test - -The fuzz test can be launched with the following command, and will continue -running until the user terminates the process (e.g., ctrl+c) or until the test -crashes. - -```bash -$ adb shell HWASAN_OPTIONS=handle_sigill=2:handle_sigfpe=2:handle_sigbus=2:handle_abort=2:handle_segv=2 $FUZZER_TARGET $FUZZER_TARGET_DIR/dump/ $FUZZER_TARGET_DIR/corpus/ -artifact_prefix=$FUZZER_TARGET_DIR/ -``` - -(When using a non-hwasan build, you need to change the `HWASAN_OPTIONS` -variable to match whatever build you’re using, e.g., `ASAN_OPTIONS`.) - -When something unexpected occurs (e.g., a crash or a very slow test case), the -test case that causes it will be dumped to a file in the directory specified by -“`-artifact_prefix`”. The generated file will appear as -`slow-unit-<unique_identifier>`, `crash-<unique_identifier>`, -`oom-<unique_identifier>`, or `timeout-<unique_identifier>`. Normally, -`libFuzzer` crash files will contain unreadable binary data; however, -`libneuralnetworks_driver_fuzzer`‘s output is formatted in a human readable way -because it uses `libprotobuf-mutator`, so it’s fine to inspect the file to get -more information on the test case that caused the problem. For more -information, refer to the [Fuzz test case format](#fuzz-test-case-format) -section below. 
- -## Reproducing crash case - -When a crash occurs, the crash test case can be re-run with the following -command: - -```bash -$ adb shell HWASAN_OPTIONS=handle_sigill=2:handle_sigfpe=2:handle_sigbus=2:handle_abort=2:handle_segv=2 $FUZZER_TARGET $FUZZER_TARGET_DIR/<test_case_name> -``` -(Note that the execution parameters for `HWASAN_OPTIONS` are the same as those -above.) - -E.g., `<test_case_name>` could be: -* `minimized-from-15b1dae0d2872d8dccf4f35fbf4ecbecee697a49` -* `slow-unit-cad88bd58853b71b875ac048001b78f7a7501dc3` -* `crash-07cb8793bbc65ab010382c0f8d40087897826129` - -# Finding minimal crash case - -When a crash occurs, sometimes the offending test case is large and -complicated. `libFuzzer` has a way to minimize the crashing case to simplify -debugging with the following command: - -```bash -$ adb shell HWASAN_OPTIONS=handle_sigill=2:handle_sigfpe=2:handle_sigbus=2:handle_abort=2:handle_segv=2 $FUZZER_TARGET $FUZZER_TARGET_DIR/<test_case_name> -artifact_prefix=$FUZZER_TARGET_DIR/ -minimize_crash=1 -max_total_time=60 -``` -(Note that the execution parameters for `HWASAN_OPTIONS` are the same as those -above.) - -Note that the `<test_case_name>` must be some sort of crash for the -minimization to work. For example, minimization will not work on something like -`slow_unit-*` cases. Increasing the `max_total_time` value may yield a more -minimal test crash, but will take longer. - -## Fuzz test case format - -By itself, `libFuzzer` will generate a random collection of bytes as input to -the fuzz test. The test developer then needs to convert this random data to -some structured testing format (e.g., a syntactically correct NNAPI model). -Doing this conversion can be slow and difficult, and can lead to inefficient -mutations and tests. Additionally, whenever the fuzz test finds a crashing test -case, it will dump this test case as an unreadable binary chunk of data in a -file (e.g., `crash-*` files described above). 
- -To help with both of these issues, the NNAPI fuzz tests additionally use a -library called [`libprotobuf-mutator`][9] to handle the conversions from the -random `libFuzzer` input to a protobuf format used for NNAPI fuzz testing. The -conversion from this protobuf format to a model format is much more -straightforward and efficient. As another useful utility, `libprotobuf-mutator` -provides the option to represent this data as human-readable text. This means -that whenever the fuzz test finds a crash, the resultant test case that is -dumped to a file will be in a human-readable format. - -Here is one example of a crash case that was found: -``` -model { - operands { - operand { - type: TENSOR_INT32 - dimensions { - dimension: 1 - } - scale: 0 - zero_point: 0 - lifetime: TEMPORARY_VARIABLE - channel_quant { - scales { - } - channel_dim: 0 - } - data { - random_seed: 4 - } - } - operand { - type: TENSOR_FLOAT32 - dimensions { - dimension: 2 - dimension: 4 - } - scale: 0 - zero_point: 0 - lifetime: TEMPORARY_VARIABLE - channel_quant { - scales { - } - channel_dim: 0 - } - data { - random_seed: 0 - } - } - operand { - type: TENSOR_FLOAT32 - dimensions { - } - scale: 0 - zero_point: 0 - lifetime: SUBGRAPH_OUTPUT - channel_quant { - scales { - } - channel_dim: 27 - } - data { - random_seed: 0 - } - } - } - operations { - operation { - type: EMBEDDING_LOOKUP - inputs { - index: 0 - index: 1 - } - outputs { - index: 2 - } - } - } - input_indexes { - index: 0 - index: 1 - } - output_indexes { - index: 2 - } - is_relaxed: true -} -``` - -This format is largely based on the format defined in [NNAPI HAL][10]. The one -major exception is that the contents of an operand's data are replaced by data -generated from “random_seed” (except for `TEMPORARY_VARIABLE` and `NO_VALUE` -operands, in which cases there is no data, so "random_seed" is ignored). 
This -is done for a practical reason: `libFuzzer` (and by extension -`libprotobuf-mutator`) converge slower when the amount of randomly generated -input is large. For the fuzz tests, the contents of the operand data are not as -interesting as the structure of the graph itself, so the data was replaced by -a seed to a random number generator instead. - -[1]: https://cs.android.com/android/platform/superproject/+/master:packages/modules/NeuralNetworks/runtime/test/android_fuzzing/DriverFuzzTest.cpp;l=307-324;drc=34aee872d5dc317ad8a32377e9114c0c606d8afe -[2]: https://cs.android.com/android/platform/superproject/+/master:packages/modules/NeuralNetworks/runtime/test/android_fuzzing/FuzzTest.cpp;l=130-151;drc=34aee872d5dc317ad8a32377e9114c0c606d8afe -[3]: https://cs.android.com/android/platform/superproject/+/master:packages/modules/NeuralNetworks/runtime/test/Android.bp;l=195-216;drc=60823f07172e6b5bbc06b2fac25a15ab91c80b25 -[4]: https://cs.android.com/android/platform/superproject/+/master:packages/modules/NeuralNetworks/runtime/test/Android.bp;l=218-240;drc=60823f07172e6b5bbc06b2fac25a15ab91c80b25 -[5]: https://cs.android.com/android/platform/superproject/+/master:packages/modules/NeuralNetworks/runtime/test/android_fuzzing/DriverFuzzTest.cpp;l=48-52;drc=34aee872d5dc317ad8a32377e9114c0c606d8afe -[6]: https://cs.android.com/android/platform/superproject/+/master:packages/modules/NeuralNetworks/runtime/test/android_fuzzing/DriverFuzzTest.cpp;l=291-292,302;drc=34aee872d5dc317ad8a32377e9114c0c606d8afe -[7]: https://cs.android.com/android/platform/superproject/+/master:build/soong/cc/sanitize.go;l=140-187;drc=b5b2aba43b5bb6305ea69d60f9bf580f711d7c96 -[8]: https://source.android.com/devices/tech/debug/libfuzzer -[9]: https://cs.android.com/android/platform/superproject/+/master:external/libprotobuf-mutator/ -[10]: https://cs.android.com/android/platform/superproject/+/master:hardware/interfaces/neuralnetworks/
diff --git a/runtime/test/fibonacci_extension/FibonacciDriver.cpp b/runtime/test/fibonacci_extension/FibonacciDriver.cpp index 95dfc99..c488298 100644 --- a/runtime/test/fibonacci_extension/FibonacciDriver.cpp +++ b/runtime/test/fibonacci_extension/FibonacciDriver.cpp
@@ -18,24 +18,25 @@ #include "FibonacciDriver.h" -#include <HalInterfaces.h> -#include <OperationResolver.h> -#include <OperationsUtils.h> -#include <Utils.h> -#include <ValidateHal.h> -#include <nnapi/Types.h> - #include <vector> #include "FibonacciExtension.h" +#include "HalInterfaces.h" #include "NeuralNetworksExtensions.h" +#include "OperationResolver.h" +#include "OperationsUtils.h" +#include "Utils.h" +#include "ValidateHal.h" namespace android { namespace nn { namespace sample_driver { namespace { -const uint32_t kTypeWithinExtensionMask = (1 << kExtensionTypeBits) - 1; +using namespace hal; + +const uint8_t kLowBitsType = static_cast<uint8_t>(ExtensionTypeEncoding::LOW_BITS_TYPE); +const uint32_t kTypeWithinExtensionMask = (1 << kLowBitsType) - 1; namespace fibonacci_op { @@ -47,22 +48,22 @@ constexpr uint32_t kNumOutputs = 1; constexpr uint32_t kOutputTensor = 0; -bool getFibonacciExtensionPrefix(const V1_3::Model& model, uint16_t* prefix) { +bool getFibonacciExtensionPrefix(const Model& model, uint16_t* prefix) { NN_RET_CHECK_EQ(model.extensionNameToPrefix.size(), 1u); // Assumes no other extensions in use. 
NN_RET_CHECK_EQ(model.extensionNameToPrefix[0].name, EXAMPLE_FIBONACCI_EXTENSION_NAME); *prefix = model.extensionNameToPrefix[0].prefix; return true; } -bool isFibonacciOperation(const V1_3::Operation& operation, const V1_3::Model& model) { +bool isFibonacciOperation(const Operation& operation, const Model& model) { int32_t operationType = static_cast<int32_t>(operation.type); uint16_t prefix; NN_RET_CHECK(getFibonacciExtensionPrefix(model, &prefix)); - NN_RET_CHECK_EQ(operationType, (prefix << kExtensionTypeBits) | EXAMPLE_FIBONACCI); + NN_RET_CHECK_EQ(operationType, (prefix << kLowBitsType) | EXAMPLE_FIBONACCI); return true; } -bool validate(const V1_3::Operation& operation, const V1_3::Model& model) { +bool validate(const Operation& operation, const Model& model) { NN_RET_CHECK(isFibonacciOperation(operation, model)); NN_RET_CHECK_EQ(operation.inputs.size(), kNumInputs); NN_RET_CHECK_EQ(operation.outputs.size(), kNumOutputs); @@ -70,9 +71,9 @@ int32_t outputType = static_cast<int32_t>(model.main.operands[operation.outputs[0]].type); uint16_t prefix; NN_RET_CHECK(getFibonacciExtensionPrefix(model, &prefix)); - NN_RET_CHECK(inputType == ((prefix << kExtensionTypeBits) | EXAMPLE_INT64) || + NN_RET_CHECK(inputType == ((prefix << kLowBitsType) | EXAMPLE_INT64) || inputType == ANEURALNETWORKS_TENSOR_FLOAT32); - NN_RET_CHECK(outputType == ((prefix << kExtensionTypeBits) | EXAMPLE_TENSOR_QUANT64_ASYMM) || + NN_RET_CHECK(outputType == ((prefix << kLowBitsType) | EXAMPLE_TENSOR_QUANT64_ASYMM) || outputType == ANEURALNETWORKS_TENSOR_FLOAT32); return true; } @@ -127,7 +128,7 @@ uint64_t* output = context->getOutputBuffer<uint64_t>(kOutputTensor); Shape outputShape = context->getOutputShape(kOutputTensor); auto outputQuant = reinterpret_cast<const ExampleQuant64AsymmParams*>( - std::get<Operand::ExtensionParams>(outputShape.extraParams).data()); + outputShape.extraParams.extension().data()); return compute(n, outputQuant->scale, outputQuant->zeroPoint, output); } } @@ 
-141,14 +142,14 @@ static OperationRegistration operationRegistration(operationType, fibonacci_op::kOperationName, nullptr, fibonacci_op::prepare, fibonacci_op::execute, {}); - uint16_t prefix = static_cast<int32_t>(operationType) >> kExtensionTypeBits; + uint16_t prefix = static_cast<int32_t>(operationType) >> kLowBitsType; uint16_t typeWithinExtension = static_cast<int32_t>(operationType) & kTypeWithinExtensionMask; // Assumes no other extensions in use. return prefix != 0 && typeWithinExtension == EXAMPLE_FIBONACCI ? &operationRegistration : nullptr; } -hardware::Return<void> FibonacciDriver::getSupportedExtensions(getSupportedExtensions_cb cb) { +Return<void> FibonacciDriver::getSupportedExtensions(getSupportedExtensions_cb cb) { cb(V1_0::ErrorStatus::NONE, { { @@ -168,44 +169,44 @@ }, }, }); - return hardware::Void(); + return Void(); } -hardware::Return<void> FibonacciDriver::getCapabilities_1_3(getCapabilities_1_3_cb cb) { +Return<void> FibonacciDriver::getCapabilities_1_3(getCapabilities_1_3_cb cb) { android::nn::initVLogMask(); VLOG(DRIVER) << "getCapabilities()"; - static const V1_0::PerformanceInfo kPerf = {.execTime = 1.0f, .powerUsage = 1.0f}; - V1_3::Capabilities capabilities = { + static const PerformanceInfo kPerf = {.execTime = 1.0f, .powerUsage = 1.0f}; + Capabilities capabilities = { .relaxedFloat32toFloat16PerformanceScalar = kPerf, .relaxedFloat32toFloat16PerformanceTensor = kPerf, .operandPerformance = nonExtensionOperandPerformance<HalVersion::V1_3>(kPerf), .ifPerformance = kPerf, .whilePerformance = kPerf}; cb(V1_3::ErrorStatus::NONE, capabilities); - return hardware::Void(); + return Void(); } -hardware::Return<void> FibonacciDriver::getSupportedOperations_1_3( - const V1_3::Model& model, getSupportedOperations_1_3_cb cb) { +Return<void> FibonacciDriver::getSupportedOperations_1_3(const V1_3::Model& model, + getSupportedOperations_1_3_cb cb) { VLOG(DRIVER) << "getSupportedOperations()"; if (!validateModel(model)) { 
cb(V1_3::ErrorStatus::INVALID_ARGUMENT, {}); - return hardware::Void(); + return Void(); } const size_t count = model.main.operations.size(); std::vector<bool> supported(count); for (size_t i = 0; i < count; ++i) { - const V1_3::Operation& operation = model.main.operations[i]; + const Operation& operation = model.main.operations[i]; if (fibonacci_op::isFibonacciOperation(operation, model)) { if (!fibonacci_op::validate(operation, model)) { cb(V1_3::ErrorStatus::INVALID_ARGUMENT, {}); - return hardware::Void(); + return Void(); } supported[i] = true; } } cb(V1_3::ErrorStatus::NONE, supported); - return hardware::Void(); + return Void(); } } // namespace sample_driver
diff --git a/runtime/test/fibonacci_extension/FibonacciDriver.h b/runtime/test/fibonacci_extension/FibonacciDriver.h index 28b55d8..303edd8 100644 --- a/runtime/test/fibonacci_extension/FibonacciDriver.h +++ b/runtime/test/fibonacci_extension/FibonacciDriver.h
@@ -17,9 +17,9 @@ #ifndef ANDROID_FRAMEWORKS_ML_NN_RUNTIME_TEST_FIBONACCI_EXTENSION_FIBONACCI_DRIVER_H #define ANDROID_FRAMEWORKS_ML_NN_RUNTIME_TEST_FIBONACCI_EXTENSION_FIBONACCI_DRIVER_H -#include <HalInterfaces.h> -#include <OperationResolver.h> -#include <SampleDriver.h> +#include "HalInterfaces.h" +#include "OperationResolver.h" +#include "SampleDriver.h" namespace android { namespace nn { @@ -34,7 +34,7 @@ return &instance; } - const OperationRegistration* findOperation(OperationType operationType) const override; + const OperationRegistration* findOperation(hal::OperationType operationType) const override; private: FibonacciOperationResolver() {} @@ -45,10 +45,10 @@ class FibonacciDriver : public SampleDriver { public: FibonacciDriver() : SampleDriver(kDriverName, FibonacciOperationResolver::get()) {} - hardware::Return<void> getSupportedExtensions(getSupportedExtensions_cb cb) override; - hardware::Return<void> getCapabilities_1_3(getCapabilities_1_3_cb cb) override; - hardware::Return<void> getSupportedOperations_1_3(const V1_3::Model& model, - getSupportedOperations_1_3_cb cb) override; + hal::Return<void> getSupportedExtensions(getSupportedExtensions_cb cb) override; + hal::Return<void> getCapabilities_1_3(getCapabilities_1_3_cb cb) override; + hal::Return<void> getSupportedOperations_1_3(const hal::V1_3::Model& model, + getSupportedOperations_1_3_cb cb) override; static constexpr char kDriverName[] = "sample-driver-fibonacci-extension"; };
diff --git a/runtime/test/fibonacci_extension/FibonacciExtensionTest.cpp b/runtime/test/fibonacci_extension/FibonacciExtensionTest.cpp index 07cdb58..cdafa34 100644 --- a/runtime/test/fibonacci_extension/FibonacciExtensionTest.cpp +++ b/runtime/test/fibonacci_extension/FibonacciExtensionTest.cpp
@@ -14,21 +14,22 @@ * limitations under the License. */ -#include <HalInterfaces.h> -#include <ValidateHal.h> -#include <gtest/gtest.h> - -#include <vector> - -#include "FibonacciDriver.h" -#include "FibonacciExtension.h" -#include "HalUtils.h" +#include "HalInterfaces.h" #include "Manager.h" #include "NeuralNetworks.h" #include "NeuralNetworksExtensions.h" #include "NeuralNetworksWrapperExtensions.h" #include "TestNeuralNetworksWrapper.h" #include "TypeManager.h" +#include "Utils.h" +#include "ValidateHal.h" + +#include <gtest/gtest.h> + +#include "FibonacciDriver.h" +#include "FibonacciExtension.h" + +#include <vector> namespace android { namespace nn { @@ -50,33 +51,34 @@ // Real world extension tests should run against actual hardware // implementations, but there is no hardware supporting the test // extension. Hence the sample software driver. - DeviceManager::get()->forTest_registerDevice(makeSharedDevice( - sample_driver::FibonacciDriver::kDriverName, new sample_driver::FibonacciDriver())); + DeviceManager::get()->forTest_registerDevice(sample_driver::FibonacciDriver::kDriverName, + new sample_driver::FibonacciDriver()); // Discover extensions provided by registered devices. 
TypeManager::get()->forTest_reset(); uint32_t numDevices = 0; ASSERT_EQ(ANeuralNetworks_getDeviceCount(&numDevices), ANEURALNETWORKS_NO_ERROR); + ANeuralNetworksDevice* fibonacciDevice = nullptr; + ANeuralNetworksDevice* cpuDevice = nullptr; for (uint32_t i = 0; i < numDevices; i++) { ANeuralNetworksDevice* device = nullptr; EXPECT_EQ(ANeuralNetworks_getDevice(i, &device), ANEURALNETWORKS_NO_ERROR); - mAllDevices.push_back(device); bool supportsFibonacciExtension; ASSERT_EQ( ANeuralNetworksDevice_getExtensionSupport( device, EXAMPLE_FIBONACCI_EXTENSION_NAME, &supportsFibonacciExtension), ANEURALNETWORKS_NO_ERROR); if (supportsFibonacciExtension) { - ASSERT_EQ(mFibonacciDevice, nullptr) << "Found multiple Fibonacci drivers"; - mFibonacciDevice = device; + ASSERT_EQ(fibonacciDevice, nullptr) << "Found multiple Fibonacci drivers"; + fibonacciDevice = device; } else if (DeviceManager::get()->forTest_isCpuDevice(device)) { - ASSERT_EQ(mCpuDevice, nullptr) << "Found multiple CPU drivers"; - mCpuDevice = device; + ASSERT_EQ(cpuDevice, nullptr) << "Found multiple CPU drivers"; + cpuDevice = device; } } - ASSERT_NE(mFibonacciDevice, nullptr) << "Expecting Fibonacci driver to be available"; - ASSERT_NE(mCpuDevice, nullptr) << "Expecting CPU driver to be available"; - mDevices = {mFibonacciDevice, mCpuDevice}; + ASSERT_NE(fibonacciDevice, nullptr) << "Expecting Fibonacci driver to be available"; + ASSERT_NE(cpuDevice, nullptr) << "Expecting CPU driver to be available"; + mDevices = {fibonacciDevice, cpuDevice}; } virtual void TearDown() { @@ -90,13 +92,12 @@ TypeManager::get()->forTest_reset(); } - void checkSupportedOperations(const std::vector<bool>& expected, - const std::vector<ANeuralNetworksDevice*> devices) { + void checkSupportedOperations(const std::vector<bool>& expected) { const uint32_t kMaxNumberOperations = 256; EXPECT_LE(expected.size(), kMaxNumberOperations); bool supported[kMaxNumberOperations] = {false}; 
EXPECT_EQ(ANeuralNetworksModel_getSupportedOperationsForDevices( - mModel.getHandle(), devices.data(), devices.size(), supported), + mModel.getHandle(), mDevices.data(), mDevices.size(), supported), ANEURALNETWORKS_NO_ERROR); for (size_t i = 0; i < expected.size(); ++i) { SCOPED_TRACE(::testing::Message() << "i = " << i); @@ -104,10 +105,6 @@ } } - void checkSupportedOperations(const std::vector<bool>& expected) { - checkSupportedOperations(expected, mDevices); - } - void prepareForExecution() { ASSERT_EQ(ANeuralNetworksCompilation_createForDevices(mModel.getHandle(), mDevices.data(), mDevices.size(), &mCompilation), @@ -117,10 +114,7 @@ ANEURALNETWORKS_NO_ERROR); } - ANeuralNetworksDevice* mFibonacciDevice = nullptr; - ANeuralNetworksDevice* mCpuDevice = nullptr; - std::vector<ANeuralNetworksDevice*> mDevices; // Fibonacci and CPU devices. - std::vector<ANeuralNetworksDevice*> mAllDevices; + std::vector<ANeuralNetworksDevice*> mDevices; ANeuralNetworksExecution* mExecution = nullptr; ANeuralNetworksCompilation* mCompilation = nullptr; ExtensionModel mModel; @@ -340,20 +334,6 @@ ASSERT_EQ(ANeuralNetworksCompilation_finish(mCompilation), ANEURALNETWORKS_BAD_DATA); } -TEST_F(FibonacciExtensionTest, GetSupportedOperations) { - ExtensionOperandType inputType(Type::TENSOR_FLOAT32, {1}); - ExtensionOperandType outputType(Type::TENSOR_FLOAT32, {1}); - createModel(&mModel, inputType, outputType, /*addNopOperations=*/false); - - for (ANeuralNetworksDevice* device : mAllDevices) { - const char* name = nullptr; - ASSERT_EQ(ANeuralNetworksDevice_getName(device, &name), ANEURALNETWORKS_NO_ERROR); - SCOPED_TRACE(::testing::Message() << "device = " << name); - // Only Fibonacci device should support Fibonacci operation. - checkSupportedOperations({device == mFibonacciDevice}, {device}); - } -} - } // namespace } // namespace nn } // namespace android
diff --git a/runtime/test/fuzzing/RandomGraphGenerator.cpp b/runtime/test/fuzzing/RandomGraphGenerator.cpp index 8ba763a..7307f68 100644 --- a/runtime/test/fuzzing/RandomGraphGenerator.cpp +++ b/runtime/test/fuzzing/RandomGraphGenerator.cpp
@@ -44,7 +44,7 @@ RandomOperand::RandomOperand(const OperandSignature& operand, TestOperandType dataType, uint32_t rank) : type(operand.type), finalizer(operand.finalizer) { - NN_FUZZER_LOG << "Operand: " << type; + NN_FUZZER_LOG << "Operand: " << toString(type); if (operand.constructor) operand.constructor(dataType, rank, this); } @@ -81,7 +81,7 @@ // Construct a RandomOperation from OperationSignature. RandomOperation::RandomOperation(const OperationSignature& operation) : opType(operation.opType), finalizer(operation.finalizer) { - NN_FUZZER_LOG << "Operation: " << opType; + NN_FUZZER_LOG << "Operation: " << toString(opType); // Determine the data type and rank of the operation and invoke the constructor. TestOperandType dataType = getRandomChoice(operation.supportedDataTypes); @@ -187,11 +187,6 @@ if (operand->type == RandomOperandType::INPUT) numInputs++; } - auto requiresBufferAllocation = [](std::shared_ptr<RandomOperand>& operand) -> bool { - return operand->type != RandomOperandType::INTERNAL && - operand->type != RandomOperandType::NO_VALUE; - }; - for (auto& operand : mOperands) { // Turn INPUT into CONST with probability prob. Need to keep at least one INPUT. float prob = 0.5f; @@ -199,7 +194,8 @@ if (operand->type == RandomOperandType::INPUT) numInputs--; operand->type = RandomOperandType::CONST; } - if (requiresBufferAllocation(operand)) { + if (operand->type != RandomOperandType::INTERNAL && + operand->type != RandomOperandType::NO_VALUE) { if (operand->buffer.empty()) operand->resizeBuffer<uint8_t>(operand->getBufferSize()); // If operand is set by randomBuffer, copy the frozen values into buffer. 
if (!operand->randomBuffer.empty()) { @@ -213,20 +209,6 @@ } for (auto& operation : mOperations) { - for (auto operand : operation.inputs) { - if (requiresBufferAllocation(operand)) { - NN_FUZZER_CHECK(!operand->buffer.empty()) - << " input operand has no allocated buffer!"; - } - } - - for (auto& operand : operation.outputs) { - if (requiresBufferAllocation(operand)) { - NN_FUZZER_CHECK(!operand->buffer.empty()) - << " output operand has no allocated buffer!"; - } - } - if (operation.finalizer) operation.finalizer(&operation); } return true; @@ -294,14 +276,14 @@ // Set model operations. for (auto& operation : mOperations) { - NN_FUZZER_LOG << "Operation: " << operation.opType; + NN_FUZZER_LOG << "Operation: " << toString(operation.opType); TestOperation testOperation = {.type = static_cast<TestOperationType>(operation.opType)}; for (auto& op : operation.inputs) { - NN_FUZZER_LOG << *op; + NN_FUZZER_LOG << toString(*op); testOperation.inputs.push_back(op->opIndex); } for (auto& op : operation.outputs) { - NN_FUZZER_LOG << *op; + NN_FUZZER_LOG << toString(*op); testOperation.outputs.push_back(op->opIndex); } testModel.main.operations.push_back(std::move(testOperation));
diff --git a/runtime/test/fuzzing/RandomGraphGeneratorUtils.cpp b/runtime/test/fuzzing/RandomGraphGeneratorUtils.cpp index 32fd62c..54d468d 100644 --- a/runtime/test/fuzzing/RandomGraphGeneratorUtils.cpp +++ b/runtime/test/fuzzing/RandomGraphGeneratorUtils.cpp
@@ -45,9 +45,6 @@ return oss.str(); } -bool LoggerStream::mWriteAbortMessageToSystemLog = false; -std::string LoggerStream::mLogTag; - } // namespace fuzzing_test } // namespace nn } // namespace android
diff --git a/runtime/test/fuzzing/RandomGraphGeneratorUtils.h b/runtime/test/fuzzing/RandomGraphGeneratorUtils.h index 8faae12..163c46f 100644 --- a/runtime/test/fuzzing/RandomGraphGeneratorUtils.h +++ b/runtime/test/fuzzing/RandomGraphGeneratorUtils.h
@@ -17,8 +17,6 @@ #ifndef ANDROID_FRAMEWORKS_ML_NN_RUNTIME_TEST_FUZZING_RANDOM_GRAPH_GENERATOR_UTILS_H #define ANDROID_FRAMEWORKS_ML_NN_RUNTIME_TEST_FUZZING_RANDOM_GRAPH_GENERATOR_UTILS_H -#include <android/log.h> - #include <chrono> #include <fstream> #include <limits> @@ -38,8 +36,6 @@ namespace fuzzing_test { #define NN_FUZZER_LOG_INIT(filename) Logger::get()->init((filename)) -#define NN_FUZZER_LOG_WRITE_FATAL_TO_SYSLOG(logTag) \ - LoggerStream::writeAbortMessageToSystemLog(logTag) #define NN_FUZZER_LOG_CLOSE Logger::get()->close() #define NN_FUZZER_LOG \ if (!Logger::get()->enabled()) \ @@ -88,11 +84,7 @@ ~LoggerStream() { Logger::get()->log(ss.str() + '\n'); if (mAbortAfterLog) { - if (LoggerStream::mWriteAbortMessageToSystemLog) { - __android_log_print(ANDROID_LOG_FATAL, mLogTag.c_str(), "%s", ss.str().c_str()); - } else { - std::cout << ss.str() << std::endl; - } + std::cout << ss.str() << std::endl; abort(); } } @@ -103,29 +95,26 @@ return *this; } - static void writeAbortMessageToSystemLog(const std::string& logTag) { - LoggerStream::mWriteAbortMessageToSystemLog = true; - LoggerStream::mLogTag = logTag; - } - private: LoggerStream(const LoggerStream&) = delete; LoggerStream& operator=(const LoggerStream&) = delete; std::stringstream ss; bool mAbortAfterLog; - - static bool mWriteAbortMessageToSystemLog; - static std::string mLogTag; }; template <typename T> +inline std::string toString(const T& obj) { + return std::to_string(obj); +} + +template <typename T> inline std::string joinStr(const std::string& joint, const std::vector<T>& items) { std::stringstream ss; for (uint32_t i = 0; i < items.size(); i++) { if (i == 0) { - ss << items[i]; + ss << toString(items[i]); } else { - ss << joint << items[i]; + ss << joint << toString(items[i]); } } return ss.str(); @@ -145,15 +134,18 @@ inline std::string joinStr(const std::string& joint, int limit, const std::vector<T>& items) { if (items.size() > static_cast<size_t>(limit)) { std::vector<T> 
topMax(items.begin(), items.begin() + limit); - std::stringstream ss; - ss << joinStr(joint, topMax) << ", (" << (items.size() - limit) << " omitted), " - << items.back(); - return ss.str(); + return joinStr(joint, topMax) + ", (" + toString(items.size() - limit) + " omitted), " + + toString(items.back()); } else { return joinStr(joint, items); } } +static const char* kLifeTimeNames[6] = { + "TEMPORARY_VARIABLE", "SUBGRAPH_INPUT", "SUBGRAPH_OUTPUT", + "CONSTANT_COPY", "CONSTANT_REFERENCE", "NO_VALUE", +}; + static const bool kScalarDataType[]{ true, // ANEURALNETWORKS_FLOAT32 true, // ANEURALNETWORKS_INT32 @@ -190,9 +182,10 @@ 1, // ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED }; -inline std::ostream& operator<<(std::ostream& os, const RandomVariableType& type) { +template <> +inline std::string toString<RandomVariableType>(const RandomVariableType& type) { static const std::string typeNames[] = {"FREE", "CONST", "OP"}; - return os << typeNames[static_cast<int>(type)]; + return typeNames[static_cast<int>(type)]; } inline std::string alignedString(std::string str, int width) { @@ -201,45 +194,51 @@ return str; }
toString<RandomVariableNode>(const RandomVariableNode& var) { + std::stringstream ss; + ss << "var" << var->index << " = "; switch (var->type) { case RandomVariableType::FREE: - os << "FREE " << var->range; + ss << "FREE " << toString(var->range); break; case RandomVariableType::CONST: - os << "CONST " << var->value; + ss << "CONST " << toString(var->value); break; case RandomVariableType::OP: - os << "var" << var->parent1->index << " " << var->op->getName(); - if (var->parent2 != nullptr) os << " var" << var->parent2->index; - os << ", " << var->range; + ss << "var" << var->parent1->index << " " << var->op->getName(); + if (var->parent2 != nullptr) ss << " var" << var->parent2->index; + ss << ", " << toString(var->range); break; default: NN_FUZZER_CHECK(false); } - os << ", timestamp = " << var->timestamp; - return os; + ss << ", timestamp = " << var->timestamp; + return ss.str(); } -inline std::ostream& operator<<(std::ostream& os, const RandomVariable& var) { - return os << "var" + std::to_string(var.get()->index); +template <> +inline std::string toString<RandomVariable>(const RandomVariable& var) { + return "var" + std::to_string(var.get()->index); } -inline std::ostream& operator<<(std::ostream& os, const RandomOperand& op) { - return os << op.type << ", dimension = [" - << joinStr(", ", op.dimensions, - [](const RandomVariable& var) { return std::to_string(var.getValue()); }) - << "], scale = " << op.scale << " , zero_point = " << op.zeroPoint; +template <> +inline std::string toString<RandomOperand>(const RandomOperand& op) { + return toString(op.type) + ", dimension = [" + + joinStr(", ", op.dimensions, + [](const RandomVariable& var) { return std::to_string(var.getValue()); }) + + "], scale = " + toString(op.scale) + " , zero_point = " + toString(op.zeroPoint); } // This class is a workaround for two issues our code relies on:
diff --git a/runtime/test/fuzzing/RandomVariable.cpp b/runtime/test/fuzzing/RandomVariable.cpp index f1067e1..d3f6ef7 100644 --- a/runtime/test/fuzzing/RandomVariable.cpp +++ b/runtime/test/fuzzing/RandomVariable.cpp
@@ -1,1225 +1,1225 @@ -/* - * Copyright (C) 2019 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "RandomVariable.h" - -#include <algorithm> -#include <memory> -#include <set> -#include <string> -#include <unordered_map> -#include <utility> -#include <vector> - -#include "RandomGraphGeneratorUtils.h" - -namespace android { -namespace nn { -namespace fuzzing_test { - -unsigned int RandomVariableBase::globalIndex = 0; -int RandomVariable::defaultValue = 10; - -RandomVariableBase::RandomVariableBase(int value) - : index(globalIndex++), - type(RandomVariableType::CONST), - range(value), - value(value), - timestamp(RandomVariableNetwork::get()->getGlobalTime()) {} - -RandomVariableBase::RandomVariableBase(int lower, int upper) - : index(globalIndex++), - type(RandomVariableType::FREE), - range(lower, upper), - timestamp(RandomVariableNetwork::get()->getGlobalTime()) {} - -RandomVariableBase::RandomVariableBase(const std::vector<int>& choices) - : index(globalIndex++), - type(RandomVariableType::FREE), - range(choices), - timestamp(RandomVariableNetwork::get()->getGlobalTime()) {} - -RandomVariableBase::RandomVariableBase(const RandomVariableNode& lhs, const RandomVariableNode& rhs, - const std::shared_ptr<const IRandomVariableOp>& op) - : index(globalIndex++), - type(RandomVariableType::OP), - range(op->getInitRange(lhs->range, rhs == nullptr ? 
RandomVariableRange(0) : rhs->range)), - op(op), - parent1(lhs), - parent2(rhs), - timestamp(RandomVariableNetwork::get()->getGlobalTime()) {} - -void RandomVariableRange::setRange(int lower, int upper) { - // kInvalidValue indicates unlimited bound. - auto head = lower == kInvalidValue ? mChoices.begin() - : std::lower_bound(mChoices.begin(), mChoices.end(), lower); - auto tail = upper == kInvalidValue ? mChoices.end() - : std::upper_bound(mChoices.begin(), mChoices.end(), upper); - NN_FUZZER_CHECK(head <= tail) << "Invalid range!"; - if (head != mChoices.begin() || tail != mChoices.end()) { - mChoices = std::vector<int>(head, tail); - } -} - -int RandomVariableRange::toConst() { - if (mChoices.size() > 1) mChoices = {getRandomChoice(mChoices)}; - return mChoices[0]; -} - -RandomVariableRange operator&(const RandomVariableRange& lhs, const RandomVariableRange& rhs) { - std::vector<int> result(lhs.size() + rhs.size()); - auto it = std::set_intersection(lhs.mChoices.begin(), lhs.mChoices.end(), rhs.mChoices.begin(), - rhs.mChoices.end(), result.begin()); - result.resize(it - result.begin()); - return RandomVariableRange(std::move(result)); -} - -void RandomVariableBase::freeze() { - if (type == RandomVariableType::CONST) return; - value = range.toConst(); - type = RandomVariableType::CONST; -} - -int RandomVariableBase::getValue() const { - switch (type) { - case RandomVariableType::CONST: - return value; - case RandomVariableType::OP: - return op->eval(parent1->getValue(), parent2 == nullptr ? 
0 : parent2->getValue()); - default: - NN_FUZZER_CHECK(false) << "Invalid type when getting value of var" << index; - return 0; - } -} - -void RandomVariableBase::updateTimestamp() { - timestamp = RandomVariableNetwork::get()->getGlobalTime(); - NN_FUZZER_LOG << "Update timestamp of var" << index << " to " << timestamp; -} - -RandomVariable::RandomVariable(int value) : mVar(new RandomVariableBase(value)) { - NN_FUZZER_LOG << "New RandomVariable " << mVar; - RandomVariableNetwork::get()->add(mVar); -} -RandomVariable::RandomVariable(int lower, int upper) : mVar(new RandomVariableBase(lower, upper)) { - NN_FUZZER_LOG << "New RandomVariable " << mVar; - RandomVariableNetwork::get()->add(mVar); -} -RandomVariable::RandomVariable(const std::vector<int>& choices) - : mVar(new RandomVariableBase(choices)) { - NN_FUZZER_LOG << "New RandomVariable " << mVar; - RandomVariableNetwork::get()->add(mVar); -} -RandomVariable::RandomVariable(RandomVariableType type) - : mVar(new RandomVariableBase(1, defaultValue)) { - NN_FUZZER_CHECK(type == RandomVariableType::FREE); - NN_FUZZER_LOG << "New RandomVariable " << mVar; - RandomVariableNetwork::get()->add(mVar); -} -RandomVariable::RandomVariable(const RandomVariable& lhs, const RandomVariable& rhs, - const std::shared_ptr<const IRandomVariableOp>& op) - : mVar(new RandomVariableBase(lhs.get(), rhs.get(), op)) { - // Make a copy if the parent is CONST. This will resolve the fake dependency problem. 
- if (mVar->parent1->type == RandomVariableType::CONST) { - mVar->parent1 = RandomVariable(mVar->parent1->value).get(); - } - if (mVar->parent2 != nullptr && mVar->parent2->type == RandomVariableType::CONST) { - mVar->parent2 = RandomVariable(mVar->parent2->value).get(); - } - mVar->parent1->children.push_back(mVar); - if (mVar->parent2 != nullptr) mVar->parent2->children.push_back(mVar); - RandomVariableNetwork::get()->add(mVar); - NN_FUZZER_LOG << "New RandomVariable " << mVar; -} - -void RandomVariable::setRange(int lower, int upper) { - NN_FUZZER_CHECK(mVar != nullptr) << "setRange() on nullptr"; - NN_FUZZER_LOG << "Set range [" << lower << ", " << upper << "] on var" << mVar->index; - size_t oldSize = mVar->range.size(); - mVar->range.setRange(lower, upper); - // Only update the timestamp if the range is *indeed* narrowed down. - if (mVar->range.size() != oldSize) mVar->updateTimestamp(); -} - -RandomVariableRange IRandomVariableOp::getInitRange(const RandomVariableRange& lhs, - const RandomVariableRange& rhs) const { - std::set<int> st; - for (auto i : lhs.getChoices()) { - for (auto j : rhs.getChoices()) { - int res = this->eval(i, j); - if (res > kMaxValue || res < -kMaxValue) continue; - st.insert(res); - } - } - return RandomVariableRange(st); -} - -// Check if the range contains exactly all values in [min, max]. -static inline bool isContinuous(const std::set<int>* range) { - return (*(range->rbegin()) - *(range->begin()) + 1) == static_cast<int>(range->size()); -} - -// Fill the set with a range of values specified by [lower, upper]. -static inline void fillRange(std::set<int>* range, int lower, int upper) { - for (int i = lower; i <= upper; i++) range->insert(i); -} - -// The slowest algorithm: iterate through every combinations of parents and save the valid pairs. 
-void IRandomVariableOp::eval(const std::set<int>* parent1In, const std::set<int>* parent2In, - const std::set<int>* childIn, std::set<int>* parent1Out, - std::set<int>* parent2Out, std::set<int>* childOut) const { - // Avoid the binary search if the child is a closed range. - bool isChildInContinuous = isContinuous(childIn); - std::pair<int, int> child = {*childIn->begin(), *childIn->rbegin()}; - for (auto i : *parent1In) { - bool valid = false; - for (auto j : *parent2In) { - int res = this->eval(i, j); - // Avoid the binary search if obviously out of range. - if (res > child.second || res < child.first) continue; - if (isChildInContinuous || childIn->find(res) != childIn->end()) { - parent2Out->insert(j); - childOut->insert(res); - valid = true; - } - } - if (valid) parent1Out->insert(i); - } -} - -// A helper template to make a class into a Singleton. -template <class T> -class Singleton : public T { - public: - static const std::shared_ptr<const T>& get() { - static std::shared_ptr<const T> instance(new T); - return instance; - } -}; - -// A set of operations that only compute on a single input value. -class IUnaryOp : public IRandomVariableOp { - public: - using IRandomVariableOp::eval; - virtual int eval(int val) const = 0; - virtual int eval(int lhs, int) const override { return eval(lhs); } - // The slowest algorithm: iterate through every value of the parent and save the valid one. 
- virtual void eval(const std::set<int>* parent1In, const std::set<int>* parent2In, - const std::set<int>* childIn, std::set<int>* parent1Out, - std::set<int>* parent2Out, std::set<int>* childOut) const override { - NN_FUZZER_CHECK(parent2In == nullptr); - NN_FUZZER_CHECK(parent2Out == nullptr); - bool isChildInContinuous = isContinuous(childIn); - std::pair<int, int> child = {*childIn->begin(), *childIn->rbegin()}; - for (auto i : *parent1In) { - int res = this->eval(i); - if (res > child.second || res < child.first) continue; - if (isChildInContinuous || childIn->find(res) != childIn->end()) { - parent1Out->insert(i); - childOut->insert(res); - } - } - } -}; - -// A set of operations that only check conditional constraints. -class IConstraintOp : public IRandomVariableOp { - public: - using IRandomVariableOp::eval; - virtual bool check(int lhs, int rhs) const = 0; - virtual int eval(int lhs, int rhs) const override { - return check(lhs, rhs) ? 0 : kInvalidValue; - } - // The range for a constraint op is always {0}. - virtual RandomVariableRange getInitRange(const RandomVariableRange&, - const RandomVariableRange&) const override { - return RandomVariableRange(0); - } - // The slowest algorithm: - // iterate through every combinations of parents and save the valid pairs. 
- virtual void eval(const std::set<int>* parent1In, const std::set<int>* parent2In, - const std::set<int>*, std::set<int>* parent1Out, std::set<int>* parent2Out, - std::set<int>* childOut) const override { - for (auto i : *parent1In) { - bool valid = false; - for (auto j : *parent2In) { - if (this->check(i, j)) { - parent2Out->insert(j); - valid = true; - } - } - if (valid) parent1Out->insert(i); - } - if (!parent1Out->empty()) childOut->insert(0); - } -}; - -class Addition : public IRandomVariableOp { - public: - virtual int eval(int lhs, int rhs) const override { return lhs + rhs; } - virtual RandomVariableRange getInitRange(const RandomVariableRange& lhs, - const RandomVariableRange& rhs) const override { - return RandomVariableRange(lhs.min() + rhs.min(), lhs.max() + rhs.max()); - } - virtual void eval(const std::set<int>* parent1In, const std::set<int>* parent2In, - const std::set<int>* childIn, std::set<int>* parent1Out, - std::set<int>* parent2Out, std::set<int>* childOut) const override { - if (!isContinuous(parent1In) || !isContinuous(parent2In) || !isContinuous(childIn)) { - IRandomVariableOp::eval(parent1In, parent2In, childIn, parent1Out, parent2Out, - childOut); - } else { - // For parents and child with close range, the out range can be computed directly - // without iterations. - std::pair<int, int> parent1 = {*parent1In->begin(), *parent1In->rbegin()}; - std::pair<int, int> parent2 = {*parent2In->begin(), *parent2In->rbegin()}; - std::pair<int, int> child = {*childIn->begin(), *childIn->rbegin()}; - - // From ranges for parent, evaluate range for child. - // [a, b] + [c, d] -> [a + c, b + d] - fillRange(childOut, std::max(child.first, parent1.first + parent2.first), - std::min(child.second, parent1.second + parent2.second)); - - // From ranges for child and one parent, evaluate range for another parent. 
- // [a, b] - [c, d] -> [a - d, b - c] - fillRange(parent1Out, std::max(parent1.first, child.first - parent2.second), - std::min(parent1.second, child.second - parent2.first)); - fillRange(parent2Out, std::max(parent2.first, child.first - parent1.second), - std::min(parent2.second, child.second - parent1.first)); - } - } - virtual const char* getName() const override { return "ADD"; } -}; - -class Subtraction : public IRandomVariableOp { - public: - virtual int eval(int lhs, int rhs) const override { return lhs - rhs; } - virtual RandomVariableRange getInitRange(const RandomVariableRange& lhs, - const RandomVariableRange& rhs) const override { - return RandomVariableRange(lhs.min() - rhs.max(), lhs.max() - rhs.min()); - } - virtual void eval(const std::set<int>* parent1In, const std::set<int>* parent2In, - const std::set<int>* childIn, std::set<int>* parent1Out, - std::set<int>* parent2Out, std::set<int>* childOut) const override { - if (!isContinuous(parent1In) || !isContinuous(parent2In) || !isContinuous(childIn)) { - IRandomVariableOp::eval(parent1In, parent2In, childIn, parent1Out, parent2Out, - childOut); - } else { - // Similar algorithm as Addition. 
- std::pair<int, int> parent1 = {*parent1In->begin(), *parent1In->rbegin()}; - std::pair<int, int> parent2 = {*parent2In->begin(), *parent2In->rbegin()}; - std::pair<int, int> child = {*childIn->begin(), *childIn->rbegin()}; - fillRange(childOut, std::max(child.first, parent1.first - parent2.second), - std::min(child.second, parent1.second - parent2.first)); - fillRange(parent1Out, std::max(parent1.first, child.first + parent2.first), - std::min(parent1.second, child.second + parent2.second)); - fillRange(parent2Out, std::max(parent2.first, parent1.first - child.second), - std::min(parent2.second, parent1.second - child.first)); - } - } - virtual const char* getName() const override { return "SUB"; } -}; - -class Multiplication : public IRandomVariableOp { - public: - virtual int eval(int lhs, int rhs) const override { return lhs * rhs; } - virtual RandomVariableRange getInitRange(const RandomVariableRange& lhs, - const RandomVariableRange& rhs) const override { - if (lhs.min() < 0 || rhs.min() < 0) { - return IRandomVariableOp::getInitRange(lhs, rhs); - } else { - int lower = std::min(lhs.min() * rhs.min(), kMaxValue); - int upper = std::min(lhs.max() * rhs.max(), kMaxValue); - return RandomVariableRange(lower, upper); - } - } - virtual void eval(const std::set<int>* parent1In, const std::set<int>* parent2In, - const std::set<int>* childIn, std::set<int>* parent1Out, - std::set<int>* parent2Out, std::set<int>* childOut) const override { - if (*parent1In->begin() < 0 || *parent2In->begin() < 0 || *childIn->begin() < 0) { - IRandomVariableOp::eval(parent1In, parent2In, childIn, parent1Out, parent2Out, - childOut); - } else { - bool isChildInContinuous = isContinuous(childIn); - std::pair<int, int> child = {*childIn->begin(), *childIn->rbegin()}; - for (auto i : *parent1In) { - bool valid = false; - for (auto j : *parent2In) { - int res = this->eval(i, j); - // Since MUL increases monotonically with one value, break the loop if the - // result is larger than the 
limit. - if (res > child.second) break; - if (res < child.first) continue; - if (isChildInContinuous || childIn->find(res) != childIn->end()) { - valid = true; - parent2Out->insert(j); - childOut->insert(res); - } - } - if (valid) parent1Out->insert(i); - } - } - } - virtual const char* getName() const override { return "MUL"; } -}; - -class Division : public IRandomVariableOp { - public: - virtual int eval(int lhs, int rhs) const override { - return rhs == 0 ? kInvalidValue : lhs / rhs; - } - virtual RandomVariableRange getInitRange(const RandomVariableRange& lhs, - const RandomVariableRange& rhs) const override { - if (lhs.min() < 0 || rhs.min() <= 0) { - return IRandomVariableOp::getInitRange(lhs, rhs); - } else { - return RandomVariableRange(lhs.min() / rhs.max(), lhs.max() / rhs.min()); - } - } - virtual const char* getName() const override { return "DIV"; } -}; - -class ExactDivision : public Division { - public: - virtual int eval(int lhs, int rhs) const override { - return (rhs == 0 || lhs % rhs != 0) ? kInvalidValue : lhs / rhs; - } - virtual const char* getName() const override { return "EXACT_DIV"; } -}; - -class Modulo : public IRandomVariableOp { - public: - virtual int eval(int lhs, int rhs) const override { - return rhs == 0 ? kInvalidValue : lhs % rhs; - } - virtual RandomVariableRange getInitRange(const RandomVariableRange&, - const RandomVariableRange& rhs) const override { - return RandomVariableRange(0, rhs.max()); - } - virtual void eval(const std::set<int>* parent1In, const std::set<int>* parent2In, - const std::set<int>* childIn, std::set<int>* parent1Out, - std::set<int>* parent2Out, std::set<int>* childOut) const override { - if (*childIn->begin() != 0 || childIn->size() != 1u) { - IRandomVariableOp::eval(parent1In, parent2In, childIn, parent1Out, parent2Out, - childOut); - } else { - // For the special case that child is a const 0, it would be faster if the range for - // parents are evaluated separately. - - // Evaluate parent1 directly. 
- for (auto i : *parent1In) { - for (auto j : *parent2In) { - if (i % j == 0) { - parent1Out->insert(i); - break; - } - } - } - // Evaluate parent2, see if a multiple of parent2 value can be found in parent1. - int parent1Max = *parent1In->rbegin(); - for (auto i : *parent2In) { - int jMax = parent1Max / i; - for (int j = 1; j <= jMax; j++) { - if (parent1In->find(i * j) != parent1In->end()) { - parent2Out->insert(i); - break; - } - } - } - if (!parent1Out->empty()) childOut->insert(0); - } - } - virtual const char* getName() const override { return "MOD"; } -}; - -class Maximum : public IRandomVariableOp { - public: - virtual int eval(int lhs, int rhs) const override { return std::max(lhs, rhs); } - virtual const char* getName() const override { return "MAX"; } -}; - -class Minimum : public IRandomVariableOp { - public: - virtual int eval(int lhs, int rhs) const override { return std::min(lhs, rhs); } - virtual const char* getName() const override { return "MIN"; } -}; - -class Square : public IUnaryOp { - public: - virtual int eval(int val) const override { return val * val; } - virtual const char* getName() const override { return "SQUARE"; } -}; - -class UnaryEqual : public IUnaryOp { - public: - virtual int eval(int val) const override { return val; } - virtual const char* getName() const override { return "UNARY_EQUAL"; } -}; - -class Equal : public IConstraintOp { - public: - virtual bool check(int lhs, int rhs) const override { return lhs == rhs; } - virtual void eval(const std::set<int>* parent1In, const std::set<int>* parent2In, - const std::set<int>* childIn, std::set<int>* parent1Out, - std::set<int>* parent2Out, std::set<int>* childOut) const override { - NN_FUZZER_CHECK(childIn->size() == 1u && *childIn->begin() == 0); - // The intersection of two sets can be found in O(n). 
- std::set_intersection(parent1In->begin(), parent1In->end(), parent2In->begin(), - parent2In->end(), std::inserter(*parent1Out, parent1Out->begin())); - *parent2Out = *parent1Out; - childOut->insert(0); - } - virtual const char* getName() const override { return "EQUAL"; } -}; - -class GreaterThan : public IConstraintOp { - public: - virtual bool check(int lhs, int rhs) const override { return lhs > rhs; } - virtual const char* getName() const override { return "GREATER_THAN"; } -}; - -class GreaterEqual : public IConstraintOp { - public: - virtual bool check(int lhs, int rhs) const override { return lhs >= rhs; } - virtual const char* getName() const override { return "GREATER_EQUAL"; } -}; - -class FloatMultiplication : public IUnaryOp { - public: - FloatMultiplication(float multiplicand) : mMultiplicand(multiplicand) {} - virtual int eval(int val) const override { - return static_cast<int>(std::floor(static_cast<float>(val) * mMultiplicand)); - } - virtual const char* getName() const override { return "MUL_FLOAT"; } - - private: - float mMultiplicand; -}; - -// Arithmetic operators and methods on RandomVariables will create OP RandomVariableNodes. -// Since there must be at most one edge between two RandomVariableNodes, we have to do something -// special when both sides are refering to the same node. - -RandomVariable operator+(const RandomVariable& lhs, const RandomVariable& rhs) { - return lhs.get() == rhs.get() ? RandomVariable(lhs, 2, Singleton<Multiplication>::get()) - : RandomVariable(lhs, rhs, Singleton<Addition>::get()); -} -RandomVariable operator-(const RandomVariable& lhs, const RandomVariable& rhs) { - return lhs.get() == rhs.get() ? RandomVariable(0) - : RandomVariable(lhs, rhs, Singleton<Subtraction>::get()); -} -RandomVariable operator*(const RandomVariable& lhs, const RandomVariable& rhs) { - return lhs.get() == rhs.get() ? 
RandomVariable(lhs, RandomVariable(), Singleton<Square>::get()) - : RandomVariable(lhs, rhs, Singleton<Multiplication>::get()); -} -RandomVariable operator*(const RandomVariable& lhs, const float& rhs) { - return RandomVariable(lhs, RandomVariable(), std::make_shared<FloatMultiplication>(rhs)); -} -RandomVariable operator/(const RandomVariable& lhs, const RandomVariable& rhs) { - return lhs.get() == rhs.get() ? RandomVariable(1) - : RandomVariable(lhs, rhs, Singleton<Division>::get()); -} -RandomVariable operator%(const RandomVariable& lhs, const RandomVariable& rhs) { - return lhs.get() == rhs.get() ? RandomVariable(0) - : RandomVariable(lhs, rhs, Singleton<Modulo>::get()); -} -RandomVariable max(const RandomVariable& lhs, const RandomVariable& rhs) { - return lhs.get() == rhs.get() ? lhs : RandomVariable(lhs, rhs, Singleton<Maximum>::get()); -} -RandomVariable min(const RandomVariable& lhs, const RandomVariable& rhs) { - return lhs.get() == rhs.get() ? lhs : RandomVariable(lhs, rhs, Singleton<Minimum>::get()); -} - -RandomVariable RandomVariable::exactDiv(const RandomVariable& other) { - return mVar == other.get() ? RandomVariable(1) - : RandomVariable(*this, other, Singleton<ExactDivision>::get()); -} - -RandomVariable RandomVariable::setEqual(const RandomVariable& other) const { - RandomVariableNode node1 = mVar, node2 = other.get(); - NN_FUZZER_LOG << "Set equality of var" << node1->index << " and var" << node2->index; - - // Do not setEqual on the same pair twice. - if (node1 == node2 || (node1->op == Singleton<UnaryEqual>::get() && node1->parent1 == node2) || - (node2->op == Singleton<UnaryEqual>::get() && node2->parent1 == node1)) { - NN_FUZZER_LOG << "Already equal. Return."; - return RandomVariable(); - } - - // If possible, always try UnaryEqual first to reduce the search space. - // UnaryEqual can be used if node B is FREE and is evaluated later than node A. - // TODO: Reduce code duplication. 
- if (RandomVariableNetwork::get()->isSubordinate(node1, node2)) { - NN_FUZZER_LOG << " Make var" << node2->index << " a child of var" << node1->index; - node2->type = RandomVariableType::OP; - node2->parent1 = node1; - node2->op = Singleton<UnaryEqual>::get(); - node1->children.push_back(node2); - RandomVariableNetwork::get()->join(node1, node2); - node1->updateTimestamp(); - return other; - } - if (RandomVariableNetwork::get()->isSubordinate(node2, node1)) { - NN_FUZZER_LOG << " Make var" << node1->index << " a child of var" << node2->index; - node1->type = RandomVariableType::OP; - node1->parent1 = node2; - node1->op = Singleton<UnaryEqual>::get(); - node2->children.push_back(node1); - RandomVariableNetwork::get()->join(node2, node1); - node1->updateTimestamp(); - return *this; - } - return RandomVariable(*this, other, Singleton<Equal>::get()); -} - -RandomVariable RandomVariable::setGreaterThan(const RandomVariable& other) const { - NN_FUZZER_CHECK(mVar != other.get()); - return RandomVariable(*this, other, Singleton<GreaterThan>::get()); -} -RandomVariable RandomVariable::setGreaterEqual(const RandomVariable& other) const { - return mVar == other.get() ? *this - : RandomVariable(*this, other, Singleton<GreaterEqual>::get()); -} - -void DisjointNetwork::add(const RandomVariableNode& var) { - // Find the subnet index of the parents and decide the index for var. - int ind1 = var->parent1 == nullptr ? -1 : mIndexMap[var->parent1]; - int ind2 = var->parent2 == nullptr ? -1 : mIndexMap[var->parent2]; - int ind = join(ind1, ind2); - // If no parent, put it into a new subnet component. 
- if (ind == -1) ind = mNextIndex++; - NN_FUZZER_LOG << "Add RandomVariable var" << var->index << " to network #" << ind; - mIndexMap[var] = ind; - mEvalOrderMap[ind].push_back(var); -} - -int DisjointNetwork::join(int ind1, int ind2) { - if (ind1 == -1) return ind2; - if (ind2 == -1) return ind1; - if (ind1 == ind2) return ind1; - NN_FUZZER_LOG << "Join network #" << ind1 << " and #" << ind2; - auto &order1 = mEvalOrderMap[ind1], &order2 = mEvalOrderMap[ind2]; - // Append every node in ind2 to the end of ind1 - for (const auto& var : order2) { - order1.push_back(var); - mIndexMap[var] = ind1; - } - // Remove ind2 from mEvalOrderMap. - mEvalOrderMap.erase(mEvalOrderMap.find(ind2)); - return ind1; -} - -RandomVariableNetwork* RandomVariableNetwork::get() { - static RandomVariableNetwork instance; - return &instance; -} - -void RandomVariableNetwork::initialize(int defaultValue) { - RandomVariableBase::globalIndex = 0; - RandomVariable::defaultValue = defaultValue; - mIndexMap.clear(); - mEvalOrderMap.clear(); - mDimProd.clear(); - mNextIndex = 0; - mGlobalTime = 0; - mTimestamp = -1; -} - -bool RandomVariableNetwork::isSubordinate(const RandomVariableNode& node1, - const RandomVariableNode& node2) { - if (node2->type != RandomVariableType::FREE) return false; - int ind1 = mIndexMap[node1]; - // node2 is of a different subnet. - if (ind1 != mIndexMap[node2]) return true; - for (const auto& node : mEvalOrderMap[ind1]) { - if (node == node2) return false; - // node2 is of the same subnet but evaluated later than node1. - if (node == node1) return true; - } - NN_FUZZER_CHECK(false) << "Code executed in non-reachable region."; - return false; -} - -struct EvalInfo { - // The RandomVariableNode that this EvalInfo is associated with. - // var->value is the current value during evaluation. - RandomVariableNode var; - - // The RandomVariable value is staged when a valid combination is found. 
- std::set<int> staging; - - // The staging values are committed after a subnet evaluation. - std::set<int> committed; - - // Keeps track of the latest timestamp that committed is updated. - int timestamp; - - // For evalSubnetWithLocalNetwork. - RandomVariableType originalType; - - // Should only invoke eval on OP RandomVariable. - bool eval() { - NN_FUZZER_CHECK(var->type == RandomVariableType::OP); - var->value = var->op->eval(var->parent1->value, - var->parent2 == nullptr ? 0 : var->parent2->value); - if (var->value == kInvalidValue) return false; - return committed.find(var->value) != committed.end(); - } - void stage() { staging.insert(var->value); } - void commit() { - // Only update committed and timestamp if the range is *indeed* changed. - if (staging.size() != committed.size()) { - committed = std::move(staging); - timestamp = RandomVariableNetwork::get()->getGlobalTime(); - } - staging.clear(); - } - void updateRange() { - // Only update range and timestamp if the range is *indeed* changed. - if (committed.size() != var->range.size()) { - var->range = RandomVariableRange(committed); - var->timestamp = timestamp; - } - committed.clear(); - } - - EvalInfo(const RandomVariableNode& var) - : var(var), - committed(var->range.getChoices().begin(), var->range.getChoices().end()), - timestamp(var->timestamp) {} -}; -using EvalContext = std::unordered_map<RandomVariableNode, EvalInfo>; - -// For logging only. 
-inline std::string toString(const RandomVariableNode& var, EvalContext* context) { - std::stringstream ss; - ss << "var" << var->index << " = "; - const auto& committed = context->at(var).committed; - switch (var->type) { - case RandomVariableType::FREE: - ss << "FREE [" - << joinStr(", ", 20, std::vector<int>(committed.begin(), committed.end())) << "]"; - break; - case RandomVariableType::CONST: - ss << "CONST " << var->value; - break; - case RandomVariableType::OP: - ss << "var" << var->parent1->index << " " << var->op->getName(); - if (var->parent2 != nullptr) ss << " var" << var->parent2->index; - ss << ", [" << joinStr(", ", 20, std::vector<int>(committed.begin(), committed.end())) - << "]"; - break; - default: - NN_FUZZER_CHECK(false); - } - ss << ", timestamp = " << context->at(var).timestamp; - return ss.str(); -} - -// Check if the subnet needs to be re-evaluated by comparing the timestamps. -static inline bool needEvaluate(const EvaluationOrder& evalOrder, int subnetTime, - EvalContext* context = nullptr) { - for (const auto& var : evalOrder) { - int timestamp = context == nullptr ? var->timestamp : context->at(var).timestamp; - // If we find a node that has been modified since last evaluation, the subnet needs to be - // re-evaluated. - if (timestamp > subnetTime) return true; - } - return false; -} - -// Helper function to evaluate the subnet recursively. -// Iterate through all combinations of FREE RandomVariables choices. -static void evalSubnetHelper(const EvaluationOrder& evalOrder, EvalContext* context, size_t i = 0) { - if (i == evalOrder.size()) { - // Reach the end of the evaluation, find a valid combination. - for (auto& var : evalOrder) context->at(var).stage(); - return; - } - const auto& var = evalOrder[i]; - if (var->type == RandomVariableType::FREE) { - // For FREE RandomVariable, iterate through all valid choices. 
- for (int val : context->at(var).committed) { - var->value = val; - evalSubnetHelper(evalOrder, context, i + 1); - } - return; - } else if (var->type == RandomVariableType::OP) { - // For OP RandomVariable, evaluate from parents and terminate if the result is invalid. - if (!context->at(var).eval()) return; - } - evalSubnetHelper(evalOrder, context, i + 1); -} - -// Check if the subnet has only one single OP RandomVariable. -static inline bool isSingleOpSubnet(const EvaluationOrder& evalOrder) { - int numOp = 0; - for (const auto& var : evalOrder) { - if (var->type == RandomVariableType::OP) numOp++; - if (numOp > 1) return false; - } - return numOp != 0; -} - -// Evaluate with a potentially faster approach provided by IRandomVariableOp. -static inline void evalSubnetSingleOpHelper(const EvaluationOrder& evalOrder, - EvalContext* context) { - NN_FUZZER_LOG << "Identified as single op subnet"; - const auto& var = evalOrder.back(); - NN_FUZZER_CHECK(var->type == RandomVariableType::OP); - var->op->eval(&context->at(var->parent1).committed, - var->parent2 == nullptr ? nullptr : &context->at(var->parent2).committed, - &context->at(var).committed, &context->at(var->parent1).staging, - var->parent2 == nullptr ? nullptr : &context->at(var->parent2).staging, - &context->at(var).staging); -} - -// Check if the number of combinations of FREE RandomVariables exceeds the limit. -static inline uint64_t getNumCombinations(const EvaluationOrder& evalOrder, - EvalContext* context = nullptr) { - constexpr uint64_t kLimit = 1e8; - uint64_t numCombinations = 1; - for (const auto& var : evalOrder) { - if (var->type == RandomVariableType::FREE) { - size_t size = - context == nullptr ? var->range.size() : context->at(var).committed.size(); - numCombinations *= size; - // To prevent overflow. - if (numCombinations > kLimit) return kLimit; - } - } - return numCombinations; -} - -// Evaluate the subnet recursively. 
Will return fail if the number of combinations of FREE -// RandomVariable exceeds the threshold kMaxNumCombinations. -static bool evalSubnetWithBruteForce(const EvaluationOrder& evalOrder, EvalContext* context) { - constexpr uint64_t kMaxNumCombinations = 1e7; - NN_FUZZER_LOG << "Evaluate with brute force"; - if (isSingleOpSubnet(evalOrder)) { - // If the network only have one single OP, dispatch to a faster evaluation. - evalSubnetSingleOpHelper(evalOrder, context); - } else { - if (getNumCombinations(evalOrder, context) > kMaxNumCombinations) { - NN_FUZZER_LOG << "Terminate the evaluation because of large search range"; - std::cout << "[ ] Terminate the evaluation because of large search range" - << std::endl; - return false; - } - evalSubnetHelper(evalOrder, context); - } - for (auto& var : evalOrder) { - if (context->at(var).staging.empty()) { - NN_FUZZER_LOG << "Evaluation failed at " << toString(var, context); - return false; - } - context->at(var).commit(); - } - return true; -} - -struct LocalNetwork { - EvaluationOrder evalOrder; - std::vector<RandomVariableNode> bridgeNodes; - int timestamp = 0; - - bool eval(EvalContext* context) { - NN_FUZZER_LOG << "Evaluate local network with timestamp = " << timestamp; - // Temporarily treat bridge nodes as FREE RandomVariables. - for (const auto& var : bridgeNodes) { - context->at(var).originalType = var->type; - var->type = RandomVariableType::FREE; - } - for (const auto& var : evalOrder) { - context->at(var).staging.clear(); - NN_FUZZER_LOG << " - " << toString(var, context); - } - bool success = evalSubnetWithBruteForce(evalOrder, context); - // Reset the RandomVariable types for bridge nodes. - for (const auto& var : bridgeNodes) var->type = context->at(var).originalType; - return success; - } -}; - -// Partition the network further into LocalNetworks based on the result from bridge annotation -// algorithm. 
-class GraphPartitioner : public DisjointNetwork { - public: - GraphPartitioner() = default; - - std::vector<LocalNetwork> partition(const EvaluationOrder& evalOrder, int timestamp) { - annotateBridge(evalOrder); - for (const auto& var : evalOrder) add(var); - return get(timestamp); - } - - private: - GraphPartitioner(const GraphPartitioner&) = delete; - GraphPartitioner& operator=(const GraphPartitioner&) = delete; - - // Find the parent-child relationship between var1 and var2, and reset the bridge. - void setBridgeFlag(const RandomVariableNode& var1, const RandomVariableNode& var2) { - if (var1->parent1 == var2) { - mBridgeInfo[var1].isParent1Bridge = true; - } else if (var1->parent2 == var2) { - mBridgeInfo[var1].isParent2Bridge = true; - } else { - setBridgeFlag(var2, var1); - } - } - - // Annoate the bridges with DFS -- an edge [u, v] is a bridge if none of u's ancestor is - // reachable from a node in the subtree of b. The complexity is O(V + E). - // discoveryTime: The timestamp a node is visited - // lowTime: The min discovery time of all reachable nodes from the subtree of the node. - void annotateBridgeHelper(const RandomVariableNode& var, int* time) { - mBridgeInfo[var].visited = true; - mBridgeInfo[var].discoveryTime = mBridgeInfo[var].lowTime = (*time)++; - - // The algorithm operates on undirected graph. First find all adjacent nodes. - auto adj = var->children; - if (var->parent1 != nullptr) adj.push_back(var->parent1); - if (var->parent2 != nullptr) adj.push_back(var->parent2); - - for (const auto& weakChild : adj) { - auto child = weakChild.lock(); - NN_FUZZER_CHECK(child != nullptr); - if (mBridgeInfo.find(child) == mBridgeInfo.end()) continue; - if (!mBridgeInfo[child].visited) { - mBridgeInfo[child].parent = var; - annotateBridgeHelper(child, time); - - // If none of nodes in the subtree of child is connected to any ancestors of var, - // then it is a bridge. 
- mBridgeInfo[var].lowTime = - std::min(mBridgeInfo[var].lowTime, mBridgeInfo[child].lowTime); - if (mBridgeInfo[child].lowTime > mBridgeInfo[var].discoveryTime) - setBridgeFlag(var, child); - } else if (mBridgeInfo[var].parent != child) { - mBridgeInfo[var].lowTime = - std::min(mBridgeInfo[var].lowTime, mBridgeInfo[child].discoveryTime); - } - } - } - - // Find all bridges in the subnet with DFS. - void annotateBridge(const EvaluationOrder& evalOrder) { - for (const auto& var : evalOrder) mBridgeInfo[var]; - int time = 0; - for (const auto& var : evalOrder) { - if (!mBridgeInfo[var].visited) annotateBridgeHelper(var, &time); - } - } - - // Re-partition the network by treating bridges as no edge. - void add(const RandomVariableNode& var) { - auto parent1 = var->parent1; - auto parent2 = var->parent2; - if (mBridgeInfo[var].isParent1Bridge) var->parent1 = nullptr; - if (mBridgeInfo[var].isParent2Bridge) var->parent2 = nullptr; - DisjointNetwork::add(var); - var->parent1 = parent1; - var->parent2 = parent2; - } - - // Add bridge nodes to the local network and remove single node subnet. - std::vector<LocalNetwork> get(int timestamp) { - std::vector<LocalNetwork> res; - for (auto& pair : mEvalOrderMap) { - // We do not need to evaluate subnet with only a single node. - if (pair.second.size() == 1 && pair.second[0]->parent1 == nullptr) continue; - res.emplace_back(); - for (const auto& var : pair.second) { - if (mBridgeInfo[var].isParent1Bridge) { - res.back().evalOrder.push_back(var->parent1); - res.back().bridgeNodes.push_back(var->parent1); - } - if (mBridgeInfo[var].isParent2Bridge) { - res.back().evalOrder.push_back(var->parent2); - res.back().bridgeNodes.push_back(var->parent2); - } - res.back().evalOrder.push_back(var); - } - res.back().timestamp = timestamp; - } - return res; - } - - // For bridge discovery algorithm. 
- struct BridgeInfo { - bool isParent1Bridge = false; - bool isParent2Bridge = false; - int discoveryTime = 0; - int lowTime = 0; - bool visited = false; - std::shared_ptr<RandomVariableBase> parent = nullptr; - }; - std::unordered_map<RandomVariableNode, BridgeInfo> mBridgeInfo; -}; - -// Evaluate subnets repeatedly until converge. -// Class T_Subnet must have member evalOrder, timestamp, and member function eval. -template <class T_Subnet> -inline bool evalSubnetsRepeatedly(std::vector<T_Subnet>* subnets, EvalContext* context) { - bool terminate = false; - while (!terminate) { - terminate = true; - for (auto& subnet : *subnets) { - if (needEvaluate(subnet.evalOrder, subnet.timestamp, context)) { - if (!subnet.eval(context)) return false; - subnet.timestamp = RandomVariableNetwork::get()->getGlobalTime(); - terminate = false; - } - } - } - return true; -} - -// Evaluate the subnet by first partitioning it further into LocalNetworks. -static bool evalSubnetWithLocalNetwork(const EvaluationOrder& evalOrder, int timestamp, - EvalContext* context) { - NN_FUZZER_LOG << "Evaluate with local network"; - auto localNetworks = GraphPartitioner().partition(evalOrder, timestamp); - return evalSubnetsRepeatedly(&localNetworks, context); -} - -struct LeafNetwork { - EvaluationOrder evalOrder; - int timestamp = 0; - LeafNetwork(const RandomVariableNode& var, int timestamp) : timestamp(timestamp) { - std::set<RandomVariableNode> visited; - constructorHelper(var, &visited); - } - // Construct the leaf network by recursively including parent nodes. 
- void constructorHelper(const RandomVariableNode& var, std::set<RandomVariableNode>* visited) { - if (var == nullptr || visited->find(var) != visited->end()) return; - constructorHelper(var->parent1, visited); - constructorHelper(var->parent2, visited); - visited->insert(var); - evalOrder.push_back(var); - } - bool eval(EvalContext* context) { - return evalSubnetWithLocalNetwork(evalOrder, timestamp, context); - } -}; - -// Evaluate the subnet by leaf network. -// NOTE: This algorithm will only produce correct result for *most* of the time (> 99%). -// The random graph generator is expected to retry if it fails. -static bool evalSubnetWithLeafNetwork(const EvaluationOrder& evalOrder, int timestamp, - EvalContext* context) { - NN_FUZZER_LOG << "Evaluate with leaf network"; - // Construct leaf networks. - std::vector<LeafNetwork> leafNetworks; - for (const auto& var : evalOrder) { - if (var->children.empty()) { - NN_FUZZER_LOG << "Found leaf " << toString(var, context); - leafNetworks.emplace_back(var, timestamp); - } - } - return evalSubnetsRepeatedly(&leafNetworks, context); -} - -void RandomVariableNetwork::addDimensionProd(const std::vector<RandomVariable>& dims) { - if (dims.size() <= 1) return; - EvaluationOrder order; - for (const auto& dim : dims) order.push_back(dim.get()); - mDimProd.push_back(order); -} - -bool enforceDimProd(const std::vector<EvaluationOrder>& mDimProd, - const std::unordered_map<RandomVariableNode, int>& indexMap, - EvalContext* context, std::set<int>* dirtySubnets) { - for (auto& evalOrder : mDimProd) { - NN_FUZZER_LOG << " Dimension product network size = " << evalOrder.size(); - // Initialize EvalInfo of each RandomVariable. - for (auto& var : evalOrder) { - if (context->find(var) == context->end()) context->emplace(var, var); - NN_FUZZER_LOG << " - " << toString(var, context); - } - - // Enforce the product of the dimension values below kMaxValue: - // max(dimA) = kMaxValue / (min(dimB) * min(dimC) * ...) 
- int prod = 1; - for (const auto& var : evalOrder) prod *= (*context->at(var).committed.begin()); - for (auto& var : evalOrder) { - auto& committed = context->at(var).committed; - int maxValue = kMaxValue / (prod / *committed.begin()); - auto it = committed.upper_bound(maxValue); - // var has empty range -> no solution. - if (it == committed.begin()) return false; - // The range is not modified -> continue. - if (it == committed.end()) continue; - // The range is modified -> the subnet of var is dirty, i.e. needs re-evaluation. - committed.erase(it, committed.end()); - context->at(var).timestamp = RandomVariableNetwork::get()->getGlobalTime(); - dirtySubnets->insert(indexMap.at(var)); - } - } - return true; -} - -bool RandomVariableNetwork::evalRange() { - constexpr uint64_t kMaxNumCombinationsWithBruteForce = 500; - constexpr uint64_t kMaxNumCombinationsWithLocalNetwork = 1e5; - NN_FUZZER_LOG << "Evaluate on " << mEvalOrderMap.size() << " sub-networks"; - EvalContext context; - std::set<int> dirtySubnets; // Which subnets needs evaluation. - for (auto& pair : mEvalOrderMap) { - const auto& evalOrder = pair.second; - // Decide whether needs evaluation by timestamp -- if no range has changed after the last - // evaluation, then the subnet does not need re-evaluation. - if (evalOrder.size() == 1 || !needEvaluate(evalOrder, mTimestamp)) continue; - dirtySubnets.insert(pair.first); - } - if (!enforceDimProd(mDimProd, mIndexMap, &context, &dirtySubnets)) return false; - - // Repeat until the ranges converge. - while (!dirtySubnets.empty()) { - for (int ind : dirtySubnets) { - const auto& evalOrder = mEvalOrderMap[ind]; - NN_FUZZER_LOG << " Sub-network #" << ind << " size = " << evalOrder.size(); - - // Initialize EvalInfo of each RandomVariable. - for (auto& var : evalOrder) { - if (context.find(var) == context.end()) context.emplace(var, var); - NN_FUZZER_LOG << " - " << toString(var, &context); - } - - // Dispatch to different algorithm according to search range. 
- bool success; - uint64_t numCombinations = getNumCombinations(evalOrder); - if (numCombinations <= kMaxNumCombinationsWithBruteForce) { - success = evalSubnetWithBruteForce(evalOrder, &context); - } else if (numCombinations <= kMaxNumCombinationsWithLocalNetwork) { - success = evalSubnetWithLocalNetwork(evalOrder, mTimestamp, &context); - } else { - success = evalSubnetWithLeafNetwork(evalOrder, mTimestamp, &context); - } - if (!success) return false; - } - dirtySubnets.clear(); - if (!enforceDimProd(mDimProd, mIndexMap, &context, &dirtySubnets)) return false; - } - // A successful evaluation, update RandomVariables from EvalContext. - for (auto& pair : context) pair.second.updateRange(); - mTimestamp = getGlobalTime(); - NN_FUZZER_LOG << "Finish range evaluation"; - return true; -} - -static void unsetEqual(const RandomVariableNode& node) { - if (node == nullptr) return; - NN_FUZZER_LOG << "Unset equality of var" << node->index; - auto weakPtrEqual = [&node](const std::weak_ptr<RandomVariableBase>& ptr) { - return ptr.lock() == node; - }; - RandomVariableNode parent1 = node->parent1, parent2 = node->parent2; - parent1->children.erase( - std::find_if(parent1->children.begin(), parent1->children.end(), weakPtrEqual)); - node->parent1 = nullptr; - if (parent2 != nullptr) { - // For Equal. - parent2->children.erase( - std::find_if(parent2->children.begin(), parent2->children.end(), weakPtrEqual)); - node->parent2 = nullptr; - } else { - // For UnaryEqual. - node->type = RandomVariableType::FREE; - node->op = nullptr; - } -} - -// A class to revert all the changes made to RandomVariableNetwork since the Reverter object is -// constructed. Only used when setEqualIfCompatible results in incompatible. -class RandomVariableNetwork::Reverter { - public: - // Take a snapshot of RandomVariableNetwork when Reverter is constructed. - Reverter() : mSnapshot(*RandomVariableNetwork::get()) {} - // Add constraint (Equal) nodes to the reverter. 
- void addNode(const RandomVariableNode& node) { mEqualNodes.push_back(node); } - void revert() { - NN_FUZZER_LOG << "Revert RandomVariableNetwork"; - // Release the constraints. - for (const auto& node : mEqualNodes) unsetEqual(node); - // Reset all member variables. - *RandomVariableNetwork::get() = std::move(mSnapshot); - } - - private: - Reverter(const Reverter&) = delete; - Reverter& operator=(const Reverter&) = delete; - RandomVariableNetwork mSnapshot; - std::vector<RandomVariableNode> mEqualNodes; -}; - -bool RandomVariableNetwork::setEqualIfCompatible(const std::vector<RandomVariable>& lhs, - const std::vector<RandomVariable>& rhs) { - NN_FUZZER_LOG << "Check compatibility of {" << joinStr(", ", lhs) << "} and {" - << joinStr(", ", rhs) << "}"; - if (lhs.size() != rhs.size()) return false; - Reverter reverter; - bool result = true; - for (size_t i = 0; i < lhs.size(); i++) { - auto node = lhs[i].setEqual(rhs[i]).get(); - reverter.addNode(node); - // Early terminate if there is no common choice between two ranges. - if (node != nullptr && node->range.empty()) result = false; - } - result = result && evalRange(); - if (!result) reverter.revert(); - NN_FUZZER_LOG << "setEqualIfCompatible: " << (result ? "[COMPATIBLE]" : "[INCOMPATIBLE]"); - return result; -} - -bool RandomVariableNetwork::freeze() { - NN_FUZZER_LOG << "Freeze the random network"; - if (!evalRange()) return false; - - std::vector<RandomVariableNode> nodes; - for (const auto& pair : mEvalOrderMap) { - // Find all FREE RandomVariables in the subnet. - for (const auto& var : pair.second) { - if (var->type == RandomVariableType::FREE) nodes.push_back(var); - } - } - - // Randomly shuffle the order, this is for a more uniform randomness. - randomShuffle(&nodes); - - // An inefficient algorithm that does freeze -> re-evaluate for every FREE RandomVariable. - // TODO: Might be able to optimize this. 
- for (const auto& var : nodes) { - if (var->type != RandomVariableType::FREE) continue; - size_t size = var->range.size(); - NN_FUZZER_LOG << "Freeze " << var; - var->freeze(); - NN_FUZZER_LOG << " " << var; - // There is no need to re-evaluate if the FREE RandomVariable have only one choice. - if (size > 1) { - var->updateTimestamp(); - if (!evalRange()) { - NN_FUZZER_LOG << "Freeze failed at " << var; - return false; - } - } - } - NN_FUZZER_LOG << "Finish freezing the random network"; - return true; -} - -} // namespace fuzzing_test -} // namespace nn -} // namespace android +/* + * Copyright (C) 2019 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "RandomVariable.h" + +#include <algorithm> +#include <memory> +#include <set> +#include <string> +#include <unordered_map> +#include <utility> +#include <vector> + +#include "RandomGraphGeneratorUtils.h" + +namespace android { +namespace nn { +namespace fuzzing_test { + +unsigned int RandomVariableBase::globalIndex = 0; +int RandomVariable::defaultValue = 10; + +RandomVariableBase::RandomVariableBase(int value) + : index(globalIndex++), + type(RandomVariableType::CONST), + range(value), + value(value), + timestamp(RandomVariableNetwork::get()->getGlobalTime()) {} + +RandomVariableBase::RandomVariableBase(int lower, int upper) + : index(globalIndex++), + type(RandomVariableType::FREE), + range(lower, upper), + timestamp(RandomVariableNetwork::get()->getGlobalTime()) {} + +RandomVariableBase::RandomVariableBase(const std::vector<int>& choices) + : index(globalIndex++), + type(RandomVariableType::FREE), + range(choices), + timestamp(RandomVariableNetwork::get()->getGlobalTime()) {} + +RandomVariableBase::RandomVariableBase(const RandomVariableNode& lhs, const RandomVariableNode& rhs, + const std::shared_ptr<const IRandomVariableOp>& op) + : index(globalIndex++), + type(RandomVariableType::OP), + range(op->getInitRange(lhs->range, rhs == nullptr ? RandomVariableRange(0) : rhs->range)), + op(op), + parent1(lhs), + parent2(rhs), + timestamp(RandomVariableNetwork::get()->getGlobalTime()) {} + +void RandomVariableRange::setRange(int lower, int upper) { + // kInvalidValue indicates unlimited bound. + auto head = lower == kInvalidValue ? mChoices.begin() + : std::lower_bound(mChoices.begin(), mChoices.end(), lower); + auto tail = upper == kInvalidValue ? 
mChoices.end() + : std::upper_bound(mChoices.begin(), mChoices.end(), upper); + NN_FUZZER_CHECK(head <= tail) << "Invalid range!"; + if (head != mChoices.begin() || tail != mChoices.end()) { + mChoices = std::vector<int>(head, tail); + } +} + +int RandomVariableRange::toConst() { + if (mChoices.size() > 1) mChoices = {getRandomChoice(mChoices)}; + return mChoices[0]; +} + +RandomVariableRange operator&(const RandomVariableRange& lhs, const RandomVariableRange& rhs) { + std::vector<int> result(lhs.size() + rhs.size()); + auto it = std::set_intersection(lhs.mChoices.begin(), lhs.mChoices.end(), rhs.mChoices.begin(), + rhs.mChoices.end(), result.begin()); + result.resize(it - result.begin()); + return RandomVariableRange(std::move(result)); +} + +void RandomVariableBase::freeze() { + if (type == RandomVariableType::CONST) return; + value = range.toConst(); + type = RandomVariableType::CONST; +} + +int RandomVariableBase::getValue() const { + switch (type) { + case RandomVariableType::CONST: + return value; + case RandomVariableType::OP: + return op->eval(parent1->getValue(), parent2 == nullptr ? 
0 : parent2->getValue()); + default: + NN_FUZZER_CHECK(false) << "Invalid type when getting value of var" << index; + return 0; + } +} + +void RandomVariableBase::updateTimestamp() { + timestamp = RandomVariableNetwork::get()->getGlobalTime(); + NN_FUZZER_LOG << "Update timestamp of var" << index << " to " << timestamp; +} + +RandomVariable::RandomVariable(int value) : mVar(new RandomVariableBase(value)) { + NN_FUZZER_LOG << "New RandomVariable " << toString(mVar); + RandomVariableNetwork::get()->add(mVar); +} +RandomVariable::RandomVariable(int lower, int upper) : mVar(new RandomVariableBase(lower, upper)) { + NN_FUZZER_LOG << "New RandomVariable " << toString(mVar); + RandomVariableNetwork::get()->add(mVar); +} +RandomVariable::RandomVariable(const std::vector<int>& choices) + : mVar(new RandomVariableBase(choices)) { + NN_FUZZER_LOG << "New RandomVariable " << toString(mVar); + RandomVariableNetwork::get()->add(mVar); +} +RandomVariable::RandomVariable(RandomVariableType type) + : mVar(new RandomVariableBase(1, defaultValue)) { + NN_FUZZER_CHECK(type == RandomVariableType::FREE); + NN_FUZZER_LOG << "New RandomVariable " << toString(mVar); + RandomVariableNetwork::get()->add(mVar); +} +RandomVariable::RandomVariable(const RandomVariable& lhs, const RandomVariable& rhs, + const std::shared_ptr<const IRandomVariableOp>& op) + : mVar(new RandomVariableBase(lhs.get(), rhs.get(), op)) { + // Make a copy if the parent is CONST. This will resolve the fake dependency problem. 
+ if (mVar->parent1->type == RandomVariableType::CONST) { + mVar->parent1 = RandomVariable(mVar->parent1->value).get(); + } + if (mVar->parent2 != nullptr && mVar->parent2->type == RandomVariableType::CONST) { + mVar->parent2 = RandomVariable(mVar->parent2->value).get(); + } + mVar->parent1->children.push_back(mVar); + if (mVar->parent2 != nullptr) mVar->parent2->children.push_back(mVar); + RandomVariableNetwork::get()->add(mVar); + NN_FUZZER_LOG << "New RandomVariable " << toString(mVar); +} + +void RandomVariable::setRange(int lower, int upper) { + NN_FUZZER_CHECK(mVar != nullptr) << "setRange() on nullptr"; + NN_FUZZER_LOG << "Set range [" << lower << ", " << upper << "] on var" << mVar->index; + size_t oldSize = mVar->range.size(); + mVar->range.setRange(lower, upper); + // Only update the timestamp if the range is *indeed* narrowed down. + if (mVar->range.size() != oldSize) mVar->updateTimestamp(); +} + +RandomVariableRange IRandomVariableOp::getInitRange(const RandomVariableRange& lhs, + const RandomVariableRange& rhs) const { + std::set<int> st; + for (auto i : lhs.getChoices()) { + for (auto j : rhs.getChoices()) { + int res = this->eval(i, j); + if (res > kMaxValue || res < -kMaxValue) continue; + st.insert(res); + } + } + return RandomVariableRange(st); +} + +// Check if the range contains exactly all values in [min, max]. +static inline bool isContinuous(const std::set<int>* range) { + return (*(range->rbegin()) - *(range->begin()) + 1) == static_cast<int>(range->size()); +} + +// Fill the set with a range of values specified by [lower, upper]. +static inline void fillRange(std::set<int>* range, int lower, int upper) { + for (int i = lower; i <= upper; i++) range->insert(i); +} + +// The slowest algorithm: iterate through every combinations of parents and save the valid pairs. 
+void IRandomVariableOp::eval(const std::set<int>* parent1In, const std::set<int>* parent2In, + const std::set<int>* childIn, std::set<int>* parent1Out, + std::set<int>* parent2Out, std::set<int>* childOut) const { + // Avoid the binary search if the child is a closed range. + bool isChildInContinuous = isContinuous(childIn); + std::pair<int, int> child = {*childIn->begin(), *childIn->rbegin()}; + for (auto i : *parent1In) { + bool valid = false; + for (auto j : *parent2In) { + int res = this->eval(i, j); + // Avoid the binary search if obviously out of range. + if (res > child.second || res < child.first) continue; + if (isChildInContinuous || childIn->find(res) != childIn->end()) { + parent2Out->insert(j); + childOut->insert(res); + valid = true; + } + } + if (valid) parent1Out->insert(i); + } +} + +// A helper template to make a class into a Singleton. +template <class T> +class Singleton : public T { + public: + static const std::shared_ptr<const T>& get() { + static std::shared_ptr<const T> instance(new T); + return instance; + } +}; + +// A set of operations that only compute on a single input value. +class IUnaryOp : public IRandomVariableOp { + public: + using IRandomVariableOp::eval; + virtual int eval(int val) const = 0; + virtual int eval(int lhs, int) const override { return eval(lhs); } + // The slowest algorithm: iterate through every value of the parent and save the valid one. 
+ virtual void eval(const std::set<int>* parent1In, const std::set<int>* parent2In, + const std::set<int>* childIn, std::set<int>* parent1Out, + std::set<int>* parent2Out, std::set<int>* childOut) const override { + NN_FUZZER_CHECK(parent2In == nullptr); + NN_FUZZER_CHECK(parent2Out == nullptr); + bool isChildInContinuous = isContinuous(childIn); + std::pair<int, int> child = {*childIn->begin(), *childIn->rbegin()}; + for (auto i : *parent1In) { + int res = this->eval(i); + if (res > child.second || res < child.first) continue; + if (isChildInContinuous || childIn->find(res) != childIn->end()) { + parent1Out->insert(i); + childOut->insert(res); + } + } + } +}; + +// A set of operations that only check conditional constraints. +class IConstraintOp : public IRandomVariableOp { + public: + using IRandomVariableOp::eval; + virtual bool check(int lhs, int rhs) const = 0; + virtual int eval(int lhs, int rhs) const override { + return check(lhs, rhs) ? 0 : kInvalidValue; + } + // The range for a constraint op is always {0}. + virtual RandomVariableRange getInitRange(const RandomVariableRange&, + const RandomVariableRange&) const override { + return RandomVariableRange(0); + } + // The slowest algorithm: + // iterate through every combinations of parents and save the valid pairs. 
+ virtual void eval(const std::set<int>* parent1In, const std::set<int>* parent2In, + const std::set<int>*, std::set<int>* parent1Out, std::set<int>* parent2Out, + std::set<int>* childOut) const override { + for (auto i : *parent1In) { + bool valid = false; + for (auto j : *parent2In) { + if (this->check(i, j)) { + parent2Out->insert(j); + valid = true; + } + } + if (valid) parent1Out->insert(i); + } + if (!parent1Out->empty()) childOut->insert(0); + } +}; + +class Addition : public IRandomVariableOp { + public: + virtual int eval(int lhs, int rhs) const override { return lhs + rhs; } + virtual RandomVariableRange getInitRange(const RandomVariableRange& lhs, + const RandomVariableRange& rhs) const override { + return RandomVariableRange(lhs.min() + rhs.min(), lhs.max() + rhs.max()); + } + virtual void eval(const std::set<int>* parent1In, const std::set<int>* parent2In, + const std::set<int>* childIn, std::set<int>* parent1Out, + std::set<int>* parent2Out, std::set<int>* childOut) const override { + if (!isContinuous(parent1In) || !isContinuous(parent2In) || !isContinuous(childIn)) { + IRandomVariableOp::eval(parent1In, parent2In, childIn, parent1Out, parent2Out, + childOut); + } else { + // For parents and child with close range, the out range can be computed directly + // without iterations. + std::pair<int, int> parent1 = {*parent1In->begin(), *parent1In->rbegin()}; + std::pair<int, int> parent2 = {*parent2In->begin(), *parent2In->rbegin()}; + std::pair<int, int> child = {*childIn->begin(), *childIn->rbegin()}; + + // From ranges for parent, evalute range for child. + // [a, b] + [c, d] -> [a + c, b + d] + fillRange(childOut, std::max(child.first, parent1.first + parent2.first), + std::min(child.second, parent1.second + parent2.second)); + + // From ranges for child and one parent, evalute range for another parent. 
+ // [a, b] - [c, d] -> [a - d, b - c] + fillRange(parent1Out, std::max(parent1.first, child.first - parent2.second), + std::min(parent1.second, child.second - parent2.first)); + fillRange(parent2Out, std::max(parent2.first, child.first - parent1.second), + std::min(parent2.second, child.second - parent1.first)); + } + } + virtual const char* getName() const override { return "ADD"; } +}; + +class Subtraction : public IRandomVariableOp { + public: + virtual int eval(int lhs, int rhs) const override { return lhs - rhs; } + virtual RandomVariableRange getInitRange(const RandomVariableRange& lhs, + const RandomVariableRange& rhs) const override { + return RandomVariableRange(lhs.min() - rhs.max(), lhs.max() - rhs.min()); + } + virtual void eval(const std::set<int>* parent1In, const std::set<int>* parent2In, + const std::set<int>* childIn, std::set<int>* parent1Out, + std::set<int>* parent2Out, std::set<int>* childOut) const override { + if (!isContinuous(parent1In) || !isContinuous(parent2In) || !isContinuous(childIn)) { + IRandomVariableOp::eval(parent1In, parent2In, childIn, parent1Out, parent2Out, + childOut); + } else { + // Similar algorithm as Addition. 
+ std::pair<int, int> parent1 = {*parent1In->begin(), *parent1In->rbegin()}; + std::pair<int, int> parent2 = {*parent2In->begin(), *parent2In->rbegin()}; + std::pair<int, int> child = {*childIn->begin(), *childIn->rbegin()}; + fillRange(childOut, std::max(child.first, parent1.first - parent2.second), + std::min(child.second, parent1.second - parent2.first)); + fillRange(parent1Out, std::max(parent1.first, child.first + parent2.first), + std::min(parent1.second, child.second + parent2.second)); + fillRange(parent2Out, std::max(parent2.first, parent1.first - child.second), + std::min(parent2.second, parent1.second - child.first)); + } + } + virtual const char* getName() const override { return "SUB"; } +}; + +class Multiplication : public IRandomVariableOp { + public: + virtual int eval(int lhs, int rhs) const override { return lhs * rhs; } + virtual RandomVariableRange getInitRange(const RandomVariableRange& lhs, + const RandomVariableRange& rhs) const override { + if (lhs.min() < 0 || rhs.min() < 0) { + return IRandomVariableOp::getInitRange(lhs, rhs); + } else { + int lower = std::min(lhs.min() * rhs.min(), kMaxValue); + int upper = std::min(lhs.max() * rhs.max(), kMaxValue); + return RandomVariableRange(lower, upper); + } + } + virtual void eval(const std::set<int>* parent1In, const std::set<int>* parent2In, + const std::set<int>* childIn, std::set<int>* parent1Out, + std::set<int>* parent2Out, std::set<int>* childOut) const override { + if (*parent1In->begin() < 0 || *parent2In->begin() < 0 || *childIn->begin() < 0) { + IRandomVariableOp::eval(parent1In, parent2In, childIn, parent1Out, parent2Out, + childOut); + } else { + bool isChildInContinuous = isContinuous(childIn); + std::pair<int, int> child = {*childIn->begin(), *childIn->rbegin()}; + for (auto i : *parent1In) { + bool valid = false; + for (auto j : *parent2In) { + int res = this->eval(i, j); + // Since MUL increases monotonically with one value, break the loop if the + // result is larger than the 
limit. + if (res > child.second) break; + if (res < child.first) continue; + if (isChildInContinuous || childIn->find(res) != childIn->end()) { + valid = true; + parent2Out->insert(j); + childOut->insert(res); + } + } + if (valid) parent1Out->insert(i); + } + } + } + virtual const char* getName() const override { return "MUL"; } +}; + +class Division : public IRandomVariableOp { + public: + virtual int eval(int lhs, int rhs) const override { + return rhs == 0 ? kInvalidValue : lhs / rhs; + } + virtual RandomVariableRange getInitRange(const RandomVariableRange& lhs, + const RandomVariableRange& rhs) const override { + if (lhs.min() < 0 || rhs.min() <= 0) { + return IRandomVariableOp::getInitRange(lhs, rhs); + } else { + return RandomVariableRange(lhs.min() / rhs.max(), lhs.max() / rhs.min()); + } + } + virtual const char* getName() const override { return "DIV"; } +}; + +class ExactDivision : public Division { + public: + virtual int eval(int lhs, int rhs) const override { + return (rhs == 0 || lhs % rhs != 0) ? kInvalidValue : lhs / rhs; + } + virtual const char* getName() const override { return "EXACT_DIV"; } +}; + +class Modulo : public IRandomVariableOp { + public: + virtual int eval(int lhs, int rhs) const override { + return rhs == 0 ? kInvalidValue : lhs % rhs; + } + virtual RandomVariableRange getInitRange(const RandomVariableRange&, + const RandomVariableRange& rhs) const override { + return RandomVariableRange(0, rhs.max()); + } + virtual void eval(const std::set<int>* parent1In, const std::set<int>* parent2In, + const std::set<int>* childIn, std::set<int>* parent1Out, + std::set<int>* parent2Out, std::set<int>* childOut) const override { + if (*childIn->begin() != 0 || childIn->size() != 1u) { + IRandomVariableOp::eval(parent1In, parent2In, childIn, parent1Out, parent2Out, + childOut); + } else { + // For the special case that child is a const 0, it would be faster if the range for + // parents are evaluated separately. + + // Evalute parent1 directly. 
+ for (auto i : *parent1In) { + for (auto j : *parent2In) { + if (i % j == 0) { + parent1Out->insert(i); + break; + } + } + } + // Evalute parent2, see if a multiple of parent2 value can be found in parent1. + int parent1Max = *parent1In->rbegin(); + for (auto i : *parent2In) { + int jMax = parent1Max / i; + for (int j = 1; j <= jMax; j++) { + if (parent1In->find(i * j) != parent1In->end()) { + parent2Out->insert(i); + break; + } + } + } + if (!parent1Out->empty()) childOut->insert(0); + } + } + virtual const char* getName() const override { return "MOD"; } +}; + +class Maximum : public IRandomVariableOp { + public: + virtual int eval(int lhs, int rhs) const override { return std::max(lhs, rhs); } + virtual const char* getName() const override { return "MAX"; } +}; + +class Minimum : public IRandomVariableOp { + public: + virtual int eval(int lhs, int rhs) const override { return std::min(lhs, rhs); } + virtual const char* getName() const override { return "MIN"; } +}; + +class Square : public IUnaryOp { + public: + virtual int eval(int val) const override { return val * val; } + virtual const char* getName() const override { return "SQUARE"; } +}; + +class UnaryEqual : public IUnaryOp { + public: + virtual int eval(int val) const override { return val; } + virtual const char* getName() const override { return "UNARY_EQUAL"; } +}; + +class Equal : public IConstraintOp { + public: + virtual bool check(int lhs, int rhs) const override { return lhs == rhs; } + virtual void eval(const std::set<int>* parent1In, const std::set<int>* parent2In, + const std::set<int>* childIn, std::set<int>* parent1Out, + std::set<int>* parent2Out, std::set<int>* childOut) const override { + NN_FUZZER_CHECK(childIn->size() == 1u && *childIn->begin() == 0); + // The intersection of two sets can be found in O(n). 
+ std::set_intersection(parent1In->begin(), parent1In->end(), parent2In->begin(), + parent2In->end(), std::inserter(*parent1Out, parent1Out->begin())); + *parent2Out = *parent1Out; + childOut->insert(0); + } + virtual const char* getName() const override { return "EQUAL"; } +}; + +class GreaterThan : public IConstraintOp { + public: + virtual bool check(int lhs, int rhs) const override { return lhs > rhs; } + virtual const char* getName() const override { return "GREATER_THAN"; } +}; + +class GreaterEqual : public IConstraintOp { + public: + virtual bool check(int lhs, int rhs) const override { return lhs >= rhs; } + virtual const char* getName() const override { return "GREATER_EQUAL"; } +}; + +class FloatMultiplication : public IUnaryOp { + public: + FloatMultiplication(float multiplicand) : mMultiplicand(multiplicand) {} + virtual int eval(int val) const override { + return static_cast<int>(std::floor(static_cast<float>(val) * mMultiplicand)); + } + virtual const char* getName() const override { return "MUL_FLOAT"; } + + private: + float mMultiplicand; +}; + +// Arithmetic operators and methods on RandomVariables will create OP RandomVariableNodes. +// Since there must be at most one edge between two RandomVariableNodes, we have to do something +// special when both sides are refering to the same node. + +RandomVariable operator+(const RandomVariable& lhs, const RandomVariable& rhs) { + return lhs.get() == rhs.get() ? RandomVariable(lhs, 2, Singleton<Multiplication>::get()) + : RandomVariable(lhs, rhs, Singleton<Addition>::get()); +} +RandomVariable operator-(const RandomVariable& lhs, const RandomVariable& rhs) { + return lhs.get() == rhs.get() ? RandomVariable(0) + : RandomVariable(lhs, rhs, Singleton<Subtraction>::get()); +} +RandomVariable operator*(const RandomVariable& lhs, const RandomVariable& rhs) { + return lhs.get() == rhs.get() ? 
RandomVariable(lhs, RandomVariable(), Singleton<Square>::get()) + : RandomVariable(lhs, rhs, Singleton<Multiplication>::get()); +} +RandomVariable operator*(const RandomVariable& lhs, const float& rhs) { + return RandomVariable(lhs, RandomVariable(), std::make_shared<FloatMultiplication>(rhs)); +} +RandomVariable operator/(const RandomVariable& lhs, const RandomVariable& rhs) { + return lhs.get() == rhs.get() ? RandomVariable(1) + : RandomVariable(lhs, rhs, Singleton<Division>::get()); +} +RandomVariable operator%(const RandomVariable& lhs, const RandomVariable& rhs) { + return lhs.get() == rhs.get() ? RandomVariable(0) + : RandomVariable(lhs, rhs, Singleton<Modulo>::get()); +} +RandomVariable max(const RandomVariable& lhs, const RandomVariable& rhs) { + return lhs.get() == rhs.get() ? lhs : RandomVariable(lhs, rhs, Singleton<Maximum>::get()); +} +RandomVariable min(const RandomVariable& lhs, const RandomVariable& rhs) { + return lhs.get() == rhs.get() ? lhs : RandomVariable(lhs, rhs, Singleton<Minimum>::get()); +} + +RandomVariable RandomVariable::exactDiv(const RandomVariable& other) { + return mVar == other.get() ? RandomVariable(1) + : RandomVariable(*this, other, Singleton<ExactDivision>::get()); +} + +RandomVariable RandomVariable::setEqual(const RandomVariable& other) const { + RandomVariableNode node1 = mVar, node2 = other.get(); + NN_FUZZER_LOG << "Set equality of var" << node1->index << " and var" << node2->index; + + // Do not setEqual on the same pair twice. + if (node1 == node2 || (node1->op == Singleton<UnaryEqual>::get() && node1->parent1 == node2) || + (node2->op == Singleton<UnaryEqual>::get() && node2->parent1 == node1)) { + NN_FUZZER_LOG << "Already equal. Return."; + return RandomVariable(); + } + + // If possible, always try UnaryEqual first to reduce the search space. + // UnaryEqual can be used if node B is FREE and is evaluated later than node A. + // TODO: Reduce code duplication. 
+ if (RandomVariableNetwork::get()->isSubordinate(node1, node2)) { + NN_FUZZER_LOG << " Make var" << node2->index << " a child of var" << node1->index; + node2->type = RandomVariableType::OP; + node2->parent1 = node1; + node2->op = Singleton<UnaryEqual>::get(); + node1->children.push_back(node2); + RandomVariableNetwork::get()->join(node1, node2); + node1->updateTimestamp(); + return other; + } + if (RandomVariableNetwork::get()->isSubordinate(node2, node1)) { + NN_FUZZER_LOG << " Make var" << node1->index << " a child of var" << node2->index; + node1->type = RandomVariableType::OP; + node1->parent1 = node2; + node1->op = Singleton<UnaryEqual>::get(); + node2->children.push_back(node1); + RandomVariableNetwork::get()->join(node2, node1); + node1->updateTimestamp(); + return *this; + } + return RandomVariable(*this, other, Singleton<Equal>::get()); +} + +RandomVariable RandomVariable::setGreaterThan(const RandomVariable& other) const { + NN_FUZZER_CHECK(mVar != other.get()); + return RandomVariable(*this, other, Singleton<GreaterThan>::get()); +} +RandomVariable RandomVariable::setGreaterEqual(const RandomVariable& other) const { + return mVar == other.get() ? *this + : RandomVariable(*this, other, Singleton<GreaterEqual>::get()); +} + +void DisjointNetwork::add(const RandomVariableNode& var) { + // Find the subnet index of the parents and decide the index for var. + int ind1 = var->parent1 == nullptr ? -1 : mIndexMap[var->parent1]; + int ind2 = var->parent2 == nullptr ? -1 : mIndexMap[var->parent2]; + int ind = join(ind1, ind2); + // If no parent, put it into a new subnet component. 
+ if (ind == -1) ind = mNextIndex++; + NN_FUZZER_LOG << "Add RandomVariable var" << var->index << " to network #" << ind; + mIndexMap[var] = ind; + mEvalOrderMap[ind].push_back(var); +} + +int DisjointNetwork::join(int ind1, int ind2) { + if (ind1 == -1) return ind2; + if (ind2 == -1) return ind1; + if (ind1 == ind2) return ind1; + NN_FUZZER_LOG << "Join network #" << ind1 << " and #" << ind2; + auto &order1 = mEvalOrderMap[ind1], &order2 = mEvalOrderMap[ind2]; + // Append every node in ind2 to the end of ind1 + for (const auto& var : order2) { + order1.push_back(var); + mIndexMap[var] = ind1; + } + // Remove ind2 from mEvalOrderMap. + mEvalOrderMap.erase(mEvalOrderMap.find(ind2)); + return ind1; +} + +RandomVariableNetwork* RandomVariableNetwork::get() { + static RandomVariableNetwork instance; + return &instance; +} + +void RandomVariableNetwork::initialize(int defaultValue) { + RandomVariableBase::globalIndex = 0; + RandomVariable::defaultValue = defaultValue; + mIndexMap.clear(); + mEvalOrderMap.clear(); + mDimProd.clear(); + mNextIndex = 0; + mGlobalTime = 0; + mTimestamp = -1; +} + +bool RandomVariableNetwork::isSubordinate(const RandomVariableNode& node1, + const RandomVariableNode& node2) { + if (node2->type != RandomVariableType::FREE) return false; + int ind1 = mIndexMap[node1]; + // node2 is of a different subnet. + if (ind1 != mIndexMap[node2]) return true; + for (const auto& node : mEvalOrderMap[ind1]) { + if (node == node2) return false; + // node2 is of the same subnet but evaluated later than node1. + if (node == node1) return true; + } + NN_FUZZER_CHECK(false) << "Code executed in non-reachable region."; + return false; +} + +struct EvalInfo { + // The RandomVariableNode that this EvalInfo is associated with. + // var->value is the current value during evaluation. + RandomVariableNode var; + + // The RandomVariable value is staged when a valid combination is found. 
+ std::set<int> staging; + + // The staging values are committed after a subnet evaluation. + std::set<int> committed; + + // Keeps track of the latest timestamp that committed is updated. + int timestamp; + + // For evalSubnetWithLocalNetwork. + RandomVariableType originalType; + + // Should only invoke eval on OP RandomVariable. + bool eval() { + NN_FUZZER_CHECK(var->type == RandomVariableType::OP); + var->value = var->op->eval(var->parent1->value, + var->parent2 == nullptr ? 0 : var->parent2->value); + if (var->value == kInvalidValue) return false; + return committed.find(var->value) != committed.end(); + } + void stage() { staging.insert(var->value); } + void commit() { + // Only update committed and timestamp if the range is *indeed* changed. + if (staging.size() != committed.size()) { + committed = std::move(staging); + timestamp = RandomVariableNetwork::get()->getGlobalTime(); + } + staging.clear(); + } + void updateRange() { + // Only update range and timestamp if the range is *indeed* changed. + if (committed.size() != var->range.size()) { + var->range = RandomVariableRange(committed); + var->timestamp = timestamp; + } + committed.clear(); + } + + EvalInfo(const RandomVariableNode& var) + : var(var), + committed(var->range.getChoices().begin(), var->range.getChoices().end()), + timestamp(var->timestamp) {} +}; +using EvalContext = std::unordered_map<RandomVariableNode, EvalInfo>; + +// For logging only. 
+inline std::string toString(const RandomVariableNode& var, EvalContext* context) { + std::stringstream ss; + ss << "var" << var->index << " = "; + const auto& committed = context->at(var).committed; + switch (var->type) { + case RandomVariableType::FREE: + ss << "FREE [" + << joinStr(", ", 20, std::vector<int>(committed.begin(), committed.end())) << "]"; + break; + case RandomVariableType::CONST: + ss << "CONST " << toString(var->value); + break; + case RandomVariableType::OP: + ss << "var" << var->parent1->index << " " << var->op->getName(); + if (var->parent2 != nullptr) ss << " var" << var->parent2->index; + ss << ", [" << joinStr(", ", 20, std::vector<int>(committed.begin(), committed.end())) + << "]"; + break; + default: + NN_FUZZER_CHECK(false); + } + ss << ", timestamp = " << context->at(var).timestamp; + return ss.str(); +} + +// Check if the subnet needs to be re-evaluated by comparing the timestamps. +static inline bool needEvaluate(const EvaluationOrder& evalOrder, int subnetTime, + EvalContext* context = nullptr) { + for (const auto& var : evalOrder) { + int timestamp = context == nullptr ? var->timestamp : context->at(var).timestamp; + // If we find a node that has been modified since last evaluation, the subnet needs to be + // re-evaluated. + if (timestamp > subnetTime) return true; + } + return false; +} + +// Helper function to evaluate the subnet recursively. +// Iterate through all combinations of FREE RandomVariables choices. +static void evalSubnetHelper(const EvaluationOrder& evalOrder, EvalContext* context, size_t i = 0) { + if (i == evalOrder.size()) { + // Reach the end of the evaluation, find a valid combination. + for (auto& var : evalOrder) context->at(var).stage(); + return; + } + const auto& var = evalOrder[i]; + if (var->type == RandomVariableType::FREE) { + // For FREE RandomVariable, iterate through all valid choices. 
+ for (int val : context->at(var).committed) { + var->value = val; + evalSubnetHelper(evalOrder, context, i + 1); + } + return; + } else if (var->type == RandomVariableType::OP) { + // For OP RandomVariable, evaluate from parents and terminate if the result is invalid. + if (!context->at(var).eval()) return; + } + evalSubnetHelper(evalOrder, context, i + 1); +} + +// Check if the subnet has only one single OP RandomVariable. +static inline bool isSingleOpSubnet(const EvaluationOrder& evalOrder) { + int numOp = 0; + for (const auto& var : evalOrder) { + if (var->type == RandomVariableType::OP) numOp++; + if (numOp > 1) return false; + } + return numOp != 0; +} + +// Evaluate with a potentially faster approach provided by IRandomVariableOp. +static inline void evalSubnetSingleOpHelper(const EvaluationOrder& evalOrder, + EvalContext* context) { + NN_FUZZER_LOG << "Identified as single op subnet"; + const auto& var = evalOrder.back(); + NN_FUZZER_CHECK(var->type == RandomVariableType::OP); + var->op->eval(&context->at(var->parent1).committed, + var->parent2 == nullptr ? nullptr : &context->at(var->parent2).committed, + &context->at(var).committed, &context->at(var->parent1).staging, + var->parent2 == nullptr ? nullptr : &context->at(var->parent2).staging, + &context->at(var).staging); +} + +// Check if the number of combinations of FREE RandomVariables exceeds the limit. +static inline uint64_t getNumCombinations(const EvaluationOrder& evalOrder, + EvalContext* context = nullptr) { + constexpr uint64_t kLimit = 1e8; + uint64_t numCombinations = 1; + for (const auto& var : evalOrder) { + if (var->type == RandomVariableType::FREE) { + size_t size = + context == nullptr ? var->range.size() : context->at(var).committed.size(); + numCombinations *= size; + // To prevent overflow. + if (numCombinations > kLimit) return kLimit; + } + } + return numCombinations; +} + +// Evaluate the subnet recursively. 
Will return fail if the number of combinations of FREE +// RandomVariable exceeds the threshold kMaxNumCombinations. +static bool evalSubnetWithBruteForce(const EvaluationOrder& evalOrder, EvalContext* context) { + constexpr uint64_t kMaxNumCombinations = 1e7; + NN_FUZZER_LOG << "Evaluate with brute force"; + if (isSingleOpSubnet(evalOrder)) { + // If the network only have one single OP, dispatch to a faster evaluation. + evalSubnetSingleOpHelper(evalOrder, context); + } else { + if (getNumCombinations(evalOrder, context) > kMaxNumCombinations) { + NN_FUZZER_LOG << "Terminate the evaluation because of large search range"; + std::cout << "[ ] Terminate the evaluation because of large search range" + << std::endl; + return false; + } + evalSubnetHelper(evalOrder, context); + } + for (auto& var : evalOrder) { + if (context->at(var).staging.empty()) { + NN_FUZZER_LOG << "Evaluation failed at " << toString(var, context); + return false; + } + context->at(var).commit(); + } + return true; +} + +struct LocalNetwork { + EvaluationOrder evalOrder; + std::vector<RandomVariableNode> bridgeNodes; + int timestamp = 0; + + bool eval(EvalContext* context) { + NN_FUZZER_LOG << "Evaluate local network with timestamp = " << timestamp; + // Temporarily treat bridge nodes as FREE RandomVariables. + for (const auto& var : bridgeNodes) { + context->at(var).originalType = var->type; + var->type = RandomVariableType::FREE; + } + for (const auto& var : evalOrder) { + context->at(var).staging.clear(); + NN_FUZZER_LOG << " - " << toString(var, context); + } + bool success = evalSubnetWithBruteForce(evalOrder, context); + // Reset the RandomVariable types for bridge nodes. + for (const auto& var : bridgeNodes) var->type = context->at(var).originalType; + return success; + } +}; + +// Partition the network further into LocalNetworks based on the result from bridge annotation +// algorithm. 
+class GraphPartitioner : public DisjointNetwork { + public: + GraphPartitioner() = default; + + std::vector<LocalNetwork> partition(const EvaluationOrder& evalOrder, int timestamp) { + annotateBridge(evalOrder); + for (const auto& var : evalOrder) add(var); + return get(timestamp); + } + + private: + GraphPartitioner(const GraphPartitioner&) = delete; + GraphPartitioner& operator=(const GraphPartitioner&) = delete; + + // Find the parent-child relationship between var1 and var2, and reset the bridge. + void setBridgeFlag(const RandomVariableNode& var1, const RandomVariableNode& var2) { + if (var1->parent1 == var2) { + mBridgeInfo[var1].isParent1Bridge = true; + } else if (var1->parent2 == var2) { + mBridgeInfo[var1].isParent2Bridge = true; + } else { + setBridgeFlag(var2, var1); + } + } + + // Annoate the bridges with DFS -- an edge [u, v] is a bridge if none of u's ancestor is + // reachable from a node in the subtree of b. The complexity is O(V + E). + // discoveryTime: The timestamp a node is visited + // lowTime: The min discovery time of all reachable nodes from the subtree of the node. + void annotateBridgeHelper(const RandomVariableNode& var, int* time) { + mBridgeInfo[var].visited = true; + mBridgeInfo[var].discoveryTime = mBridgeInfo[var].lowTime = (*time)++; + + // The algorithm operates on undirected graph. First find all adjacent nodes. + auto adj = var->children; + if (var->parent1 != nullptr) adj.push_back(var->parent1); + if (var->parent2 != nullptr) adj.push_back(var->parent2); + + for (const auto& weakChild : adj) { + auto child = weakChild.lock(); + NN_FUZZER_CHECK(child != nullptr); + if (mBridgeInfo.find(child) == mBridgeInfo.end()) continue; + if (!mBridgeInfo[child].visited) { + mBridgeInfo[child].parent = var; + annotateBridgeHelper(child, time); + + // If none of nodes in the subtree of child is connected to any ancestors of var, + // then it is a bridge. 
+ mBridgeInfo[var].lowTime = + std::min(mBridgeInfo[var].lowTime, mBridgeInfo[child].lowTime); + if (mBridgeInfo[child].lowTime > mBridgeInfo[var].discoveryTime) + setBridgeFlag(var, child); + } else if (mBridgeInfo[var].parent != child) { + mBridgeInfo[var].lowTime = + std::min(mBridgeInfo[var].lowTime, mBridgeInfo[child].discoveryTime); + } + } + } + + // Find all bridges in the subnet with DFS. + void annotateBridge(const EvaluationOrder& evalOrder) { + for (const auto& var : evalOrder) mBridgeInfo[var]; + int time = 0; + for (const auto& var : evalOrder) { + if (!mBridgeInfo[var].visited) annotateBridgeHelper(var, &time); + } + } + + // Re-partition the network by treating bridges as no edge. + void add(const RandomVariableNode& var) { + auto parent1 = var->parent1; + auto parent2 = var->parent2; + if (mBridgeInfo[var].isParent1Bridge) var->parent1 = nullptr; + if (mBridgeInfo[var].isParent2Bridge) var->parent2 = nullptr; + DisjointNetwork::add(var); + var->parent1 = parent1; + var->parent2 = parent2; + } + + // Add bridge nodes to the local network and remove single node subnet. + std::vector<LocalNetwork> get(int timestamp) { + std::vector<LocalNetwork> res; + for (auto& pair : mEvalOrderMap) { + // We do not need to evaluate subnet with only a single node. + if (pair.second.size() == 1 && pair.second[0]->parent1 == nullptr) continue; + res.emplace_back(); + for (const auto& var : pair.second) { + if (mBridgeInfo[var].isParent1Bridge) { + res.back().evalOrder.push_back(var->parent1); + res.back().bridgeNodes.push_back(var->parent1); + } + if (mBridgeInfo[var].isParent2Bridge) { + res.back().evalOrder.push_back(var->parent2); + res.back().bridgeNodes.push_back(var->parent2); + } + res.back().evalOrder.push_back(var); + } + res.back().timestamp = timestamp; + } + return res; + } + + // For bridge discovery algorithm. 
+ struct BridgeInfo { + bool isParent1Bridge = false; + bool isParent2Bridge = false; + int discoveryTime = 0; + int lowTime = 0; + bool visited = false; + std::shared_ptr<RandomVariableBase> parent = nullptr; + }; + std::unordered_map<RandomVariableNode, BridgeInfo> mBridgeInfo; +}; + +// Evaluate subnets repeatedly until converge. +// Class T_Subnet must have member evalOrder, timestamp, and member function eval. +template <class T_Subnet> +inline bool evalSubnetsRepeatedly(std::vector<T_Subnet>* subnets, EvalContext* context) { + bool terminate = false; + while (!terminate) { + terminate = true; + for (auto& subnet : *subnets) { + if (needEvaluate(subnet.evalOrder, subnet.timestamp, context)) { + if (!subnet.eval(context)) return false; + subnet.timestamp = RandomVariableNetwork::get()->getGlobalTime(); + terminate = false; + } + } + } + return true; +} + +// Evaluate the subnet by first partitioning it further into LocalNetworks. +static bool evalSubnetWithLocalNetwork(const EvaluationOrder& evalOrder, int timestamp, + EvalContext* context) { + NN_FUZZER_LOG << "Evaluate with local network"; + auto localNetworks = GraphPartitioner().partition(evalOrder, timestamp); + return evalSubnetsRepeatedly(&localNetworks, context); +} + +struct LeafNetwork { + EvaluationOrder evalOrder; + int timestamp = 0; + LeafNetwork(const RandomVariableNode& var, int timestamp) : timestamp(timestamp) { + std::set<RandomVariableNode> visited; + constructorHelper(var, &visited); + } + // Construct the leaf network by recursively including parent nodes. 
+ void constructorHelper(const RandomVariableNode& var, std::set<RandomVariableNode>* visited) { + if (var == nullptr || visited->find(var) != visited->end()) return; + constructorHelper(var->parent1, visited); + constructorHelper(var->parent2, visited); + visited->insert(var); + evalOrder.push_back(var); + } + bool eval(EvalContext* context) { + return evalSubnetWithLocalNetwork(evalOrder, timestamp, context); + } +}; + +// Evaluate the subnet by leaf network. +// NOTE: This algorithm will only produce correct result for *most* of the time (> 99%). +// The random graph generator is expected to retry if it fails. +static bool evalSubnetWithLeafNetwork(const EvaluationOrder& evalOrder, int timestamp, + EvalContext* context) { + NN_FUZZER_LOG << "Evaluate with leaf network"; + // Construct leaf networks. + std::vector<LeafNetwork> leafNetworks; + for (const auto& var : evalOrder) { + if (var->children.empty()) { + NN_FUZZER_LOG << "Found leaf " << toString(var, context); + leafNetworks.emplace_back(var, timestamp); + } + } + return evalSubnetsRepeatedly(&leafNetworks, context); +} + +void RandomVariableNetwork::addDimensionProd(const std::vector<RandomVariable>& dims) { + if (dims.size() <= 1) return; + EvaluationOrder order; + for (const auto& dim : dims) order.push_back(dim.get()); + mDimProd.push_back(order); +} + +bool enforceDimProd(const std::vector<EvaluationOrder>& mDimProd, + const std::unordered_map<RandomVariableNode, int>& indexMap, + EvalContext* context, std::set<int>* dirtySubnets) { + for (auto& evalOrder : mDimProd) { + NN_FUZZER_LOG << " Dimension product network size = " << evalOrder.size(); + // Initialize EvalInfo of each RandomVariable. + for (auto& var : evalOrder) { + if (context->find(var) == context->end()) context->emplace(var, var); + NN_FUZZER_LOG << " - " << toString(var, context); + } + + // Enforce the product of the dimension values below kMaxValue: + // max(dimA) = kMaxValue / (min(dimB) * min(dimC) * ...) 
+ int prod = 1; + for (const auto& var : evalOrder) prod *= (*context->at(var).committed.begin()); + for (auto& var : evalOrder) { + auto& committed = context->at(var).committed; + int maxValue = kMaxValue / (prod / *committed.begin()); + auto it = committed.upper_bound(maxValue); + // var has empty range -> no solution. + if (it == committed.begin()) return false; + // The range is not modified -> continue. + if (it == committed.end()) continue; + // The range is modified -> the subnet of var is dirty, i.e. needs re-evaluation. + committed.erase(it, committed.end()); + context->at(var).timestamp = RandomVariableNetwork::get()->getGlobalTime(); + dirtySubnets->insert(indexMap.at(var)); + } + } + return true; +} + +bool RandomVariableNetwork::evalRange() { + constexpr uint64_t kMaxNumCombinationsWithBruteForce = 500; + constexpr uint64_t kMaxNumCombinationsWithLocalNetwork = 1e5; + NN_FUZZER_LOG << "Evaluate on " << mEvalOrderMap.size() << " sub-networks"; + EvalContext context; + std::set<int> dirtySubnets; // Which subnets needs evaluation. + for (auto& pair : mEvalOrderMap) { + const auto& evalOrder = pair.second; + // Decide whether needs evaluation by timestamp -- if no range has changed after the last + // evaluation, then the subnet does not need re-evaluation. + if (evalOrder.size() == 1 || !needEvaluate(evalOrder, mTimestamp)) continue; + dirtySubnets.insert(pair.first); + } + if (!enforceDimProd(mDimProd, mIndexMap, &context, &dirtySubnets)) return false; + + // Repeat until the ranges converge. + while (!dirtySubnets.empty()) { + for (int ind : dirtySubnets) { + const auto& evalOrder = mEvalOrderMap[ind]; + NN_FUZZER_LOG << " Sub-network #" << ind << " size = " << evalOrder.size(); + + // Initialize EvalInfo of each RandomVariable. + for (auto& var : evalOrder) { + if (context.find(var) == context.end()) context.emplace(var, var); + NN_FUZZER_LOG << " - " << toString(var, &context); + } + + // Dispatch to different algorithm according to search range. 
+ bool success; + uint64_t numCombinations = getNumCombinations(evalOrder); + if (numCombinations <= kMaxNumCombinationsWithBruteForce) { + success = evalSubnetWithBruteForce(evalOrder, &context); + } else if (numCombinations <= kMaxNumCombinationsWithLocalNetwork) { + success = evalSubnetWithLocalNetwork(evalOrder, mTimestamp, &context); + } else { + success = evalSubnetWithLeafNetwork(evalOrder, mTimestamp, &context); + } + if (!success) return false; + } + dirtySubnets.clear(); + if (!enforceDimProd(mDimProd, mIndexMap, &context, &dirtySubnets)) return false; + } + // A successful evaluation, update RandomVariables from EvalContext. + for (auto& pair : context) pair.second.updateRange(); + mTimestamp = getGlobalTime(); + NN_FUZZER_LOG << "Finish range evaluation"; + return true; +} + +static void unsetEqual(const RandomVariableNode& node) { + if (node == nullptr) return; + NN_FUZZER_LOG << "Unset equality of var" << node->index; + auto weakPtrEqual = [&node](const std::weak_ptr<RandomVariableBase>& ptr) { + return ptr.lock() == node; + }; + RandomVariableNode parent1 = node->parent1, parent2 = node->parent2; + parent1->children.erase( + std::find_if(parent1->children.begin(), parent1->children.end(), weakPtrEqual)); + node->parent1 = nullptr; + if (parent2 != nullptr) { + // For Equal. + parent2->children.erase( + std::find_if(parent2->children.begin(), parent2->children.end(), weakPtrEqual)); + node->parent2 = nullptr; + } else { + // For UnaryEqual. + node->type = RandomVariableType::FREE; + node->op = nullptr; + } +} + +// A class to revert all the changes made to RandomVariableNetwork since the Reverter object is +// constructed. Only used when setEqualIfCompatible results in incompatible. +class RandomVariableNetwork::Reverter { + public: + // Take a snapshot of RandomVariableNetwork when Reverter is constructed. + Reverter() : mSnapshot(*RandomVariableNetwork::get()) {} + // Add constraint (Equal) nodes to the reverter. 
+ void addNode(const RandomVariableNode& node) { mEqualNodes.push_back(node); } + void revert() { + NN_FUZZER_LOG << "Revert RandomVariableNetwork"; + // Release the constraints. + for (const auto& node : mEqualNodes) unsetEqual(node); + // Reset all member variables. + *RandomVariableNetwork::get() = std::move(mSnapshot); + } + + private: + Reverter(const Reverter&) = delete; + Reverter& operator=(const Reverter&) = delete; + RandomVariableNetwork mSnapshot; + std::vector<RandomVariableNode> mEqualNodes; +}; + +bool RandomVariableNetwork::setEqualIfCompatible(const std::vector<RandomVariable>& lhs, + const std::vector<RandomVariable>& rhs) { + NN_FUZZER_LOG << "Check compatibility of {" << joinStr(", ", lhs) << "} and {" + << joinStr(", ", rhs) << "}"; + if (lhs.size() != rhs.size()) return false; + Reverter reverter; + bool result = true; + for (size_t i = 0; i < lhs.size(); i++) { + auto node = lhs[i].setEqual(rhs[i]).get(); + reverter.addNode(node); + // Early terminate if there is no common choice between two ranges. + if (node != nullptr && node->range.empty()) result = false; + } + result = result && evalRange(); + if (!result) reverter.revert(); + NN_FUZZER_LOG << "setEqualIfCompatible: " << (result ? "[COMPATIBLE]" : "[INCOMPATIBLE]"); + return result; +} + +bool RandomVariableNetwork::freeze() { + NN_FUZZER_LOG << "Freeze the random network"; + if (!evalRange()) return false; + + std::vector<RandomVariableNode> nodes; + for (const auto& pair : mEvalOrderMap) { + // Find all FREE RandomVariables in the subnet. + for (const auto& var : pair.second) { + if (var->type == RandomVariableType::FREE) nodes.push_back(var); + } + } + + // Randomly shuffle the order, this is for a more uniform randomness. + randomShuffle(&nodes); + + // An inefficient algorithm that does freeze -> re-evaluate for every FREE RandomVariable. + // TODO: Might be able to optimize this. 
+ for (const auto& var : nodes) { + if (var->type != RandomVariableType::FREE) continue; + size_t size = var->range.size(); + NN_FUZZER_LOG << "Freeze " << toString(var); + var->freeze(); + NN_FUZZER_LOG << " " << toString(var); + // There is no need to re-evaluate if the FREE RandomVariable have only one choice. + if (size > 1) { + var->updateTimestamp(); + if (!evalRange()) { + NN_FUZZER_LOG << "Freeze failed at " << toString(var); + return false; + } + } + } + NN_FUZZER_LOG << "Finish freezing the random network"; + return true; +} + +} // namespace fuzzing_test +} // namespace nn +} // namespace android
diff --git a/runtime/test/fuzzing/RandomVariable.h b/runtime/test/fuzzing/RandomVariable.h index b9d3972..d510559 100644 --- a/runtime/test/fuzzing/RandomVariable.h +++ b/runtime/test/fuzzing/RandomVariable.h
@@ -135,7 +135,7 @@ // provides useful methods and operator overloading to build the random variable network. class RandomVariable { public: - // Construct a placeholder RandomVariable with nullptr. + // Construct a dummy RandomVariable with nullptr. RandomVariable() : mVar(nullptr) {} // Construct a CONST RandomVariable with specified value. @@ -161,8 +161,8 @@ // Get the underlying managed RandomVariableNode. RandomVariableNode get() const { return mVar; }; - bool operator==(std::nullptr_t) const { return mVar == nullptr; } - bool operator!=(std::nullptr_t) const { return mVar != nullptr; } + bool operator==(nullptr_t) const { return mVar == nullptr; } + bool operator!=(nullptr_t) const { return mVar != nullptr; } // Arithmetic operators and methods on RandomVariables. friend RandomVariable operator+(const RandomVariable& lhs, const RandomVariable& rhs);
diff --git a/runtime/test/fuzzing/TestRandomGraph.cpp b/runtime/test/fuzzing/TestRandomGraph.cpp index 34eb9f3..fc73bc2 100644 --- a/runtime/test/fuzzing/TestRandomGraph.cpp +++ b/runtime/test/fuzzing/TestRandomGraph.cpp
@@ -32,16 +32,16 @@ #include "fuzzing/RandomGraphGeneratorUtils.h" #ifndef NNTEST_CTS -#include <HalInterfaces.h> -#include <SampleDriverFull.h> #include <memunreachable/memunreachable.h> #include <vector> -#include "HalUtils.h" +#include "HalInterfaces.h" #include "Manager.h" +#include "SampleDriverFull.h" using android::nn::sample_driver::SampleDriverFull; +using namespace android::nn::hal; #endif @@ -66,27 +66,27 @@ TestDriverV1_1() : mDriverV1_2(new SampleDriverFull(name, {.execTime = 0.8f, .powerUsage = 0.8f})) {} static constexpr char name[] = "TestDriverV1_1"; - hardware::Return<void> getCapabilities_1_1(getCapabilities_1_1_cb _hidl_cb) override { + Return<void> getCapabilities_1_1(getCapabilities_1_1_cb _hidl_cb) override { return mDriverV1_2->getCapabilities_1_1(_hidl_cb); } - hardware::Return<void> getSupportedOperations_1_1( - const V1_1::Model& model, getSupportedOperations_1_1_cb _hidl_cb) override { + Return<void> getSupportedOperations_1_1(const V1_1::Model& model, + getSupportedOperations_1_1_cb _hidl_cb) override { return mDriverV1_2->getSupportedOperations_1_1(model, _hidl_cb); } - hardware::Return<V1_0::ErrorStatus> prepareModel_1_1( - const V1_1::Model& model, V1_1::ExecutionPreference preference, + Return<V1_0::ErrorStatus> prepareModel_1_1( + const V1_1::Model& model, ExecutionPreference preference, const sp<V1_0::IPreparedModelCallback>& actualCallback) override { return mDriverV1_2->prepareModel_1_1(model, preference, actualCallback); } - hardware::Return<V1_0::DeviceStatus> getStatus() override { return mDriverV1_2->getStatus(); } - hardware::Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override { + Return<DeviceStatus> getStatus() override { return mDriverV1_2->getStatus(); } + Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override { return mDriverV1_2->getCapabilities(_hidl_cb); } - hardware::Return<void> getSupportedOperations(const V1_0::Model& model, - getSupportedOperations_cb _hidl_cb) override { + 
Return<void> getSupportedOperations(const V1_0::Model& model, + getSupportedOperations_cb _hidl_cb) override { return mDriverV1_2->getSupportedOperations(model, _hidl_cb); } - hardware::Return<V1_0::ErrorStatus> prepareModel( + Return<V1_0::ErrorStatus> prepareModel( const V1_0::Model& model, const sp<V1_0::IPreparedModelCallback>& actualCallback) override { return mDriverV1_2->prepareModel(model, actualCallback); @@ -102,24 +102,29 @@ TestDriverV1_0() : mDriverV1_2(new SampleDriverFull(name, {.execTime = 0.7f, .powerUsage = 0.7f})) {} static constexpr char name[] = "TestDriverV1_0"; - hardware::Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override { + Return<void> getCapabilities(getCapabilities_cb _hidl_cb) override { return mDriverV1_2->getCapabilities(_hidl_cb); } - hardware::Return<void> getSupportedOperations(const V1_0::Model& model, - getSupportedOperations_cb _hidl_cb) override { + Return<void> getSupportedOperations(const V1_0::Model& model, + getSupportedOperations_cb _hidl_cb) override { return mDriverV1_2->getSupportedOperations(model, _hidl_cb); } - hardware::Return<V1_0::ErrorStatus> prepareModel( + Return<V1_0::ErrorStatus> prepareModel( const V1_0::Model& model, const sp<V1_0::IPreparedModelCallback>& actualCallback) override { return mDriverV1_2->prepareModel(model, actualCallback); } - hardware::Return<V1_0::DeviceStatus> getStatus() override { return mDriverV1_2->getStatus(); } + Return<DeviceStatus> getStatus() override { return mDriverV1_2->getStatus(); } private: const sp<V1_2::IDevice> mDriverV1_2; }; +template <class T_TestDriver> +std::shared_ptr<Device> makeTestDevice() { + return DeviceManager::forTest_makeDriverDevice(T_TestDriver::name, new T_TestDriver); +} + #endif // NN API fuzzer logging setting comes from system property debug.nn.fuzzer.log and @@ -143,12 +148,9 @@ mDetectMemoryLeak = ::android::base::GetProperty("debug.nn.fuzzer.detectleak", "") == "1"; mStandardDevices = DeviceManager::get()->forTest_getDevices(); - 
mSyntheticDevices.push_back(DeviceManager::forTest_makeDriverDevice( - makeSharedDevice(TestDriverV1_2::name, new TestDriverV1_2))); - mSyntheticDevices.push_back(DeviceManager::forTest_makeDriverDevice( - makeSharedDevice(TestDriverV1_1::name, new TestDriverV1_1))); - mSyntheticDevices.push_back(DeviceManager::forTest_makeDriverDevice( - makeSharedDevice(TestDriverV1_0::name, new TestDriverV1_0))); + mSyntheticDevices.push_back(makeTestDevice<TestDriverV1_2>()); + mSyntheticDevices.push_back(makeTestDevice<TestDriverV1_1>()); + mSyntheticDevices.push_back(makeTestDevice<TestDriverV1_0>()); #endif mVndkVersion = ::android::base::GetIntProperty("ro.vndk.version", __ANDROID_API_FUTURE__); @@ -815,8 +817,8 @@ TEST_RANDOM_GRAPH_WITH_DATA_TYPE_AND_RANK(TENSOR_BOOL8, 2); TEST_RANDOM_GRAPH_WITH_DATA_TYPE_AND_RANK(TENSOR_BOOL8, 1); -INSTANTIATE_TEST_SUITE_P(TestRandomGraph, SingleOperationTest, ::testing::Range(0u, 50u)); -INSTANTIATE_TEST_SUITE_P(TestRandomGraph, RandomGraphTest, ::testing::Range(0u, 50u)); +INSTANTIATE_TEST_CASE_P(TestRandomGraph, SingleOperationTest, ::testing::Range(0u, 50u)); +INSTANTIATE_TEST_CASE_P(TestRandomGraph, RandomGraphTest, ::testing::Range(0u, 50u)); } // namespace fuzzing_test } // namespace nn
diff --git a/runtime/test/fuzzing/operation_signatures/BoundingBox.cpp b/runtime/test/fuzzing/operation_signatures/BoundingBox.cpp index 6498cae..b4177ff 100644 --- a/runtime/test/fuzzing/operation_signatures/BoundingBox.cpp +++ b/runtime/test/fuzzing/operation_signatures/BoundingBox.cpp
@@ -17,8 +17,6 @@ #include <algorithm> #include <vector> -#include "TestHarness.h" -#include "fuzzing/RandomGraphGeneratorUtils.h" #include "fuzzing/operation_signatures/OperationSignatureUtils.h" namespace android { @@ -73,8 +71,6 @@ template <typename T> inline void fillRoiTensor(uint32_t numRois, T maxH, T maxW, RandomOperand* op) { - NN_FUZZER_CHECK(!op->buffer.empty()) - << "Trying to fill ROI tensor but the underlying buffer has not been allocated"; for (uint32_t i = 0; i < numRois; i++) { T low = getUniform<T>(0, maxW); op->value<T>(i * 4) = low; @@ -181,7 +177,6 @@ static void heatmapMaxKeypointConstructor(TestOperandType, uint32_t rank, RandomOperation* op) { NN_FUZZER_CHECK(rank == 4); - bool useNchw = op->inputs[2]->value<bool8>(); RandomVariable heatmapSize = RandomVariableType::FREE; RandomVariable numRois = RandomVariableType::FREE; @@ -197,10 +192,6 @@ op->outputs[0]->dimensions = {numRois, numKeypoints}; op->outputs[1]->dimensions = {numRois, numKeypoints, 2}; - // The values of the RoI tensor has a special format and cannot be generated from another - // operation. - op->inputs[1]->doNotConnect = true; - // TODO: This is an ugly fix due to the limitation of the current generator that can not handle // the dimension dependency within an input. Without the following line, most of the generated // HEATMAP_MAX_KEYPOINT graphs will be invalid and triggers retry.
diff --git a/runtime/test/fuzzing/operation_signatures/ConcatSplit.cpp b/runtime/test/fuzzing/operation_signatures/ConcatSplit.cpp index 1522cf4..22020cf 100644 --- a/runtime/test/fuzzing/operation_signatures/ConcatSplit.cpp +++ b/runtime/test/fuzzing/operation_signatures/ConcatSplit.cpp
@@ -120,9 +120,9 @@ // SPLIT with fixed number of splits. static void splitConstructor(uint32_t numSplits, uint32_t rank, RandomOperation* op) { - int32_t axis = getRandomAxis(rank); + int32_t axis = getUniform<int32_t>(-rank, rank - 1); op->inputs[1]->setScalarValue<int32_t>(axis); - axis = toPositiveAxis(axis, rank); + if (axis < 0) axis += rank; op->inputs[0]->dimensions.resize(rank); for (uint32_t i = 0; i < numSplits; i++) {
diff --git a/runtime/test/fuzzing/operation_signatures/Normalization.cpp b/runtime/test/fuzzing/operation_signatures/Normalization.cpp index 5d1da4b..89ba360 100644 --- a/runtime/test/fuzzing/operation_signatures/Normalization.cpp +++ b/runtime/test/fuzzing/operation_signatures/Normalization.cpp
@@ -24,7 +24,7 @@ sameDimensionOpConstructor(dataType, rank, op); // Generate value for "axis" parameter. if (op->inputs.size() > 2) { - op->inputs[2]->setScalarValue<int32_t>(getRandomAxis(rank)); + op->inputs[2]->setScalarValue<int32_t>(getUniform<int32_t>(-rank, rank - 1)); } } @@ -92,7 +92,7 @@ sameDimensionOpConstructor(dataType, rank, op); // Generate value for "axis" parameter. if (op->inputs.size() > 1) { - op->inputs[1]->setScalarValue<int32_t>(getRandomAxis(rank)); + op->inputs[1]->setScalarValue<int32_t>(getUniform<int32_t>(-rank, rank - 1)); } // L2_NORMALIZATION may produce NaN output values with all zero inputs. We should not connect // the output tensor to the input of another operation. @@ -160,7 +160,7 @@ sameDimensionOpConstructor(dataType, rank, op); // Generate value for "axis" parameter. if (op->inputs.size() > 5) { - op->inputs[5]->setScalarValue<int32_t>(getRandomAxis(rank)); + op->inputs[5]->setScalarValue<int32_t>(getUniform<int32_t>(-rank, rank - 1)); } }
diff --git a/runtime/test/fuzzing/operation_signatures/OperationSignatureUtils.h b/runtime/test/fuzzing/operation_signatures/OperationSignatureUtils.h index 53b5aad..74a5ae4 100644 --- a/runtime/test/fuzzing/operation_signatures/OperationSignatureUtils.h +++ b/runtime/test/fuzzing/operation_signatures/OperationSignatureUtils.h
@@ -165,16 +165,6 @@ } } -// Get a random value between [-rank, rank) for the "axis" parameter of NNAPI operations. -inline int32_t getRandomAxis(int32_t rank) { - return getUniform(-rank, rank - 1); -} - -// Convert a potentially negative axis index to the equivalent positive axis index. -inline int32_t toPositiveAxis(int32_t axis, int32_t rank) { - return axis >= 0 ? axis : axis + rank; -} - // A helper struct for DEFINE_OPERATION_SIGNATURE macro. struct OperationSignatureHelper { std::string name; @@ -310,7 +300,7 @@ op->zeroPoint = 0; break; default: - NN_FUZZER_CHECK(false) << "Data type " << dataType + NN_FUZZER_CHECK(false) << "Data type " << toString(dataType) << " is not supported in defaultScalarOperandConstructor."; } }
diff --git a/runtime/test/fuzzing/operation_signatures/Reduce.cpp b/runtime/test/fuzzing/operation_signatures/Reduce.cpp index 994f086..a3bad35 100644 --- a/runtime/test/fuzzing/operation_signatures/Reduce.cpp +++ b/runtime/test/fuzzing/operation_signatures/Reduce.cpp
@@ -31,9 +31,9 @@ op->inputs[1]->dimensions = {numAxis}; op->inputs[1]->resizeBuffer<int32_t>(numAxis); for (uint32_t i = 0; i < numAxis; i++) { - int32_t dim = getRandomAxis(rank); + int32_t dim = getUniform<int32_t>(-rank, rank - 1); op->inputs[1]->value<int32_t>(i) = dim; - reduce[toPositiveAxis(dim, rank)] = true; + reduce[dim < 0 ? dim + rank : dim] = true; } // This scalar may have two types: in MEAN it is INT32, in REDUCE_* it is BOOL @@ -103,10 +103,10 @@ setFreeDimensions(op->inputs[0], rank); // "axis" must be in the range [-rank, rank). // Negative "axis" is used to specify axis from the end. - int32_t axis = getRandomAxis(rank); + int32_t axis = getUniform<int32_t>(-rank, rank - 1); op->inputs[1]->setScalarValue<int32_t>(axis); for (uint32_t i = 0; i < rank; i++) { - if (i != static_cast<uint32_t>(toPositiveAxis(axis, rank))) { + if (i != static_cast<uint32_t>(axis) && i != axis + rank) { op->outputs[0]->dimensions.emplace_back(op->inputs[0]->dimensions[i]); } }
diff --git a/runtime/test/fuzzing/operation_signatures/Reshape.cpp b/runtime/test/fuzzing/operation_signatures/Reshape.cpp index 63423c0..b50af38 100644 --- a/runtime/test/fuzzing/operation_signatures/Reshape.cpp +++ b/runtime/test/fuzzing/operation_signatures/Reshape.cpp
@@ -425,10 +425,10 @@ RandomOperation* op) { sameShapeOpConstructor(dataType, rank, op); // The number of groups must be a divisor of the target axis size. - int32_t axis = getRandomAxis(rank); + int32_t axis = getUniform<int32_t>(-rank, rank - 1); op->inputs[2]->setScalarValue<int32_t>(axis); int32_t numGroups = op->inputs[1]->value<int32_t>(); - axis = toPositiveAxis(axis, rank); + if (axis < 0) axis += rank; (op->inputs[0]->dimensions[axis] % numGroups).setEqual(0); } @@ -519,9 +519,9 @@ static void expandDimsConstructor(TestOperandType, uint32_t rank, RandomOperation* op) { // Generate values for the "axis" tensor. - int32_t axis = getRandomAxis(rank + 1); + int32_t axis = getUniform<int32_t>(-rank - 1, rank); op->inputs[1]->setScalarValue<int32_t>(axis); - if (axis < 0) axis += static_cast<int32_t>(rank + 1); + if (axis < 0) axis += rank + 1; setFreeDimensions(op->inputs[0], rank); for (uint32_t i = 0; i < rank; i++) {
diff --git a/runtime/test/fuzzing/operation_signatures/Selection.cpp b/runtime/test/fuzzing/operation_signatures/Selection.cpp index 515fc0c..02612c0 100644 --- a/runtime/test/fuzzing/operation_signatures/Selection.cpp +++ b/runtime/test/fuzzing/operation_signatures/Selection.cpp
@@ -114,9 +114,9 @@ static void gatherConstructor(TestOperandType, uint32_t rank, RandomOperation* op) { // Generate value for "axis" scalar. - int32_t axis = getRandomAxis(rank); + int32_t axis = getUniform<int32_t>(-rank, rank - 1); op->inputs[1]->setScalarValue<int32_t>(axis); - axis = toPositiveAxis(axis, rank); + if (axis < 0) axis += rank; // Set dimensions for input and indices tensor. uint32_t indRank = getUniform<uint32_t>(1, 5); @@ -137,7 +137,7 @@ static void gatherFinalizer(RandomOperation* op) { int32_t axis = op->inputs[1]->value<int32_t>(); - axis = toPositiveAxis(axis, op->inputs[0]->dimensions.size()); + if (axis < 0) axis += op->inputs[0]->dimensions.size(); uint32_t dimValue = op->inputs[0]->dimensions[axis].getValue(); uint32_t numElements = op->inputs[2]->getNumberOfElements(); for (uint32_t i = 0; i < numElements; i++) { @@ -238,15 +238,6 @@ uint32_t rank = op->inputs[0]->dimensions.size(); int32_t* begin = reinterpret_cast<int32_t*>(op->inputs[1]->buffer.data()); int32_t* size = reinterpret_cast<int32_t*>(op->inputs[2]->buffer.data()); - - NN_FUZZER_CHECK(op->inputs[1]->buffer.size() >= rank) - << "input[1] buffer size " << op->inputs[1]->buffer.size() << " is smaller than rank " - << rank; - - NN_FUZZER_CHECK(op->inputs[2]->buffer.size() >= rank) - << "input[1] buffer size " << op->inputs[2]->buffer.size() << " is smaller than rank " - << rank; - for (uint32_t i = 0; i < rank; i++) { int32_t inputSize = op->inputs[0]->dimensions[i].getValue(); int32_t outputSize = op->outputs[0]->dimensions[i].getValue(); @@ -307,15 +298,6 @@ int32_t* begin = reinterpret_cast<int32_t*>(op->inputs[1]->buffer.data()); int32_t* end = reinterpret_cast<int32_t*>(op->inputs[2]->buffer.data()); std::vector<bool> beginMask(rank, false), endMask(rank, false); - - NN_FUZZER_CHECK(op->inputs[1]->buffer.size() >= rank) - << "input[1] buffer size " << op->inputs[1]->buffer.size() << " is smaller than rank " - << rank; - - NN_FUZZER_CHECK(op->inputs[2]->buffer.size() >= 
rank) - << "input[1] buffer size " << op->inputs[2]->buffer.size() << " is smaller than rank " - << rank; - int32_t shrinkMask = op->inputs[6]->value<int32_t>(); for (uint32_t i = 0, o = 0; i < rank; i++) { int32_t inputSize = op->inputs[0]->dimensions[i].getValue();
diff --git a/runtime/test/generated/spec_V1_0/add.example.cpp b/runtime/test/generated/spec_V1_0/add.example.cpp index 9af0455..eff273a 100644 --- a/runtime/test/generated/spec_V1_0/add.example.cpp +++ b/runtime/test/generated/spec_V1_0/add.example.cpp
@@ -130,7 +130,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -160,7 +160,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/add_broadcast_quant8.example.cpp b/runtime/test/generated/spec_V1_0/add_broadcast_quant8.example.cpp index 270ba06..118e35a 100644 --- a/runtime/test/generated/spec_V1_0/add_broadcast_quant8.example.cpp +++ b/runtime/test/generated/spec_V1_0/add_broadcast_quant8.example.cpp
@@ -130,7 +130,7 @@ .scale = 2.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -160,7 +160,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/add_quant8.example.cpp b/runtime/test/generated/spec_V1_0/add_quant8.example.cpp index 1034c85..c15decb 100644 --- a/runtime/test/generated/spec_V1_0/add_quant8.example.cpp +++ b/runtime/test/generated/spec_V1_0/add_quant8.example.cpp
@@ -130,7 +130,7 @@ .scale = 2.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -160,7 +160,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/avg_pool_float_1.example.cpp b/runtime/test/generated/spec_V1_0/avg_pool_float_1.example.cpp index 04e5585..17aac62 100644 --- a/runtime/test/generated/spec_V1_0/avg_pool_float_1.example.cpp +++ b/runtime/test/generated/spec_V1_0/avg_pool_float_1.example.cpp
@@ -150,7 +150,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/avg_pool_float_2.example.cpp b/runtime/test/generated/spec_V1_0/avg_pool_float_2.example.cpp index 215df10..fc55bb2 100644 --- a/runtime/test/generated/spec_V1_0/avg_pool_float_2.example.cpp +++ b/runtime/test/generated/spec_V1_0/avg_pool_float_2.example.cpp
@@ -170,7 +170,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/avg_pool_float_3.example.cpp b/runtime/test/generated/spec_V1_0/avg_pool_float_3.example.cpp index 6d782ad..f307bc0 100644 --- a/runtime/test/generated/spec_V1_0/avg_pool_float_3.example.cpp +++ b/runtime/test/generated/spec_V1_0/avg_pool_float_3.example.cpp
@@ -170,7 +170,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/avg_pool_float_4.example.cpp b/runtime/test/generated/spec_V1_0/avg_pool_float_4.example.cpp index 19393ce..a5a2fa8 100644 --- a/runtime/test/generated/spec_V1_0/avg_pool_float_4.example.cpp +++ b/runtime/test/generated/spec_V1_0/avg_pool_float_4.example.cpp
@@ -170,7 +170,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/avg_pool_float_5.example.cpp b/runtime/test/generated/spec_V1_0/avg_pool_float_5.example.cpp index bad3846..ee5cbcc 100644 --- a/runtime/test/generated/spec_V1_0/avg_pool_float_5.example.cpp +++ b/runtime/test/generated/spec_V1_0/avg_pool_float_5.example.cpp
@@ -150,7 +150,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/avg_pool_quant8_1.example.cpp b/runtime/test/generated/spec_V1_0/avg_pool_quant8_1.example.cpp index 42fe535..4de218d 100644 --- a/runtime/test/generated/spec_V1_0/avg_pool_quant8_1.example.cpp +++ b/runtime/test/generated/spec_V1_0/avg_pool_quant8_1.example.cpp
@@ -150,7 +150,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/avg_pool_quant8_2.example.cpp b/runtime/test/generated/spec_V1_0/avg_pool_quant8_2.example.cpp index 5ba022b..0a4689d 100644 --- a/runtime/test/generated/spec_V1_0/avg_pool_quant8_2.example.cpp +++ b/runtime/test/generated/spec_V1_0/avg_pool_quant8_2.example.cpp
@@ -170,7 +170,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/avg_pool_quant8_3.example.cpp b/runtime/test/generated/spec_V1_0/avg_pool_quant8_3.example.cpp index 93fdff5..314c575 100644 --- a/runtime/test/generated/spec_V1_0/avg_pool_quant8_3.example.cpp +++ b/runtime/test/generated/spec_V1_0/avg_pool_quant8_3.example.cpp
@@ -170,7 +170,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/avg_pool_quant8_4.example.cpp b/runtime/test/generated/spec_V1_0/avg_pool_quant8_4.example.cpp index e090c59..efe44a0 100644 --- a/runtime/test/generated/spec_V1_0/avg_pool_quant8_4.example.cpp +++ b/runtime/test/generated/spec_V1_0/avg_pool_quant8_4.example.cpp
@@ -150,7 +150,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/avg_pool_quant8_5.example.cpp b/runtime/test/generated/spec_V1_0/avg_pool_quant8_5.example.cpp index 056c2c3..df049ca 100644 --- a/runtime/test/generated/spec_V1_0/avg_pool_quant8_5.example.cpp +++ b/runtime/test/generated/spec_V1_0/avg_pool_quant8_5.example.cpp
@@ -150,7 +150,7 @@ .scale = 0.0625f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/concat_float_1.example.cpp b/runtime/test/generated/spec_V1_0/concat_float_1.example.cpp index 56d47fb..6cc440a 100644 --- a/runtime/test/generated/spec_V1_0/concat_float_1.example.cpp +++ b/runtime/test/generated/spec_V1_0/concat_float_1.example.cpp
@@ -130,7 +130,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -160,7 +160,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/concat_float_2.example.cpp b/runtime/test/generated/spec_V1_0/concat_float_2.example.cpp index 4a27714..42c0b42 100644 --- a/runtime/test/generated/spec_V1_0/concat_float_2.example.cpp +++ b/runtime/test/generated/spec_V1_0/concat_float_2.example.cpp
@@ -130,7 +130,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -160,7 +160,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/concat_float_3.example.cpp b/runtime/test/generated/spec_V1_0/concat_float_3.example.cpp index 41e84f4..e74d6fb 100644 --- a/runtime/test/generated/spec_V1_0/concat_float_3.example.cpp +++ b/runtime/test/generated/spec_V1_0/concat_float_3.example.cpp
@@ -130,7 +130,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -160,7 +160,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/concat_quant8_1.example.cpp b/runtime/test/generated/spec_V1_0/concat_quant8_1.example.cpp index 9071deb..311d41f 100644 --- a/runtime/test/generated/spec_V1_0/concat_quant8_1.example.cpp +++ b/runtime/test/generated/spec_V1_0/concat_quant8_1.example.cpp
@@ -130,7 +130,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -160,7 +160,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/concat_quant8_2.example.cpp b/runtime/test/generated/spec_V1_0/concat_quant8_2.example.cpp index 17f7cc4..32b4fab 100644 --- a/runtime/test/generated/spec_V1_0/concat_quant8_2.example.cpp +++ b/runtime/test/generated/spec_V1_0/concat_quant8_2.example.cpp
@@ -130,7 +130,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -160,7 +160,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/concat_quant8_3.example.cpp b/runtime/test/generated/spec_V1_0/concat_quant8_3.example.cpp index ffbf124..cb5f90f 100644 --- a/runtime/test/generated/spec_V1_0/concat_quant8_3.example.cpp +++ b/runtime/test/generated/spec_V1_0/concat_quant8_3.example.cpp
@@ -130,7 +130,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -160,7 +160,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/conv_1_h3_w2_SAME.example.cpp b/runtime/test/generated/spec_V1_0/conv_1_h3_w2_SAME.example.cpp index 0a68d37..99a974a 100644 --- a/runtime/test/generated/spec_V1_0/conv_1_h3_w2_SAME.example.cpp +++ b/runtime/test/generated/spec_V1_0/conv_1_h3_w2_SAME.example.cpp
@@ -210,7 +210,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -458,7 +458,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -488,7 +488,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -518,7 +518,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -774,7 +774,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1022,7 +1022,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1052,7 +1052,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1082,7 +1082,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/conv_1_h3_w2_VALID.example.cpp b/runtime/test/generated/spec_V1_0/conv_1_h3_w2_VALID.example.cpp index adea823..0196d45 100644 --- a/runtime/test/generated/spec_V1_0/conv_1_h3_w2_VALID.example.cpp +++ b/runtime/test/generated/spec_V1_0/conv_1_h3_w2_VALID.example.cpp
@@ -210,7 +210,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -458,7 +458,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -488,7 +488,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -518,7 +518,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -774,7 +774,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1022,7 +1022,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1052,7 +1052,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1082,7 +1082,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/conv_3_h3_w2_SAME.example.cpp b/runtime/test/generated/spec_V1_0/conv_3_h3_w2_SAME.example.cpp index b6d7a93..303f796 100644 --- a/runtime/test/generated/spec_V1_0/conv_3_h3_w2_SAME.example.cpp +++ b/runtime/test/generated/spec_V1_0/conv_3_h3_w2_SAME.example.cpp
@@ -210,7 +210,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -458,7 +458,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -488,7 +488,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -518,7 +518,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -774,7 +774,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1022,7 +1022,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1052,7 +1052,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1082,7 +1082,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/conv_3_h3_w2_VALID.example.cpp b/runtime/test/generated/spec_V1_0/conv_3_h3_w2_VALID.example.cpp index 5e0eb61..c48ba5a 100644 --- a/runtime/test/generated/spec_V1_0/conv_3_h3_w2_VALID.example.cpp +++ b/runtime/test/generated/spec_V1_0/conv_3_h3_w2_VALID.example.cpp
@@ -210,7 +210,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -458,7 +458,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -488,7 +488,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -518,7 +518,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -774,7 +774,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1022,7 +1022,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1052,7 +1052,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1082,7 +1082,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/conv_float.example.cpp b/runtime/test/generated/spec_V1_0/conv_float.example.cpp index db5a6fb..8b73bbd 100644 --- a/runtime/test/generated/spec_V1_0/conv_float.example.cpp +++ b/runtime/test/generated/spec_V1_0/conv_float.example.cpp
@@ -190,7 +190,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -418,7 +418,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -448,7 +448,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -478,7 +478,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/conv_float_2.example.cpp b/runtime/test/generated/spec_V1_0/conv_float_2.example.cpp index 9c6db74..5be9513 100644 --- a/runtime/test/generated/spec_V1_0/conv_float_2.example.cpp +++ b/runtime/test/generated/spec_V1_0/conv_float_2.example.cpp
@@ -190,7 +190,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -418,7 +418,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -448,7 +448,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -478,7 +478,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/conv_float_channels.example.cpp b/runtime/test/generated/spec_V1_0/conv_float_channels.example.cpp index 4e9ed4c..65e242e 100644 --- a/runtime/test/generated/spec_V1_0/conv_float_channels.example.cpp +++ b/runtime/test/generated/spec_V1_0/conv_float_channels.example.cpp
@@ -190,7 +190,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -418,7 +418,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -448,7 +448,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -478,7 +478,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/conv_float_channels_weights_as_inputs.example.cpp b/runtime/test/generated/spec_V1_0/conv_float_channels_weights_as_inputs.example.cpp index 328df8a..93a88bf 100644 --- a/runtime/test/generated/spec_V1_0/conv_float_channels_weights_as_inputs.example.cpp +++ b/runtime/test/generated/spec_V1_0/conv_float_channels_weights_as_inputs.example.cpp
@@ -190,7 +190,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -220,7 +220,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -250,7 +250,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/conv_float_large.example.cpp b/runtime/test/generated/spec_V1_0/conv_float_large.example.cpp index c0b8c7f..8a41f38 100644 --- a/runtime/test/generated/spec_V1_0/conv_float_large.example.cpp +++ b/runtime/test/generated/spec_V1_0/conv_float_large.example.cpp
@@ -190,7 +190,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -418,7 +418,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -448,7 +448,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -478,7 +478,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/conv_float_large_weights_as_inputs.example.cpp b/runtime/test/generated/spec_V1_0/conv_float_large_weights_as_inputs.example.cpp index 74eef68..2004a0f 100644 --- a/runtime/test/generated/spec_V1_0/conv_float_large_weights_as_inputs.example.cpp +++ b/runtime/test/generated/spec_V1_0/conv_float_large_weights_as_inputs.example.cpp
@@ -190,7 +190,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -220,7 +220,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -250,7 +250,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/conv_float_weights_as_inputs.example.cpp b/runtime/test/generated/spec_V1_0/conv_float_weights_as_inputs.example.cpp index 626c924..41b1070 100644 --- a/runtime/test/generated/spec_V1_0/conv_float_weights_as_inputs.example.cpp +++ b/runtime/test/generated/spec_V1_0/conv_float_weights_as_inputs.example.cpp
@@ -190,7 +190,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -220,7 +220,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -250,7 +250,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/conv_quant8.example.cpp b/runtime/test/generated/spec_V1_0/conv_quant8.example.cpp index 18ac112..bcf6db8 100644 --- a/runtime/test/generated/spec_V1_0/conv_quant8.example.cpp +++ b/runtime/test/generated/spec_V1_0/conv_quant8.example.cpp
@@ -190,7 +190,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -418,7 +418,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -448,7 +448,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/conv_quant8_2.example.cpp b/runtime/test/generated/spec_V1_0/conv_quant8_2.example.cpp index 7da6652..d9297c5 100644 --- a/runtime/test/generated/spec_V1_0/conv_quant8_2.example.cpp +++ b/runtime/test/generated/spec_V1_0/conv_quant8_2.example.cpp
@@ -210,7 +210,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 127 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({127}), .dimensions = {1}, @@ -458,7 +458,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 127 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({127}), .dimensions = {1}, @@ -488,7 +488,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 127 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({127}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/conv_quant8_channels.example.cpp b/runtime/test/generated/spec_V1_0/conv_quant8_channels.example.cpp index 37ce80f..c353f83 100644 --- a/runtime/test/generated/spec_V1_0/conv_quant8_channels.example.cpp +++ b/runtime/test/generated/spec_V1_0/conv_quant8_channels.example.cpp
@@ -190,7 +190,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -418,7 +418,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -448,7 +448,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/conv_quant8_channels_weights_as_inputs.example.cpp b/runtime/test/generated/spec_V1_0/conv_quant8_channels_weights_as_inputs.example.cpp index 0866a19..fe9d025 100644 --- a/runtime/test/generated/spec_V1_0/conv_quant8_channels_weights_as_inputs.example.cpp +++ b/runtime/test/generated/spec_V1_0/conv_quant8_channels_weights_as_inputs.example.cpp
@@ -190,7 +190,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -220,7 +220,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/conv_quant8_large.example.cpp b/runtime/test/generated/spec_V1_0/conv_quant8_large.example.cpp index 0e1a34c..464bca9 100644 --- a/runtime/test/generated/spec_V1_0/conv_quant8_large.example.cpp +++ b/runtime/test/generated/spec_V1_0/conv_quant8_large.example.cpp
@@ -190,7 +190,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -418,7 +418,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -448,7 +448,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/conv_quant8_large_weights_as_inputs.example.cpp b/runtime/test/generated/spec_V1_0/conv_quant8_large_weights_as_inputs.example.cpp index 7fd4935..1bae3d4 100644 --- a/runtime/test/generated/spec_V1_0/conv_quant8_large_weights_as_inputs.example.cpp +++ b/runtime/test/generated/spec_V1_0/conv_quant8_large_weights_as_inputs.example.cpp
@@ -190,7 +190,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -220,7 +220,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/conv_quant8_overflow.example.cpp b/runtime/test/generated/spec_V1_0/conv_quant8_overflow.example.cpp index b113ec0..d5e97f1 100644 --- a/runtime/test/generated/spec_V1_0/conv_quant8_overflow.example.cpp +++ b/runtime/test/generated/spec_V1_0/conv_quant8_overflow.example.cpp
@@ -190,7 +190,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -418,7 +418,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -448,7 +448,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/conv_quant8_overflow_weights_as_inputs.example.cpp b/runtime/test/generated/spec_V1_0/conv_quant8_overflow_weights_as_inputs.example.cpp index 271cbac..826c384 100644 --- a/runtime/test/generated/spec_V1_0/conv_quant8_overflow_weights_as_inputs.example.cpp +++ b/runtime/test/generated/spec_V1_0/conv_quant8_overflow_weights_as_inputs.example.cpp
@@ -190,7 +190,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -220,7 +220,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/conv_quant8_weights_as_inputs.example.cpp b/runtime/test/generated/spec_V1_0/conv_quant8_weights_as_inputs.example.cpp index db1d98d..1cffaee 100644 --- a/runtime/test/generated/spec_V1_0/conv_quant8_weights_as_inputs.example.cpp +++ b/runtime/test/generated/spec_V1_0/conv_quant8_weights_as_inputs.example.cpp
@@ -190,7 +190,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -220,7 +220,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/depth_to_space_float_1.example.cpp b/runtime/test/generated/spec_V1_0/depth_to_space_float_1.example.cpp index 504ddab..1d76732 100644 --- a/runtime/test/generated/spec_V1_0/depth_to_space_float_1.example.cpp +++ b/runtime/test/generated/spec_V1_0/depth_to_space_float_1.example.cpp
@@ -110,7 +110,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/depth_to_space_float_2.example.cpp b/runtime/test/generated/spec_V1_0/depth_to_space_float_2.example.cpp index 6cc356b..826b87d 100644 --- a/runtime/test/generated/spec_V1_0/depth_to_space_float_2.example.cpp +++ b/runtime/test/generated/spec_V1_0/depth_to_space_float_2.example.cpp
@@ -110,7 +110,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/depth_to_space_float_3.example.cpp b/runtime/test/generated/spec_V1_0/depth_to_space_float_3.example.cpp index 21f3af2..3352148 100644 --- a/runtime/test/generated/spec_V1_0/depth_to_space_float_3.example.cpp +++ b/runtime/test/generated/spec_V1_0/depth_to_space_float_3.example.cpp
@@ -110,7 +110,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/depth_to_space_quant8_1.example.cpp b/runtime/test/generated/spec_V1_0/depth_to_space_quant8_1.example.cpp index ba8d451..3a629a8 100644 --- a/runtime/test/generated/spec_V1_0/depth_to_space_quant8_1.example.cpp +++ b/runtime/test/generated/spec_V1_0/depth_to_space_quant8_1.example.cpp
@@ -110,7 +110,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/depth_to_space_quant8_2.example.cpp b/runtime/test/generated/spec_V1_0/depth_to_space_quant8_2.example.cpp index 0f9ecf4..2149427 100644 --- a/runtime/test/generated/spec_V1_0/depth_to_space_quant8_2.example.cpp +++ b/runtime/test/generated/spec_V1_0/depth_to_space_quant8_2.example.cpp
@@ -110,7 +110,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/depthwise_conv.example.cpp b/runtime/test/generated/spec_V1_0/depthwise_conv.example.cpp index d1236ad..69c7225 100644 --- a/runtime/test/generated/spec_V1_0/depthwise_conv.example.cpp +++ b/runtime/test/generated/spec_V1_0/depthwise_conv.example.cpp
@@ -230,7 +230,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -498,7 +498,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -528,7 +528,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -558,7 +558,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -834,7 +834,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1102,7 +1102,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1132,7 +1132,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1162,7 +1162,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/depthwise_conv2d_float.example.cpp b/runtime/test/generated/spec_V1_0/depthwise_conv2d_float.example.cpp index bc89895..0694c7e 100644 --- a/runtime/test/generated/spec_V1_0/depthwise_conv2d_float.example.cpp +++ b/runtime/test/generated/spec_V1_0/depthwise_conv2d_float.example.cpp
@@ -210,7 +210,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -458,7 +458,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -488,7 +488,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -518,7 +518,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/depthwise_conv2d_float_2.example.cpp b/runtime/test/generated/spec_V1_0/depthwise_conv2d_float_2.example.cpp index f531ba4..41d3dfb 100644 --- a/runtime/test/generated/spec_V1_0/depthwise_conv2d_float_2.example.cpp +++ b/runtime/test/generated/spec_V1_0/depthwise_conv2d_float_2.example.cpp
@@ -210,7 +210,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -458,7 +458,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -488,7 +488,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -518,7 +518,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/depthwise_conv2d_float_large.example.cpp b/runtime/test/generated/spec_V1_0/depthwise_conv2d_float_large.example.cpp index 8f6d138..8b6f724 100644 --- a/runtime/test/generated/spec_V1_0/depthwise_conv2d_float_large.example.cpp +++ b/runtime/test/generated/spec_V1_0/depthwise_conv2d_float_large.example.cpp
@@ -210,7 +210,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -458,7 +458,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -488,7 +488,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -518,7 +518,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/depthwise_conv2d_float_large_2.example.cpp b/runtime/test/generated/spec_V1_0/depthwise_conv2d_float_large_2.example.cpp index 523fc10..61a0bc2 100644 --- a/runtime/test/generated/spec_V1_0/depthwise_conv2d_float_large_2.example.cpp +++ b/runtime/test/generated/spec_V1_0/depthwise_conv2d_float_large_2.example.cpp
@@ -210,7 +210,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -458,7 +458,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -488,7 +488,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -518,7 +518,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/depthwise_conv2d_float_large_2_weights_as_inputs.example.cpp b/runtime/test/generated/spec_V1_0/depthwise_conv2d_float_large_2_weights_as_inputs.example.cpp index c6aa014..def6bd1 100644 --- a/runtime/test/generated/spec_V1_0/depthwise_conv2d_float_large_2_weights_as_inputs.example.cpp +++ b/runtime/test/generated/spec_V1_0/depthwise_conv2d_float_large_2_weights_as_inputs.example.cpp
@@ -210,7 +210,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -240,7 +240,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -270,7 +270,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/depthwise_conv2d_float_large_weights_as_inputs.example.cpp b/runtime/test/generated/spec_V1_0/depthwise_conv2d_float_large_weights_as_inputs.example.cpp index a333445..43527fb 100644 --- a/runtime/test/generated/spec_V1_0/depthwise_conv2d_float_large_weights_as_inputs.example.cpp +++ b/runtime/test/generated/spec_V1_0/depthwise_conv2d_float_large_weights_as_inputs.example.cpp
@@ -210,7 +210,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -240,7 +240,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -270,7 +270,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/depthwise_conv2d_float_weights_as_inputs.example.cpp b/runtime/test/generated/spec_V1_0/depthwise_conv2d_float_weights_as_inputs.example.cpp index ac71174..2fab0ed 100644 --- a/runtime/test/generated/spec_V1_0/depthwise_conv2d_float_weights_as_inputs.example.cpp +++ b/runtime/test/generated/spec_V1_0/depthwise_conv2d_float_weights_as_inputs.example.cpp
@@ -210,7 +210,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -240,7 +240,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -270,7 +270,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/depthwise_conv2d_quant8.example.cpp b/runtime/test/generated/spec_V1_0/depthwise_conv2d_quant8.example.cpp index 5a04252..1f9f61e 100644 --- a/runtime/test/generated/spec_V1_0/depthwise_conv2d_quant8.example.cpp +++ b/runtime/test/generated/spec_V1_0/depthwise_conv2d_quant8.example.cpp
@@ -210,7 +210,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -458,7 +458,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -488,7 +488,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/depthwise_conv2d_quant8_2.example.cpp b/runtime/test/generated/spec_V1_0/depthwise_conv2d_quant8_2.example.cpp index 33e3660..e8a5b69 100644 --- a/runtime/test/generated/spec_V1_0/depthwise_conv2d_quant8_2.example.cpp +++ b/runtime/test/generated/spec_V1_0/depthwise_conv2d_quant8_2.example.cpp
@@ -210,7 +210,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 127 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({127}), .dimensions = {1}, @@ -458,7 +458,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 127 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({127}), .dimensions = {1}, @@ -488,7 +488,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 127 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({127}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/depthwise_conv2d_quant8_large.example.cpp b/runtime/test/generated/spec_V1_0/depthwise_conv2d_quant8_large.example.cpp index 96590b5..a890b6e 100644 --- a/runtime/test/generated/spec_V1_0/depthwise_conv2d_quant8_large.example.cpp +++ b/runtime/test/generated/spec_V1_0/depthwise_conv2d_quant8_large.example.cpp
@@ -210,7 +210,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -458,7 +458,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -488,7 +488,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/depthwise_conv2d_quant8_large_weights_as_inputs.example.cpp b/runtime/test/generated/spec_V1_0/depthwise_conv2d_quant8_large_weights_as_inputs.example.cpp index 6056739..401f1ea 100644 --- a/runtime/test/generated/spec_V1_0/depthwise_conv2d_quant8_large_weights_as_inputs.example.cpp +++ b/runtime/test/generated/spec_V1_0/depthwise_conv2d_quant8_large_weights_as_inputs.example.cpp
@@ -210,7 +210,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -240,7 +240,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/depthwise_conv2d_quant8_weights_as_inputs.example.cpp b/runtime/test/generated/spec_V1_0/depthwise_conv2d_quant8_weights_as_inputs.example.cpp index a7a7d56..e30fdd4 100644 --- a/runtime/test/generated/spec_V1_0/depthwise_conv2d_quant8_weights_as_inputs.example.cpp +++ b/runtime/test/generated/spec_V1_0/depthwise_conv2d_quant8_weights_as_inputs.example.cpp
@@ -210,7 +210,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -240,7 +240,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/dequantize.example.cpp b/runtime/test/generated/spec_V1_0/dequantize.example.cpp index 51075bb..2594435 100644 --- a/runtime/test/generated/spec_V1_0/dequantize.example.cpp +++ b/runtime/test/generated/spec_V1_0/dequantize.example.cpp
@@ -90,7 +90,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/embedding_lookup.example.cpp b/runtime/test/generated/spec_V1_0/embedding_lookup.example.cpp index 6bd38ed..2b8b48d 100644 --- a/runtime/test/generated/spec_V1_0/embedding_lookup.example.cpp +++ b/runtime/test/generated/spec_V1_0/embedding_lookup.example.cpp
@@ -110,7 +110,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/floor.example.cpp b/runtime/test/generated/spec_V1_0/floor.example.cpp index 8bd54ad..b40c33d 100644 --- a/runtime/test/generated/spec_V1_0/floor.example.cpp +++ b/runtime/test/generated/spec_V1_0/floor.example.cpp
@@ -90,7 +90,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/fully_connected_float.example.cpp b/runtime/test/generated/spec_V1_0/fully_connected_float.example.cpp index f83f1c5..7e6461e 100644 --- a/runtime/test/generated/spec_V1_0/fully_connected_float.example.cpp +++ b/runtime/test/generated/spec_V1_0/fully_connected_float.example.cpp
@@ -150,7 +150,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -338,7 +338,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -368,7 +368,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -398,7 +398,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/fully_connected_float_2.example.cpp b/runtime/test/generated/spec_V1_0/fully_connected_float_2.example.cpp index 696a907..95fc9c2 100644 --- a/runtime/test/generated/spec_V1_0/fully_connected_float_2.example.cpp +++ b/runtime/test/generated/spec_V1_0/fully_connected_float_2.example.cpp
@@ -150,7 +150,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -338,7 +338,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -368,7 +368,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -398,7 +398,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/fully_connected_float_3.example.cpp b/runtime/test/generated/spec_V1_0/fully_connected_float_3.example.cpp index 6a433f5..17052b8 100644 --- a/runtime/test/generated/spec_V1_0/fully_connected_float_3.example.cpp +++ b/runtime/test/generated/spec_V1_0/fully_connected_float_3.example.cpp
@@ -150,7 +150,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -338,7 +338,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -368,7 +368,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -398,7 +398,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/fully_connected_float_large.example.cpp b/runtime/test/generated/spec_V1_0/fully_connected_float_large.example.cpp index dda0183..c1fe2f2 100644 --- a/runtime/test/generated/spec_V1_0/fully_connected_float_large.example.cpp +++ b/runtime/test/generated/spec_V1_0/fully_connected_float_large.example.cpp
@@ -150,7 +150,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -338,7 +338,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -368,7 +368,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -398,7 +398,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/fully_connected_float_large_weights_as_inputs.example.cpp b/runtime/test/generated/spec_V1_0/fully_connected_float_large_weights_as_inputs.example.cpp index 165c614..a9f4090 100644 --- a/runtime/test/generated/spec_V1_0/fully_connected_float_large_weights_as_inputs.example.cpp +++ b/runtime/test/generated/spec_V1_0/fully_connected_float_large_weights_as_inputs.example.cpp
@@ -150,7 +150,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -180,7 +180,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -210,7 +210,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/fully_connected_float_weights_as_inputs.example.cpp b/runtime/test/generated/spec_V1_0/fully_connected_float_weights_as_inputs.example.cpp index 592e61d..f3f6867 100644 --- a/runtime/test/generated/spec_V1_0/fully_connected_float_weights_as_inputs.example.cpp +++ b/runtime/test/generated/spec_V1_0/fully_connected_float_weights_as_inputs.example.cpp
@@ -150,7 +150,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -180,7 +180,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -210,7 +210,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/fully_connected_quant8.example.cpp b/runtime/test/generated/spec_V1_0/fully_connected_quant8.example.cpp index 4a30655..e46253b 100644 --- a/runtime/test/generated/spec_V1_0/fully_connected_quant8.example.cpp +++ b/runtime/test/generated/spec_V1_0/fully_connected_quant8.example.cpp
@@ -150,7 +150,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -338,7 +338,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -368,7 +368,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/fully_connected_quant8_2.example.cpp b/runtime/test/generated/spec_V1_0/fully_connected_quant8_2.example.cpp index 344643d..9fcc13d 100644 --- a/runtime/test/generated/spec_V1_0/fully_connected_quant8_2.example.cpp +++ b/runtime/test/generated/spec_V1_0/fully_connected_quant8_2.example.cpp
@@ -150,7 +150,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 127 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({127}), .dimensions = {1}, @@ -338,7 +338,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 127 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({127}), .dimensions = {1}, @@ -368,7 +368,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 127 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({127}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/fully_connected_quant8_large.example.cpp b/runtime/test/generated/spec_V1_0/fully_connected_quant8_large.example.cpp index 3ab2e2a..457fbaf 100644 --- a/runtime/test/generated/spec_V1_0/fully_connected_quant8_large.example.cpp +++ b/runtime/test/generated/spec_V1_0/fully_connected_quant8_large.example.cpp
@@ -150,7 +150,7 @@ .scale = 0.2f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -338,7 +338,7 @@ .scale = 0.2f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -368,7 +368,7 @@ .scale = 0.2f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/fully_connected_quant8_large_weights_as_inputs.example.cpp b/runtime/test/generated/spec_V1_0/fully_connected_quant8_large_weights_as_inputs.example.cpp index 1abf974..6fe105c 100644 --- a/runtime/test/generated/spec_V1_0/fully_connected_quant8_large_weights_as_inputs.example.cpp +++ b/runtime/test/generated/spec_V1_0/fully_connected_quant8_large_weights_as_inputs.example.cpp
@@ -150,7 +150,7 @@ .scale = 0.2f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -180,7 +180,7 @@ .scale = 0.2f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/fully_connected_quant8_weights_as_inputs.example.cpp b/runtime/test/generated/spec_V1_0/fully_connected_quant8_weights_as_inputs.example.cpp index 13fdad0..b590040 100644 --- a/runtime/test/generated/spec_V1_0/fully_connected_quant8_weights_as_inputs.example.cpp +++ b/runtime/test/generated/spec_V1_0/fully_connected_quant8_weights_as_inputs.example.cpp
@@ -150,7 +150,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -180,7 +180,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/hashtable_lookup_float.example.cpp b/runtime/test/generated/spec_V1_0/hashtable_lookup_float.example.cpp index 8cf5a3f..9181949 100644 --- a/runtime/test/generated/spec_V1_0/hashtable_lookup_float.example.cpp +++ b/runtime/test/generated/spec_V1_0/hashtable_lookup_float.example.cpp
@@ -150,7 +150,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/hashtable_lookup_quant8.example.cpp b/runtime/test/generated/spec_V1_0/hashtable_lookup_quant8.example.cpp index 96fad23..909c2af 100644 --- a/runtime/test/generated/spec_V1_0/hashtable_lookup_quant8.example.cpp +++ b/runtime/test/generated/spec_V1_0/hashtable_lookup_quant8.example.cpp
@@ -150,7 +150,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/l2_normalization.example.cpp b/runtime/test/generated/spec_V1_0/l2_normalization.example.cpp index 1b1c655..a8b5f39 100644 --- a/runtime/test/generated/spec_V1_0/l2_normalization.example.cpp +++ b/runtime/test/generated/spec_V1_0/l2_normalization.example.cpp
@@ -90,7 +90,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/l2_normalization_2.example.cpp b/runtime/test/generated/spec_V1_0/l2_normalization_2.example.cpp index 0265cbe..8f5ba37 100644 --- a/runtime/test/generated/spec_V1_0/l2_normalization_2.example.cpp +++ b/runtime/test/generated/spec_V1_0/l2_normalization_2.example.cpp
@@ -90,7 +90,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/l2_normalization_large.example.cpp b/runtime/test/generated/spec_V1_0/l2_normalization_large.example.cpp index c414ce6..b1c490a 100644 --- a/runtime/test/generated/spec_V1_0/l2_normalization_large.example.cpp +++ b/runtime/test/generated/spec_V1_0/l2_normalization_large.example.cpp
@@ -90,7 +90,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/l2_pool_float.example.cpp b/runtime/test/generated/spec_V1_0/l2_pool_float.example.cpp index 6eb01e6..dea5b61 100644 --- a/runtime/test/generated/spec_V1_0/l2_pool_float.example.cpp +++ b/runtime/test/generated/spec_V1_0/l2_pool_float.example.cpp
@@ -150,7 +150,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/l2_pool_float_2.example.cpp b/runtime/test/generated/spec_V1_0/l2_pool_float_2.example.cpp index 9e1bb1b..6e2b131 100644 --- a/runtime/test/generated/spec_V1_0/l2_pool_float_2.example.cpp +++ b/runtime/test/generated/spec_V1_0/l2_pool_float_2.example.cpp
@@ -150,7 +150,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/l2_pool_float_large.example.cpp b/runtime/test/generated/spec_V1_0/l2_pool_float_large.example.cpp index 1b69554..a222fcd 100644 --- a/runtime/test/generated/spec_V1_0/l2_pool_float_large.example.cpp +++ b/runtime/test/generated/spec_V1_0/l2_pool_float_large.example.cpp
@@ -210,7 +210,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/local_response_norm_float_1.example.cpp b/runtime/test/generated/spec_V1_0/local_response_norm_float_1.example.cpp index 97a218c..cbddf13 100644 --- a/runtime/test/generated/spec_V1_0/local_response_norm_float_1.example.cpp +++ b/runtime/test/generated/spec_V1_0/local_response_norm_float_1.example.cpp
@@ -170,7 +170,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/local_response_norm_float_4.example.cpp b/runtime/test/generated/spec_V1_0/local_response_norm_float_4.example.cpp index 5bcd921..24b9bb8 100644 --- a/runtime/test/generated/spec_V1_0/local_response_norm_float_4.example.cpp +++ b/runtime/test/generated/spec_V1_0/local_response_norm_float_4.example.cpp
@@ -170,7 +170,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/logistic_float_1.example.cpp b/runtime/test/generated/spec_V1_0/logistic_float_1.example.cpp index 4805b29..8367a63 100644 --- a/runtime/test/generated/spec_V1_0/logistic_float_1.example.cpp +++ b/runtime/test/generated/spec_V1_0/logistic_float_1.example.cpp
@@ -90,7 +90,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/logistic_float_2.example.cpp b/runtime/test/generated/spec_V1_0/logistic_float_2.example.cpp index cb4878f..d2ef7f8 100644 --- a/runtime/test/generated/spec_V1_0/logistic_float_2.example.cpp +++ b/runtime/test/generated/spec_V1_0/logistic_float_2.example.cpp
@@ -90,7 +90,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/logistic_quant8_1.example.cpp b/runtime/test/generated/spec_V1_0/logistic_quant8_1.example.cpp index 7d29467..e2e08e8 100644 --- a/runtime/test/generated/spec_V1_0/logistic_quant8_1.example.cpp +++ b/runtime/test/generated/spec_V1_0/logistic_quant8_1.example.cpp
@@ -90,7 +90,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/logistic_quant8_2.example.cpp b/runtime/test/generated/spec_V1_0/logistic_quant8_2.example.cpp index 48005c8..57ecab1 100644 --- a/runtime/test/generated/spec_V1_0/logistic_quant8_2.example.cpp +++ b/runtime/test/generated/spec_V1_0/logistic_quant8_2.example.cpp
@@ -90,7 +90,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/lsh_projection.example.cpp b/runtime/test/generated/spec_V1_0/lsh_projection.example.cpp index a8898a4..1ff5181 100644 --- a/runtime/test/generated/spec_V1_0/lsh_projection.example.cpp +++ b/runtime/test/generated/spec_V1_0/lsh_projection.example.cpp
@@ -150,7 +150,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -338,7 +338,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -368,7 +368,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/lsh_projection_2.example.cpp b/runtime/test/generated/spec_V1_0/lsh_projection_2.example.cpp index 0f458e9..3c03a76 100644 --- a/runtime/test/generated/spec_V1_0/lsh_projection_2.example.cpp +++ b/runtime/test/generated/spec_V1_0/lsh_projection_2.example.cpp
@@ -227,7 +227,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/lsh_projection_weights_as_inputs.example.cpp b/runtime/test/generated/spec_V1_0/lsh_projection_weights_as_inputs.example.cpp index 54167d5..528f759 100644 --- a/runtime/test/generated/spec_V1_0/lsh_projection_weights_as_inputs.example.cpp +++ b/runtime/test/generated/spec_V1_0/lsh_projection_weights_as_inputs.example.cpp
@@ -150,7 +150,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -180,7 +180,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/lstm.example.cpp b/runtime/test/generated/spec_V1_0/lstm.example.cpp index 65bf33e..3a35f76 100644 --- a/runtime/test/generated/spec_V1_0/lstm.example.cpp +++ b/runtime/test/generated/spec_V1_0/lstm.example.cpp
@@ -590,7 +590,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -620,7 +620,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -650,7 +650,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -680,7 +680,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -710,7 +710,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -740,7 +740,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -770,7 +770,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -800,7 +800,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -830,7 +830,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = 
{1}, @@ -860,7 +860,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -890,7 +890,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -920,7 +920,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -950,7 +950,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -980,7 +980,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1010,7 +1010,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/lstm2.example.cpp b/runtime/test/generated/spec_V1_0/lstm2.example.cpp index 40b4cf3..144a57d 100644 --- a/runtime/test/generated/spec_V1_0/lstm2.example.cpp +++ b/runtime/test/generated/spec_V1_0/lstm2.example.cpp
@@ -590,7 +590,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -620,7 +620,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -650,7 +650,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -680,7 +680,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -710,7 +710,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -740,7 +740,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -770,7 +770,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -800,7 +800,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -830,7 +830,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = 
{1}, @@ -860,7 +860,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -890,7 +890,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -920,7 +920,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -950,7 +950,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -980,7 +980,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/lstm2_state.example.cpp b/runtime/test/generated/spec_V1_0/lstm2_state.example.cpp index 0841d4e..654788a 100644 --- a/runtime/test/generated/spec_V1_0/lstm2_state.example.cpp +++ b/runtime/test/generated/spec_V1_0/lstm2_state.example.cpp
@@ -590,7 +590,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -620,7 +620,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -650,7 +650,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -680,7 +680,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -710,7 +710,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -740,7 +740,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -770,7 +770,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -800,7 +800,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -830,7 +830,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = 
{1}, @@ -860,7 +860,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -890,7 +890,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -920,7 +920,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -950,7 +950,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -980,7 +980,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/lstm2_state2.example.cpp b/runtime/test/generated/spec_V1_0/lstm2_state2.example.cpp index 9ce97b5..b8add07 100644 --- a/runtime/test/generated/spec_V1_0/lstm2_state2.example.cpp +++ b/runtime/test/generated/spec_V1_0/lstm2_state2.example.cpp
@@ -590,7 +590,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -620,7 +620,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -650,7 +650,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -680,7 +680,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -710,7 +710,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -740,7 +740,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -770,7 +770,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -800,7 +800,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -830,7 +830,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = 
{1}, @@ -860,7 +860,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -890,7 +890,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -920,7 +920,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -950,7 +950,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -980,7 +980,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/lstm3.example.cpp b/runtime/test/generated/spec_V1_0/lstm3.example.cpp index 1913da0..0740717 100644 --- a/runtime/test/generated/spec_V1_0/lstm3.example.cpp +++ b/runtime/test/generated/spec_V1_0/lstm3.example.cpp
@@ -590,7 +590,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -620,7 +620,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -650,7 +650,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -680,7 +680,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -710,7 +710,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -740,7 +740,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -770,7 +770,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -800,7 +800,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -830,7 +830,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = 
{1}, @@ -860,7 +860,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -890,7 +890,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -920,7 +920,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -950,7 +950,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -980,7 +980,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1010,7 +1010,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1040,7 +1040,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder15 + }, { // dummy15 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1070,7 +1070,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder16 + }, { // dummy16 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1100,7 +1100,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder17 + }, { // dummy17 .channelQuant = {}, .data = 
TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1130,7 +1130,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder18 + }, { // dummy18 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/lstm3_state.example.cpp b/runtime/test/generated/spec_V1_0/lstm3_state.example.cpp index c5e856f..dc280e1 100644 --- a/runtime/test/generated/spec_V1_0/lstm3_state.example.cpp +++ b/runtime/test/generated/spec_V1_0/lstm3_state.example.cpp
@@ -590,7 +590,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -620,7 +620,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -650,7 +650,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -680,7 +680,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -710,7 +710,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -740,7 +740,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -770,7 +770,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -800,7 +800,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -830,7 +830,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = 
{1}, @@ -860,7 +860,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -890,7 +890,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -920,7 +920,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -950,7 +950,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -980,7 +980,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1010,7 +1010,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1040,7 +1040,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder15 + }, { // dummy15 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1070,7 +1070,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder16 + }, { // dummy16 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1100,7 +1100,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder17 + }, { // dummy17 .channelQuant = {}, .data = 
TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1130,7 +1130,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder18 + }, { // dummy18 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/lstm3_state2.example.cpp b/runtime/test/generated/spec_V1_0/lstm3_state2.example.cpp index e5bc7c6..a8064f0 100644 --- a/runtime/test/generated/spec_V1_0/lstm3_state2.example.cpp +++ b/runtime/test/generated/spec_V1_0/lstm3_state2.example.cpp
@@ -590,7 +590,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -620,7 +620,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -650,7 +650,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -680,7 +680,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -710,7 +710,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -740,7 +740,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -770,7 +770,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -800,7 +800,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -830,7 +830,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = 
{1}, @@ -860,7 +860,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -890,7 +890,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -920,7 +920,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -950,7 +950,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -980,7 +980,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1010,7 +1010,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1040,7 +1040,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder15 + }, { // dummy15 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1070,7 +1070,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder16 + }, { // dummy16 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1100,7 +1100,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder17 + }, { // dummy17 .channelQuant = {}, .data = 
TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1130,7 +1130,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder18 + }, { // dummy18 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/lstm3_state3.example.cpp b/runtime/test/generated/spec_V1_0/lstm3_state3.example.cpp index 98b2813..b7d9bed 100644 --- a/runtime/test/generated/spec_V1_0/lstm3_state3.example.cpp +++ b/runtime/test/generated/spec_V1_0/lstm3_state3.example.cpp
@@ -590,7 +590,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -620,7 +620,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -650,7 +650,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -680,7 +680,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -710,7 +710,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -740,7 +740,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -770,7 +770,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -800,7 +800,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -830,7 +830,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = 
{1}, @@ -860,7 +860,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -890,7 +890,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -920,7 +920,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -950,7 +950,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -980,7 +980,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1010,7 +1010,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1040,7 +1040,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder15 + }, { // dummy15 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1070,7 +1070,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder16 + }, { // dummy16 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1100,7 +1100,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder17 + }, { // dummy17 .channelQuant = {}, .data = 
TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1130,7 +1130,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder18 + }, { // dummy18 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/lstm_state.example.cpp b/runtime/test/generated/spec_V1_0/lstm_state.example.cpp index e98f130..54e1b2b 100644 --- a/runtime/test/generated/spec_V1_0/lstm_state.example.cpp +++ b/runtime/test/generated/spec_V1_0/lstm_state.example.cpp
@@ -590,7 +590,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -620,7 +620,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -650,7 +650,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -680,7 +680,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -710,7 +710,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -740,7 +740,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -770,7 +770,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -800,7 +800,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -830,7 +830,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = 
{1}, @@ -860,7 +860,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -890,7 +890,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -920,7 +920,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -950,7 +950,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -980,7 +980,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1010,7 +1010,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/lstm_state2.example.cpp b/runtime/test/generated/spec_V1_0/lstm_state2.example.cpp index 7bb44b9..3b526bf 100644 --- a/runtime/test/generated/spec_V1_0/lstm_state2.example.cpp +++ b/runtime/test/generated/spec_V1_0/lstm_state2.example.cpp
@@ -590,7 +590,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -620,7 +620,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -650,7 +650,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -680,7 +680,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -710,7 +710,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -740,7 +740,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -770,7 +770,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -800,7 +800,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -830,7 +830,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = 
{1}, @@ -860,7 +860,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -890,7 +890,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -920,7 +920,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -950,7 +950,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -980,7 +980,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1010,7 +1010,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/max_pool_float_1.example.cpp b/runtime/test/generated/spec_V1_0/max_pool_float_1.example.cpp index 008b7ec..40efb31 100644 --- a/runtime/test/generated/spec_V1_0/max_pool_float_1.example.cpp +++ b/runtime/test/generated/spec_V1_0/max_pool_float_1.example.cpp
@@ -150,7 +150,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/max_pool_float_2.example.cpp b/runtime/test/generated/spec_V1_0/max_pool_float_2.example.cpp index 0234d15..a779d69 100644 --- a/runtime/test/generated/spec_V1_0/max_pool_float_2.example.cpp +++ b/runtime/test/generated/spec_V1_0/max_pool_float_2.example.cpp
@@ -170,7 +170,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/max_pool_float_3.example.cpp b/runtime/test/generated/spec_V1_0/max_pool_float_3.example.cpp index 33ce75c..01fc67b 100644 --- a/runtime/test/generated/spec_V1_0/max_pool_float_3.example.cpp +++ b/runtime/test/generated/spec_V1_0/max_pool_float_3.example.cpp
@@ -170,7 +170,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/max_pool_float_4.example.cpp b/runtime/test/generated/spec_V1_0/max_pool_float_4.example.cpp index 80c564f..f80e8fc 100644 --- a/runtime/test/generated/spec_V1_0/max_pool_float_4.example.cpp +++ b/runtime/test/generated/spec_V1_0/max_pool_float_4.example.cpp
@@ -150,7 +150,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/max_pool_quant8_1.example.cpp b/runtime/test/generated/spec_V1_0/max_pool_quant8_1.example.cpp index 6a98790..e295c2d 100644 --- a/runtime/test/generated/spec_V1_0/max_pool_quant8_1.example.cpp +++ b/runtime/test/generated/spec_V1_0/max_pool_quant8_1.example.cpp
@@ -150,7 +150,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/max_pool_quant8_2.example.cpp b/runtime/test/generated/spec_V1_0/max_pool_quant8_2.example.cpp index a670aa2..7c0858b 100644 --- a/runtime/test/generated/spec_V1_0/max_pool_quant8_2.example.cpp +++ b/runtime/test/generated/spec_V1_0/max_pool_quant8_2.example.cpp
@@ -170,7 +170,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/max_pool_quant8_3.example.cpp b/runtime/test/generated/spec_V1_0/max_pool_quant8_3.example.cpp index 393105e..fb8f974 100644 --- a/runtime/test/generated/spec_V1_0/max_pool_quant8_3.example.cpp +++ b/runtime/test/generated/spec_V1_0/max_pool_quant8_3.example.cpp
@@ -170,7 +170,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/max_pool_quant8_4.example.cpp b/runtime/test/generated/spec_V1_0/max_pool_quant8_4.example.cpp index 6119275..b373d5e 100644 --- a/runtime/test/generated/spec_V1_0/max_pool_quant8_4.example.cpp +++ b/runtime/test/generated/spec_V1_0/max_pool_quant8_4.example.cpp
@@ -150,7 +150,7 @@ .scale = 0.0625f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/mul.example.cpp b/runtime/test/generated/spec_V1_0/mul.example.cpp index 6351aac..404bcbb 100644 --- a/runtime/test/generated/spec_V1_0/mul.example.cpp +++ b/runtime/test/generated/spec_V1_0/mul.example.cpp
@@ -130,7 +130,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -160,7 +160,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/mul_broadcast_quant8.example.cpp b/runtime/test/generated/spec_V1_0/mul_broadcast_quant8.example.cpp index 96d1a7b..00808ed 100644 --- a/runtime/test/generated/spec_V1_0/mul_broadcast_quant8.example.cpp +++ b/runtime/test/generated/spec_V1_0/mul_broadcast_quant8.example.cpp
@@ -130,7 +130,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -160,7 +160,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/mul_quant8.example.cpp b/runtime/test/generated/spec_V1_0/mul_quant8.example.cpp index 8e664c8..ddab9c7 100644 --- a/runtime/test/generated/spec_V1_0/mul_quant8.example.cpp +++ b/runtime/test/generated/spec_V1_0/mul_quant8.example.cpp
@@ -130,7 +130,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -160,7 +160,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/mul_relu.example.cpp b/runtime/test/generated/spec_V1_0/mul_relu.example.cpp index 75f3f4a..fef8359 100644 --- a/runtime/test/generated/spec_V1_0/mul_relu.example.cpp +++ b/runtime/test/generated/spec_V1_0/mul_relu.example.cpp
@@ -130,7 +130,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -160,7 +160,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/relu1_float_1.example.cpp b/runtime/test/generated/spec_V1_0/relu1_float_1.example.cpp index 1e8c931..f55e0dd 100644 --- a/runtime/test/generated/spec_V1_0/relu1_float_1.example.cpp +++ b/runtime/test/generated/spec_V1_0/relu1_float_1.example.cpp
@@ -90,7 +90,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/relu1_float_2.example.cpp b/runtime/test/generated/spec_V1_0/relu1_float_2.example.cpp index 73659fd..17f5139 100644 --- a/runtime/test/generated/spec_V1_0/relu1_float_2.example.cpp +++ b/runtime/test/generated/spec_V1_0/relu1_float_2.example.cpp
@@ -90,7 +90,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/relu1_quant8_1.example.cpp b/runtime/test/generated/spec_V1_0/relu1_quant8_1.example.cpp index 5d1c9a9..fe204cc 100644 --- a/runtime/test/generated/spec_V1_0/relu1_quant8_1.example.cpp +++ b/runtime/test/generated/spec_V1_0/relu1_quant8_1.example.cpp
@@ -90,7 +90,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -218,7 +218,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/relu1_quant8_2.example.cpp b/runtime/test/generated/spec_V1_0/relu1_quant8_2.example.cpp index 40d0cf5..4ed8556 100644 --- a/runtime/test/generated/spec_V1_0/relu1_quant8_2.example.cpp +++ b/runtime/test/generated/spec_V1_0/relu1_quant8_2.example.cpp
@@ -90,7 +90,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/relu6_float_1.example.cpp b/runtime/test/generated/spec_V1_0/relu6_float_1.example.cpp index 06d67c6..f69d73c 100644 --- a/runtime/test/generated/spec_V1_0/relu6_float_1.example.cpp +++ b/runtime/test/generated/spec_V1_0/relu6_float_1.example.cpp
@@ -90,7 +90,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/relu6_float_2.example.cpp b/runtime/test/generated/spec_V1_0/relu6_float_2.example.cpp index 046f5f9..f1756c8 100644 --- a/runtime/test/generated/spec_V1_0/relu6_float_2.example.cpp +++ b/runtime/test/generated/spec_V1_0/relu6_float_2.example.cpp
@@ -90,7 +90,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/relu6_quant8_1.example.cpp b/runtime/test/generated/spec_V1_0/relu6_quant8_1.example.cpp index 12b61b3..354dc93 100644 --- a/runtime/test/generated/spec_V1_0/relu6_quant8_1.example.cpp +++ b/runtime/test/generated/spec_V1_0/relu6_quant8_1.example.cpp
@@ -90,7 +90,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -218,7 +218,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/relu6_quant8_2.example.cpp b/runtime/test/generated/spec_V1_0/relu6_quant8_2.example.cpp index 97b7199..37d9acd 100644 --- a/runtime/test/generated/spec_V1_0/relu6_quant8_2.example.cpp +++ b/runtime/test/generated/spec_V1_0/relu6_quant8_2.example.cpp
@@ -90,7 +90,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/relu_float_1.example.cpp b/runtime/test/generated/spec_V1_0/relu_float_1.example.cpp index 7a5d934..e1f7aa4 100644 --- a/runtime/test/generated/spec_V1_0/relu_float_1.example.cpp +++ b/runtime/test/generated/spec_V1_0/relu_float_1.example.cpp
@@ -90,7 +90,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/relu_float_2.example.cpp b/runtime/test/generated/spec_V1_0/relu_float_2.example.cpp index 3e979ed..63d2e75 100644 --- a/runtime/test/generated/spec_V1_0/relu_float_2.example.cpp +++ b/runtime/test/generated/spec_V1_0/relu_float_2.example.cpp
@@ -90,7 +90,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/relu_quant8_1.example.cpp b/runtime/test/generated/spec_V1_0/relu_quant8_1.example.cpp index 7248452..22d53aa 100644 --- a/runtime/test/generated/spec_V1_0/relu_quant8_1.example.cpp +++ b/runtime/test/generated/spec_V1_0/relu_quant8_1.example.cpp
@@ -90,7 +90,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -218,7 +218,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/relu_quant8_2.example.cpp b/runtime/test/generated/spec_V1_0/relu_quant8_2.example.cpp index a18a2f9..f02ffe1 100644 --- a/runtime/test/generated/spec_V1_0/relu_quant8_2.example.cpp +++ b/runtime/test/generated/spec_V1_0/relu_quant8_2.example.cpp
@@ -90,7 +90,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/reshape.example.cpp b/runtime/test/generated/spec_V1_0/reshape.example.cpp index a39c5ca..1b9ac59 100644 --- a/runtime/test/generated/spec_V1_0/reshape.example.cpp +++ b/runtime/test/generated/spec_V1_0/reshape.example.cpp
@@ -110,7 +110,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -258,7 +258,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/reshape_quant8.example.cpp b/runtime/test/generated/spec_V1_0/reshape_quant8.example.cpp index 242f876..9b01c4f 100644 --- a/runtime/test/generated/spec_V1_0/reshape_quant8.example.cpp +++ b/runtime/test/generated/spec_V1_0/reshape_quant8.example.cpp
@@ -110,7 +110,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -258,7 +258,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/reshape_quant8_weights_as_inputs.example.cpp b/runtime/test/generated/spec_V1_0/reshape_quant8_weights_as_inputs.example.cpp index 646a8e8..0fae8ca 100644 --- a/runtime/test/generated/spec_V1_0/reshape_quant8_weights_as_inputs.example.cpp +++ b/runtime/test/generated/spec_V1_0/reshape_quant8_weights_as_inputs.example.cpp
@@ -110,7 +110,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/reshape_weights_as_inputs.example.cpp b/runtime/test/generated/spec_V1_0/reshape_weights_as_inputs.example.cpp index 2eca054..eb8bc79 100644 --- a/runtime/test/generated/spec_V1_0/reshape_weights_as_inputs.example.cpp +++ b/runtime/test/generated/spec_V1_0/reshape_weights_as_inputs.example.cpp
@@ -110,7 +110,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/resize_bilinear.example.cpp b/runtime/test/generated/spec_V1_0/resize_bilinear.example.cpp index a876a03..188c1f9 100644 --- a/runtime/test/generated/spec_V1_0/resize_bilinear.example.cpp +++ b/runtime/test/generated/spec_V1_0/resize_bilinear.example.cpp
@@ -130,7 +130,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/resize_bilinear_2.example.cpp b/runtime/test/generated/spec_V1_0/resize_bilinear_2.example.cpp index ee77139..a1cf01c 100644 --- a/runtime/test/generated/spec_V1_0/resize_bilinear_2.example.cpp +++ b/runtime/test/generated/spec_V1_0/resize_bilinear_2.example.cpp
@@ -130,7 +130,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/rnn.example.cpp b/runtime/test/generated/spec_V1_0/rnn.example.cpp index 43b5dbe..b07087b 100644 --- a/runtime/test/generated/spec_V1_0/rnn.example.cpp +++ b/runtime/test/generated/spec_V1_0/rnn.example.cpp
@@ -210,7 +210,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -240,7 +240,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -270,7 +270,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -300,7 +300,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -330,7 +330,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/rnn_state.example.cpp b/runtime/test/generated/spec_V1_0/rnn_state.example.cpp index e9cff08..aee78aa 100644 --- a/runtime/test/generated/spec_V1_0/rnn_state.example.cpp +++ b/runtime/test/generated/spec_V1_0/rnn_state.example.cpp
@@ -210,7 +210,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -240,7 +240,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -270,7 +270,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -300,7 +300,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -330,7 +330,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/softmax_float_1.example.cpp b/runtime/test/generated/spec_V1_0/softmax_float_1.example.cpp index f0ee348..6ade79a 100644 --- a/runtime/test/generated/spec_V1_0/softmax_float_1.example.cpp +++ b/runtime/test/generated/spec_V1_0/softmax_float_1.example.cpp
@@ -110,7 +110,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/softmax_float_2.example.cpp b/runtime/test/generated/spec_V1_0/softmax_float_2.example.cpp index 069d7df..c1bc711 100644 --- a/runtime/test/generated/spec_V1_0/softmax_float_2.example.cpp +++ b/runtime/test/generated/spec_V1_0/softmax_float_2.example.cpp
@@ -110,7 +110,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/softmax_quant8_1.example.cpp b/runtime/test/generated/spec_V1_0/softmax_quant8_1.example.cpp index 8e2e4e8..cbeae0c 100644 --- a/runtime/test/generated/spec_V1_0/softmax_quant8_1.example.cpp +++ b/runtime/test/generated/spec_V1_0/softmax_quant8_1.example.cpp
@@ -110,7 +110,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/softmax_quant8_2.example.cpp b/runtime/test/generated/spec_V1_0/softmax_quant8_2.example.cpp index c4b3d6c..9352f8d 100644 --- a/runtime/test/generated/spec_V1_0/softmax_quant8_2.example.cpp +++ b/runtime/test/generated/spec_V1_0/softmax_quant8_2.example.cpp
@@ -110,7 +110,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/space_to_depth_float_1.example.cpp b/runtime/test/generated/spec_V1_0/space_to_depth_float_1.example.cpp index bb26286..0991f34 100644 --- a/runtime/test/generated/spec_V1_0/space_to_depth_float_1.example.cpp +++ b/runtime/test/generated/spec_V1_0/space_to_depth_float_1.example.cpp
@@ -110,7 +110,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/space_to_depth_float_2.example.cpp b/runtime/test/generated/spec_V1_0/space_to_depth_float_2.example.cpp index 69cb727..6112117 100644 --- a/runtime/test/generated/spec_V1_0/space_to_depth_float_2.example.cpp +++ b/runtime/test/generated/spec_V1_0/space_to_depth_float_2.example.cpp
@@ -110,7 +110,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/space_to_depth_float_3.example.cpp b/runtime/test/generated/spec_V1_0/space_to_depth_float_3.example.cpp index a6269d5..b2a82e6 100644 --- a/runtime/test/generated/spec_V1_0/space_to_depth_float_3.example.cpp +++ b/runtime/test/generated/spec_V1_0/space_to_depth_float_3.example.cpp
@@ -110,7 +110,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/space_to_depth_quant8_1.example.cpp b/runtime/test/generated/spec_V1_0/space_to_depth_quant8_1.example.cpp index d8326c5..8610b0c 100644 --- a/runtime/test/generated/spec_V1_0/space_to_depth_quant8_1.example.cpp +++ b/runtime/test/generated/spec_V1_0/space_to_depth_quant8_1.example.cpp
@@ -110,7 +110,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/space_to_depth_quant8_2.example.cpp b/runtime/test/generated/spec_V1_0/space_to_depth_quant8_2.example.cpp index fc190c9..39ce99a 100644 --- a/runtime/test/generated/spec_V1_0/space_to_depth_quant8_2.example.cpp +++ b/runtime/test/generated/spec_V1_0/space_to_depth_quant8_2.example.cpp
@@ -110,7 +110,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/svdf.example.cpp b/runtime/test/generated/spec_V1_0/svdf.example.cpp index fda5d42..aa739d9 100644 --- a/runtime/test/generated/spec_V1_0/svdf.example.cpp +++ b/runtime/test/generated/spec_V1_0/svdf.example.cpp
@@ -230,7 +230,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -260,7 +260,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -290,7 +290,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -320,7 +320,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/svdf2.example.cpp b/runtime/test/generated/spec_V1_0/svdf2.example.cpp index 5a56846..d29ec73 100644 --- a/runtime/test/generated/spec_V1_0/svdf2.example.cpp +++ b/runtime/test/generated/spec_V1_0/svdf2.example.cpp
@@ -230,7 +230,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -260,7 +260,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -290,7 +290,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -320,7 +320,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/svdf_bias_present.example.cpp b/runtime/test/generated/spec_V1_0/svdf_bias_present.example.cpp index 0b5bb7f..325a0c6 100644 --- a/runtime/test/generated/spec_V1_0/svdf_bias_present.example.cpp +++ b/runtime/test/generated/spec_V1_0/svdf_bias_present.example.cpp
@@ -230,7 +230,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -260,7 +260,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -290,7 +290,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -320,7 +320,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -350,7 +350,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/svdf_state.example.cpp b/runtime/test/generated/spec_V1_0/svdf_state.example.cpp index 06576f5..6270565 100644 --- a/runtime/test/generated/spec_V1_0/svdf_state.example.cpp +++ b/runtime/test/generated/spec_V1_0/svdf_state.example.cpp
@@ -230,7 +230,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -260,7 +260,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -290,7 +290,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -320,7 +320,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_0/tanh.example.cpp b/runtime/test/generated/spec_V1_0/tanh.example.cpp index 904fd00..3b32b63 100644 --- a/runtime/test/generated/spec_V1_0/tanh.example.cpp +++ b/runtime/test/generated/spec_V1_0/tanh.example.cpp
@@ -90,7 +90,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/add_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/add_relaxed.example.cpp index b3918e4..c0027f8 100644 --- a/runtime/test/generated/spec_V1_1/add_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/add_relaxed.example.cpp
@@ -130,7 +130,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -160,7 +160,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/avg_pool_float_1_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/avg_pool_float_1_relaxed.example.cpp index b877dac..27233e8 100644 --- a/runtime/test/generated/spec_V1_1/avg_pool_float_1_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/avg_pool_float_1_relaxed.example.cpp
@@ -150,7 +150,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/avg_pool_float_2_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/avg_pool_float_2_relaxed.example.cpp index 8d47968..2d67cc8 100644 --- a/runtime/test/generated/spec_V1_1/avg_pool_float_2_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/avg_pool_float_2_relaxed.example.cpp
@@ -170,7 +170,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/avg_pool_float_3_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/avg_pool_float_3_relaxed.example.cpp index cf1158d..eef79bd 100644 --- a/runtime/test/generated/spec_V1_1/avg_pool_float_3_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/avg_pool_float_3_relaxed.example.cpp
@@ -170,7 +170,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/avg_pool_float_4_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/avg_pool_float_4_relaxed.example.cpp index a687bbb..1f8cc0d 100644 --- a/runtime/test/generated/spec_V1_1/avg_pool_float_4_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/avg_pool_float_4_relaxed.example.cpp
@@ -170,7 +170,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/avg_pool_float_5_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/avg_pool_float_5_relaxed.example.cpp index c5e2a6e..a5bf335 100644 --- a/runtime/test/generated/spec_V1_1/avg_pool_float_5_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/avg_pool_float_5_relaxed.example.cpp
@@ -150,7 +150,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/batch_to_space.example.cpp b/runtime/test/generated/spec_V1_1/batch_to_space.example.cpp index 10b8e6f..783f88d 100644 --- a/runtime/test/generated/spec_V1_1/batch_to_space.example.cpp +++ b/runtime/test/generated/spec_V1_1/batch_to_space.example.cpp
@@ -110,7 +110,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -258,7 +258,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/batch_to_space_float_1.example.cpp b/runtime/test/generated/spec_V1_1/batch_to_space_float_1.example.cpp index 0b48006..4c6f550 100644 --- a/runtime/test/generated/spec_V1_1/batch_to_space_float_1.example.cpp +++ b/runtime/test/generated/spec_V1_1/batch_to_space_float_1.example.cpp
@@ -110,7 +110,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -258,7 +258,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/batch_to_space_float_1_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/batch_to_space_float_1_relaxed.example.cpp index 34f4cdc..aa2a237 100644 --- a/runtime/test/generated/spec_V1_1/batch_to_space_float_1_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/batch_to_space_float_1_relaxed.example.cpp
@@ -110,7 +110,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -258,7 +258,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/batch_to_space_quant8_1.example.cpp b/runtime/test/generated/spec_V1_1/batch_to_space_quant8_1.example.cpp index a806d1f..6322a66 100644 --- a/runtime/test/generated/spec_V1_1/batch_to_space_quant8_1.example.cpp +++ b/runtime/test/generated/spec_V1_1/batch_to_space_quant8_1.example.cpp
@@ -110,7 +110,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -258,7 +258,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/batch_to_space_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/batch_to_space_relaxed.example.cpp index b6f0aa2..59fce32 100644 --- a/runtime/test/generated/spec_V1_1/batch_to_space_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/batch_to_space_relaxed.example.cpp
@@ -110,7 +110,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -258,7 +258,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/concat_float_1_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/concat_float_1_relaxed.example.cpp index ec0961b..de6776c 100644 --- a/runtime/test/generated/spec_V1_1/concat_float_1_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/concat_float_1_relaxed.example.cpp
@@ -130,7 +130,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -160,7 +160,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/concat_float_2_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/concat_float_2_relaxed.example.cpp index 2165d0d..0133d34 100644 --- a/runtime/test/generated/spec_V1_1/concat_float_2_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/concat_float_2_relaxed.example.cpp
@@ -130,7 +130,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -160,7 +160,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/concat_float_3_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/concat_float_3_relaxed.example.cpp index e2f315e..cf8a966 100644 --- a/runtime/test/generated/spec_V1_1/concat_float_3_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/concat_float_3_relaxed.example.cpp
@@ -130,7 +130,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -160,7 +160,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/conv_1_h3_w2_SAME_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/conv_1_h3_w2_SAME_relaxed.example.cpp index 2c0ecd2..90eb62b 100644 --- a/runtime/test/generated/spec_V1_1/conv_1_h3_w2_SAME_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/conv_1_h3_w2_SAME_relaxed.example.cpp
@@ -210,7 +210,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -458,7 +458,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -488,7 +488,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -518,7 +518,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -774,7 +774,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1022,7 +1022,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1052,7 +1052,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1082,7 +1082,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/conv_1_h3_w2_VALID_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/conv_1_h3_w2_VALID_relaxed.example.cpp index dc3aaf2..66ea8b4 100644 --- a/runtime/test/generated/spec_V1_1/conv_1_h3_w2_VALID_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/conv_1_h3_w2_VALID_relaxed.example.cpp
@@ -210,7 +210,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -458,7 +458,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -488,7 +488,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -518,7 +518,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -774,7 +774,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1022,7 +1022,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1052,7 +1052,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1082,7 +1082,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/conv_3_h3_w2_SAME_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/conv_3_h3_w2_SAME_relaxed.example.cpp index 1042083..dcf0f9b 100644 --- a/runtime/test/generated/spec_V1_1/conv_3_h3_w2_SAME_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/conv_3_h3_w2_SAME_relaxed.example.cpp
@@ -210,7 +210,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -458,7 +458,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -488,7 +488,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -518,7 +518,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -774,7 +774,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1022,7 +1022,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1052,7 +1052,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1082,7 +1082,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/conv_3_h3_w2_VALID_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/conv_3_h3_w2_VALID_relaxed.example.cpp index f573ea1..e704d5d 100644 --- a/runtime/test/generated/spec_V1_1/conv_3_h3_w2_VALID_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/conv_3_h3_w2_VALID_relaxed.example.cpp
@@ -210,7 +210,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -458,7 +458,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -488,7 +488,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -518,7 +518,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -774,7 +774,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1022,7 +1022,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1052,7 +1052,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1082,7 +1082,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/conv_float_2_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/conv_float_2_relaxed.example.cpp index a581c89..7785729 100644 --- a/runtime/test/generated/spec_V1_1/conv_float_2_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/conv_float_2_relaxed.example.cpp
@@ -190,7 +190,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -418,7 +418,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -448,7 +448,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -478,7 +478,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/conv_float_channels_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/conv_float_channels_relaxed.example.cpp index 5e98624..b5eb76e 100644 --- a/runtime/test/generated/spec_V1_1/conv_float_channels_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/conv_float_channels_relaxed.example.cpp
@@ -190,7 +190,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -418,7 +418,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -448,7 +448,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -478,7 +478,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/conv_float_channels_weights_as_inputs_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/conv_float_channels_weights_as_inputs_relaxed.example.cpp index 87c9a62..ba1557c 100644 --- a/runtime/test/generated/spec_V1_1/conv_float_channels_weights_as_inputs_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/conv_float_channels_weights_as_inputs_relaxed.example.cpp
@@ -190,7 +190,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -220,7 +220,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -250,7 +250,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/conv_float_large_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/conv_float_large_relaxed.example.cpp index 3404ec3..ffefc12 100644 --- a/runtime/test/generated/spec_V1_1/conv_float_large_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/conv_float_large_relaxed.example.cpp
@@ -190,7 +190,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -418,7 +418,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -448,7 +448,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -478,7 +478,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/conv_float_large_weights_as_inputs_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/conv_float_large_weights_as_inputs_relaxed.example.cpp index 3d8e1a5..ceecca1 100644 --- a/runtime/test/generated/spec_V1_1/conv_float_large_weights_as_inputs_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/conv_float_large_weights_as_inputs_relaxed.example.cpp
@@ -190,7 +190,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -220,7 +220,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -250,7 +250,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/conv_float_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/conv_float_relaxed.example.cpp index 18f0c19..baba421 100644 --- a/runtime/test/generated/spec_V1_1/conv_float_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/conv_float_relaxed.example.cpp
@@ -190,7 +190,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -418,7 +418,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -448,7 +448,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -478,7 +478,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/conv_float_weights_as_inputs_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/conv_float_weights_as_inputs_relaxed.example.cpp index 7ec4d74..90eb8e3 100644 --- a/runtime/test/generated/spec_V1_1/conv_float_weights_as_inputs_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/conv_float_weights_as_inputs_relaxed.example.cpp
@@ -190,7 +190,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -220,7 +220,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -250,7 +250,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/depth_to_space_float_1_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/depth_to_space_float_1_relaxed.example.cpp index 15f972a..0341b60 100644 --- a/runtime/test/generated/spec_V1_1/depth_to_space_float_1_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/depth_to_space_float_1_relaxed.example.cpp
@@ -110,7 +110,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/depth_to_space_float_2_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/depth_to_space_float_2_relaxed.example.cpp index 21181f2..8bb48c6 100644 --- a/runtime/test/generated/spec_V1_1/depth_to_space_float_2_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/depth_to_space_float_2_relaxed.example.cpp
@@ -110,7 +110,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/depth_to_space_float_3_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/depth_to_space_float_3_relaxed.example.cpp index 5cff22f..8d558f8 100644 --- a/runtime/test/generated/spec_V1_1/depth_to_space_float_3_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/depth_to_space_float_3_relaxed.example.cpp
@@ -110,7 +110,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/depthwise_conv2d_float_2_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/depthwise_conv2d_float_2_relaxed.example.cpp index d81104e..8d1de81 100644 --- a/runtime/test/generated/spec_V1_1/depthwise_conv2d_float_2_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/depthwise_conv2d_float_2_relaxed.example.cpp
@@ -210,7 +210,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -458,7 +458,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -488,7 +488,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -518,7 +518,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/depthwise_conv2d_float_large_2_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/depthwise_conv2d_float_large_2_relaxed.example.cpp index 7a50522..9f2acf1 100644 --- a/runtime/test/generated/spec_V1_1/depthwise_conv2d_float_large_2_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/depthwise_conv2d_float_large_2_relaxed.example.cpp
@@ -210,7 +210,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -458,7 +458,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -488,7 +488,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -518,7 +518,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/depthwise_conv2d_float_large_2_weights_as_inputs_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/depthwise_conv2d_float_large_2_weights_as_inputs_relaxed.example.cpp index 4317f77..b7e5aae 100644 --- a/runtime/test/generated/spec_V1_1/depthwise_conv2d_float_large_2_weights_as_inputs_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/depthwise_conv2d_float_large_2_weights_as_inputs_relaxed.example.cpp
@@ -210,7 +210,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -240,7 +240,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -270,7 +270,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/depthwise_conv2d_float_large_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/depthwise_conv2d_float_large_relaxed.example.cpp index 3f08524..52ebee5 100644 --- a/runtime/test/generated/spec_V1_1/depthwise_conv2d_float_large_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/depthwise_conv2d_float_large_relaxed.example.cpp
@@ -210,7 +210,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -458,7 +458,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -488,7 +488,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -518,7 +518,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/depthwise_conv2d_float_large_weights_as_inputs_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/depthwise_conv2d_float_large_weights_as_inputs_relaxed.example.cpp index b0d0556..8b30673 100644 --- a/runtime/test/generated/spec_V1_1/depthwise_conv2d_float_large_weights_as_inputs_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/depthwise_conv2d_float_large_weights_as_inputs_relaxed.example.cpp
@@ -210,7 +210,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -240,7 +240,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -270,7 +270,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/depthwise_conv2d_float_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/depthwise_conv2d_float_relaxed.example.cpp index 13e6422..6a858eb 100644 --- a/runtime/test/generated/spec_V1_1/depthwise_conv2d_float_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/depthwise_conv2d_float_relaxed.example.cpp
@@ -210,7 +210,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -458,7 +458,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -488,7 +488,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -518,7 +518,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/depthwise_conv2d_float_weights_as_inputs_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/depthwise_conv2d_float_weights_as_inputs_relaxed.example.cpp index 3cf198d..d532885 100644 --- a/runtime/test/generated/spec_V1_1/depthwise_conv2d_float_weights_as_inputs_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/depthwise_conv2d_float_weights_as_inputs_relaxed.example.cpp
@@ -210,7 +210,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -240,7 +240,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -270,7 +270,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/depthwise_conv_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/depthwise_conv_relaxed.example.cpp index 135dafd..7cc2960 100644 --- a/runtime/test/generated/spec_V1_1/depthwise_conv_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/depthwise_conv_relaxed.example.cpp
@@ -230,7 +230,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -498,7 +498,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -528,7 +528,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -558,7 +558,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -834,7 +834,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1102,7 +1102,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1132,7 +1132,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1162,7 +1162,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/dequantize_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/dequantize_relaxed.example.cpp index 9151fa4..07d8f05 100644 --- a/runtime/test/generated/spec_V1_1/dequantize_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/dequantize_relaxed.example.cpp
@@ -90,7 +90,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/div.example.cpp b/runtime/test/generated/spec_V1_1/div.example.cpp index d152d5d..7e017ac 100644 --- a/runtime/test/generated/spec_V1_1/div.example.cpp +++ b/runtime/test/generated/spec_V1_1/div.example.cpp
@@ -130,7 +130,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -160,7 +160,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -332,7 +332,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -362,7 +362,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -534,7 +534,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -564,7 +564,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/div_broadcast_float.example.cpp b/runtime/test/generated/spec_V1_1/div_broadcast_float.example.cpp index cdc8f45..b77ab93 100644 --- a/runtime/test/generated/spec_V1_1/div_broadcast_float.example.cpp +++ b/runtime/test/generated/spec_V1_1/div_broadcast_float.example.cpp
@@ -130,7 +130,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -160,7 +160,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/div_broadcast_float_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/div_broadcast_float_relaxed.example.cpp index 2a3ecf0..d150bdb 100644 --- a/runtime/test/generated/spec_V1_1/div_broadcast_float_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/div_broadcast_float_relaxed.example.cpp
@@ -130,7 +130,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -160,7 +160,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/div_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/div_relaxed.example.cpp index dd7a799..1bb2da5 100644 --- a/runtime/test/generated/spec_V1_1/div_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/div_relaxed.example.cpp
@@ -130,7 +130,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -160,7 +160,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/embedding_lookup_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/embedding_lookup_relaxed.example.cpp index 23341e1..2d9b9f0 100644 --- a/runtime/test/generated/spec_V1_1/embedding_lookup_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/embedding_lookup_relaxed.example.cpp
@@ -110,7 +110,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/floor_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/floor_relaxed.example.cpp index ea1ba72..227d3fb 100644 --- a/runtime/test/generated/spec_V1_1/floor_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/floor_relaxed.example.cpp
@@ -90,7 +90,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/fully_connected_float_2_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/fully_connected_float_2_relaxed.example.cpp index c1b3241..1ec84c9 100644 --- a/runtime/test/generated/spec_V1_1/fully_connected_float_2_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/fully_connected_float_2_relaxed.example.cpp
@@ -150,7 +150,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -338,7 +338,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -368,7 +368,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -398,7 +398,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/fully_connected_float_4d_simple.example.cpp b/runtime/test/generated/spec_V1_1/fully_connected_float_4d_simple.example.cpp index 7bfacff..4571a40 100644 --- a/runtime/test/generated/spec_V1_1/fully_connected_float_4d_simple.example.cpp +++ b/runtime/test/generated/spec_V1_1/fully_connected_float_4d_simple.example.cpp
@@ -150,7 +150,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -338,7 +338,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -368,7 +368,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -398,7 +398,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/fully_connected_float_4d_simple_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/fully_connected_float_4d_simple_relaxed.example.cpp index 20743ed..3a858df 100644 --- a/runtime/test/generated/spec_V1_1/fully_connected_float_4d_simple_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/fully_connected_float_4d_simple_relaxed.example.cpp
@@ -150,7 +150,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -338,7 +338,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -368,7 +368,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -398,7 +398,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/fully_connected_float_large_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/fully_connected_float_large_relaxed.example.cpp index 4d47df5..762f39d 100644 --- a/runtime/test/generated/spec_V1_1/fully_connected_float_large_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/fully_connected_float_large_relaxed.example.cpp
@@ -150,7 +150,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -338,7 +338,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -368,7 +368,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -398,7 +398,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/fully_connected_float_large_weights_as_inputs_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/fully_connected_float_large_weights_as_inputs_relaxed.example.cpp index c1d7e9b..d420545 100644 --- a/runtime/test/generated/spec_V1_1/fully_connected_float_large_weights_as_inputs_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/fully_connected_float_large_weights_as_inputs_relaxed.example.cpp
@@ -150,7 +150,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -180,7 +180,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -210,7 +210,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/fully_connected_float_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/fully_connected_float_relaxed.example.cpp index f3c1a40..ffb6e01 100644 --- a/runtime/test/generated/spec_V1_1/fully_connected_float_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/fully_connected_float_relaxed.example.cpp
@@ -150,7 +150,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -338,7 +338,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -368,7 +368,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -398,7 +398,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/fully_connected_float_weights_as_inputs_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/fully_connected_float_weights_as_inputs_relaxed.example.cpp index 0d093d5..62f58ac 100644 --- a/runtime/test/generated/spec_V1_1/fully_connected_float_weights_as_inputs_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/fully_connected_float_weights_as_inputs_relaxed.example.cpp
@@ -150,7 +150,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -180,7 +180,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -210,7 +210,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/hashtable_lookup_float_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/hashtable_lookup_float_relaxed.example.cpp index c2752fd..2ae0f40 100644 --- a/runtime/test/generated/spec_V1_1/hashtable_lookup_float_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/hashtable_lookup_float_relaxed.example.cpp
@@ -150,7 +150,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/l2_normalization_2_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/l2_normalization_2_relaxed.example.cpp index 6cec72d..64a826e 100644 --- a/runtime/test/generated/spec_V1_1/l2_normalization_2_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/l2_normalization_2_relaxed.example.cpp
@@ -90,7 +90,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/l2_normalization_large_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/l2_normalization_large_relaxed.example.cpp index 4360e7c..21382b4 100644 --- a/runtime/test/generated/spec_V1_1/l2_normalization_large_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/l2_normalization_large_relaxed.example.cpp
@@ -90,7 +90,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/l2_normalization_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/l2_normalization_relaxed.example.cpp index 3a1aeab..fc7cf83 100644 --- a/runtime/test/generated/spec_V1_1/l2_normalization_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/l2_normalization_relaxed.example.cpp
@@ -90,7 +90,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/l2_pool_float_2_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/l2_pool_float_2_relaxed.example.cpp index 458fa38..30364c5 100644 --- a/runtime/test/generated/spec_V1_1/l2_pool_float_2_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/l2_pool_float_2_relaxed.example.cpp
@@ -150,7 +150,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/l2_pool_float_large_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/l2_pool_float_large_relaxed.example.cpp index 40cfe42..f6ad19d 100644 --- a/runtime/test/generated/spec_V1_1/l2_pool_float_large_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/l2_pool_float_large_relaxed.example.cpp
@@ -210,7 +210,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/l2_pool_float_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/l2_pool_float_relaxed.example.cpp index 54f8da8..ed1db7f 100644 --- a/runtime/test/generated/spec_V1_1/l2_pool_float_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/l2_pool_float_relaxed.example.cpp
@@ -150,7 +150,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/local_response_norm_float_1_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/local_response_norm_float_1_relaxed.example.cpp index 8bc9a9b..f69e8c1 100644 --- a/runtime/test/generated/spec_V1_1/local_response_norm_float_1_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/local_response_norm_float_1_relaxed.example.cpp
@@ -170,7 +170,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/local_response_norm_float_4_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/local_response_norm_float_4_relaxed.example.cpp index 7cee6fc..247bea3 100644 --- a/runtime/test/generated/spec_V1_1/local_response_norm_float_4_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/local_response_norm_float_4_relaxed.example.cpp
@@ -170,7 +170,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/logistic_float_1_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/logistic_float_1_relaxed.example.cpp index 92acbd6..318f056 100644 --- a/runtime/test/generated/spec_V1_1/logistic_float_1_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/logistic_float_1_relaxed.example.cpp
@@ -90,7 +90,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/logistic_float_2_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/logistic_float_2_relaxed.example.cpp index 9ae2910..594d7bc 100644 --- a/runtime/test/generated/spec_V1_1/logistic_float_2_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/logistic_float_2_relaxed.example.cpp
@@ -90,7 +90,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/lsh_projection_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/lsh_projection_relaxed.example.cpp index 408fc67..aa7ccfb 100644 --- a/runtime/test/generated/spec_V1_1/lsh_projection_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/lsh_projection_relaxed.example.cpp
@@ -150,7 +150,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -338,7 +338,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/lsh_projection_weights_as_inputs_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/lsh_projection_weights_as_inputs_relaxed.example.cpp index 9a968bf..1fb3794 100644 --- a/runtime/test/generated/spec_V1_1/lsh_projection_weights_as_inputs_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/lsh_projection_weights_as_inputs_relaxed.example.cpp
@@ -150,7 +150,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -180,7 +180,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/lstm2_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/lstm2_relaxed.example.cpp index 72033b6..944c924 100644 --- a/runtime/test/generated/spec_V1_1/lstm2_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/lstm2_relaxed.example.cpp
@@ -590,7 +590,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -620,7 +620,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -650,7 +650,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -680,7 +680,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -710,7 +710,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -740,7 +740,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -770,7 +770,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -800,7 +800,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -830,7 +830,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = 
{1}, @@ -860,7 +860,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -890,7 +890,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -920,7 +920,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -950,7 +950,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -980,7 +980,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/lstm2_state2_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/lstm2_state2_relaxed.example.cpp index 2d253df..c8ed803 100644 --- a/runtime/test/generated/spec_V1_1/lstm2_state2_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/lstm2_state2_relaxed.example.cpp
@@ -590,7 +590,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -620,7 +620,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -650,7 +650,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -680,7 +680,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -710,7 +710,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -740,7 +740,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -770,7 +770,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -800,7 +800,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -830,7 +830,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = 
{1}, @@ -860,7 +860,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -890,7 +890,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -920,7 +920,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -950,7 +950,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -980,7 +980,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/lstm2_state_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/lstm2_state_relaxed.example.cpp index 23cb4d5..9ce15a9 100644 --- a/runtime/test/generated/spec_V1_1/lstm2_state_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/lstm2_state_relaxed.example.cpp
@@ -590,7 +590,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -620,7 +620,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -650,7 +650,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -680,7 +680,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -710,7 +710,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -740,7 +740,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -770,7 +770,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -800,7 +800,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -830,7 +830,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = 
{1}, @@ -860,7 +860,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -890,7 +890,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -920,7 +920,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -950,7 +950,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -980,7 +980,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/lstm3_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/lstm3_relaxed.example.cpp index f4c0755..572d2cc 100644 --- a/runtime/test/generated/spec_V1_1/lstm3_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/lstm3_relaxed.example.cpp
@@ -590,7 +590,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -620,7 +620,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -650,7 +650,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -680,7 +680,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -710,7 +710,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -740,7 +740,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -770,7 +770,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -800,7 +800,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -830,7 +830,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = 
{1}, @@ -860,7 +860,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -890,7 +890,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -920,7 +920,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -950,7 +950,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -980,7 +980,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1010,7 +1010,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1040,7 +1040,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder15 + }, { // dummy15 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1070,7 +1070,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder16 + }, { // dummy16 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1100,7 +1100,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder17 + }, { // dummy17 .channelQuant = {}, .data = 
TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1130,7 +1130,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder18 + }, { // dummy18 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/lstm3_state2_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/lstm3_state2_relaxed.example.cpp index a2379bb..d59e21f 100644 --- a/runtime/test/generated/spec_V1_1/lstm3_state2_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/lstm3_state2_relaxed.example.cpp
@@ -590,7 +590,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -620,7 +620,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -650,7 +650,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -680,7 +680,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -710,7 +710,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -740,7 +740,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -770,7 +770,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -800,7 +800,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -830,7 +830,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = 
{1}, @@ -860,7 +860,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -890,7 +890,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -920,7 +920,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -950,7 +950,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -980,7 +980,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1010,7 +1010,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1040,7 +1040,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder15 + }, { // dummy15 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1070,7 +1070,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder16 + }, { // dummy16 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1100,7 +1100,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder17 + }, { // dummy17 .channelQuant = {}, .data = 
TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1130,7 +1130,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder18 + }, { // dummy18 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/lstm3_state3_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/lstm3_state3_relaxed.example.cpp index 90d5d1f..bf555d2 100644 --- a/runtime/test/generated/spec_V1_1/lstm3_state3_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/lstm3_state3_relaxed.example.cpp
@@ -590,7 +590,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -620,7 +620,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -650,7 +650,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -680,7 +680,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -710,7 +710,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -740,7 +740,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -770,7 +770,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -800,7 +800,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -830,7 +830,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = 
{1}, @@ -860,7 +860,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -890,7 +890,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -920,7 +920,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -950,7 +950,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -980,7 +980,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1010,7 +1010,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1040,7 +1040,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder15 + }, { // dummy15 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1070,7 +1070,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder16 + }, { // dummy16 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1100,7 +1100,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder17 + }, { // dummy17 .channelQuant = {}, .data = 
TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1130,7 +1130,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder18 + }, { // dummy18 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/lstm3_state_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/lstm3_state_relaxed.example.cpp index 3ada8df..635d745 100644 --- a/runtime/test/generated/spec_V1_1/lstm3_state_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/lstm3_state_relaxed.example.cpp
@@ -590,7 +590,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -620,7 +620,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -650,7 +650,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -680,7 +680,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -710,7 +710,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -740,7 +740,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -770,7 +770,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -800,7 +800,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -830,7 +830,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = 
{1}, @@ -860,7 +860,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -890,7 +890,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -920,7 +920,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -950,7 +950,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -980,7 +980,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1010,7 +1010,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1040,7 +1040,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder15 + }, { // dummy15 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1070,7 +1070,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder16 + }, { // dummy16 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1100,7 +1100,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder17 + }, { // dummy17 .channelQuant = {}, .data = 
TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1130,7 +1130,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder18 + }, { // dummy18 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/lstm_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/lstm_relaxed.example.cpp index f989c1d..39f57e5 100644 --- a/runtime/test/generated/spec_V1_1/lstm_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/lstm_relaxed.example.cpp
@@ -590,7 +590,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -620,7 +620,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -650,7 +650,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -680,7 +680,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -710,7 +710,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -740,7 +740,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -770,7 +770,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -800,7 +800,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -830,7 +830,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = 
{1}, @@ -860,7 +860,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -890,7 +890,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -920,7 +920,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -950,7 +950,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -980,7 +980,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1010,7 +1010,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/lstm_state2_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/lstm_state2_relaxed.example.cpp index d0d197a..3d62a04 100644 --- a/runtime/test/generated/spec_V1_1/lstm_state2_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/lstm_state2_relaxed.example.cpp
@@ -590,7 +590,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -620,7 +620,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -650,7 +650,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -680,7 +680,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -710,7 +710,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -740,7 +740,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -770,7 +770,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -800,7 +800,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -830,7 +830,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = 
{1}, @@ -860,7 +860,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -890,7 +890,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -920,7 +920,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -950,7 +950,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -980,7 +980,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1010,7 +1010,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/lstm_state_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/lstm_state_relaxed.example.cpp index e812391..3e2c1ca 100644 --- a/runtime/test/generated/spec_V1_1/lstm_state_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/lstm_state_relaxed.example.cpp
@@ -590,7 +590,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -620,7 +620,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -650,7 +650,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -680,7 +680,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -710,7 +710,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -740,7 +740,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -770,7 +770,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -800,7 +800,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -830,7 +830,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = 
{1}, @@ -860,7 +860,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -890,7 +890,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -920,7 +920,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -950,7 +950,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -980,7 +980,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1010,7 +1010,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/max_pool_float_1_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/max_pool_float_1_relaxed.example.cpp index 1cd975b..af0cb5c 100644 --- a/runtime/test/generated/spec_V1_1/max_pool_float_1_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/max_pool_float_1_relaxed.example.cpp
@@ -150,7 +150,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/max_pool_float_2_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/max_pool_float_2_relaxed.example.cpp index 24f75bf..7ccba5b 100644 --- a/runtime/test/generated/spec_V1_1/max_pool_float_2_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/max_pool_float_2_relaxed.example.cpp
@@ -170,7 +170,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/max_pool_float_3_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/max_pool_float_3_relaxed.example.cpp index 0bf179b..cdbf4e7 100644 --- a/runtime/test/generated/spec_V1_1/max_pool_float_3_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/max_pool_float_3_relaxed.example.cpp
@@ -170,7 +170,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/max_pool_float_4_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/max_pool_float_4_relaxed.example.cpp index 69c2e6b..41de52e 100644 --- a/runtime/test/generated/spec_V1_1/max_pool_float_4_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/max_pool_float_4_relaxed.example.cpp
@@ -150,7 +150,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/mean.example.cpp b/runtime/test/generated/spec_V1_1/mean.example.cpp index 1d97d4f..bd3c63b 100644 --- a/runtime/test/generated/spec_V1_1/mean.example.cpp +++ b/runtime/test/generated/spec_V1_1/mean.example.cpp
@@ -130,7 +130,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -298,7 +298,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/mean_b155508675.example.cpp b/runtime/test/generated/spec_V1_1/mean_b155508675.example.cpp index 0715de3..4cd5424 100644 --- a/runtime/test/generated/spec_V1_1/mean_b155508675.example.cpp +++ b/runtime/test/generated/spec_V1_1/mean_b155508675.example.cpp
@@ -130,7 +130,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/mean_float_1.example.cpp b/runtime/test/generated/spec_V1_1/mean_float_1.example.cpp index 35c9b2d..2a60edc 100644 --- a/runtime/test/generated/spec_V1_1/mean_float_1.example.cpp +++ b/runtime/test/generated/spec_V1_1/mean_float_1.example.cpp
@@ -130,7 +130,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -298,7 +298,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/mean_float_1_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/mean_float_1_relaxed.example.cpp index 65414c7..adf1aa1 100644 --- a/runtime/test/generated/spec_V1_1/mean_float_1_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/mean_float_1_relaxed.example.cpp
@@ -130,7 +130,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -298,7 +298,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/mean_float_2.example.cpp b/runtime/test/generated/spec_V1_1/mean_float_2.example.cpp index 3d5d968..c113866 100644 --- a/runtime/test/generated/spec_V1_1/mean_float_2.example.cpp +++ b/runtime/test/generated/spec_V1_1/mean_float_2.example.cpp
@@ -130,7 +130,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -298,7 +298,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/mean_float_2_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/mean_float_2_relaxed.example.cpp index 769a8b7..922201e 100644 --- a/runtime/test/generated/spec_V1_1/mean_float_2_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/mean_float_2_relaxed.example.cpp
@@ -130,7 +130,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -298,7 +298,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/mean_quant8_1.example.cpp b/runtime/test/generated/spec_V1_1/mean_quant8_1.example.cpp index f4bbe02..5760bdb 100644 --- a/runtime/test/generated/spec_V1_1/mean_quant8_1.example.cpp +++ b/runtime/test/generated/spec_V1_1/mean_quant8_1.example.cpp
@@ -130,7 +130,7 @@ .scale = 0.8f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 5 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({5}), .dimensions = {1}, @@ -298,7 +298,7 @@ .scale = 0.8f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 5 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({5}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/mean_quant8_2.example.cpp b/runtime/test/generated/spec_V1_1/mean_quant8_2.example.cpp index 0800f09..0f24e6f 100644 --- a/runtime/test/generated/spec_V1_1/mean_quant8_2.example.cpp +++ b/runtime/test/generated/spec_V1_1/mean_quant8_2.example.cpp
@@ -130,7 +130,7 @@ .scale = 0.8f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 5 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({5}), .dimensions = {1}, @@ -298,7 +298,7 @@ .scale = 0.8f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 5 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({5}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/mean_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/mean_relaxed.example.cpp index c431621..526e271 100644 --- a/runtime/test/generated/spec_V1_1/mean_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/mean_relaxed.example.cpp
@@ -130,7 +130,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -298,7 +298,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/mul_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/mul_relaxed.example.cpp index de555d7..3e68a55 100644 --- a/runtime/test/generated/spec_V1_1/mul_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/mul_relaxed.example.cpp
@@ -130,7 +130,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -160,7 +160,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/mul_relu_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/mul_relu_relaxed.example.cpp index 584726c..8e71acf 100644 --- a/runtime/test/generated/spec_V1_1/mul_relu_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/mul_relu_relaxed.example.cpp
@@ -130,7 +130,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -160,7 +160,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/pad.example.cpp b/runtime/test/generated/spec_V1_1/pad.example.cpp index 67a4960..7016015 100644 --- a/runtime/test/generated/spec_V1_1/pad.example.cpp +++ b/runtime/test/generated/spec_V1_1/pad.example.cpp
@@ -110,7 +110,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -258,7 +258,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/pad_float_1.example.cpp b/runtime/test/generated/spec_V1_1/pad_float_1.example.cpp index bd10fb8..e0c9018 100644 --- a/runtime/test/generated/spec_V1_1/pad_float_1.example.cpp +++ b/runtime/test/generated/spec_V1_1/pad_float_1.example.cpp
@@ -110,7 +110,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -258,7 +258,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/pad_float_1_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/pad_float_1_relaxed.example.cpp index 064a085..3aedb0d 100644 --- a/runtime/test/generated/spec_V1_1/pad_float_1_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/pad_float_1_relaxed.example.cpp
@@ -110,7 +110,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -258,7 +258,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/pad_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/pad_relaxed.example.cpp index ee02762..761c3be 100644 --- a/runtime/test/generated/spec_V1_1/pad_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/pad_relaxed.example.cpp
@@ -110,7 +110,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -258,7 +258,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/relu1_float_1_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/relu1_float_1_relaxed.example.cpp index da852a8..2e488a9 100644 --- a/runtime/test/generated/spec_V1_1/relu1_float_1_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/relu1_float_1_relaxed.example.cpp
@@ -90,7 +90,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/relu1_float_2_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/relu1_float_2_relaxed.example.cpp index 65c7f19..32f9ff6 100644 --- a/runtime/test/generated/spec_V1_1/relu1_float_2_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/relu1_float_2_relaxed.example.cpp
@@ -90,7 +90,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/relu6_float_1_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/relu6_float_1_relaxed.example.cpp index 5182efd..74290f1 100644 --- a/runtime/test/generated/spec_V1_1/relu6_float_1_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/relu6_float_1_relaxed.example.cpp
@@ -90,7 +90,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/relu6_float_2_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/relu6_float_2_relaxed.example.cpp index cb9daac..7907ef0 100644 --- a/runtime/test/generated/spec_V1_1/relu6_float_2_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/relu6_float_2_relaxed.example.cpp
@@ -90,7 +90,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/relu_float_1_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/relu_float_1_relaxed.example.cpp index dc0c536..09a867c 100644 --- a/runtime/test/generated/spec_V1_1/relu_float_1_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/relu_float_1_relaxed.example.cpp
@@ -90,7 +90,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/relu_float_2_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/relu_float_2_relaxed.example.cpp index 4e45efb..f0467bf 100644 --- a/runtime/test/generated/spec_V1_1/relu_float_2_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/relu_float_2_relaxed.example.cpp
@@ -90,7 +90,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/reshape_b138150365.example.cpp b/runtime/test/generated/spec_V1_1/reshape_b138150365.example.cpp index a348097..385772f 100644 --- a/runtime/test/generated/spec_V1_1/reshape_b138150365.example.cpp +++ b/runtime/test/generated/spec_V1_1/reshape_b138150365.example.cpp
@@ -110,7 +110,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -258,7 +258,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/reshape_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/reshape_relaxed.example.cpp index 428b78e..7f67ff8 100644 --- a/runtime/test/generated/spec_V1_1/reshape_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/reshape_relaxed.example.cpp
@@ -110,7 +110,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -258,7 +258,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/reshape_weights_as_inputs_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/reshape_weights_as_inputs_relaxed.example.cpp index 733d97b..d83899f 100644 --- a/runtime/test/generated/spec_V1_1/reshape_weights_as_inputs_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/reshape_weights_as_inputs_relaxed.example.cpp
@@ -110,7 +110,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/resize_bilinear_2_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/resize_bilinear_2_relaxed.example.cpp index 5c5aa31..98a48b4 100644 --- a/runtime/test/generated/spec_V1_1/resize_bilinear_2_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/resize_bilinear_2_relaxed.example.cpp
@@ -130,7 +130,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/resize_bilinear_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/resize_bilinear_relaxed.example.cpp index 7f091d8..dcafa2a 100644 --- a/runtime/test/generated/spec_V1_1/resize_bilinear_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/resize_bilinear_relaxed.example.cpp
@@ -130,7 +130,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/rnn_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/rnn_relaxed.example.cpp index ace8368..864a82b 100644 --- a/runtime/test/generated/spec_V1_1/rnn_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/rnn_relaxed.example.cpp
@@ -210,7 +210,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -240,7 +240,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -270,7 +270,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -300,7 +300,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -330,7 +330,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/rnn_state_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/rnn_state_relaxed.example.cpp index 27363fb..80257cf 100644 --- a/runtime/test/generated/spec_V1_1/rnn_state_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/rnn_state_relaxed.example.cpp
@@ -210,7 +210,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -240,7 +240,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -270,7 +270,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -300,7 +300,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -330,7 +330,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/softmax_float_1_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/softmax_float_1_relaxed.example.cpp index e679a6c..cd97137 100644 --- a/runtime/test/generated/spec_V1_1/softmax_float_1_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/softmax_float_1_relaxed.example.cpp
@@ -110,7 +110,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/softmax_float_2_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/softmax_float_2_relaxed.example.cpp index 3de4974..c59bdbb 100644 --- a/runtime/test/generated/spec_V1_1/softmax_float_2_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/softmax_float_2_relaxed.example.cpp
@@ -110,7 +110,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/space_to_batch.example.cpp b/runtime/test/generated/spec_V1_1/space_to_batch.example.cpp index 073f297..d57113a 100644 --- a/runtime/test/generated/spec_V1_1/space_to_batch.example.cpp +++ b/runtime/test/generated/spec_V1_1/space_to_batch.example.cpp
@@ -130,7 +130,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -298,7 +298,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/space_to_batch_float_1.example.cpp b/runtime/test/generated/spec_V1_1/space_to_batch_float_1.example.cpp index fcd5640..52dd821 100644 --- a/runtime/test/generated/spec_V1_1/space_to_batch_float_1.example.cpp +++ b/runtime/test/generated/spec_V1_1/space_to_batch_float_1.example.cpp
@@ -130,7 +130,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -298,7 +298,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/space_to_batch_float_1_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/space_to_batch_float_1_relaxed.example.cpp index bdf5d84..2e4ac45 100644 --- a/runtime/test/generated/spec_V1_1/space_to_batch_float_1_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/space_to_batch_float_1_relaxed.example.cpp
@@ -130,7 +130,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -298,7 +298,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/space_to_batch_float_2.example.cpp b/runtime/test/generated/spec_V1_1/space_to_batch_float_2.example.cpp index ee24d03..656aa7e 100644 --- a/runtime/test/generated/spec_V1_1/space_to_batch_float_2.example.cpp +++ b/runtime/test/generated/spec_V1_1/space_to_batch_float_2.example.cpp
@@ -130,7 +130,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -298,7 +298,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/space_to_batch_float_2_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/space_to_batch_float_2_relaxed.example.cpp index b082e70..3de4087 100644 --- a/runtime/test/generated/spec_V1_1/space_to_batch_float_2_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/space_to_batch_float_2_relaxed.example.cpp
@@ -130,7 +130,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -298,7 +298,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/space_to_batch_float_3.example.cpp b/runtime/test/generated/spec_V1_1/space_to_batch_float_3.example.cpp index 72206eb..ccf3e7f 100644 --- a/runtime/test/generated/spec_V1_1/space_to_batch_float_3.example.cpp +++ b/runtime/test/generated/spec_V1_1/space_to_batch_float_3.example.cpp
@@ -130,7 +130,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -298,7 +298,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/space_to_batch_float_3_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/space_to_batch_float_3_relaxed.example.cpp index 81f5d37..67a2ace 100644 --- a/runtime/test/generated/spec_V1_1/space_to_batch_float_3_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/space_to_batch_float_3_relaxed.example.cpp
@@ -130,7 +130,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -298,7 +298,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/space_to_batch_quant8_1.example.cpp b/runtime/test/generated/spec_V1_1/space_to_batch_quant8_1.example.cpp index 6669246..6346b7a 100644 --- a/runtime/test/generated/spec_V1_1/space_to_batch_quant8_1.example.cpp +++ b/runtime/test/generated/spec_V1_1/space_to_batch_quant8_1.example.cpp
@@ -130,7 +130,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -298,7 +298,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/space_to_batch_quant8_2.example.cpp b/runtime/test/generated/spec_V1_1/space_to_batch_quant8_2.example.cpp index 35f75f3..62c0cb9 100644 --- a/runtime/test/generated/spec_V1_1/space_to_batch_quant8_2.example.cpp +++ b/runtime/test/generated/spec_V1_1/space_to_batch_quant8_2.example.cpp
@@ -130,7 +130,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -298,7 +298,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/space_to_batch_quant8_3.example.cpp b/runtime/test/generated/spec_V1_1/space_to_batch_quant8_3.example.cpp index 905d174..4418939 100644 --- a/runtime/test/generated/spec_V1_1/space_to_batch_quant8_3.example.cpp +++ b/runtime/test/generated/spec_V1_1/space_to_batch_quant8_3.example.cpp
@@ -130,7 +130,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -298,7 +298,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/space_to_batch_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/space_to_batch_relaxed.example.cpp index bff67af..68655a2 100644 --- a/runtime/test/generated/spec_V1_1/space_to_batch_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/space_to_batch_relaxed.example.cpp
@@ -130,7 +130,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -298,7 +298,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/space_to_depth_float_1_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/space_to_depth_float_1_relaxed.example.cpp index 30d68a9..ba0ba7d 100644 --- a/runtime/test/generated/spec_V1_1/space_to_depth_float_1_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/space_to_depth_float_1_relaxed.example.cpp
@@ -110,7 +110,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/space_to_depth_float_2_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/space_to_depth_float_2_relaxed.example.cpp index e44932b..090b693 100644 --- a/runtime/test/generated/spec_V1_1/space_to_depth_float_2_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/space_to_depth_float_2_relaxed.example.cpp
@@ -110,7 +110,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/space_to_depth_float_3_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/space_to_depth_float_3_relaxed.example.cpp index bde69e2..a58b42b 100644 --- a/runtime/test/generated/spec_V1_1/space_to_depth_float_3_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/space_to_depth_float_3_relaxed.example.cpp
@@ -110,7 +110,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/squeeze.example.cpp b/runtime/test/generated/spec_V1_1/squeeze.example.cpp index 1fad0a6..b5533c5 100644 --- a/runtime/test/generated/spec_V1_1/squeeze.example.cpp +++ b/runtime/test/generated/spec_V1_1/squeeze.example.cpp
@@ -110,7 +110,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -258,7 +258,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/squeeze_b155238914.example.cpp b/runtime/test/generated/spec_V1_1/squeeze_b155238914.example.cpp index 5b888dd..1dec059 100644 --- a/runtime/test/generated/spec_V1_1/squeeze_b155238914.example.cpp +++ b/runtime/test/generated/spec_V1_1/squeeze_b155238914.example.cpp
@@ -110,7 +110,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/squeeze_float_1.example.cpp b/runtime/test/generated/spec_V1_1/squeeze_float_1.example.cpp index 5c62a61..14795c1 100644 --- a/runtime/test/generated/spec_V1_1/squeeze_float_1.example.cpp +++ b/runtime/test/generated/spec_V1_1/squeeze_float_1.example.cpp
@@ -110,7 +110,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -258,7 +258,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/squeeze_float_1_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/squeeze_float_1_relaxed.example.cpp index 1621efd..746cd54 100644 --- a/runtime/test/generated/spec_V1_1/squeeze_float_1_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/squeeze_float_1_relaxed.example.cpp
@@ -110,7 +110,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -258,7 +258,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/squeeze_omitted.example.cpp b/runtime/test/generated/spec_V1_1/squeeze_omitted.example.cpp index 70d31e1..2d15ef2 100644 --- a/runtime/test/generated/spec_V1_1/squeeze_omitted.example.cpp +++ b/runtime/test/generated/spec_V1_1/squeeze_omitted.example.cpp
@@ -110,7 +110,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -258,7 +258,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -406,7 +406,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/squeeze_quant8_1.example.cpp b/runtime/test/generated/spec_V1_1/squeeze_quant8_1.example.cpp index e54b1ce..ec3349b 100644 --- a/runtime/test/generated/spec_V1_1/squeeze_quant8_1.example.cpp +++ b/runtime/test/generated/spec_V1_1/squeeze_quant8_1.example.cpp
@@ -110,7 +110,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -258,7 +258,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/squeeze_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/squeeze_relaxed.example.cpp index a8aef96..1794610 100644 --- a/runtime/test/generated/spec_V1_1/squeeze_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/squeeze_relaxed.example.cpp
@@ -110,7 +110,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -258,7 +258,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/strided_slice.example.cpp b/runtime/test/generated/spec_V1_1/strided_slice.example.cpp index 99fd608..6359a73 100644 --- a/runtime/test/generated/spec_V1_1/strided_slice.example.cpp +++ b/runtime/test/generated/spec_V1_1/strided_slice.example.cpp
@@ -210,7 +210,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -458,7 +458,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/strided_slice_b155662254.example.cpp b/runtime/test/generated/spec_V1_1/strided_slice_b155662254.example.cpp index 9e72236..2923cbe 100644 --- a/runtime/test/generated/spec_V1_1/strided_slice_b155662254.example.cpp +++ b/runtime/test/generated/spec_V1_1/strided_slice_b155662254.example.cpp
@@ -210,7 +210,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/strided_slice_float_1.example.cpp b/runtime/test/generated/spec_V1_1/strided_slice_float_1.example.cpp index 1ed0121..c3e65dc 100644 --- a/runtime/test/generated/spec_V1_1/strided_slice_float_1.example.cpp +++ b/runtime/test/generated/spec_V1_1/strided_slice_float_1.example.cpp
@@ -210,7 +210,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -458,7 +458,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/strided_slice_float_10.example.cpp b/runtime/test/generated/spec_V1_1/strided_slice_float_10.example.cpp index 85f8bf0..a2ef663 100644 --- a/runtime/test/generated/spec_V1_1/strided_slice_float_10.example.cpp +++ b/runtime/test/generated/spec_V1_1/strided_slice_float_10.example.cpp
@@ -210,7 +210,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -458,7 +458,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/strided_slice_float_10_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/strided_slice_float_10_relaxed.example.cpp index 3f161f0..e9c220f 100644 --- a/runtime/test/generated/spec_V1_1/strided_slice_float_10_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/strided_slice_float_10_relaxed.example.cpp
@@ -210,7 +210,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -458,7 +458,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/strided_slice_float_11.example.cpp b/runtime/test/generated/spec_V1_1/strided_slice_float_11.example.cpp index 1bf3eef..12feb17 100644 --- a/runtime/test/generated/spec_V1_1/strided_slice_float_11.example.cpp +++ b/runtime/test/generated/spec_V1_1/strided_slice_float_11.example.cpp
@@ -210,7 +210,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -458,7 +458,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/strided_slice_float_11_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/strided_slice_float_11_relaxed.example.cpp index e0204f2..6bc4c9a 100644 --- a/runtime/test/generated/spec_V1_1/strided_slice_float_11_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/strided_slice_float_11_relaxed.example.cpp
@@ -210,7 +210,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -458,7 +458,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/strided_slice_float_1_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/strided_slice_float_1_relaxed.example.cpp index d0fd22c..75ca08d 100644 --- a/runtime/test/generated/spec_V1_1/strided_slice_float_1_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/strided_slice_float_1_relaxed.example.cpp
@@ -210,7 +210,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -458,7 +458,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/strided_slice_float_2.example.cpp b/runtime/test/generated/spec_V1_1/strided_slice_float_2.example.cpp index ec9bf76..0988697 100644 --- a/runtime/test/generated/spec_V1_1/strided_slice_float_2.example.cpp +++ b/runtime/test/generated/spec_V1_1/strided_slice_float_2.example.cpp
@@ -210,7 +210,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -458,7 +458,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/strided_slice_float_2_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/strided_slice_float_2_relaxed.example.cpp index dbd369f..e8b622e 100644 --- a/runtime/test/generated/spec_V1_1/strided_slice_float_2_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/strided_slice_float_2_relaxed.example.cpp
@@ -210,7 +210,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -458,7 +458,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/strided_slice_float_3.example.cpp b/runtime/test/generated/spec_V1_1/strided_slice_float_3.example.cpp index ae9213e..556bea1 100644 --- a/runtime/test/generated/spec_V1_1/strided_slice_float_3.example.cpp +++ b/runtime/test/generated/spec_V1_1/strided_slice_float_3.example.cpp
@@ -210,7 +210,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -458,7 +458,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/strided_slice_float_3_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/strided_slice_float_3_relaxed.example.cpp index 1c4430f..6922abc 100644 --- a/runtime/test/generated/spec_V1_1/strided_slice_float_3_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/strided_slice_float_3_relaxed.example.cpp
@@ -210,7 +210,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -458,7 +458,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/strided_slice_float_4.example.cpp b/runtime/test/generated/spec_V1_1/strided_slice_float_4.example.cpp index d1b610b..1c9266e 100644 --- a/runtime/test/generated/spec_V1_1/strided_slice_float_4.example.cpp +++ b/runtime/test/generated/spec_V1_1/strided_slice_float_4.example.cpp
@@ -210,7 +210,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -458,7 +458,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/strided_slice_float_4_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/strided_slice_float_4_relaxed.example.cpp index f06c3c3..881176f 100644 --- a/runtime/test/generated/spec_V1_1/strided_slice_float_4_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/strided_slice_float_4_relaxed.example.cpp
@@ -210,7 +210,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -458,7 +458,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/strided_slice_float_5.example.cpp b/runtime/test/generated/spec_V1_1/strided_slice_float_5.example.cpp index eb4ea92..a579226 100644 --- a/runtime/test/generated/spec_V1_1/strided_slice_float_5.example.cpp +++ b/runtime/test/generated/spec_V1_1/strided_slice_float_5.example.cpp
@@ -210,7 +210,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -458,7 +458,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/strided_slice_float_5_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/strided_slice_float_5_relaxed.example.cpp index 05c0b2b..49d4bdd 100644 --- a/runtime/test/generated/spec_V1_1/strided_slice_float_5_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/strided_slice_float_5_relaxed.example.cpp
@@ -210,7 +210,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -458,7 +458,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/strided_slice_float_6.example.cpp b/runtime/test/generated/spec_V1_1/strided_slice_float_6.example.cpp index af91117..67528dd 100644 --- a/runtime/test/generated/spec_V1_1/strided_slice_float_6.example.cpp +++ b/runtime/test/generated/spec_V1_1/strided_slice_float_6.example.cpp
@@ -210,7 +210,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -458,7 +458,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/strided_slice_float_6_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/strided_slice_float_6_relaxed.example.cpp index d879907..5155a1a 100644 --- a/runtime/test/generated/spec_V1_1/strided_slice_float_6_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/strided_slice_float_6_relaxed.example.cpp
@@ -210,7 +210,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -458,7 +458,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/strided_slice_float_7.example.cpp b/runtime/test/generated/spec_V1_1/strided_slice_float_7.example.cpp index 728df01..8b2ac49 100644 --- a/runtime/test/generated/spec_V1_1/strided_slice_float_7.example.cpp +++ b/runtime/test/generated/spec_V1_1/strided_slice_float_7.example.cpp
@@ -210,7 +210,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -458,7 +458,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/strided_slice_float_7_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/strided_slice_float_7_relaxed.example.cpp index 5624999..39ba458 100644 --- a/runtime/test/generated/spec_V1_1/strided_slice_float_7_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/strided_slice_float_7_relaxed.example.cpp
@@ -210,7 +210,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -458,7 +458,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/strided_slice_float_8.example.cpp b/runtime/test/generated/spec_V1_1/strided_slice_float_8.example.cpp index ff93c62..f8d280d 100644 --- a/runtime/test/generated/spec_V1_1/strided_slice_float_8.example.cpp +++ b/runtime/test/generated/spec_V1_1/strided_slice_float_8.example.cpp
@@ -210,7 +210,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -458,7 +458,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/strided_slice_float_8_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/strided_slice_float_8_relaxed.example.cpp index 0ae7a03..8854413 100644 --- a/runtime/test/generated/spec_V1_1/strided_slice_float_8_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/strided_slice_float_8_relaxed.example.cpp
@@ -210,7 +210,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -458,7 +458,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/strided_slice_float_9.example.cpp b/runtime/test/generated/spec_V1_1/strided_slice_float_9.example.cpp index 8915467..eca07da 100644 --- a/runtime/test/generated/spec_V1_1/strided_slice_float_9.example.cpp +++ b/runtime/test/generated/spec_V1_1/strided_slice_float_9.example.cpp
@@ -210,7 +210,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -458,7 +458,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/strided_slice_float_9_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/strided_slice_float_9_relaxed.example.cpp index f3c16ab..68450a4 100644 --- a/runtime/test/generated/spec_V1_1/strided_slice_float_9_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/strided_slice_float_9_relaxed.example.cpp
@@ -210,7 +210,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -458,7 +458,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/strided_slice_qaunt8_10.example.cpp b/runtime/test/generated/spec_V1_1/strided_slice_qaunt8_10.example.cpp index c530aba..aa7efd2 100644 --- a/runtime/test/generated/spec_V1_1/strided_slice_qaunt8_10.example.cpp +++ b/runtime/test/generated/spec_V1_1/strided_slice_qaunt8_10.example.cpp
@@ -210,7 +210,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -458,7 +458,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/strided_slice_qaunt8_11.example.cpp b/runtime/test/generated/spec_V1_1/strided_slice_qaunt8_11.example.cpp index e73976f..1271b4b 100644 --- a/runtime/test/generated/spec_V1_1/strided_slice_qaunt8_11.example.cpp +++ b/runtime/test/generated/spec_V1_1/strided_slice_qaunt8_11.example.cpp
@@ -210,7 +210,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -458,7 +458,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/strided_slice_quant8_1.example.cpp b/runtime/test/generated/spec_V1_1/strided_slice_quant8_1.example.cpp index dd97703..232d400 100644 --- a/runtime/test/generated/spec_V1_1/strided_slice_quant8_1.example.cpp +++ b/runtime/test/generated/spec_V1_1/strided_slice_quant8_1.example.cpp
@@ -210,7 +210,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -458,7 +458,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/strided_slice_quant8_2.example.cpp b/runtime/test/generated/spec_V1_1/strided_slice_quant8_2.example.cpp index 2f937a8..c7b8260 100644 --- a/runtime/test/generated/spec_V1_1/strided_slice_quant8_2.example.cpp +++ b/runtime/test/generated/spec_V1_1/strided_slice_quant8_2.example.cpp
@@ -210,7 +210,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -458,7 +458,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/strided_slice_quant8_3.example.cpp b/runtime/test/generated/spec_V1_1/strided_slice_quant8_3.example.cpp index 28bb545..2281d02 100644 --- a/runtime/test/generated/spec_V1_1/strided_slice_quant8_3.example.cpp +++ b/runtime/test/generated/spec_V1_1/strided_slice_quant8_3.example.cpp
@@ -210,7 +210,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -458,7 +458,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/strided_slice_quant8_4.example.cpp b/runtime/test/generated/spec_V1_1/strided_slice_quant8_4.example.cpp index dfff77a..cfb69d8 100644 --- a/runtime/test/generated/spec_V1_1/strided_slice_quant8_4.example.cpp +++ b/runtime/test/generated/spec_V1_1/strided_slice_quant8_4.example.cpp
@@ -210,7 +210,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -458,7 +458,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/strided_slice_quant8_5.example.cpp b/runtime/test/generated/spec_V1_1/strided_slice_quant8_5.example.cpp index ebb71b0..802e27d 100644 --- a/runtime/test/generated/spec_V1_1/strided_slice_quant8_5.example.cpp +++ b/runtime/test/generated/spec_V1_1/strided_slice_quant8_5.example.cpp
@@ -210,7 +210,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -458,7 +458,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/strided_slice_quant8_6.example.cpp b/runtime/test/generated/spec_V1_1/strided_slice_quant8_6.example.cpp index 913b48a..b39b4a8 100644 --- a/runtime/test/generated/spec_V1_1/strided_slice_quant8_6.example.cpp +++ b/runtime/test/generated/spec_V1_1/strided_slice_quant8_6.example.cpp
@@ -210,7 +210,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -458,7 +458,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/strided_slice_quant8_7.example.cpp b/runtime/test/generated/spec_V1_1/strided_slice_quant8_7.example.cpp index 5eea2fd..539c81a 100644 --- a/runtime/test/generated/spec_V1_1/strided_slice_quant8_7.example.cpp +++ b/runtime/test/generated/spec_V1_1/strided_slice_quant8_7.example.cpp
@@ -210,7 +210,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -458,7 +458,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/strided_slice_quant8_8.example.cpp b/runtime/test/generated/spec_V1_1/strided_slice_quant8_8.example.cpp index ba4b53b..7749d52 100644 --- a/runtime/test/generated/spec_V1_1/strided_slice_quant8_8.example.cpp +++ b/runtime/test/generated/spec_V1_1/strided_slice_quant8_8.example.cpp
@@ -210,7 +210,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -458,7 +458,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/strided_slice_quant8_9.example.cpp b/runtime/test/generated/spec_V1_1/strided_slice_quant8_9.example.cpp index 3bbe74e..3fe4fe2 100644 --- a/runtime/test/generated/spec_V1_1/strided_slice_quant8_9.example.cpp +++ b/runtime/test/generated/spec_V1_1/strided_slice_quant8_9.example.cpp
@@ -210,7 +210,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -458,7 +458,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/strided_slice_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/strided_slice_relaxed.example.cpp index e382f0f..f9a9c09 100644 --- a/runtime/test/generated/spec_V1_1/strided_slice_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/strided_slice_relaxed.example.cpp
@@ -210,7 +210,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -458,7 +458,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/sub.example.cpp b/runtime/test/generated/spec_V1_1/sub.example.cpp index 14473a0..80db7fd 100644 --- a/runtime/test/generated/spec_V1_1/sub.example.cpp +++ b/runtime/test/generated/spec_V1_1/sub.example.cpp
@@ -130,7 +130,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -160,7 +160,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/sub_broadcast_float.example.cpp b/runtime/test/generated/spec_V1_1/sub_broadcast_float.example.cpp index a3d86f5..dd77199 100644 --- a/runtime/test/generated/spec_V1_1/sub_broadcast_float.example.cpp +++ b/runtime/test/generated/spec_V1_1/sub_broadcast_float.example.cpp
@@ -130,7 +130,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -160,7 +160,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/sub_broadcast_float_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/sub_broadcast_float_relaxed.example.cpp index d0edc77..85201e2 100644 --- a/runtime/test/generated/spec_V1_1/sub_broadcast_float_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/sub_broadcast_float_relaxed.example.cpp
@@ -130,7 +130,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -160,7 +160,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/sub_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/sub_relaxed.example.cpp index 6631519..9927e2e 100644 --- a/runtime/test/generated/spec_V1_1/sub_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/sub_relaxed.example.cpp
@@ -130,7 +130,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -160,7 +160,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/svdf2_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/svdf2_relaxed.example.cpp index 0caf94e..c00479d 100644 --- a/runtime/test/generated/spec_V1_1/svdf2_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/svdf2_relaxed.example.cpp
@@ -230,7 +230,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -260,7 +260,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -290,7 +290,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -320,7 +320,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/svdf_bias_present_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/svdf_bias_present_relaxed.example.cpp index 57a25a4..564c23d 100644 --- a/runtime/test/generated/spec_V1_1/svdf_bias_present_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/svdf_bias_present_relaxed.example.cpp
@@ -230,7 +230,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -260,7 +260,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -290,7 +290,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -320,7 +320,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -350,7 +350,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/svdf_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/svdf_relaxed.example.cpp index f777899..6ac48bc 100644 --- a/runtime/test/generated/spec_V1_1/svdf_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/svdf_relaxed.example.cpp
@@ -230,7 +230,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -260,7 +260,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -290,7 +290,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -320,7 +320,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/svdf_state_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/svdf_state_relaxed.example.cpp index 2014ecb..70ba701 100644 --- a/runtime/test/generated/spec_V1_1/svdf_state_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/svdf_state_relaxed.example.cpp
@@ -230,7 +230,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -260,7 +260,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -290,7 +290,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -320,7 +320,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/tanh_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/tanh_relaxed.example.cpp index 4722acd..0a605c7 100644 --- a/runtime/test/generated/spec_V1_1/tanh_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/tanh_relaxed.example.cpp
@@ -90,7 +90,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/transpose.example.cpp b/runtime/test/generated/spec_V1_1/transpose.example.cpp index 16907c1..c398eff 100644 --- a/runtime/test/generated/spec_V1_1/transpose.example.cpp +++ b/runtime/test/generated/spec_V1_1/transpose.example.cpp
@@ -110,7 +110,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -258,7 +258,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/transpose_float_1.example.cpp b/runtime/test/generated/spec_V1_1/transpose_float_1.example.cpp index 94ba735..e7bfe89 100644 --- a/runtime/test/generated/spec_V1_1/transpose_float_1.example.cpp +++ b/runtime/test/generated/spec_V1_1/transpose_float_1.example.cpp
@@ -110,7 +110,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -258,7 +258,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/transpose_float_1_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/transpose_float_1_relaxed.example.cpp index dfee6be..1da44d1 100644 --- a/runtime/test/generated/spec_V1_1/transpose_float_1_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/transpose_float_1_relaxed.example.cpp
@@ -110,7 +110,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -258,7 +258,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/transpose_quant8_1.example.cpp b/runtime/test/generated/spec_V1_1/transpose_quant8_1.example.cpp index 91681d8..5819c65 100644 --- a/runtime/test/generated/spec_V1_1/transpose_quant8_1.example.cpp +++ b/runtime/test/generated/spec_V1_1/transpose_quant8_1.example.cpp
@@ -110,7 +110,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -258,7 +258,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_1/transpose_relaxed.example.cpp b/runtime/test/generated/spec_V1_1/transpose_relaxed.example.cpp index 1061450..c8c8376 100644 --- a/runtime/test/generated/spec_V1_1/transpose_relaxed.example.cpp +++ b/runtime/test/generated/spec_V1_1/transpose_relaxed.example.cpp
@@ -110,7 +110,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -258,7 +258,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/add_v1_2.example.cpp b/runtime/test/generated/spec_V1_2/add_v1_2.example.cpp index 2b988d2..a2243ef 100644 --- a/runtime/test/generated/spec_V1_2/add_v1_2.example.cpp +++ b/runtime/test/generated/spec_V1_2/add_v1_2.example.cpp
@@ -130,7 +130,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -160,7 +160,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -332,7 +332,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -362,7 +362,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/argmax_1.example.cpp b/runtime/test/generated/spec_V1_2/argmax_1.example.cpp index 4a70145..cd89b96 100644 --- a/runtime/test/generated/spec_V1_2/argmax_1.example.cpp +++ b/runtime/test/generated/spec_V1_2/argmax_1.example.cpp
@@ -110,7 +110,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -258,7 +258,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -406,7 +406,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -611,7 +611,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/argmax_2.example.cpp b/runtime/test/generated/spec_V1_2/argmax_2.example.cpp index 2ecf213..bfbe4f4 100644 --- a/runtime/test/generated/spec_V1_2/argmax_2.example.cpp +++ b/runtime/test/generated/spec_V1_2/argmax_2.example.cpp
@@ -110,7 +110,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -258,7 +258,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -406,7 +406,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -611,7 +611,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/argmax_3.example.cpp b/runtime/test/generated/spec_V1_2/argmax_3.example.cpp index ef627e6..01a23c2 100644 --- a/runtime/test/generated/spec_V1_2/argmax_3.example.cpp +++ b/runtime/test/generated/spec_V1_2/argmax_3.example.cpp
@@ -110,7 +110,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -258,7 +258,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -406,7 +406,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -611,7 +611,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/argmax_b155660285.example.cpp b/runtime/test/generated/spec_V1_2/argmax_b155660285.example.cpp index 2fac8ec..1ebca72 100644 --- a/runtime/test/generated/spec_V1_2/argmax_b155660285.example.cpp +++ b/runtime/test/generated/spec_V1_2/argmax_b155660285.example.cpp
@@ -110,7 +110,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/argmin_1.example.cpp b/runtime/test/generated/spec_V1_2/argmin_1.example.cpp index 9d9d622..e5b94a2 100644 --- a/runtime/test/generated/spec_V1_2/argmin_1.example.cpp +++ b/runtime/test/generated/spec_V1_2/argmin_1.example.cpp
@@ -110,7 +110,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -258,7 +258,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -406,7 +406,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -611,7 +611,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/argmin_2.example.cpp b/runtime/test/generated/spec_V1_2/argmin_2.example.cpp index 1a8000a..2e08d28 100644 --- a/runtime/test/generated/spec_V1_2/argmin_2.example.cpp +++ b/runtime/test/generated/spec_V1_2/argmin_2.example.cpp
@@ -110,7 +110,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -258,7 +258,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -406,7 +406,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -611,7 +611,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/argmin_3.example.cpp b/runtime/test/generated/spec_V1_2/argmin_3.example.cpp index f75abad..002e471 100644 --- a/runtime/test/generated/spec_V1_2/argmin_3.example.cpp +++ b/runtime/test/generated/spec_V1_2/argmin_3.example.cpp
@@ -110,7 +110,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -258,7 +258,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -406,7 +406,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -611,7 +611,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/argmin_b155660285.example.cpp b/runtime/test/generated/spec_V1_2/argmin_b155660285.example.cpp index 1b8858b..cfa50c6 100644 --- a/runtime/test/generated/spec_V1_2/argmin_b155660285.example.cpp +++ b/runtime/test/generated/spec_V1_2/argmin_b155660285.example.cpp
@@ -110,7 +110,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/avg_pool_v1_2.example.cpp b/runtime/test/generated/spec_V1_2/avg_pool_v1_2.example.cpp index 82e8b93..b1ed528 100644 --- a/runtime/test/generated/spec_V1_2/avg_pool_v1_2.example.cpp +++ b/runtime/test/generated/spec_V1_2/avg_pool_v1_2.example.cpp
@@ -290,7 +290,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -618,7 +618,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -946,7 +946,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1274,7 +1274,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -1602,7 +1602,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1930,7 +1930,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2258,7 +2258,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -2586,7 +2586,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -2914,7 +2914,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = 
TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3242,7 +3242,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3570,7 +3570,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -3898,7 +3898,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -4226,7 +4226,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4554,7 +4554,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4882,7 +4882,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -5210,7 +5210,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder15 + }, { // dummy15 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -5538,7 +5538,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder16 + }, { // dummy16 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5866,7 +5866,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // 
placeholder17 + }, { // dummy17 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -6194,7 +6194,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder18 + }, { // dummy18 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -6522,7 +6522,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder19 + }, { // dummy19 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -6850,7 +6850,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder20 + }, { // dummy20 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -7178,7 +7178,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder21 + }, { // dummy21 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -7506,7 +7506,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder22 + }, { // dummy22 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -7834,7 +7834,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder23 + }, { // dummy23 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -8162,7 +8162,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder24 + }, { // dummy24 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -8490,7 +8490,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder25 + }, { // dummy25 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -8818,7 +8818,7 @@ .scale = 0.0f, .type 
= TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder26 + }, { // dummy26 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -9146,7 +9146,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder27 + }, { // dummy27 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -9474,7 +9474,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder28 + }, { // dummy28 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -9802,7 +9802,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder29 + }, { // dummy29 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -10130,7 +10130,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder30 + }, { // dummy30 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -10458,7 +10458,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder31 + }, { // dummy31 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -10726,7 +10726,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder32 + }, { // dummy32 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -10994,7 +10994,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder33 + }, { // dummy33 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -11262,7 +11262,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder34 + }, { // dummy34 .channelQuant = {}, .data = 
TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -11530,7 +11530,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder35 + }, { // dummy35 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -11798,7 +11798,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder36 + }, { // dummy36 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -12066,7 +12066,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder37 + }, { // dummy37 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -12334,7 +12334,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder38 + }, { // dummy38 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -12602,7 +12602,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder39 + }, { // dummy39 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/axis_aligned_bbox_transform.example.cpp b/runtime/test/generated/spec_V1_2/axis_aligned_bbox_transform.example.cpp index d23ae51..67585a6 100644 --- a/runtime/test/generated/spec_V1_2/axis_aligned_bbox_transform.example.cpp +++ b/runtime/test/generated/spec_V1_2/axis_aligned_bbox_transform.example.cpp
@@ -150,7 +150,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -180,7 +180,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -210,7 +210,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -406,7 +406,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -436,7 +436,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -466,7 +466,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -662,7 +662,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -692,7 +692,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -722,7 +722,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), 
.dimensions = {1}, @@ -918,7 +918,7 @@ .scale = 0.05f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -1106,7 +1106,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1136,7 +1136,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1166,7 +1166,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1362,7 +1362,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1392,7 +1392,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1422,7 +1422,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder15 + }, { // dummy15 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1618,7 +1618,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder16 + }, { // dummy16 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1648,7 +1648,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder17 + }, { // dummy17 .channelQuant = {}, 
.data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1678,7 +1678,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder18 + }, { // dummy18 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1874,7 +1874,7 @@ .scale = 0.05f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder19 + }, { // dummy19 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/batch_to_space_v1_2.example.cpp b/runtime/test/generated/spec_V1_2/batch_to_space_v1_2.example.cpp index e2eb915..4431100 100644 --- a/runtime/test/generated/spec_V1_2/batch_to_space_v1_2.example.cpp +++ b/runtime/test/generated/spec_V1_2/batch_to_space_v1_2.example.cpp
@@ -130,7 +130,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -298,7 +298,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -466,7 +466,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -634,7 +634,7 @@ .scale = 0.1f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -802,7 +802,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -970,7 +970,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1138,7 +1138,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1306,7 +1306,7 @@ .scale = 0.1f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -1474,7 +1474,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = 
TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1642,7 +1642,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1810,7 +1810,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1978,7 +1978,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -2146,7 +2146,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2314,7 +2314,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2482,7 +2482,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -2650,7 +2650,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder15 + }, { // dummy15 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/bidirectional_sequence_lstm.example.cpp b/runtime/test/generated/spec_V1_2/bidirectional_sequence_lstm.example.cpp index 7534451..7f39204 100644 --- a/runtime/test/generated/spec_V1_2/bidirectional_sequence_lstm.example.cpp +++ b/runtime/test/generated/spec_V1_2/bidirectional_sequence_lstm.example.cpp
@@ -1310,7 +1310,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1340,7 +1340,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1370,7 +1370,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1400,7 +1400,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1430,7 +1430,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1460,7 +1460,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1490,7 +1490,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1520,7 +1520,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1550,7 +1550,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = 
TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1580,7 +1580,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1610,7 +1610,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1640,7 +1640,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1670,7 +1670,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1700,7 +1700,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1730,7 +1730,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1760,7 +1760,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder15 + }, { // dummy15 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1790,7 +1790,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder16 + }, { // dummy16 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1820,7 +1820,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder17 + }, 
{ // dummy17 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1850,7 +1850,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder18 + }, { // dummy18 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1880,7 +1880,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder19 + }, { // dummy19 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1910,7 +1910,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder20 + }, { // dummy20 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1940,7 +1940,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder21 + }, { // dummy21 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1970,7 +1970,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder22 + }, { // dummy22 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2000,7 +2000,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder23 + }, { // dummy23 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2030,7 +2030,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder24 + }, { // dummy24 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2060,7 +2060,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder25 + }, { // dummy25 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2090,7 +2090,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, 
.zeroPoint = 0 - }, { // placeholder26 + }, { // dummy26 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2120,7 +2120,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder27 + }, { // dummy27 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2150,7 +2150,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder28 + }, { // dummy28 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/bidirectional_sequence_lstm_aux_input.example.cpp b/runtime/test/generated/spec_V1_2/bidirectional_sequence_lstm_aux_input.example.cpp index 3c6f0c6..b9c4cd4 100644 --- a/runtime/test/generated/spec_V1_2/bidirectional_sequence_lstm_aux_input.example.cpp +++ b/runtime/test/generated/spec_V1_2/bidirectional_sequence_lstm_aux_input.example.cpp
@@ -1310,7 +1310,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1340,7 +1340,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1370,7 +1370,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1400,7 +1400,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1430,7 +1430,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1460,7 +1460,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1490,7 +1490,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1520,7 +1520,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1550,7 +1550,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = 
TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1580,7 +1580,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1610,7 +1610,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1640,7 +1640,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1670,7 +1670,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1700,7 +1700,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1730,7 +1730,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1760,7 +1760,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder15 + }, { // dummy15 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1790,7 +1790,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder16 + }, { // dummy16 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1820,7 +1820,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder17 + }, 
{ // dummy17 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1850,7 +1850,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder18 + }, { // dummy18 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1880,7 +1880,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder19 + }, { // dummy19 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1910,7 +1910,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder20 + }, { // dummy20 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1940,7 +1940,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder21 + }, { // dummy21 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1970,7 +1970,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder22 + }, { // dummy22 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2000,7 +2000,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder23 + }, { // dummy23 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2030,7 +2030,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder24 + }, { // dummy24 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2060,7 +2060,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder25 + }, { // dummy25 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2090,7 +2090,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, 
.zeroPoint = 0 - }, { // placeholder26 + }, { // dummy26 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2120,7 +2120,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder27 + }, { // dummy27 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2150,7 +2150,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder28 + }, { // dummy28 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2180,7 +2180,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder29 + }, { // dummy29 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2210,7 +2210,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder30 + }, { // dummy30 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2240,7 +2240,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder31 + }, { // dummy31 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2270,7 +2270,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder32 + }, { // dummy32 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2300,7 +2300,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder33 + }, { // dummy33 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2330,7 +2330,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder34 + }, { // dummy34 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2360,7 +2360,7 @@ .scale = 
0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder35 + }, { // dummy35 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2390,7 +2390,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder36 + }, { // dummy36 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2420,7 +2420,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder37 + }, { // dummy37 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/bidirectional_sequence_lstm_cifg_peephole.example.cpp b/runtime/test/generated/spec_V1_2/bidirectional_sequence_lstm_cifg_peephole.example.cpp index 9d1ab12..28f274d 100644 --- a/runtime/test/generated/spec_V1_2/bidirectional_sequence_lstm_cifg_peephole.example.cpp +++ b/runtime/test/generated/spec_V1_2/bidirectional_sequence_lstm_cifg_peephole.example.cpp
@@ -1310,7 +1310,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1340,7 +1340,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1370,7 +1370,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1400,7 +1400,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1430,7 +1430,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1460,7 +1460,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1490,7 +1490,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1520,7 +1520,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1550,7 +1550,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = 
TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1580,7 +1580,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1610,7 +1610,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1640,7 +1640,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1670,7 +1670,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1700,7 +1700,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1730,7 +1730,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1760,7 +1760,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder15 + }, { // dummy15 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1790,7 +1790,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder16 + }, { // dummy16 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1820,7 +1820,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder17 + }, 
{ // dummy17 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1850,7 +1850,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder18 + }, { // dummy18 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1880,7 +1880,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder19 + }, { // dummy19 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1910,7 +1910,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder20 + }, { // dummy20 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1940,7 +1940,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder21 + }, { // dummy21 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1970,7 +1970,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder22 + }, { // dummy22 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2000,7 +2000,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder23 + }, { // dummy23 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2030,7 +2030,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder24 + }, { // dummy24 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2060,7 +2060,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder25 + }, { // dummy25 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2090,7 +2090,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, 
.zeroPoint = 0 - }, { // placeholder26 + }, { // dummy26 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/bidirectional_sequence_lstm_float16_batch_major.example.cpp b/runtime/test/generated/spec_V1_2/bidirectional_sequence_lstm_float16_batch_major.example.cpp index 15ba7e5..3a3ac8a 100644 --- a/runtime/test/generated/spec_V1_2/bidirectional_sequence_lstm_float16_batch_major.example.cpp +++ b/runtime/test/generated/spec_V1_2/bidirectional_sequence_lstm_float16_batch_major.example.cpp
@@ -1310,7 +1310,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1340,7 +1340,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1370,7 +1370,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1400,7 +1400,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1430,7 +1430,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1460,7 +1460,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1490,7 +1490,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1520,7 +1520,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1550,7 +1550,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = 
TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1580,7 +1580,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1610,7 +1610,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1640,7 +1640,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1670,7 +1670,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1700,7 +1700,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1730,7 +1730,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1760,7 +1760,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder15 + }, { // dummy15 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1790,7 +1790,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder16 + }, { // dummy16 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1820,7 +1820,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - 
}, { // placeholder17 + }, { // dummy17 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1850,7 +1850,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder18 + }, { // dummy18 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1880,7 +1880,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder19 + }, { // dummy19 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1910,7 +1910,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder20 + }, { // dummy20 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1940,7 +1940,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder21 + }, { // dummy21 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1970,7 +1970,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder22 + }, { // dummy22 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -2000,7 +2000,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder23 + }, { // dummy23 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -2030,7 +2030,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder24 + }, { // dummy24 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -2060,7 +2060,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder25 + }, { // dummy25 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -2090,7 +2090,7 @@ 
.scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder26 + }, { // dummy26 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -2120,7 +2120,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder27 + }, { // dummy27 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -2150,7 +2150,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder28 + }, { // dummy28 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/bidirectional_sequence_lstm_float16_batch_major_aux_input.example.cpp b/runtime/test/generated/spec_V1_2/bidirectional_sequence_lstm_float16_batch_major_aux_input.example.cpp index 41be21e..7ce38fd 100644 --- a/runtime/test/generated/spec_V1_2/bidirectional_sequence_lstm_float16_batch_major_aux_input.example.cpp +++ b/runtime/test/generated/spec_V1_2/bidirectional_sequence_lstm_float16_batch_major_aux_input.example.cpp
@@ -1310,7 +1310,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1340,7 +1340,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1370,7 +1370,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1400,7 +1400,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1430,7 +1430,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1460,7 +1460,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1490,7 +1490,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1520,7 +1520,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1550,7 +1550,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = 
TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1580,7 +1580,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1610,7 +1610,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1640,7 +1640,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1670,7 +1670,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1700,7 +1700,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1730,7 +1730,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1760,7 +1760,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder15 + }, { // dummy15 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1790,7 +1790,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder16 + }, { // dummy16 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1820,7 +1820,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - 
}, { // placeholder17 + }, { // dummy17 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1850,7 +1850,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder18 + }, { // dummy18 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1880,7 +1880,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder19 + }, { // dummy19 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1910,7 +1910,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder20 + }, { // dummy20 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1940,7 +1940,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder21 + }, { // dummy21 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1970,7 +1970,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder22 + }, { // dummy22 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -2000,7 +2000,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder23 + }, { // dummy23 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -2030,7 +2030,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder24 + }, { // dummy24 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -2060,7 +2060,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder25 + }, { // dummy25 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -2090,7 +2090,7 @@ 
.scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder26 + }, { // dummy26 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -2120,7 +2120,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder27 + }, { // dummy27 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -2150,7 +2150,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder28 + }, { // dummy28 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -2180,7 +2180,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder29 + }, { // dummy29 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -2210,7 +2210,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder30 + }, { // dummy30 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -2240,7 +2240,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder31 + }, { // dummy31 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -2270,7 +2270,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder32 + }, { // dummy32 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -2300,7 +2300,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder33 + }, { // dummy33 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -2330,7 +2330,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder34 + }, { // dummy34 .channelQuant = {}, .data = 
TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -2360,7 +2360,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder35 + }, { // dummy35 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -2390,7 +2390,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder36 + }, { // dummy36 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -2420,7 +2420,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder37 + }, { // dummy37 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/bidirectional_sequence_lstm_float16_batch_major_merge_outputs.example.cpp b/runtime/test/generated/spec_V1_2/bidirectional_sequence_lstm_float16_batch_major_merge_outputs.example.cpp index 3d8f37e..ee35479 100644 --- a/runtime/test/generated/spec_V1_2/bidirectional_sequence_lstm_float16_batch_major_merge_outputs.example.cpp +++ b/runtime/test/generated/spec_V1_2/bidirectional_sequence_lstm_float16_batch_major_merge_outputs.example.cpp
@@ -1290,7 +1290,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1320,7 +1320,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1350,7 +1350,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1380,7 +1380,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1410,7 +1410,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1440,7 +1440,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1470,7 +1470,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1500,7 +1500,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1530,7 +1530,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = 
TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1560,7 +1560,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1590,7 +1590,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1620,7 +1620,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1650,7 +1650,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1680,7 +1680,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1710,7 +1710,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1740,7 +1740,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder15 + }, { // dummy15 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1770,7 +1770,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder16 + }, { // dummy16 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1800,7 +1800,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - 
}, { // placeholder17 + }, { // dummy17 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1830,7 +1830,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder18 + }, { // dummy18 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1860,7 +1860,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder19 + }, { // dummy19 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1890,7 +1890,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder20 + }, { // dummy20 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1920,7 +1920,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder21 + }, { // dummy21 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1950,7 +1950,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder22 + }, { // dummy22 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1980,7 +1980,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder23 + }, { // dummy23 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -2010,7 +2010,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder24 + }, { // dummy24 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -2040,7 +2040,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder25 + }, { // dummy25 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -2070,7 +2070,7 @@ 
.scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder26 + }, { // dummy26 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -2100,7 +2100,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder27 + }, { // dummy27 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -2130,7 +2130,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder28 + }, { // dummy28 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/bidirectional_sequence_lstm_merge_outputs.example.cpp b/runtime/test/generated/spec_V1_2/bidirectional_sequence_lstm_merge_outputs.example.cpp index b929bfa..1e8c006 100644 --- a/runtime/test/generated/spec_V1_2/bidirectional_sequence_lstm_merge_outputs.example.cpp +++ b/runtime/test/generated/spec_V1_2/bidirectional_sequence_lstm_merge_outputs.example.cpp
@@ -1290,7 +1290,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1320,7 +1320,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1350,7 +1350,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1380,7 +1380,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1410,7 +1410,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1440,7 +1440,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1470,7 +1470,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1500,7 +1500,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1530,7 +1530,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = 
TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1560,7 +1560,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1590,7 +1590,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1620,7 +1620,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1650,7 +1650,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1680,7 +1680,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1710,7 +1710,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1740,7 +1740,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder15 + }, { // dummy15 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1770,7 +1770,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder16 + }, { // dummy16 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1800,7 +1800,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder17 + }, 
{ // dummy17 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1830,7 +1830,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder18 + }, { // dummy18 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1860,7 +1860,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder19 + }, { // dummy19 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1890,7 +1890,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder20 + }, { // dummy20 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1920,7 +1920,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder21 + }, { // dummy21 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1950,7 +1950,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder22 + }, { // dummy22 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1980,7 +1980,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder23 + }, { // dummy23 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2010,7 +2010,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder24 + }, { // dummy24 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2040,7 +2040,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder25 + }, { // dummy25 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2070,7 +2070,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, 
.zeroPoint = 0 - }, { // placeholder26 + }, { // dummy26 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2100,7 +2100,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder27 + }, { // dummy27 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2130,7 +2130,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder28 + }, { // dummy28 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/bidirectional_sequence_lstm_norm_fw_output.example.cpp b/runtime/test/generated/spec_V1_2/bidirectional_sequence_lstm_norm_fw_output.example.cpp index 24c323e..3e03dcb 100644 --- a/runtime/test/generated/spec_V1_2/bidirectional_sequence_lstm_norm_fw_output.example.cpp +++ b/runtime/test/generated/spec_V1_2/bidirectional_sequence_lstm_norm_fw_output.example.cpp
@@ -1310,7 +1310,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1340,7 +1340,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1370,7 +1370,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1400,7 +1400,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1430,7 +1430,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1460,7 +1460,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1490,7 +1490,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1520,7 +1520,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1550,7 +1550,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = 
TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1580,7 +1580,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1610,7 +1610,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1640,7 +1640,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1670,7 +1670,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1700,7 +1700,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1730,7 +1730,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1760,7 +1760,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder15 + }, { // dummy15 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1790,7 +1790,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder16 + }, { // dummy16 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1820,7 +1820,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder17 + }, 
{ // dummy17 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1850,7 +1850,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder18 + }, { // dummy18 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1880,7 +1880,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder19 + }, { // dummy19 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1910,7 +1910,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder20 + }, { // dummy20 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1940,7 +1940,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder21 + }, { // dummy21 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1970,7 +1970,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder22 + }, { // dummy22 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2000,7 +2000,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder23 + }, { // dummy23 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2030,7 +2030,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder24 + }, { // dummy24 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2060,7 +2060,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder25 + }, { // dummy25 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2090,7 +2090,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, 
.zeroPoint = 0 - }, { // placeholder26 + }, { // dummy26 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2120,7 +2120,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder27 + }, { // dummy27 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2150,7 +2150,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder28 + }, { // dummy28 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2180,7 +2180,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder29 + }, { // dummy29 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2210,7 +2210,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder30 + }, { // dummy30 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2240,7 +2240,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder31 + }, { // dummy31 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2270,7 +2270,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder32 + }, { // dummy32 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2300,7 +2300,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder33 + }, { // dummy33 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2330,7 +2330,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder34 + }, { // dummy34 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2360,7 +2360,7 @@ .scale = 
0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder35 + }, { // dummy35 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2390,7 +2390,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder36 + }, { // dummy36 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2420,7 +2420,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder37 + }, { // dummy37 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2450,7 +2450,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder38 + }, { // dummy38 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2480,7 +2480,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder39 + }, { // dummy39 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2510,7 +2510,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder40 + }, { // dummy40 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2540,7 +2540,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder41 + }, { // dummy41 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2570,7 +2570,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder42 + }, { // dummy42 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2600,7 +2600,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder43 + }, { // dummy43 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), 
.dimensions = {1}, @@ -2630,7 +2630,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder44 + }, { // dummy44 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/bidirectional_sequence_rnn.example.cpp b/runtime/test/generated/spec_V1_2/bidirectional_sequence_rnn.example.cpp index 959faa9..ff657d8 100644 --- a/runtime/test/generated/spec_V1_2/bidirectional_sequence_rnn.example.cpp +++ b/runtime/test/generated/spec_V1_2/bidirectional_sequence_rnn.example.cpp
@@ -390,7 +390,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -420,7 +420,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -450,7 +450,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -480,7 +480,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -510,7 +510,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -540,7 +540,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -570,7 +570,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -600,7 +600,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -630,7 +630,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = 
{1}, @@ -1090,7 +1090,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1120,7 +1120,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1150,7 +1150,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1180,7 +1180,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1210,7 +1210,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1240,7 +1240,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1270,7 +1270,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder15 + }, { // dummy15 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1300,7 +1300,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder16 + }, { // dummy16 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1330,7 +1330,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder17 + }, { // dummy17 .channelQuant = {}, .data = 
TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1790,7 +1790,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder18 + }, { // dummy18 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1820,7 +1820,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder19 + }, { // dummy19 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1850,7 +1850,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder20 + }, { // dummy20 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1880,7 +1880,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder21 + }, { // dummy21 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1910,7 +1910,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder22 + }, { // dummy22 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1940,7 +1940,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder23 + }, { // dummy23 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1970,7 +1970,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder24 + }, { // dummy24 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -2000,7 +2000,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder25 + }, { // dummy25 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -2030,7 +2030,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - 
}, { // placeholder26 + }, { // dummy26 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -2490,7 +2490,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder27 + }, { // dummy27 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2520,7 +2520,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder28 + }, { // dummy28 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2550,7 +2550,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder29 + }, { // dummy29 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2580,7 +2580,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder30 + }, { // dummy30 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2610,7 +2610,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder31 + }, { // dummy31 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2640,7 +2640,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder32 + }, { // dummy32 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2670,7 +2670,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder33 + }, { // dummy33 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2700,7 +2700,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder34 + }, { // dummy34 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2730,7 +2730,7 @@ .scale = 0.0f, .type = 
TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder35 + }, { // dummy35 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3190,7 +3190,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder36 + }, { // dummy36 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3220,7 +3220,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder37 + }, { // dummy37 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3250,7 +3250,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder38 + }, { // dummy38 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3280,7 +3280,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder39 + }, { // dummy39 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3310,7 +3310,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder40 + }, { // dummy40 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3340,7 +3340,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder41 + }, { // dummy41 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3370,7 +3370,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder42 + }, { // dummy42 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3400,7 +3400,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder43 + }, { // dummy43 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, 
@@ -3430,7 +3430,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder44 + }, { // dummy44 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3890,7 +3890,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder45 + }, { // dummy45 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -3920,7 +3920,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder46 + }, { // dummy46 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -3950,7 +3950,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder47 + }, { // dummy47 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -3980,7 +3980,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder48 + }, { // dummy48 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -4010,7 +4010,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder49 + }, { // dummy49 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -4040,7 +4040,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder50 + }, { // dummy50 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -4070,7 +4070,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder51 + }, { // dummy51 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -4100,7 +4100,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder52 + }, { // dummy52 .channelQuant = {}, .data = 
TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -4130,7 +4130,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder53 + }, { // dummy53 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -4570,7 +4570,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder54 + }, { // dummy54 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4600,7 +4600,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder55 + }, { // dummy55 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4630,7 +4630,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder56 + }, { // dummy56 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4660,7 +4660,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder57 + }, { // dummy57 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4690,7 +4690,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder58 + }, { // dummy58 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4720,7 +4720,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder59 + }, { // dummy59 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4750,7 +4750,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder60 + }, { // dummy60 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4780,7 +4780,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // 
placeholder61 + }, { // dummy61 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4810,7 +4810,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder62 + }, { // dummy62 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5250,7 +5250,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder63 + }, { // dummy63 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5280,7 +5280,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder64 + }, { // dummy64 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5310,7 +5310,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder65 + }, { // dummy65 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5340,7 +5340,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder66 + }, { // dummy66 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5370,7 +5370,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder67 + }, { // dummy67 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5400,7 +5400,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder68 + }, { // dummy68 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5430,7 +5430,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder69 + }, { // dummy69 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5460,7 +5460,7 @@ .scale = 0.0f, .type = 
TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder70 + }, { // dummy70 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5490,7 +5490,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder71 + }, { // dummy71 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5930,7 +5930,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder72 + }, { // dummy72 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -5960,7 +5960,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder73 + }, { // dummy73 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -5990,7 +5990,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder74 + }, { // dummy74 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -6020,7 +6020,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder75 + }, { // dummy75 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -6050,7 +6050,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder76 + }, { // dummy76 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -6080,7 +6080,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder77 + }, { // dummy77 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -6110,7 +6110,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder78 + }, { // dummy78 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), 
.dimensions = {1}, @@ -6140,7 +6140,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder79 + }, { // dummy79 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -6170,7 +6170,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder80 + }, { // dummy80 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -6630,7 +6630,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder81 + }, { // dummy81 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -6660,7 +6660,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder82 + }, { // dummy82 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -6690,7 +6690,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder83 + }, { // dummy83 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -6720,7 +6720,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder84 + }, { // dummy84 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -6750,7 +6750,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder85 + }, { // dummy85 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -6780,7 +6780,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder86 + }, { // dummy86 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -6810,7 +6810,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder87 + }, { // dummy87 .channelQuant = {}, 
.data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -6840,7 +6840,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder88 + }, { // dummy88 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -6870,7 +6870,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder89 + }, { // dummy89 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -7330,7 +7330,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder90 + }, { // dummy90 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -7360,7 +7360,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder91 + }, { // dummy91 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -7390,7 +7390,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder92 + }, { // dummy92 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -7420,7 +7420,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder93 + }, { // dummy93 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -7450,7 +7450,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder94 + }, { // dummy94 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -7480,7 +7480,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder95 + }, { // dummy95 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -7510,7 +7510,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // 
placeholder96 + }, { // dummy96 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -7540,7 +7540,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder97 + }, { // dummy97 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -7570,7 +7570,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder98 + }, { // dummy98 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -8030,7 +8030,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder99 + }, { // dummy99 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -8060,7 +8060,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder100 + }, { // dummy100 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -8090,7 +8090,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder101 + }, { // dummy101 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -8120,7 +8120,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder102 + }, { // dummy102 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -8150,7 +8150,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder103 + }, { // dummy103 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -8180,7 +8180,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder104 + }, { // dummy104 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -8210,7 +8210,7 @@ .scale = 
0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder105 + }, { // dummy105 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -8240,7 +8240,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder106 + }, { // dummy106 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -8270,7 +8270,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder107 + }, { // dummy107 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -8730,7 +8730,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder108 + }, { // dummy108 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -8760,7 +8760,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder109 + }, { // dummy109 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -8790,7 +8790,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder110 + }, { // dummy110 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -8820,7 +8820,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder111 + }, { // dummy111 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -8850,7 +8850,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder112 + }, { // dummy112 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -8880,7 +8880,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder113 + }, { // dummy113 .channelQuant = {}, .data = 
TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -8910,7 +8910,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder114 + }, { // dummy114 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -8940,7 +8940,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder115 + }, { // dummy115 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -8970,7 +8970,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder116 + }, { // dummy116 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -9000,7 +9000,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder117 + }, { // dummy117 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -9030,7 +9030,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder118 + }, { // dummy118 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -9060,7 +9060,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder119 + }, { // dummy119 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -9532,7 +9532,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder120 + }, { // dummy120 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -9562,7 +9562,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder121 + }, { // dummy121 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -9592,7 +9592,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // 
placeholder122 + }, { // dummy122 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -9622,7 +9622,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder123 + }, { // dummy123 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -9652,7 +9652,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder124 + }, { // dummy124 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -9682,7 +9682,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder125 + }, { // dummy125 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -9712,7 +9712,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder126 + }, { // dummy126 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -9742,7 +9742,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder127 + }, { // dummy127 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -9772,7 +9772,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder128 + }, { // dummy128 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -9802,7 +9802,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder129 + }, { // dummy129 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -9832,7 +9832,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder130 + }, { // dummy130 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -9862,7 +9862,7 @@ .scale = 0.0f, 
.type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder131 + }, { // dummy131 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -10334,7 +10334,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder132 + }, { // dummy132 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -10364,7 +10364,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder133 + }, { // dummy133 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -10394,7 +10394,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder134 + }, { // dummy134 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -10424,7 +10424,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder135 + }, { // dummy135 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -10454,7 +10454,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder136 + }, { // dummy136 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -10484,7 +10484,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder137 + }, { // dummy137 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -10514,7 +10514,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder138 + }, { // dummy138 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -10544,7 +10544,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder139 + }, { // dummy139 .channelQuant = {}, .data = 
TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -10574,7 +10574,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder140 + }, { // dummy140 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -10604,7 +10604,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder141 + }, { // dummy141 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -10634,7 +10634,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder142 + }, { // dummy142 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -10664,7 +10664,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder143 + }, { // dummy143 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -11136,7 +11136,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder144 + }, { // dummy144 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -11166,7 +11166,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder145 + }, { // dummy145 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -11196,7 +11196,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder146 + }, { // dummy146 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -11226,7 +11226,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder147 + }, { // dummy147 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -11256,7 +11256,7 @@ .scale = 0.0f, .type = 
TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder148 + }, { // dummy148 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -11286,7 +11286,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder149 + }, { // dummy149 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -11316,7 +11316,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder150 + }, { // dummy150 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -11346,7 +11346,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder151 + }, { // dummy151 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -11376,7 +11376,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder152 + }, { // dummy152 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -11406,7 +11406,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder153 + }, { // dummy153 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -11436,7 +11436,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder154 + }, { // dummy154 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -11466,7 +11466,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder155 + }, { // dummy155 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -11938,7 +11938,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder156 + }, { // dummy156 .channelQuant = {}, .data = 
TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -11968,7 +11968,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder157 + }, { // dummy157 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -11998,7 +11998,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder158 + }, { // dummy158 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -12028,7 +12028,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder159 + }, { // dummy159 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -12058,7 +12058,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder160 + }, { // dummy160 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -12088,7 +12088,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder161 + }, { // dummy161 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -12118,7 +12118,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder162 + }, { // dummy162 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -12148,7 +12148,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder163 + }, { // dummy163 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -12178,7 +12178,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder164 + }, { // dummy164 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -12208,7 +12208,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, 
.zeroPoint = 0 - }, { // placeholder165 + }, { // dummy165 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -12238,7 +12238,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder166 + }, { // dummy166 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -12268,7 +12268,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder167 + }, { // dummy167 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -12740,7 +12740,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder168 + }, { // dummy168 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -12770,7 +12770,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder169 + }, { // dummy169 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -12800,7 +12800,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder170 + }, { // dummy170 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -12830,7 +12830,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder171 + }, { // dummy171 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -12860,7 +12860,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder172 + }, { // dummy172 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -12890,7 +12890,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder173 + }, { // dummy173 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), 
.dimensions = {1}, @@ -12920,7 +12920,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder174 + }, { // dummy174 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -12950,7 +12950,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder175 + }, { // dummy175 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -12980,7 +12980,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder176 + }, { // dummy176 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -13010,7 +13010,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder177 + }, { // dummy177 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -13040,7 +13040,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder178 + }, { // dummy178 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -13070,7 +13070,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder179 + }, { // dummy179 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/box_with_nms_limit_gaussian.example.cpp b/runtime/test/generated/spec_V1_2/box_with_nms_limit_gaussian.example.cpp index 5210031..b59f4dc 100644 --- a/runtime/test/generated/spec_V1_2/box_with_nms_limit_gaussian.example.cpp +++ b/runtime/test/generated/spec_V1_2/box_with_nms_limit_gaussian.example.cpp
@@ -310,7 +310,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -340,7 +340,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -692,7 +692,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -722,7 +722,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1074,7 +1074,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1104,7 +1104,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1456,7 +1456,7 @@ .scale = 0.01f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -1804,7 +1804,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1834,7 +1834,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = 
TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2186,7 +2186,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2216,7 +2216,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2568,7 +2568,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -2598,7 +2598,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -2950,7 +2950,7 @@ .scale = 0.01f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/box_with_nms_limit_hard.example.cpp b/runtime/test/generated/spec_V1_2/box_with_nms_limit_hard.example.cpp index c6863bc..e080839 100644 --- a/runtime/test/generated/spec_V1_2/box_with_nms_limit_hard.example.cpp +++ b/runtime/test/generated/spec_V1_2/box_with_nms_limit_hard.example.cpp
@@ -310,7 +310,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -340,7 +340,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -692,7 +692,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -722,7 +722,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1074,7 +1074,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1104,7 +1104,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1456,7 +1456,7 @@ .scale = 0.01f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -1804,7 +1804,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1834,7 +1834,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = 
TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2186,7 +2186,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2216,7 +2216,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2568,7 +2568,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -2598,7 +2598,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -2950,7 +2950,7 @@ .scale = 0.01f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/box_with_nms_limit_linear.example.cpp b/runtime/test/generated/spec_V1_2/box_with_nms_limit_linear.example.cpp index 8722699..d2e02fd 100644 --- a/runtime/test/generated/spec_V1_2/box_with_nms_limit_linear.example.cpp +++ b/runtime/test/generated/spec_V1_2/box_with_nms_limit_linear.example.cpp
@@ -310,7 +310,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -340,7 +340,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -692,7 +692,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -722,7 +722,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1074,7 +1074,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1104,7 +1104,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1456,7 +1456,7 @@ .scale = 0.01f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -1804,7 +1804,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1834,7 +1834,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = 
TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2186,7 +2186,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2216,7 +2216,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2568,7 +2568,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -2598,7 +2598,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -2950,7 +2950,7 @@ .scale = 0.01f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/cast.example.cpp b/runtime/test/generated/spec_V1_2/cast.example.cpp index d576c62..4bc4fbb 100644 --- a/runtime/test/generated/spec_V1_2/cast.example.cpp +++ b/runtime/test/generated/spec_V1_2/cast.example.cpp
@@ -90,7 +90,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -218,7 +218,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -346,7 +346,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -474,7 +474,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -602,7 +602,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -730,7 +730,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -858,7 +858,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -986,7 +986,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1114,7 +1114,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = 
TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1242,7 +1242,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1370,7 +1370,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1498,7 +1498,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1626,7 +1626,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1989,7 +1989,7 @@ .scale = 4.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -2117,7 +2117,7 @@ .scale = 4.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -2245,7 +2245,7 @@ .scale = 4.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder15 + }, { // dummy15 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -2373,7 +2373,7 @@ .scale = 4.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder16 + }, { // dummy16 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -2501,7 +2501,7 @@ .scale = 4.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, 
.zeroPoint = 100 - }, { // placeholder17 + }, { // dummy17 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -2629,7 +2629,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder18 + }, { // dummy18 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -2757,7 +2757,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder19 + }, { // dummy19 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2885,7 +2885,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder20 + }, { // dummy20 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/channel_shuffle.example.cpp b/runtime/test/generated/spec_V1_2/channel_shuffle.example.cpp index 5eee7ed..c7ea964 100644 --- a/runtime/test/generated/spec_V1_2/channel_shuffle.example.cpp +++ b/runtime/test/generated/spec_V1_2/channel_shuffle.example.cpp
@@ -130,7 +130,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -298,7 +298,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -466,7 +466,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -634,7 +634,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -802,7 +802,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -970,7 +970,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1138,7 +1138,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1306,7 +1306,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1474,7 +1474,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), 
.dimensions = {1}, @@ -1642,7 +1642,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1810,7 +1810,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1978,7 +1978,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2146,7 +2146,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2314,7 +2314,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2482,7 +2482,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2650,7 +2650,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder15 + }, { // dummy15 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2818,7 +2818,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder16 + }, { // dummy16 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2986,7 +2986,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder17 + }, { // dummy17 .channelQuant = {}, .data = 
TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3154,7 +3154,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder18 + }, { // dummy18 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3322,7 +3322,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder19 + }, { // dummy19 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3490,7 +3490,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder20 + }, { // dummy20 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3658,7 +3658,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder21 + }, { // dummy21 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3826,7 +3826,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder22 + }, { // dummy22 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3994,7 +3994,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder23 + }, { // dummy23 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4162,7 +4162,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder24 + }, { // dummy24 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4330,7 +4330,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder25 + }, { // dummy25 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4498,7 +4498,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder26 + 
}, { // dummy26 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4666,7 +4666,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder27 + }, { // dummy27 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4834,7 +4834,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder28 + }, { // dummy28 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5002,7 +5002,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder29 + }, { // dummy29 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5170,7 +5170,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder30 + }, { // dummy30 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5338,7 +5338,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder31 + }, { // dummy31 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5506,7 +5506,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder32 + }, { // dummy32 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5674,7 +5674,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder33 + }, { // dummy33 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5842,7 +5842,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder34 + }, { // dummy34 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -6010,7 +6010,7 @@ .scale = 0.0f, .type = 
TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder35 + }, { // dummy35 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -6178,7 +6178,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder36 + }, { // dummy36 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -6346,7 +6346,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder37 + }, { // dummy37 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -6514,7 +6514,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder38 + }, { // dummy38 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -6682,7 +6682,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder39 + }, { // dummy39 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -6850,7 +6850,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder40 + }, { // dummy40 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -7018,7 +7018,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder41 + }, { // dummy41 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -7186,7 +7186,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder42 + }, { // dummy42 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -7354,7 +7354,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder43 + }, { // dummy43 .channelQuant = {}, .data = 
TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -7522,7 +7522,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder44 + }, { // dummy44 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -7690,7 +7690,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder45 + }, { // dummy45 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -7858,7 +7858,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder46 + }, { // dummy46 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -8026,7 +8026,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder47 + }, { // dummy47 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -8194,7 +8194,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder48 + }, { // dummy48 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -8362,7 +8362,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder49 + }, { // dummy49 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -8530,7 +8530,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder50 + }, { // dummy50 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -8698,7 +8698,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder51 + }, { // dummy51 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -8866,7 +8866,7 @@ .scale = 0.25f, .type = 
TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder52 + }, { // dummy52 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -9034,7 +9034,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder53 + }, { // dummy53 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -9202,7 +9202,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder54 + }, { // dummy54 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -9370,7 +9370,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder55 + }, { // dummy55 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -9538,7 +9538,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder56 + }, { // dummy56 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -9706,7 +9706,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder57 + }, { // dummy57 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -9874,7 +9874,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder58 + }, { // dummy58 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -10042,7 +10042,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder59 + }, { // dummy59 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -10210,7 +10210,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder60 + }, { // dummy60 .channelQuant = 
{}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -10378,7 +10378,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder61 + }, { // dummy61 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -10546,7 +10546,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder62 + }, { // dummy62 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -10714,7 +10714,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder63 + }, { // dummy63 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -10882,7 +10882,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder64 + }, { // dummy64 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -11050,7 +11050,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder65 + }, { // dummy65 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -11218,7 +11218,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder66 + }, { // dummy66 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -11386,7 +11386,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder67 + }, { // dummy67 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -11554,7 +11554,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder68 + }, { // dummy68 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -11722,7 +11722,7 @@ .scale = 0.0f, .type = 
TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder69 + }, { // dummy69 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -11890,7 +11890,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder70 + }, { // dummy70 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -12058,7 +12058,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder71 + }, { // dummy71 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -12226,7 +12226,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder72 + }, { // dummy72 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -12394,7 +12394,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder73 + }, { // dummy73 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -12562,7 +12562,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder74 + }, { // dummy74 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -12730,7 +12730,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder75 + }, { // dummy75 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -12898,7 +12898,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder76 + }, { // dummy76 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -13066,7 +13066,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder77 + }, { // dummy77 .channelQuant = {}, .data = 
TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -13234,7 +13234,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder78 + }, { // dummy78 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -13402,7 +13402,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder79 + }, { // dummy79 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/concat_float16_1.example.cpp b/runtime/test/generated/spec_V1_2/concat_float16_1.example.cpp index 6bf94ea..d2d0cd0 100644 --- a/runtime/test/generated/spec_V1_2/concat_float16_1.example.cpp +++ b/runtime/test/generated/spec_V1_2/concat_float16_1.example.cpp
@@ -130,7 +130,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -160,7 +160,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/concat_float16_2.example.cpp b/runtime/test/generated/spec_V1_2/concat_float16_2.example.cpp index 8fd5e40..2aa1a7b 100644 --- a/runtime/test/generated/spec_V1_2/concat_float16_2.example.cpp +++ b/runtime/test/generated/spec_V1_2/concat_float16_2.example.cpp
@@ -130,7 +130,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -160,7 +160,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/concat_float16_3.example.cpp b/runtime/test/generated/spec_V1_2/concat_float16_3.example.cpp index bb91485..e17d173 100644 --- a/runtime/test/generated/spec_V1_2/concat_float16_3.example.cpp +++ b/runtime/test/generated/spec_V1_2/concat_float16_3.example.cpp
@@ -130,7 +130,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -160,7 +160,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/concat_mixed_quant.example.cpp b/runtime/test/generated/spec_V1_2/concat_mixed_quant.example.cpp index d1d4400..faa4df7 100644 --- a/runtime/test/generated/spec_V1_2/concat_mixed_quant.example.cpp +++ b/runtime/test/generated/spec_V1_2/concat_mixed_quant.example.cpp
@@ -170,7 +170,7 @@ .scale = 0.084f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 127 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({127}), .dimensions = {1}, @@ -200,7 +200,7 @@ .scale = 0.05f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -230,7 +230,7 @@ .scale = 0.089f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 123 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({123}), .dimensions = {1}, @@ -260,7 +260,7 @@ .scale = 0.029f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -480,7 +480,7 @@ .scale = 0.084f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 127 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({127}), .dimensions = {1}, @@ -510,7 +510,7 @@ .scale = 0.05f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -540,7 +540,7 @@ .scale = 0.089f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 123 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({123}), .dimensions = {1}, @@ -570,7 +570,7 @@ .scale = 0.029f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/conv2d_dilation.example.cpp b/runtime/test/generated/spec_V1_2/conv2d_dilation.example.cpp index b08598c..0bcdcf7 100644 --- a/runtime/test/generated/spec_V1_2/conv2d_dilation.example.cpp +++ b/runtime/test/generated/spec_V1_2/conv2d_dilation.example.cpp
@@ -330,7 +330,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -698,7 +698,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -728,7 +728,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -758,7 +758,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1134,7 +1134,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1502,7 +1502,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1532,7 +1532,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1562,7 +1562,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1938,7 +1938,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), 
.dimensions = {1}, @@ -2306,7 +2306,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -2336,7 +2336,7 @@ .scale = 0.125f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -2708,7 +2708,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -3076,7 +3076,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -3106,7 +3106,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -3136,7 +3136,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -3512,7 +3512,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder15 + }, { // dummy15 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3880,7 +3880,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder16 + }, { // dummy16 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3910,7 +3910,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder17 + }, { // dummy17 
.channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3940,7 +3940,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder18 + }, { // dummy18 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4316,7 +4316,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder19 + }, { // dummy19 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4684,7 +4684,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder20 + }, { // dummy20 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4714,7 +4714,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder21 + }, { // dummy21 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4744,7 +4744,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder22 + }, { // dummy22 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5120,7 +5120,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder23 + }, { // dummy23 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -5488,7 +5488,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder24 + }, { // dummy24 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -5518,7 +5518,7 @@ .scale = 0.125f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder25 + }, { // dummy25 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -5890,7 +5890,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, 
.zeroPoint = 0 - }, { // placeholder26 + }, { // dummy26 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -6258,7 +6258,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder27 + }, { // dummy27 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -6288,7 +6288,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder28 + }, { // dummy28 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -6318,7 +6318,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder29 + }, { // dummy29 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -6694,7 +6694,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder30 + }, { // dummy30 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -7062,7 +7062,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder31 + }, { // dummy31 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -7092,7 +7092,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder32 + }, { // dummy32 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -7122,7 +7122,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder33 + }, { // dummy33 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -7498,7 +7498,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder34 + }, { // dummy34 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -7866,7 +7866,7 @@ 
.scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder35 + }, { // dummy35 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -7896,7 +7896,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder36 + }, { // dummy36 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -7926,7 +7926,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder37 + }, { // dummy37 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -8302,7 +8302,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder38 + }, { // dummy38 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -8670,7 +8670,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder39 + }, { // dummy39 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -8700,7 +8700,7 @@ .scale = 0.125f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder40 + }, { // dummy40 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -9072,7 +9072,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder41 + }, { // dummy41 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -9440,7 +9440,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder42 + }, { // dummy42 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -9470,7 +9470,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder43 + }, { // dummy43 .channelQuant = {}, .data = 
TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -9500,7 +9500,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder44 + }, { // dummy44 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -9876,7 +9876,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder45 + }, { // dummy45 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -10244,7 +10244,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder46 + }, { // dummy46 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -10274,7 +10274,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder47 + }, { // dummy47 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -10304,7 +10304,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder48 + }, { // dummy48 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -10680,7 +10680,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder49 + }, { // dummy49 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -11048,7 +11048,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder50 + }, { // dummy50 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -11078,7 +11078,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder51 + }, { // dummy51 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -11108,7 +11108,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { 
// placeholder52 + }, { // dummy52 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -11484,7 +11484,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder53 + }, { // dummy53 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -11852,7 +11852,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder54 + }, { // dummy54 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -11882,7 +11882,7 @@ .scale = 0.125f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder55 + }, { // dummy55 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -12254,7 +12254,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder56 + }, { // dummy56 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -12622,7 +12622,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder57 + }, { // dummy57 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -12652,7 +12652,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder58 + }, { // dummy58 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -12682,7 +12682,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder59 + }, { // dummy59 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -12998,7 +12998,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder60 + }, { // dummy60 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -13306,7 
+13306,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder61 + }, { // dummy61 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -13336,7 +13336,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder62 + }, { // dummy62 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -13366,7 +13366,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder63 + }, { // dummy63 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -13682,7 +13682,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder64 + }, { // dummy64 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -13990,7 +13990,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder65 + }, { // dummy65 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -14020,7 +14020,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder66 + }, { // dummy66 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -14050,7 +14050,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder67 + }, { // dummy67 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -14366,7 +14366,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder68 + }, { // dummy68 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -14674,7 +14674,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder69 + }, { // dummy69 .channelQuant = {}, .data = 
TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -14704,7 +14704,7 @@ .scale = 0.125f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder70 + }, { // dummy70 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -15016,7 +15016,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder71 + }, { // dummy71 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -15324,7 +15324,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder72 + }, { // dummy72 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -15354,7 +15354,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder73 + }, { // dummy73 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -15384,7 +15384,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder74 + }, { // dummy74 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -15700,7 +15700,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder75 + }, { // dummy75 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -16008,7 +16008,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder76 + }, { // dummy76 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -16038,7 +16038,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder77 + }, { // dummy77 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -16068,7 +16068,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, 
.zeroPoint = 0 - }, { // placeholder78 + }, { // dummy78 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -16384,7 +16384,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder79 + }, { // dummy79 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -16692,7 +16692,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder80 + }, { // dummy80 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -16722,7 +16722,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder81 + }, { // dummy81 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -16752,7 +16752,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder82 + }, { // dummy82 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -17068,7 +17068,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder83 + }, { // dummy83 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -17376,7 +17376,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder84 + }, { // dummy84 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -17406,7 +17406,7 @@ .scale = 0.125f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder85 + }, { // dummy85 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -17718,7 +17718,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder86 + }, { // dummy86 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, 
@@ -18026,7 +18026,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder87 + }, { // dummy87 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -18056,7 +18056,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder88 + }, { // dummy88 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -18086,7 +18086,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder89 + }, { // dummy89 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -18402,7 +18402,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder90 + }, { // dummy90 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -18710,7 +18710,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder91 + }, { // dummy91 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -18740,7 +18740,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder92 + }, { // dummy92 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -18770,7 +18770,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder93 + }, { // dummy93 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -19086,7 +19086,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder94 + }, { // dummy94 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -19394,7 +19394,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder95 + }, { // dummy95 .channelQuant = {}, 
.data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -19424,7 +19424,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder96 + }, { // dummy96 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -19454,7 +19454,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder97 + }, { // dummy97 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -19770,7 +19770,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder98 + }, { // dummy98 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -20078,7 +20078,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder99 + }, { // dummy99 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -20108,7 +20108,7 @@ .scale = 0.125f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder100 + }, { // dummy100 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -20420,7 +20420,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder101 + }, { // dummy101 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -20728,7 +20728,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder102 + }, { // dummy102 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -20758,7 +20758,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder103 + }, { // dummy103 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -20788,7 +20788,7 @@ .scale = 0.0f, .type = 
TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder104 + }, { // dummy104 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -21104,7 +21104,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder105 + }, { // dummy105 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -21412,7 +21412,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder106 + }, { // dummy106 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -21442,7 +21442,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder107 + }, { // dummy107 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -21472,7 +21472,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder108 + }, { // dummy108 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -21788,7 +21788,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder109 + }, { // dummy109 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -22096,7 +22096,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder110 + }, { // dummy110 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -22126,7 +22126,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder111 + }, { // dummy111 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -22156,7 +22156,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder112 + }, { // dummy112 .channelQuant = {}, .data = 
TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -22472,7 +22472,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder113 + }, { // dummy113 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -22780,7 +22780,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder114 + }, { // dummy114 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -22810,7 +22810,7 @@ .scale = 0.125f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder115 + }, { // dummy115 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -23122,7 +23122,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder116 + }, { // dummy116 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -23430,7 +23430,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder117 + }, { // dummy117 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -23460,7 +23460,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder118 + }, { // dummy118 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -23490,7 +23490,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder119 + }, { // dummy119 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -23806,7 +23806,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder120 + }, { // dummy120 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -24114,7 +24114,7 @@ .scale = 0.0f, .type = 
TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder121 + }, { // dummy121 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -24144,7 +24144,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder122 + }, { // dummy122 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -24174,7 +24174,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder123 + }, { // dummy123 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -24490,7 +24490,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder124 + }, { // dummy124 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -24798,7 +24798,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder125 + }, { // dummy125 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -24828,7 +24828,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder126 + }, { // dummy126 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -24858,7 +24858,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder127 + }, { // dummy127 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -25174,7 +25174,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder128 + }, { // dummy128 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -25482,7 +25482,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder129 + }, { // dummy129 .channelQuant = {}, .data = 
TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -25512,7 +25512,7 @@ .scale = 0.125f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder130 + }, { // dummy130 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -25824,7 +25824,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder131 + }, { // dummy131 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -26132,7 +26132,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder132 + }, { // dummy132 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -26162,7 +26162,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder133 + }, { // dummy133 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -26192,7 +26192,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder134 + }, { // dummy134 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -26508,7 +26508,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder135 + }, { // dummy135 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -26816,7 +26816,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder136 + }, { // dummy136 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -26846,7 +26846,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder137 + }, { // dummy137 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -26876,7 +26876,7 @@ .scale = 0.0f, .type = 
TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder138 + }, { // dummy138 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -27192,7 +27192,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder139 + }, { // dummy139 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -27500,7 +27500,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder140 + }, { // dummy140 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -27530,7 +27530,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder141 + }, { // dummy141 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -27560,7 +27560,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder142 + }, { // dummy142 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -27876,7 +27876,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder143 + }, { // dummy143 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -28184,7 +28184,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder144 + }, { // dummy144 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -28214,7 +28214,7 @@ .scale = 0.125f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder145 + }, { // dummy145 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -28526,7 +28526,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder146 + }, { // dummy146 .channelQuant = {}, .data = 
TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -28834,7 +28834,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder147 + }, { // dummy147 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -28864,7 +28864,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder148 + }, { // dummy148 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -28894,7 +28894,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder149 + }, { // dummy149 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/conv2d_per_channel.example.cpp b/runtime/test/generated/spec_V1_2/conv2d_per_channel.example.cpp index 218b7f3..61b01d7 100644 --- a/runtime/test/generated/spec_V1_2/conv2d_per_channel.example.cpp +++ b/runtime/test/generated/spec_V1_2/conv2d_per_channel.example.cpp
@@ -276,7 +276,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -590,7 +590,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -924,7 +924,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -1258,7 +1258,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -1592,7 +1592,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -1926,7 +1926,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/conv2d_v1_2.example.cpp b/runtime/test/generated/spec_V1_2/conv2d_v1_2.example.cpp index 1c833bd..63fe05c 100644 --- a/runtime/test/generated/spec_V1_2/conv2d_v1_2.example.cpp +++ b/runtime/test/generated/spec_V1_2/conv2d_v1_2.example.cpp
@@ -290,7 +290,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -618,7 +618,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -648,7 +648,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -678,7 +678,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1014,7 +1014,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1342,7 +1342,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1372,7 +1372,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1402,7 +1402,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1738,7 +1738,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), 
.dimensions = {1}, @@ -2066,7 +2066,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -2096,7 +2096,7 @@ .scale = 0.125f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -2434,7 +2434,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -2768,7 +2768,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -3096,7 +3096,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -3424,7 +3424,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -3454,7 +3454,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder15 + }, { // dummy15 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -3484,7 +3484,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder16 + }, { // dummy16 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -3820,7 +3820,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder17 + }, { // dummy17 
.channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4148,7 +4148,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder18 + }, { // dummy18 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4178,7 +4178,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder19 + }, { // dummy19 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4208,7 +4208,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder20 + }, { // dummy20 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4544,7 +4544,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder21 + }, { // dummy21 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4872,7 +4872,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder22 + }, { // dummy22 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4902,7 +4902,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder23 + }, { // dummy23 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4932,7 +4932,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder24 + }, { // dummy24 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5268,7 +5268,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder25 + }, { // dummy25 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -5596,7 +5596,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, 
.zeroPoint = 0 - }, { // placeholder26 + }, { // dummy26 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -5626,7 +5626,7 @@ .scale = 0.125f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder27 + }, { // dummy27 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -5964,7 +5964,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder28 + }, { // dummy28 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -6298,7 +6298,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder29 + }, { // dummy29 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -6626,7 +6626,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder30 + }, { // dummy30 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -6954,7 +6954,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder31 + }, { // dummy31 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -6984,7 +6984,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder32 + }, { // dummy32 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -7014,7 +7014,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder33 + }, { // dummy33 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -7290,7 +7290,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder34 + }, { // dummy34 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ 
-7558,7 +7558,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder35 + }, { // dummy35 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -7588,7 +7588,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder36 + }, { // dummy36 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -7618,7 +7618,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder37 + }, { // dummy37 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -7894,7 +7894,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder38 + }, { // dummy38 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -8162,7 +8162,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder39 + }, { // dummy39 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -8192,7 +8192,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder40 + }, { // dummy40 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -8222,7 +8222,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder41 + }, { // dummy41 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -8498,7 +8498,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 127 - }, { // placeholder42 + }, { // dummy42 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({127}), .dimensions = {1}, @@ -8766,7 +8766,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 127 - }, { // placeholder43 + }, { // dummy43 .channelQuant = {}, .data = 
TestBuffer::createFromVector<uint8_t>({127}), .dimensions = {1}, @@ -8796,7 +8796,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 127 - }, { // placeholder44 + }, { // dummy44 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({127}), .dimensions = {1}, @@ -9074,7 +9074,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 127 - }, { // placeholder45 + }, { // dummy45 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({127}), .dimensions = {1}, @@ -9348,7 +9348,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 127 - }, { // placeholder46 + }, { // dummy46 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({127}), .dimensions = {1}, @@ -9616,7 +9616,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder47 + }, { // dummy47 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -9884,7 +9884,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder48 + }, { // dummy48 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -9914,7 +9914,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder49 + }, { // dummy49 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -9944,7 +9944,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder50 + }, { // dummy50 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -10220,7 +10220,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder51 + }, { // dummy51 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -10488,7 +10488,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, 
.zeroPoint = 0 - }, { // placeholder52 + }, { // dummy52 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -10518,7 +10518,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder53 + }, { // dummy53 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -10548,7 +10548,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder54 + }, { // dummy54 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -10824,7 +10824,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder55 + }, { // dummy55 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -11092,7 +11092,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder56 + }, { // dummy56 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -11122,7 +11122,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder57 + }, { // dummy57 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -11152,7 +11152,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder58 + }, { // dummy58 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -11428,7 +11428,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 127 - }, { // placeholder59 + }, { // dummy59 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({127}), .dimensions = {1}, @@ -11696,7 +11696,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 127 - }, { // placeholder60 + }, { // dummy60 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({127}), .dimensions = {1}, @@ 
-11726,7 +11726,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 127 - }, { // placeholder61 + }, { // dummy61 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({127}), .dimensions = {1}, @@ -12004,7 +12004,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 127 - }, { // placeholder62 + }, { // dummy62 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({127}), .dimensions = {1}, @@ -12278,7 +12278,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 127 - }, { // placeholder63 + }, { // dummy63 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({127}), .dimensions = {1}, @@ -12546,7 +12546,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder64 + }, { // dummy64 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -12814,7 +12814,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder65 + }, { // dummy65 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -12844,7 +12844,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder66 + }, { // dummy66 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -12874,7 +12874,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder67 + }, { // dummy67 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -13210,7 +13210,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder68 + }, { // dummy68 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -13538,7 +13538,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder69 + }, { // 
dummy69 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -13568,7 +13568,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder70 + }, { // dummy70 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -13598,7 +13598,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder71 + }, { // dummy71 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -13934,7 +13934,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder72 + }, { // dummy72 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -14262,7 +14262,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder73 + }, { // dummy73 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -14292,7 +14292,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder74 + }, { // dummy74 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -14322,7 +14322,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder75 + }, { // dummy75 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -14658,7 +14658,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder76 + }, { // dummy76 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -14986,7 +14986,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder77 + }, { // dummy77 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -15016,7 +15016,7 @@ .scale = 0.5f, .type = 
TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder78 + }, { // dummy78 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -15354,7 +15354,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder79 + }, { // dummy79 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -15688,7 +15688,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder80 + }, { // dummy80 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -16016,7 +16016,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder81 + }, { // dummy81 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -16344,7 +16344,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder82 + }, { // dummy82 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -16374,7 +16374,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder83 + }, { // dummy83 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -16404,7 +16404,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder84 + }, { // dummy84 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -16740,7 +16740,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder85 + }, { // dummy85 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -17068,7 +17068,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder86 + }, { // dummy86 .channelQuant = {}, .data = 
TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -17098,7 +17098,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder87 + }, { // dummy87 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -17128,7 +17128,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder88 + }, { // dummy88 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -17464,7 +17464,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder89 + }, { // dummy89 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -17792,7 +17792,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder90 + }, { // dummy90 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -17822,7 +17822,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder91 + }, { // dummy91 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -17852,7 +17852,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder92 + }, { // dummy92 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -18188,7 +18188,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder93 + }, { // dummy93 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -18516,7 +18516,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder94 + }, { // dummy94 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -18546,7 +18546,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint 
= 0 - }, { // placeholder95 + }, { // dummy95 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -18884,7 +18884,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder96 + }, { // dummy96 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -19218,7 +19218,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder97 + }, { // dummy97 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -19546,7 +19546,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder98 + }, { // dummy98 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -19874,7 +19874,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder99 + }, { // dummy99 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -19904,7 +19904,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder100 + }, { // dummy100 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -19934,7 +19934,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder101 + }, { // dummy101 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -20270,7 +20270,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder102 + }, { // dummy102 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -20598,7 +20598,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder103 + }, { // dummy103 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ 
-20628,7 +20628,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder104 + }, { // dummy104 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -20658,7 +20658,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder105 + }, { // dummy105 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -20994,7 +20994,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder106 + }, { // dummy106 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -21322,7 +21322,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder107 + }, { // dummy107 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -21352,7 +21352,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder108 + }, { // dummy108 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -21382,7 +21382,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder109 + }, { // dummy109 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -21718,7 +21718,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder110 + }, { // dummy110 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -22046,7 +22046,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder111 + }, { // dummy111 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -22076,7 +22076,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder112 + }, { // 
dummy112 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -22414,7 +22414,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder113 + }, { // dummy113 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -22748,7 +22748,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder114 + }, { // dummy114 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -23082,7 +23082,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 127 - }, { // placeholder115 + }, { // dummy115 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({127}), .dimensions = {1}, @@ -23416,7 +23416,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 127 - }, { // placeholder116 + }, { // dummy116 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({127}), .dimensions = {1}, @@ -23744,7 +23744,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder117 + }, { // dummy117 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -24072,7 +24072,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder118 + }, { // dummy118 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -24102,7 +24102,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder119 + }, { // dummy119 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -24132,7 +24132,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder120 + }, { // dummy120 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ 
-24468,7 +24468,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder121 + }, { // dummy121 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -24796,7 +24796,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder122 + }, { // dummy122 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -24826,7 +24826,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder123 + }, { // dummy123 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -24856,7 +24856,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder124 + }, { // dummy124 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -25192,7 +25192,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder125 + }, { // dummy125 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -25520,7 +25520,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder126 + }, { // dummy126 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -25550,7 +25550,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder127 + }, { // dummy127 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -25580,7 +25580,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder128 + }, { // dummy128 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -25916,7 +25916,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder129 + }, { // dummy129 
.channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -26244,7 +26244,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder130 + }, { // dummy130 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -26274,7 +26274,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder131 + }, { // dummy131 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -26612,7 +26612,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder132 + }, { // dummy132 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -26946,7 +26946,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder133 + }, { // dummy133 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -27280,7 +27280,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 127 - }, { // placeholder134 + }, { // dummy134 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({127}), .dimensions = {1}, @@ -27614,7 +27614,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 127 - }, { // placeholder135 + }, { // dummy135 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({127}), .dimensions = {1}, @@ -27942,7 +27942,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder136 + }, { // dummy136 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -28270,7 +28270,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder137 + }, { // dummy137 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ 
-28300,7 +28300,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder138 + }, { // dummy138 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -28330,7 +28330,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder139 + }, { // dummy139 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -28606,7 +28606,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder140 + }, { // dummy140 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -28874,7 +28874,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder141 + }, { // dummy141 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -28904,7 +28904,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder142 + }, { // dummy142 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -28934,7 +28934,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder143 + }, { // dummy143 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -29210,7 +29210,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder144 + }, { // dummy144 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -29478,7 +29478,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder145 + }, { // dummy145 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -29508,7 +29508,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder146 + }, { // dummy146 
.channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -29538,7 +29538,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder147 + }, { // dummy147 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -29814,7 +29814,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder148 + }, { // dummy148 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -30082,7 +30082,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder149 + }, { // dummy149 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -30112,7 +30112,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder150 + }, { // dummy150 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -30142,7 +30142,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder151 + }, { // dummy151 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -30418,7 +30418,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder152 + }, { // dummy152 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -30686,7 +30686,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder153 + }, { // dummy153 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -30716,7 +30716,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder154 + }, { // dummy154 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -30746,7 +30746,7 @@ .scale = 0.0f, .type = 
TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder155 + }, { // dummy155 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -31022,7 +31022,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder156 + }, { // dummy156 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -31290,7 +31290,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder157 + }, { // dummy157 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -31320,7 +31320,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder158 + }, { // dummy158 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -31350,7 +31350,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder159 + }, { // dummy159 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -31626,7 +31626,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder160 + }, { // dummy160 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -31894,7 +31894,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder161 + }, { // dummy161 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -31924,7 +31924,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder162 + }, { // dummy162 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -31954,7 +31954,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder163 + }, { // dummy163 .channelQuant = {}, .data = 
TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -32230,7 +32230,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder164 + }, { // dummy164 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -32498,7 +32498,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder165 + }, { // dummy165 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -32528,7 +32528,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder166 + }, { // dummy166 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -32558,7 +32558,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder167 + }, { // dummy167 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -32834,7 +32834,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder168 + }, { // dummy168 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -33102,7 +33102,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder169 + }, { // dummy169 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -33132,7 +33132,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder170 + }, { // dummy170 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -33162,7 +33162,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder171 + }, { // dummy171 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -33438,7 +33438,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, 
.zeroPoint = 0 - }, { // placeholder172 + }, { // dummy172 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -33706,7 +33706,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder173 + }, { // dummy173 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -33736,7 +33736,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder174 + }, { // dummy174 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -33766,7 +33766,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder175 + }, { // dummy175 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -34042,7 +34042,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder176 + }, { // dummy176 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -34310,7 +34310,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder177 + }, { // dummy177 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -34340,7 +34340,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder178 + }, { // dummy178 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -34370,7 +34370,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder179 + }, { // dummy179 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -34646,7 +34646,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder180 + }, { // dummy180 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), 
.dimensions = {1}, @@ -34914,7 +34914,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder181 + }, { // dummy181 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -34944,7 +34944,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder182 + }, { // dummy182 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -34974,7 +34974,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder183 + }, { // dummy183 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -35250,7 +35250,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder184 + }, { // dummy184 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -35518,7 +35518,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder185 + }, { // dummy185 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -35548,7 +35548,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder186 + }, { // dummy186 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -35578,7 +35578,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder187 + }, { // dummy187 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -35854,7 +35854,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder188 + }, { // dummy188 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -36122,7 +36122,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder189 
+ }, { // dummy189 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -36152,7 +36152,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder190 + }, { // dummy190 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -36182,7 +36182,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder191 + }, { // dummy191 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -36458,7 +36458,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder192 + }, { // dummy192 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -36726,7 +36726,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder193 + }, { // dummy193 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -36756,7 +36756,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder194 + }, { // dummy194 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -36786,7 +36786,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder195 + }, { // dummy195 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -37062,7 +37062,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder196 + }, { // dummy196 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -37330,7 +37330,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder197 + }, { // dummy197 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -37360,7 +37360,7 @@ .scale = 
0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder198 + }, { // dummy198 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -37390,7 +37390,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder199 + }, { // dummy199 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -37666,7 +37666,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder200 + }, { // dummy200 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -37934,7 +37934,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder201 + }, { // dummy201 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -37964,7 +37964,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder202 + }, { // dummy202 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -37994,7 +37994,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder203 + }, { // dummy203 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -38270,7 +38270,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder204 + }, { // dummy204 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -38538,7 +38538,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder205 + }, { // dummy205 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -38568,7 +38568,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder206 + }, { // dummy206 .channelQuant = {}, .data = 
TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -38598,7 +38598,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder207 + }, { // dummy207 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -38874,7 +38874,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder208 + }, { // dummy208 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -39142,7 +39142,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder209 + }, { // dummy209 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -39172,7 +39172,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder210 + }, { // dummy210 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -39202,7 +39202,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder211 + }, { // dummy211 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -39478,7 +39478,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder212 + }, { // dummy212 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -39746,7 +39746,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder213 + }, { // dummy213 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -39776,7 +39776,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder214 + }, { // dummy214 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -39806,7 +39806,7 @@ .scale = 0.0f, .type = 
TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder215 + }, { // dummy215 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -40082,7 +40082,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder216 + }, { // dummy216 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -40350,7 +40350,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder217 + }, { // dummy217 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -40380,7 +40380,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder218 + }, { // dummy218 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -40410,7 +40410,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder219 + }, { // dummy219 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -40686,7 +40686,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder220 + }, { // dummy220 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -40954,7 +40954,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder221 + }, { // dummy221 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -40984,7 +40984,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder222 + }, { // dummy222 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -41014,7 +41014,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder223 + }, { // dummy223 .channelQuant = {}, .data = 
TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -41290,7 +41290,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder224 + }, { // dummy224 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -41558,7 +41558,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder225 + }, { // dummy225 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -41588,7 +41588,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder226 + }, { // dummy226 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -41618,7 +41618,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder227 + }, { // dummy227 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -41894,7 +41894,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder228 + }, { // dummy228 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -42162,7 +42162,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder229 + }, { // dummy229 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -42192,7 +42192,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder230 + }, { // dummy230 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -42222,7 +42222,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder231 + }, { // dummy231 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -42498,7 +42498,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, 
.zeroPoint = 0 - }, { // placeholder232 + }, { // dummy232 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -42766,7 +42766,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder233 + }, { // dummy233 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -42796,7 +42796,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder234 + }, { // dummy234 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -42826,7 +42826,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder235 + }, { // dummy235 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -43102,7 +43102,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder236 + }, { // dummy236 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -43370,7 +43370,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder237 + }, { // dummy237 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -43400,7 +43400,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder238 + }, { // dummy238 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -43430,7 +43430,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder239 + }, { // dummy239 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -43706,7 +43706,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder240 + }, { // dummy240 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), 
.dimensions = {1}, @@ -43974,7 +43974,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder241 + }, { // dummy241 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -44004,7 +44004,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder242 + }, { // dummy242 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -44034,7 +44034,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder243 + }, { // dummy243 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -44310,7 +44310,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder244 + }, { // dummy244 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -44578,7 +44578,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder245 + }, { // dummy245 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -44608,7 +44608,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder246 + }, { // dummy246 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -44638,7 +44638,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder247 + }, { // dummy247 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -44914,7 +44914,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder248 + }, { // dummy248 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -45182,7 +45182,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder249 
+ }, { // dummy249 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -45212,7 +45212,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder250 + }, { // dummy250 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -45242,7 +45242,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder251 + }, { // dummy251 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -45518,7 +45518,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder252 + }, { // dummy252 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -45786,7 +45786,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder253 + }, { // dummy253 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -45816,7 +45816,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder254 + }, { // dummy254 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -45846,7 +45846,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder255 + }, { // dummy255 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -46122,7 +46122,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder256 + }, { // dummy256 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -46390,7 +46390,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder257 + }, { // dummy257 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -46420,7 +46420,7 @@ .scale = 
0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder258 + }, { // dummy258 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -46450,7 +46450,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder259 + }, { // dummy259 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -46726,7 +46726,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder260 + }, { // dummy260 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -46994,7 +46994,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder261 + }, { // dummy261 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -47024,7 +47024,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder262 + }, { // dummy262 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -47054,7 +47054,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder263 + }, { // dummy263 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -47330,7 +47330,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder264 + }, { // dummy264 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -47598,7 +47598,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder265 + }, { // dummy265 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -47628,7 +47628,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder266 + }, { // dummy266 .channelQuant = {}, .data = 
TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -47658,7 +47658,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder267 + }, { // dummy267 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -47934,7 +47934,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder268 + }, { // dummy268 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -48202,7 +48202,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder269 + }, { // dummy269 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -48232,7 +48232,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder270 + }, { // dummy270 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -48262,7 +48262,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder271 + }, { // dummy271 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -48538,7 +48538,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder272 + }, { // dummy272 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -48806,7 +48806,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder273 + }, { // dummy273 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -48836,7 +48836,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder274 + }, { // dummy274 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -48866,7 +48866,7 @@ .scale = 0.0f, .type = 
TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder275 + }, { // dummy275 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -49142,7 +49142,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder276 + }, { // dummy276 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -49410,7 +49410,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder277 + }, { // dummy277 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -49440,7 +49440,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder278 + }, { // dummy278 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -49470,7 +49470,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder279 + }, { // dummy279 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -49746,7 +49746,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder280 + }, { // dummy280 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -50014,7 +50014,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder281 + }, { // dummy281 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -50044,7 +50044,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder282 + }, { // dummy282 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -50074,7 +50074,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder283 + }, { // dummy283 .channelQuant = {}, .data = 
TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -50350,7 +50350,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder284 + }, { // dummy284 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -50618,7 +50618,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder285 + }, { // dummy285 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -50648,7 +50648,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder286 + }, { // dummy286 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -50678,7 +50678,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder287 + }, { // dummy287 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -50954,7 +50954,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder288 + }, { // dummy288 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -51222,7 +51222,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder289 + }, { // dummy289 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -51252,7 +51252,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder290 + }, { // dummy290 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -51282,7 +51282,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder291 + }, { // dummy291 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -51558,7 +51558,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, 
.zeroPoint = 0 - }, { // placeholder292 + }, { // dummy292 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -51826,7 +51826,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder293 + }, { // dummy293 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -51856,7 +51856,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder294 + }, { // dummy294 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -51886,7 +51886,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder295 + }, { // dummy295 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -52162,7 +52162,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder296 + }, { // dummy296 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -52430,7 +52430,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder297 + }, { // dummy297 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -52460,7 +52460,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder298 + }, { // dummy298 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -52490,7 +52490,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder299 + }, { // dummy299 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -52766,7 +52766,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder300 + }, { // dummy300 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), 
.dimensions = {1}, @@ -53034,7 +53034,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder301 + }, { // dummy301 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -53064,7 +53064,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder302 + }, { // dummy302 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -53094,7 +53094,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder303 + }, { // dummy303 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -53370,7 +53370,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder304 + }, { // dummy304 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -53638,7 +53638,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder305 + }, { // dummy305 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -53668,7 +53668,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder306 + }, { // dummy306 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -53698,7 +53698,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder307 + }, { // dummy307 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -53974,7 +53974,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder308 + }, { // dummy308 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -54242,7 +54242,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder309 
+ }, { // dummy309 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -54272,7 +54272,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder310 + }, { // dummy310 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -54302,7 +54302,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder311 + }, { // dummy311 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -54578,7 +54578,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder312 + }, { // dummy312 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -54846,7 +54846,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder313 + }, { // dummy313 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -54876,7 +54876,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder314 + }, { // dummy314 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -54906,7 +54906,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder315 + }, { // dummy315 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -55182,7 +55182,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder316 + }, { // dummy316 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -55450,7 +55450,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder317 + }, { // dummy317 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -55480,7 +55480,7 @@ .scale = 
0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder318 + }, { // dummy318 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -55510,7 +55510,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder319 + }, { // dummy319 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -55786,7 +55786,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder320 + }, { // dummy320 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -56054,7 +56054,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder321 + }, { // dummy321 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -56084,7 +56084,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder322 + }, { // dummy322 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -56114,7 +56114,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder323 + }, { // dummy323 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -56390,7 +56390,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder324 + }, { // dummy324 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -56658,7 +56658,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder325 + }, { // dummy325 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -56688,7 +56688,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder326 + }, { // dummy326 .channelQuant = {}, .data = 
TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -56718,7 +56718,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder327 + }, { // dummy327 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -56994,7 +56994,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder328 + }, { // dummy328 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -57262,7 +57262,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder329 + }, { // dummy329 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -57292,7 +57292,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder330 + }, { // dummy330 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -57322,7 +57322,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder331 + }, { // dummy331 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -57578,7 +57578,7 @@ .scale = 1.0058823529411764f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder332 + }, { // dummy332 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -57826,7 +57826,7 @@ .scale = 1.0058823529411764f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder333 + }, { // dummy333 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -57856,7 +57856,7 @@ .scale = 1.0058823529411764f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder334 + }, { // dummy334 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ 
-58108,7 +58108,7 @@ .scale = 1.0058823529411764f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder335 + }, { // dummy335 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -58356,7 +58356,7 @@ .scale = 1.0058823529411764f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder336 + }, { // dummy336 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -58386,7 +58386,7 @@ .scale = 1.0058823529411764f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder337 + }, { // dummy337 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/depth_to_space_v1_2.example.cpp b/runtime/test/generated/spec_V1_2/depth_to_space_v1_2.example.cpp index dde80b4..b7890f7 100644 --- a/runtime/test/generated/spec_V1_2/depth_to_space_v1_2.example.cpp +++ b/runtime/test/generated/spec_V1_2/depth_to_space_v1_2.example.cpp
@@ -130,7 +130,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -298,7 +298,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -466,7 +466,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -634,7 +634,7 @@ .scale = 0.1f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -802,7 +802,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -970,7 +970,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1138,7 +1138,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1306,7 +1306,7 @@ .scale = 0.1f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -1474,7 +1474,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = 
TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1642,7 +1642,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1810,7 +1810,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1978,7 +1978,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -2146,7 +2146,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2314,7 +2314,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2482,7 +2482,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -2650,7 +2650,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder15 + }, { // dummy15 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -2818,7 +2818,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder16 + }, { // dummy16 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2986,7 +2986,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { 
// placeholder17 + }, { // dummy17 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3154,7 +3154,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder18 + }, { // dummy18 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -3322,7 +3322,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder19 + }, { // dummy19 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -3490,7 +3490,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder20 + }, { // dummy20 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3658,7 +3658,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder21 + }, { // dummy21 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3826,7 +3826,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder22 + }, { // dummy22 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -3994,7 +3994,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder23 + }, { // dummy23 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/depthwise_conv2d_dilation.example.cpp b/runtime/test/generated/spec_V1_2/depthwise_conv2d_dilation.example.cpp index 28fc287..66ffb3b 100644 --- a/runtime/test/generated/spec_V1_2/depthwise_conv2d_dilation.example.cpp +++ b/runtime/test/generated/spec_V1_2/depthwise_conv2d_dilation.example.cpp
@@ -350,7 +350,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -738,7 +738,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -768,7 +768,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -798,7 +798,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1194,7 +1194,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1582,7 +1582,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1612,7 +1612,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1642,7 +1642,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2038,7 +2038,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), 
.dimensions = {1}, @@ -2426,7 +2426,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -2456,7 +2456,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -2486,7 +2486,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -2882,7 +2882,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -3270,7 +3270,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -3300,7 +3300,7 @@ .scale = 0.01f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -3692,7 +3692,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder15 + }, { // dummy15 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4080,7 +4080,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder16 + }, { // dummy16 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4110,7 +4110,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder17 + }, { // dummy17 
.channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4140,7 +4140,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder18 + }, { // dummy18 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4536,7 +4536,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder19 + }, { // dummy19 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4924,7 +4924,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder20 + }, { // dummy20 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4954,7 +4954,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder21 + }, { // dummy21 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4984,7 +4984,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder22 + }, { // dummy22 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5380,7 +5380,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder23 + }, { // dummy23 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -5768,7 +5768,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder24 + }, { // dummy24 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -5798,7 +5798,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder25 + }, { // dummy25 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -5828,7 +5828,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, 
.zeroPoint = 0 - }, { // placeholder26 + }, { // dummy26 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -6224,7 +6224,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder27 + }, { // dummy27 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -6612,7 +6612,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder28 + }, { // dummy28 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -6642,7 +6642,7 @@ .scale = 0.01f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder29 + }, { // dummy29 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -7034,7 +7034,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder30 + }, { // dummy30 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -7422,7 +7422,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder31 + }, { // dummy31 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -7452,7 +7452,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder32 + }, { // dummy32 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -7482,7 +7482,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder33 + }, { // dummy33 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -7878,7 +7878,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder34 + }, { // dummy34 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -8266,7 +8266,7 
@@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder35 + }, { // dummy35 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -8296,7 +8296,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder36 + }, { // dummy36 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -8326,7 +8326,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder37 + }, { // dummy37 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -8662,7 +8662,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder38 + }, { // dummy38 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -8990,7 +8990,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder39 + }, { // dummy39 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -9020,7 +9020,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder40 + }, { // dummy40 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -9050,7 +9050,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder41 + }, { // dummy41 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -9386,7 +9386,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder42 + }, { // dummy42 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -9714,7 +9714,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder43 + }, { // dummy43 .channelQuant = {}, .data = 
TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -9744,7 +9744,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder44 + }, { // dummy44 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -9774,7 +9774,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder45 + }, { // dummy45 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -10110,7 +10110,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder46 + }, { // dummy46 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -10438,7 +10438,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder47 + }, { // dummy47 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -10468,7 +10468,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder48 + }, { // dummy48 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -10498,7 +10498,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder49 + }, { // dummy49 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -10834,7 +10834,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder50 + }, { // dummy50 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -11162,7 +11162,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder51 + }, { // dummy51 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -11192,7 +11192,7 @@ .scale = 0.01f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, 
.zeroPoint = 0 - }, { // placeholder52 + }, { // dummy52 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -11524,7 +11524,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder53 + }, { // dummy53 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -11852,7 +11852,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder54 + }, { // dummy54 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -11882,7 +11882,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder55 + }, { // dummy55 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -11912,7 +11912,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder56 + }, { // dummy56 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -12248,7 +12248,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder57 + }, { // dummy57 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -12576,7 +12576,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder58 + }, { // dummy58 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -12606,7 +12606,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder59 + }, { // dummy59 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -12636,7 +12636,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder60 + }, { // dummy60 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -12972,7 
+12972,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder61 + }, { // dummy61 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -13300,7 +13300,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder62 + }, { // dummy62 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -13330,7 +13330,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder63 + }, { // dummy63 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -13360,7 +13360,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder64 + }, { // dummy64 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -13696,7 +13696,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder65 + }, { // dummy65 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -14024,7 +14024,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder66 + }, { // dummy66 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -14054,7 +14054,7 @@ .scale = 0.01f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder67 + }, { // dummy67 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -14386,7 +14386,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder68 + }, { // dummy68 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -14714,7 +14714,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder69 + }, { // dummy69 .channelQuant = 
{}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -14744,7 +14744,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder70 + }, { // dummy70 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -14774,7 +14774,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder71 + }, { // dummy71 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -15110,7 +15110,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder72 + }, { // dummy72 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -15438,7 +15438,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder73 + }, { // dummy73 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -15468,7 +15468,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder74 + }, { // dummy74 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -15498,7 +15498,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder75 + }, { // dummy75 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -15834,7 +15834,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder76 + }, { // dummy76 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -16162,7 +16162,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder77 + }, { // dummy77 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -16192,7 +16192,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint 
= 0 - }, { // placeholder78 + }, { // dummy78 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -16222,7 +16222,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder79 + }, { // dummy79 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -16558,7 +16558,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder80 + }, { // dummy80 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -16886,7 +16886,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder81 + }, { // dummy81 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -16916,7 +16916,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder82 + }, { // dummy82 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -16946,7 +16946,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder83 + }, { // dummy83 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -17282,7 +17282,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder84 + }, { // dummy84 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -17610,7 +17610,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder85 + }, { // dummy85 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -17640,7 +17640,7 @@ .scale = 0.125f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder86 + }, { // dummy86 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -17972,7 
+17972,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder87 + }, { // dummy87 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -18300,7 +18300,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder88 + }, { // dummy88 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -18330,7 +18330,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder89 + }, { // dummy89 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -18360,7 +18360,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder90 + }, { // dummy90 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -18696,7 +18696,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder91 + }, { // dummy91 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -19024,7 +19024,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder92 + }, { // dummy92 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -19054,7 +19054,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder93 + }, { // dummy93 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -19084,7 +19084,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder94 + }, { // dummy94 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -19420,7 +19420,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder95 + }, { // dummy95 .channelQuant = {}, .data = 
TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -19748,7 +19748,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder96 + }, { // dummy96 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -19778,7 +19778,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder97 + }, { // dummy97 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -19808,7 +19808,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder98 + }, { // dummy98 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -20144,7 +20144,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder99 + }, { // dummy99 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -20472,7 +20472,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder100 + }, { // dummy100 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -20502,7 +20502,7 @@ .scale = 0.125f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder101 + }, { // dummy101 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -20834,7 +20834,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder102 + }, { // dummy102 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -21162,7 +21162,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder103 + }, { // dummy103 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -21192,7 +21192,7 @@ .scale = 0.0f, .type = 
TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder104 + }, { // dummy104 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -21222,7 +21222,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder105 + }, { // dummy105 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/depthwise_conv2d_invalid_filter_dims.example.cpp b/runtime/test/generated/spec_V1_2/depthwise_conv2d_invalid_filter_dims.example.cpp index 5645f0f..eca9eb7 100644 --- a/runtime/test/generated/spec_V1_2/depthwise_conv2d_invalid_filter_dims.example.cpp +++ b/runtime/test/generated/spec_V1_2/depthwise_conv2d_invalid_filter_dims.example.cpp
@@ -310,7 +310,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -658,7 +658,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -688,7 +688,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -718,7 +718,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1074,7 +1074,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1422,7 +1422,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1452,7 +1452,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1482,7 +1482,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1838,7 +1838,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), 
.dimensions = {1}, @@ -2186,7 +2186,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -2216,7 +2216,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -2246,7 +2246,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -2608,7 +2608,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -2962,7 +2962,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -3310,7 +3310,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -3658,7 +3658,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder15 + }, { // dummy15 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -3688,7 +3688,7 @@ .scale = 0.01f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder16 + }, { // dummy16 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -4040,7 +4040,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder17 + }, { // dummy17 
.channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4388,7 +4388,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder18 + }, { // dummy18 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4418,7 +4418,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder19 + }, { // dummy19 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4448,7 +4448,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder20 + }, { // dummy20 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4804,7 +4804,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder21 + }, { // dummy21 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5152,7 +5152,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder22 + }, { // dummy22 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5182,7 +5182,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder23 + }, { // dummy23 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5212,7 +5212,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder24 + }, { // dummy24 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5568,7 +5568,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder25 + }, { // dummy25 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -5916,7 +5916,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint 
= 0 - }, { // placeholder26 + }, { // dummy26 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -5946,7 +5946,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder27 + }, { // dummy27 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -5976,7 +5976,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder28 + }, { // dummy28 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -6338,7 +6338,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder29 + }, { // dummy29 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -6692,7 +6692,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder30 + }, { // dummy30 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -7040,7 +7040,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder31 + }, { // dummy31 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -7388,7 +7388,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder32 + }, { // dummy32 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -7418,7 +7418,7 @@ .scale = 0.01f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder33 + }, { // dummy33 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/depthwise_conv2d_per_channel.example.cpp b/runtime/test/generated/spec_V1_2/depthwise_conv2d_per_channel.example.cpp index 3f3b5da..9a85005 100644 --- a/runtime/test/generated/spec_V1_2/depthwise_conv2d_per_channel.example.cpp +++ b/runtime/test/generated/spec_V1_2/depthwise_conv2d_per_channel.example.cpp
@@ -296,7 +296,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -630,7 +630,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -964,7 +964,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -1298,7 +1298,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -1652,7 +1652,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -2006,7 +2006,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -2360,7 +2360,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -2714,7 +2714,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/depthwise_conv2d_v1_2.example.cpp b/runtime/test/generated/spec_V1_2/depthwise_conv2d_v1_2.example.cpp index 6463914..eb31cae 100644 --- a/runtime/test/generated/spec_V1_2/depthwise_conv2d_v1_2.example.cpp +++ b/runtime/test/generated/spec_V1_2/depthwise_conv2d_v1_2.example.cpp
@@ -310,7 +310,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -658,7 +658,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -688,7 +688,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -718,7 +718,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1074,7 +1074,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1422,7 +1422,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1452,7 +1452,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1482,7 +1482,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1838,7 +1838,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), 
.dimensions = {1}, @@ -2186,7 +2186,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -2216,7 +2216,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -2246,7 +2246,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -2608,7 +2608,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -2962,7 +2962,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -3316,7 +3316,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -3670,7 +3670,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder15 + }, { // dummy15 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -4018,7 +4018,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder16 + }, { // dummy16 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -4366,7 +4366,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder17 + }, { // 
dummy17 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -4396,7 +4396,7 @@ .scale = 0.01f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder18 + }, { // dummy18 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -4748,7 +4748,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder19 + }, { // dummy19 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5096,7 +5096,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder20 + }, { // dummy20 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5126,7 +5126,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder21 + }, { // dummy21 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5156,7 +5156,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder22 + }, { // dummy22 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5512,7 +5512,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder23 + }, { // dummy23 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5860,7 +5860,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder24 + }, { // dummy24 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5890,7 +5890,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder25 + }, { // dummy25 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5920,7 +5920,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, 
.zeroPoint = 0 - }, { // placeholder26 + }, { // dummy26 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -6276,7 +6276,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder27 + }, { // dummy27 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -6624,7 +6624,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder28 + }, { // dummy28 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -6654,7 +6654,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder29 + }, { // dummy29 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -6684,7 +6684,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder30 + }, { // dummy30 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -7046,7 +7046,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder31 + }, { // dummy31 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -7400,7 +7400,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder32 + }, { // dummy32 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -7754,7 +7754,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder33 + }, { // dummy33 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -8108,7 +8108,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder34 + }, { // dummy34 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ 
-8456,7 +8456,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder35 + }, { // dummy35 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -8804,7 +8804,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder36 + }, { // dummy36 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -8834,7 +8834,7 @@ .scale = 0.01f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder37 + }, { // dummy37 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -9126,7 +9126,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder38 + }, { // dummy38 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -9414,7 +9414,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder39 + }, { // dummy39 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -9444,7 +9444,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder40 + }, { // dummy40 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -9474,7 +9474,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder41 + }, { // dummy41 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -9770,7 +9770,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder42 + }, { // dummy42 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -10058,7 +10058,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder43 + }, { // dummy43 .channelQuant = {}, .data = 
TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -10088,7 +10088,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder44 + }, { // dummy44 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -10118,7 +10118,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder45 + }, { // dummy45 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -10414,7 +10414,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder46 + }, { // dummy46 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -10702,7 +10702,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder47 + }, { // dummy47 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -10732,7 +10732,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder48 + }, { // dummy48 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -10762,7 +10762,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder49 + }, { // dummy49 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -11058,7 +11058,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder50 + }, { // dummy50 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -11346,7 +11346,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder51 + }, { // dummy51 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -11376,7 +11376,7 @@ .scale = 0.5f, .type = 
TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder52 + }, { // dummy52 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -11674,7 +11674,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder53 + }, { // dummy53 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -11968,7 +11968,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder54 + }, { // dummy54 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -12256,7 +12256,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder55 + }, { // dummy55 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -12544,7 +12544,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder56 + }, { // dummy56 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -12574,7 +12574,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder57 + }, { // dummy57 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -12604,7 +12604,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder58 + }, { // dummy58 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -12900,7 +12900,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder59 + }, { // dummy59 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -13188,7 +13188,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder60 + }, { // dummy60 .channelQuant = {}, .data = 
TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -13218,7 +13218,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder61 + }, { // dummy61 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -13248,7 +13248,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder62 + }, { // dummy62 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -13544,7 +13544,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder63 + }, { // dummy63 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -13832,7 +13832,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder64 + }, { // dummy64 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -13862,7 +13862,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder65 + }, { // dummy65 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -13892,7 +13892,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder66 + }, { // dummy66 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -14188,7 +14188,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder67 + }, { // dummy67 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -14476,7 +14476,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder68 + }, { // dummy68 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -14506,7 +14506,7 @@ .scale = 0.5f, .type = 
TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder69 + }, { // dummy69 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -14804,7 +14804,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder70 + }, { // dummy70 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -15098,7 +15098,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder71 + }, { // dummy71 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -15446,7 +15446,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder72 + }, { // dummy72 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -15794,7 +15794,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder73 + }, { // dummy73 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -15824,7 +15824,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder74 + }, { // dummy74 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -15854,7 +15854,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder75 + }, { // dummy75 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -16210,7 +16210,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder76 + }, { // dummy76 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -16558,7 +16558,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder77 + }, { // dummy77 .channelQuant = {}, .data = 
TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -16588,7 +16588,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder78 + }, { // dummy78 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -16618,7 +16618,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder79 + }, { // dummy79 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -16974,7 +16974,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder80 + }, { // dummy80 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -17322,7 +17322,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder81 + }, { // dummy81 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -17352,7 +17352,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder82 + }, { // dummy82 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -17382,7 +17382,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder83 + }, { // dummy83 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -17738,7 +17738,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder84 + }, { // dummy84 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -18086,7 +18086,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder85 + }, { // dummy85 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -18116,7 +18116,7 @@ .scale = 0.125f, .type = 
TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder86 + }, { // dummy86 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -18474,7 +18474,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder87 + }, { // dummy87 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -18828,7 +18828,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder88 + }, { // dummy88 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -19176,7 +19176,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder89 + }, { // dummy89 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -19524,7 +19524,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder90 + }, { // dummy90 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -19554,7 +19554,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder91 + }, { // dummy91 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -19584,7 +19584,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder92 + }, { // dummy92 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -19940,7 +19940,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder93 + }, { // dummy93 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -20288,7 +20288,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder94 + }, { // dummy94 .channelQuant = {}, .data = 
TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -20318,7 +20318,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder95 + }, { // dummy95 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -20348,7 +20348,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder96 + }, { // dummy96 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -20704,7 +20704,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder97 + }, { // dummy97 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -21052,7 +21052,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder98 + }, { // dummy98 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -21082,7 +21082,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder99 + }, { // dummy99 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -21112,7 +21112,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder100 + }, { // dummy100 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -21468,7 +21468,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder101 + }, { // dummy101 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -21816,7 +21816,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder102 + }, { // dummy102 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -21846,7 +21846,7 @@ .scale = 0.125f, .type = 
TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder103 + }, { // dummy103 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -22204,7 +22204,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder104 + }, { // dummy104 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -22558,7 +22558,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder105 + }, { // dummy105 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -22906,7 +22906,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder106 + }, { // dummy106 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -23254,7 +23254,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder107 + }, { // dummy107 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -23284,7 +23284,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder108 + }, { // dummy108 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -23314,7 +23314,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder109 + }, { // dummy109 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -23670,7 +23670,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder110 + }, { // dummy110 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -24018,7 +24018,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder111 + }, { // dummy111 .channelQuant = {}, .data = 
TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -24048,7 +24048,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder112 + }, { // dummy112 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -24078,7 +24078,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder113 + }, { // dummy113 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -24434,7 +24434,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder114 + }, { // dummy114 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -24782,7 +24782,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder115 + }, { // dummy115 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -24812,7 +24812,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder116 + }, { // dummy116 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -24842,7 +24842,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder117 + }, { // dummy117 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -25198,7 +25198,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder118 + }, { // dummy118 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -25546,7 +25546,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder119 + }, { // dummy119 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -25576,7 +25576,7 @@ .scale = 0.25f, .type = 
TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder120 + }, { // dummy120 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -25934,7 +25934,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder121 + }, { // dummy121 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -26288,7 +26288,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder122 + }, { // dummy122 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -26636,7 +26636,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder123 + }, { // dummy123 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -26984,7 +26984,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder124 + }, { // dummy124 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -27014,7 +27014,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder125 + }, { // dummy125 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -27044,7 +27044,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder126 + }, { // dummy126 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -27400,7 +27400,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder127 + }, { // dummy127 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -27748,7 +27748,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder128 + }, { // dummy128 .channelQuant = {}, .data = 
TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -27778,7 +27778,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder129 + }, { // dummy129 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -27808,7 +27808,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder130 + }, { // dummy130 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -28164,7 +28164,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder131 + }, { // dummy131 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -28512,7 +28512,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder132 + }, { // dummy132 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -28542,7 +28542,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder133 + }, { // dummy133 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -28572,7 +28572,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder134 + }, { // dummy134 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -28928,7 +28928,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder135 + }, { // dummy135 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -29276,7 +29276,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder136 + }, { // dummy136 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -29306,7 +29306,7 @@ .scale = 0.25f, .type = 
TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder137 + }, { // dummy137 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -29664,7 +29664,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder138 + }, { // dummy138 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -30018,7 +30018,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder139 + }, { // dummy139 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -30286,7 +30286,7 @@ .scale = 1.0058823529411764f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 127 - }, { // placeholder140 + }, { // dummy140 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({127}), .dimensions = {1}, @@ -30554,7 +30554,7 @@ .scale = 1.0058823529411764f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 127 - }, { // placeholder141 + }, { // dummy141 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({127}), .dimensions = {1}, @@ -30584,7 +30584,7 @@ .scale = 1.0058823529411764f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder142 + }, { // dummy142 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -30856,7 +30856,7 @@ .scale = 1.0058823529411764f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 127 - }, { // placeholder143 + }, { // dummy143 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({127}), .dimensions = {1}, @@ -31124,7 +31124,7 @@ .scale = 1.0058823529411764f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 127 - }, { // placeholder144 + }, { // dummy144 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({127}), .dimensions = {1}, @@ -31154,7 +31154,7 @@ .scale = 1.0058823529411764f, .type = 
TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder145 + }, { // dummy145 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/dequantize_v1_2.example.cpp b/runtime/test/generated/spec_V1_2/dequantize_v1_2.example.cpp index 06988a2..f865b8e 100644 --- a/runtime/test/generated/spec_V1_2/dequantize_v1_2.example.cpp +++ b/runtime/test/generated/spec_V1_2/dequantize_v1_2.example.cpp
@@ -90,7 +90,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 127 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({127}), .dimensions = {1}, @@ -218,7 +218,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 127 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({127}), .dimensions = {1}, @@ -346,7 +346,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 127 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({127}), .dimensions = {1}, @@ -474,7 +474,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 127 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({127}), .dimensions = {1}, @@ -602,7 +602,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 127 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({127}), .dimensions = {1}, @@ -730,7 +730,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 127 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({127}), .dimensions = {1}, @@ -1440,7 +1440,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/detection_postprocess.example.cpp b/runtime/test/generated/spec_V1_2/detection_postprocess.example.cpp index 56b74d9..15c4330 100644 --- a/runtime/test/generated/spec_V1_2/detection_postprocess.example.cpp +++ b/runtime/test/generated/spec_V1_2/detection_postprocess.example.cpp
@@ -410,7 +410,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -440,7 +440,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -470,7 +470,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -926,7 +926,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -956,7 +956,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -986,7 +986,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1442,7 +1442,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1472,7 +1472,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1502,7 +1502,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = 
TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1958,7 +1958,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1988,7 +1988,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2018,7 +2018,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2474,7 +2474,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2504,7 +2504,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2534,7 +2534,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2990,7 +2990,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder15 + }, { // dummy15 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -3020,7 +3020,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder16 + }, { // dummy16 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -3050,7 +3050,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // 
placeholder17 + }, { // dummy17 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -3506,7 +3506,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder18 + }, { // dummy18 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3536,7 +3536,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder19 + }, { // dummy19 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3566,7 +3566,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder20 + }, { // dummy20 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4022,7 +4022,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder21 + }, { // dummy21 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4052,7 +4052,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder22 + }, { // dummy22 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4082,7 +4082,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder23 + }, { // dummy23 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4538,7 +4538,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder24 + }, { // dummy24 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -4568,7 +4568,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder25 + }, { // dummy25 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -4598,7 +4598,7 @@ .scale = 0.0f, .type = 
TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder26 + }, { // dummy26 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -5054,7 +5054,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder27 + }, { // dummy27 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5084,7 +5084,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder28 + }, { // dummy28 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5114,7 +5114,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder29 + }, { // dummy29 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5570,7 +5570,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder30 + }, { // dummy30 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5600,7 +5600,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder31 + }, { // dummy31 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5630,7 +5630,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder32 + }, { // dummy32 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -6086,7 +6086,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder33 + }, { // dummy33 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -6116,7 +6116,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder34 + }, { // dummy34 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions 
= {1}, @@ -6146,7 +6146,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder35 + }, { // dummy35 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/div_v1_2.example.cpp b/runtime/test/generated/spec_V1_2/div_v1_2.example.cpp index 1ac2bdd..82c8380 100644 --- a/runtime/test/generated/spec_V1_2/div_v1_2.example.cpp +++ b/runtime/test/generated/spec_V1_2/div_v1_2.example.cpp
@@ -130,7 +130,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -160,7 +160,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -332,7 +332,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -362,7 +362,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1389,7 +1389,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1419,7 +1419,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/embedding_lookup_v1_2.example.cpp b/runtime/test/generated/spec_V1_2/embedding_lookup_v1_2.example.cpp index 7bd7a5f..d81bc22 100644 --- a/runtime/test/generated/spec_V1_2/embedding_lookup_v1_2.example.cpp +++ b/runtime/test/generated/spec_V1_2/embedding_lookup_v1_2.example.cpp
@@ -110,7 +110,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 127 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({127}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/equal.example.cpp b/runtime/test/generated/spec_V1_2/equal.example.cpp index a8e42bc..ee8deb1 100644 --- a/runtime/test/generated/spec_V1_2/equal.example.cpp +++ b/runtime/test/generated/spec_V1_2/equal.example.cpp
@@ -110,7 +110,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -140,7 +140,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -349,7 +349,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -379,7 +379,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -531,7 +531,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -561,7 +561,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -713,7 +713,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -743,7 +743,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -952,7 +952,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), 
.dimensions = {1}, @@ -982,7 +982,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1134,7 +1134,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1164,7 +1164,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1316,7 +1316,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -1346,7 +1346,7 @@ .scale = 2.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -1498,7 +1498,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -1528,7 +1528,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 129 - }, { // placeholder15 + }, { // dummy15 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({129}), .dimensions = {1}, @@ -1680,7 +1680,7 @@ .scale = 1.64771f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 31 - }, { // placeholder16 + }, { // dummy16 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({31}), .dimensions = {1}, @@ -1710,7 +1710,7 @@ .scale = 1.49725f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 240 - }, { // 
placeholder17 + }, { // dummy17 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({240}), .dimensions = {1}, @@ -1862,7 +1862,7 @@ .scale = 1.49725f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 240 - }, { // placeholder18 + }, { // dummy18 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({240}), .dimensions = {1}, @@ -1892,7 +1892,7 @@ .scale = 1.64771f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 31 - }, { // placeholder19 + }, { // dummy19 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({31}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/expand_dims.example.cpp b/runtime/test/generated/spec_V1_2/expand_dims.example.cpp index 8e326b9..f949735 100644 --- a/runtime/test/generated/spec_V1_2/expand_dims.example.cpp +++ b/runtime/test/generated/spec_V1_2/expand_dims.example.cpp
@@ -110,7 +110,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -258,7 +258,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -406,7 +406,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 127 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({127}), .dimensions = {1}, @@ -611,7 +611,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -759,7 +759,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -907,7 +907,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1055,7 +1055,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 127 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({127}), .dimensions = {1}, @@ -1260,7 +1260,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1408,7 +1408,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = 
TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1556,7 +1556,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1704,7 +1704,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 127 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({127}), .dimensions = {1}, @@ -1909,7 +1909,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -2057,7 +2057,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2205,7 +2205,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2353,7 +2353,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 127 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({127}), .dimensions = {1}, @@ -2558,7 +2558,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder15 + }, { // dummy15 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/floor_float16.example.cpp b/runtime/test/generated/spec_V1_2/floor_float16.example.cpp index 95d0b9b..9676216 100644 --- a/runtime/test/generated/spec_V1_2/floor_float16.example.cpp +++ b/runtime/test/generated/spec_V1_2/floor_float16.example.cpp
@@ -90,7 +90,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/fully_connected_v1_2.example.cpp b/runtime/test/generated/spec_V1_2/fully_connected_v1_2.example.cpp index 80ec9fe..f4a1133 100644 --- a/runtime/test/generated/spec_V1_2/fully_connected_v1_2.example.cpp +++ b/runtime/test/generated/spec_V1_2/fully_connected_v1_2.example.cpp
@@ -150,7 +150,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -338,7 +338,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -368,7 +368,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -398,7 +398,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -594,7 +594,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -782,7 +782,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -812,7 +812,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -842,7 +842,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1038,7 +1038,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), 
.dimensions = {1}, @@ -1226,7 +1226,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1256,7 +1256,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1286,7 +1286,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1482,7 +1482,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 127 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({127}), .dimensions = {1}, @@ -1670,7 +1670,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 127 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({127}), .dimensions = {1}, @@ -1700,7 +1700,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 120 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({120}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/gather.example.cpp b/runtime/test/generated/spec_V1_2/gather.example.cpp index 9918fbf..f87a62e 100644 --- a/runtime/test/generated/spec_V1_2/gather.example.cpp +++ b/runtime/test/generated/spec_V1_2/gather.example.cpp
@@ -130,7 +130,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -298,7 +298,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -466,7 +466,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 127 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({127}), .dimensions = {1}, @@ -701,7 +701,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -869,7 +869,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1037,7 +1037,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1205,7 +1205,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 127 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({127}), .dimensions = {1}, @@ -1440,7 +1440,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1608,7 +1608,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = 
TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1776,7 +1776,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1944,7 +1944,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 127 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({127}), .dimensions = {1}, @@ -2179,7 +2179,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -2347,7 +2347,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2515,7 +2515,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2683,7 +2683,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 127 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({127}), .dimensions = {1}, @@ -2918,7 +2918,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder15 + }, { // dummy15 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -3086,7 +3086,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder16 + }, { // dummy16 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3254,7 +3254,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { 
// placeholder17 + }, { // dummy17 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3422,7 +3422,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 127 - }, { // placeholder18 + }, { // dummy18 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({127}), .dimensions = {1}, @@ -3657,7 +3657,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder19 + }, { // dummy19 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -3825,7 +3825,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder20 + }, { // dummy20 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3993,7 +3993,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder21 + }, { // dummy21 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4161,7 +4161,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 127 - }, { // placeholder22 + }, { // dummy22 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({127}), .dimensions = {1}, @@ -4396,7 +4396,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder23 + }, { // dummy23 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -4564,7 +4564,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder24 + }, { // dummy24 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4732,7 +4732,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder25 + }, { // dummy25 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4900,7 +4900,7 @@ .scale = 
0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 127 - }, { // placeholder26 + }, { // dummy26 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({127}), .dimensions = {1}, @@ -5135,7 +5135,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder27 + }, { // dummy27 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -5303,7 +5303,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder28 + }, { // dummy28 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5471,7 +5471,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder29 + }, { // dummy29 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5639,7 +5639,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 127 - }, { // placeholder30 + }, { // dummy30 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({127}), .dimensions = {1}, @@ -5874,7 +5874,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder31 + }, { // dummy31 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/gather_higher_rank.example.cpp b/runtime/test/generated/spec_V1_2/gather_higher_rank.example.cpp index 55cc4ad..9912bb8 100644 --- a/runtime/test/generated/spec_V1_2/gather_higher_rank.example.cpp +++ b/runtime/test/generated/spec_V1_2/gather_higher_rank.example.cpp
@@ -130,7 +130,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -298,7 +298,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -466,7 +466,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 127 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({127}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/generate_proposals.example.cpp b/runtime/test/generated/spec_V1_2/generate_proposals.example.cpp index 12ccf2b..e65c32a 100644 --- a/runtime/test/generated/spec_V1_2/generate_proposals.example.cpp +++ b/runtime/test/generated/spec_V1_2/generate_proposals.example.cpp
@@ -330,7 +330,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -360,7 +360,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -390,7 +390,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -420,7 +420,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -800,7 +800,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -830,7 +830,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -860,7 +860,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -890,7 +890,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1270,7 +1270,7 @@ .scale = 0.01f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), 
.dimensions = {1}, @@ -1300,7 +1300,7 @@ .scale = 0.05f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -1672,7 +1672,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1702,7 +1702,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1732,7 +1732,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1762,7 +1762,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -2142,7 +2142,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2172,7 +2172,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder15 + }, { // dummy15 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2202,7 +2202,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder16 + }, { // dummy16 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2232,7 +2232,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder17 + }, { // dummy17 
.channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2612,7 +2612,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder18 + }, { // dummy18 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2642,7 +2642,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder19 + }, { // dummy19 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2672,7 +2672,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder20 + }, { // dummy20 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2702,7 +2702,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder21 + }, { // dummy21 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3082,7 +3082,7 @@ .scale = 0.01f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder22 + }, { // dummy22 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -3112,7 +3112,7 @@ .scale = 0.05f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder23 + }, { // dummy23 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -3484,7 +3484,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder24 + }, { // dummy24 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -3514,7 +3514,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder25 + }, { // dummy25 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -3544,7 +3544,7 @@ .scale = 0.0f, .type = 
TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder26 + }, { // dummy26 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -3574,7 +3574,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder27 + }, { // dummy27 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -3954,7 +3954,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder28 + }, { // dummy28 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3984,7 +3984,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder29 + }, { // dummy29 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4014,7 +4014,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder30 + }, { // dummy30 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4044,7 +4044,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder31 + }, { // dummy31 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4424,7 +4424,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder32 + }, { // dummy32 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4454,7 +4454,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder33 + }, { // dummy33 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4484,7 +4484,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder34 + }, { // dummy34 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = 
{1}, @@ -4514,7 +4514,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder35 + }, { // dummy35 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4894,7 +4894,7 @@ .scale = 0.005f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder36 + }, { // dummy36 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -4924,7 +4924,7 @@ .scale = 0.1f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder37 + }, { // dummy37 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -5296,7 +5296,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder38 + }, { // dummy38 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -5326,7 +5326,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder39 + }, { // dummy39 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -5356,7 +5356,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder40 + }, { // dummy40 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -5386,7 +5386,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder41 + }, { // dummy41 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -5766,7 +5766,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder42 + }, { // dummy42 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5796,7 +5796,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder43 + }, { // dummy43 .channelQuant = 
{}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5826,7 +5826,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder44 + }, { // dummy44 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5856,7 +5856,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder45 + }, { // dummy45 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -6236,7 +6236,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder46 + }, { // dummy46 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -6266,7 +6266,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder47 + }, { // dummy47 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -6296,7 +6296,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder48 + }, { // dummy48 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -6326,7 +6326,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder49 + }, { // dummy49 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -6706,7 +6706,7 @@ .scale = 0.005f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder50 + }, { // dummy50 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -6736,7 +6736,7 @@ .scale = 0.1f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder51 + }, { // dummy51 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -7108,7 +7108,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 
- }, { // placeholder52 + }, { // dummy52 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -7138,7 +7138,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder53 + }, { // dummy53 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -7168,7 +7168,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder54 + }, { // dummy54 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -7198,7 +7198,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder55 + }, { // dummy55 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/greater.example.cpp b/runtime/test/generated/spec_V1_2/greater.example.cpp index 986ecea..5b5fddf 100644 --- a/runtime/test/generated/spec_V1_2/greater.example.cpp +++ b/runtime/test/generated/spec_V1_2/greater.example.cpp
@@ -110,7 +110,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -140,7 +140,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -349,7 +349,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -379,7 +379,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -531,7 +531,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -561,7 +561,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -713,7 +713,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -743,7 +743,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -952,7 +952,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), 
.dimensions = {1}, @@ -982,7 +982,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1134,7 +1134,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1164,7 +1164,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1316,7 +1316,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -1346,7 +1346,7 @@ .scale = 2.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -1498,7 +1498,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -1528,7 +1528,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 129 - }, { // placeholder15 + }, { // dummy15 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({129}), .dimensions = {1}, @@ -1680,7 +1680,7 @@ .scale = 1.64771f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 31 - }, { // placeholder16 + }, { // dummy16 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({31}), .dimensions = {1}, @@ -1710,7 +1710,7 @@ .scale = 1.49725f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 240 - }, { // 
placeholder17 + }, { // dummy17 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({240}), .dimensions = {1}, @@ -1862,7 +1862,7 @@ .scale = 1.49725f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 240 - }, { // placeholder18 + }, { // dummy18 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({240}), .dimensions = {1}, @@ -1892,7 +1892,7 @@ .scale = 1.64771f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 31 - }, { // placeholder19 + }, { // dummy19 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({31}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/greater_equal.example.cpp b/runtime/test/generated/spec_V1_2/greater_equal.example.cpp index 269a63d..e034a84 100644 --- a/runtime/test/generated/spec_V1_2/greater_equal.example.cpp +++ b/runtime/test/generated/spec_V1_2/greater_equal.example.cpp
@@ -110,7 +110,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -140,7 +140,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -349,7 +349,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -379,7 +379,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -531,7 +531,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -561,7 +561,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -713,7 +713,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -743,7 +743,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -952,7 +952,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), 
.dimensions = {1}, @@ -982,7 +982,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1134,7 +1134,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1164,7 +1164,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1316,7 +1316,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -1346,7 +1346,7 @@ .scale = 2.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -1498,7 +1498,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -1528,7 +1528,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 129 - }, { // placeholder15 + }, { // dummy15 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({129}), .dimensions = {1}, @@ -1680,7 +1680,7 @@ .scale = 1.64771f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 31 - }, { // placeholder16 + }, { // dummy16 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({31}), .dimensions = {1}, @@ -1710,7 +1710,7 @@ .scale = 1.49725f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 240 - }, { // 
placeholder17 + }, { // dummy17 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({240}), .dimensions = {1}, @@ -1862,7 +1862,7 @@ .scale = 1.49725f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 240 - }, { // placeholder18 + }, { // dummy18 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({240}), .dimensions = {1}, @@ -1892,7 +1892,7 @@ .scale = 1.64771f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 31 - }, { // placeholder19 + }, { // dummy19 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({31}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/grouped_conv2d.example.cpp b/runtime/test/generated/spec_V1_2/grouped_conv2d.example.cpp index dedc267..f3b9fe3 100644 --- a/runtime/test/generated/spec_V1_2/grouped_conv2d.example.cpp +++ b/runtime/test/generated/spec_V1_2/grouped_conv2d.example.cpp
@@ -310,7 +310,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -658,7 +658,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -688,7 +688,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -718,7 +718,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1074,7 +1074,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1422,7 +1422,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1452,7 +1452,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1482,7 +1482,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1838,7 +1838,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = 
TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -2186,7 +2186,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -2216,7 +2216,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -2568,7 +2568,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -2916,7 +2916,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -2946,7 +2946,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -3304,7 +3304,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -3658,7 +3658,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder15 + }, { // dummy15 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -4012,7 +4012,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder16 + }, { // dummy16 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -4366,7 +4366,7 @@ .scale = 0.25f, .type = 
TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder17 + }, { // dummy17 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -4714,7 +4714,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder18 + }, { // dummy18 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -5062,7 +5062,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder19 + }, { // dummy19 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -5092,7 +5092,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder20 + }, { // dummy20 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -5122,7 +5122,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder21 + }, { // dummy21 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -5478,7 +5478,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder22 + }, { // dummy22 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5826,7 +5826,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder23 + }, { // dummy23 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5856,7 +5856,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder24 + }, { // dummy24 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5886,7 +5886,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder25 + }, { // dummy25 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), 
.dimensions = {1}, @@ -6242,7 +6242,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder26 + }, { // dummy26 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -6590,7 +6590,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder27 + }, { // dummy27 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -6620,7 +6620,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder28 + }, { // dummy28 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -6650,7 +6650,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder29 + }, { // dummy29 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -7006,7 +7006,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder30 + }, { // dummy30 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -7354,7 +7354,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder31 + }, { // dummy31 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -7384,7 +7384,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder32 + }, { // dummy32 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -7736,7 +7736,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder33 + }, { // dummy33 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -8084,7 +8084,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder34 + 
}, { // dummy34 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -8114,7 +8114,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder35 + }, { // dummy35 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -8472,7 +8472,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder36 + }, { // dummy36 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -8826,7 +8826,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder37 + }, { // dummy37 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -9180,7 +9180,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder38 + }, { // dummy38 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -9534,7 +9534,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder39 + }, { // dummy39 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -9882,7 +9882,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder40 + }, { // dummy40 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -10230,7 +10230,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder41 + }, { // dummy41 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -10260,7 +10260,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder42 + }, { // dummy42 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -10290,7 
+10290,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder43 + }, { // dummy43 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -10646,7 +10646,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder44 + }, { // dummy44 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -10994,7 +10994,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder45 + }, { // dummy45 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -11024,7 +11024,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder46 + }, { // dummy46 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -11054,7 +11054,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder47 + }, { // dummy47 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -11410,7 +11410,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder48 + }, { // dummy48 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -11758,7 +11758,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder49 + }, { // dummy49 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -11788,7 +11788,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder50 + }, { // dummy50 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -11818,7 +11818,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder51 + }, { // dummy51 .channelQuant = {}, .data = 
TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -12174,7 +12174,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder52 + }, { // dummy52 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -12522,7 +12522,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder53 + }, { // dummy53 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -12552,7 +12552,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder54 + }, { // dummy54 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -12904,7 +12904,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder55 + }, { // dummy55 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -13252,7 +13252,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder56 + }, { // dummy56 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -13282,7 +13282,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder57 + }, { // dummy57 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -13640,7 +13640,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder58 + }, { // dummy58 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -13994,7 +13994,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder59 + }, { // dummy59 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -14348,7 +14348,7 @@ .scale 
= 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder60 + }, { // dummy60 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -14702,7 +14702,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder61 + }, { // dummy61 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -15050,7 +15050,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder62 + }, { // dummy62 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -15398,7 +15398,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder63 + }, { // dummy63 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -15428,7 +15428,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder64 + }, { // dummy64 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -15458,7 +15458,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder65 + }, { // dummy65 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -15814,7 +15814,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder66 + }, { // dummy66 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -16162,7 +16162,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder67 + }, { // dummy67 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -16192,7 +16192,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder68 + }, { // dummy68 .channelQuant = {}, .data = 
TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -16222,7 +16222,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder69 + }, { // dummy69 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -16578,7 +16578,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder70 + }, { // dummy70 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -16926,7 +16926,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder71 + }, { // dummy71 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -16956,7 +16956,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder72 + }, { // dummy72 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -16986,7 +16986,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder73 + }, { // dummy73 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -17342,7 +17342,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder74 + }, { // dummy74 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -17690,7 +17690,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder75 + }, { // dummy75 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -17720,7 +17720,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder76 + }, { // dummy76 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -18072,7 +18072,7 @@ .scale = 0.25f, .type = 
TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder77 + }, { // dummy77 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -18420,7 +18420,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder78 + }, { // dummy78 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -18450,7 +18450,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder79 + }, { // dummy79 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -18808,7 +18808,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder80 + }, { // dummy80 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -19162,7 +19162,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder81 + }, { // dummy81 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -19516,7 +19516,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder82 + }, { // dummy82 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -19870,7 +19870,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder83 + }, { // dummy83 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -20218,7 +20218,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder84 + }, { // dummy84 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -20566,7 +20566,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder85 + }, { // dummy85 
.channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -20596,7 +20596,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder86 + }, { // dummy86 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -20626,7 +20626,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder87 + }, { // dummy87 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -20982,7 +20982,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder88 + }, { // dummy88 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -21330,7 +21330,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder89 + }, { // dummy89 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -21360,7 +21360,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder90 + }, { // dummy90 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -21390,7 +21390,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder91 + }, { // dummy91 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -21746,7 +21746,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder92 + }, { // dummy92 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -22094,7 +22094,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder93 + }, { // dummy93 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -22124,7 +22124,7 @@ .scale = 0.0f, .type = 
TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder94 + }, { // dummy94 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -22154,7 +22154,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder95 + }, { // dummy95 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -22510,7 +22510,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder96 + }, { // dummy96 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -22858,7 +22858,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder97 + }, { // dummy97 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -22888,7 +22888,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder98 + }, { // dummy98 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -23240,7 +23240,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder99 + }, { // dummy99 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -23588,7 +23588,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder100 + }, { // dummy100 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -23618,7 +23618,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder101 + }, { // dummy101 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -23976,7 +23976,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder102 + }, { // dummy102 
.channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -24330,7 +24330,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder103 + }, { // dummy103 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -24684,7 +24684,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder104 + }, { // dummy104 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -25038,7 +25038,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder105 + }, { // dummy105 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -25386,7 +25386,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder106 + }, { // dummy106 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -25734,7 +25734,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder107 + }, { // dummy107 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -25764,7 +25764,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder108 + }, { // dummy108 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -25794,7 +25794,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder109 + }, { // dummy109 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -26150,7 +26150,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder110 + }, { // dummy110 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -26498,7 
+26498,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder111 + }, { // dummy111 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -26528,7 +26528,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder112 + }, { // dummy112 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -26558,7 +26558,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder113 + }, { // dummy113 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -26914,7 +26914,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder114 + }, { // dummy114 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -27262,7 +27262,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder115 + }, { // dummy115 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -27292,7 +27292,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder116 + }, { // dummy116 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -27322,7 +27322,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder117 + }, { // dummy117 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -27678,7 +27678,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder118 + }, { // dummy118 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -28026,7 +28026,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder119 + }, { // dummy119 
.channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -28056,7 +28056,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder120 + }, { // dummy120 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -28408,7 +28408,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder121 + }, { // dummy121 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -28756,7 +28756,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder122 + }, { // dummy122 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -28786,7 +28786,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder123 + }, { // dummy123 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -29144,7 +29144,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder124 + }, { // dummy124 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -29498,7 +29498,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder125 + }, { // dummy125 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -29852,7 +29852,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder126 + }, { // dummy126 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -30206,7 +30206,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder127 + }, { // dummy127 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), 
.dimensions = {1}, @@ -30554,7 +30554,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder128 + }, { // dummy128 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -30902,7 +30902,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder129 + }, { // dummy129 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -30932,7 +30932,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder130 + }, { // dummy130 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -30962,7 +30962,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder131 + }, { // dummy131 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -31318,7 +31318,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder132 + }, { // dummy132 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -31666,7 +31666,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder133 + }, { // dummy133 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -31696,7 +31696,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder134 + }, { // dummy134 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -31726,7 +31726,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder135 + }, { // dummy135 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -32082,7 +32082,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder136 
+ }, { // dummy136 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -32430,7 +32430,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder137 + }, { // dummy137 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -32460,7 +32460,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder138 + }, { // dummy138 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -32490,7 +32490,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder139 + }, { // dummy139 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -32846,7 +32846,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder140 + }, { // dummy140 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -33194,7 +33194,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder141 + }, { // dummy141 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -33224,7 +33224,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder142 + }, { // dummy142 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -33576,7 +33576,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder143 + }, { // dummy143 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -33924,7 +33924,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder144 + }, { // dummy144 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = 
{1}, @@ -33954,7 +33954,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder145 + }, { // dummy145 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -34312,7 +34312,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder146 + }, { // dummy146 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -34666,7 +34666,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder147 + }, { // dummy147 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -35020,7 +35020,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder148 + }, { // dummy148 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -35374,7 +35374,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder149 + }, { // dummy149 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -35722,7 +35722,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder150 + }, { // dummy150 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -36070,7 +36070,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder151 + }, { // dummy151 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -36100,7 +36100,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder152 + }, { // dummy152 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -36130,7 +36130,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 
0 - }, { // placeholder153 + }, { // dummy153 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -36486,7 +36486,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder154 + }, { // dummy154 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -36834,7 +36834,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder155 + }, { // dummy155 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -36864,7 +36864,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder156 + }, { // dummy156 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -36894,7 +36894,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder157 + }, { // dummy157 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -37250,7 +37250,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder158 + }, { // dummy158 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -37598,7 +37598,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder159 + }, { // dummy159 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -37628,7 +37628,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder160 + }, { // dummy160 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -37658,7 +37658,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder161 + }, { // dummy161 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ 
-38014,7 +38014,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder162 + }, { // dummy162 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -38362,7 +38362,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder163 + }, { // dummy163 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -38392,7 +38392,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder164 + }, { // dummy164 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -38744,7 +38744,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder165 + }, { // dummy165 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -39092,7 +39092,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder166 + }, { // dummy166 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -39122,7 +39122,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder167 + }, { // dummy167 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -39480,7 +39480,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder168 + }, { // dummy168 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -39834,7 +39834,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder169 + }, { // dummy169 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -40188,7 +40188,7 @@ .scale = 0.25f, .type = 
TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder170 + }, { // dummy170 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -40542,7 +40542,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder171 + }, { // dummy171 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -40890,7 +40890,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder172 + }, { // dummy172 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -41238,7 +41238,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder173 + }, { // dummy173 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -41268,7 +41268,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder174 + }, { // dummy174 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -41298,7 +41298,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder175 + }, { // dummy175 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -41594,7 +41594,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder176 + }, { // dummy176 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -41882,7 +41882,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder177 + }, { // dummy177 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -41912,7 +41912,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder178 + }, { // dummy178 .channelQuant = {}, .data = 
TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -41942,7 +41942,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder179 + }, { // dummy179 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -42238,7 +42238,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder180 + }, { // dummy180 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -42526,7 +42526,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder181 + }, { // dummy181 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -42556,7 +42556,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder182 + }, { // dummy182 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -42586,7 +42586,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder183 + }, { // dummy183 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -42882,7 +42882,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder184 + }, { // dummy184 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -43170,7 +43170,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder185 + }, { // dummy185 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -43200,7 +43200,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder186 + }, { // dummy186 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -43498,7 +43498,7 @@ .scale = 0.25f, .type = 
TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder187 + }, { // dummy187 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -43792,7 +43792,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder188 + }, { // dummy188 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -44080,7 +44080,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder189 + }, { // dummy189 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -44368,7 +44368,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder190 + }, { // dummy190 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -44398,7 +44398,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder191 + }, { // dummy191 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -44428,7 +44428,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder192 + }, { // dummy192 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -44724,7 +44724,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder193 + }, { // dummy193 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -45012,7 +45012,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder194 + }, { // dummy194 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -45042,7 +45042,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder195 + }, { // dummy195 .channelQuant = {}, .data = 
TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -45072,7 +45072,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder196 + }, { // dummy196 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -45368,7 +45368,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder197 + }, { // dummy197 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -45656,7 +45656,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder198 + }, { // dummy198 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -45686,7 +45686,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder199 + }, { // dummy199 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -45716,7 +45716,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder200 + }, { // dummy200 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -46012,7 +46012,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder201 + }, { // dummy201 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -46300,7 +46300,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder202 + }, { // dummy202 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -46330,7 +46330,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder203 + }, { // dummy203 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -46628,7 +46628,7 @@ .scale = 0.25f, .type = 
TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder204 + }, { // dummy204 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -46922,7 +46922,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder205 + }, { // dummy205 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -47210,7 +47210,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder206 + }, { // dummy206 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -47498,7 +47498,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder207 + }, { // dummy207 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -47528,7 +47528,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder208 + }, { // dummy208 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -47558,7 +47558,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder209 + }, { // dummy209 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -47854,7 +47854,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder210 + }, { // dummy210 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -48142,7 +48142,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder211 + }, { // dummy211 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -48172,7 +48172,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder212 + }, { // dummy212 .channelQuant = {}, .data = 
TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -48202,7 +48202,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder213 + }, { // dummy213 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -48498,7 +48498,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder214 + }, { // dummy214 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -48786,7 +48786,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder215 + }, { // dummy215 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -48816,7 +48816,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder216 + }, { // dummy216 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -48846,7 +48846,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder217 + }, { // dummy217 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -49142,7 +49142,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder218 + }, { // dummy218 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -49430,7 +49430,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder219 + }, { // dummy219 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -49460,7 +49460,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder220 + }, { // dummy220 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -49758,7 +49758,7 @@ .scale = 0.5f, .type = 
TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder221 + }, { // dummy221 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -50052,7 +50052,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder222 + }, { // dummy222 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -50340,7 +50340,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder223 + }, { // dummy223 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -50628,7 +50628,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder224 + }, { // dummy224 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -50658,7 +50658,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder225 + }, { // dummy225 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -50688,7 +50688,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder226 + }, { // dummy226 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -50984,7 +50984,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder227 + }, { // dummy227 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -51272,7 +51272,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder228 + }, { // dummy228 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -51302,7 +51302,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder229 + }, { // dummy229 .channelQuant = {}, .data = 
TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -51332,7 +51332,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder230 + }, { // dummy230 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -51628,7 +51628,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder231 + }, { // dummy231 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -51916,7 +51916,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder232 + }, { // dummy232 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -51946,7 +51946,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder233 + }, { // dummy233 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -51976,7 +51976,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder234 + }, { // dummy234 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -52272,7 +52272,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder235 + }, { // dummy235 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -52560,7 +52560,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder236 + }, { // dummy236 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -52590,7 +52590,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder237 + }, { // dummy237 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -52888,7 +52888,7 @@ .scale = 0.5f, .type = 
TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder238 + }, { // dummy238 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -53182,7 +53182,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder239 + }, { // dummy239 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -53470,7 +53470,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder240 + }, { // dummy240 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -53758,7 +53758,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder241 + }, { // dummy241 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -53788,7 +53788,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder242 + }, { // dummy242 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -53818,7 +53818,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder243 + }, { // dummy243 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/heatmap_max_keypoint.example.cpp b/runtime/test/generated/spec_V1_2/heatmap_max_keypoint.example.cpp index 8de0754..73d22fc 100644 --- a/runtime/test/generated/spec_V1_2/heatmap_max_keypoint.example.cpp +++ b/runtime/test/generated/spec_V1_2/heatmap_max_keypoint.example.cpp
@@ -150,7 +150,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -180,7 +180,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -372,7 +372,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -402,7 +402,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -594,7 +594,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -624,7 +624,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -816,7 +816,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -1004,7 +1004,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1034,7 +1034,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = 
TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1226,7 +1226,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1256,7 +1256,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1448,7 +1448,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1478,7 +1478,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1670,7 +1670,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -1858,7 +1858,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1888,7 +1888,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder15 + }, { // dummy15 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2080,7 +2080,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder16 + }, { // dummy16 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2110,7 +2110,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // 
placeholder17 + }, { // dummy17 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2302,7 +2302,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder18 + }, { // dummy18 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -2332,7 +2332,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder19 + }, { // dummy19 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -2524,7 +2524,7 @@ .scale = 0.01f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder20 + }, { // dummy20 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -2712,7 +2712,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder21 + }, { // dummy21 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2742,7 +2742,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder22 + }, { // dummy22 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2934,7 +2934,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder23 + }, { // dummy23 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2964,7 +2964,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder24 + }, { // dummy24 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3156,7 +3156,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder25 + }, { // dummy25 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -3186,7 +3186,7 @@ .scale = 0.0f, 
.type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder26 + }, { // dummy26 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -3378,7 +3378,7 @@ .scale = 0.01f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder27 + }, { // dummy27 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -3566,7 +3566,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder28 + }, { // dummy28 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3596,7 +3596,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder29 + }, { // dummy29 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3788,7 +3788,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder30 + }, { // dummy30 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3818,7 +3818,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder31 + }, { // dummy31 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4010,7 +4010,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder32 + }, { // dummy32 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -4040,7 +4040,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder33 + }, { // dummy33 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -4232,7 +4232,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder34 + }, { // dummy34 .channelQuant = {}, .data = 
TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -4420,7 +4420,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder35 + }, { // dummy35 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4450,7 +4450,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder36 + }, { // dummy36 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4642,7 +4642,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder37 + }, { // dummy37 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4672,7 +4672,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder38 + }, { // dummy38 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4864,7 +4864,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder39 + }, { // dummy39 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -4894,7 +4894,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder40 + }, { // dummy40 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -5086,7 +5086,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder41 + }, { // dummy41 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/instance_normalization.example.cpp b/runtime/test/generated/spec_V1_2/instance_normalization.example.cpp index 92a605f..75ede60 100644 --- a/runtime/test/generated/spec_V1_2/instance_normalization.example.cpp +++ b/runtime/test/generated/spec_V1_2/instance_normalization.example.cpp
@@ -170,7 +170,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -378,7 +378,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -586,7 +586,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -794,7 +794,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1002,7 +1002,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1210,7 +1210,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1418,7 +1418,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1626,7 +1626,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1834,7 +1834,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = 
TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -2042,7 +2042,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2250,7 +2250,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2458,7 +2458,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -2666,7 +2666,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2874,7 +2874,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3082,7 +3082,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -3290,7 +3290,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder15 + }, { // dummy15 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3498,7 +3498,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder16 + }, { // dummy16 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3706,7 +3706,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // 
placeholder17 + }, { // dummy17 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/l2_normalization_axis.example.cpp b/runtime/test/generated/spec_V1_2/l2_normalization_axis.example.cpp index 4025001..42e64ff 100644 --- a/runtime/test/generated/spec_V1_2/l2_normalization_axis.example.cpp +++ b/runtime/test/generated/spec_V1_2/l2_normalization_axis.example.cpp
@@ -110,7 +110,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -258,7 +258,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -406,7 +406,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -554,7 +554,7 @@ .scale = 0.1f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 32 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({32}), .dimensions = {1}, @@ -702,7 +702,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -850,7 +850,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -998,7 +998,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1146,7 +1146,7 @@ .scale = 0.1f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 32 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({32}), .dimensions = {1}, @@ -1294,7 +1294,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = 
TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1442,7 +1442,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1590,7 +1590,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1738,7 +1738,7 @@ .scale = 0.1f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 32 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({32}), .dimensions = {1}, @@ -1886,7 +1886,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2034,7 +2034,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2182,7 +2182,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -2330,7 +2330,7 @@ .scale = 0.1f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 32 - }, { // placeholder15 + }, { // dummy15 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({32}), .dimensions = {1}, @@ -2478,7 +2478,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder16 + }, { // dummy16 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2626,7 +2626,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // 
placeholder17 + }, { // dummy17 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2774,7 +2774,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder18 + }, { // dummy18 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -2922,7 +2922,7 @@ .scale = 0.1f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 32 - }, { // placeholder19 + }, { // dummy19 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({32}), .dimensions = {1}, @@ -3070,7 +3070,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder20 + }, { // dummy20 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3218,7 +3218,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder21 + }, { // dummy21 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3366,7 +3366,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder22 + }, { // dummy22 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -3514,7 +3514,7 @@ .scale = 0.1f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 32 - }, { // placeholder23 + }, { // dummy23 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({32}), .dimensions = {1}, @@ -3662,7 +3662,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder24 + }, { // dummy24 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3810,7 +3810,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder25 + }, { // dummy25 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3958,7 +3958,7 @@ .scale = 0.0f, 
.type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder26 + }, { // dummy26 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -4106,7 +4106,7 @@ .scale = 0.1f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 32 - }, { // placeholder27 + }, { // dummy27 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({32}), .dimensions = {1}, @@ -4254,7 +4254,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder28 + }, { // dummy28 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4402,7 +4402,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder29 + }, { // dummy29 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4550,7 +4550,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder30 + }, { // dummy30 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -4698,7 +4698,7 @@ .scale = 0.1f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 32 - }, { // placeholder31 + }, { // dummy31 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({32}), .dimensions = {1}, @@ -4846,7 +4846,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder32 + }, { // dummy32 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4994,7 +4994,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder33 + }, { // dummy33 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5142,7 +5142,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder34 + }, { // dummy34 .channelQuant = {}, .data = 
TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -5290,7 +5290,7 @@ .scale = 0.1f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 32 - }, { // placeholder35 + }, { // dummy35 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({32}), .dimensions = {1}, @@ -5438,7 +5438,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder36 + }, { // dummy36 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5586,7 +5586,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder37 + }, { // dummy37 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5734,7 +5734,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder38 + }, { // dummy38 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -5882,7 +5882,7 @@ .scale = 0.1f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 32 - }, { // placeholder39 + }, { // dummy39 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({32}), .dimensions = {1}, @@ -6030,7 +6030,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder40 + }, { // dummy40 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -6178,7 +6178,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder41 + }, { // dummy41 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -6326,7 +6326,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder42 + }, { // dummy42 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -6474,7 +6474,7 @@ .scale = 0.1f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 32 
- }, { // placeholder43 + }, { // dummy43 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({32}), .dimensions = {1}, @@ -6622,7 +6622,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder44 + }, { // dummy44 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -6770,7 +6770,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder45 + }, { // dummy45 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -6918,7 +6918,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder46 + }, { // dummy46 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -7066,7 +7066,7 @@ .scale = 0.1f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 32 - }, { // placeholder47 + }, { // dummy47 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({32}), .dimensions = {1}, @@ -7214,7 +7214,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder48 + }, { // dummy48 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -7362,7 +7362,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder49 + }, { // dummy49 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -7510,7 +7510,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder50 + }, { // dummy50 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -7658,7 +7658,7 @@ .scale = 0.1f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 32 - }, { // placeholder51 + }, { // dummy51 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({32}), .dimensions = {1}, @@ -7806,7 +7806,7 @@ .scale = 
0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder52 + }, { // dummy52 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -7954,7 +7954,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder53 + }, { // dummy53 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -8102,7 +8102,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder54 + }, { // dummy54 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -8250,7 +8250,7 @@ .scale = 0.1f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 32 - }, { // placeholder55 + }, { // dummy55 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({32}), .dimensions = {1}, @@ -8398,7 +8398,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder56 + }, { // dummy56 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -8546,7 +8546,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder57 + }, { // dummy57 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -8694,7 +8694,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder58 + }, { // dummy58 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -8842,7 +8842,7 @@ .scale = 0.1f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 32 - }, { // placeholder59 + }, { // dummy59 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({32}), .dimensions = {1}, @@ -8990,7 +8990,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder60 + }, { // dummy60 .channelQuant = {}, .data = 
TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -9138,7 +9138,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder61 + }, { // dummy61 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -9286,7 +9286,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder62 + }, { // dummy62 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -9434,7 +9434,7 @@ .scale = 0.1f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 32 - }, { // placeholder63 + }, { // dummy63 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({32}), .dimensions = {1}, @@ -9582,7 +9582,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder64 + }, { // dummy64 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -9730,7 +9730,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder65 + }, { // dummy65 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -9878,7 +9878,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder66 + }, { // dummy66 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -10026,7 +10026,7 @@ .scale = 0.1f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 32 - }, { // placeholder67 + }, { // dummy67 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({32}), .dimensions = {1}, @@ -10174,7 +10174,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder68 + }, { // dummy68 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -10322,7 +10322,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - 
}, { // placeholder69 + }, { // dummy69 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -10470,7 +10470,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder70 + }, { // dummy70 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -10618,7 +10618,7 @@ .scale = 0.1f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 32 - }, { // placeholder71 + }, { // dummy71 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({32}), .dimensions = {1}, @@ -10766,7 +10766,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder72 + }, { // dummy72 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -10914,7 +10914,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder73 + }, { // dummy73 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -11062,7 +11062,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder74 + }, { // dummy74 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -11210,7 +11210,7 @@ .scale = 0.1f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 32 - }, { // placeholder75 + }, { // dummy75 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({32}), .dimensions = {1}, @@ -11358,7 +11358,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder76 + }, { // dummy76 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -11506,7 +11506,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder77 + }, { // dummy77 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -11654,7 
+11654,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder78 + }, { // dummy78 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -11802,7 +11802,7 @@ .scale = 0.1f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 32 - }, { // placeholder79 + }, { // dummy79 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({32}), .dimensions = {1}, @@ -11950,7 +11950,7 @@ .scale = 0.904414f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 246 - }, { // placeholder80 + }, { // dummy80 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({246}), .dimensions = {1}, @@ -12098,7 +12098,7 @@ .scale = 0.904414f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 246 - }, { // placeholder81 + }, { // dummy81 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({246}), .dimensions = {1}, @@ -12246,7 +12246,7 @@ .scale = 0.904414f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 246 - }, { // placeholder82 + }, { // dummy82 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({246}), .dimensions = {1}, @@ -12394,7 +12394,7 @@ .scale = 0.904414f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 246 - }, { // placeholder83 + }, { // dummy83 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({246}), .dimensions = {1}, @@ -12542,7 +12542,7 @@ .scale = 0.904414f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 246 - }, { // placeholder84 + }, { // dummy84 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({246}), .dimensions = {1}, @@ -12690,7 +12690,7 @@ .scale = 0.904414f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 246 - }, { // placeholder85 + }, { // dummy85 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({246}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/l2_normalization_v1_2.example.cpp b/runtime/test/generated/spec_V1_2/l2_normalization_v1_2.example.cpp index 4ccfd0b..59b0928 100644 --- a/runtime/test/generated/spec_V1_2/l2_normalization_v1_2.example.cpp +++ b/runtime/test/generated/spec_V1_2/l2_normalization_v1_2.example.cpp
@@ -90,7 +90,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -218,7 +218,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -346,7 +346,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -474,7 +474,7 @@ .scale = 0.1f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 32 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({32}), .dimensions = {1}, @@ -602,7 +602,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -730,7 +730,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -858,7 +858,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -986,7 +986,7 @@ .scale = 0.1f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 32 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({32}), .dimensions = {1}, @@ -1114,7 +1114,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = 
TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1242,7 +1242,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1370,7 +1370,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1498,7 +1498,7 @@ .scale = 0.1f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 32 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({32}), .dimensions = {1}, @@ -1626,7 +1626,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1754,7 +1754,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1882,7 +1882,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -2010,7 +2010,7 @@ .scale = 0.1f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 32 - }, { // placeholder15 + }, { // dummy15 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({32}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/l2_pool_v1_2.example.cpp b/runtime/test/generated/spec_V1_2/l2_pool_v1_2.example.cpp index eb8eaa8..2a7ee72 100644 --- a/runtime/test/generated/spec_V1_2/l2_pool_v1_2.example.cpp +++ b/runtime/test/generated/spec_V1_2/l2_pool_v1_2.example.cpp
@@ -290,7 +290,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -618,7 +618,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -946,7 +946,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1274,7 +1274,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1602,7 +1602,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1930,7 +1930,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -2198,7 +2198,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2466,7 +2466,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2734,7 +2734,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = 
TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -3002,7 +3002,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3270,7 +3270,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3538,7 +3538,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -3866,7 +3866,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4194,7 +4194,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4522,7 +4522,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -4850,7 +4850,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder15 + }, { // dummy15 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5178,7 +5178,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder16 + }, { // dummy16 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5506,7 +5506,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // 
placeholder17 + }, { // dummy17 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/layer_norm_lstm.example.cpp b/runtime/test/generated/spec_V1_2/layer_norm_lstm.example.cpp index e0e739e..49f05c6 100644 --- a/runtime/test/generated/spec_V1_2/layer_norm_lstm.example.cpp +++ b/runtime/test/generated/spec_V1_2/layer_norm_lstm.example.cpp
@@ -670,7 +670,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -700,7 +700,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -730,7 +730,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -760,7 +760,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -790,7 +790,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -820,7 +820,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -850,7 +850,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -880,7 +880,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -910,7 +910,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = 
{1}, @@ -940,7 +940,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -970,7 +970,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1000,7 +1000,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1030,7 +1030,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1060,7 +1060,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1090,7 +1090,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1120,7 +1120,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder15 + }, { // dummy15 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1150,7 +1150,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder16 + }, { // dummy16 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1180,7 +1180,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder17 + }, { // dummy17 .channelQuant = {}, .data = 
TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1210,7 +1210,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder18 + }, { // dummy18 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1240,7 +1240,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder19 + }, { // dummy19 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1270,7 +1270,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder20 + }, { // dummy20 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1300,7 +1300,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder21 + }, { // dummy21 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1330,7 +1330,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder22 + }, { // dummy22 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2126,7 +2126,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder23 + }, { // dummy23 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2156,7 +2156,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder24 + }, { // dummy24 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2186,7 +2186,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder25 + }, { // dummy25 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2216,7 +2216,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder26 + 
}, { // dummy26 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2246,7 +2246,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder27 + }, { // dummy27 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2276,7 +2276,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder28 + }, { // dummy28 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2306,7 +2306,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder29 + }, { // dummy29 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2336,7 +2336,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder30 + }, { // dummy30 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2366,7 +2366,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder31 + }, { // dummy31 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2396,7 +2396,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder32 + }, { // dummy32 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2426,7 +2426,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder33 + }, { // dummy33 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2456,7 +2456,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder34 + }, { // dummy34 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2486,7 +2486,7 @@ .scale = 0.0f, .type = 
TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder35 + }, { // dummy35 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2516,7 +2516,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder36 + }, { // dummy36 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2546,7 +2546,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder37 + }, { // dummy37 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2576,7 +2576,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder38 + }, { // dummy38 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2606,7 +2606,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder39 + }, { // dummy39 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2636,7 +2636,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder40 + }, { // dummy40 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2666,7 +2666,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder41 + }, { // dummy41 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2696,7 +2696,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder42 + }, { // dummy42 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2726,7 +2726,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder43 + }, { // dummy43 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, 
@@ -2756,7 +2756,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder44 + }, { // dummy44 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2786,7 +2786,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder45 + }, { // dummy45 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3582,7 +3582,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder46 + }, { // dummy46 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3612,7 +3612,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder47 + }, { // dummy47 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3642,7 +3642,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder48 + }, { // dummy48 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3672,7 +3672,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder49 + }, { // dummy49 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3702,7 +3702,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder50 + }, { // dummy50 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3732,7 +3732,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder51 + }, { // dummy51 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3762,7 +3762,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder52 + }, { // dummy52 .channelQuant = {}, .data = 
TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3792,7 +3792,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder53 + }, { // dummy53 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3822,7 +3822,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder54 + }, { // dummy54 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3852,7 +3852,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder55 + }, { // dummy55 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3882,7 +3882,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder56 + }, { // dummy56 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3912,7 +3912,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder57 + }, { // dummy57 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3942,7 +3942,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder58 + }, { // dummy58 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3972,7 +3972,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder59 + }, { // dummy59 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4002,7 +4002,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder60 + }, { // dummy60 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4032,7 +4032,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder61 + 
}, { // dummy61 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4062,7 +4062,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder62 + }, { // dummy62 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4092,7 +4092,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder63 + }, { // dummy63 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4122,7 +4122,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder64 + }, { // dummy64 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4152,7 +4152,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder65 + }, { // dummy65 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4182,7 +4182,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder66 + }, { // dummy66 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4212,7 +4212,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder67 + }, { // dummy67 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4242,7 +4242,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder68 + }, { // dummy68 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5038,7 +5038,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder69 + }, { // dummy69 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5068,7 +5068,7 @@ .scale = 0.0f, .type = 
TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder70 + }, { // dummy70 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5098,7 +5098,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder71 + }, { // dummy71 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5128,7 +5128,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder72 + }, { // dummy72 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5158,7 +5158,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder73 + }, { // dummy73 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5188,7 +5188,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder74 + }, { // dummy74 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5218,7 +5218,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder75 + }, { // dummy75 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5248,7 +5248,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder76 + }, { // dummy76 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5278,7 +5278,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder77 + }, { // dummy77 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5308,7 +5308,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder78 + }, { // dummy78 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, 
@@ -5338,7 +5338,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder79 + }, { // dummy79 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5368,7 +5368,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder80 + }, { // dummy80 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5398,7 +5398,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder81 + }, { // dummy81 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5428,7 +5428,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder82 + }, { // dummy82 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5458,7 +5458,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder83 + }, { // dummy83 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5488,7 +5488,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder84 + }, { // dummy84 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5518,7 +5518,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder85 + }, { // dummy85 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5548,7 +5548,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder86 + }, { // dummy86 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -6324,7 +6324,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder87 + }, { // dummy87 .channelQuant = {}, .data = 
TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -6354,7 +6354,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder88 + }, { // dummy88 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -6384,7 +6384,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder89 + }, { // dummy89 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -6414,7 +6414,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder90 + }, { // dummy90 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -6444,7 +6444,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder91 + }, { // dummy91 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -6474,7 +6474,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder92 + }, { // dummy92 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -6504,7 +6504,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder93 + }, { // dummy93 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -6534,7 +6534,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder94 + }, { // dummy94 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -6564,7 +6564,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder95 + }, { // dummy95 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -6594,7 +6594,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder96 + 
}, { // dummy96 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -6624,7 +6624,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder97 + }, { // dummy97 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -6654,7 +6654,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder98 + }, { // dummy98 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -6684,7 +6684,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder99 + }, { // dummy99 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -6714,7 +6714,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder100 + }, { // dummy100 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -6744,7 +6744,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder101 + }, { // dummy101 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -6774,7 +6774,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder102 + }, { // dummy102 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -6804,7 +6804,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder103 + }, { // dummy103 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -6834,7 +6834,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder104 + }, { // dummy104 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -7610,7 +7610,7 @@ .scale = 0.0f, .type = 
TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder105 + }, { // dummy105 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -7640,7 +7640,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder106 + }, { // dummy106 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -7670,7 +7670,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder107 + }, { // dummy107 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -7700,7 +7700,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder108 + }, { // dummy108 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -7730,7 +7730,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder109 + }, { // dummy109 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -7760,7 +7760,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder110 + }, { // dummy110 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -7790,7 +7790,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder111 + }, { // dummy111 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -7820,7 +7820,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder112 + }, { // dummy112 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -7850,7 +7850,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder113 + }, { // dummy113 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), 
.dimensions = {1}, @@ -7880,7 +7880,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder114 + }, { // dummy114 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -7910,7 +7910,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder115 + }, { // dummy115 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -7940,7 +7940,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder116 + }, { // dummy116 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -7970,7 +7970,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder117 + }, { // dummy117 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -8000,7 +8000,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder118 + }, { // dummy118 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -8030,7 +8030,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder119 + }, { // dummy119 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -8060,7 +8060,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder120 + }, { // dummy120 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -8090,7 +8090,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder121 + }, { // dummy121 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -8120,7 +8120,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder122 + }, { // dummy122 
.channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/less.example.cpp b/runtime/test/generated/spec_V1_2/less.example.cpp index 01159f8..3f130a7 100644 --- a/runtime/test/generated/spec_V1_2/less.example.cpp +++ b/runtime/test/generated/spec_V1_2/less.example.cpp
@@ -110,7 +110,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -140,7 +140,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -349,7 +349,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -379,7 +379,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -531,7 +531,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -561,7 +561,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -713,7 +713,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -743,7 +743,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -952,7 +952,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), 
.dimensions = {1}, @@ -982,7 +982,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1134,7 +1134,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1164,7 +1164,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1316,7 +1316,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -1346,7 +1346,7 @@ .scale = 2.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -1498,7 +1498,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -1528,7 +1528,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 129 - }, { // placeholder15 + }, { // dummy15 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({129}), .dimensions = {1}, @@ -1680,7 +1680,7 @@ .scale = 1.64771f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 31 - }, { // placeholder16 + }, { // dummy16 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({31}), .dimensions = {1}, @@ -1710,7 +1710,7 @@ .scale = 1.49725f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 240 - }, { // 
placeholder17 + }, { // dummy17 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({240}), .dimensions = {1}, @@ -1862,7 +1862,7 @@ .scale = 1.49725f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 240 - }, { // placeholder18 + }, { // dummy18 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({240}), .dimensions = {1}, @@ -1892,7 +1892,7 @@ .scale = 1.64771f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 31 - }, { // placeholder19 + }, { // dummy19 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({31}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/less_equal.example.cpp b/runtime/test/generated/spec_V1_2/less_equal.example.cpp index 40f6ea1..857692a 100644 --- a/runtime/test/generated/spec_V1_2/less_equal.example.cpp +++ b/runtime/test/generated/spec_V1_2/less_equal.example.cpp
@@ -110,7 +110,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -140,7 +140,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -349,7 +349,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -379,7 +379,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -531,7 +531,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -561,7 +561,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -713,7 +713,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -743,7 +743,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -952,7 +952,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), 
.dimensions = {1}, @@ -982,7 +982,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1134,7 +1134,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1164,7 +1164,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1316,7 +1316,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -1346,7 +1346,7 @@ .scale = 2.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -1498,7 +1498,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -1528,7 +1528,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 129 - }, { // placeholder15 + }, { // dummy15 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({129}), .dimensions = {1}, @@ -1680,7 +1680,7 @@ .scale = 1.64771f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 31 - }, { // placeholder16 + }, { // dummy16 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({31}), .dimensions = {1}, @@ -1710,7 +1710,7 @@ .scale = 1.49725f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 240 - }, { // 
placeholder17 + }, { // dummy17 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({240}), .dimensions = {1}, @@ -1862,7 +1862,7 @@ .scale = 1.49725f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 240 - }, { // placeholder18 + }, { // dummy18 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({240}), .dimensions = {1}, @@ -1892,7 +1892,7 @@ .scale = 1.64771f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 31 - }, { // placeholder19 + }, { // dummy19 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({31}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/local_response_normalization_v1_2.example.cpp b/runtime/test/generated/spec_V1_2/local_response_normalization_v1_2.example.cpp index 5bb491f..b52ba3e 100644 --- a/runtime/test/generated/spec_V1_2/local_response_normalization_v1_2.example.cpp +++ b/runtime/test/generated/spec_V1_2/local_response_normalization_v1_2.example.cpp
@@ -190,7 +190,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -418,7 +418,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -646,7 +646,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -874,7 +874,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1102,7 +1102,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1330,7 +1330,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1558,7 +1558,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1786,7 +1786,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2014,7 +2014,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = 
TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -2242,7 +2242,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2470,7 +2470,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2698,7 +2698,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -2926,7 +2926,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3154,7 +3154,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3382,7 +3382,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -3610,7 +3610,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder15 + }, { // dummy15 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3838,7 +3838,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder16 + }, { // dummy16 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4066,7 +4066,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // 
placeholder17 + }, { // dummy17 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -4294,7 +4294,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder18 + }, { // dummy18 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4522,7 +4522,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder19 + }, { // dummy19 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4750,7 +4750,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder20 + }, { // dummy20 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -4978,7 +4978,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder21 + }, { // dummy21 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5206,7 +5206,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder22 + }, { // dummy22 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5434,7 +5434,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder23 + }, { // dummy23 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -5662,7 +5662,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder24 + }, { // dummy24 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5890,7 +5890,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder25 + }, { // dummy25 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -6118,7 +6118,7 @@ .scale = 0.0f, .type = 
TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder26 + }, { // dummy26 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -6346,7 +6346,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder27 + }, { // dummy27 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -6574,7 +6574,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder28 + }, { // dummy28 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -6802,7 +6802,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder29 + }, { // dummy29 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -7030,7 +7030,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder30 + }, { // dummy30 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -7258,7 +7258,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder31 + }, { // dummy31 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -7486,7 +7486,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder32 + }, { // dummy32 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -7714,7 +7714,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder33 + }, { // dummy33 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -7942,7 +7942,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder34 + }, { // dummy34 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions 
= {1}, @@ -8170,7 +8170,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder35 + }, { // dummy35 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -8398,7 +8398,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder36 + }, { // dummy36 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -8626,7 +8626,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder37 + }, { // dummy37 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -8854,7 +8854,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder38 + }, { // dummy38 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -9082,7 +9082,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder39 + }, { // dummy39 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -9310,7 +9310,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder40 + }, { // dummy40 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -9538,7 +9538,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder41 + }, { // dummy41 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -9766,7 +9766,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder42 + }, { // dummy42 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -9994,7 +9994,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder43 + }, { // dummy43 .channelQuant = {}, .data = 
TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -10222,7 +10222,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder44 + }, { // dummy44 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -10450,7 +10450,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder45 + }, { // dummy45 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -10678,7 +10678,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder46 + }, { // dummy46 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -10906,7 +10906,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder47 + }, { // dummy47 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -11134,7 +11134,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder48 + }, { // dummy48 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -11362,7 +11362,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder49 + }, { // dummy49 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -11590,7 +11590,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder50 + }, { // dummy50 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -11818,7 +11818,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder51 + }, { // dummy51 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -12046,7 +12046,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 
- }, { // placeholder52 + }, { // dummy52 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -12274,7 +12274,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder53 + }, { // dummy53 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -12502,7 +12502,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder54 + }, { // dummy54 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -12730,7 +12730,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder55 + }, { // dummy55 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -12958,7 +12958,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder56 + }, { // dummy56 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -13186,7 +13186,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder57 + }, { // dummy57 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -13414,7 +13414,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder58 + }, { // dummy58 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -13642,7 +13642,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder59 + }, { // dummy59 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -13870,7 +13870,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder60 + }, { // dummy60 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -14098,7 +14098,7 @@ 
.scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder61 + }, { // dummy61 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -14326,7 +14326,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder62 + }, { // dummy62 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -14554,7 +14554,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder63 + }, { // dummy63 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -14782,7 +14782,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder64 + }, { // dummy64 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -15010,7 +15010,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder65 + }, { // dummy65 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -15238,7 +15238,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder66 + }, { // dummy66 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -15466,7 +15466,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder67 + }, { // dummy67 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -15694,7 +15694,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder68 + }, { // dummy68 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -15922,7 +15922,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder69 + }, { // dummy69 .channelQuant = {}, .data = 
TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -16150,7 +16150,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder70 + }, { // dummy70 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -16378,7 +16378,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder71 + }, { // dummy71 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -16606,7 +16606,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder72 + }, { // dummy72 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -16834,7 +16834,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder73 + }, { // dummy73 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -17062,7 +17062,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder74 + }, { // dummy74 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -17290,7 +17290,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder75 + }, { // dummy75 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -17518,7 +17518,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder76 + }, { // dummy76 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -17746,7 +17746,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder77 + }, { // dummy77 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -17974,7 +17974,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 
- }, { // placeholder78 + }, { // dummy78 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -18202,7 +18202,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder79 + }, { // dummy79 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -18430,7 +18430,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder80 + }, { // dummy80 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -18658,7 +18658,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder81 + }, { // dummy81 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -18886,7 +18886,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder82 + }, { // dummy82 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -19114,7 +19114,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder83 + }, { // dummy83 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -19342,7 +19342,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder84 + }, { // dummy84 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -19570,7 +19570,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder85 + }, { // dummy85 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -19798,7 +19798,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder86 + }, { // dummy86 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -20026,7 +20026,7 @@ 
.scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder87 + }, { // dummy87 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -20254,7 +20254,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder88 + }, { // dummy88 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -20482,7 +20482,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder89 + }, { // dummy89 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -20710,7 +20710,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder90 + }, { // dummy90 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -20938,7 +20938,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder91 + }, { // dummy91 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -21166,7 +21166,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder92 + }, { // dummy92 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -21394,7 +21394,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder93 + }, { // dummy93 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -21622,7 +21622,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder94 + }, { // dummy94 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -21850,7 +21850,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder95 + }, { // dummy95 .channelQuant = {}, .data = 
TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -22078,7 +22078,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder96 + }, { // dummy96 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -22306,7 +22306,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder97 + }, { // dummy97 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -22534,7 +22534,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder98 + }, { // dummy98 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -22762,7 +22762,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder99 + }, { // dummy99 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -22990,7 +22990,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder100 + }, { // dummy100 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -23218,7 +23218,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder101 + }, { // dummy101 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -23446,7 +23446,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder102 + }, { // dummy102 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -23674,7 +23674,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder103 + }, { // dummy103 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -23902,7 +23902,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, 
.zeroPoint = 0 - }, { // placeholder104 + }, { // dummy104 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -24130,7 +24130,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder105 + }, { // dummy105 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -24358,7 +24358,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder106 + }, { // dummy106 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -24586,7 +24586,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder107 + }, { // dummy107 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -24814,7 +24814,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder108 + }, { // dummy108 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -25042,7 +25042,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder109 + }, { // dummy109 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -25270,7 +25270,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder110 + }, { // dummy110 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -25498,7 +25498,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder111 + }, { // dummy111 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -25726,7 +25726,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder112 + }, { // dummy112 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), 
.dimensions = {1}, @@ -25954,7 +25954,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder113 + }, { // dummy113 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -26182,7 +26182,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder114 + }, { // dummy114 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -26410,7 +26410,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder115 + }, { // dummy115 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -26638,7 +26638,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder116 + }, { // dummy116 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -26866,7 +26866,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder117 + }, { // dummy117 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -27094,7 +27094,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder118 + }, { // dummy118 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -27322,7 +27322,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder119 + }, { // dummy119 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -27530,7 +27530,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder120 + }, { // dummy120 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -27738,7 +27738,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder121 + 
}, { // dummy121 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -27946,7 +27946,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder122 + }, { // dummy122 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -28154,7 +28154,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder123 + }, { // dummy123 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -28362,7 +28362,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder124 + }, { // dummy124 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -28570,7 +28570,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder125 + }, { // dummy125 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -28778,7 +28778,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder126 + }, { // dummy126 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -28986,7 +28986,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder127 + }, { // dummy127 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -29194,7 +29194,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder128 + }, { // dummy128 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/logistic_v1_2.example.cpp b/runtime/test/generated/spec_V1_2/logistic_v1_2.example.cpp index 8a9590c..ef601f5 100644 --- a/runtime/test/generated/spec_V1_2/logistic_v1_2.example.cpp +++ b/runtime/test/generated/spec_V1_2/logistic_v1_2.example.cpp
@@ -90,7 +90,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -218,7 +218,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/lsh_projection_float16.example.cpp b/runtime/test/generated/spec_V1_2/lsh_projection_float16.example.cpp index 2c4e871..4a57101 100644 --- a/runtime/test/generated/spec_V1_2/lsh_projection_float16.example.cpp +++ b/runtime/test/generated/spec_V1_2/lsh_projection_float16.example.cpp
@@ -150,7 +150,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -338,7 +338,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -368,7 +368,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/lstm2_float16.example.cpp b/runtime/test/generated/spec_V1_2/lstm2_float16.example.cpp index 3c23c7c..553a691 100644 --- a/runtime/test/generated/spec_V1_2/lstm2_float16.example.cpp +++ b/runtime/test/generated/spec_V1_2/lstm2_float16.example.cpp
@@ -590,7 +590,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -620,7 +620,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -650,7 +650,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -680,7 +680,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -710,7 +710,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -740,7 +740,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -770,7 +770,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -800,7 +800,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -830,7 +830,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = 
TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -860,7 +860,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -890,7 +890,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -920,7 +920,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -950,7 +950,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -980,7 +980,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/lstm2_state2_float16.example.cpp b/runtime/test/generated/spec_V1_2/lstm2_state2_float16.example.cpp index 2f8c96c..174ad82 100644 --- a/runtime/test/generated/spec_V1_2/lstm2_state2_float16.example.cpp +++ b/runtime/test/generated/spec_V1_2/lstm2_state2_float16.example.cpp
@@ -590,7 +590,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -620,7 +620,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -650,7 +650,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -680,7 +680,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -710,7 +710,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -740,7 +740,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -770,7 +770,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -800,7 +800,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -830,7 +830,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = 
TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -860,7 +860,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -890,7 +890,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -920,7 +920,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -950,7 +950,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -980,7 +980,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/lstm2_state_float16.example.cpp b/runtime/test/generated/spec_V1_2/lstm2_state_float16.example.cpp index 033362d..3ca5c64 100644 --- a/runtime/test/generated/spec_V1_2/lstm2_state_float16.example.cpp +++ b/runtime/test/generated/spec_V1_2/lstm2_state_float16.example.cpp
@@ -590,7 +590,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -620,7 +620,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -650,7 +650,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -680,7 +680,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -710,7 +710,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -740,7 +740,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -770,7 +770,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -800,7 +800,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -830,7 +830,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = 
TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -860,7 +860,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -890,7 +890,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -920,7 +920,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -950,7 +950,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -980,7 +980,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/lstm3_float16.example.cpp b/runtime/test/generated/spec_V1_2/lstm3_float16.example.cpp index 654aef8..1e4db7e 100644 --- a/runtime/test/generated/spec_V1_2/lstm3_float16.example.cpp +++ b/runtime/test/generated/spec_V1_2/lstm3_float16.example.cpp
@@ -590,7 +590,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -620,7 +620,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -650,7 +650,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -680,7 +680,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -710,7 +710,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -740,7 +740,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -770,7 +770,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -800,7 +800,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -830,7 +830,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = 
TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -860,7 +860,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -890,7 +890,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -920,7 +920,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -950,7 +950,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -980,7 +980,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1010,7 +1010,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1040,7 +1040,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder15 + }, { // dummy15 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1070,7 +1070,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder16 + }, { // dummy16 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1100,7 +1100,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // 
placeholder17 + }, { // dummy17 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1130,7 +1130,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder18 + }, { // dummy18 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/lstm3_state2_float16.example.cpp b/runtime/test/generated/spec_V1_2/lstm3_state2_float16.example.cpp index 5773734..bfb38a7 100644 --- a/runtime/test/generated/spec_V1_2/lstm3_state2_float16.example.cpp +++ b/runtime/test/generated/spec_V1_2/lstm3_state2_float16.example.cpp
@@ -590,7 +590,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -620,7 +620,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -650,7 +650,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -680,7 +680,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -710,7 +710,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -740,7 +740,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -770,7 +770,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -800,7 +800,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -830,7 +830,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = 
TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -860,7 +860,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -890,7 +890,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -920,7 +920,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -950,7 +950,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -980,7 +980,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1010,7 +1010,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1040,7 +1040,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder15 + }, { // dummy15 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1070,7 +1070,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder16 + }, { // dummy16 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1100,7 +1100,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // 
placeholder17 + }, { // dummy17 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1130,7 +1130,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder18 + }, { // dummy18 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/lstm3_state3_float16.example.cpp b/runtime/test/generated/spec_V1_2/lstm3_state3_float16.example.cpp index 2c4bf54..5cc2a1c 100644 --- a/runtime/test/generated/spec_V1_2/lstm3_state3_float16.example.cpp +++ b/runtime/test/generated/spec_V1_2/lstm3_state3_float16.example.cpp
@@ -590,7 +590,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -620,7 +620,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -650,7 +650,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -680,7 +680,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -710,7 +710,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -740,7 +740,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -770,7 +770,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -800,7 +800,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -830,7 +830,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = 
TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -860,7 +860,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -890,7 +890,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -920,7 +920,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -950,7 +950,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -980,7 +980,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1010,7 +1010,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1040,7 +1040,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder15 + }, { // dummy15 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1070,7 +1070,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder16 + }, { // dummy16 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1100,7 +1100,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // 
placeholder17 + }, { // dummy17 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1130,7 +1130,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder18 + }, { // dummy18 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/lstm3_state_float16.example.cpp b/runtime/test/generated/spec_V1_2/lstm3_state_float16.example.cpp index 62db277..a16b6ae 100644 --- a/runtime/test/generated/spec_V1_2/lstm3_state_float16.example.cpp +++ b/runtime/test/generated/spec_V1_2/lstm3_state_float16.example.cpp
@@ -590,7 +590,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -620,7 +620,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -650,7 +650,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -680,7 +680,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -710,7 +710,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -740,7 +740,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -770,7 +770,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -800,7 +800,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -830,7 +830,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = 
TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -860,7 +860,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -890,7 +890,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -920,7 +920,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -950,7 +950,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -980,7 +980,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1010,7 +1010,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1040,7 +1040,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder15 + }, { // dummy15 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1070,7 +1070,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder16 + }, { // dummy16 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1100,7 +1100,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // 
placeholder17 + }, { // dummy17 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1130,7 +1130,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder18 + }, { // dummy18 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/lstm_float16.example.cpp b/runtime/test/generated/spec_V1_2/lstm_float16.example.cpp index 33979a4..8d4c6ea 100644 --- a/runtime/test/generated/spec_V1_2/lstm_float16.example.cpp +++ b/runtime/test/generated/spec_V1_2/lstm_float16.example.cpp
@@ -590,7 +590,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -620,7 +620,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -650,7 +650,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -680,7 +680,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -710,7 +710,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -740,7 +740,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -770,7 +770,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -800,7 +800,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -830,7 +830,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = 
TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -860,7 +860,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -890,7 +890,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -920,7 +920,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -950,7 +950,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -980,7 +980,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1010,7 +1010,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/lstm_state2_float16.example.cpp b/runtime/test/generated/spec_V1_2/lstm_state2_float16.example.cpp index 88b8d83..51c7e9d 100644 --- a/runtime/test/generated/spec_V1_2/lstm_state2_float16.example.cpp +++ b/runtime/test/generated/spec_V1_2/lstm_state2_float16.example.cpp
@@ -590,7 +590,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -620,7 +620,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -650,7 +650,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -680,7 +680,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -710,7 +710,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -740,7 +740,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -770,7 +770,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -800,7 +800,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -830,7 +830,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = 
TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -860,7 +860,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -890,7 +890,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -920,7 +920,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -950,7 +950,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -980,7 +980,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1010,7 +1010,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/lstm_state_float16.example.cpp b/runtime/test/generated/spec_V1_2/lstm_state_float16.example.cpp index 2a6a61e..b589b69 100644 --- a/runtime/test/generated/spec_V1_2/lstm_state_float16.example.cpp +++ b/runtime/test/generated/spec_V1_2/lstm_state_float16.example.cpp
@@ -590,7 +590,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -620,7 +620,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -650,7 +650,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -680,7 +680,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -710,7 +710,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -740,7 +740,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -770,7 +770,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -800,7 +800,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -830,7 +830,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = 
TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -860,7 +860,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -890,7 +890,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -920,7 +920,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -950,7 +950,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -980,7 +980,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1010,7 +1010,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/max_pool_v1_2.example.cpp b/runtime/test/generated/spec_V1_2/max_pool_v1_2.example.cpp index 32ec568..e3b9125 100644 --- a/runtime/test/generated/spec_V1_2/max_pool_v1_2.example.cpp +++ b/runtime/test/generated/spec_V1_2/max_pool_v1_2.example.cpp
@@ -290,7 +290,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -618,7 +618,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -946,7 +946,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -1274,7 +1274,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1602,7 +1602,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1930,7 +1930,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2258,7 +2258,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -2586,7 +2586,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -2914,7 +2914,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = 
TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3242,7 +3242,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3570,7 +3570,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -3898,7 +3898,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -4226,7 +4226,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4554,7 +4554,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4882,7 +4882,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -5210,7 +5210,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder15 + }, { // dummy15 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -5538,7 +5538,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder16 + }, { // dummy16 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5866,7 +5866,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // 
placeholder17 + }, { // dummy17 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -6194,7 +6194,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder18 + }, { // dummy18 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -6522,7 +6522,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder19 + }, { // dummy19 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -6850,7 +6850,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder20 + }, { // dummy20 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -7178,7 +7178,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder21 + }, { // dummy21 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -7506,7 +7506,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder22 + }, { // dummy22 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -7834,7 +7834,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder23 + }, { // dummy23 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -8102,7 +8102,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder24 + }, { // dummy24 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -8370,7 +8370,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder25 + }, { // dummy25 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -8638,7 +8638,7 @@ .scale = 0.25f, .type 
= TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder26 + }, { // dummy26 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -8906,7 +8906,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder27 + }, { // dummy27 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -9174,7 +9174,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder28 + }, { // dummy28 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -9442,7 +9442,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder29 + }, { // dummy29 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -9710,7 +9710,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder30 + }, { // dummy30 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -9978,7 +9978,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder31 + }, { // dummy31 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/maximum.example.cpp b/runtime/test/generated/spec_V1_2/maximum.example.cpp index 108fbe7..c2e9d08 100644 --- a/runtime/test/generated/spec_V1_2/maximum.example.cpp +++ b/runtime/test/generated/spec_V1_2/maximum.example.cpp
@@ -110,7 +110,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -140,7 +140,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -292,7 +292,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -322,7 +322,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -474,7 +474,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -504,7 +504,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -713,7 +713,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 127 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({127}), .dimensions = {1}, @@ -743,7 +743,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -895,7 +895,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = 
TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -925,7 +925,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1077,7 +1077,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1107,7 +1107,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1259,7 +1259,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1289,7 +1289,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1498,7 +1498,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 127 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({127}), .dimensions = {1}, @@ -1528,7 +1528,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder15 + }, { // dummy15 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -1680,7 +1680,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder16 + }, { // dummy16 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -1710,7 +1710,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint 
= 128 - }, { // placeholder17 + }, { // dummy17 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/mean_float16.example.cpp b/runtime/test/generated/spec_V1_2/mean_float16.example.cpp index 2767c50..be935ea 100644 --- a/runtime/test/generated/spec_V1_2/mean_float16.example.cpp +++ b/runtime/test/generated/spec_V1_2/mean_float16.example.cpp
@@ -130,7 +130,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -298,7 +298,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/minimum.example.cpp b/runtime/test/generated/spec_V1_2/minimum.example.cpp index d9f22a8..5f6e064 100644 --- a/runtime/test/generated/spec_V1_2/minimum.example.cpp +++ b/runtime/test/generated/spec_V1_2/minimum.example.cpp
@@ -110,7 +110,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -140,7 +140,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -292,7 +292,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -322,7 +322,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -474,7 +474,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -504,7 +504,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -713,7 +713,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 127 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({127}), .dimensions = {1}, @@ -743,7 +743,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -895,7 +895,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = 
TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -925,7 +925,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1077,7 +1077,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1107,7 +1107,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1259,7 +1259,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1289,7 +1289,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1498,7 +1498,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 127 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({127}), .dimensions = {1}, @@ -1528,7 +1528,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder15 + }, { // dummy15 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -1680,7 +1680,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder16 + }, { // dummy16 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -1710,7 +1710,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint 
= 128 - }, { // placeholder17 + }, { // dummy17 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/mul_v1_2.example.cpp b/runtime/test/generated/spec_V1_2/mul_v1_2.example.cpp index 24eff4e..07d0e61 100644 --- a/runtime/test/generated/spec_V1_2/mul_v1_2.example.cpp +++ b/runtime/test/generated/spec_V1_2/mul_v1_2.example.cpp
@@ -130,7 +130,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -160,7 +160,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -332,7 +332,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -362,7 +362,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/not_equal.example.cpp b/runtime/test/generated/spec_V1_2/not_equal.example.cpp index ec155b0..c602ba9 100644 --- a/runtime/test/generated/spec_V1_2/not_equal.example.cpp +++ b/runtime/test/generated/spec_V1_2/not_equal.example.cpp
@@ -110,7 +110,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -140,7 +140,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -349,7 +349,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -379,7 +379,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -531,7 +531,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -561,7 +561,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -713,7 +713,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -743,7 +743,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -952,7 +952,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), 
.dimensions = {1}, @@ -982,7 +982,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1134,7 +1134,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1164,7 +1164,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1316,7 +1316,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -1346,7 +1346,7 @@ .scale = 2.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -1498,7 +1498,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -1528,7 +1528,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 129 - }, { // placeholder15 + }, { // dummy15 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({129}), .dimensions = {1}, @@ -1680,7 +1680,7 @@ .scale = 1.64771f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 31 - }, { // placeholder16 + }, { // dummy16 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({31}), .dimensions = {1}, @@ -1710,7 +1710,7 @@ .scale = 1.49725f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 240 - }, { // 
placeholder17 + }, { // dummy17 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({240}), .dimensions = {1}, @@ -1862,7 +1862,7 @@ .scale = 1.49725f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 240 - }, { // placeholder18 + }, { // dummy18 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({240}), .dimensions = {1}, @@ -1892,7 +1892,7 @@ .scale = 1.64771f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 31 - }, { // placeholder19 + }, { // dummy19 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({31}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/pad_all_dims.example.cpp b/runtime/test/generated/spec_V1_2/pad_all_dims.example.cpp index 589f16f..5ac39da 100644 --- a/runtime/test/generated/spec_V1_2/pad_all_dims.example.cpp +++ b/runtime/test/generated/spec_V1_2/pad_all_dims.example.cpp
@@ -110,7 +110,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -258,7 +258,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -406,7 +406,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -554,7 +554,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -702,7 +702,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -850,7 +850,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -998,7 +998,7 @@ .scale = 2.3f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -1146,7 +1146,7 @@ .scale = 2.3f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/pad_float16.example.cpp b/runtime/test/generated/spec_V1_2/pad_float16.example.cpp index 7cfd5f4..591b389 100644 --- a/runtime/test/generated/spec_V1_2/pad_float16.example.cpp +++ b/runtime/test/generated/spec_V1_2/pad_float16.example.cpp
@@ -110,7 +110,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -258,7 +258,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/pad_low_rank.example.cpp b/runtime/test/generated/spec_V1_2/pad_low_rank.example.cpp index ec8725d..f690ca3 100644 --- a/runtime/test/generated/spec_V1_2/pad_low_rank.example.cpp +++ b/runtime/test/generated/spec_V1_2/pad_low_rank.example.cpp
@@ -110,7 +110,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -258,7 +258,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -406,7 +406,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -554,7 +554,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/pad_low_rank_quant8.example.cpp b/runtime/test/generated/spec_V1_2/pad_low_rank_quant8.example.cpp index abff22c..f0fa558 100644 --- a/runtime/test/generated/spec_V1_2/pad_low_rank_quant8.example.cpp +++ b/runtime/test/generated/spec_V1_2/pad_low_rank_quant8.example.cpp
@@ -110,7 +110,7 @@ .scale = 2.3f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -258,7 +258,7 @@ .scale = 2.3f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/pad_quant8.example.cpp b/runtime/test/generated/spec_V1_2/pad_quant8.example.cpp index dd81183..0d97ce0 100644 --- a/runtime/test/generated/spec_V1_2/pad_quant8.example.cpp +++ b/runtime/test/generated/spec_V1_2/pad_quant8.example.cpp
@@ -110,7 +110,7 @@ .scale = 2.3f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -258,7 +258,7 @@ .scale = 2.3f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/pad_quant8_nonzero.example.cpp b/runtime/test/generated/spec_V1_2/pad_quant8_nonzero.example.cpp index c513673..6515217 100644 --- a/runtime/test/generated/spec_V1_2/pad_quant8_nonzero.example.cpp +++ b/runtime/test/generated/spec_V1_2/pad_quant8_nonzero.example.cpp
@@ -110,7 +110,7 @@ .scale = 2.3f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 9 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({9}), .dimensions = {1}, @@ -258,7 +258,7 @@ .scale = 2.3f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 9 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({9}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/pad_v2_1_float.example.cpp b/runtime/test/generated/spec_V1_2/pad_v2_1_float.example.cpp index ad39d1f..149dd0c 100644 --- a/runtime/test/generated/spec_V1_2/pad_v2_1_float.example.cpp +++ b/runtime/test/generated/spec_V1_2/pad_v2_1_float.example.cpp
@@ -130,7 +130,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -298,7 +298,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -466,7 +466,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -634,7 +634,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -802,7 +802,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -970,7 +970,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/pad_v2_1_quant8.example.cpp b/runtime/test/generated/spec_V1_2/pad_v2_1_quant8.example.cpp index f600819..acbc1d4 100644 --- a/runtime/test/generated/spec_V1_2/pad_v2_1_quant8.example.cpp +++ b/runtime/test/generated/spec_V1_2/pad_v2_1_quant8.example.cpp
@@ -130,7 +130,7 @@ .scale = 2.3f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 4 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({4}), .dimensions = {1}, @@ -298,7 +298,7 @@ .scale = 2.3f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 4 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({4}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/pad_v2_all_dims.example.cpp b/runtime/test/generated/spec_V1_2/pad_v2_all_dims.example.cpp index ca2f692..afe4021 100644 --- a/runtime/test/generated/spec_V1_2/pad_v2_all_dims.example.cpp +++ b/runtime/test/generated/spec_V1_2/pad_v2_all_dims.example.cpp
@@ -130,7 +130,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -298,7 +298,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -466,7 +466,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -634,7 +634,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -802,7 +802,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -970,7 +970,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/pad_v2_all_dims_quant8.example.cpp b/runtime/test/generated/spec_V1_2/pad_v2_all_dims_quant8.example.cpp index e3f9881..7780e18 100644 --- a/runtime/test/generated/spec_V1_2/pad_v2_all_dims_quant8.example.cpp +++ b/runtime/test/generated/spec_V1_2/pad_v2_all_dims_quant8.example.cpp
@@ -130,7 +130,7 @@ .scale = 2.3f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 4 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({4}), .dimensions = {1}, @@ -298,7 +298,7 @@ .scale = 2.3f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 4 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({4}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/pad_v2_low_rank.example.cpp b/runtime/test/generated/spec_V1_2/pad_v2_low_rank.example.cpp index ae6c5b8..b942d6e 100644 --- a/runtime/test/generated/spec_V1_2/pad_v2_low_rank.example.cpp +++ b/runtime/test/generated/spec_V1_2/pad_v2_low_rank.example.cpp
@@ -130,7 +130,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -298,7 +298,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -466,7 +466,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -634,7 +634,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/pad_v2_low_rank_quant8.example.cpp b/runtime/test/generated/spec_V1_2/pad_v2_low_rank_quant8.example.cpp index c629bc1..de46196 100644 --- a/runtime/test/generated/spec_V1_2/pad_v2_low_rank_quant8.example.cpp +++ b/runtime/test/generated/spec_V1_2/pad_v2_low_rank_quant8.example.cpp
@@ -130,7 +130,7 @@ .scale = 2.3f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 4 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({4}), .dimensions = {1}, @@ -298,7 +298,7 @@ .scale = 2.3f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 4 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({4}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/pow.example.cpp b/runtime/test/generated/spec_V1_2/pow.example.cpp index 87ee22d..416763b 100644 --- a/runtime/test/generated/spec_V1_2/pow.example.cpp +++ b/runtime/test/generated/spec_V1_2/pow.example.cpp
@@ -110,7 +110,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -140,7 +140,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -292,7 +292,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -322,7 +322,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -474,7 +474,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -504,7 +504,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -656,7 +656,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -686,7 +686,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -838,7 +838,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), 
.dimensions = {1}, @@ -868,7 +868,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1020,7 +1020,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1050,7 +1050,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1202,7 +1202,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1232,7 +1232,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1384,7 +1384,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1414,7 +1414,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder15 + }, { // dummy15 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1566,7 +1566,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder16 + }, { // dummy16 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1596,7 +1596,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder17 + }, { // dummy17 .channelQuant = {}, 
.data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/prelu.example.cpp b/runtime/test/generated/spec_V1_2/prelu.example.cpp index 4ff431b..f4a248f 100644 --- a/runtime/test/generated/spec_V1_2/prelu.example.cpp +++ b/runtime/test/generated/spec_V1_2/prelu.example.cpp
@@ -110,7 +110,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -258,7 +258,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -288,7 +288,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -440,7 +440,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -588,7 +588,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -618,7 +618,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -770,7 +770,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -918,7 +918,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -948,7 +948,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 50 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = 
TestBuffer::createFromVector<uint8_t>({50}), .dimensions = {1}, @@ -1100,7 +1100,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -1248,7 +1248,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -1278,7 +1278,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 50 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({50}), .dimensions = {1}, @@ -1430,7 +1430,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -1578,7 +1578,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -1608,7 +1608,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 50 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({50}), .dimensions = {1}, @@ -1760,7 +1760,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder15 + }, { // dummy15 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -1908,7 +1908,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder16 + }, { // dummy16 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -1938,7 +1938,7 @@ .scale = 0.5f, .type = 
TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 50 - }, { // placeholder17 + }, { // dummy17 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({50}), .dimensions = {1}, @@ -2090,7 +2090,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder18 + }, { // dummy18 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -2238,7 +2238,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder19 + }, { // dummy19 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -2268,7 +2268,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder20 + }, { // dummy20 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/quantize.example.cpp b/runtime/test/generated/spec_V1_2/quantize.example.cpp index e5c7709..31b223f 100644 --- a/runtime/test/generated/spec_V1_2/quantize.example.cpp +++ b/runtime/test/generated/spec_V1_2/quantize.example.cpp
@@ -90,7 +90,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -218,7 +218,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -346,7 +346,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -474,7 +474,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -602,7 +602,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -730,7 +730,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -858,7 +858,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -986,7 +986,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/quantized_lstm.example.cpp b/runtime/test/generated/spec_V1_2/quantized_lstm.example.cpp index f17e8d9..c2b827a 100644 --- a/runtime/test/generated/spec_V1_2/quantized_lstm.example.cpp +++ b/runtime/test/generated/spec_V1_2/quantized_lstm.example.cpp
@@ -390,7 +390,7 @@ .scale = 0.0078125f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -420,7 +420,7 @@ .scale = 0.00408021f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -450,7 +450,7 @@ .scale = 0.00408021f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -480,7 +480,7 @@ .scale = 0.00408021f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -510,7 +510,7 @@ .scale = 0.00408021f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -540,7 +540,7 @@ .scale = 0.00408021f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -570,7 +570,7 @@ .scale = 0.00408021f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -600,7 +600,7 @@ .scale = 0.00408021f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -630,7 +630,7 @@ .scale = 0.00408021f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint 
= 100 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -660,7 +660,7 @@ .scale = 0.0078125f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -1124,7 +1124,7 @@ .scale = 0.0078125f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -1154,7 +1154,7 @@ .scale = 0.00408021f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -1184,7 +1184,7 @@ .scale = 0.00408021f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -1214,7 +1214,7 @@ .scale = 0.00408021f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -1244,7 +1244,7 @@ .scale = 0.00408021f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -1274,7 +1274,7 @@ .scale = 0.00408021f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder15 + }, { // dummy15 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -1304,7 +1304,7 @@ .scale = 0.00408021f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder16 + }, { // dummy16 .channelQuant = {}, 
.data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -1334,7 +1334,7 @@ .scale = 0.00408021f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder17 + }, { // dummy17 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -1364,7 +1364,7 @@ .scale = 0.00408021f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder18 + }, { // dummy18 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -1394,7 +1394,7 @@ .scale = 0.0078125f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder19 + }, { // dummy19 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -1858,7 +1858,7 @@ .scale = 0.0078125f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder20 + }, { // dummy20 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -1888,7 +1888,7 @@ .scale = 0.0078125f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder21 + }, { // dummy21 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -2320,7 +2320,7 @@ .scale = 0.0078125f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder22 + }, { // dummy22 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -2350,7 +2350,7 @@ .scale = 0.00408021f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder23 + }, { // dummy23 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -2380,7 +2380,7 @@ .scale = 0.00408021f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder24 + }, { // dummy24 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = 
{1}, @@ -2410,7 +2410,7 @@ .scale = 0.00408021f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder25 + }, { // dummy25 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -2440,7 +2440,7 @@ .scale = 0.00408021f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder26 + }, { // dummy26 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -2470,7 +2470,7 @@ .scale = 0.00408021f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder27 + }, { // dummy27 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -2500,7 +2500,7 @@ .scale = 0.00408021f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder28 + }, { // dummy28 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -2530,7 +2530,7 @@ .scale = 0.00408021f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder29 + }, { // dummy29 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -2560,7 +2560,7 @@ .scale = 0.00408021f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder30 + }, { // dummy30 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -2590,7 +2590,7 @@ .scale = 0.0078125f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder31 + }, { // dummy31 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -3054,7 +3054,7 @@ .scale = 0.0078125f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder32 + }, { // dummy32 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -3084,7 +3084,7 @@ .scale = 0.0078125f, .type = 
TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder33 + }, { // dummy33 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -3516,7 +3516,7 @@ .scale = 0.0078125f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder34 + }, { // dummy34 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -3546,7 +3546,7 @@ .scale = 0.00408021f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder35 + }, { // dummy35 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -3576,7 +3576,7 @@ .scale = 0.00408021f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder36 + }, { // dummy36 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -3606,7 +3606,7 @@ .scale = 0.00408021f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder37 + }, { // dummy37 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -3636,7 +3636,7 @@ .scale = 0.00408021f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder38 + }, { // dummy38 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -3666,7 +3666,7 @@ .scale = 0.00408021f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder39 + }, { // dummy39 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -3696,7 +3696,7 @@ .scale = 0.00408021f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder40 + }, { // dummy40 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -3726,7 +3726,7 @@ .scale = 0.00408021f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // 
placeholder41 + }, { // dummy41 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -3756,7 +3756,7 @@ .scale = 0.00408021f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder42 + }, { // dummy42 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -3786,7 +3786,7 @@ .scale = 0.0078125f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder43 + }, { // dummy43 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/reduce_max.example.cpp b/runtime/test/generated/spec_V1_2/reduce_max.example.cpp index 710a871..fc126c8 100644 --- a/runtime/test/generated/spec_V1_2/reduce_max.example.cpp +++ b/runtime/test/generated/spec_V1_2/reduce_max.example.cpp
@@ -130,7 +130,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -298,7 +298,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -466,7 +466,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -634,7 +634,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 127 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({127}), .dimensions = {1}, @@ -802,7 +802,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -970,7 +970,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1138,7 +1138,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1306,7 +1306,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 127 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({127}), .dimensions = {1}, @@ -1474,7 +1474,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = 
TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1642,7 +1642,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1810,7 +1810,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1978,7 +1978,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 127 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({127}), .dimensions = {1}, @@ -2146,7 +2146,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2314,7 +2314,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2482,7 +2482,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -2650,7 +2650,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 127 - }, { // placeholder15 + }, { // dummy15 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({127}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/reduce_max_b155508675.example.cpp b/runtime/test/generated/spec_V1_2/reduce_max_b155508675.example.cpp index e930759..17af912 100644 --- a/runtime/test/generated/spec_V1_2/reduce_max_b155508675.example.cpp +++ b/runtime/test/generated/spec_V1_2/reduce_max_b155508675.example.cpp
@@ -130,7 +130,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/reduce_min.example.cpp b/runtime/test/generated/spec_V1_2/reduce_min.example.cpp index cc83245..904d9d1 100644 --- a/runtime/test/generated/spec_V1_2/reduce_min.example.cpp +++ b/runtime/test/generated/spec_V1_2/reduce_min.example.cpp
@@ -130,7 +130,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -298,7 +298,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -466,7 +466,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -634,7 +634,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 127 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({127}), .dimensions = {1}, @@ -802,7 +802,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -970,7 +970,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1138,7 +1138,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1306,7 +1306,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 127 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({127}), .dimensions = {1}, @@ -1474,7 +1474,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = 
TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1642,7 +1642,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1810,7 +1810,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1978,7 +1978,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 127 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({127}), .dimensions = {1}, @@ -2146,7 +2146,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2314,7 +2314,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2482,7 +2482,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -2650,7 +2650,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 127 - }, { // placeholder15 + }, { // dummy15 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({127}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/reduce_min_b155508675.example.cpp b/runtime/test/generated/spec_V1_2/reduce_min_b155508675.example.cpp index ad1c9dc..170554e 100644 --- a/runtime/test/generated/spec_V1_2/reduce_min_b155508675.example.cpp +++ b/runtime/test/generated/spec_V1_2/reduce_min_b155508675.example.cpp
@@ -130,7 +130,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/reduce_prod.example.cpp b/runtime/test/generated/spec_V1_2/reduce_prod.example.cpp index 7a450d8..fbf174a 100644 --- a/runtime/test/generated/spec_V1_2/reduce_prod.example.cpp +++ b/runtime/test/generated/spec_V1_2/reduce_prod.example.cpp
@@ -130,7 +130,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -298,7 +298,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -466,7 +466,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -634,7 +634,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -802,7 +802,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -970,7 +970,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1138,7 +1138,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1306,7 +1306,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1474,7 +1474,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = 
TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1642,7 +1642,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1810,7 +1810,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1978,7 +1978,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/reduce_prod_b155508675.example.cpp b/runtime/test/generated/spec_V1_2/reduce_prod_b155508675.example.cpp index 0809b05..8fcb2fc 100644 --- a/runtime/test/generated/spec_V1_2/reduce_prod_b155508675.example.cpp +++ b/runtime/test/generated/spec_V1_2/reduce_prod_b155508675.example.cpp
@@ -130,7 +130,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/reduce_sum.example.cpp b/runtime/test/generated/spec_V1_2/reduce_sum.example.cpp index 1d0d5ab..38e2574 100644 --- a/runtime/test/generated/spec_V1_2/reduce_sum.example.cpp +++ b/runtime/test/generated/spec_V1_2/reduce_sum.example.cpp
@@ -130,7 +130,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -298,7 +298,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -466,7 +466,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -634,7 +634,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -802,7 +802,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -970,7 +970,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1138,7 +1138,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1306,7 +1306,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1474,7 +1474,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = 
TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1642,7 +1642,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1810,7 +1810,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1978,7 +1978,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/reduce_sum_b155508675.example.cpp b/runtime/test/generated/spec_V1_2/reduce_sum_b155508675.example.cpp index a04bfd7..e210964 100644 --- a/runtime/test/generated/spec_V1_2/reduce_sum_b155508675.example.cpp +++ b/runtime/test/generated/spec_V1_2/reduce_sum_b155508675.example.cpp
@@ -130,7 +130,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/relu1_v1_2.example.cpp b/runtime/test/generated/spec_V1_2/relu1_v1_2.example.cpp index 0ffcff7..a10bfe5 100644 --- a/runtime/test/generated/spec_V1_2/relu1_v1_2.example.cpp +++ b/runtime/test/generated/spec_V1_2/relu1_v1_2.example.cpp
@@ -90,7 +90,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -218,7 +218,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/relu6_v1_2.example.cpp b/runtime/test/generated/spec_V1_2/relu6_v1_2.example.cpp index bc56cc4..7742ecd 100644 --- a/runtime/test/generated/spec_V1_2/relu6_v1_2.example.cpp +++ b/runtime/test/generated/spec_V1_2/relu6_v1_2.example.cpp
@@ -90,7 +90,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -218,7 +218,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/relu_v1_2.example.cpp b/runtime/test/generated/spec_V1_2/relu_v1_2.example.cpp index 9214af1..d15fe96 100644 --- a/runtime/test/generated/spec_V1_2/relu_v1_2.example.cpp +++ b/runtime/test/generated/spec_V1_2/relu_v1_2.example.cpp
@@ -90,7 +90,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -218,7 +218,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/reshape_float16.example.cpp b/runtime/test/generated/spec_V1_2/reshape_float16.example.cpp index 82f951d..557bb8a 100644 --- a/runtime/test/generated/spec_V1_2/reshape_float16.example.cpp +++ b/runtime/test/generated/spec_V1_2/reshape_float16.example.cpp
@@ -110,7 +110,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -258,7 +258,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/resize_bilinear_v1_2.example.cpp b/runtime/test/generated/spec_V1_2/resize_bilinear_v1_2.example.cpp index 648faa7..45cf5c5 100644 --- a/runtime/test/generated/spec_V1_2/resize_bilinear_v1_2.example.cpp +++ b/runtime/test/generated/spec_V1_2/resize_bilinear_v1_2.example.cpp
@@ -150,7 +150,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -338,7 +338,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -526,7 +526,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -714,7 +714,7 @@ .scale = 0.01f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -902,7 +902,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1090,7 +1090,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1278,7 +1278,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1466,7 +1466,7 @@ .scale = 0.01f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -1654,7 +1654,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = 
TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1842,7 +1842,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2030,7 +2030,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -2218,7 +2218,7 @@ .scale = 0.01f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -2406,7 +2406,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2594,7 +2594,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2782,7 +2782,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -2970,7 +2970,7 @@ .scale = 0.01f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder15 + }, { // dummy15 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -3158,7 +3158,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder16 + }, { // dummy16 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3346,7 +3346,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // 
placeholder17 + }, { // dummy17 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3534,7 +3534,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder18 + }, { // dummy18 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -3722,7 +3722,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder19 + }, { // dummy19 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -3910,7 +3910,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder20 + }, { // dummy20 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4098,7 +4098,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder21 + }, { // dummy21 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4286,7 +4286,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder22 + }, { // dummy22 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -4474,7 +4474,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder23 + }, { // dummy23 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -4662,7 +4662,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder24 + }, { // dummy24 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4850,7 +4850,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder25 + }, { // dummy25 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5038,7 +5038,7 @@ .scale = 0.0f, .type 
= TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder26 + }, { // dummy26 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -5226,7 +5226,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder27 + }, { // dummy27 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -5414,7 +5414,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder28 + }, { // dummy28 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5602,7 +5602,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder29 + }, { // dummy29 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5790,7 +5790,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder30 + }, { // dummy30 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -5978,7 +5978,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder31 + }, { // dummy31 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -6146,7 +6146,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder32 + }, { // dummy32 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -6314,7 +6314,7 @@ .scale = 0.01f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder33 + }, { // dummy33 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -6482,7 +6482,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder34 + }, { // dummy34 .channelQuant = {}, .data = 
TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -6650,7 +6650,7 @@ .scale = 0.01f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder35 + }, { // dummy35 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/resize_nearest_neighbor.example.cpp b/runtime/test/generated/spec_V1_2/resize_nearest_neighbor.example.cpp index b5a8541..130ee27 100644 --- a/runtime/test/generated/spec_V1_2/resize_nearest_neighbor.example.cpp +++ b/runtime/test/generated/spec_V1_2/resize_nearest_neighbor.example.cpp
@@ -150,7 +150,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -338,7 +338,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -526,7 +526,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -714,7 +714,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -902,7 +902,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1090,7 +1090,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1278,7 +1278,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -1466,7 +1466,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1654,7 +1654,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = 
TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1842,7 +1842,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2030,7 +2030,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -2218,7 +2218,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -2406,7 +2406,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2594,7 +2594,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2782,7 +2782,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -2970,7 +2970,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder15 + }, { // dummy15 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -3158,7 +3158,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder16 + }, { // dummy16 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3346,7 +3346,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, 
{ // placeholder17 + }, { // dummy17 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3534,7 +3534,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder18 + }, { // dummy18 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -3722,7 +3722,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder19 + }, { // dummy19 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -3910,7 +3910,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder20 + }, { // dummy20 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4098,7 +4098,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder21 + }, { // dummy21 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4286,7 +4286,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder22 + }, { // dummy22 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -4474,7 +4474,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder23 + }, { // dummy23 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -4662,7 +4662,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder24 + }, { // dummy24 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4850,7 +4850,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder25 + }, { // dummy25 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5038,7 +5038,7 @@ .scale = 0.25f, 
.type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder26 + }, { // dummy26 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -5226,7 +5226,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder27 + }, { // dummy27 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -5414,7 +5414,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder28 + }, { // dummy28 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5602,7 +5602,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder29 + }, { // dummy29 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5790,7 +5790,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder30 + }, { // dummy30 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -5978,7 +5978,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder31 + }, { // dummy31 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -6166,7 +6166,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder32 + }, { // dummy32 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -6354,7 +6354,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder33 + }, { // dummy33 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -6542,7 +6542,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder34 + }, { // dummy34 .channelQuant = {}, .data = 
TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -6730,7 +6730,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder35 + }, { // dummy35 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -6918,7 +6918,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder36 + }, { // dummy36 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -7106,7 +7106,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder37 + }, { // dummy37 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -7294,7 +7294,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder38 + }, { // dummy38 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -7482,7 +7482,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder39 + }, { // dummy39 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -7670,7 +7670,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder40 + }, { // dummy40 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -7858,7 +7858,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder41 + }, { // dummy41 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -8046,7 +8046,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder42 + }, { // dummy42 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -8234,7 +8234,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - 
}, { // placeholder43 + }, { // dummy43 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -8422,7 +8422,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder44 + }, { // dummy44 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -8610,7 +8610,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder45 + }, { // dummy45 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -8798,7 +8798,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder46 + }, { // dummy46 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -8986,7 +8986,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder47 + }, { // dummy47 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -9174,7 +9174,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder48 + }, { // dummy48 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -9362,7 +9362,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder49 + }, { // dummy49 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -9550,7 +9550,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder50 + }, { // dummy50 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -9738,7 +9738,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder51 + }, { // dummy51 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -9926,7 +9926,7 @@ 
.scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder52 + }, { // dummy52 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -10114,7 +10114,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder53 + }, { // dummy53 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -10302,7 +10302,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder54 + }, { // dummy54 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -10490,7 +10490,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder55 + }, { // dummy55 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -10678,7 +10678,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder56 + }, { // dummy56 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -10866,7 +10866,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder57 + }, { // dummy57 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -11054,7 +11054,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder58 + }, { // dummy58 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -11242,7 +11242,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder59 + }, { // dummy59 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -11430,7 +11430,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder60 + }, { // dummy60 .channelQuant = {}, .data = 
TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -11618,7 +11618,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder61 + }, { // dummy61 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -11806,7 +11806,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder62 + }, { // dummy62 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -11994,7 +11994,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder63 + }, { // dummy63 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -12182,7 +12182,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder64 + }, { // dummy64 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -12370,7 +12370,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder65 + }, { // dummy65 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -12558,7 +12558,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder66 + }, { // dummy66 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -12746,7 +12746,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder67 + }, { // dummy67 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -12934,7 +12934,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder68 + }, { // dummy68 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -13122,7 +13122,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, 
.zeroPoint = 0 - }, { // placeholder69 + }, { // dummy69 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -13310,7 +13310,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder70 + }, { // dummy70 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -13498,7 +13498,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder71 + }, { // dummy71 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -13686,7 +13686,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder72 + }, { // dummy72 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -13874,7 +13874,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder73 + }, { // dummy73 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -14062,7 +14062,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder74 + }, { // dummy74 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -14250,7 +14250,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder75 + }, { // dummy75 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -14438,7 +14438,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder76 + }, { // dummy76 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -14626,7 +14626,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder77 + }, { // dummy77 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = 
{1}, @@ -14814,7 +14814,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder78 + }, { // dummy78 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -15002,7 +15002,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder79 + }, { // dummy79 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -15190,7 +15190,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder80 + }, { // dummy80 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -15378,7 +15378,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder81 + }, { // dummy81 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -15566,7 +15566,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder82 + }, { // dummy82 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -15754,7 +15754,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder83 + }, { // dummy83 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -15942,7 +15942,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder84 + }, { // dummy84 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -16130,7 +16130,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder85 + }, { // dummy85 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -16318,7 +16318,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder86 + }, { // 
dummy86 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -16506,7 +16506,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder87 + }, { // dummy87 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -16694,7 +16694,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder88 + }, { // dummy88 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -16882,7 +16882,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder89 + }, { // dummy89 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -17070,7 +17070,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder90 + }, { // dummy90 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -17258,7 +17258,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder91 + }, { // dummy91 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -17446,7 +17446,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder92 + }, { // dummy92 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -17634,7 +17634,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder93 + }, { // dummy93 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -17822,7 +17822,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder94 + }, { // dummy94 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -18010,7 +18010,7 @@ .scale = 0.0f, 
.type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder95 + }, { // dummy95 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -18198,7 +18198,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder96 + }, { // dummy96 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -18386,7 +18386,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder97 + }, { // dummy97 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -18574,7 +18574,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder98 + }, { // dummy98 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -18762,7 +18762,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder99 + }, { // dummy99 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -18950,7 +18950,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder100 + }, { // dummy100 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -19138,7 +19138,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder101 + }, { // dummy101 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -19326,7 +19326,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder102 + }, { // dummy102 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -19514,7 +19514,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder103 + }, { // dummy103 .channelQuant = {}, .data = 
TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -19702,7 +19702,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder104 + }, { // dummy104 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -19890,7 +19890,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder105 + }, { // dummy105 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -20078,7 +20078,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder106 + }, { // dummy106 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -20266,7 +20266,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder107 + }, { // dummy107 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -20454,7 +20454,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder108 + }, { // dummy108 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -20642,7 +20642,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder109 + }, { // dummy109 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -20830,7 +20830,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder110 + }, { // dummy110 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -21018,7 +21018,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder111 + }, { // dummy111 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -21206,7 +21206,7 @@ .scale = 0.0f, .type = 
TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder112 + }, { // dummy112 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -21394,7 +21394,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder113 + }, { // dummy113 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -21582,7 +21582,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder114 + }, { // dummy114 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -21770,7 +21770,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder115 + }, { // dummy115 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -21958,7 +21958,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder116 + }, { // dummy116 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -22146,7 +22146,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder117 + }, { // dummy117 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -22334,7 +22334,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder118 + }, { // dummy118 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -22522,7 +22522,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder119 + }, { // dummy119 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -22710,7 +22710,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder120 + }, { // dummy120 .channelQuant = {}, .data = 
TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -22898,7 +22898,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder121 + }, { // dummy121 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -23086,7 +23086,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder122 + }, { // dummy122 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -23274,7 +23274,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder123 + }, { // dummy123 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -23462,7 +23462,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder124 + }, { // dummy124 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -23650,7 +23650,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder125 + }, { // dummy125 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -23838,7 +23838,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder126 + }, { // dummy126 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -24026,7 +24026,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder127 + }, { // dummy127 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/rnn_float16.example.cpp b/runtime/test/generated/spec_V1_2/rnn_float16.example.cpp index 0de809e..944e541 100644 --- a/runtime/test/generated/spec_V1_2/rnn_float16.example.cpp +++ b/runtime/test/generated/spec_V1_2/rnn_float16.example.cpp
@@ -210,7 +210,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -240,7 +240,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -270,7 +270,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -300,7 +300,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -330,7 +330,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/roi_align.example.cpp b/runtime/test/generated/spec_V1_2/roi_align.example.cpp index 8a0cdab..8ca45ee 100644 --- a/runtime/test/generated/spec_V1_2/roi_align.example.cpp +++ b/runtime/test/generated/spec_V1_2/roi_align.example.cpp
@@ -270,7 +270,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -300,7 +300,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -612,7 +612,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -642,7 +642,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -954,7 +954,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -1262,7 +1262,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1292,7 +1292,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1604,7 +1604,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1634,7 +1634,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = 
TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1946,7 +1946,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1976,7 +1976,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2288,7 +2288,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -2596,7 +2596,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -2626,7 +2626,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -2938,7 +2938,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2968,7 +2968,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder15 + }, { // dummy15 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3280,7 +3280,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder16 + }, { // dummy16 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3310,7 +3310,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // 
placeholder17 + }, { // dummy17 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3622,7 +3622,7 @@ .scale = 0.04f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder18 + }, { // dummy18 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -3930,7 +3930,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder19 + }, { // dummy19 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -3960,7 +3960,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder20 + }, { // dummy20 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -4272,7 +4272,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder21 + }, { // dummy21 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4302,7 +4302,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder22 + }, { // dummy22 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4614,7 +4614,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder23 + }, { // dummy23 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4644,7 +4644,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder24 + }, { // dummy24 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4956,7 +4956,7 @@ .scale = 0.04f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder25 + }, { // dummy25 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -5264,7 +5264,7 @@ .scale = 0.0f, .type 
= TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder26 + }, { // dummy26 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -5294,7 +5294,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder27 + }, { // dummy27 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -5606,7 +5606,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder28 + }, { // dummy28 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5636,7 +5636,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder29 + }, { // dummy29 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5948,7 +5948,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder30 + }, { // dummy30 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5978,7 +5978,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder31 + }, { // dummy31 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -6290,7 +6290,7 @@ .scale = 0.04f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder32 + }, { // dummy32 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -6598,7 +6598,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder33 + }, { // dummy33 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -6628,7 +6628,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder34 + }, { // dummy34 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), 
.dimensions = {1}, @@ -6940,7 +6940,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder35 + }, { // dummy35 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -6970,7 +6970,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder36 + }, { // dummy36 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -7282,7 +7282,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder37 + }, { // dummy37 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -7312,7 +7312,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder38 + }, { // dummy38 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -7624,7 +7624,7 @@ .scale = 0.04f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder39 + }, { // dummy39 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -7932,7 +7932,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder40 + }, { // dummy40 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -7962,7 +7962,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder41 + }, { // dummy41 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -8274,7 +8274,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder42 + }, { // dummy42 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -8304,7 +8304,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder43 + }, { // dummy43 .channelQuant = 
{}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -8616,7 +8616,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder44 + }, { // dummy44 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -8646,7 +8646,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder45 + }, { // dummy45 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -8958,7 +8958,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder46 + }, { // dummy46 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -9266,7 +9266,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder47 + }, { // dummy47 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -9296,7 +9296,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder48 + }, { // dummy48 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -9608,7 +9608,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder49 + }, { // dummy49 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -9638,7 +9638,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder50 + }, { // dummy50 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -9950,7 +9950,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder51 + }, { // dummy51 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -9980,7 +9980,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 
- }, { // placeholder52 + }, { // dummy52 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -10292,7 +10292,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder53 + }, { // dummy53 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -10600,7 +10600,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder54 + }, { // dummy54 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -10630,7 +10630,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder55 + }, { // dummy55 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -12950,7 +12950,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder56 + }, { // dummy56 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -12980,7 +12980,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder57 + }, { // dummy57 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -13292,7 +13292,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder58 + }, { // dummy58 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -13322,7 +13322,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder59 + }, { // dummy59 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -13634,7 +13634,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder60 + }, { // dummy60 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ 
-13942,7 +13942,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder61 + }, { // dummy61 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -13972,7 +13972,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder62 + }, { // dummy62 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -14284,7 +14284,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder63 + }, { // dummy63 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -14314,7 +14314,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder64 + }, { // dummy64 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -14626,7 +14626,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder65 + }, { // dummy65 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -14656,7 +14656,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder66 + }, { // dummy66 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -14968,7 +14968,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder67 + }, { // dummy67 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -15276,7 +15276,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder68 + }, { // dummy68 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -15306,7 +15306,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder69 + }, { // dummy69 .channelQuant = 
{}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/roi_pooling.example.cpp b/runtime/test/generated/spec_V1_2/roi_pooling.example.cpp index 6671278..9870dcb 100644 --- a/runtime/test/generated/spec_V1_2/roi_pooling.example.cpp +++ b/runtime/test/generated/spec_V1_2/roi_pooling.example.cpp
@@ -230,7 +230,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -260,7 +260,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -532,7 +532,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -562,7 +562,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -834,7 +834,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -1102,7 +1102,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1132,7 +1132,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1404,7 +1404,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1434,7 +1434,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = 
TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1706,7 +1706,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1736,7 +1736,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2008,7 +2008,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -2276,7 +2276,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -2306,7 +2306,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -2578,7 +2578,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2608,7 +2608,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder15 + }, { // dummy15 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2880,7 +2880,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder16 + }, { // dummy16 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2910,7 +2910,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // 
placeholder17 + }, { // dummy17 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3182,7 +3182,7 @@ .scale = 0.04f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder18 + }, { // dummy18 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -3450,7 +3450,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder19 + }, { // dummy19 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -3480,7 +3480,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder20 + }, { // dummy20 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -3752,7 +3752,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder21 + }, { // dummy21 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3782,7 +3782,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder22 + }, { // dummy22 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4054,7 +4054,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder23 + }, { // dummy23 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4084,7 +4084,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder24 + }, { // dummy24 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4356,7 +4356,7 @@ .scale = 0.04f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder25 + }, { // dummy25 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -4624,7 +4624,7 @@ .scale = 0.0f, .type 
= TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder26 + }, { // dummy26 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -4654,7 +4654,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder27 + }, { // dummy27 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -4926,7 +4926,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder28 + }, { // dummy28 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4956,7 +4956,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder29 + }, { // dummy29 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5228,7 +5228,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder30 + }, { // dummy30 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5258,7 +5258,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder31 + }, { // dummy31 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5530,7 +5530,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder32 + }, { // dummy32 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -5798,7 +5798,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder33 + }, { // dummy33 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -5828,7 +5828,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder34 + }, { // dummy34 .channelQuant = {}, .data = 
TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -6100,7 +6100,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder35 + }, { // dummy35 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -6130,7 +6130,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder36 + }, { // dummy36 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -6402,7 +6402,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder37 + }, { // dummy37 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -6432,7 +6432,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder38 + }, { // dummy38 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -6704,7 +6704,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder39 + }, { // dummy39 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -6972,7 +6972,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder40 + }, { // dummy40 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -7002,7 +7002,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder41 + }, { // dummy41 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/select_v1_2.example.cpp b/runtime/test/generated/spec_V1_2/select_v1_2.example.cpp index 98f9b60..2223f56 100644 --- a/runtime/test/generated/spec_V1_2/select_v1_2.example.cpp +++ b/runtime/test/generated/spec_V1_2/select_v1_2.example.cpp
@@ -130,7 +130,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -160,7 +160,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -399,7 +399,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -429,7 +429,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -601,7 +601,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -631,7 +631,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -803,7 +803,7 @@ .scale = 1.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 129 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({129}), .dimensions = {1}, @@ -833,7 +833,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 127 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({127}), .dimensions = {1}, @@ -1005,7 +1005,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = 
TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1035,7 +1035,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1274,7 +1274,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1304,7 +1304,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1476,7 +1476,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1506,7 +1506,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1678,7 +1678,7 @@ .scale = 1.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 129 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({129}), .dimensions = {1}, @@ -1708,7 +1708,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 127 - }, { // placeholder15 + }, { // dummy15 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({127}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/slice.example.cpp b/runtime/test/generated/spec_V1_2/slice.example.cpp index d9fa8ad..a5d88fa 100644 --- a/runtime/test/generated/spec_V1_2/slice.example.cpp +++ b/runtime/test/generated/spec_V1_2/slice.example.cpp
@@ -130,7 +130,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -298,7 +298,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -466,7 +466,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -634,7 +634,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -802,7 +802,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -970,7 +970,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1138,7 +1138,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1306,7 +1306,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1474,7 +1474,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = 
TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1642,7 +1642,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1810,7 +1810,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1978,7 +1978,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -2548,7 +2548,7 @@ .scale = 2.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -2716,7 +2716,7 @@ .scale = 2.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -2884,7 +2884,7 @@ .scale = 2.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/softmax_v1_2.example.cpp b/runtime/test/generated/spec_V1_2/softmax_v1_2.example.cpp index 02c76c0..5b7af29 100644 --- a/runtime/test/generated/spec_V1_2/softmax_v1_2.example.cpp +++ b/runtime/test/generated/spec_V1_2/softmax_v1_2.example.cpp
@@ -110,7 +110,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -258,7 +258,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -406,7 +406,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -554,7 +554,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -702,7 +702,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -850,7 +850,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -998,7 +998,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1146,7 +1146,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1294,7 +1294,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), 
.dimensions = {1}, @@ -1442,7 +1442,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -1590,7 +1590,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -1738,7 +1738,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -1886,7 +1886,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2034,7 +2034,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2182,7 +2182,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2330,7 +2330,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder15 + }, { // dummy15 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2478,7 +2478,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder16 + }, { // dummy16 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2626,7 +2626,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder17 + }, { // dummy17 
.channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2774,7 +2774,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder18 + }, { // dummy18 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -2922,7 +2922,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder19 + }, { // dummy19 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -3070,7 +3070,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder20 + }, { // dummy20 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -3218,7 +3218,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder21 + }, { // dummy21 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -3366,7 +3366,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder22 + }, { // dummy22 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -3514,7 +3514,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder23 + }, { // dummy23 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -3682,7 +3682,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder24 + }, { // dummy24 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3850,7 +3850,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder25 + }, { // dummy25 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4018,7 +4018,7 @@ .scale = 0.0f, .type = 
TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder26 + }, { // dummy26 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4186,7 +4186,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder27 + }, { // dummy27 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4354,7 +4354,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder28 + }, { // dummy28 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4522,7 +4522,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder29 + }, { // dummy29 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4690,7 +4690,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder30 + }, { // dummy30 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4858,7 +4858,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder31 + }, { // dummy31 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5026,7 +5026,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder32 + }, { // dummy32 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5194,7 +5194,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder33 + }, { // dummy33 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5362,7 +5362,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder34 + }, { // dummy34 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, 
@@ -5530,7 +5530,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder35 + }, { // dummy35 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5698,7 +5698,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder36 + }, { // dummy36 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5866,7 +5866,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder37 + }, { // dummy37 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -6034,7 +6034,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder38 + }, { // dummy38 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -6202,7 +6202,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder39 + }, { // dummy39 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -6370,7 +6370,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder40 + }, { // dummy40 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -6538,7 +6538,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder41 + }, { // dummy41 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -6706,7 +6706,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder42 + }, { // dummy42 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -6874,7 +6874,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder43 + }, { // dummy43 .channelQuant = {}, .data = 
TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -7042,7 +7042,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder44 + }, { // dummy44 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -7210,7 +7210,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder45 + }, { // dummy45 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -7378,7 +7378,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder46 + }, { // dummy46 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -7546,7 +7546,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder47 + }, { // dummy47 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -7714,7 +7714,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder48 + }, { // dummy48 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -7882,7 +7882,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder49 + }, { // dummy49 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -8050,7 +8050,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder50 + }, { // dummy50 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -8218,7 +8218,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder51 + }, { // dummy51 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -8386,7 +8386,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder52 + 
}, { // dummy52 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -8554,7 +8554,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder53 + }, { // dummy53 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -8722,7 +8722,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder54 + }, { // dummy54 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -8890,7 +8890,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder55 + }, { // dummy55 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -9058,7 +9058,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder56 + }, { // dummy56 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -9226,7 +9226,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder57 + }, { // dummy57 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -9394,7 +9394,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder58 + }, { // dummy58 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -9562,7 +9562,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder59 + }, { // dummy59 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -9730,7 +9730,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder60 + }, { // dummy60 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -9898,7 +9898,7 @@ .scale = 0.0f, .type = 
TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder61 + }, { // dummy61 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -10066,7 +10066,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder62 + }, { // dummy62 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -10234,7 +10234,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder63 + }, { // dummy63 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -10402,7 +10402,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder64 + }, { // dummy64 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -10570,7 +10570,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder65 + }, { // dummy65 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -10738,7 +10738,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder66 + }, { // dummy66 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -10906,7 +10906,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder67 + }, { // dummy67 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -11074,7 +11074,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder68 + }, { // dummy68 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -11242,7 +11242,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder69 + }, { // dummy69 .channelQuant = {}, .data = 
TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -11410,7 +11410,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder70 + }, { // dummy70 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -11578,7 +11578,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder71 + }, { // dummy71 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -11746,7 +11746,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder72 + }, { // dummy72 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -11914,7 +11914,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder73 + }, { // dummy73 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -12082,7 +12082,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder74 + }, { // dummy74 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -12250,7 +12250,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder75 + }, { // dummy75 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -12418,7 +12418,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder76 + }, { // dummy76 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -12586,7 +12586,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder77 + }, { // dummy77 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -12754,7 +12754,7 @@ .scale = 0.0f, .type = 
TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder78 + }, { // dummy78 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -12922,7 +12922,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder79 + }, { // dummy79 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -13090,7 +13090,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder80 + }, { // dummy80 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -13258,7 +13258,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder81 + }, { // dummy81 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -13426,7 +13426,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder82 + }, { // dummy82 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -13594,7 +13594,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder83 + }, { // dummy83 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -13762,7 +13762,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder84 + }, { // dummy84 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -13930,7 +13930,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder85 + }, { // dummy85 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -14098,7 +14098,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder86 + }, { // dummy86 .channelQuant = {}, .data = 
TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -14266,7 +14266,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder87 + }, { // dummy87 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -14434,7 +14434,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder88 + }, { // dummy88 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -14602,7 +14602,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder89 + }, { // dummy89 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -14770,7 +14770,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder90 + }, { // dummy90 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -14938,7 +14938,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder91 + }, { // dummy91 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -15106,7 +15106,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder92 + }, { // dummy92 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -15274,7 +15274,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder93 + }, { // dummy93 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -15442,7 +15442,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder94 + }, { // dummy94 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -15610,7 +15610,7 @@ .scale 
= 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder95 + }, { // dummy95 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -15778,7 +15778,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder96 + }, { // dummy96 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -15946,7 +15946,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder97 + }, { // dummy97 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -16114,7 +16114,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder98 + }, { // dummy98 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -16282,7 +16282,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder99 + }, { // dummy99 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -16450,7 +16450,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder100 + }, { // dummy100 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -16618,7 +16618,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder101 + }, { // dummy101 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -16786,7 +16786,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder102 + }, { // dummy102 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -16954,7 +16954,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // 
placeholder103 + }, { // dummy103 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -17122,7 +17122,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder104 + }, { // dummy104 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -17290,7 +17290,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder105 + }, { // dummy105 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -17458,7 +17458,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder106 + }, { // dummy106 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -17626,7 +17626,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder107 + }, { // dummy107 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -17794,7 +17794,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder108 + }, { // dummy108 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -17962,7 +17962,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder109 + }, { // dummy109 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -18130,7 +18130,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder110 + }, { // dummy110 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -18298,7 +18298,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder111 + }, { // dummy111 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -18466,7 +18466,7 @@ 
.scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder112 + }, { // dummy112 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -18634,7 +18634,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder113 + }, { // dummy113 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -18802,7 +18802,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder114 + }, { // dummy114 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -18970,7 +18970,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder115 + }, { // dummy115 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -19138,7 +19138,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder116 + }, { // dummy116 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -19306,7 +19306,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder117 + }, { // dummy117 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -19474,7 +19474,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder118 + }, { // dummy118 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -19642,7 +19642,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder119 + }, { // dummy119 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -19810,7 +19810,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder120 + }, { // dummy120 .channelQuant = {}, .data = 
TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -19978,7 +19978,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder121 + }, { // dummy121 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -20146,7 +20146,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder122 + }, { // dummy122 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -20314,7 +20314,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder123 + }, { // dummy123 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -20482,7 +20482,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder124 + }, { // dummy124 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -20650,7 +20650,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder125 + }, { // dummy125 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -20818,7 +20818,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder126 + }, { // dummy126 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -20986,7 +20986,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder127 + }, { // dummy127 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -21154,7 +21154,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder128 + }, { // dummy128 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -21322,7 +21322,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, 
.zeroPoint = 0 - }, { // placeholder129 + }, { // dummy129 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -21490,7 +21490,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder130 + }, { // dummy130 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -21658,7 +21658,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder131 + }, { // dummy131 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -21826,7 +21826,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder132 + }, { // dummy132 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -21994,7 +21994,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder133 + }, { // dummy133 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -22162,7 +22162,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder134 + }, { // dummy134 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -22330,7 +22330,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder135 + }, { // dummy135 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -22498,7 +22498,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder136 + }, { // dummy136 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -22666,7 +22666,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder137 + }, { // dummy137 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, 
@@ -22834,7 +22834,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder138 + }, { // dummy138 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -23002,7 +23002,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder139 + }, { // dummy139 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -23170,7 +23170,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder140 + }, { // dummy140 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -23338,7 +23338,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder141 + }, { // dummy141 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -23506,7 +23506,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder142 + }, { // dummy142 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -23674,7 +23674,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder143 + }, { // dummy143 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -23842,7 +23842,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder144 + }, { // dummy144 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -24010,7 +24010,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder145 + }, { // dummy145 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -24178,7 +24178,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder146 + }, { // dummy146 
.channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -24346,7 +24346,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder147 + }, { // dummy147 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -24514,7 +24514,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder148 + }, { // dummy148 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -24682,7 +24682,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder149 + }, { // dummy149 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -24850,7 +24850,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder150 + }, { // dummy150 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -25018,7 +25018,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder151 + }, { // dummy151 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -25186,7 +25186,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder152 + }, { // dummy152 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -25354,7 +25354,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder153 + }, { // dummy153 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -25522,7 +25522,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder154 + }, { // dummy154 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -25690,7 +25690,7 @@ .scale = 
0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder155 + }, { // dummy155 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -25858,7 +25858,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder156 + }, { // dummy156 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -26026,7 +26026,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder157 + }, { // dummy157 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -26194,7 +26194,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder158 + }, { // dummy158 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -26362,7 +26362,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder159 + }, { // dummy159 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -26530,7 +26530,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder160 + }, { // dummy160 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -26698,7 +26698,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder161 + }, { // dummy161 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -26866,7 +26866,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder162 + }, { // dummy162 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -27034,7 +27034,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder163 + }, { // dummy163 .channelQuant = {}, 
.data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -27202,7 +27202,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder164 + }, { // dummy164 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -27370,7 +27370,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder165 + }, { // dummy165 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -27538,7 +27538,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder166 + }, { // dummy166 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -27706,7 +27706,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder167 + }, { // dummy167 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -27874,7 +27874,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder168 + }, { // dummy168 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -28042,7 +28042,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder169 + }, { // dummy169 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -28210,7 +28210,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder170 + }, { // dummy170 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -28378,7 +28378,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder171 + }, { // dummy171 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ 
-28546,7 +28546,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder172 + }, { // dummy172 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -28714,7 +28714,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder173 + }, { // dummy173 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -28882,7 +28882,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder174 + }, { // dummy174 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -29050,7 +29050,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder175 + }, { // dummy175 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -29218,7 +29218,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder176 + }, { // dummy176 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -29386,7 +29386,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder177 + }, { // dummy177 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -29554,7 +29554,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder178 + }, { // dummy178 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -29722,7 +29722,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder179 + }, { // dummy179 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -29890,7 +29890,7 @@ .scale = 0.25f, .type = 
TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder180 + }, { // dummy180 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -30058,7 +30058,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder181 + }, { // dummy181 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -30226,7 +30226,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder182 + }, { // dummy182 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -30394,7 +30394,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder183 + }, { // dummy183 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/space_to_batch_quant8_nonzero.example.cpp b/runtime/test/generated/spec_V1_2/space_to_batch_quant8_nonzero.example.cpp index e148250..31327c4 100644 --- a/runtime/test/generated/spec_V1_2/space_to_batch_quant8_nonzero.example.cpp +++ b/runtime/test/generated/spec_V1_2/space_to_batch_quant8_nonzero.example.cpp
@@ -130,7 +130,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 9 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({9}), .dimensions = {1}, @@ -298,7 +298,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 9 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({9}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/space_to_batch_v1_2.example.cpp b/runtime/test/generated/spec_V1_2/space_to_batch_v1_2.example.cpp index 71eaee5..961a815 100644 --- a/runtime/test/generated/spec_V1_2/space_to_batch_v1_2.example.cpp +++ b/runtime/test/generated/spec_V1_2/space_to_batch_v1_2.example.cpp
@@ -150,7 +150,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -338,7 +338,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -526,7 +526,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -714,7 +714,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -902,7 +902,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1090,7 +1090,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1278,7 +1278,7 @@ .scale = 0.1f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -1466,7 +1466,7 @@ .scale = 0.1f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -1654,7 +1654,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = 
TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1842,7 +1842,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2030,7 +2030,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2218,7 +2218,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2406,7 +2406,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -2594,7 +2594,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -2782,7 +2782,7 @@ .scale = 0.1f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -2970,7 +2970,7 @@ .scale = 0.1f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder15 + }, { // dummy15 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -3158,7 +3158,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder16 + }, { // dummy16 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3346,7 +3346,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // 
placeholder17 + }, { // dummy17 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3534,7 +3534,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder18 + }, { // dummy18 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3722,7 +3722,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder19 + }, { // dummy19 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3910,7 +3910,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder20 + }, { // dummy20 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -4098,7 +4098,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder21 + }, { // dummy21 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -4286,7 +4286,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder22 + }, { // dummy22 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -4474,7 +4474,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder23 + }, { // dummy23 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -4662,7 +4662,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder24 + }, { // dummy24 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4850,7 +4850,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder25 + }, { // dummy25 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5038,7 +5038,7 @@ .scale = 0.0f, .type = 
TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder26 + }, { // dummy26 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5226,7 +5226,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder27 + }, { // dummy27 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5414,7 +5414,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder28 + }, { // dummy28 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -5602,7 +5602,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder29 + }, { // dummy29 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -5790,7 +5790,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder30 + }, { // dummy30 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -5978,7 +5978,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder31 + }, { // dummy31 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -6166,7 +6166,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder32 + }, { // dummy32 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -6354,7 +6354,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder33 + }, { // dummy33 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -6542,7 +6542,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder34 + }, { // dummy34 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), 
.dimensions = {1}, @@ -6730,7 +6730,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder35 + }, { // dummy35 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -6918,7 +6918,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder36 + }, { // dummy36 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -7106,7 +7106,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder37 + }, { // dummy37 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -7294,7 +7294,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder38 + }, { // dummy38 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -7482,7 +7482,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder39 + }, { // dummy39 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -7670,7 +7670,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder40 + }, { // dummy40 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -7858,7 +7858,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder41 + }, { // dummy41 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -8046,7 +8046,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder42 + }, { // dummy42 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -8234,7 +8234,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder43 + }, { // dummy43 
.channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -8422,7 +8422,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder44 + }, { // dummy44 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -8610,7 +8610,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder45 + }, { // dummy45 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -8798,7 +8798,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder46 + }, { // dummy46 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -8986,7 +8986,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder47 + }, { // dummy47 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -9174,7 +9174,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder48 + }, { // dummy48 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -9362,7 +9362,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder49 + }, { // dummy49 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -9550,7 +9550,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder50 + }, { // dummy50 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -9738,7 +9738,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder51 + }, { // dummy51 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -9926,7 +9926,7 @@ .scale = 0.0f, .type = 
TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder52 + }, { // dummy52 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -10114,7 +10114,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder53 + }, { // dummy53 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -10302,7 +10302,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder54 + }, { // dummy54 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -10490,7 +10490,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder55 + }, { // dummy55 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -10678,7 +10678,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder56 + }, { // dummy56 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -10866,7 +10866,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder57 + }, { // dummy57 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -11054,7 +11054,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder58 + }, { // dummy58 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -11242,7 +11242,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder59 + }, { // dummy59 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -11430,7 +11430,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder60 + }, { // dummy60 .channelQuant = {}, .data = 
TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -11618,7 +11618,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder61 + }, { // dummy61 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -11806,7 +11806,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder62 + }, { // dummy62 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -11994,7 +11994,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder63 + }, { // dummy63 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/space_to_depth_v1_2.example.cpp b/runtime/test/generated/spec_V1_2/space_to_depth_v1_2.example.cpp index 8dcacd2..0d9095a 100644 --- a/runtime/test/generated/spec_V1_2/space_to_depth_v1_2.example.cpp +++ b/runtime/test/generated/spec_V1_2/space_to_depth_v1_2.example.cpp
@@ -130,7 +130,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -298,7 +298,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -466,7 +466,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -634,7 +634,7 @@ .scale = 0.1f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -802,7 +802,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -970,7 +970,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1138,7 +1138,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1306,7 +1306,7 @@ .scale = 0.1f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -1474,7 +1474,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = 
TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1642,7 +1642,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1810,7 +1810,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1978,7 +1978,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -2146,7 +2146,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2314,7 +2314,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2482,7 +2482,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -2650,7 +2650,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder15 + }, { // dummy15 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -2818,7 +2818,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder16 + }, { // dummy16 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2986,7 +2986,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { 
// placeholder17 + }, { // dummy17 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3154,7 +3154,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder18 + }, { // dummy18 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -3322,7 +3322,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder19 + }, { // dummy19 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -3490,7 +3490,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder20 + }, { // dummy20 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3658,7 +3658,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder21 + }, { // dummy21 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3826,7 +3826,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder22 + }, { // dummy22 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -3994,7 +3994,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder23 + }, { // dummy23 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/split_float_1.example.cpp b/runtime/test/generated/spec_V1_2/split_float_1.example.cpp index 3530287..a10f3ed 100644 --- a/runtime/test/generated/spec_V1_2/split_float_1.example.cpp +++ b/runtime/test/generated/spec_V1_2/split_float_1.example.cpp
@@ -170,7 +170,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -378,7 +378,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -586,7 +586,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/split_float_2.example.cpp b/runtime/test/generated/spec_V1_2/split_float_2.example.cpp index 90cfeb6..19bef26 100644 --- a/runtime/test/generated/spec_V1_2/split_float_2.example.cpp +++ b/runtime/test/generated/spec_V1_2/split_float_2.example.cpp
@@ -150,7 +150,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -338,7 +338,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -526,7 +526,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/split_float_3.example.cpp b/runtime/test/generated/spec_V1_2/split_float_3.example.cpp index a619c71..ec9086b 100644 --- a/runtime/test/generated/spec_V1_2/split_float_3.example.cpp +++ b/runtime/test/generated/spec_V1_2/split_float_3.example.cpp
@@ -170,7 +170,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -378,7 +378,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -586,7 +586,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/split_float_4.example.cpp b/runtime/test/generated/spec_V1_2/split_float_4.example.cpp index b895685..ed03db8 100644 --- a/runtime/test/generated/spec_V1_2/split_float_4.example.cpp +++ b/runtime/test/generated/spec_V1_2/split_float_4.example.cpp
@@ -150,7 +150,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -338,7 +338,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -526,7 +526,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/split_float_5.example.cpp b/runtime/test/generated/spec_V1_2/split_float_5.example.cpp index 514df7a..1a259be 100644 --- a/runtime/test/generated/spec_V1_2/split_float_5.example.cpp +++ b/runtime/test/generated/spec_V1_2/split_float_5.example.cpp
@@ -150,7 +150,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -338,7 +338,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -526,7 +526,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/split_quant8_1.example.cpp b/runtime/test/generated/spec_V1_2/split_quant8_1.example.cpp index 58228d2..04d1563 100644 --- a/runtime/test/generated/spec_V1_2/split_quant8_1.example.cpp +++ b/runtime/test/generated/spec_V1_2/split_quant8_1.example.cpp
@@ -170,7 +170,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -378,7 +378,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/split_quant8_2.example.cpp b/runtime/test/generated/spec_V1_2/split_quant8_2.example.cpp index 4e88f12..9f262dc 100644 --- a/runtime/test/generated/spec_V1_2/split_quant8_2.example.cpp +++ b/runtime/test/generated/spec_V1_2/split_quant8_2.example.cpp
@@ -150,7 +150,7 @@ .scale = 2.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 3 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({3}), .dimensions = {1}, @@ -338,7 +338,7 @@ .scale = 2.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 3 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({3}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/split_quant8_3.example.cpp b/runtime/test/generated/spec_V1_2/split_quant8_3.example.cpp index 6ae0fc4..6ba95b8 100644 --- a/runtime/test/generated/spec_V1_2/split_quant8_3.example.cpp +++ b/runtime/test/generated/spec_V1_2/split_quant8_3.example.cpp
@@ -170,7 +170,7 @@ .scale = 2.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 3 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({3}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/split_quant8_4.example.cpp b/runtime/test/generated/spec_V1_2/split_quant8_4.example.cpp index dfb38e9..872694e 100644 --- a/runtime/test/generated/spec_V1_2/split_quant8_4.example.cpp +++ b/runtime/test/generated/spec_V1_2/split_quant8_4.example.cpp
@@ -150,7 +150,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/squeeze_float16.example.cpp b/runtime/test/generated/spec_V1_2/squeeze_float16.example.cpp index f5ea464..bb458b2 100644 --- a/runtime/test/generated/spec_V1_2/squeeze_float16.example.cpp +++ b/runtime/test/generated/spec_V1_2/squeeze_float16.example.cpp
@@ -110,7 +110,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -258,7 +258,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -406,7 +406,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/strided_slice_float16.example.cpp b/runtime/test/generated/spec_V1_2/strided_slice_float16.example.cpp index 4cf4571..8c39eba 100644 --- a/runtime/test/generated/spec_V1_2/strided_slice_float16.example.cpp +++ b/runtime/test/generated/spec_V1_2/strided_slice_float16.example.cpp
@@ -210,7 +210,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -458,7 +458,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/strided_slice_invalid.example.cpp b/runtime/test/generated/spec_V1_2/strided_slice_invalid.example.cpp index b8ff8fc..4b74494 100644 --- a/runtime/test/generated/spec_V1_2/strided_slice_invalid.example.cpp +++ b/runtime/test/generated/spec_V1_2/strided_slice_invalid.example.cpp
@@ -210,7 +210,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -458,7 +458,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/sub_quantized_different_scales.example.cpp b/runtime/test/generated/spec_V1_2/sub_quantized_different_scales.example.cpp index 9aad16e..0eeeefd 100644 --- a/runtime/test/generated/spec_V1_2/sub_quantized_different_scales.example.cpp +++ b/runtime/test/generated/spec_V1_2/sub_quantized_different_scales.example.cpp
@@ -130,7 +130,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -160,7 +160,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -332,7 +332,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -362,7 +362,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -534,7 +534,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -564,7 +564,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -736,7 +736,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -766,7 +766,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -938,7 +938,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = 
TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -968,7 +968,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 1 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({1}), .dimensions = {1}, @@ -1140,7 +1140,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -1170,7 +1170,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 1 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({1}), .dimensions = {1}, @@ -1342,7 +1342,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -1372,7 +1372,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 1 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({1}), .dimensions = {1}, @@ -1544,7 +1544,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -1574,7 +1574,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 1 - }, { // placeholder15 + }, { // dummy15 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({1}), .dimensions = {1}, @@ -1746,7 +1746,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder16 + }, { // dummy16 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -1776,7 +1776,7 @@ .scale = 0.01f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, 
.zeroPoint = 120 - }, { // placeholder17 + }, { // dummy17 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({120}), .dimensions = {1}, @@ -1948,7 +1948,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder18 + }, { // dummy18 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -1978,7 +1978,7 @@ .scale = 0.01f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 120 - }, { // placeholder19 + }, { // dummy19 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({120}), .dimensions = {1}, @@ -2150,7 +2150,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder20 + }, { // dummy20 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -2180,7 +2180,7 @@ .scale = 0.01f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 120 - }, { // placeholder21 + }, { // dummy21 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({120}), .dimensions = {1}, @@ -2352,7 +2352,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder22 + }, { // dummy22 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -2382,7 +2382,7 @@ .scale = 0.01f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 120 - }, { // placeholder23 + }, { // dummy23 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({120}), .dimensions = {1}, @@ -2554,7 +2554,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder24 + }, { // dummy24 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -2584,7 +2584,7 @@ .scale = 10.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 120 - }, { // placeholder25 + }, { // dummy25 .channelQuant = {}, .data = 
TestBuffer::createFromVector<uint8_t>({120}), .dimensions = {1}, @@ -2756,7 +2756,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder26 + }, { // dummy26 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -2786,7 +2786,7 @@ .scale = 10.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 120 - }, { // placeholder27 + }, { // dummy27 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({120}), .dimensions = {1}, @@ -2958,7 +2958,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder28 + }, { // dummy28 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -2988,7 +2988,7 @@ .scale = 10.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 120 - }, { // placeholder29 + }, { // dummy29 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({120}), .dimensions = {1}, @@ -3160,7 +3160,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder30 + }, { // dummy30 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -3190,7 +3190,7 @@ .scale = 10.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 120 - }, { // placeholder31 + }, { // dummy31 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({120}), .dimensions = {1}, @@ -3362,7 +3362,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 1 - }, { // placeholder32 + }, { // dummy32 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({1}), .dimensions = {1}, @@ -3392,7 +3392,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder33 + }, { // dummy33 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -3564,7 +3564,7 @@ .scale = 1.0f, .type = 
TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 1 - }, { // placeholder34 + }, { // dummy34 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({1}), .dimensions = {1}, @@ -3594,7 +3594,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder35 + }, { // dummy35 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -3766,7 +3766,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 1 - }, { // placeholder36 + }, { // dummy36 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({1}), .dimensions = {1}, @@ -3796,7 +3796,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder37 + }, { // dummy37 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -3968,7 +3968,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 1 - }, { // placeholder38 + }, { // dummy38 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({1}), .dimensions = {1}, @@ -3998,7 +3998,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder39 + }, { // dummy39 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -4170,7 +4170,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 1 - }, { // placeholder40 + }, { // dummy40 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({1}), .dimensions = {1}, @@ -4200,7 +4200,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 1 - }, { // placeholder41 + }, { // dummy41 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({1}), .dimensions = {1}, @@ -4372,7 +4372,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 1 - }, { // placeholder42 + }, { // dummy42 .channelQuant = {}, .data = 
TestBuffer::createFromVector<uint8_t>({1}), .dimensions = {1}, @@ -4402,7 +4402,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 1 - }, { // placeholder43 + }, { // dummy43 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({1}), .dimensions = {1}, @@ -4574,7 +4574,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 1 - }, { // placeholder44 + }, { // dummy44 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({1}), .dimensions = {1}, @@ -4604,7 +4604,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 1 - }, { // placeholder45 + }, { // dummy45 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({1}), .dimensions = {1}, @@ -4776,7 +4776,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 1 - }, { // placeholder46 + }, { // dummy46 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({1}), .dimensions = {1}, @@ -4806,7 +4806,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 1 - }, { // placeholder47 + }, { // dummy47 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({1}), .dimensions = {1}, @@ -4978,7 +4978,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 1 - }, { // placeholder48 + }, { // dummy48 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({1}), .dimensions = {1}, @@ -5008,7 +5008,7 @@ .scale = 0.01f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 120 - }, { // placeholder49 + }, { // dummy49 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({120}), .dimensions = {1}, @@ -5180,7 +5180,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 1 - }, { // placeholder50 + }, { // dummy50 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({1}), .dimensions = {1}, @@ -5210,7 +5210,7 @@ .scale = 0.01f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, 
.zeroPoint = 120 - }, { // placeholder51 + }, { // dummy51 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({120}), .dimensions = {1}, @@ -5382,7 +5382,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 1 - }, { // placeholder52 + }, { // dummy52 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({1}), .dimensions = {1}, @@ -5412,7 +5412,7 @@ .scale = 0.01f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 120 - }, { // placeholder53 + }, { // dummy53 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({120}), .dimensions = {1}, @@ -5584,7 +5584,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 1 - }, { // placeholder54 + }, { // dummy54 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({1}), .dimensions = {1}, @@ -5614,7 +5614,7 @@ .scale = 0.01f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 120 - }, { // placeholder55 + }, { // dummy55 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({120}), .dimensions = {1}, @@ -5786,7 +5786,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 1 - }, { // placeholder56 + }, { // dummy56 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({1}), .dimensions = {1}, @@ -5816,7 +5816,7 @@ .scale = 10.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 120 - }, { // placeholder57 + }, { // dummy57 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({120}), .dimensions = {1}, @@ -5988,7 +5988,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 1 - }, { // placeholder58 + }, { // dummy58 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({1}), .dimensions = {1}, @@ -6018,7 +6018,7 @@ .scale = 10.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 120 - }, { // placeholder59 + }, { // dummy59 .channelQuant = {}, .data = 
TestBuffer::createFromVector<uint8_t>({120}), .dimensions = {1}, @@ -6190,7 +6190,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 1 - }, { // placeholder60 + }, { // dummy60 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({1}), .dimensions = {1}, @@ -6220,7 +6220,7 @@ .scale = 10.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 120 - }, { // placeholder61 + }, { // dummy61 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({120}), .dimensions = {1}, @@ -6392,7 +6392,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 1 - }, { // placeholder62 + }, { // dummy62 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({1}), .dimensions = {1}, @@ -6422,7 +6422,7 @@ .scale = 10.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 120 - }, { // placeholder63 + }, { // dummy63 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({120}), .dimensions = {1}, @@ -6594,7 +6594,7 @@ .scale = 0.01f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 120 - }, { // placeholder64 + }, { // dummy64 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({120}), .dimensions = {1}, @@ -6624,7 +6624,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder65 + }, { // dummy65 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -6796,7 +6796,7 @@ .scale = 0.01f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 120 - }, { // placeholder66 + }, { // dummy66 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({120}), .dimensions = {1}, @@ -6826,7 +6826,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder67 + }, { // dummy67 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -6998,7 +6998,7 @@ .scale = 0.01f, .type = 
TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 120 - }, { // placeholder68 + }, { // dummy68 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({120}), .dimensions = {1}, @@ -7028,7 +7028,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder69 + }, { // dummy69 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -7200,7 +7200,7 @@ .scale = 0.01f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 120 - }, { // placeholder70 + }, { // dummy70 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({120}), .dimensions = {1}, @@ -7230,7 +7230,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder71 + }, { // dummy71 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -7402,7 +7402,7 @@ .scale = 0.01f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 120 - }, { // placeholder72 + }, { // dummy72 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({120}), .dimensions = {1}, @@ -7432,7 +7432,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 1 - }, { // placeholder73 + }, { // dummy73 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({1}), .dimensions = {1}, @@ -7604,7 +7604,7 @@ .scale = 0.01f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 120 - }, { // placeholder74 + }, { // dummy74 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({120}), .dimensions = {1}, @@ -7634,7 +7634,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 1 - }, { // placeholder75 + }, { // dummy75 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({1}), .dimensions = {1}, @@ -7806,7 +7806,7 @@ .scale = 0.01f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 120 - }, { // placeholder76 + }, { // dummy76 .channelQuant = {}, .data = 
TestBuffer::createFromVector<uint8_t>({120}), .dimensions = {1}, @@ -7836,7 +7836,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 1 - }, { // placeholder77 + }, { // dummy77 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({1}), .dimensions = {1}, @@ -8008,7 +8008,7 @@ .scale = 0.01f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 120 - }, { // placeholder78 + }, { // dummy78 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({120}), .dimensions = {1}, @@ -8038,7 +8038,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 1 - }, { // placeholder79 + }, { // dummy79 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({1}), .dimensions = {1}, @@ -8210,7 +8210,7 @@ .scale = 0.01f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 120 - }, { // placeholder80 + }, { // dummy80 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({120}), .dimensions = {1}, @@ -8240,7 +8240,7 @@ .scale = 0.01f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 120 - }, { // placeholder81 + }, { // dummy81 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({120}), .dimensions = {1}, @@ -8412,7 +8412,7 @@ .scale = 0.01f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 120 - }, { // placeholder82 + }, { // dummy82 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({120}), .dimensions = {1}, @@ -8442,7 +8442,7 @@ .scale = 0.01f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 120 - }, { // placeholder83 + }, { // dummy83 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({120}), .dimensions = {1}, @@ -8614,7 +8614,7 @@ .scale = 0.01f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 120 - }, { // placeholder84 + }, { // dummy84 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({120}), .dimensions = {1}, @@ -8644,7 +8644,7 @@ .scale = 0.01f, .type = 
TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 120 - }, { // placeholder85 + }, { // dummy85 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({120}), .dimensions = {1}, @@ -8816,7 +8816,7 @@ .scale = 0.01f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 120 - }, { // placeholder86 + }, { // dummy86 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({120}), .dimensions = {1}, @@ -8846,7 +8846,7 @@ .scale = 0.01f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 120 - }, { // placeholder87 + }, { // dummy87 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({120}), .dimensions = {1}, @@ -9018,7 +9018,7 @@ .scale = 0.01f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 120 - }, { // placeholder88 + }, { // dummy88 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({120}), .dimensions = {1}, @@ -9048,7 +9048,7 @@ .scale = 10.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 120 - }, { // placeholder89 + }, { // dummy89 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({120}), .dimensions = {1}, @@ -9220,7 +9220,7 @@ .scale = 0.01f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 120 - }, { // placeholder90 + }, { // dummy90 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({120}), .dimensions = {1}, @@ -9250,7 +9250,7 @@ .scale = 10.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 120 - }, { // placeholder91 + }, { // dummy91 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({120}), .dimensions = {1}, @@ -9422,7 +9422,7 @@ .scale = 0.01f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 120 - }, { // placeholder92 + }, { // dummy92 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({120}), .dimensions = {1}, @@ -9452,7 +9452,7 @@ .scale = 10.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 120 - }, { // placeholder93 + }, { // dummy93 .channelQuant 
= {}, .data = TestBuffer::createFromVector<uint8_t>({120}), .dimensions = {1}, @@ -9624,7 +9624,7 @@ .scale = 0.01f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 120 - }, { // placeholder94 + }, { // dummy94 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({120}), .dimensions = {1}, @@ -9654,7 +9654,7 @@ .scale = 10.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 120 - }, { // placeholder95 + }, { // dummy95 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({120}), .dimensions = {1}, @@ -9826,7 +9826,7 @@ .scale = 10.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 120 - }, { // placeholder96 + }, { // dummy96 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({120}), .dimensions = {1}, @@ -9856,7 +9856,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder97 + }, { // dummy97 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -10028,7 +10028,7 @@ .scale = 10.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 120 - }, { // placeholder98 + }, { // dummy98 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({120}), .dimensions = {1}, @@ -10058,7 +10058,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder99 + }, { // dummy99 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -10230,7 +10230,7 @@ .scale = 10.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 120 - }, { // placeholder100 + }, { // dummy100 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({120}), .dimensions = {1}, @@ -10260,7 +10260,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder101 + }, { // dummy101 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -10432,7 +10432,7 @@ .scale = 
10.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 120 - }, { // placeholder102 + }, { // dummy102 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({120}), .dimensions = {1}, @@ -10462,7 +10462,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder103 + }, { // dummy103 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -10634,7 +10634,7 @@ .scale = 10.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 120 - }, { // placeholder104 + }, { // dummy104 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({120}), .dimensions = {1}, @@ -10664,7 +10664,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 1 - }, { // placeholder105 + }, { // dummy105 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({1}), .dimensions = {1}, @@ -10836,7 +10836,7 @@ .scale = 10.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 120 - }, { // placeholder106 + }, { // dummy106 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({120}), .dimensions = {1}, @@ -10866,7 +10866,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 1 - }, { // placeholder107 + }, { // dummy107 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({1}), .dimensions = {1}, @@ -11038,7 +11038,7 @@ .scale = 10.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 120 - }, { // placeholder108 + }, { // dummy108 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({120}), .dimensions = {1}, @@ -11068,7 +11068,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 1 - }, { // placeholder109 + }, { // dummy109 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({1}), .dimensions = {1}, @@ -11240,7 +11240,7 @@ .scale = 10.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 120 - }, { // placeholder110 + }, 
{ // dummy110 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({120}), .dimensions = {1}, @@ -11270,7 +11270,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 1 - }, { // placeholder111 + }, { // dummy111 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({1}), .dimensions = {1}, @@ -11442,7 +11442,7 @@ .scale = 10.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 120 - }, { // placeholder112 + }, { // dummy112 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({120}), .dimensions = {1}, @@ -11472,7 +11472,7 @@ .scale = 0.01f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 120 - }, { // placeholder113 + }, { // dummy113 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({120}), .dimensions = {1}, @@ -11644,7 +11644,7 @@ .scale = 10.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 120 - }, { // placeholder114 + }, { // dummy114 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({120}), .dimensions = {1}, @@ -11674,7 +11674,7 @@ .scale = 0.01f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 120 - }, { // placeholder115 + }, { // dummy115 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({120}), .dimensions = {1}, @@ -11846,7 +11846,7 @@ .scale = 10.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 120 - }, { // placeholder116 + }, { // dummy116 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({120}), .dimensions = {1}, @@ -11876,7 +11876,7 @@ .scale = 0.01f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 120 - }, { // placeholder117 + }, { // dummy117 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({120}), .dimensions = {1}, @@ -12048,7 +12048,7 @@ .scale = 10.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 120 - }, { // placeholder118 + }, { // dummy118 .channelQuant = {}, .data = 
TestBuffer::createFromVector<uint8_t>({120}), .dimensions = {1}, @@ -12078,7 +12078,7 @@ .scale = 0.01f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 120 - }, { // placeholder119 + }, { // dummy119 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({120}), .dimensions = {1}, @@ -12250,7 +12250,7 @@ .scale = 10.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 120 - }, { // placeholder120 + }, { // dummy120 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({120}), .dimensions = {1}, @@ -12280,7 +12280,7 @@ .scale = 10.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 120 - }, { // placeholder121 + }, { // dummy121 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({120}), .dimensions = {1}, @@ -12452,7 +12452,7 @@ .scale = 10.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 120 - }, { // placeholder122 + }, { // dummy122 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({120}), .dimensions = {1}, @@ -12482,7 +12482,7 @@ .scale = 10.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 120 - }, { // placeholder123 + }, { // dummy123 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({120}), .dimensions = {1}, @@ -12654,7 +12654,7 @@ .scale = 10.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 120 - }, { // placeholder124 + }, { // dummy124 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({120}), .dimensions = {1}, @@ -12684,7 +12684,7 @@ .scale = 10.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 120 - }, { // placeholder125 + }, { // dummy125 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({120}), .dimensions = {1}, @@ -12856,7 +12856,7 @@ .scale = 10.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 120 - }, { // placeholder126 + }, { // dummy126 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({120}), .dimensions = {1}, @@ -12886,7 
+12886,7 @@ .scale = 10.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 120 - }, { // placeholder127 + }, { // dummy127 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({120}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/sub_v1_2.example.cpp b/runtime/test/generated/spec_V1_2/sub_v1_2.example.cpp index eab2618..d1cf63c 100644 --- a/runtime/test/generated/spec_V1_2/sub_v1_2.example.cpp +++ b/runtime/test/generated/spec_V1_2/sub_v1_2.example.cpp
@@ -130,7 +130,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -160,7 +160,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -332,7 +332,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -362,7 +362,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -534,7 +534,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -564,7 +564,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -736,7 +736,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -766,7 +766,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -938,7 +938,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), 
.dimensions = {1}, @@ -968,7 +968,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1140,7 +1140,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1170,7 +1170,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1342,7 +1342,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1372,7 +1372,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1544,7 +1544,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1574,7 +1574,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder15 + }, { // dummy15 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1746,7 +1746,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder16 + }, { // dummy16 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -1776,7 +1776,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder17 + }, { // dummy17 
.channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/sub_v1_2_broadcast.example.cpp b/runtime/test/generated/spec_V1_2/sub_v1_2_broadcast.example.cpp index 3aa5418..9a92eb3 100644 --- a/runtime/test/generated/spec_V1_2/sub_v1_2_broadcast.example.cpp +++ b/runtime/test/generated/spec_V1_2/sub_v1_2_broadcast.example.cpp
@@ -130,7 +130,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -160,7 +160,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -332,7 +332,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -362,7 +362,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -534,7 +534,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -564,7 +564,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -736,7 +736,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -766,7 +766,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -938,7 +938,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), 
.dimensions = {1}, @@ -968,7 +968,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1140,7 +1140,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1170,7 +1170,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1342,7 +1342,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1372,7 +1372,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1544,7 +1544,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1574,7 +1574,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder15 + }, { // dummy15 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1746,7 +1746,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder16 + }, { // dummy16 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -1776,7 +1776,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder17 + }, { // dummy17 
.channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/svdf_bias_present_float16.example.cpp b/runtime/test/generated/spec_V1_2/svdf_bias_present_float16.example.cpp index 9e0d859..efb559e 100644 --- a/runtime/test/generated/spec_V1_2/svdf_bias_present_float16.example.cpp +++ b/runtime/test/generated/spec_V1_2/svdf_bias_present_float16.example.cpp
@@ -230,7 +230,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -260,7 +260,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -290,7 +290,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -320,7 +320,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -350,7 +350,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/svdf_float16.example.cpp b/runtime/test/generated/spec_V1_2/svdf_float16.example.cpp index 30f094c..9a6535d 100644 --- a/runtime/test/generated/spec_V1_2/svdf_float16.example.cpp +++ b/runtime/test/generated/spec_V1_2/svdf_float16.example.cpp
@@ -230,7 +230,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -260,7 +260,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -290,7 +290,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -320,7 +320,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/svdf_state_float16.example.cpp b/runtime/test/generated/spec_V1_2/svdf_state_float16.example.cpp index 8aa7474..d56f65a 100644 --- a/runtime/test/generated/spec_V1_2/svdf_state_float16.example.cpp +++ b/runtime/test/generated/spec_V1_2/svdf_state_float16.example.cpp
@@ -230,7 +230,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -260,7 +260,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -290,7 +290,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -320,7 +320,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/tanh_v1_2.example.cpp b/runtime/test/generated/spec_V1_2/tanh_v1_2.example.cpp index e8e26cd..4440436 100644 --- a/runtime/test/generated/spec_V1_2/tanh_v1_2.example.cpp +++ b/runtime/test/generated/spec_V1_2/tanh_v1_2.example.cpp
@@ -90,7 +90,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -218,7 +218,7 @@ .scale = 0.05f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/tile_1.example.cpp b/runtime/test/generated/spec_V1_2/tile_1.example.cpp index 14c59e5..a51011f 100644 --- a/runtime/test/generated/spec_V1_2/tile_1.example.cpp +++ b/runtime/test/generated/spec_V1_2/tile_1.example.cpp
@@ -110,7 +110,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -258,7 +258,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -406,7 +406,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -554,7 +554,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 127 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({127}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/tile_2.example.cpp b/runtime/test/generated/spec_V1_2/tile_2.example.cpp index e425b55..35a0c44 100644 --- a/runtime/test/generated/spec_V1_2/tile_2.example.cpp +++ b/runtime/test/generated/spec_V1_2/tile_2.example.cpp
@@ -110,7 +110,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -258,7 +258,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -406,7 +406,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -554,7 +554,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 127 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({127}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/tile_3.example.cpp b/runtime/test/generated/spec_V1_2/tile_3.example.cpp index e578c51..33c6082 100644 --- a/runtime/test/generated/spec_V1_2/tile_3.example.cpp +++ b/runtime/test/generated/spec_V1_2/tile_3.example.cpp
@@ -110,7 +110,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -258,7 +258,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -406,7 +406,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -554,7 +554,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 127 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({127}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/topk_v2.example.cpp b/runtime/test/generated/spec_V1_2/topk_v2.example.cpp index 26db9b6..9c93b7b 100644 --- a/runtime/test/generated/spec_V1_2/topk_v2.example.cpp +++ b/runtime/test/generated/spec_V1_2/topk_v2.example.cpp
@@ -130,7 +130,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -298,7 +298,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -466,7 +466,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -634,7 +634,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -802,7 +802,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -970,7 +970,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1138,7 +1138,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1306,7 +1306,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1474,7 +1474,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = 
TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1642,7 +1642,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1810,7 +1810,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1978,7 +1978,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -2146,7 +2146,7 @@ .scale = 2.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -2314,7 +2314,7 @@ .scale = 2.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -2482,7 +2482,7 @@ .scale = 2.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/transpose_conv2d.example.cpp b/runtime/test/generated/spec_V1_2/transpose_conv2d.example.cpp index edf86cc..95653fd 100644 --- a/runtime/test/generated/spec_V1_2/transpose_conv2d.example.cpp +++ b/runtime/test/generated/spec_V1_2/transpose_conv2d.example.cpp
@@ -250,7 +250,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -538,7 +538,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -568,7 +568,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -598,7 +598,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -894,7 +894,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1182,7 +1182,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1212,7 +1212,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1242,7 +1242,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1538,7 +1538,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), 
.dimensions = {1}, @@ -1826,7 +1826,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -1856,7 +1856,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -2148,7 +2148,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -2436,7 +2436,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -2466,7 +2466,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -2764,7 +2764,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -3058,7 +3058,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder15 + }, { // dummy15 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -3352,7 +3352,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder16 + }, { // dummy16 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -3646,7 +3646,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { 
// placeholder17 + }, { // dummy17 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -3934,7 +3934,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder18 + }, { // dummy18 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -4222,7 +4222,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder19 + }, { // dummy19 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -4252,7 +4252,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder20 + }, { // dummy20 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -4282,7 +4282,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder21 + }, { // dummy21 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -4578,7 +4578,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder22 + }, { // dummy22 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4866,7 +4866,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder23 + }, { // dummy23 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4896,7 +4896,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder24 + }, { // dummy24 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4926,7 +4926,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder25 + }, { // dummy25 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5222,7 +5222,7 @@ .scale = 0.0f, .type 
= TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder26 + }, { // dummy26 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5510,7 +5510,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder27 + }, { // dummy27 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5540,7 +5540,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder28 + }, { // dummy28 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5570,7 +5570,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder29 + }, { // dummy29 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5866,7 +5866,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder30 + }, { // dummy30 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -6154,7 +6154,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder31 + }, { // dummy31 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -6184,7 +6184,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder32 + }, { // dummy32 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -6476,7 +6476,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder33 + }, { // dummy33 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -6764,7 +6764,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder34 + }, { // dummy34 .channelQuant = {}, .data = 
TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -6794,7 +6794,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder35 + }, { // dummy35 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -7092,7 +7092,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder36 + }, { // dummy36 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -7386,7 +7386,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder37 + }, { // dummy37 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -7680,7 +7680,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder38 + }, { // dummy38 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -7974,7 +7974,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder39 + }, { // dummy39 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -8262,7 +8262,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder40 + }, { // dummy40 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -8550,7 +8550,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder41 + }, { // dummy41 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -8580,7 +8580,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder42 + }, { // dummy42 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -8610,7 +8610,7 @@ .scale = 0.0f, .type = 
TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder43 + }, { // dummy43 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -8906,7 +8906,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder44 + }, { // dummy44 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -9194,7 +9194,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder45 + }, { // dummy45 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -9224,7 +9224,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder46 + }, { // dummy46 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -9254,7 +9254,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder47 + }, { // dummy47 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -9550,7 +9550,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder48 + }, { // dummy48 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -9838,7 +9838,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder49 + }, { // dummy49 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -9868,7 +9868,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder50 + }, { // dummy50 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -9898,7 +9898,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder51 + }, { // dummy51 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = 
{1}, @@ -10194,7 +10194,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder52 + }, { // dummy52 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -10482,7 +10482,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder53 + }, { // dummy53 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -10512,7 +10512,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder54 + }, { // dummy54 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -10804,7 +10804,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder55 + }, { // dummy55 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -11092,7 +11092,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder56 + }, { // dummy56 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -11122,7 +11122,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder57 + }, { // dummy57 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -11420,7 +11420,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder58 + }, { // dummy58 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -11714,7 +11714,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder59 + }, { // dummy59 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -12008,7 +12008,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, 
{ // placeholder60 + }, { // dummy60 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -12302,7 +12302,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder61 + }, { // dummy61 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -12590,7 +12590,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder62 + }, { // dummy62 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -12878,7 +12878,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder63 + }, { // dummy63 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -12908,7 +12908,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder64 + }, { // dummy64 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -12938,7 +12938,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder65 + }, { // dummy65 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -13234,7 +13234,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder66 + }, { // dummy66 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -13522,7 +13522,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder67 + }, { // dummy67 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -13552,7 +13552,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder68 + }, { // dummy68 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -13582,7 
+13582,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder69 + }, { // dummy69 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -13878,7 +13878,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder70 + }, { // dummy70 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -14166,7 +14166,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder71 + }, { // dummy71 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -14196,7 +14196,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder72 + }, { // dummy72 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -14226,7 +14226,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder73 + }, { // dummy73 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -14522,7 +14522,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder74 + }, { // dummy74 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -14810,7 +14810,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder75 + }, { // dummy75 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -14840,7 +14840,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder76 + }, { // dummy76 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -15132,7 +15132,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder77 + }, { // dummy77 .channelQuant = {}, 
.data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -15420,7 +15420,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder78 + }, { // dummy78 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -15450,7 +15450,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder79 + }, { // dummy79 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -15748,7 +15748,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder80 + }, { // dummy80 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -16042,7 +16042,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder81 + }, { // dummy81 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -16336,7 +16336,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder82 + }, { // dummy82 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -16630,7 +16630,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder83 + }, { // dummy83 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -16918,7 +16918,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder84 + }, { // dummy84 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -17206,7 +17206,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder85 + }, { // dummy85 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -17236,7 +17236,7 @@ .scale = 
0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder86 + }, { // dummy86 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -17266,7 +17266,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder87 + }, { // dummy87 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -17562,7 +17562,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder88 + }, { // dummy88 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -17850,7 +17850,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder89 + }, { // dummy89 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -17880,7 +17880,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder90 + }, { // dummy90 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -17910,7 +17910,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder91 + }, { // dummy91 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -18206,7 +18206,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder92 + }, { // dummy92 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -18494,7 +18494,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder93 + }, { // dummy93 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -18524,7 +18524,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder94 + }, { // dummy94 .channelQuant = {}, .data = 
TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -18554,7 +18554,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder95 + }, { // dummy95 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -18850,7 +18850,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder96 + }, { // dummy96 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -19138,7 +19138,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder97 + }, { // dummy97 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -19168,7 +19168,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder98 + }, { // dummy98 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -19460,7 +19460,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder99 + }, { // dummy99 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -19748,7 +19748,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder100 + }, { // dummy100 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -19778,7 +19778,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder101 + }, { // dummy101 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -20076,7 +20076,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder102 + }, { // dummy102 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -20370,7 +20370,7 @@ .scale = 0.25f, .type = 
TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder103 + }, { // dummy103 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -20664,7 +20664,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder104 + }, { // dummy104 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -20958,7 +20958,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder105 + }, { // dummy105 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -21246,7 +21246,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder106 + }, { // dummy106 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -21534,7 +21534,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder107 + }, { // dummy107 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -21564,7 +21564,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder108 + }, { // dummy108 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -21594,7 +21594,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder109 + }, { // dummy109 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -21890,7 +21890,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder110 + }, { // dummy110 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -22178,7 +22178,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder111 + }, { // dummy111 .channelQuant = {}, 
.data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -22208,7 +22208,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder112 + }, { // dummy112 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -22238,7 +22238,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder113 + }, { // dummy113 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -22534,7 +22534,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder114 + }, { // dummy114 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -22822,7 +22822,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder115 + }, { // dummy115 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -22852,7 +22852,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder116 + }, { // dummy116 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -22882,7 +22882,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder117 + }, { // dummy117 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -23178,7 +23178,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder118 + }, { // dummy118 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -23466,7 +23466,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder119 + }, { // dummy119 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -23496,7 +23496,7 @@ .scale = 0.5f, .type = 
TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder120 + }, { // dummy120 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -23788,7 +23788,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder121 + }, { // dummy121 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -24076,7 +24076,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder122 + }, { // dummy122 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -24106,7 +24106,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder123 + }, { // dummy123 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -24404,7 +24404,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder124 + }, { // dummy124 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -24698,7 +24698,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder125 + }, { // dummy125 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -24992,7 +24992,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder126 + }, { // dummy126 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -25286,7 +25286,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder127 + }, { // dummy127 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -25574,7 +25574,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder128 + }, { // 
dummy128 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -25862,7 +25862,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder129 + }, { // dummy129 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -25892,7 +25892,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder130 + }, { // dummy130 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -25922,7 +25922,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder131 + }, { // dummy131 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -26218,7 +26218,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder132 + }, { // dummy132 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -26506,7 +26506,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder133 + }, { // dummy133 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -26536,7 +26536,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder134 + }, { // dummy134 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -26566,7 +26566,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder135 + }, { // dummy135 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -26862,7 +26862,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder136 + }, { // dummy136 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -27150,7 +27150,7 @@ .scale = 0.0f, 
.type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder137 + }, { // dummy137 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -27180,7 +27180,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder138 + }, { // dummy138 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -27210,7 +27210,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder139 + }, { // dummy139 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -27506,7 +27506,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder140 + }, { // dummy140 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -27794,7 +27794,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder141 + }, { // dummy141 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -27824,7 +27824,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder142 + }, { // dummy142 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -28116,7 +28116,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder143 + }, { // dummy143 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -28404,7 +28404,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder144 + }, { // dummy144 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -28434,7 +28434,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder145 + }, { // dummy145 .channelQuant = 
{}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -28732,7 +28732,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder146 + }, { // dummy146 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -29026,7 +29026,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder147 + }, { // dummy147 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -29320,7 +29320,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder148 + }, { // dummy148 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -29614,7 +29614,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder149 + }, { // dummy149 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -29902,7 +29902,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder150 + }, { // dummy150 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -30190,7 +30190,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder151 + }, { // dummy151 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -30220,7 +30220,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder152 + }, { // dummy152 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -30250,7 +30250,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder153 + }, { // dummy153 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -30546,7 +30546,7 @@ 
.scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder154 + }, { // dummy154 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -30834,7 +30834,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder155 + }, { // dummy155 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -30864,7 +30864,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder156 + }, { // dummy156 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -30894,7 +30894,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder157 + }, { // dummy157 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -31190,7 +31190,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder158 + }, { // dummy158 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -31478,7 +31478,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder159 + }, { // dummy159 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -31508,7 +31508,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder160 + }, { // dummy160 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -31538,7 +31538,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder161 + }, { // dummy161 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -31834,7 +31834,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder162 + }, { // dummy162 .channelQuant = {}, .data = 
TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -32122,7 +32122,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder163 + }, { // dummy163 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -32152,7 +32152,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder164 + }, { // dummy164 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -32444,7 +32444,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder165 + }, { // dummy165 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -32732,7 +32732,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder166 + }, { // dummy166 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -32762,7 +32762,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder167 + }, { // dummy167 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -33060,7 +33060,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder168 + }, { // dummy168 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -33354,7 +33354,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder169 + }, { // dummy169 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -33648,7 +33648,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder170 + }, { // dummy170 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -33942,7 +33942,7 @@ 
.scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder171 + }, { // dummy171 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -34230,7 +34230,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder172 + }, { // dummy172 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -34518,7 +34518,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder173 + }, { // dummy173 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -34548,7 +34548,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder174 + }, { // dummy174 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -34578,7 +34578,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder175 + }, { // dummy175 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -34874,7 +34874,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder176 + }, { // dummy176 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -35162,7 +35162,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder177 + }, { // dummy177 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -35192,7 +35192,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder178 + }, { // dummy178 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -35222,7 +35222,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder179 + }, { // dummy179 .channelQuant = 
{}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -35518,7 +35518,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder180 + }, { // dummy180 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -35806,7 +35806,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder181 + }, { // dummy181 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -35836,7 +35836,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder182 + }, { // dummy182 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -35866,7 +35866,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder183 + }, { // dummy183 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -36162,7 +36162,7 @@ .scale = 2.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder184 + }, { // dummy184 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -36450,7 +36450,7 @@ .scale = 2.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder185 + }, { // dummy185 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -36480,7 +36480,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder186 + }, { // dummy186 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -36778,7 +36778,7 @@ .scale = 2.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder187 + }, { // dummy187 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -37072,7 +37072,7 @@ .scale = 2.0f, .type = 
TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder188 + }, { // dummy188 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -37360,7 +37360,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder189 + }, { // dummy189 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -37648,7 +37648,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder190 + }, { // dummy190 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -37678,7 +37678,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder191 + }, { // dummy191 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -37708,7 +37708,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder192 + }, { // dummy192 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -38004,7 +38004,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder193 + }, { // dummy193 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -38292,7 +38292,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder194 + }, { // dummy194 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -38322,7 +38322,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder195 + }, { // dummy195 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -38352,7 +38352,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder196 + }, { // dummy196 .channelQuant = {}, .data = 
TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -38648,7 +38648,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder197 + }, { // dummy197 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -38936,7 +38936,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder198 + }, { // dummy198 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -38966,7 +38966,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder199 + }, { // dummy199 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -38996,7 +38996,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder200 + }, { // dummy200 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -39292,7 +39292,7 @@ .scale = 2.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder201 + }, { // dummy201 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -39580,7 +39580,7 @@ .scale = 2.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder202 + }, { // dummy202 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -39610,7 +39610,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder203 + }, { // dummy203 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -39908,7 +39908,7 @@ .scale = 2.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder204 + }, { // dummy204 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -40202,7 +40202,7 @@ .scale = 2.0f, .type = 
TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder205 + }, { // dummy205 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -40490,7 +40490,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder206 + }, { // dummy206 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -40778,7 +40778,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder207 + }, { // dummy207 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -40808,7 +40808,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder208 + }, { // dummy208 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -40838,7 +40838,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder209 + }, { // dummy209 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -41134,7 +41134,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder210 + }, { // dummy210 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -41422,7 +41422,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder211 + }, { // dummy211 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -41452,7 +41452,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder212 + }, { // dummy212 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -41482,7 +41482,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder213 + }, { // dummy213 .channelQuant = {}, .data = 
TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -41778,7 +41778,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder214 + }, { // dummy214 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -42066,7 +42066,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder215 + }, { // dummy215 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -42096,7 +42096,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder216 + }, { // dummy216 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -42126,7 +42126,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder217 + }, { // dummy217 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -42422,7 +42422,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder218 + }, { // dummy218 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -42710,7 +42710,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder219 + }, { // dummy219 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -42740,7 +42740,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder220 + }, { // dummy220 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -43032,7 +43032,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder221 + }, { // dummy221 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -43320,7 +43320,7 @@ .scale = 0.0f, .type = 
TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder222 + }, { // dummy222 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -43350,7 +43350,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder223 + }, { // dummy223 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -43380,7 +43380,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder224 + }, { // dummy224 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -43676,7 +43676,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder225 + }, { // dummy225 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -43964,7 +43964,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder226 + }, { // dummy226 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -43994,7 +43994,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder227 + }, { // dummy227 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -44024,7 +44024,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder228 + }, { // dummy228 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -44320,7 +44320,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder229 + }, { // dummy229 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -44608,7 +44608,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder230 + }, { // dummy230 .channelQuant = {}, .data = 
TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -44638,7 +44638,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder231 + }, { // dummy231 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -44668,7 +44668,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder232 + }, { // dummy232 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -44964,7 +44964,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder233 + }, { // dummy233 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -45252,7 +45252,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder234 + }, { // dummy234 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -45282,7 +45282,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder235 + }, { // dummy235 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -45574,7 +45574,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder236 + }, { // dummy236 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -45862,7 +45862,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder237 + }, { // dummy237 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -45892,7 +45892,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder238 + }, { // dummy238 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -45922,7 +45922,7 @@ .scale = 0.0f, .type = 
TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder239 + }, { // dummy239 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -46218,7 +46218,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder240 + }, { // dummy240 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -46506,7 +46506,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder241 + }, { // dummy241 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -46536,7 +46536,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder242 + }, { // dummy242 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -46566,7 +46566,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder243 + }, { // dummy243 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -46862,7 +46862,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder244 + }, { // dummy244 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -47150,7 +47150,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder245 + }, { // dummy245 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -47180,7 +47180,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder246 + }, { // dummy246 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -47210,7 +47210,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder247 + }, { // dummy247 .channelQuant = {}, .data = 
TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -47506,7 +47506,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 10 - }, { // placeholder248 + }, { // dummy248 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({10}), .dimensions = {1}, @@ -47794,7 +47794,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 10 - }, { // placeholder249 + }, { // dummy249 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({10}), .dimensions = {1}, @@ -47824,7 +47824,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder250 + }, { // dummy250 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -48116,7 +48116,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder251 + }, { // dummy251 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -48404,7 +48404,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder252 + }, { // dummy252 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -48434,7 +48434,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder253 + }, { // dummy253 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -48464,7 +48464,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder254 + }, { // dummy254 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -48760,7 +48760,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder255 + }, { // dummy255 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -49048,7 +49048,7 @@ .scale = 0.0f, .type = 
TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder256 + }, { // dummy256 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -49078,7 +49078,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder257 + }, { // dummy257 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -49108,7 +49108,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder258 + }, { // dummy258 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -49404,7 +49404,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder259 + }, { // dummy259 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -49692,7 +49692,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder260 + }, { // dummy260 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -49722,7 +49722,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder261 + }, { // dummy261 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -49752,7 +49752,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder262 + }, { // dummy262 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -50048,7 +50048,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 10 - }, { // placeholder263 + }, { // dummy263 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({10}), .dimensions = {1}, @@ -50336,7 +50336,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 10 - }, { // placeholder264 + }, { // dummy264 .channelQuant = {}, .data = 
TestBuffer::createFromVector<uint8_t>({10}), .dimensions = {1}, @@ -50366,7 +50366,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder265 + }, { // dummy265 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -50658,7 +50658,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder266 + }, { // dummy266 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -50946,7 +50946,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder267 + }, { // dummy267 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -50976,7 +50976,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder268 + }, { // dummy268 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -51006,7 +51006,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder269 + }, { // dummy269 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -51342,7 +51342,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder270 + }, { // dummy270 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -51670,7 +51670,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder271 + }, { // dummy271 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -51700,7 +51700,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder272 + }, { // dummy272 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -51730,7 +51730,7 @@ .scale = 0.0f, .type = 
TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder273 + }, { // dummy273 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -52066,7 +52066,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder274 + }, { // dummy274 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -52394,7 +52394,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder275 + }, { // dummy275 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -52424,7 +52424,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder276 + }, { // dummy276 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -52454,7 +52454,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder277 + }, { // dummy277 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -52790,7 +52790,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder278 + }, { // dummy278 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -53118,7 +53118,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder279 + }, { // dummy279 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -53148,7 +53148,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder280 + }, { // dummy280 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -53480,7 +53480,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder281 + }, { // dummy281 .channelQuant = {}, .data = 
TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -53808,7 +53808,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder282 + }, { // dummy282 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -53838,7 +53838,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder283 + }, { // dummy283 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -53868,7 +53868,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder284 + }, { // dummy284 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -54204,7 +54204,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder285 + }, { // dummy285 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -54532,7 +54532,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder286 + }, { // dummy286 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -54562,7 +54562,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder287 + }, { // dummy287 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -54592,7 +54592,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder288 + }, { // dummy288 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -54928,7 +54928,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder289 + }, { // dummy289 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -55256,7 +55256,7 @@ .scale = 0.0f, .type = 
TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder290 + }, { // dummy290 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -55286,7 +55286,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder291 + }, { // dummy291 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -55316,7 +55316,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder292 + }, { // dummy292 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -55652,7 +55652,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder293 + }, { // dummy293 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -55980,7 +55980,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder294 + }, { // dummy294 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -56010,7 +56010,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder295 + }, { // dummy295 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -56342,7 +56342,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder296 + }, { // dummy296 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -56670,7 +56670,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder297 + }, { // dummy297 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -56700,7 +56700,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder298 + }, { // dummy298 .channelQuant = {}, .data = 
TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -56730,7 +56730,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder299 + }, { // dummy299 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -62546,7 +62546,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder300 + }, { // dummy300 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -62834,7 +62834,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder301 + }, { // dummy301 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -62864,7 +62864,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder302 + }, { // dummy302 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -62894,7 +62894,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder303 + }, { // dummy303 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -63190,7 +63190,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder304 + }, { // dummy304 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -63478,7 +63478,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder305 + }, { // dummy305 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -63508,7 +63508,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder306 + }, { // dummy306 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -63538,7 +63538,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, 
.zeroPoint = 0 - }, { // placeholder307 + }, { // dummy307 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -63834,7 +63834,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder308 + }, { // dummy308 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -64122,7 +64122,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder309 + }, { // dummy309 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -64152,7 +64152,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder310 + }, { // dummy310 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -64444,7 +64444,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder311 + }, { // dummy311 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -64732,7 +64732,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder312 + }, { // dummy312 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -64762,7 +64762,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder313 + }, { // dummy313 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -64792,7 +64792,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder314 + }, { // dummy314 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -65088,7 +65088,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder315 + }, { // dummy315 .channelQuant = {}, .data = 
TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -65376,7 +65376,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder316 + }, { // dummy316 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -65406,7 +65406,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder317 + }, { // dummy317 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -65436,7 +65436,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder318 + }, { // dummy318 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -65732,7 +65732,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder319 + }, { // dummy319 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -66020,7 +66020,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder320 + }, { // dummy320 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -66050,7 +66050,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder321 + }, { // dummy321 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -66080,7 +66080,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder322 + }, { // dummy322 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -66376,7 +66376,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder323 + }, { // dummy323 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -66664,7 +66664,7 @@ .scale = 0.5f, .type = 
TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder324 + }, { // dummy324 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -66694,7 +66694,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder325 + }, { // dummy325 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -66986,7 +66986,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder326 + }, { // dummy326 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -67274,7 +67274,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder327 + }, { // dummy327 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -67304,7 +67304,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder328 + }, { // dummy328 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -67334,7 +67334,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder329 + }, { // dummy329 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/transpose_conv2d_large.example.cpp b/runtime/test/generated/spec_V1_2/transpose_conv2d_large.example.cpp index 3c2db392..da4b9e0 100644 --- a/runtime/test/generated/spec_V1_2/transpose_conv2d_large.example.cpp +++ b/runtime/test/generated/spec_V1_2/transpose_conv2d_large.example.cpp
@@ -250,7 +250,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -538,7 +538,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -568,7 +568,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -866,7 +866,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -1160,7 +1160,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/transpose_float16.example.cpp b/runtime/test/generated/spec_V1_2/transpose_float16.example.cpp index 09eb6b1..73ba862 100644 --- a/runtime/test/generated/spec_V1_2/transpose_float16.example.cpp +++ b/runtime/test/generated/spec_V1_2/transpose_float16.example.cpp
@@ -110,7 +110,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -258,7 +258,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/transpose_v1_2.example.cpp b/runtime/test/generated/spec_V1_2/transpose_v1_2.example.cpp index e7e447f..539cce2 100644 --- a/runtime/test/generated/spec_V1_2/transpose_v1_2.example.cpp +++ b/runtime/test/generated/spec_V1_2/transpose_v1_2.example.cpp
@@ -110,7 +110,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -258,7 +258,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -406,7 +406,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/unidirectional_sequence_lstm_1step.example.cpp b/runtime/test/generated/spec_V1_2/unidirectional_sequence_lstm_1step.example.cpp index 0ddac5d..1f4ccfa 100644 --- a/runtime/test/generated/spec_V1_2/unidirectional_sequence_lstm_1step.example.cpp +++ b/runtime/test/generated/spec_V1_2/unidirectional_sequence_lstm_1step.example.cpp
@@ -630,7 +630,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -660,7 +660,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -690,7 +690,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -720,7 +720,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -750,7 +750,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -780,7 +780,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -810,7 +810,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -840,7 +840,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -870,7 +870,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = 
{1}, @@ -900,7 +900,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -930,7 +930,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -960,7 +960,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -990,7 +990,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1020,7 +1020,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1050,7 +1050,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1080,7 +1080,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder15 + }, { // dummy15 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1110,7 +1110,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder16 + }, { // dummy16 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1140,7 +1140,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder17 + }, { // dummy17 .channelQuant = {}, .data = 
TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1170,7 +1170,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder18 + }, { // dummy18 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1200,7 +1200,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder19 + }, { // dummy19 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1230,7 +1230,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder20 + }, { // dummy20 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1260,7 +1260,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder21 + }, { // dummy21 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1290,7 +1290,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder22 + }, { // dummy22 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/unidirectional_sequence_lstm_batch_major_norm_peephole_projection.example.cpp b/runtime/test/generated/spec_V1_2/unidirectional_sequence_lstm_batch_major_norm_peephole_projection.example.cpp index d7a669c..eef91ce 100644 --- a/runtime/test/generated/spec_V1_2/unidirectional_sequence_lstm_batch_major_norm_peephole_projection.example.cpp +++ b/runtime/test/generated/spec_V1_2/unidirectional_sequence_lstm_batch_major_norm_peephole_projection.example.cpp
@@ -630,7 +630,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -660,7 +660,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -690,7 +690,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -720,7 +720,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -750,7 +750,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -780,7 +780,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -810,7 +810,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -840,7 +840,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -870,7 +870,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = 
{1}, @@ -900,7 +900,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -930,7 +930,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -960,7 +960,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -990,7 +990,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1020,7 +1020,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1050,7 +1050,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1080,7 +1080,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder15 + }, { // dummy15 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1110,7 +1110,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder16 + }, { // dummy16 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1140,7 +1140,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder17 + }, { // dummy17 .channelQuant = {}, .data = 
TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1170,7 +1170,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder18 + }, { // dummy18 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1200,7 +1200,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder19 + }, { // dummy19 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1230,7 +1230,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder20 + }, { // dummy20 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1260,7 +1260,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder21 + }, { // dummy21 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1290,7 +1290,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder22 + }, { // dummy22 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/unidirectional_sequence_lstm_batch_major_peephole_projection_bias.example.cpp b/runtime/test/generated/spec_V1_2/unidirectional_sequence_lstm_batch_major_peephole_projection_bias.example.cpp index 2e7eb17..73b2918 100644 --- a/runtime/test/generated/spec_V1_2/unidirectional_sequence_lstm_batch_major_peephole_projection_bias.example.cpp +++ b/runtime/test/generated/spec_V1_2/unidirectional_sequence_lstm_batch_major_peephole_projection_bias.example.cpp
@@ -630,7 +630,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -660,7 +660,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -690,7 +690,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -720,7 +720,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -750,7 +750,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -780,7 +780,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -810,7 +810,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -840,7 +840,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -870,7 +870,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = 
{1}, @@ -900,7 +900,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -930,7 +930,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -960,7 +960,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -990,7 +990,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1020,7 +1020,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1050,7 +1050,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1080,7 +1080,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder15 + }, { // dummy15 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1110,7 +1110,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder16 + }, { // dummy16 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1140,7 +1140,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder17 + }, { // dummy17 .channelQuant = {}, .data = 
TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1170,7 +1170,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder18 + }, { // dummy18 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1200,7 +1200,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder19 + }, { // dummy19 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/unidirectional_sequence_lstm_cifg_peephole.example.cpp b/runtime/test/generated/spec_V1_2/unidirectional_sequence_lstm_cifg_peephole.example.cpp index 6b7f1d8..bf62dd9 100644 --- a/runtime/test/generated/spec_V1_2/unidirectional_sequence_lstm_cifg_peephole.example.cpp +++ b/runtime/test/generated/spec_V1_2/unidirectional_sequence_lstm_cifg_peephole.example.cpp
@@ -630,7 +630,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -660,7 +660,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -690,7 +690,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -720,7 +720,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -750,7 +750,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -780,7 +780,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -810,7 +810,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -840,7 +840,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -870,7 +870,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = 
{1}, @@ -900,7 +900,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -930,7 +930,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -960,7 +960,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -990,7 +990,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1020,7 +1020,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/unidirectional_sequence_lstm_f16_batch_major.example.cpp b/runtime/test/generated/spec_V1_2/unidirectional_sequence_lstm_f16_batch_major.example.cpp index 3c3030a..709b279 100644 --- a/runtime/test/generated/spec_V1_2/unidirectional_sequence_lstm_f16_batch_major.example.cpp +++ b/runtime/test/generated/spec_V1_2/unidirectional_sequence_lstm_f16_batch_major.example.cpp
@@ -630,7 +630,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -660,7 +660,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -690,7 +690,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -720,7 +720,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -750,7 +750,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -780,7 +780,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -810,7 +810,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -840,7 +840,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -870,7 +870,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = 
TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -900,7 +900,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -930,7 +930,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -960,7 +960,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -990,7 +990,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1020,7 +1020,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1050,7 +1050,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/unidirectional_sequence_lstm_f16_norm_peephole_projection.example.cpp b/runtime/test/generated/spec_V1_2/unidirectional_sequence_lstm_f16_norm_peephole_projection.example.cpp index 14c9692..0c35bce 100644 --- a/runtime/test/generated/spec_V1_2/unidirectional_sequence_lstm_f16_norm_peephole_projection.example.cpp +++ b/runtime/test/generated/spec_V1_2/unidirectional_sequence_lstm_f16_norm_peephole_projection.example.cpp
@@ -630,7 +630,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -660,7 +660,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -690,7 +690,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -720,7 +720,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -750,7 +750,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -780,7 +780,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -810,7 +810,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -840,7 +840,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -870,7 +870,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = 
TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -900,7 +900,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -930,7 +930,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -960,7 +960,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -990,7 +990,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1020,7 +1020,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1050,7 +1050,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1080,7 +1080,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder15 + }, { // dummy15 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1110,7 +1110,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder16 + }, { // dummy16 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1140,7 +1140,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // 
placeholder17 + }, { // dummy17 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1170,7 +1170,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder18 + }, { // dummy18 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1200,7 +1200,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder19 + }, { // dummy19 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1230,7 +1230,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder20 + }, { // dummy20 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1260,7 +1260,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder21 + }, { // dummy21 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1290,7 +1290,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder22 + }, { // dummy22 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/unidirectional_sequence_lstm_layer_norm_cifg_peephole.example.cpp b/runtime/test/generated/spec_V1_2/unidirectional_sequence_lstm_layer_norm_cifg_peephole.example.cpp index 61b9152..672c49c 100644 --- a/runtime/test/generated/spec_V1_2/unidirectional_sequence_lstm_layer_norm_cifg_peephole.example.cpp +++ b/runtime/test/generated/spec_V1_2/unidirectional_sequence_lstm_layer_norm_cifg_peephole.example.cpp
@@ -630,7 +630,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -660,7 +660,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -690,7 +690,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -720,7 +720,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -750,7 +750,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -780,7 +780,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -810,7 +810,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -840,7 +840,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -870,7 +870,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = 
{1}, @@ -900,7 +900,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -930,7 +930,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -960,7 +960,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -990,7 +990,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1020,7 +1020,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1050,7 +1050,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1080,7 +1080,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder15 + }, { // dummy15 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1110,7 +1110,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder16 + }, { // dummy16 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1140,7 +1140,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder17 + }, { // dummy17 .channelQuant = {}, .data = 
TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1876,7 +1876,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder18 + }, { // dummy18 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1906,7 +1906,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder19 + }, { // dummy19 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1936,7 +1936,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder20 + }, { // dummy20 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1966,7 +1966,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder21 + }, { // dummy21 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1996,7 +1996,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder22 + }, { // dummy22 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2026,7 +2026,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder23 + }, { // dummy23 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2056,7 +2056,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder24 + }, { // dummy24 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2086,7 +2086,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder25 + }, { // dummy25 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2116,7 +2116,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder26 + 
}, { // dummy26 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2146,7 +2146,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder27 + }, { // dummy27 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2176,7 +2176,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder28 + }, { // dummy28 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2206,7 +2206,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder29 + }, { // dummy29 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2236,7 +2236,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder30 + }, { // dummy30 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2266,7 +2266,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder31 + }, { // dummy31 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2296,7 +2296,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder32 + }, { // dummy32 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2326,7 +2326,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder33 + }, { // dummy33 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2356,7 +2356,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder34 + }, { // dummy34 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2386,7 +2386,7 @@ .scale = 0.0f, .type = 
TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder35 + }, { // dummy35 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3122,7 +3122,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder36 + }, { // dummy36 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -3152,7 +3152,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder37 + }, { // dummy37 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -3182,7 +3182,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder38 + }, { // dummy38 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -3212,7 +3212,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder39 + }, { // dummy39 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -3242,7 +3242,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder40 + }, { // dummy40 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -3272,7 +3272,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder41 + }, { // dummy41 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -3302,7 +3302,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder42 + }, { // dummy42 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -3332,7 +3332,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder43 + }, { // dummy43 .channelQuant = {}, .data = 
TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -3362,7 +3362,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder44 + }, { // dummy44 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -3392,7 +3392,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder45 + }, { // dummy45 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -3422,7 +3422,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder46 + }, { // dummy46 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -3452,7 +3452,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder47 + }, { // dummy47 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -3482,7 +3482,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder48 + }, { // dummy48 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -3512,7 +3512,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder49 + }, { // dummy49 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -3542,7 +3542,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder50 + }, { // dummy50 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -3572,7 +3572,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder51 + }, { // dummy51 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -3602,7 +3602,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 
- }, { // placeholder52 + }, { // dummy52 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -3632,7 +3632,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder53 + }, { // dummy53 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/unidirectional_sequence_lstm_norm_peephole_projection.example.cpp b/runtime/test/generated/spec_V1_2/unidirectional_sequence_lstm_norm_peephole_projection.example.cpp index 4ebf0a2..3ab8359 100644 --- a/runtime/test/generated/spec_V1_2/unidirectional_sequence_lstm_norm_peephole_projection.example.cpp +++ b/runtime/test/generated/spec_V1_2/unidirectional_sequence_lstm_norm_peephole_projection.example.cpp
@@ -630,7 +630,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -660,7 +660,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -690,7 +690,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -720,7 +720,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -750,7 +750,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -780,7 +780,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -810,7 +810,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -840,7 +840,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -870,7 +870,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = 
{1}, @@ -900,7 +900,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -930,7 +930,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -960,7 +960,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -990,7 +990,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1020,7 +1020,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1050,7 +1050,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1080,7 +1080,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder15 + }, { // dummy15 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1110,7 +1110,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder16 + }, { // dummy16 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1140,7 +1140,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder17 + }, { // dummy17 .channelQuant = {}, .data = 
TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1170,7 +1170,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder18 + }, { // dummy18 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1200,7 +1200,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder19 + }, { // dummy19 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1230,7 +1230,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder20 + }, { // dummy20 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1260,7 +1260,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder21 + }, { // dummy21 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1290,7 +1290,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder22 + }, { // dummy22 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_2/unidirectional_sequence_rnn.example.cpp b/runtime/test/generated/spec_V1_2/unidirectional_sequence_rnn.example.cpp index 0bf6405..842714d 100644 --- a/runtime/test/generated/spec_V1_2/unidirectional_sequence_rnn.example.cpp +++ b/runtime/test/generated/spec_V1_2/unidirectional_sequence_rnn.example.cpp
@@ -210,7 +210,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -240,7 +240,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -270,7 +270,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -300,7 +300,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -330,7 +330,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -594,7 +594,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -624,7 +624,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -654,7 +654,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -684,7 +684,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = 
{1}, @@ -714,7 +714,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -978,7 +978,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1008,7 +1008,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1038,7 +1038,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1068,7 +1068,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1098,7 +1098,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1362,7 +1362,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder15 + }, { // dummy15 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1392,7 +1392,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder16 + }, { // dummy16 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1422,7 +1422,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder17 + }, { // dummy17 .channelQuant = {}, .data = 
TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1452,7 +1452,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder18 + }, { // dummy18 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1482,7 +1482,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder19 + }, { // dummy19 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1746,7 +1746,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder20 + }, { // dummy20 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1776,7 +1776,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder21 + }, { // dummy21 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1806,7 +1806,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder22 + }, { // dummy22 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1836,7 +1836,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder23 + }, { // dummy23 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1866,7 +1866,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder24 + }, { // dummy24 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2130,7 +2130,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder25 + }, { // dummy25 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -2160,7 +2160,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder26 
+ }, { // dummy26 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -2190,7 +2190,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder27 + }, { // dummy27 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -2220,7 +2220,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder28 + }, { // dummy28 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -2250,7 +2250,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder29 + }, { // dummy29 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_3/add_quant8_signed.example.cpp b/runtime/test/generated/spec_V1_3/add_quant8_signed.example.cpp index 86e790e..529dd1a 100644 --- a/runtime/test/generated/spec_V1_3/add_quant8_signed.example.cpp +++ b/runtime/test/generated/spec_V1_3/add_quant8_signed.example.cpp
@@ -130,7 +130,7 @@ .scale = 2.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -160,7 +160,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -332,7 +332,7 @@ .scale = 2.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -362,7 +362,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_3/argmax_quant8_signed.example.cpp b/runtime/test/generated/spec_V1_3/argmax_quant8_signed.example.cpp index 2fbfa55..c53005e 100644 --- a/runtime/test/generated/spec_V1_3/argmax_quant8_signed.example.cpp +++ b/runtime/test/generated/spec_V1_3/argmax_quant8_signed.example.cpp
@@ -110,7 +110,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -258,7 +258,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -406,7 +406,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_3/argmin_quant8_signed.example.cpp b/runtime/test/generated/spec_V1_3/argmin_quant8_signed.example.cpp index d40b6fa..58017c7 100644 --- a/runtime/test/generated/spec_V1_3/argmin_quant8_signed.example.cpp +++ b/runtime/test/generated/spec_V1_3/argmin_quant8_signed.example.cpp
@@ -110,7 +110,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -258,7 +258,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -406,7 +406,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_3/avg_pool_quant8_signed.example.cpp b/runtime/test/generated/spec_V1_3/avg_pool_quant8_signed.example.cpp index 80ee417..4fc524f 100644 --- a/runtime/test/generated/spec_V1_3/avg_pool_quant8_signed.example.cpp +++ b/runtime/test/generated/spec_V1_3/avg_pool_quant8_signed.example.cpp
@@ -150,7 +150,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -358,7 +358,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -566,7 +566,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -754,7 +754,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -942,7 +942,7 @@ .scale = 0.0625f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -1270,7 +1270,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -1598,7 +1598,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -1926,7 +1926,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -2254,7 +2254,7 @@ .scale = 0.5f, .type = 
TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -2582,7 +2582,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -2910,7 +2910,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -3238,7 +3238,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -3566,7 +3566,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -3834,7 +3834,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -4102,7 +4102,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_3/axis_aligned_bbox_transform_quant8_signed.example.cpp b/runtime/test/generated/spec_V1_3/axis_aligned_bbox_transform_quant8_signed.example.cpp index de23801..08f082c 100644 --- a/runtime/test/generated/spec_V1_3/axis_aligned_bbox_transform_quant8_signed.example.cpp +++ b/runtime/test/generated/spec_V1_3/axis_aligned_bbox_transform_quant8_signed.example.cpp
@@ -150,7 +150,7 @@ .scale = 0.05f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -338,7 +338,7 @@ .scale = 0.05f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_3/batch_to_space_quant8_signed.example.cpp b/runtime/test/generated/spec_V1_3/batch_to_space_quant8_signed.example.cpp index 962c7d4..bfd525d 100644 --- a/runtime/test/generated/spec_V1_3/batch_to_space_quant8_signed.example.cpp +++ b/runtime/test/generated/spec_V1_3/batch_to_space_quant8_signed.example.cpp
@@ -110,7 +110,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -258,7 +258,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -426,7 +426,7 @@ .scale = 0.1f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -594,7 +594,7 @@ .scale = 0.1f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -762,7 +762,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -930,7 +930,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_3/bidirectional_sequence_lstm.example.cpp b/runtime/test/generated/spec_V1_3/bidirectional_sequence_lstm.example.cpp index a985546..a7772bd 100644 --- a/runtime/test/generated/spec_V1_3/bidirectional_sequence_lstm.example.cpp +++ b/runtime/test/generated/spec_V1_3/bidirectional_sequence_lstm.example.cpp
@@ -1310,7 +1310,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1340,7 +1340,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1370,7 +1370,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1400,7 +1400,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1430,7 +1430,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1460,7 +1460,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1490,7 +1490,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1520,7 +1520,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1550,7 +1550,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = 
TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1580,7 +1580,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1610,7 +1610,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1640,7 +1640,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1670,7 +1670,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1700,7 +1700,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1730,7 +1730,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1760,7 +1760,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder15 + }, { // dummy15 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1790,7 +1790,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder16 + }, { // dummy16 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1820,7 +1820,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder17 + }, 
{ // dummy17 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1850,7 +1850,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder18 + }, { // dummy18 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1880,7 +1880,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder19 + }, { // dummy19 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1910,7 +1910,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder20 + }, { // dummy20 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1940,7 +1940,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder21 + }, { // dummy21 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1970,7 +1970,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder22 + }, { // dummy22 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2000,7 +2000,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder23 + }, { // dummy23 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2030,7 +2030,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder24 + }, { // dummy24 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2060,7 +2060,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder25 + }, { // dummy25 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2090,7 +2090,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, 
.zeroPoint = 0 - }, { // placeholder26 + }, { // dummy26 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2120,7 +2120,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder27 + }, { // dummy27 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2150,7 +2150,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder28 + }, { // dummy28 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2180,7 +2180,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder29 + }, { // dummy29 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3644,7 +3644,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder30 + }, { // dummy30 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3674,7 +3674,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder31 + }, { // dummy31 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3704,7 +3704,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder32 + }, { // dummy32 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3734,7 +3734,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder33 + }, { // dummy33 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3764,7 +3764,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder34 + }, { // dummy34 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3794,7 +3794,7 @@ .scale = 
0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder35 + }, { // dummy35 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3824,7 +3824,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder36 + }, { // dummy36 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3854,7 +3854,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder37 + }, { // dummy37 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3884,7 +3884,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder38 + }, { // dummy38 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3914,7 +3914,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder39 + }, { // dummy39 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3944,7 +3944,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder40 + }, { // dummy40 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3974,7 +3974,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder41 + }, { // dummy41 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4004,7 +4004,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder42 + }, { // dummy42 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4034,7 +4034,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder43 + }, { // dummy43 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), 
.dimensions = {1}, @@ -4064,7 +4064,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder44 + }, { // dummy44 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4094,7 +4094,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder45 + }, { // dummy45 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4124,7 +4124,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder46 + }, { // dummy46 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4154,7 +4154,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder47 + }, { // dummy47 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4184,7 +4184,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder48 + }, { // dummy48 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4214,7 +4214,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder49 + }, { // dummy49 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4244,7 +4244,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder50 + }, { // dummy50 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4274,7 +4274,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder51 + }, { // dummy51 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4304,7 +4304,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder52 + }, { // dummy52 .channelQuant = {}, .data = 
TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4334,7 +4334,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder53 + }, { // dummy53 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4364,7 +4364,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder54 + }, { // dummy54 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4394,7 +4394,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder55 + }, { // dummy55 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4424,7 +4424,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder56 + }, { // dummy56 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4454,7 +4454,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder57 + }, { // dummy57 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4484,7 +4484,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder58 + }, { // dummy58 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4514,7 +4514,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder59 + }, { // dummy59 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_3/bidirectional_sequence_lstm_state_output.example.cpp b/runtime/test/generated/spec_V1_3/bidirectional_sequence_lstm_state_output.example.cpp index ea294e0..61274df 100644 --- a/runtime/test/generated/spec_V1_3/bidirectional_sequence_lstm_state_output.example.cpp +++ b/runtime/test/generated/spec_V1_3/bidirectional_sequence_lstm_state_output.example.cpp
@@ -1390,7 +1390,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1420,7 +1420,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1450,7 +1450,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1480,7 +1480,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1510,7 +1510,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1540,7 +1540,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1570,7 +1570,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1600,7 +1600,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1630,7 +1630,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = 
TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1660,7 +1660,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1690,7 +1690,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1720,7 +1720,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1750,7 +1750,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1780,7 +1780,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1810,7 +1810,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1840,7 +1840,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder15 + }, { // dummy15 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1870,7 +1870,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder16 + }, { // dummy16 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1900,7 +1900,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder17 + }, 
{ // dummy17 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1930,7 +1930,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder18 + }, { // dummy18 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1960,7 +1960,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder19 + }, { // dummy19 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1990,7 +1990,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder20 + }, { // dummy20 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2020,7 +2020,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder21 + }, { // dummy21 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2050,7 +2050,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder22 + }, { // dummy22 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2080,7 +2080,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder23 + }, { // dummy23 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2110,7 +2110,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder24 + }, { // dummy24 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2140,7 +2140,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder25 + }, { // dummy25 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2170,7 +2170,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, 
.zeroPoint = 0 - }, { // placeholder26 + }, { // dummy26 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2200,7 +2200,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder27 + }, { // dummy27 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2230,7 +2230,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder28 + }, { // dummy28 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_3/bidirectional_sequence_rnn_1_3.example.cpp b/runtime/test/generated/spec_V1_3/bidirectional_sequence_rnn_1_3.example.cpp index a4fd7a9..886f860 100644 --- a/runtime/test/generated/spec_V1_3/bidirectional_sequence_rnn_1_3.example.cpp +++ b/runtime/test/generated/spec_V1_3/bidirectional_sequence_rnn_1_3.example.cpp
@@ -390,7 +390,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -420,7 +420,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -450,7 +450,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -480,7 +480,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -510,7 +510,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -540,7 +540,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -570,7 +570,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -600,7 +600,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -630,7 +630,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = 
{1}, @@ -660,7 +660,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1124,7 +1124,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1154,7 +1154,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1184,7 +1184,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1214,7 +1214,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1244,7 +1244,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1274,7 +1274,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder15 + }, { // dummy15 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1304,7 +1304,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder16 + }, { // dummy16 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1334,7 +1334,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder17 + }, { // dummy17 .channelQuant = {}, .data = 
TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1364,7 +1364,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder18 + }, { // dummy18 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1394,7 +1394,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder19 + }, { // dummy19 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1858,7 +1858,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder20 + }, { // dummy20 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1888,7 +1888,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder21 + }, { // dummy21 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1918,7 +1918,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder22 + }, { // dummy22 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1948,7 +1948,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder23 + }, { // dummy23 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1978,7 +1978,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder24 + }, { // dummy24 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -2008,7 +2008,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder25 + }, { // dummy25 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -2038,7 +2038,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { 
// placeholder26 + }, { // dummy26 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -2068,7 +2068,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder27 + }, { // dummy27 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -2098,7 +2098,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder28 + }, { // dummy28 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -2128,7 +2128,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder29 + }, { // dummy29 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -2592,7 +2592,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder30 + }, { // dummy30 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2622,7 +2622,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder31 + }, { // dummy31 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2652,7 +2652,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder32 + }, { // dummy32 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2682,7 +2682,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder33 + }, { // dummy33 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2712,7 +2712,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder34 + }, { // dummy34 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2742,7 +2742,7 @@ .scale = 0.0f, .type = 
TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder35 + }, { // dummy35 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2772,7 +2772,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder36 + }, { // dummy36 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2802,7 +2802,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder37 + }, { // dummy37 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2832,7 +2832,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder38 + }, { // dummy38 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2862,7 +2862,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder39 + }, { // dummy39 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3326,7 +3326,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder40 + }, { // dummy40 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3356,7 +3356,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder41 + }, { // dummy41 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3386,7 +3386,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder42 + }, { // dummy42 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3416,7 +3416,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder43 + }, { // dummy43 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, 
@@ -3446,7 +3446,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder44 + }, { // dummy44 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3476,7 +3476,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder45 + }, { // dummy45 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3506,7 +3506,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder46 + }, { // dummy46 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3536,7 +3536,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder47 + }, { // dummy47 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3566,7 +3566,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder48 + }, { // dummy48 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3596,7 +3596,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder49 + }, { // dummy49 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4060,7 +4060,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder50 + }, { // dummy50 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -4090,7 +4090,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder51 + }, { // dummy51 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -4120,7 +4120,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder52 + }, { // dummy52 .channelQuant = {}, .data = 
TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -4150,7 +4150,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder53 + }, { // dummy53 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -4180,7 +4180,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder54 + }, { // dummy54 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -4210,7 +4210,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder55 + }, { // dummy55 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -4240,7 +4240,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder56 + }, { // dummy56 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -4270,7 +4270,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder57 + }, { // dummy57 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -4300,7 +4300,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder58 + }, { // dummy58 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -4330,7 +4330,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder59 + }, { // dummy59 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -4774,7 +4774,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder60 + }, { // dummy60 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4804,7 +4804,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - 
}, { // placeholder61 + }, { // dummy61 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4834,7 +4834,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder62 + }, { // dummy62 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4864,7 +4864,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder63 + }, { // dummy63 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4894,7 +4894,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder64 + }, { // dummy64 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4924,7 +4924,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder65 + }, { // dummy65 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4954,7 +4954,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder66 + }, { // dummy66 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4984,7 +4984,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder67 + }, { // dummy67 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5014,7 +5014,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder68 + }, { // dummy68 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5044,7 +5044,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder69 + }, { // dummy69 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5488,7 +5488,7 @@ .scale = 0.0f, .type = 
TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder70 + }, { // dummy70 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5518,7 +5518,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder71 + }, { // dummy71 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5548,7 +5548,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder72 + }, { // dummy72 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5578,7 +5578,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder73 + }, { // dummy73 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5608,7 +5608,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder74 + }, { // dummy74 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5638,7 +5638,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder75 + }, { // dummy75 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5668,7 +5668,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder76 + }, { // dummy76 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5698,7 +5698,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder77 + }, { // dummy77 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5728,7 +5728,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder78 + }, { // dummy78 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, 
@@ -5758,7 +5758,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder79 + }, { // dummy79 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -6202,7 +6202,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder80 + }, { // dummy80 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -6232,7 +6232,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder81 + }, { // dummy81 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -6262,7 +6262,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder82 + }, { // dummy82 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -6292,7 +6292,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder83 + }, { // dummy83 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -6322,7 +6322,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder84 + }, { // dummy84 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -6352,7 +6352,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder85 + }, { // dummy85 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -6382,7 +6382,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder86 + }, { // dummy86 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -6412,7 +6412,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder87 + }, { // dummy87 .channelQuant = {}, .data = 
TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -6442,7 +6442,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder88 + }, { // dummy88 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -6472,7 +6472,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder89 + }, { // dummy89 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_3/bidirectional_sequence_rnn_state_output.example.cpp b/runtime/test/generated/spec_V1_3/bidirectional_sequence_rnn_state_output.example.cpp index 6ca3326..5ada9cc 100644 --- a/runtime/test/generated/spec_V1_3/bidirectional_sequence_rnn_state_output.example.cpp +++ b/runtime/test/generated/spec_V1_3/bidirectional_sequence_rnn_state_output.example.cpp
@@ -430,7 +430,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -460,7 +460,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -490,7 +490,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -520,7 +520,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -550,7 +550,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -580,7 +580,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -610,7 +610,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -640,7 +640,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -670,7 +670,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = 
{1}, @@ -1170,7 +1170,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1200,7 +1200,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1230,7 +1230,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1260,7 +1260,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1290,7 +1290,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1320,7 +1320,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1350,7 +1350,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder15 + }, { // dummy15 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1380,7 +1380,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder16 + }, { // dummy16 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1410,7 +1410,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder17 + }, { // dummy17 .channelQuant = {}, .data = 
TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1910,7 +1910,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder18 + }, { // dummy18 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1940,7 +1940,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder19 + }, { // dummy19 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1970,7 +1970,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder20 + }, { // dummy20 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -2000,7 +2000,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder21 + }, { // dummy21 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -2030,7 +2030,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder22 + }, { // dummy22 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -2060,7 +2060,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder23 + }, { // dummy23 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -2090,7 +2090,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder24 + }, { // dummy24 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -2120,7 +2120,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder25 + }, { // dummy25 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -2150,7 +2150,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - 
}, { // placeholder26 + }, { // dummy26 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -2650,7 +2650,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder27 + }, { // dummy27 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2680,7 +2680,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder28 + }, { // dummy28 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2710,7 +2710,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder29 + }, { // dummy29 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2740,7 +2740,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder30 + }, { // dummy30 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2770,7 +2770,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder31 + }, { // dummy31 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2800,7 +2800,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder32 + }, { // dummy32 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2830,7 +2830,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder33 + }, { // dummy33 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2860,7 +2860,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder34 + }, { // dummy34 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2890,7 +2890,7 @@ .scale = 0.0f, .type = 
TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder35 + }, { // dummy35 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3390,7 +3390,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder36 + }, { // dummy36 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3420,7 +3420,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder37 + }, { // dummy37 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3450,7 +3450,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder38 + }, { // dummy38 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3480,7 +3480,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder39 + }, { // dummy39 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3510,7 +3510,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder40 + }, { // dummy40 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3540,7 +3540,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder41 + }, { // dummy41 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3570,7 +3570,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder42 + }, { // dummy42 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3600,7 +3600,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder43 + }, { // dummy43 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, 
@@ -3630,7 +3630,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder44 + }, { // dummy44 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4130,7 +4130,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder45 + }, { // dummy45 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -4160,7 +4160,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder46 + }, { // dummy46 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -4190,7 +4190,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder47 + }, { // dummy47 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -4220,7 +4220,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder48 + }, { // dummy48 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -4250,7 +4250,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder49 + }, { // dummy49 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -4280,7 +4280,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder50 + }, { // dummy50 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -4310,7 +4310,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder51 + }, { // dummy51 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -4340,7 +4340,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder52 + }, { // dummy52 .channelQuant = {}, .data = 
TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -4370,7 +4370,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder53 + }, { // dummy53 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -4850,7 +4850,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder54 + }, { // dummy54 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4880,7 +4880,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder55 + }, { // dummy55 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4910,7 +4910,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder56 + }, { // dummy56 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4940,7 +4940,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder57 + }, { // dummy57 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4970,7 +4970,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder58 + }, { // dummy58 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5000,7 +5000,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder59 + }, { // dummy59 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5030,7 +5030,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder60 + }, { // dummy60 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5060,7 +5060,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // 
placeholder61 + }, { // dummy61 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5090,7 +5090,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder62 + }, { // dummy62 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5570,7 +5570,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder63 + }, { // dummy63 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5600,7 +5600,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder64 + }, { // dummy64 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5630,7 +5630,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder65 + }, { // dummy65 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5660,7 +5660,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder66 + }, { // dummy66 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5690,7 +5690,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder67 + }, { // dummy67 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5720,7 +5720,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder68 + }, { // dummy68 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5750,7 +5750,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder69 + }, { // dummy69 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5780,7 +5780,7 @@ .scale = 0.0f, .type = 
TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder70 + }, { // dummy70 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5810,7 +5810,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder71 + }, { // dummy71 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -6290,7 +6290,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder72 + }, { // dummy72 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -6320,7 +6320,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder73 + }, { // dummy73 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -6350,7 +6350,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder74 + }, { // dummy74 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -6380,7 +6380,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder75 + }, { // dummy75 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -6410,7 +6410,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder76 + }, { // dummy76 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -6440,7 +6440,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder77 + }, { // dummy77 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -6470,7 +6470,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder78 + }, { // dummy78 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), 
.dimensions = {1}, @@ -6500,7 +6500,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder79 + }, { // dummy79 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -6530,7 +6530,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder80 + }, { // dummy80 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -7030,7 +7030,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder81 + }, { // dummy81 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -7060,7 +7060,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder82 + }, { // dummy82 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -7090,7 +7090,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder83 + }, { // dummy83 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -7120,7 +7120,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder84 + }, { // dummy84 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -7150,7 +7150,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder85 + }, { // dummy85 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -7180,7 +7180,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder86 + }, { // dummy86 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -7210,7 +7210,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder87 + }, { // dummy87 .channelQuant = {}, 
.data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -7240,7 +7240,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder88 + }, { // dummy88 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -7270,7 +7270,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder89 + }, { // dummy89 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -7770,7 +7770,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder90 + }, { // dummy90 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -7800,7 +7800,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder91 + }, { // dummy91 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -7830,7 +7830,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder92 + }, { // dummy92 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -7860,7 +7860,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder93 + }, { // dummy93 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -7890,7 +7890,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder94 + }, { // dummy94 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -7920,7 +7920,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder95 + }, { // dummy95 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -7950,7 +7950,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // 
placeholder96 + }, { // dummy96 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -7980,7 +7980,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder97 + }, { // dummy97 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -8010,7 +8010,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder98 + }, { // dummy98 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -8510,7 +8510,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder99 + }, { // dummy99 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -8540,7 +8540,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder100 + }, { // dummy100 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -8570,7 +8570,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder101 + }, { // dummy101 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -8600,7 +8600,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder102 + }, { // dummy102 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -8630,7 +8630,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder103 + }, { // dummy103 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -8660,7 +8660,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder104 + }, { // dummy104 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -8690,7 +8690,7 @@ .scale = 
0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder105 + }, { // dummy105 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -8720,7 +8720,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder106 + }, { // dummy106 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -8750,7 +8750,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder107 + }, { // dummy107 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -9250,7 +9250,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder108 + }, { // dummy108 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -9280,7 +9280,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder109 + }, { // dummy109 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -9310,7 +9310,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder110 + }, { // dummy110 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -9340,7 +9340,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder111 + }, { // dummy111 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -9370,7 +9370,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder112 + }, { // dummy112 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -9400,7 +9400,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder113 + }, { // dummy113 .channelQuant = {}, .data = 
TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -9430,7 +9430,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder114 + }, { // dummy114 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -9460,7 +9460,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder115 + }, { // dummy115 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -9490,7 +9490,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder116 + }, { // dummy116 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -9520,7 +9520,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder117 + }, { // dummy117 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -9550,7 +9550,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder118 + }, { // dummy118 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -9580,7 +9580,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder119 + }, { // dummy119 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -10092,7 +10092,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder120 + }, { // dummy120 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -10122,7 +10122,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder121 + }, { // dummy121 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -10152,7 +10152,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, 
{ // placeholder122 + }, { // dummy122 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -10182,7 +10182,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder123 + }, { // dummy123 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -10212,7 +10212,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder124 + }, { // dummy124 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -10242,7 +10242,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder125 + }, { // dummy125 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -10272,7 +10272,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder126 + }, { // dummy126 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -10302,7 +10302,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder127 + }, { // dummy127 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -10332,7 +10332,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder128 + }, { // dummy128 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -10362,7 +10362,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder129 + }, { // dummy129 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -10392,7 +10392,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder130 + }, { // dummy130 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -10422,7 +10422,7 
@@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder131 + }, { // dummy131 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -10934,7 +10934,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder132 + }, { // dummy132 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -10964,7 +10964,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder133 + }, { // dummy133 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -10994,7 +10994,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder134 + }, { // dummy134 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -11024,7 +11024,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder135 + }, { // dummy135 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -11054,7 +11054,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder136 + }, { // dummy136 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -11084,7 +11084,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder137 + }, { // dummy137 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -11114,7 +11114,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder138 + }, { // dummy138 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -11144,7 +11144,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder139 + }, { // dummy139 
.channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -11174,7 +11174,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder140 + }, { // dummy140 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -11204,7 +11204,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder141 + }, { // dummy141 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -11234,7 +11234,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder142 + }, { // dummy142 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -11264,7 +11264,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder143 + }, { // dummy143 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -11776,7 +11776,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder144 + }, { // dummy144 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -11806,7 +11806,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder145 + }, { // dummy145 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -11836,7 +11836,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder146 + }, { // dummy146 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -11866,7 +11866,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder147 + }, { // dummy147 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -11896,7 +11896,7 @@ .scale = 0.0f, .type 
= TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder148 + }, { // dummy148 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -11926,7 +11926,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder149 + }, { // dummy149 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -11956,7 +11956,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder150 + }, { // dummy150 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -11986,7 +11986,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder151 + }, { // dummy151 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -12016,7 +12016,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder152 + }, { // dummy152 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -12046,7 +12046,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder153 + }, { // dummy153 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -12076,7 +12076,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder154 + }, { // dummy154 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -12106,7 +12106,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder155 + }, { // dummy155 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -12618,7 +12618,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder156 + }, { // dummy156 .channelQuant = {}, .data = 
TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -12648,7 +12648,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder157 + }, { // dummy157 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -12678,7 +12678,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder158 + }, { // dummy158 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -12708,7 +12708,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder159 + }, { // dummy159 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -12738,7 +12738,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder160 + }, { // dummy160 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -12768,7 +12768,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder161 + }, { // dummy161 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -12798,7 +12798,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder162 + }, { // dummy162 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -12828,7 +12828,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder163 + }, { // dummy163 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -12858,7 +12858,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder164 + }, { // dummy164 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -12888,7 +12888,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, 
.zeroPoint = 0 - }, { // placeholder165 + }, { // dummy165 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -12918,7 +12918,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder166 + }, { // dummy166 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -12948,7 +12948,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder167 + }, { // dummy167 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -13460,7 +13460,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder168 + }, { // dummy168 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -13490,7 +13490,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder169 + }, { // dummy169 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -13520,7 +13520,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder170 + }, { // dummy170 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -13550,7 +13550,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder171 + }, { // dummy171 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -13580,7 +13580,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder172 + }, { // dummy172 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -13610,7 +13610,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder173 + }, { // dummy173 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), 
.dimensions = {1}, @@ -13640,7 +13640,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder174 + }, { // dummy174 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -13670,7 +13670,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder175 + }, { // dummy175 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -13700,7 +13700,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder176 + }, { // dummy176 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -13730,7 +13730,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder177 + }, { // dummy177 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -13760,7 +13760,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder178 + }, { // dummy178 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -13790,7 +13790,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder179 + }, { // dummy179 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_3/box_with_nms_limit_quant8_signed.example.cpp b/runtime/test/generated/spec_V1_3/box_with_nms_limit_quant8_signed.example.cpp index 86a34a4..e917b34 100644 --- a/runtime/test/generated/spec_V1_3/box_with_nms_limit_quant8_signed.example.cpp +++ b/runtime/test/generated/spec_V1_3/box_with_nms_limit_quant8_signed.example.cpp
@@ -310,7 +310,7 @@ .scale = 0.01f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -658,7 +658,7 @@ .scale = 0.01f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -1006,7 +1006,7 @@ .scale = 0.01f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -1354,7 +1354,7 @@ .scale = 0.01f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -1702,7 +1702,7 @@ .scale = 0.01f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -2050,7 +2050,7 @@ .scale = 0.01f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_3/cast_identity.example.cpp b/runtime/test/generated/spec_V1_3/cast_identity.example.cpp index b13c730..8522a95 100644 --- a/runtime/test/generated/spec_V1_3/cast_identity.example.cpp +++ b/runtime/test/generated/spec_V1_3/cast_identity.example.cpp
@@ -90,7 +90,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -218,7 +218,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -346,7 +346,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -568,7 +568,7 @@ .scale = 4.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_3/channel_shuffle_quant8_signed.example.cpp b/runtime/test/generated/spec_V1_3/channel_shuffle_quant8_signed.example.cpp index 290c303..2096585 100644 --- a/runtime/test/generated/spec_V1_3/channel_shuffle_quant8_signed.example.cpp +++ b/runtime/test/generated/spec_V1_3/channel_shuffle_quant8_signed.example.cpp
@@ -130,7 +130,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -298,7 +298,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -466,7 +466,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -634,7 +634,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -802,7 +802,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -970,7 +970,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -1138,7 +1138,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -1306,7 +1306,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -1474,7 +1474,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // 
placeholder8 + }, { // dummy8 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -1642,7 +1642,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -1810,7 +1810,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -1978,7 +1978,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -2146,7 +2146,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -2314,7 +2314,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -2482,7 +2482,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -2650,7 +2650,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder15 + }, { // dummy15 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -2818,7 +2818,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder16 + }, { // dummy16 .channelQuant = {}, .data = 
TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -2986,7 +2986,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder17 + }, { // dummy17 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -3154,7 +3154,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder18 + }, { // dummy18 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -3322,7 +3322,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder19 + }, { // dummy19 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_3/concat_quant8_signed.example.cpp b/runtime/test/generated/spec_V1_3/concat_quant8_signed.example.cpp index 76c5aba..d1daf8f 100644 --- a/runtime/test/generated/spec_V1_3/concat_quant8_signed.example.cpp +++ b/runtime/test/generated/spec_V1_3/concat_quant8_signed.example.cpp
@@ -170,7 +170,7 @@ .scale = 0.084f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -1 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-1}), .dimensions = {1}, @@ -200,7 +200,7 @@ .scale = 0.05f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -230,7 +230,7 @@ .scale = 0.089f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -5 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-5}), .dimensions = {1}, @@ -260,7 +260,7 @@ .scale = 0.029f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -480,7 +480,7 @@ .scale = 0.084f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -1 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-1}), .dimensions = {1}, @@ -510,7 +510,7 @@ .scale = 0.05f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -540,7 +540,7 @@ .scale = 0.089f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -5 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-5}), .dimensions = {1}, @@ -570,7 +570,7 @@ .scale = 0.029f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -750,7 +750,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, 
.zeroPoint = -128 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -780,7 +780,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -952,7 +952,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -982,7 +982,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -1154,7 +1154,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -1184,7 +1184,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_3/conv2d_quant8_signed.example.cpp b/runtime/test/generated/spec_V1_3/conv2d_quant8_signed.example.cpp index d3eb421..6d46028 100644 --- a/runtime/test/generated/spec_V1_3/conv2d_quant8_signed.example.cpp +++ b/runtime/test/generated/spec_V1_3/conv2d_quant8_signed.example.cpp
@@ -330,7 +330,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -698,7 +698,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -728,7 +728,7 @@ .scale = 0.125f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -1100,7 +1100,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -1468,7 +1468,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -1498,7 +1498,7 @@ .scale = 0.125f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -1870,7 +1870,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -2238,7 +2238,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -2268,7 +2268,7 @@ .scale = 0.125f, .type = 
TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -2640,7 +2640,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -3008,7 +3008,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -3038,7 +3038,7 @@ .scale = 0.125f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -3350,7 +3350,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -3658,7 +3658,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -3688,7 +3688,7 @@ .scale = 0.125f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -4000,7 +4000,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder15 + }, { // dummy15 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -4308,7 +4308,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint 
= -128 - }, { // placeholder16 + }, { // dummy16 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -4338,7 +4338,7 @@ .scale = 0.125f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder17 + }, { // dummy17 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -4650,7 +4650,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder18 + }, { // dummy18 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -4958,7 +4958,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder19 + }, { // dummy19 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -4988,7 +4988,7 @@ .scale = 0.125f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder20 + }, { // dummy20 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -5300,7 +5300,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder21 + }, { // dummy21 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -5608,7 +5608,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder22 + }, { // dummy22 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -5638,7 +5638,7 @@ .scale = 0.125f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder23 + }, { // dummy23 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -5950,7 +5950,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder24 + }, { // dummy24 
.channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -6258,7 +6258,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder25 + }, { // dummy25 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -6288,7 +6288,7 @@ .scale = 0.125f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder26 + }, { // dummy26 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -6600,7 +6600,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder27 + }, { // dummy27 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -6908,7 +6908,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder28 + }, { // dummy28 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -6938,7 +6938,7 @@ .scale = 0.125f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder29 + }, { // dummy29 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -7256,7 +7256,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder30 + }, { // dummy30 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -7570,7 +7570,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder31 + }, { // dummy31 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -7904,7 +7904,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder32 + }, { // dummy32 .channelQuant = {}, .data = 
TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -8238,7 +8238,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder33 + }, { // dummy33 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -8572,7 +8572,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder34 + }, { // dummy34 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -8906,7 +8906,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder35 + }, { // dummy35 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -9950,7 +9950,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder36 + }, { // dummy36 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -10278,7 +10278,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder37 + }, { // dummy37 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -10308,7 +10308,7 @@ .scale = 0.125f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder38 + }, { // dummy38 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -10646,7 +10646,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder39 + }, { // dummy39 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -10980,7 +10980,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder40 + }, { // dummy40 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, 
@@ -11308,7 +11308,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder41 + }, { // dummy41 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -11636,7 +11636,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder42 + }, { // dummy42 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -11666,7 +11666,7 @@ .scale = 0.125f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder43 + }, { // dummy43 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -12004,7 +12004,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder44 + }, { // dummy44 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -12338,7 +12338,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder45 + }, { // dummy45 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -12606,7 +12606,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -1 - }, { // placeholder46 + }, { // dummy46 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-1}), .dimensions = {1}, @@ -12874,7 +12874,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -1 - }, { // placeholder47 + }, { // dummy47 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-1}), .dimensions = {1}, @@ -12904,7 +12904,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -1 - }, { // placeholder48 + }, { // dummy48 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-1}), .dimensions = {1}, @@ -13182,7 +13182,7 @@ .scale = 0.5f, .type = 
TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -1 - }, { // placeholder49 + }, { // dummy49 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-1}), .dimensions = {1}, @@ -13456,7 +13456,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -1 - }, { // placeholder50 + }, { // dummy50 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-1}), .dimensions = {1}, @@ -13724,7 +13724,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -1 - }, { // placeholder51 + }, { // dummy51 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-1}), .dimensions = {1}, @@ -13992,7 +13992,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -1 - }, { // placeholder52 + }, { // dummy52 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-1}), .dimensions = {1}, @@ -14022,7 +14022,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -1 - }, { // placeholder53 + }, { // dummy53 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-1}), .dimensions = {1}, @@ -14300,7 +14300,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -1 - }, { // placeholder54 + }, { // dummy54 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-1}), .dimensions = {1}, @@ -14574,7 +14574,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -1 - }, { // placeholder55 + }, { // dummy55 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-1}), .dimensions = {1}, @@ -14902,7 +14902,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder56 + }, { // dummy56 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -15230,7 +15230,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, 
{ // placeholder57 + }, { // dummy57 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -15260,7 +15260,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder58 + }, { // dummy58 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -15598,7 +15598,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder59 + }, { // dummy59 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -15932,7 +15932,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder60 + }, { // dummy60 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -16260,7 +16260,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder61 + }, { // dummy61 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -16588,7 +16588,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder62 + }, { // dummy62 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -16618,7 +16618,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder63 + }, { // dummy63 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -16956,7 +16956,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder64 + }, { // dummy64 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -17290,7 +17290,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder65 + }, { // dummy65 
.channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -17618,7 +17618,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder66 + }, { // dummy66 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -17946,7 +17946,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder67 + }, { // dummy67 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -17976,7 +17976,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder68 + }, { // dummy68 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -18314,7 +18314,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder69 + }, { // dummy69 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -18648,7 +18648,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder70 + }, { // dummy70 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -18982,7 +18982,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -1 - }, { // placeholder71 + }, { // dummy71 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-1}), .dimensions = {1}, @@ -19316,7 +19316,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -1 - }, { // placeholder72 + }, { // dummy72 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-1}), .dimensions = {1}, @@ -19644,7 +19644,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder73 + }, { // dummy73 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), 
.dimensions = {1}, @@ -19972,7 +19972,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder74 + }, { // dummy74 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -20002,7 +20002,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder75 + }, { // dummy75 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -20340,7 +20340,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder76 + }, { // dummy76 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -20674,7 +20674,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder77 + }, { // dummy77 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -21008,7 +21008,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -1 - }, { // placeholder78 + }, { // dummy78 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-1}), .dimensions = {1}, @@ -21342,7 +21342,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -1 - }, { // placeholder79 + }, { // dummy79 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-1}), .dimensions = {1}, @@ -21590,7 +21590,7 @@ .scale = 1.0058823529411764f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder80 + }, { // dummy80 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -21838,7 +21838,7 @@ .scale = 1.0058823529411764f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder81 + }, { // dummy81 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -21868,7 +21868,7 @@ .scale = 
1.0058823529411764f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder82 + }, { // dummy82 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -23480,7 +23480,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -1 - }, { // placeholder83 + }, { // dummy83 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-1}), .dimensions = {1}, @@ -23728,7 +23728,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -1 - }, { // placeholder84 + }, { // dummy84 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-1}), .dimensions = {1}, @@ -23758,7 +23758,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -1 - }, { // placeholder85 + }, { // dummy85 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-1}), .dimensions = {1}, @@ -23990,7 +23990,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder86 + }, { // dummy86 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -24218,7 +24218,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder87 + }, { // dummy87 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -24248,7 +24248,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder88 + }, { // dummy88 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -24480,7 +24480,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder89 + }, { // dummy89 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -24510,7 +24510,7 @@ .scale = 0.5f, .type = 
TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder90 + }, { // dummy90 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -24742,7 +24742,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder91 + }, { // dummy91 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -24970,7 +24970,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder92 + }, { // dummy92 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -25000,7 +25000,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder93 + }, { // dummy93 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -25232,7 +25232,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder94 + }, { // dummy94 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -25262,7 +25262,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder95 + }, { // dummy95 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -25494,7 +25494,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder96 + }, { // dummy96 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -25722,7 +25722,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder97 + }, { // dummy97 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -25752,7 +25752,7 @@ .scale = 0.5f, .type = 
TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder98 + }, { // dummy98 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -25984,7 +25984,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder99 + }, { // dummy99 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -26212,7 +26212,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder100 + }, { // dummy100 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -26242,7 +26242,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder101 + }, { // dummy101 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -26474,7 +26474,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder102 + }, { // dummy102 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -26504,7 +26504,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder103 + }, { // dummy103 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_3/depth_to_space_quant8_signed.example.cpp b/runtime/test/generated/spec_V1_3/depth_to_space_quant8_signed.example.cpp index 4df108f..d0b65a9 100644 --- a/runtime/test/generated/spec_V1_3/depth_to_space_quant8_signed.example.cpp +++ b/runtime/test/generated/spec_V1_3/depth_to_space_quant8_signed.example.cpp
@@ -110,7 +110,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -258,7 +258,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -426,7 +426,7 @@ .scale = 0.1f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -594,7 +594,7 @@ .scale = 0.1f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -762,7 +762,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -930,7 +930,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -1098,7 +1098,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -1266,7 +1266,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_3/depthwise_conv2d_quant8_signed.example.cpp b/runtime/test/generated/spec_V1_3/depthwise_conv2d_quant8_signed.example.cpp index 3c22ddf..e14a0fb 100644 --- a/runtime/test/generated/spec_V1_3/depthwise_conv2d_quant8_signed.example.cpp +++ b/runtime/test/generated/spec_V1_3/depthwise_conv2d_quant8_signed.example.cpp
@@ -350,7 +350,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -738,7 +738,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -768,7 +768,7 @@ .scale = 0.01f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -1160,7 +1160,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -1548,7 +1548,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -1578,7 +1578,7 @@ .scale = 0.01f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -1970,7 +1970,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -2358,7 +2358,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -2388,7 +2388,7 @@ .scale = 0.125f, .type = 
TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -2780,7 +2780,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -3168,7 +3168,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -3198,7 +3198,7 @@ .scale = 0.125f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -3530,7 +3530,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -3858,7 +3858,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -3888,7 +3888,7 @@ .scale = 0.01f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -4220,7 +4220,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder15 + }, { // dummy15 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -4548,7 +4548,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint 
= -128 - }, { // placeholder16 + }, { // dummy16 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -4578,7 +4578,7 @@ .scale = 0.01f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder17 + }, { // dummy17 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -4910,7 +4910,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder18 + }, { // dummy18 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -5238,7 +5238,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder19 + }, { // dummy19 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -5268,7 +5268,7 @@ .scale = 0.1f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder20 + }, { // dummy20 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -5600,7 +5600,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder21 + }, { // dummy21 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -5928,7 +5928,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder22 + }, { // dummy22 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -5958,7 +5958,7 @@ .scale = 0.1f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder23 + }, { // dummy23 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -6290,7 +6290,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder24 + }, { // dummy24 
.channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -6618,7 +6618,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder25 + }, { // dummy25 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -6648,7 +6648,7 @@ .scale = 0.125f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder26 + }, { // dummy26 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -6980,7 +6980,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder27 + }, { // dummy27 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -7308,7 +7308,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder28 + }, { // dummy28 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -7338,7 +7338,7 @@ .scale = 0.125f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder29 + }, { // dummy29 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -7676,7 +7676,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder30 + }, { // dummy30 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -8010,7 +8010,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder31 + }, { // dummy31 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -8344,7 +8344,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder32 + }, { // dummy32 .channelQuant = {}, .data = 
TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -8678,7 +8678,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder33 + }, { // dummy33 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -9032,7 +9032,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder34 + }, { // dummy34 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -9386,7 +9386,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder35 + }, { // dummy35 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -9740,7 +9740,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder36 + }, { // dummy36 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -10094,7 +10094,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder37 + }, { // dummy37 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -10342,7 +10342,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -1 - }, { // placeholder38 + }, { // dummy38 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-1}), .dimensions = {1}, @@ -10590,7 +10590,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -1 - }, { // placeholder39 + }, { // dummy39 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-1}), .dimensions = {1}, @@ -10620,7 +10620,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -1 - }, { // placeholder40 + }, { // dummy40 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-1}), .dimensions = {1}, @@ -10872,7 +10872,7 @@ 
.scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder41 + }, { // dummy41 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -11120,7 +11120,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder42 + }, { // dummy42 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -11150,7 +11150,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder43 + }, { // dummy43 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -11402,7 +11402,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder44 + }, { // dummy44 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -11432,7 +11432,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder45 + }, { // dummy45 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -11684,7 +11684,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder46 + }, { // dummy46 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -11932,7 +11932,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder47 + }, { // dummy47 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -11962,7 +11962,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder48 + }, { // dummy48 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -12214,7 +12214,7 @@ .scale = 0.5f, .type = 
TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder49 + }, { // dummy49 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -12244,7 +12244,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder50 + }, { // dummy50 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -12596,7 +12596,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder51 + }, { // dummy51 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -12944,7 +12944,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder52 + }, { // dummy52 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -12974,7 +12974,7 @@ .scale = 0.01f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder53 + }, { // dummy53 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -13326,7 +13326,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder54 + }, { // dummy54 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -13674,7 +13674,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder55 + }, { // dummy55 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -13704,7 +13704,7 @@ .scale = 0.01f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder56 + }, { // dummy56 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -13996,7 +13996,7 @@ .scale = 0.5f, .type = 
TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder57 + }, { // dummy57 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -14284,7 +14284,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder58 + }, { // dummy58 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -14314,7 +14314,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder59 + }, { // dummy59 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -14606,7 +14606,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder60 + }, { // dummy60 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -14894,7 +14894,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder61 + }, { // dummy61 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -14924,7 +14924,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder62 + }, { // dummy62 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -15276,7 +15276,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder63 + }, { // dummy63 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -15624,7 +15624,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder64 + }, { // dummy64 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -15654,7 +15654,7 @@ .scale = 0.125f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // 
placeholder65 + }, { // dummy65 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -16006,7 +16006,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder66 + }, { // dummy66 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -16354,7 +16354,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder67 + }, { // dummy67 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -16384,7 +16384,7 @@ .scale = 0.125f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder68 + }, { // dummy68 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -16736,7 +16736,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder69 + }, { // dummy69 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -17084,7 +17084,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder70 + }, { // dummy70 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -17114,7 +17114,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder71 + }, { // dummy71 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -17466,7 +17466,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder72 + }, { // dummy72 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -17814,7 +17814,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder73 + }, { // dummy73 .channelQuant = {}, .data = 
TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -17844,7 +17844,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder74 + }, { // dummy74 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -18116,7 +18116,7 @@ .scale = 1.0058823529411764f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -1 - }, { // placeholder75 + }, { // dummy75 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-1}), .dimensions = {1}, @@ -18384,7 +18384,7 @@ .scale = 1.0058823529411764f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -1 - }, { // placeholder76 + }, { // dummy76 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-1}), .dimensions = {1}, @@ -18414,7 +18414,7 @@ .scale = 1.0058823529411764f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder77 + }, { // dummy77 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_3/dequantize_quant8_signed.example.cpp b/runtime/test/generated/spec_V1_3/dequantize_quant8_signed.example.cpp index 73eaaef..2871acc 100644 --- a/runtime/test/generated/spec_V1_3/dequantize_quant8_signed.example.cpp +++ b/runtime/test/generated/spec_V1_3/dequantize_quant8_signed.example.cpp
@@ -90,7 +90,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -218,7 +218,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -1 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-1}), .dimensions = {1}, @@ -346,7 +346,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -1 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-1}), .dimensions = {1}, @@ -474,7 +474,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -1 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-1}), .dimensions = {1}, @@ -602,7 +602,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -1 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-1}), .dimensions = {1}, @@ -730,7 +730,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -1 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-1}), .dimensions = {1}, @@ -858,7 +858,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -1 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-1}), .dimensions = {1}, @@ -986,7 +986,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_3/elu.example.cpp b/runtime/test/generated/spec_V1_3/elu.example.cpp index 56305ff..20e983d 100644 --- a/runtime/test/generated/spec_V1_3/elu.example.cpp +++ b/runtime/test/generated/spec_V1_3/elu.example.cpp
@@ -110,7 +110,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -258,7 +258,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -406,7 +406,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -554,7 +554,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -702,7 +702,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -850,7 +850,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_3/embedding_lookup_quant8_signed.example.cpp b/runtime/test/generated/spec_V1_3/embedding_lookup_quant8_signed.example.cpp index 0d9620e..72f44b5 100644 --- a/runtime/test/generated/spec_V1_3/embedding_lookup_quant8_signed.example.cpp +++ b/runtime/test/generated/spec_V1_3/embedding_lookup_quant8_signed.example.cpp
@@ -110,7 +110,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -1 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-1}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_3/embedding_lookup_v1_3.example.cpp b/runtime/test/generated/spec_V1_3/embedding_lookup_v1_3.example.cpp index f0228f7..5faa845 100644 --- a/runtime/test/generated/spec_V1_3/embedding_lookup_v1_3.example.cpp +++ b/runtime/test/generated/spec_V1_3/embedding_lookup_v1_3.example.cpp
@@ -110,7 +110,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_3/equal_quant8_signed.example.cpp b/runtime/test/generated/spec_V1_3/equal_quant8_signed.example.cpp index 417a073..0b20d42 100644 --- a/runtime/test/generated/spec_V1_3/equal_quant8_signed.example.cpp +++ b/runtime/test/generated/spec_V1_3/equal_quant8_signed.example.cpp
@@ -110,7 +110,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -140,7 +140,7 @@ .scale = 2.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -292,7 +292,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -322,7 +322,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 1 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({1}), .dimensions = {1}, @@ -474,7 +474,7 @@ .scale = 1.64771f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -97 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-97}), .dimensions = {1}, @@ -504,7 +504,7 @@ .scale = 1.49725f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 112 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({112}), .dimensions = {1}, @@ -656,7 +656,7 @@ .scale = 1.49725f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 112 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({112}), .dimensions = {1}, @@ -686,7 +686,7 @@ .scale = 1.64771f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -97 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-97}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_3/expand_dims_quant8_signed.example.cpp b/runtime/test/generated/spec_V1_3/expand_dims_quant8_signed.example.cpp index 41c7f75..d8240a4 100644 --- a/runtime/test/generated/spec_V1_3/expand_dims_quant8_signed.example.cpp +++ b/runtime/test/generated/spec_V1_3/expand_dims_quant8_signed.example.cpp
@@ -110,7 +110,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -1 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-1}), .dimensions = {1}, @@ -258,7 +258,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -1 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-1}), .dimensions = {1}, @@ -406,7 +406,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -1 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-1}), .dimensions = {1}, @@ -554,7 +554,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -1 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-1}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_3/fully_connected_quant8_signed.example.cpp b/runtime/test/generated/spec_V1_3/fully_connected_quant8_signed.example.cpp index 3e25360..e62dacf 100644 --- a/runtime/test/generated/spec_V1_3/fully_connected_quant8_signed.example.cpp +++ b/runtime/test/generated/spec_V1_3/fully_connected_quant8_signed.example.cpp
@@ -150,7 +150,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -1 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-1}), .dimensions = {1}, @@ -338,7 +338,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -1 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-1}), .dimensions = {1}, @@ -368,7 +368,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -1 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-1}), .dimensions = {1}, @@ -560,7 +560,7 @@ .scale = 0.2f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -748,7 +748,7 @@ .scale = 0.2f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -778,7 +778,7 @@ .scale = 0.2f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -970,7 +970,7 @@ .scale = 0.2f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -1000,7 +1000,7 @@ .scale = 0.2f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -1192,7 +1192,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, 
.zeroPoint = -128 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -1380,7 +1380,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -1410,7 +1410,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -1602,7 +1602,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -1632,7 +1632,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -1824,7 +1824,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -1 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-1}), .dimensions = {1}, @@ -2012,7 +2012,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -1 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-1}), .dimensions = {1}, @@ -2042,7 +2042,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -8 - }, { // placeholder15 + }, { // dummy15 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-8}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_3/gather_quant8_signed.example.cpp b/runtime/test/generated/spec_V1_3/gather_quant8_signed.example.cpp index d017f4f..bcb85e4 100644 --- a/runtime/test/generated/spec_V1_3/gather_quant8_signed.example.cpp +++ b/runtime/test/generated/spec_V1_3/gather_quant8_signed.example.cpp
@@ -130,7 +130,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -1 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-1}), .dimensions = {1}, @@ -298,7 +298,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -1 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-1}), .dimensions = {1}, @@ -466,7 +466,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -1 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-1}), .dimensions = {1}, @@ -634,7 +634,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -1 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-1}), .dimensions = {1}, @@ -802,7 +802,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -1 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-1}), .dimensions = {1}, @@ -970,7 +970,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -1 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-1}), .dimensions = {1}, @@ -1138,7 +1138,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -1 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-1}), .dimensions = {1}, @@ -1306,7 +1306,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -1 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-1}), .dimensions = {1}, @@ -1474,7 +1474,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -1 - }, { // 
placeholder8 + }, { // dummy8 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-1}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_3/generate_proposals_quant8_signed.example.cpp b/runtime/test/generated/spec_V1_3/generate_proposals_quant8_signed.example.cpp index d02ee63..eecc075 100644 --- a/runtime/test/generated/spec_V1_3/generate_proposals_quant8_signed.example.cpp +++ b/runtime/test/generated/spec_V1_3/generate_proposals_quant8_signed.example.cpp
@@ -330,7 +330,7 @@ .scale = 0.01f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -360,7 +360,7 @@ .scale = 0.05f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -732,7 +732,7 @@ .scale = 0.01f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -762,7 +762,7 @@ .scale = 0.05f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -1134,7 +1134,7 @@ .scale = 0.005f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -1164,7 +1164,7 @@ .scale = 0.1f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -1536,7 +1536,7 @@ .scale = 0.005f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -1566,7 +1566,7 @@ .scale = 0.1f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_3/greater_equal_quant8_signed.example.cpp b/runtime/test/generated/spec_V1_3/greater_equal_quant8_signed.example.cpp index 75285de..a2d1da9 100644 --- a/runtime/test/generated/spec_V1_3/greater_equal_quant8_signed.example.cpp +++ b/runtime/test/generated/spec_V1_3/greater_equal_quant8_signed.example.cpp
@@ -110,7 +110,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -140,7 +140,7 @@ .scale = 2.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -292,7 +292,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -322,7 +322,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 1 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({1}), .dimensions = {1}, @@ -474,7 +474,7 @@ .scale = 1.64771f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -97 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-97}), .dimensions = {1}, @@ -504,7 +504,7 @@ .scale = 1.49725f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 112 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({112}), .dimensions = {1}, @@ -656,7 +656,7 @@ .scale = 1.49725f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 112 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({112}), .dimensions = {1}, @@ -686,7 +686,7 @@ .scale = 1.64771f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -97 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-97}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_3/greater_quant8_signed.example.cpp b/runtime/test/generated/spec_V1_3/greater_quant8_signed.example.cpp index abc7c09..7ae939a 100644 --- a/runtime/test/generated/spec_V1_3/greater_quant8_signed.example.cpp +++ b/runtime/test/generated/spec_V1_3/greater_quant8_signed.example.cpp
@@ -110,7 +110,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -140,7 +140,7 @@ .scale = 2.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -292,7 +292,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -322,7 +322,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 1 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({1}), .dimensions = {1}, @@ -474,7 +474,7 @@ .scale = 1.64771f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -97 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-97}), .dimensions = {1}, @@ -504,7 +504,7 @@ .scale = 1.49725f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 112 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({112}), .dimensions = {1}, @@ -656,7 +656,7 @@ .scale = 1.49725f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 112 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({112}), .dimensions = {1}, @@ -686,7 +686,7 @@ .scale = 1.64771f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -97 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-97}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_3/grouped_conv2d_quant8_signed.example.cpp b/runtime/test/generated/spec_V1_3/grouped_conv2d_quant8_signed.example.cpp index 2ea6682..5ce4174 100644 --- a/runtime/test/generated/spec_V1_3/grouped_conv2d_quant8_signed.example.cpp +++ b/runtime/test/generated/spec_V1_3/grouped_conv2d_quant8_signed.example.cpp
@@ -310,7 +310,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -658,7 +658,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -688,7 +688,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -1040,7 +1040,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -1388,7 +1388,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -1418,7 +1418,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -1776,7 +1776,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -2130,7 +2130,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -2484,7 +2484,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, 
.zeroPoint = -28 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -2838,7 +2838,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -3186,7 +3186,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -3534,7 +3534,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -3564,7 +3564,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -3916,7 +3916,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -4264,7 +4264,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -4294,7 +4294,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder15 + }, { // dummy15 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -4652,7 +4652,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder16 + }, { // dummy16 .channelQuant = 
{}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -5006,7 +5006,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder17 + }, { // dummy17 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -5360,7 +5360,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder18 + }, { // dummy18 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -5714,7 +5714,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder19 + }, { // dummy19 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -6062,7 +6062,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder20 + }, { // dummy20 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -6410,7 +6410,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder21 + }, { // dummy21 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -6440,7 +6440,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder22 + }, { // dummy22 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -6792,7 +6792,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder23 + }, { // dummy23 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -7140,7 +7140,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder24 + }, { // dummy24 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), 
.dimensions = {1}, @@ -7170,7 +7170,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder25 + }, { // dummy25 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -7528,7 +7528,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder26 + }, { // dummy26 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -7882,7 +7882,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder27 + }, { // dummy27 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -8236,7 +8236,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder28 + }, { // dummy28 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -8590,7 +8590,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder29 + }, { // dummy29 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -8938,7 +8938,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder30 + }, { // dummy30 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -9286,7 +9286,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder31 + }, { // dummy31 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -9316,7 +9316,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder32 + }, { // dummy32 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -9668,7 +9668,7 @@ .scale = 0.25f, .type = 
TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder33 + }, { // dummy33 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -10016,7 +10016,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder34 + }, { // dummy34 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -10046,7 +10046,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder35 + }, { // dummy35 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -10404,7 +10404,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder36 + }, { // dummy36 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -10758,7 +10758,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder37 + }, { // dummy37 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -11112,7 +11112,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder38 + }, { // dummy38 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -11466,7 +11466,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder39 + }, { // dummy39 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -11814,7 +11814,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder40 + }, { // dummy40 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -12162,7 +12162,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, 
.zeroPoint = -28 - }, { // placeholder41 + }, { // dummy41 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -12192,7 +12192,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder42 + }, { // dummy42 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -12544,7 +12544,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder43 + }, { // dummy43 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -12892,7 +12892,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder44 + }, { // dummy44 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -12922,7 +12922,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder45 + }, { // dummy45 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -13280,7 +13280,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder46 + }, { // dummy46 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -13634,7 +13634,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder47 + }, { // dummy47 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -13988,7 +13988,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder48 + }, { // dummy48 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -14342,7 +14342,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder49 + }, { // 
dummy49 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -14690,7 +14690,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder50 + }, { // dummy50 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -15038,7 +15038,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder51 + }, { // dummy51 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -15068,7 +15068,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder52 + }, { // dummy52 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -15420,7 +15420,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder53 + }, { // dummy53 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -15768,7 +15768,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder54 + }, { // dummy54 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -15798,7 +15798,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder55 + }, { // dummy55 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -16156,7 +16156,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder56 + }, { // dummy56 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -16510,7 +16510,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder57 + }, { // dummy57 .channelQuant = {}, .data = 
TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -16864,7 +16864,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder58 + }, { // dummy58 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -17218,7 +17218,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder59 + }, { // dummy59 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -17566,7 +17566,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder60 + }, { // dummy60 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -17914,7 +17914,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder61 + }, { // dummy61 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -17944,7 +17944,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder62 + }, { // dummy62 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -18296,7 +18296,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder63 + }, { // dummy63 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -18644,7 +18644,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder64 + }, { // dummy64 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -18674,7 +18674,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder65 + }, { // dummy65 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), 
.dimensions = {1}, @@ -19032,7 +19032,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder66 + }, { // dummy66 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -19386,7 +19386,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder67 + }, { // dummy67 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -19740,7 +19740,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder68 + }, { // dummy68 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -20094,7 +20094,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder69 + }, { // dummy69 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -20442,7 +20442,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder70 + }, { // dummy70 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -20790,7 +20790,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder71 + }, { // dummy71 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -20820,7 +20820,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder72 + }, { // dummy72 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -21172,7 +21172,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder73 + }, { // dummy73 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -21520,7 +21520,7 @@ .scale = 
0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder74 + }, { // dummy74 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -21550,7 +21550,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder75 + }, { // dummy75 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -21908,7 +21908,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder76 + }, { // dummy76 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -22262,7 +22262,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder77 + }, { // dummy77 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -22616,7 +22616,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder78 + }, { // dummy78 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -22970,7 +22970,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder79 + }, { // dummy79 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -23258,7 +23258,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder80 + }, { // dummy80 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -23546,7 +23546,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder81 + }, { // dummy81 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -23576,7 +23576,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, 
.zeroPoint = -128 - }, { // placeholder82 + }, { // dummy82 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -23874,7 +23874,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder83 + }, { // dummy83 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -24168,7 +24168,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder84 + }, { // dummy84 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -24456,7 +24456,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder85 + }, { // dummy85 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -24744,7 +24744,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder86 + }, { // dummy86 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -24774,7 +24774,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder87 + }, { // dummy87 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -25072,7 +25072,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder88 + }, { // dummy88 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -25366,7 +25366,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder89 + }, { // dummy89 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -25654,7 +25654,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder90 + }, { // dummy90 
.channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -25942,7 +25942,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder91 + }, { // dummy91 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -25972,7 +25972,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder92 + }, { // dummy92 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -26270,7 +26270,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder93 + }, { // dummy93 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -26564,7 +26564,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder94 + }, { // dummy94 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -26852,7 +26852,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder95 + }, { // dummy95 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -27140,7 +27140,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder96 + }, { // dummy96 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -27170,7 +27170,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder97 + }, { // dummy97 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -27468,7 +27468,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder98 + }, { // dummy98 .channelQuant = {}, .data = 
TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -27762,7 +27762,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder99 + }, { // dummy99 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_3/hard_swish.example.cpp b/runtime/test/generated/spec_V1_3/hard_swish.example.cpp index 031d2f0..41ec186 100644 --- a/runtime/test/generated/spec_V1_3/hard_swish.example.cpp +++ b/runtime/test/generated/spec_V1_3/hard_swish.example.cpp
@@ -90,7 +90,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -218,7 +218,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -346,7 +346,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -474,7 +474,7 @@ .scale = 0.078125f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -602,7 +602,7 @@ .scale = 0.078125f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -730,7 +730,7 @@ .scale = 0.078125f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -858,7 +858,7 @@ .scale = 0.078125f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_3/heatmap_max_keypoint_quant8_signed.example.cpp b/runtime/test/generated/spec_V1_3/heatmap_max_keypoint_quant8_signed.example.cpp index 7dc535e..70cf1ca 100644 --- a/runtime/test/generated/spec_V1_3/heatmap_max_keypoint_quant8_signed.example.cpp +++ b/runtime/test/generated/spec_V1_3/heatmap_max_keypoint_quant8_signed.example.cpp
@@ -150,7 +150,7 @@ .scale = 0.01f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -338,7 +338,7 @@ .scale = 0.01f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -526,7 +526,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -714,7 +714,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_3/if_same_branch_model.example.cpp b/runtime/test/generated/spec_V1_3/if_same_branch_model.example.cpp index b1ef962..b6da10a 100644 --- a/runtime/test/generated/spec_V1_3/if_same_branch_model.example.cpp +++ b/runtime/test/generated/spec_V1_3/if_same_branch_model.example.cpp
@@ -199,7 +199,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -349,7 +349,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -645,7 +645,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -795,7 +795,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1091,7 +1091,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1241,7 +1241,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1663,7 +1663,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder15 + }, { // dummy15 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -1813,7 +1813,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder16 + }, { // dummy16 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -2109,7 +2109,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 100 - }, { // placeholder20 + }, { // dummy20 .channelQuant = {}, .data = 
TestBuffer::createFromVector<int8_t>({100}), .dimensions = {1}, @@ -2259,7 +2259,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 100 - }, { // placeholder21 + }, { // dummy21 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({100}), .dimensions = {1}, @@ -2555,7 +2555,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder25 + }, { // dummy25 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2705,7 +2705,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder26 + }, { // dummy26 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3001,7 +3001,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder30 + }, { // dummy30 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3151,7 +3151,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder31 + }, { // dummy31 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3447,7 +3447,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder35 + }, { // dummy35 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -3597,7 +3597,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder36 + }, { // dummy36 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -4019,7 +4019,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder40 + }, { // dummy40 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -4169,7 +4169,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, 
.zeroPoint = 100 - }, { // placeholder41 + }, { // dummy41 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -4465,7 +4465,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 100 - }, { // placeholder45 + }, { // dummy45 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({100}), .dimensions = {1}, @@ -4615,7 +4615,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 100 - }, { // placeholder46 + }, { // dummy46 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({100}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_3/if_simple.example.cpp b/runtime/test/generated/spec_V1_3/if_simple.example.cpp index 1abc689..ed4ecf8 100644 --- a/runtime/test/generated/spec_V1_3/if_simple.example.cpp +++ b/runtime/test/generated/spec_V1_3/if_simple.example.cpp
@@ -248,7 +248,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -447,7 +447,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -841,7 +841,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1040,7 +1040,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1434,7 +1434,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1633,7 +1633,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -2202,7 +2202,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder15 + }, { // dummy15 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -2401,7 +2401,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder16 + }, { // dummy16 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -2795,7 +2795,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 100 - }, { // placeholder20 + }, { // dummy20 .channelQuant = {}, .data = 
TestBuffer::createFromVector<int8_t>({100}), .dimensions = {1}, @@ -2994,7 +2994,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 100 - }, { // placeholder21 + }, { // dummy21 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({100}), .dimensions = {1}, @@ -3388,7 +3388,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder25 + }, { // dummy25 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3587,7 +3587,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder26 + }, { // dummy26 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3981,7 +3981,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder30 + }, { // dummy30 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4180,7 +4180,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder31 + }, { // dummy31 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4574,7 +4574,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder35 + }, { // dummy35 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -4773,7 +4773,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder36 + }, { // dummy36 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -5342,7 +5342,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder40 + }, { // dummy40 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -5541,7 +5541,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, 
.zeroPoint = 100 - }, { // placeholder41 + }, { // dummy41 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -5935,7 +5935,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 100 - }, { // placeholder45 + }, { // dummy45 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({100}), .dimensions = {1}, @@ -6134,7 +6134,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 100 - }, { // placeholder46 + }, { // dummy46 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({100}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_3/l2_normalization_quant8_signed.example.cpp b/runtime/test/generated/spec_V1_3/l2_normalization_quant8_signed.example.cpp index 3385329..112b5a2 100644 --- a/runtime/test/generated/spec_V1_3/l2_normalization_quant8_signed.example.cpp +++ b/runtime/test/generated/spec_V1_3/l2_normalization_quant8_signed.example.cpp
@@ -110,7 +110,7 @@ .scale = 0.1f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -96 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-96}), .dimensions = {1}, @@ -258,7 +258,7 @@ .scale = 0.1f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -96 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-96}), .dimensions = {1}, @@ -406,7 +406,7 @@ .scale = 0.1f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -96 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-96}), .dimensions = {1}, @@ -554,7 +554,7 @@ .scale = 0.1f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -96 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-96}), .dimensions = {1}, @@ -702,7 +702,7 @@ .scale = 0.1f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -96 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-96}), .dimensions = {1}, @@ -850,7 +850,7 @@ .scale = 0.1f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -96 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-96}), .dimensions = {1}, @@ -998,7 +998,7 @@ .scale = 0.1f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -96 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-96}), .dimensions = {1}, @@ -1146,7 +1146,7 @@ .scale = 0.1f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -96 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-96}), .dimensions = {1}, @@ -1294,7 +1294,7 @@ .scale = 0.1f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 
-96 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-96}), .dimensions = {1}, @@ -1442,7 +1442,7 @@ .scale = 0.1f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -96 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-96}), .dimensions = {1}, @@ -1590,7 +1590,7 @@ .scale = 0.1f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -96 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-96}), .dimensions = {1}, @@ -1738,7 +1738,7 @@ .scale = 0.1f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -96 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-96}), .dimensions = {1}, @@ -1886,7 +1886,7 @@ .scale = 0.1f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -96 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-96}), .dimensions = {1}, @@ -2034,7 +2034,7 @@ .scale = 0.1f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -96 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-96}), .dimensions = {1}, @@ -2182,7 +2182,7 @@ .scale = 0.1f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -96 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-96}), .dimensions = {1}, @@ -2330,7 +2330,7 @@ .scale = 0.1f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -96 - }, { // placeholder15 + }, { // dummy15 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-96}), .dimensions = {1}, @@ -2478,7 +2478,7 @@ .scale = 0.1f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -96 - }, { // placeholder16 + }, { // dummy16 .channelQuant = {}, .data = 
TestBuffer::createFromVector<int8_t>({-96}), .dimensions = {1}, @@ -2626,7 +2626,7 @@ .scale = 0.1f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -96 - }, { // placeholder17 + }, { // dummy17 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-96}), .dimensions = {1}, @@ -2774,7 +2774,7 @@ .scale = 0.1f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -96 - }, { // placeholder18 + }, { // dummy18 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-96}), .dimensions = {1}, @@ -2922,7 +2922,7 @@ .scale = 0.1f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -96 - }, { // placeholder19 + }, { // dummy19 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-96}), .dimensions = {1}, @@ -3050,7 +3050,7 @@ .scale = 0.1f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -96 - }, { // placeholder20 + }, { // dummy20 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-96}), .dimensions = {1}, @@ -3178,7 +3178,7 @@ .scale = 0.1f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -96 - }, { // placeholder21 + }, { // dummy21 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-96}), .dimensions = {1}, @@ -3306,7 +3306,7 @@ .scale = 0.1f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -96 - }, { // placeholder22 + }, { // dummy22 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-96}), .dimensions = {1}, @@ -3434,7 +3434,7 @@ .scale = 0.1f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -96 - }, { // placeholder23 + }, { // dummy23 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-96}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_3/l2_normalization_zeros.example.cpp b/runtime/test/generated/spec_V1_3/l2_normalization_zeros.example.cpp index f1ea696..94099b1 100644 --- a/runtime/test/generated/spec_V1_3/l2_normalization_zeros.example.cpp +++ b/runtime/test/generated/spec_V1_3/l2_normalization_zeros.example.cpp
@@ -110,7 +110,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -258,7 +258,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -406,7 +406,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -554,7 +554,7 @@ .scale = 0.1f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 32 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({32}), .dimensions = {1}, @@ -702,7 +702,7 @@ .scale = 0.1f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -96 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-96}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_3/less_equal_quant8_signed.example.cpp b/runtime/test/generated/spec_V1_3/less_equal_quant8_signed.example.cpp index 63eea0a..73b66dd 100644 --- a/runtime/test/generated/spec_V1_3/less_equal_quant8_signed.example.cpp +++ b/runtime/test/generated/spec_V1_3/less_equal_quant8_signed.example.cpp
@@ -110,7 +110,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -140,7 +140,7 @@ .scale = 2.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -292,7 +292,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -322,7 +322,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 1 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({1}), .dimensions = {1}, @@ -474,7 +474,7 @@ .scale = 1.64771f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -97 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-97}), .dimensions = {1}, @@ -504,7 +504,7 @@ .scale = 1.49725f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 112 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({112}), .dimensions = {1}, @@ -656,7 +656,7 @@ .scale = 1.49725f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 112 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({112}), .dimensions = {1}, @@ -686,7 +686,7 @@ .scale = 1.64771f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -97 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-97}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_3/less_quant8_signed.example.cpp b/runtime/test/generated/spec_V1_3/less_quant8_signed.example.cpp index 4d00e24..6147f53 100644 --- a/runtime/test/generated/spec_V1_3/less_quant8_signed.example.cpp +++ b/runtime/test/generated/spec_V1_3/less_quant8_signed.example.cpp
@@ -110,7 +110,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -140,7 +140,7 @@ .scale = 2.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -292,7 +292,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -322,7 +322,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 1 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({1}), .dimensions = {1}, @@ -474,7 +474,7 @@ .scale = 1.64771f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -97 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-97}), .dimensions = {1}, @@ -504,7 +504,7 @@ .scale = 1.49725f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 112 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({112}), .dimensions = {1}, @@ -656,7 +656,7 @@ .scale = 1.49725f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 112 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({112}), .dimensions = {1}, @@ -686,7 +686,7 @@ .scale = 1.64771f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -97 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-97}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_3/logistic_quant8_signed.example.cpp b/runtime/test/generated/spec_V1_3/logistic_quant8_signed.example.cpp index 7179036..9f2ca39 100644 --- a/runtime/test/generated/spec_V1_3/logistic_quant8_signed.example.cpp +++ b/runtime/test/generated/spec_V1_3/logistic_quant8_signed.example.cpp
@@ -90,7 +90,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -218,7 +218,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_3/max_pool_quant8_signed.example.cpp b/runtime/test/generated/spec_V1_3/max_pool_quant8_signed.example.cpp index 21456be..61ea71f 100644 --- a/runtime/test/generated/spec_V1_3/max_pool_quant8_signed.example.cpp +++ b/runtime/test/generated/spec_V1_3/max_pool_quant8_signed.example.cpp
@@ -150,7 +150,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -358,7 +358,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -566,7 +566,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -754,7 +754,7 @@ .scale = 0.0625f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -1082,7 +1082,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -1410,7 +1410,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -1738,7 +1738,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -2066,7 +2066,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -2394,7 +2394,7 @@ .scale = 0.25f, .type = 
TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -2722,7 +2722,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -2990,7 +2990,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -3258,7 +3258,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_3/maximum_quant8_signed.example.cpp b/runtime/test/generated/spec_V1_3/maximum_quant8_signed.example.cpp index 5e80990..b92a009 100644 --- a/runtime/test/generated/spec_V1_3/maximum_quant8_signed.example.cpp +++ b/runtime/test/generated/spec_V1_3/maximum_quant8_signed.example.cpp
@@ -110,7 +110,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -1 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-1}), .dimensions = {1}, @@ -140,7 +140,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -292,7 +292,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -1 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-1}), .dimensions = {1}, @@ -322,7 +322,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -474,7 +474,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -504,7 +504,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_3/mean_quant8_signed.example.cpp b/runtime/test/generated/spec_V1_3/mean_quant8_signed.example.cpp index 8ddb754..af3c7d2 100644 --- a/runtime/test/generated/spec_V1_3/mean_quant8_signed.example.cpp +++ b/runtime/test/generated/spec_V1_3/mean_quant8_signed.example.cpp
@@ -130,7 +130,7 @@ .scale = 0.8f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -123 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-123}), .dimensions = {1}, @@ -298,7 +298,7 @@ .scale = 0.8f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -123 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-123}), .dimensions = {1}, @@ -466,7 +466,7 @@ .scale = 0.8f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -123 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-123}), .dimensions = {1}, @@ -634,7 +634,7 @@ .scale = 0.8f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -123 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-123}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_3/minimum_quant8_signed.example.cpp b/runtime/test/generated/spec_V1_3/minimum_quant8_signed.example.cpp index a7b4b32..39a6c70 100644 --- a/runtime/test/generated/spec_V1_3/minimum_quant8_signed.example.cpp +++ b/runtime/test/generated/spec_V1_3/minimum_quant8_signed.example.cpp
@@ -110,7 +110,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -1 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-1}), .dimensions = {1}, @@ -140,7 +140,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -292,7 +292,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -1 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-1}), .dimensions = {1}, @@ -322,7 +322,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -474,7 +474,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -504,7 +504,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_3/mul_quant8_signed.example.cpp b/runtime/test/generated/spec_V1_3/mul_quant8_signed.example.cpp index acb3922..d483f9b 100644 --- a/runtime/test/generated/spec_V1_3/mul_quant8_signed.example.cpp +++ b/runtime/test/generated/spec_V1_3/mul_quant8_signed.example.cpp
@@ -130,7 +130,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -160,7 +160,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -332,7 +332,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -362,7 +362,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_3/not_equal_quant8_signed.example.cpp b/runtime/test/generated/spec_V1_3/not_equal_quant8_signed.example.cpp index 4cb5e91..dfe3c6e 100644 --- a/runtime/test/generated/spec_V1_3/not_equal_quant8_signed.example.cpp +++ b/runtime/test/generated/spec_V1_3/not_equal_quant8_signed.example.cpp
@@ -110,7 +110,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -140,7 +140,7 @@ .scale = 2.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -292,7 +292,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -322,7 +322,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 1 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({1}), .dimensions = {1}, @@ -474,7 +474,7 @@ .scale = 1.64771f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -97 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-97}), .dimensions = {1}, @@ -504,7 +504,7 @@ .scale = 1.49725f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 112 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({112}), .dimensions = {1}, @@ -656,7 +656,7 @@ .scale = 1.49725f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 112 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({112}), .dimensions = {1}, @@ -686,7 +686,7 @@ .scale = 1.64771f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -97 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-97}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_3/pad_quant8_signed.example.cpp b/runtime/test/generated/spec_V1_3/pad_quant8_signed.example.cpp index b4de32c..2eb1f62 100644 --- a/runtime/test/generated/spec_V1_3/pad_quant8_signed.example.cpp +++ b/runtime/test/generated/spec_V1_3/pad_quant8_signed.example.cpp
@@ -110,7 +110,7 @@ .scale = 2.3f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -258,7 +258,7 @@ .scale = 2.3f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -406,7 +406,7 @@ .scale = 2.3f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -554,7 +554,7 @@ .scale = 2.3f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -702,7 +702,7 @@ .scale = 2.3f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -850,7 +850,7 @@ .scale = 2.3f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -998,7 +998,7 @@ .scale = 2.3f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -119 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-119}), .dimensions = {1}, @@ -1146,7 +1146,7 @@ .scale = 2.3f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -119 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-119}), .dimensions = {1}, @@ -1314,7 +1314,7 @@ .scale = 2.3f, .type = 
TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -124 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-124}), .dimensions = {1}, @@ -1482,7 +1482,7 @@ .scale = 2.3f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -124 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-124}), .dimensions = {1}, @@ -1650,7 +1650,7 @@ .scale = 2.3f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -124 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-124}), .dimensions = {1}, @@ -1818,7 +1818,7 @@ .scale = 2.3f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -124 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-124}), .dimensions = {1}, @@ -1986,7 +1986,7 @@ .scale = 2.3f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -124 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-124}), .dimensions = {1}, @@ -2154,7 +2154,7 @@ .scale = 2.3f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -124 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-124}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_3/pow_same_shape.example.cpp b/runtime/test/generated/spec_V1_3/pow_same_shape.example.cpp index 6935a90..24521ac 100644 --- a/runtime/test/generated/spec_V1_3/pow_same_shape.example.cpp +++ b/runtime/test/generated/spec_V1_3/pow_same_shape.example.cpp
@@ -110,7 +110,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -140,7 +140,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -292,7 +292,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -322,7 +322,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -474,7 +474,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -504,7 +504,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_3/prelu_quant8_signed.example.cpp b/runtime/test/generated/spec_V1_3/prelu_quant8_signed.example.cpp index 0715e60..4cc013b 100644 --- a/runtime/test/generated/spec_V1_3/prelu_quant8_signed.example.cpp +++ b/runtime/test/generated/spec_V1_3/prelu_quant8_signed.example.cpp
@@ -110,7 +110,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -258,7 +258,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -288,7 +288,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -78 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-78}), .dimensions = {1}, @@ -440,7 +440,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -588,7 +588,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -618,7 +618,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -78 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-78}), .dimensions = {1}, @@ -770,7 +770,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -918,7 +918,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -948,7 +948,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -78 - }, { // 
placeholder8 + }, { // dummy8 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-78}), .dimensions = {1}, @@ -1100,7 +1100,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -1248,7 +1248,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -1278,7 +1278,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -78 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-78}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_3/qlstm_noprojection.example.cpp b/runtime/test/generated/spec_V1_3/qlstm_noprojection.example.cpp index c197294..3c9c4dd 100644 --- a/runtime/test/generated/spec_V1_3/qlstm_noprojection.example.cpp +++ b/runtime/test/generated/spec_V1_3/qlstm_noprojection.example.cpp
@@ -750,7 +750,7 @@ .scale = 0.0078125f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -780,7 +780,7 @@ .scale = 3.05176e-05f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_3/qlstm_projection.example.cpp b/runtime/test/generated/spec_V1_3/qlstm_projection.example.cpp index c7c74f5..47d4626 100644 --- a/runtime/test/generated/spec_V1_3/qlstm_projection.example.cpp +++ b/runtime/test/generated/spec_V1_3/qlstm_projection.example.cpp
@@ -750,7 +750,7 @@ .scale = 0.0078125f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -780,7 +780,7 @@ .scale = 3.05176e-05f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -1572,7 +1572,7 @@ .scale = 0.0078125f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -1602,7 +1602,7 @@ .scale = 3.05176e-05f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_3/quantize_quant8_signed.example.cpp b/runtime/test/generated/spec_V1_3/quantize_quant8_signed.example.cpp index 330806d..721fdaf 100644 --- a/runtime/test/generated/spec_V1_3/quantize_quant8_signed.example.cpp +++ b/runtime/test/generated/spec_V1_3/quantize_quant8_signed.example.cpp
@@ -90,7 +90,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -218,7 +218,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -346,7 +346,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -474,7 +474,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -602,7 +602,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -730,7 +730,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -858,7 +858,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -986,7 +986,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_3/rank.example.cpp b/runtime/test/generated/spec_V1_3/rank.example.cpp index 8c5c6c3..22aadf8 100644 --- a/runtime/test/generated/spec_V1_3/rank.example.cpp +++ b/runtime/test/generated/spec_V1_3/rank.example.cpp
@@ -90,7 +90,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -265,7 +265,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -393,7 +393,7 @@ .scale = 0.1f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -521,7 +521,7 @@ .scale = 0.1f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -649,7 +649,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -824,7 +824,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -952,7 +952,7 @@ .scale = 0.1f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -1080,7 +1080,7 @@ .scale = 0.1f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_3/reduce_max_quant8_signed.example.cpp b/runtime/test/generated/spec_V1_3/reduce_max_quant8_signed.example.cpp index d0e7df7..e80d984 100644 --- a/runtime/test/generated/spec_V1_3/reduce_max_quant8_signed.example.cpp +++ b/runtime/test/generated/spec_V1_3/reduce_max_quant8_signed.example.cpp
@@ -130,7 +130,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -1 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-1}), .dimensions = {1}, @@ -298,7 +298,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -1 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-1}), .dimensions = {1}, @@ -466,7 +466,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -1 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-1}), .dimensions = {1}, @@ -634,7 +634,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -1 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-1}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_3/reduce_min_quant8_signed.example.cpp b/runtime/test/generated/spec_V1_3/reduce_min_quant8_signed.example.cpp index d299dbb..a83fb07 100644 --- a/runtime/test/generated/spec_V1_3/reduce_min_quant8_signed.example.cpp +++ b/runtime/test/generated/spec_V1_3/reduce_min_quant8_signed.example.cpp
@@ -130,7 +130,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -1 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-1}), .dimensions = {1}, @@ -298,7 +298,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -1 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-1}), .dimensions = {1}, @@ -466,7 +466,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -1 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-1}), .dimensions = {1}, @@ -634,7 +634,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -1 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-1}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_3/relu1_quant8_signed.example.cpp b/runtime/test/generated/spec_V1_3/relu1_quant8_signed.example.cpp index 2d27258..8e2e1df 100644 --- a/runtime/test/generated/spec_V1_3/relu1_quant8_signed.example.cpp +++ b/runtime/test/generated/spec_V1_3/relu1_quant8_signed.example.cpp
@@ -90,7 +90,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -218,7 +218,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -346,7 +346,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_3/relu6_quant8_signed.example.cpp b/runtime/test/generated/spec_V1_3/relu6_quant8_signed.example.cpp index b3c19ff..103ad51 100644 --- a/runtime/test/generated/spec_V1_3/relu6_quant8_signed.example.cpp +++ b/runtime/test/generated/spec_V1_3/relu6_quant8_signed.example.cpp
@@ -90,7 +90,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -218,7 +218,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -346,7 +346,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_3/relu_quant8_signed.example.cpp b/runtime/test/generated/spec_V1_3/relu_quant8_signed.example.cpp index 9c9b290..b1005b4 100644 --- a/runtime/test/generated/spec_V1_3/relu_quant8_signed.example.cpp +++ b/runtime/test/generated/spec_V1_3/relu_quant8_signed.example.cpp
@@ -90,7 +90,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -218,7 +218,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -346,7 +346,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_3/reshape_quant8_signed.example.cpp b/runtime/test/generated/spec_V1_3/reshape_quant8_signed.example.cpp index 658e237..57f19e1 100644 --- a/runtime/test/generated/spec_V1_3/reshape_quant8_signed.example.cpp +++ b/runtime/test/generated/spec_V1_3/reshape_quant8_signed.example.cpp
@@ -110,7 +110,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -258,7 +258,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_3/resize_bilinear_v1_3.example.cpp b/runtime/test/generated/spec_V1_3/resize_bilinear_v1_3.example.cpp index f270abc..4092354 100644 --- a/runtime/test/generated/spec_V1_3/resize_bilinear_v1_3.example.cpp +++ b/runtime/test/generated/spec_V1_3/resize_bilinear_v1_3.example.cpp
@@ -190,7 +190,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -418,7 +418,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -646,7 +646,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -874,7 +874,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -1102,7 +1102,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -1330,7 +1330,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1558,7 +1558,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1786,7 +1786,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2014,7 +2014,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = 
TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2242,7 +2242,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -2470,7 +2470,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2698,7 +2698,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2926,7 +2926,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -3154,7 +3154,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3382,7 +3382,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -3610,7 +3610,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder15 + }, { // dummy15 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -3838,7 +3838,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder16 + }, { // dummy16 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4066,7 +4066,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, 
{ // placeholder17 + }, { // dummy17 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -4294,7 +4294,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder18 + }, { // dummy18 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4522,7 +4522,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder19 + }, { // dummy19 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -4750,7 +4750,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder20 + }, { // dummy20 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -4978,7 +4978,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder21 + }, { // dummy21 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5206,7 +5206,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder22 + }, { // dummy22 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -5434,7 +5434,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder23 + }, { // dummy23 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5662,7 +5662,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder24 + }, { // dummy24 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -5890,7 +5890,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder25 + }, { // dummy25 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -6118,7 
+6118,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder26 + }, { // dummy26 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -6346,7 +6346,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder27 + }, { // dummy27 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -6574,7 +6574,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder28 + }, { // dummy28 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -6802,7 +6802,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder29 + }, { // dummy29 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -7030,7 +7030,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder30 + }, { // dummy30 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -7258,7 +7258,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder31 + }, { // dummy31 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -7486,7 +7486,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder32 + }, { // dummy32 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -7714,7 +7714,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder33 + }, { // dummy33 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -7942,7 +7942,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder34 + }, { // dummy34 .channelQuant = {}, .data = 
TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -8170,7 +8170,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder35 + }, { // dummy35 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -8398,7 +8398,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder36 + }, { // dummy36 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -8626,7 +8626,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder37 + }, { // dummy37 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -8854,7 +8854,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder38 + }, { // dummy38 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -9082,7 +9082,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder39 + }, { // dummy39 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -9310,7 +9310,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder40 + }, { // dummy40 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -9538,7 +9538,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder41 + }, { // dummy41 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -9766,7 +9766,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder42 + }, { // dummy42 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -9994,7 +9994,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // 
placeholder43 + }, { // dummy43 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -10222,7 +10222,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder44 + }, { // dummy44 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -10450,7 +10450,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder45 + }, { // dummy45 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -10678,7 +10678,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder46 + }, { // dummy46 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_3/resize_nearest_neighbor_v1_3.example.cpp b/runtime/test/generated/spec_V1_3/resize_nearest_neighbor_v1_3.example.cpp index 4b65f2a..a155216 100644 --- a/runtime/test/generated/spec_V1_3/resize_nearest_neighbor_v1_3.example.cpp +++ b/runtime/test/generated/spec_V1_3/resize_nearest_neighbor_v1_3.example.cpp
@@ -190,7 +190,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -418,7 +418,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -646,7 +646,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -874,7 +874,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -1102,7 +1102,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1330,7 +1330,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1558,7 +1558,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1786,7 +1786,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -2014,7 +2014,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = 
TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2242,7 +2242,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -2470,7 +2470,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2698,7 +2698,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -2926,7 +2926,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3154,7 +3154,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -3382,7 +3382,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3610,7 +3610,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder15 + }, { // dummy15 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -3838,7 +3838,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder16 + }, { // dummy16 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4066,7 +4066,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { 
// placeholder17 + }, { // dummy17 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -4294,7 +4294,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder18 + }, { // dummy18 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4522,7 +4522,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder19 + }, { // dummy19 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -4750,7 +4750,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder20 + }, { // dummy20 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4978,7 +4978,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder21 + }, { // dummy21 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -5206,7 +5206,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder22 + }, { // dummy22 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5434,7 +5434,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder23 + }, { // dummy23 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -5662,7 +5662,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder24 + }, { // dummy24 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5890,7 +5890,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder25 + }, { // dummy25 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -6118,7 +6118,7 @@ .scale = 
0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder26 + }, { // dummy26 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -6346,7 +6346,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder27 + }, { // dummy27 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -6574,7 +6574,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder28 + }, { // dummy28 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -6802,7 +6802,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder29 + }, { // dummy29 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -7030,7 +7030,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder30 + }, { // dummy30 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -7258,7 +7258,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder31 + }, { // dummy31 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -7486,7 +7486,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder32 + }, { // dummy32 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -7714,7 +7714,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder33 + }, { // dummy33 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -7942,7 +7942,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder34 + }, { // dummy34 .channelQuant = {}, .data = 
TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -8170,7 +8170,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder35 + }, { // dummy35 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -8398,7 +8398,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder36 + }, { // dummy36 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -8626,7 +8626,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder37 + }, { // dummy37 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -8854,7 +8854,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder38 + }, { // dummy38 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -9082,7 +9082,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder39 + }, { // dummy39 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -9310,7 +9310,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder40 + }, { // dummy40 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -9538,7 +9538,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder41 + }, { // dummy41 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -9766,7 +9766,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder42 + }, { // dummy42 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -9994,7 +9994,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 
128 - }, { // placeholder43 + }, { // dummy43 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -10222,7 +10222,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder44 + }, { // dummy44 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -10450,7 +10450,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder45 + }, { // dummy45 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -10678,7 +10678,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder46 + }, { // dummy46 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -10906,7 +10906,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder47 + }, { // dummy47 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -11134,7 +11134,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder48 + }, { // dummy48 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -11362,7 +11362,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder49 + }, { // dummy49 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -11590,7 +11590,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder50 + }, { // dummy50 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -11818,7 +11818,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder51 + }, { // dummy51 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ 
-12046,7 +12046,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder52 + }, { // dummy52 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -12274,7 +12274,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder53 + }, { // dummy53 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -12502,7 +12502,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder54 + }, { // dummy54 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -12730,7 +12730,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder55 + }, { // dummy55 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -12958,7 +12958,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder56 + }, { // dummy56 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -13186,7 +13186,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder57 + }, { // dummy57 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -13414,7 +13414,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder58 + }, { // dummy58 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -13642,7 +13642,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder59 + }, { // dummy59 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -13870,7 +13870,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder60 + }, { // dummy60 
.channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -14098,7 +14098,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder61 + }, { // dummy61 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -14326,7 +14326,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder62 + }, { // dummy62 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -14554,7 +14554,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder63 + }, { // dummy63 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -14782,7 +14782,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder64 + }, { // dummy64 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -15010,7 +15010,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder65 + }, { // dummy65 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -15238,7 +15238,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder66 + }, { // dummy66 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -15466,7 +15466,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder67 + }, { // dummy67 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_3/resize_quant8_signed.example.cpp b/runtime/test/generated/spec_V1_3/resize_quant8_signed.example.cpp index a9d9971..aca88f5 100644 --- a/runtime/test/generated/spec_V1_3/resize_quant8_signed.example.cpp +++ b/runtime/test/generated/spec_V1_3/resize_quant8_signed.example.cpp
@@ -150,7 +150,7 @@ .scale = 0.01f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -338,7 +338,7 @@ .scale = 0.01f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -526,7 +526,7 @@ .scale = 0.01f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -714,7 +714,7 @@ .scale = 0.01f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -902,7 +902,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -1090,7 +1090,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -1278,7 +1278,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -1466,7 +1466,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -1634,7 +1634,7 @@ .scale = 0.01f, .type = 
TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -1802,7 +1802,7 @@ .scale = 0.01f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -3130,7 +3130,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -3318,7 +3318,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -3506,7 +3506,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -3694,7 +3694,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -3882,7 +3882,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -4070,7 +4070,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder15 + }, { // dummy15 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -4258,7 +4258,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // 
placeholder16 + }, { // dummy16 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -4446,7 +4446,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder17 + }, { // dummy17 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -4634,7 +4634,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder18 + }, { // dummy18 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -4822,7 +4822,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder19 + }, { // dummy19 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -5010,7 +5010,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder20 + }, { // dummy20 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -5198,7 +5198,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder21 + }, { // dummy21 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -5386,7 +5386,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder22 + }, { // dummy22 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -5574,7 +5574,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder23 + }, { // dummy23 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -5762,7 +5762,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder24 + }, { // dummy24 .channelQuant = {}, .data = 
TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -5950,7 +5950,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder25 + }, { // dummy25 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -6138,7 +6138,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder26 + }, { // dummy26 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -6326,7 +6326,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder27 + }, { // dummy27 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -6514,7 +6514,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder28 + }, { // dummy28 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -6702,7 +6702,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder29 + }, { // dummy29 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -6890,7 +6890,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder30 + }, { // dummy30 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -7078,7 +7078,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder31 + }, { // dummy31 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -7266,7 +7266,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder32 + }, { // dummy32 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, 
@@ -7454,7 +7454,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder33 + }, { // dummy33 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -7642,7 +7642,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder34 + }, { // dummy34 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -7830,7 +7830,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder35 + }, { // dummy35 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -8018,7 +8018,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder36 + }, { // dummy36 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -8206,7 +8206,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder37 + }, { // dummy37 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -8394,7 +8394,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder38 + }, { // dummy38 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -8582,7 +8582,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder39 + }, { // dummy39 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -8770,7 +8770,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder40 + }, { // dummy40 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -8958,7 +8958,7 @@ .scale = 0.25f, .type = 
TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder41 + }, { // dummy41 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_3/roi_align_quant8_signed.example.cpp b/runtime/test/generated/spec_V1_3/roi_align_quant8_signed.example.cpp index 67122ea..3c49b6f 100644 --- a/runtime/test/generated/spec_V1_3/roi_align_quant8_signed.example.cpp +++ b/runtime/test/generated/spec_V1_3/roi_align_quant8_signed.example.cpp
@@ -270,7 +270,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -578,7 +578,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -886,7 +886,7 @@ .scale = 0.04f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -1194,7 +1194,7 @@ .scale = 0.04f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -1502,7 +1502,7 @@ .scale = 0.04f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -1810,7 +1810,7 @@ .scale = 0.04f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -2118,7 +2118,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -2426,7 +2426,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -3236,7 +3236,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, 
.zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -3544,7 +3544,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_3/roi_pooling_quant8_signed.example.cpp b/runtime/test/generated/spec_V1_3/roi_pooling_quant8_signed.example.cpp index c066e9c..a93ec28 100644 --- a/runtime/test/generated/spec_V1_3/roi_pooling_quant8_signed.example.cpp +++ b/runtime/test/generated/spec_V1_3/roi_pooling_quant8_signed.example.cpp
@@ -230,7 +230,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -498,7 +498,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -766,7 +766,7 @@ .scale = 0.04f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -1034,7 +1034,7 @@ .scale = 0.04f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -1302,7 +1302,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -1570,7 +1570,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_3/select_quant8_signed.example.cpp b/runtime/test/generated/spec_V1_3/select_quant8_signed.example.cpp index e2bdc2e..bd04aa6 100644 --- a/runtime/test/generated/spec_V1_3/select_quant8_signed.example.cpp +++ b/runtime/test/generated/spec_V1_3/select_quant8_signed.example.cpp
@@ -130,7 +130,7 @@ .scale = 1.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 1 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({1}), .dimensions = {1}, @@ -160,7 +160,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -1 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-1}), .dimensions = {1}, @@ -332,7 +332,7 @@ .scale = 1.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 1 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({1}), .dimensions = {1}, @@ -362,7 +362,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -1 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-1}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_3/slice_quant8_signed.example.cpp b/runtime/test/generated/spec_V1_3/slice_quant8_signed.example.cpp index 301b319..a02da73 100644 --- a/runtime/test/generated/spec_V1_3/slice_quant8_signed.example.cpp +++ b/runtime/test/generated/spec_V1_3/slice_quant8_signed.example.cpp
@@ -130,7 +130,7 @@ .scale = 2.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_3/softmax_quant8_signed.example.cpp b/runtime/test/generated/spec_V1_3/softmax_quant8_signed.example.cpp index fe51456..4be6eb1 100644 --- a/runtime/test/generated/spec_V1_3/softmax_quant8_signed.example.cpp +++ b/runtime/test/generated/spec_V1_3/softmax_quant8_signed.example.cpp
@@ -110,7 +110,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -258,7 +258,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -406,7 +406,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -554,7 +554,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -702,7 +702,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -850,7 +850,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -998,7 +998,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -1146,7 +1146,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -1314,7 +1314,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // 
placeholder8 + }, { // dummy8 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -1482,7 +1482,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -1650,7 +1650,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -1818,7 +1818,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -1986,7 +1986,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -2154,7 +2154,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -2322,7 +2322,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -2490,7 +2490,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder15 + }, { // dummy15 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -2658,7 +2658,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder16 + }, { // dummy16 .channelQuant = {}, .data = 
TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -2826,7 +2826,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder17 + }, { // dummy17 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -2994,7 +2994,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder18 + }, { // dummy18 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -3162,7 +3162,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder19 + }, { // dummy19 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -3330,7 +3330,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder20 + }, { // dummy20 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -3498,7 +3498,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder21 + }, { // dummy21 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -3666,7 +3666,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder22 + }, { // dummy22 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -3834,7 +3834,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder23 + }, { // dummy23 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -4002,7 +4002,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder24 + }, { // dummy24 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -4170,7 +4170,7 @@ .scale = 
0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder25 + }, { // dummy25 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -4338,7 +4338,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder26 + }, { // dummy26 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -4506,7 +4506,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder27 + }, { // dummy27 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -4674,7 +4674,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder28 + }, { // dummy28 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -4842,7 +4842,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder29 + }, { // dummy29 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -5010,7 +5010,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder30 + }, { // dummy30 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -5178,7 +5178,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder31 + }, { // dummy31 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -5346,7 +5346,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder32 + }, { // dummy32 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -5514,7 +5514,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // 
placeholder33 + }, { // dummy33 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -5682,7 +5682,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder34 + }, { // dummy34 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -5850,7 +5850,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder35 + }, { // dummy35 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -6018,7 +6018,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder36 + }, { // dummy36 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -6186,7 +6186,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder37 + }, { // dummy37 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -6354,7 +6354,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder38 + }, { // dummy38 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -6522,7 +6522,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder39 + }, { // dummy39 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -6690,7 +6690,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder40 + }, { // dummy40 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -6858,7 +6858,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder41 + }, { // dummy41 .channelQuant = {}, .data = 
TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -7026,7 +7026,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder42 + }, { // dummy42 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -7194,7 +7194,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder43 + }, { // dummy43 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -7362,7 +7362,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder44 + }, { // dummy44 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -7530,7 +7530,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder45 + }, { // dummy45 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -7698,7 +7698,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder46 + }, { // dummy46 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -7866,7 +7866,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder47 + }, { // dummy47 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_3/space_to_batch_quant8_signed.example.cpp b/runtime/test/generated/spec_V1_3/space_to_batch_quant8_signed.example.cpp index 56453e2..5f422f2 100644 --- a/runtime/test/generated/spec_V1_3/space_to_batch_quant8_signed.example.cpp +++ b/runtime/test/generated/spec_V1_3/space_to_batch_quant8_signed.example.cpp
@@ -130,7 +130,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -298,7 +298,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -466,7 +466,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -634,7 +634,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -802,7 +802,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -970,7 +970,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -1138,7 +1138,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -119 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-119}), .dimensions = {1}, @@ -1306,7 +1306,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -119 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-119}), .dimensions = {1}, @@ -1494,7 +1494,7 @@ .scale = 0.1f, .type = 
TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -1682,7 +1682,7 @@ .scale = 0.1f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -1870,7 +1870,7 @@ .scale = 0.1f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -2058,7 +2058,7 @@ .scale = 0.1f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -2246,7 +2246,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -2434,7 +2434,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -2622,7 +2622,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -2810,7 +2810,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder15 + }, { // dummy15 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -2998,7 +2998,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 
- }, { // placeholder16 + }, { // dummy16 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -3186,7 +3186,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder17 + }, { // dummy17 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -3374,7 +3374,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder18 + }, { // dummy18 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -3562,7 +3562,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder19 + }, { // dummy19 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -3750,7 +3750,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder20 + }, { // dummy20 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -3938,7 +3938,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder21 + }, { // dummy21 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -4126,7 +4126,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder22 + }, { // dummy22 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -4314,7 +4314,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder23 + }, { // dummy23 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_3/space_to_depth_quant8_signed.example.cpp b/runtime/test/generated/spec_V1_3/space_to_depth_quant8_signed.example.cpp index c36653e..cf15e3a 100644 --- a/runtime/test/generated/spec_V1_3/space_to_depth_quant8_signed.example.cpp +++ b/runtime/test/generated/spec_V1_3/space_to_depth_quant8_signed.example.cpp
@@ -110,7 +110,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -258,7 +258,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -426,7 +426,7 @@ .scale = 0.1f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -594,7 +594,7 @@ .scale = 0.1f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -762,7 +762,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -930,7 +930,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -1098,7 +1098,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -1266,7 +1266,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_3/split_quant8_signed.example.cpp b/runtime/test/generated/spec_V1_3/split_quant8_signed.example.cpp index e48c5e1..31ea2cd 100644 --- a/runtime/test/generated/spec_V1_3/split_quant8_signed.example.cpp +++ b/runtime/test/generated/spec_V1_3/split_quant8_signed.example.cpp
@@ -170,7 +170,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -378,7 +378,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -566,7 +566,7 @@ .scale = 2.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -125 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-125}), .dimensions = {1}, @@ -754,7 +754,7 @@ .scale = 2.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -125 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-125}), .dimensions = {1}, @@ -962,7 +962,7 @@ .scale = 2.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -125 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-125}), .dimensions = {1}, @@ -1150,7 +1150,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_3/squeeze_quant8_signed.example.cpp b/runtime/test/generated/spec_V1_3/squeeze_quant8_signed.example.cpp index c635820..95e1fb3 100644 --- a/runtime/test/generated/spec_V1_3/squeeze_quant8_signed.example.cpp +++ b/runtime/test/generated/spec_V1_3/squeeze_quant8_signed.example.cpp
@@ -110,7 +110,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -258,7 +258,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -406,7 +406,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_3/strided_slice_quant8_signed.example.cpp b/runtime/test/generated/spec_V1_3/strided_slice_quant8_signed.example.cpp index 5c96853..b0b152b 100644 --- a/runtime/test/generated/spec_V1_3/strided_slice_quant8_signed.example.cpp +++ b/runtime/test/generated/spec_V1_3/strided_slice_quant8_signed.example.cpp
@@ -210,7 +210,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -458,7 +458,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -706,7 +706,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -954,7 +954,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -1202,7 +1202,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -1450,7 +1450,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -1698,7 +1698,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -1946,7 +1946,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -2194,7 +2194,7 @@ .scale = 1.0f, .type = 
TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -2442,7 +2442,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -2690,7 +2690,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -2938,7 +2938,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -3186,7 +3186,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -3434,7 +3434,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -3682,7 +3682,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -3930,7 +3930,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder15 + }, { // dummy15 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -4178,7 +4178,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 
-128 - }, { // placeholder16 + }, { // dummy16 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -4426,7 +4426,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder17 + }, { // dummy17 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -4674,7 +4674,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder18 + }, { // dummy18 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -4922,7 +4922,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder19 + }, { // dummy19 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -5170,7 +5170,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder20 + }, { // dummy20 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -5418,7 +5418,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder21 + }, { // dummy21 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_3/sub_quant8_signed.example.cpp b/runtime/test/generated/spec_V1_3/sub_quant8_signed.example.cpp index 71d892d..f19c67b 100644 --- a/runtime/test/generated/spec_V1_3/sub_quant8_signed.example.cpp +++ b/runtime/test/generated/spec_V1_3/sub_quant8_signed.example.cpp
@@ -130,7 +130,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -160,7 +160,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -332,7 +332,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -362,7 +362,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -534,7 +534,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -564,7 +564,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -736,7 +736,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -766,7 +766,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -938,7 +938,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, 
.zeroPoint = -128 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -968,7 +968,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -127 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-127}), .dimensions = {1}, @@ -1140,7 +1140,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -1170,7 +1170,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -127 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-127}), .dimensions = {1}, @@ -1342,7 +1342,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -1372,7 +1372,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -127 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-127}), .dimensions = {1}, @@ -1544,7 +1544,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -1574,7 +1574,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -127 - }, { // placeholder15 + }, { // dummy15 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-127}), .dimensions = {1}, @@ -1746,7 +1746,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder16 + }, { // dummy16 
.channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -1776,7 +1776,7 @@ .scale = 0.01f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -8 - }, { // placeholder17 + }, { // dummy17 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-8}), .dimensions = {1}, @@ -1948,7 +1948,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder18 + }, { // dummy18 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -1978,7 +1978,7 @@ .scale = 0.01f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -8 - }, { // placeholder19 + }, { // dummy19 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-8}), .dimensions = {1}, @@ -2150,7 +2150,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder20 + }, { // dummy20 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -2180,7 +2180,7 @@ .scale = 0.01f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -8 - }, { // placeholder21 + }, { // dummy21 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-8}), .dimensions = {1}, @@ -2352,7 +2352,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder22 + }, { // dummy22 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -2382,7 +2382,7 @@ .scale = 0.01f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -8 - }, { // placeholder23 + }, { // dummy23 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-8}), .dimensions = {1}, @@ -2554,7 +2554,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder24 + }, { // dummy24 .channelQuant = {}, .data = 
TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -2584,7 +2584,7 @@ .scale = 10.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -8 - }, { // placeholder25 + }, { // dummy25 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-8}), .dimensions = {1}, @@ -2756,7 +2756,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder26 + }, { // dummy26 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -2786,7 +2786,7 @@ .scale = 10.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -8 - }, { // placeholder27 + }, { // dummy27 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-8}), .dimensions = {1}, @@ -2958,7 +2958,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder28 + }, { // dummy28 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -2988,7 +2988,7 @@ .scale = 10.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -8 - }, { // placeholder29 + }, { // dummy29 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-8}), .dimensions = {1}, @@ -3160,7 +3160,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder30 + }, { // dummy30 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -3190,7 +3190,7 @@ .scale = 10.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -8 - }, { // placeholder31 + }, { // dummy31 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-8}), .dimensions = {1}, @@ -3362,7 +3362,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -127 - }, { // placeholder32 + }, { // dummy32 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-127}), .dimensions = {1}, @@ 
-3392,7 +3392,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder33 + }, { // dummy33 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -3564,7 +3564,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -127 - }, { // placeholder34 + }, { // dummy34 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-127}), .dimensions = {1}, @@ -3594,7 +3594,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder35 + }, { // dummy35 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -3766,7 +3766,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -127 - }, { // placeholder36 + }, { // dummy36 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-127}), .dimensions = {1}, @@ -3796,7 +3796,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder37 + }, { // dummy37 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -3968,7 +3968,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -127 - }, { // placeholder38 + }, { // dummy38 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-127}), .dimensions = {1}, @@ -3998,7 +3998,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder39 + }, { // dummy39 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -4170,7 +4170,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -127 - }, { // placeholder40 + }, { // dummy40 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-127}), .dimensions = {1}, @@ -4200,7 +4200,7 @@ .scale = 1.0f, .type = 
TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -127 - }, { // placeholder41 + }, { // dummy41 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-127}), .dimensions = {1}, @@ -4372,7 +4372,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -127 - }, { // placeholder42 + }, { // dummy42 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-127}), .dimensions = {1}, @@ -4402,7 +4402,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -127 - }, { // placeholder43 + }, { // dummy43 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-127}), .dimensions = {1}, @@ -4574,7 +4574,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -127 - }, { // placeholder44 + }, { // dummy44 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-127}), .dimensions = {1}, @@ -4604,7 +4604,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -127 - }, { // placeholder45 + }, { // dummy45 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-127}), .dimensions = {1}, @@ -4776,7 +4776,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -127 - }, { // placeholder46 + }, { // dummy46 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-127}), .dimensions = {1}, @@ -4806,7 +4806,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -127 - }, { // placeholder47 + }, { // dummy47 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-127}), .dimensions = {1}, @@ -4978,7 +4978,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -127 - }, { // placeholder48 + }, { // dummy48 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-127}), .dimensions = {1}, @@ -5008,7 +5008,7 @@ .scale = 0.01f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, 
.zeroPoint = -8 - }, { // placeholder49 + }, { // dummy49 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-8}), .dimensions = {1}, @@ -5180,7 +5180,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -127 - }, { // placeholder50 + }, { // dummy50 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-127}), .dimensions = {1}, @@ -5210,7 +5210,7 @@ .scale = 0.01f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -8 - }, { // placeholder51 + }, { // dummy51 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-8}), .dimensions = {1}, @@ -5382,7 +5382,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -127 - }, { // placeholder52 + }, { // dummy52 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-127}), .dimensions = {1}, @@ -5412,7 +5412,7 @@ .scale = 0.01f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -8 - }, { // placeholder53 + }, { // dummy53 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-8}), .dimensions = {1}, @@ -5584,7 +5584,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -127 - }, { // placeholder54 + }, { // dummy54 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-127}), .dimensions = {1}, @@ -5614,7 +5614,7 @@ .scale = 0.01f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -8 - }, { // placeholder55 + }, { // dummy55 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-8}), .dimensions = {1}, @@ -5786,7 +5786,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -127 - }, { // placeholder56 + }, { // dummy56 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-127}), .dimensions = {1}, @@ -5816,7 +5816,7 @@ .scale = 10.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -8 - }, { // placeholder57 + }, { // dummy57 
.channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-8}), .dimensions = {1}, @@ -5988,7 +5988,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -127 - }, { // placeholder58 + }, { // dummy58 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-127}), .dimensions = {1}, @@ -6018,7 +6018,7 @@ .scale = 10.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -8 - }, { // placeholder59 + }, { // dummy59 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-8}), .dimensions = {1}, @@ -6190,7 +6190,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -127 - }, { // placeholder60 + }, { // dummy60 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-127}), .dimensions = {1}, @@ -6220,7 +6220,7 @@ .scale = 10.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -8 - }, { // placeholder61 + }, { // dummy61 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-8}), .dimensions = {1}, @@ -6392,7 +6392,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -127 - }, { // placeholder62 + }, { // dummy62 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-127}), .dimensions = {1}, @@ -6422,7 +6422,7 @@ .scale = 10.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -8 - }, { // placeholder63 + }, { // dummy63 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-8}), .dimensions = {1}, @@ -6594,7 +6594,7 @@ .scale = 0.01f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -8 - }, { // placeholder64 + }, { // dummy64 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-8}), .dimensions = {1}, @@ -6624,7 +6624,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder65 + }, { // dummy65 .channelQuant = {}, .data = 
TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -6796,7 +6796,7 @@ .scale = 0.01f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -8 - }, { // placeholder66 + }, { // dummy66 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-8}), .dimensions = {1}, @@ -6826,7 +6826,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder67 + }, { // dummy67 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -6998,7 +6998,7 @@ .scale = 0.01f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -8 - }, { // placeholder68 + }, { // dummy68 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-8}), .dimensions = {1}, @@ -7028,7 +7028,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder69 + }, { // dummy69 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -7200,7 +7200,7 @@ .scale = 0.01f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -8 - }, { // placeholder70 + }, { // dummy70 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-8}), .dimensions = {1}, @@ -7230,7 +7230,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder71 + }, { // dummy71 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -7402,7 +7402,7 @@ .scale = 0.01f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -8 - }, { // placeholder72 + }, { // dummy72 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-8}), .dimensions = {1}, @@ -7432,7 +7432,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -127 - }, { // placeholder73 + }, { // dummy73 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-127}), .dimensions = {1}, @@ 
-7604,7 +7604,7 @@ .scale = 0.01f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -8 - }, { // placeholder74 + }, { // dummy74 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-8}), .dimensions = {1}, @@ -7634,7 +7634,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -127 - }, { // placeholder75 + }, { // dummy75 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-127}), .dimensions = {1}, @@ -7806,7 +7806,7 @@ .scale = 0.01f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -8 - }, { // placeholder76 + }, { // dummy76 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-8}), .dimensions = {1}, @@ -7836,7 +7836,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -127 - }, { // placeholder77 + }, { // dummy77 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-127}), .dimensions = {1}, @@ -8008,7 +8008,7 @@ .scale = 0.01f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -8 - }, { // placeholder78 + }, { // dummy78 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-8}), .dimensions = {1}, @@ -8038,7 +8038,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -127 - }, { // placeholder79 + }, { // dummy79 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-127}), .dimensions = {1}, @@ -8210,7 +8210,7 @@ .scale = 0.01f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -8 - }, { // placeholder80 + }, { // dummy80 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-8}), .dimensions = {1}, @@ -8240,7 +8240,7 @@ .scale = 0.01f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -8 - }, { // placeholder81 + }, { // dummy81 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-8}), .dimensions = {1}, @@ -8412,7 +8412,7 @@ .scale = 0.01f, .type = 
TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -8 - }, { // placeholder82 + }, { // dummy82 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-8}), .dimensions = {1}, @@ -8442,7 +8442,7 @@ .scale = 0.01f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -8 - }, { // placeholder83 + }, { // dummy83 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-8}), .dimensions = {1}, @@ -8614,7 +8614,7 @@ .scale = 0.01f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -8 - }, { // placeholder84 + }, { // dummy84 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-8}), .dimensions = {1}, @@ -8644,7 +8644,7 @@ .scale = 0.01f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -8 - }, { // placeholder85 + }, { // dummy85 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-8}), .dimensions = {1}, @@ -8816,7 +8816,7 @@ .scale = 0.01f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -8 - }, { // placeholder86 + }, { // dummy86 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-8}), .dimensions = {1}, @@ -8846,7 +8846,7 @@ .scale = 0.01f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -8 - }, { // placeholder87 + }, { // dummy87 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-8}), .dimensions = {1}, @@ -9018,7 +9018,7 @@ .scale = 0.01f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -8 - }, { // placeholder88 + }, { // dummy88 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-8}), .dimensions = {1}, @@ -9048,7 +9048,7 @@ .scale = 10.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -8 - }, { // placeholder89 + }, { // dummy89 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-8}), .dimensions = {1}, @@ -9220,7 +9220,7 @@ .scale = 0.01f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -8 - }, { // 
placeholder90 + }, { // dummy90 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-8}), .dimensions = {1}, @@ -9250,7 +9250,7 @@ .scale = 10.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -8 - }, { // placeholder91 + }, { // dummy91 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-8}), .dimensions = {1}, @@ -9422,7 +9422,7 @@ .scale = 0.01f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -8 - }, { // placeholder92 + }, { // dummy92 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-8}), .dimensions = {1}, @@ -9452,7 +9452,7 @@ .scale = 10.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -8 - }, { // placeholder93 + }, { // dummy93 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-8}), .dimensions = {1}, @@ -9624,7 +9624,7 @@ .scale = 0.01f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -8 - }, { // placeholder94 + }, { // dummy94 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-8}), .dimensions = {1}, @@ -9654,7 +9654,7 @@ .scale = 10.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -8 - }, { // placeholder95 + }, { // dummy95 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-8}), .dimensions = {1}, @@ -9826,7 +9826,7 @@ .scale = 10.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -8 - }, { // placeholder96 + }, { // dummy96 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-8}), .dimensions = {1}, @@ -9856,7 +9856,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder97 + }, { // dummy97 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -10028,7 +10028,7 @@ .scale = 10.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -8 - }, { // placeholder98 + }, { // dummy98 .channelQuant = {}, .data = 
TestBuffer::createFromVector<int8_t>({-8}), .dimensions = {1}, @@ -10058,7 +10058,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder99 + }, { // dummy99 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -10230,7 +10230,7 @@ .scale = 10.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -8 - }, { // placeholder100 + }, { // dummy100 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-8}), .dimensions = {1}, @@ -10260,7 +10260,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder101 + }, { // dummy101 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -10432,7 +10432,7 @@ .scale = 10.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -8 - }, { // placeholder102 + }, { // dummy102 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-8}), .dimensions = {1}, @@ -10462,7 +10462,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder103 + }, { // dummy103 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -10634,7 +10634,7 @@ .scale = 10.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -8 - }, { // placeholder104 + }, { // dummy104 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-8}), .dimensions = {1}, @@ -10664,7 +10664,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -127 - }, { // placeholder105 + }, { // dummy105 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-127}), .dimensions = {1}, @@ -10836,7 +10836,7 @@ .scale = 10.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -8 - }, { // placeholder106 + }, { // dummy106 .channelQuant = {}, .data = 
TestBuffer::createFromVector<int8_t>({-8}), .dimensions = {1}, @@ -10866,7 +10866,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -127 - }, { // placeholder107 + }, { // dummy107 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-127}), .dimensions = {1}, @@ -11038,7 +11038,7 @@ .scale = 10.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -8 - }, { // placeholder108 + }, { // dummy108 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-8}), .dimensions = {1}, @@ -11068,7 +11068,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -127 - }, { // placeholder109 + }, { // dummy109 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-127}), .dimensions = {1}, @@ -11240,7 +11240,7 @@ .scale = 10.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -8 - }, { // placeholder110 + }, { // dummy110 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-8}), .dimensions = {1}, @@ -11270,7 +11270,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -127 - }, { // placeholder111 + }, { // dummy111 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-127}), .dimensions = {1}, @@ -11442,7 +11442,7 @@ .scale = 10.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -8 - }, { // placeholder112 + }, { // dummy112 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-8}), .dimensions = {1}, @@ -11472,7 +11472,7 @@ .scale = 0.01f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -8 - }, { // placeholder113 + }, { // dummy113 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-8}), .dimensions = {1}, @@ -11644,7 +11644,7 @@ .scale = 10.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -8 - }, { // placeholder114 + }, { // dummy114 .channelQuant = {}, .data = 
TestBuffer::createFromVector<int8_t>({-8}), .dimensions = {1}, @@ -11674,7 +11674,7 @@ .scale = 0.01f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -8 - }, { // placeholder115 + }, { // dummy115 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-8}), .dimensions = {1}, @@ -11846,7 +11846,7 @@ .scale = 10.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -8 - }, { // placeholder116 + }, { // dummy116 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-8}), .dimensions = {1}, @@ -11876,7 +11876,7 @@ .scale = 0.01f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -8 - }, { // placeholder117 + }, { // dummy117 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-8}), .dimensions = {1}, @@ -12048,7 +12048,7 @@ .scale = 10.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -8 - }, { // placeholder118 + }, { // dummy118 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-8}), .dimensions = {1}, @@ -12078,7 +12078,7 @@ .scale = 0.01f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -8 - }, { // placeholder119 + }, { // dummy119 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-8}), .dimensions = {1}, @@ -12250,7 +12250,7 @@ .scale = 10.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -8 - }, { // placeholder120 + }, { // dummy120 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-8}), .dimensions = {1}, @@ -12280,7 +12280,7 @@ .scale = 10.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -8 - }, { // placeholder121 + }, { // dummy121 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-8}), .dimensions = {1}, @@ -12452,7 +12452,7 @@ .scale = 10.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -8 - }, { // placeholder122 + }, { // dummy122 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-8}), 
.dimensions = {1}, @@ -12482,7 +12482,7 @@ .scale = 10.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -8 - }, { // placeholder123 + }, { // dummy123 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-8}), .dimensions = {1}, @@ -12654,7 +12654,7 @@ .scale = 10.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -8 - }, { // placeholder124 + }, { // dummy124 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-8}), .dimensions = {1}, @@ -12684,7 +12684,7 @@ .scale = 10.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -8 - }, { // placeholder125 + }, { // dummy125 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-8}), .dimensions = {1}, @@ -12856,7 +12856,7 @@ .scale = 10.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -8 - }, { // placeholder126 + }, { // dummy126 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-8}), .dimensions = {1}, @@ -12886,7 +12886,7 @@ .scale = 10.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -8 - }, { // placeholder127 + }, { // dummy127 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-8}), .dimensions = {1}, @@ -13058,7 +13058,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder128 + }, { // dummy128 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -13088,7 +13088,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder129 + }, { // dummy129 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -13260,7 +13260,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder130 + }, { // dummy130 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -13290,7 
+13290,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder131 + }, { // dummy131 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_3/tanh_quant8_signed.example.cpp b/runtime/test/generated/spec_V1_3/tanh_quant8_signed.example.cpp index 54bccba..0ec65a0 100644 --- a/runtime/test/generated/spec_V1_3/tanh_quant8_signed.example.cpp +++ b/runtime/test/generated/spec_V1_3/tanh_quant8_signed.example.cpp
@@ -90,7 +90,7 @@ .scale = 0.05f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_3/tile_quant8_signed.example.cpp b/runtime/test/generated/spec_V1_3/tile_quant8_signed.example.cpp index 9b0e088..b73d6de 100644 --- a/runtime/test/generated/spec_V1_3/tile_quant8_signed.example.cpp +++ b/runtime/test/generated/spec_V1_3/tile_quant8_signed.example.cpp
@@ -110,7 +110,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -1 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-1}), .dimensions = {1}, @@ -258,7 +258,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -1 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-1}), .dimensions = {1}, @@ -406,7 +406,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -1 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-1}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_3/topk_v2_quant8_signed.example.cpp b/runtime/test/generated/spec_V1_3/topk_v2_quant8_signed.example.cpp index 1cdc85f..bf18694 100644 --- a/runtime/test/generated/spec_V1_3/topk_v2_quant8_signed.example.cpp +++ b/runtime/test/generated/spec_V1_3/topk_v2_quant8_signed.example.cpp
@@ -130,7 +130,7 @@ .scale = 2.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_3/transpose_conv2d_quant8_signed.example.cpp b/runtime/test/generated/spec_V1_3/transpose_conv2d_quant8_signed.example.cpp index 3ba6baf..b6f6a0c 100644 --- a/runtime/test/generated/spec_V1_3/transpose_conv2d_quant8_signed.example.cpp +++ b/runtime/test/generated/spec_V1_3/transpose_conv2d_quant8_signed.example.cpp
@@ -250,7 +250,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -538,7 +538,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -568,7 +568,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -866,7 +866,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -1160,7 +1160,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -1448,7 +1448,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -1736,7 +1736,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -1766,7 +1766,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -2058,7 +2058,7 @@ .scale = 0.5f, .type = 
TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -2346,7 +2346,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -2376,7 +2376,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -2674,7 +2674,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -2968,7 +2968,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -3262,7 +3262,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -3556,7 +3556,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -3844,7 +3844,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder15 + }, { // dummy15 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -4132,7 +4132,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // 
placeholder16 + }, { // dummy16 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -4162,7 +4162,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder17 + }, { // dummy17 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -4454,7 +4454,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder18 + }, { // dummy18 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -4742,7 +4742,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder19 + }, { // dummy19 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -4772,7 +4772,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder20 + }, { // dummy20 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -5070,7 +5070,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder21 + }, { // dummy21 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -5364,7 +5364,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder22 + }, { // dummy22 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -5658,7 +5658,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder23 + }, { // dummy23 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -5952,7 +5952,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder24 + }, { // dummy24 .channelQuant = {}, .data = 
TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -6240,7 +6240,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder25 + }, { // dummy25 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -6528,7 +6528,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder26 + }, { // dummy26 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -6558,7 +6558,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder27 + }, { // dummy27 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -6850,7 +6850,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder28 + }, { // dummy28 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -7138,7 +7138,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder29 + }, { // dummy29 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -7168,7 +7168,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder30 + }, { // dummy30 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -7466,7 +7466,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder31 + }, { // dummy31 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -7760,7 +7760,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder32 + }, { // dummy32 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ 
-8054,7 +8054,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder33 + }, { // dummy33 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -8348,7 +8348,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder34 + }, { // dummy34 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -8636,7 +8636,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder35 + }, { // dummy35 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -8924,7 +8924,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder36 + }, { // dummy36 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -8954,7 +8954,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder37 + }, { // dummy37 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -9246,7 +9246,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder38 + }, { // dummy38 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -9534,7 +9534,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder39 + }, { // dummy39 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -9564,7 +9564,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder40 + }, { // dummy40 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -9862,7 +9862,7 @@ .scale = 0.25f, .type = 
TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder41 + }, { // dummy41 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -10156,7 +10156,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder42 + }, { // dummy42 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -10450,7 +10450,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder43 + }, { // dummy43 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -10744,7 +10744,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder44 + }, { // dummy44 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -11032,7 +11032,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder45 + }, { // dummy45 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -11320,7 +11320,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder46 + }, { // dummy46 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -11350,7 +11350,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder47 + }, { // dummy47 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -11642,7 +11642,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder48 + }, { // dummy48 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -11930,7 +11930,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, 
.zeroPoint = -28 - }, { // placeholder49 + }, { // dummy49 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -11960,7 +11960,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder50 + }, { // dummy50 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -12258,7 +12258,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder51 + }, { // dummy51 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -12552,7 +12552,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder52 + }, { // dummy52 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -12846,7 +12846,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder53 + }, { // dummy53 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -13140,7 +13140,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder54 + }, { // dummy54 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -13428,7 +13428,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder55 + }, { // dummy55 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -13716,7 +13716,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder56 + }, { // dummy56 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -13746,7 +13746,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder57 + }, { // 
dummy57 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -14038,7 +14038,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder58 + }, { // dummy58 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -14326,7 +14326,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder59 + }, { // dummy59 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -14356,7 +14356,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder60 + }, { // dummy60 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -14654,7 +14654,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder61 + }, { // dummy61 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -14948,7 +14948,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder62 + }, { // dummy62 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -15242,7 +15242,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder63 + }, { // dummy63 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -15536,7 +15536,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder64 + }, { // dummy64 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -15824,7 +15824,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder65 + }, { // dummy65 .channelQuant = {}, .data = 
TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -16112,7 +16112,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder66 + }, { // dummy66 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -16142,7 +16142,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder67 + }, { // dummy67 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -16434,7 +16434,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder68 + }, { // dummy68 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -16722,7 +16722,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder69 + }, { // dummy69 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -16752,7 +16752,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder70 + }, { // dummy70 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -17050,7 +17050,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder71 + }, { // dummy71 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -17344,7 +17344,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder72 + }, { // dummy72 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -17638,7 +17638,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder73 + }, { // dummy73 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), 
.dimensions = {1}, @@ -17932,7 +17932,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder74 + }, { // dummy74 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -18220,7 +18220,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder75 + }, { // dummy75 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -18508,7 +18508,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder76 + }, { // dummy76 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -18538,7 +18538,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder77 + }, { // dummy77 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -18830,7 +18830,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder78 + }, { // dummy78 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -19118,7 +19118,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder79 + }, { // dummy79 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -19148,7 +19148,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder80 + }, { // dummy80 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -19446,7 +19446,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder81 + }, { // dummy81 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -19740,7 +19740,7 @@ .scale = 
0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder82 + }, { // dummy82 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -20034,7 +20034,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder83 + }, { // dummy83 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -20328,7 +20328,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder84 + }, { // dummy84 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -20616,7 +20616,7 @@ .scale = 2.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder85 + }, { // dummy85 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -20904,7 +20904,7 @@ .scale = 2.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder86 + }, { // dummy86 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -20934,7 +20934,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder87 + }, { // dummy87 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -21232,7 +21232,7 @@ .scale = 2.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder88 + }, { // dummy88 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -21526,7 +21526,7 @@ .scale = 2.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder89 + }, { // dummy89 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -21814,7 +21814,7 @@ .scale = 2.0f, .type = 
TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder90 + }, { // dummy90 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -22102,7 +22102,7 @@ .scale = 2.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder91 + }, { // dummy91 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -22132,7 +22132,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder92 + }, { // dummy92 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -22430,7 +22430,7 @@ .scale = 2.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder93 + }, { // dummy93 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -22724,7 +22724,7 @@ .scale = 2.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder94 + }, { // dummy94 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -23012,7 +23012,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder95 + }, { // dummy95 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -23300,7 +23300,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder96 + }, { // dummy96 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -23330,7 +23330,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder97 + }, { // dummy97 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -23622,7 +23622,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, 
.zeroPoint = -28 - }, { // placeholder98 + }, { // dummy98 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -23910,7 +23910,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder99 + }, { // dummy99 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -23940,7 +23940,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder100 + }, { // dummy100 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -24232,7 +24232,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -118 - }, { // placeholder101 + }, { // dummy101 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-118}), .dimensions = {1}, @@ -24520,7 +24520,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -118 - }, { // placeholder102 + }, { // dummy102 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-118}), .dimensions = {1}, @@ -24550,7 +24550,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder103 + }, { // dummy103 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -24842,7 +24842,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -118 - }, { // placeholder104 + }, { // dummy104 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-118}), .dimensions = {1}, @@ -25130,7 +25130,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -118 - }, { // placeholder105 + }, { // dummy105 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-118}), .dimensions = {1}, @@ -25160,7 +25160,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder106 
+ }, { // dummy106 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -25492,7 +25492,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder107 + }, { // dummy107 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -25820,7 +25820,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder108 + }, { // dummy108 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -25850,7 +25850,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder109 + }, { // dummy109 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -26182,7 +26182,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder110 + }, { // dummy110 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -26510,7 +26510,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder111 + }, { // dummy111 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -26540,7 +26540,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder112 + }, { // dummy112 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -28212,7 +28212,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder113 + }, { // dummy113 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -28500,7 +28500,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder114 + }, { // dummy114 .channelQuant = {}, .data = 
TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -28530,7 +28530,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder115 + }, { // dummy115 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -28822,7 +28822,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder116 + }, { // dummy116 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -29110,7 +29110,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder117 + }, { // dummy117 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -29140,7 +29140,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder118 + }, { // dummy118 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_3/transpose_quant8_signed.example.cpp b/runtime/test/generated/spec_V1_3/transpose_quant8_signed.example.cpp index e7b6f9c..c3bd71a 100644 --- a/runtime/test/generated/spec_V1_3/transpose_quant8_signed.example.cpp +++ b/runtime/test/generated/spec_V1_3/transpose_quant8_signed.example.cpp
@@ -250,7 +250,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -538,7 +538,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -568,7 +568,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -866,7 +866,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -1160,7 +1160,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -1448,7 +1448,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -1736,7 +1736,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -1766,7 +1766,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -2058,7 +2058,7 @@ .scale = 0.5f, .type = 
TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -2346,7 +2346,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -2376,7 +2376,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -2674,7 +2674,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -2968,7 +2968,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -3262,7 +3262,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -3556,7 +3556,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -3844,7 +3844,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder15 + }, { // dummy15 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -4132,7 +4132,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // 
placeholder16 + }, { // dummy16 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -4162,7 +4162,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder17 + }, { // dummy17 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -4454,7 +4454,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder18 + }, { // dummy18 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -4742,7 +4742,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder19 + }, { // dummy19 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -4772,7 +4772,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder20 + }, { // dummy20 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -5070,7 +5070,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder21 + }, { // dummy21 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -5364,7 +5364,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder22 + }, { // dummy22 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -5658,7 +5658,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder23 + }, { // dummy23 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -5952,7 +5952,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder24 + }, { // dummy24 .channelQuant = {}, .data = 
TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -6240,7 +6240,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder25 + }, { // dummy25 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -6528,7 +6528,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder26 + }, { // dummy26 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -6558,7 +6558,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder27 + }, { // dummy27 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -6850,7 +6850,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder28 + }, { // dummy28 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -7138,7 +7138,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder29 + }, { // dummy29 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -7168,7 +7168,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder30 + }, { // dummy30 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -7466,7 +7466,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder31 + }, { // dummy31 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -7760,7 +7760,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder32 + }, { // dummy32 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ 
-8054,7 +8054,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder33 + }, { // dummy33 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -8348,7 +8348,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder34 + }, { // dummy34 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -8636,7 +8636,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder35 + }, { // dummy35 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -8924,7 +8924,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder36 + }, { // dummy36 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -8954,7 +8954,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder37 + }, { // dummy37 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -9246,7 +9246,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder38 + }, { // dummy38 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -9534,7 +9534,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder39 + }, { // dummy39 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -9564,7 +9564,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder40 + }, { // dummy40 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -9862,7 +9862,7 @@ .scale = 0.25f, .type = 
TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder41 + }, { // dummy41 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -10156,7 +10156,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder42 + }, { // dummy42 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -10450,7 +10450,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder43 + }, { // dummy43 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -10744,7 +10744,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder44 + }, { // dummy44 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -11032,7 +11032,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder45 + }, { // dummy45 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -11320,7 +11320,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder46 + }, { // dummy46 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -11350,7 +11350,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder47 + }, { // dummy47 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -11642,7 +11642,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder48 + }, { // dummy48 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -11930,7 +11930,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, 
.zeroPoint = -28 - }, { // placeholder49 + }, { // dummy49 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -11960,7 +11960,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder50 + }, { // dummy50 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -12258,7 +12258,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder51 + }, { // dummy51 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -12552,7 +12552,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder52 + }, { // dummy52 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -12846,7 +12846,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder53 + }, { // dummy53 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -13140,7 +13140,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder54 + }, { // dummy54 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -13428,7 +13428,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder55 + }, { // dummy55 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -13716,7 +13716,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder56 + }, { // dummy56 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -13746,7 +13746,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder57 + }, { // 
dummy57 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -14038,7 +14038,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder58 + }, { // dummy58 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -14326,7 +14326,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder59 + }, { // dummy59 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -14356,7 +14356,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder60 + }, { // dummy60 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -14654,7 +14654,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder61 + }, { // dummy61 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -14948,7 +14948,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder62 + }, { // dummy62 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -15242,7 +15242,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder63 + }, { // dummy63 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -15536,7 +15536,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder64 + }, { // dummy64 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -15824,7 +15824,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder65 + }, { // dummy65 .channelQuant = {}, .data = 
TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -16112,7 +16112,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder66 + }, { // dummy66 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -16142,7 +16142,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder67 + }, { // dummy67 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -16434,7 +16434,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder68 + }, { // dummy68 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -16722,7 +16722,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder69 + }, { // dummy69 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -16752,7 +16752,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder70 + }, { // dummy70 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -17050,7 +17050,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder71 + }, { // dummy71 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -17344,7 +17344,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder72 + }, { // dummy72 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -17638,7 +17638,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder73 + }, { // dummy73 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), 
.dimensions = {1}, @@ -17932,7 +17932,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder74 + }, { // dummy74 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -18220,7 +18220,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder75 + }, { // dummy75 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -18508,7 +18508,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder76 + }, { // dummy76 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -18538,7 +18538,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder77 + }, { // dummy77 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -18830,7 +18830,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder78 + }, { // dummy78 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -19118,7 +19118,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder79 + }, { // dummy79 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -19148,7 +19148,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder80 + }, { // dummy80 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -19446,7 +19446,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder81 + }, { // dummy81 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -19740,7 +19740,7 @@ .scale = 
0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder82 + }, { // dummy82 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -20034,7 +20034,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder83 + }, { // dummy83 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -20328,7 +20328,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder84 + }, { // dummy84 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -20616,7 +20616,7 @@ .scale = 2.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder85 + }, { // dummy85 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -20904,7 +20904,7 @@ .scale = 2.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder86 + }, { // dummy86 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -20934,7 +20934,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder87 + }, { // dummy87 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -21232,7 +21232,7 @@ .scale = 2.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder88 + }, { // dummy88 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -21526,7 +21526,7 @@ .scale = 2.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder89 + }, { // dummy89 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -21814,7 +21814,7 @@ .scale = 2.0f, .type = 
TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder90 + }, { // dummy90 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -22102,7 +22102,7 @@ .scale = 2.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder91 + }, { // dummy91 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -22132,7 +22132,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder92 + }, { // dummy92 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -22430,7 +22430,7 @@ .scale = 2.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder93 + }, { // dummy93 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -22724,7 +22724,7 @@ .scale = 2.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder94 + }, { // dummy94 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -23012,7 +23012,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder95 + }, { // dummy95 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -23300,7 +23300,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder96 + }, { // dummy96 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -23330,7 +23330,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder97 + }, { // dummy97 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -23622,7 +23622,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, 
.zeroPoint = -28 - }, { // placeholder98 + }, { // dummy98 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -23910,7 +23910,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder99 + }, { // dummy99 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -23940,7 +23940,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder100 + }, { // dummy100 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -24232,7 +24232,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -118 - }, { // placeholder101 + }, { // dummy101 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-118}), .dimensions = {1}, @@ -24520,7 +24520,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -118 - }, { // placeholder102 + }, { // dummy102 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-118}), .dimensions = {1}, @@ -24550,7 +24550,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder103 + }, { // dummy103 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -24842,7 +24842,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -118 - }, { // placeholder104 + }, { // dummy104 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-118}), .dimensions = {1}, @@ -25130,7 +25130,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -118 - }, { // placeholder105 + }, { // dummy105 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-118}), .dimensions = {1}, @@ -25160,7 +25160,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder106 
+ }, { // dummy106 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -25492,7 +25492,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder107 + }, { // dummy107 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -25820,7 +25820,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder108 + }, { // dummy108 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -25850,7 +25850,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder109 + }, { // dummy109 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -26182,7 +26182,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder110 + }, { // dummy110 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -26510,7 +26510,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder111 + }, { // dummy111 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -26540,7 +26540,7 @@ .scale = 0.25f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder112 + }, { // dummy112 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -28212,7 +28212,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder113 + }, { // dummy113 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -28500,7 +28500,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder114 + }, { // dummy114 .channelQuant = {}, .data = 
TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -28530,7 +28530,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder115 + }, { // dummy115 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -28822,7 +28822,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder116 + }, { // dummy116 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -29110,7 +29110,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -28 - }, { // placeholder117 + }, { // dummy117 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-28}), .dimensions = {1}, @@ -29140,7 +29140,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder118 + }, { // dummy118 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -29292,7 +29292,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder119 + }, { // dummy119 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -29440,7 +29440,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder120 + }, { // dummy120 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1}, @@ -29588,7 +29588,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = -128 - }, { // placeholder121 + }, { // dummy121 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({-128}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_3/unidirectional_sequence_lstm_layer_norm_cifg_peephole_state_output.example.cpp b/runtime/test/generated/spec_V1_3/unidirectional_sequence_lstm_layer_norm_cifg_peephole_state_output.example.cpp index 897b9ba..a8cc2b8 100644 --- a/runtime/test/generated/spec_V1_3/unidirectional_sequence_lstm_layer_norm_cifg_peephole_state_output.example.cpp +++ b/runtime/test/generated/spec_V1_3/unidirectional_sequence_lstm_layer_norm_cifg_peephole_state_output.example.cpp
@@ -670,7 +670,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -700,7 +700,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -730,7 +730,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -760,7 +760,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -790,7 +790,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -820,7 +820,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -850,7 +850,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -880,7 +880,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -910,7 +910,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = 
{1}, @@ -940,7 +940,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -970,7 +970,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1000,7 +1000,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1030,7 +1030,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1060,7 +1060,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1090,7 +1090,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1120,7 +1120,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder15 + }, { // dummy15 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1150,7 +1150,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder16 + }, { // dummy16 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1180,7 +1180,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder17 + }, { // dummy17 .channelQuant = {}, .data = 
TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1956,7 +1956,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder18 + }, { // dummy18 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1986,7 +1986,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder19 + }, { // dummy19 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2016,7 +2016,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder20 + }, { // dummy20 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2046,7 +2046,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder21 + }, { // dummy21 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2076,7 +2076,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder22 + }, { // dummy22 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2106,7 +2106,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder23 + }, { // dummy23 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2136,7 +2136,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder24 + }, { // dummy24 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2166,7 +2166,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder25 + }, { // dummy25 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2196,7 +2196,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder26 + 
}, { // dummy26 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2226,7 +2226,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder27 + }, { // dummy27 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2256,7 +2256,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder28 + }, { // dummy28 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2286,7 +2286,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder29 + }, { // dummy29 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2316,7 +2316,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder30 + }, { // dummy30 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2346,7 +2346,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder31 + }, { // dummy31 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2376,7 +2376,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder32 + }, { // dummy32 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2406,7 +2406,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder33 + }, { // dummy33 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2436,7 +2436,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder34 + }, { // dummy34 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2466,7 +2466,7 @@ .scale = 0.0f, .type = 
TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder35 + }, { // dummy35 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3242,7 +3242,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder36 + }, { // dummy36 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -3272,7 +3272,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder37 + }, { // dummy37 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -3302,7 +3302,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder38 + }, { // dummy38 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -3332,7 +3332,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder39 + }, { // dummy39 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -3362,7 +3362,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder40 + }, { // dummy40 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -3392,7 +3392,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder41 + }, { // dummy41 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -3422,7 +3422,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder42 + }, { // dummy42 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -3452,7 +3452,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder43 + }, { // dummy43 .channelQuant = {}, .data = 
TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -3482,7 +3482,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder44 + }, { // dummy44 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -3512,7 +3512,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder45 + }, { // dummy45 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -3542,7 +3542,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder46 + }, { // dummy46 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -3572,7 +3572,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder47 + }, { // dummy47 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -3602,7 +3602,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder48 + }, { // dummy48 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -3632,7 +3632,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder49 + }, { // dummy49 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -3662,7 +3662,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder50 + }, { // dummy50 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -3692,7 +3692,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder51 + }, { // dummy51 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -3722,7 +3722,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 
- }, { // placeholder52 + }, { // dummy52 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -3752,7 +3752,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder53 + }, { // dummy53 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_3/unidirectional_sequence_rnn.example.cpp b/runtime/test/generated/spec_V1_3/unidirectional_sequence_rnn.example.cpp index ccc599d..d5839ee 100644 --- a/runtime/test/generated/spec_V1_3/unidirectional_sequence_rnn.example.cpp +++ b/runtime/test/generated/spec_V1_3/unidirectional_sequence_rnn.example.cpp
@@ -230,7 +230,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -260,7 +260,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -290,7 +290,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -320,7 +320,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -350,7 +350,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder4 + }, { // dummy4 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -634,7 +634,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -664,7 +664,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -694,7 +694,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -724,7 +724,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = 
{1}, @@ -754,7 +754,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1038,7 +1038,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1068,7 +1068,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1098,7 +1098,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder12 + }, { // dummy12 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1128,7 +1128,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder13 + }, { // dummy13 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1158,7 +1158,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1442,7 +1442,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder15 + }, { // dummy15 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1472,7 +1472,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder16 + }, { // dummy16 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1502,7 +1502,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder17 + }, { // dummy17 .channelQuant = {}, .data = 
TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1532,7 +1532,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder18 + }, { // dummy18 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1562,7 +1562,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder19 + }, { // dummy19 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1846,7 +1846,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder20 + }, { // dummy20 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1876,7 +1876,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder21 + }, { // dummy21 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1906,7 +1906,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder22 + }, { // dummy22 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1936,7 +1936,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder23 + }, { // dummy23 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1966,7 +1966,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder24 + }, { // dummy24 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2250,7 +2250,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder25 + }, { // dummy25 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -2280,7 +2280,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder26 
+ }, { // dummy26 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -2310,7 +2310,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder27 + }, { // dummy27 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -2340,7 +2340,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder28 + }, { // dummy28 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -2370,7 +2370,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder29 + }, { // dummy29 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_3/while_fib.example.cpp b/runtime/test/generated/spec_V1_3/while_fib.example.cpp index 2c522b0..e8f4740 100644 --- a/runtime/test/generated/spec_V1_3/while_fib.example.cpp +++ b/runtime/test/generated/spec_V1_3/while_fib.example.cpp
@@ -611,7 +611,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -894,7 +894,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_INT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1715,7 +1715,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1998,7 +1998,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_INT32, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2819,7 +2819,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -3102,7 +3102,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_INT32, .zeroPoint = 0 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -3923,7 +3923,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder15 + }, { // dummy15 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -4206,7 +4206,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_INT32, .zeroPoint = 0 - }, { // placeholder16 + }, { // dummy16 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -5027,7 +5027,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder20 + }, { // dummy20 .channelQuant = {}, .data = 
TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -5310,7 +5310,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_INT32, .zeroPoint = 0 - }, { // placeholder21 + }, { // dummy21 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -6131,7 +6131,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder25 + }, { // dummy25 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -6414,7 +6414,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_INT32, .zeroPoint = 0 - }, { // placeholder26 + }, { // dummy26 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -7235,7 +7235,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder30 + }, { // dummy30 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -7518,7 +7518,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_INT32, .zeroPoint = 0 - }, { // placeholder31 + }, { // dummy31 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -8339,7 +8339,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder35 + }, { // dummy35 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -8622,7 +8622,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_INT32, .zeroPoint = 0 - }, { // placeholder36 + }, { // dummy36 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -9443,7 +9443,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder40 + }, { // dummy40 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -9726,7 +9726,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_INT32, .zeroPoint = 0 - }, { // placeholder41 + }, { 
// dummy41 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -10547,7 +10547,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder45 + }, { // dummy45 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -10830,7 +10830,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_INT32, .zeroPoint = 0 - }, { // placeholder46 + }, { // dummy46 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -11651,7 +11651,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder50 + }, { // dummy50 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -11934,7 +11934,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_INT32, .zeroPoint = 0 - }, { // placeholder51 + }, { // dummy51 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -12755,7 +12755,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder55 + }, { // dummy55 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -13038,7 +13038,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_INT32, .zeroPoint = 0 - }, { // placeholder56 + }, { // dummy56 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -13859,7 +13859,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder60 + }, { // dummy60 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -14142,7 +14142,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_INT32, .zeroPoint = 0 - }, { // placeholder61 + }, { // dummy61 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -14963,7 +14963,7 @@ .scale = 1.0f, .type = 
TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder65 + }, { // dummy65 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -15246,7 +15246,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_INT32, .zeroPoint = 0 - }, { // placeholder66 + }, { // dummy66 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -16067,7 +16067,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder70 + }, { // dummy70 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -16350,7 +16350,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_INT32, .zeroPoint = 0 - }, { // placeholder71 + }, { // dummy71 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -17141,7 +17141,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder75 + }, { // dummy75 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -17414,7 +17414,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder76 + }, { // dummy76 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -18205,7 +18205,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder80 + }, { // dummy80 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -18478,7 +18478,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder81 + }, { // dummy81 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -19269,7 +19269,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder85 + }, { // dummy85 .channelQuant = {}, .data = 
TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -19542,7 +19542,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder86 + }, { // dummy86 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -20333,7 +20333,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder90 + }, { // dummy90 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -20606,7 +20606,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder91 + }, { // dummy91 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -21397,7 +21397,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder95 + }, { // dummy95 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -21670,7 +21670,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder96 + }, { // dummy96 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -22461,7 +22461,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder100 + }, { // dummy100 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -22734,7 +22734,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder101 + }, { // dummy101 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -23525,7 +23525,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder105 + }, { // dummy105 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -23798,7 +23798,7 @@ .scale = 0.0f, .type = 
TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder106 + }, { // dummy106 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -24589,7 +24589,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder110 + }, { // dummy110 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -24862,7 +24862,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder111 + }, { // dummy111 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -25653,7 +25653,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder115 + }, { // dummy115 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -25926,7 +25926,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder116 + }, { // dummy116 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -26717,7 +26717,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder120 + }, { // dummy120 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -26990,7 +26990,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder121 + }, { // dummy121 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -27811,7 +27811,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder125 + }, { // dummy125 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -28094,7 +28094,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_INT32, .zeroPoint = 0 - }, { // placeholder126 + }, { // dummy126 .channelQuant = {}, .data = 
TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -28915,7 +28915,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder130 + }, { // dummy130 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -29198,7 +29198,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_INT32, .zeroPoint = 0 - }, { // placeholder131 + }, { // dummy131 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -30019,7 +30019,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder135 + }, { // dummy135 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -30302,7 +30302,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_INT32, .zeroPoint = 0 - }, { // placeholder136 + }, { // dummy136 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -31123,7 +31123,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder140 + }, { // dummy140 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -31406,7 +31406,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_INT32, .zeroPoint = 0 - }, { // placeholder141 + }, { // dummy141 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -32227,7 +32227,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder145 + }, { // dummy145 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -32510,7 +32510,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_INT32, .zeroPoint = 0 - }, { // placeholder146 + }, { // dummy146 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -33331,7 +33331,7 @@ .scale = 0.0f, .type = 
TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder150 + }, { // dummy150 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -33614,7 +33614,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_INT32, .zeroPoint = 0 - }, { // placeholder151 + }, { // dummy151 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -34435,7 +34435,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder155 + }, { // dummy155 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -34718,7 +34718,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_INT32, .zeroPoint = 0 - }, { // placeholder156 + }, { // dummy156 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -35539,7 +35539,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder160 + }, { // dummy160 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -35822,7 +35822,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_INT32, .zeroPoint = 0 - }, { // placeholder161 + }, { // dummy161 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -36643,7 +36643,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder165 + }, { // dummy165 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -36926,7 +36926,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_INT32, .zeroPoint = 0 - }, { // placeholder166 + }, { // dummy166 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -37747,7 +37747,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder170 + }, { // dummy170 .channelQuant = {}, .data = 
TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -38030,7 +38030,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_INT32, .zeroPoint = 0 - }, { // placeholder171 + }, { // dummy171 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -38851,7 +38851,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder175 + }, { // dummy175 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -39134,7 +39134,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_INT32, .zeroPoint = 0 - }, { // placeholder176 + }, { // dummy176 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -39955,7 +39955,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder180 + }, { // dummy180 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -40238,7 +40238,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_INT32, .zeroPoint = 0 - }, { // placeholder181 + }, { // dummy181 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -41059,7 +41059,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder185 + }, { // dummy185 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -41342,7 +41342,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_INT32, .zeroPoint = 0 - }, { // placeholder186 + }, { // dummy186 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -42163,7 +42163,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder190 + }, { // dummy190 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -42446,7 +42446,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_INT32, .zeroPoint 
= 0 - }, { // placeholder191 + }, { // dummy191 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -43267,7 +43267,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder195 + }, { // dummy195 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -43550,7 +43550,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_INT32, .zeroPoint = 0 - }, { // placeholder196 + }, { // dummy196 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -44341,7 +44341,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder200 + }, { // dummy200 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -44614,7 +44614,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder201 + }, { // dummy201 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -45405,7 +45405,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder205 + }, { // dummy205 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -45678,7 +45678,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder206 + }, { // dummy206 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -46469,7 +46469,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder210 + }, { // dummy210 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -46742,7 +46742,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder211 + }, { // dummy211 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, 
@@ -47533,7 +47533,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder215 + }, { // dummy215 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -47806,7 +47806,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder216 + }, { // dummy216 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -48597,7 +48597,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder220 + }, { // dummy220 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -48870,7 +48870,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder221 + }, { // dummy221 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -49661,7 +49661,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder225 + }, { // dummy225 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -49934,7 +49934,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder226 + }, { // dummy226 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -50725,7 +50725,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder230 + }, { // dummy230 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -50998,7 +50998,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder231 + }, { // dummy231 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -51789,7 +51789,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder235 + 
}, { // dummy235 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -52062,7 +52062,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder236 + }, { // dummy236 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -52853,7 +52853,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder240 + }, { // dummy240 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -53126,7 +53126,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder241 + }, { // dummy241 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -53917,7 +53917,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder245 + }, { // dummy245 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -54190,7 +54190,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder246 + }, { // dummy246 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_3/while_sum_of_powers.example.cpp b/runtime/test/generated/spec_V1_3/while_sum_of_powers.example.cpp index 2b124c9..1343893 100644 --- a/runtime/test/generated/spec_V1_3/while_sum_of_powers.example.cpp +++ b/runtime/test/generated/spec_V1_3/while_sum_of_powers.example.cpp
@@ -558,7 +558,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1522,7 +1522,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1552,7 +1552,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2045,7 +2045,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3019,7 +3019,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3983,7 +3983,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4013,7 +4013,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4506,7 +4506,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -5480,7 +5480,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = 
TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -6444,7 +6444,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder15 + }, { // dummy15 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -6474,7 +6474,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder16 + }, { // dummy16 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -6967,7 +6967,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder17 + }, { // dummy17 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -7941,7 +7941,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder21 + }, { // dummy21 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -8905,7 +8905,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder22 + }, { // dummy22 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -8935,7 +8935,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder23 + }, { // dummy23 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -9428,7 +9428,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder24 + }, { // dummy24 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -10402,7 +10402,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder28 + }, { // dummy28 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -11366,7 +11366,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // 
placeholder29 + }, { // dummy29 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -11396,7 +11396,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder30 + }, { // dummy30 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -11889,7 +11889,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder31 + }, { // dummy31 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -12863,7 +12863,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder35 + }, { // dummy35 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -13827,7 +13827,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder36 + }, { // dummy36 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -13857,7 +13857,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder37 + }, { // dummy37 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -14350,7 +14350,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder38 + }, { // dummy38 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -15324,7 +15324,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder42 + }, { // dummy42 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -16288,7 +16288,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder43 + }, { // dummy43 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -16318,7 +16318,7 @@ .scale 
= 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder44 + }, { // dummy44 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -16811,7 +16811,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder45 + }, { // dummy45 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -17785,7 +17785,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder49 + }, { // dummy49 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -18749,7 +18749,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder50 + }, { // dummy50 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -18779,7 +18779,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder51 + }, { // dummy51 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -19272,7 +19272,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder52 + }, { // dummy52 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -20246,7 +20246,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder56 + }, { // dummy56 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -21210,7 +21210,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder57 + }, { // dummy57 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -21240,7 +21240,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder58 + }, { // dummy58 .channelQuant = {}, .data = 
TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -21733,7 +21733,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder59 + }, { // dummy59 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -22707,7 +22707,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder63 + }, { // dummy63 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -23671,7 +23671,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder64 + }, { // dummy64 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -23701,7 +23701,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder65 + }, { // dummy65 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -24194,7 +24194,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder66 + }, { // dummy66 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -25168,7 +25168,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder70 + }, { // dummy70 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -26132,7 +26132,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder71 + }, { // dummy71 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -26162,7 +26162,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder72 + }, { // dummy72 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -26655,7 +26655,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - 
}, { // placeholder73 + }, { // dummy73 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -27629,7 +27629,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder77 + }, { // dummy77 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -28593,7 +28593,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder78 + }, { // dummy78 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -28623,7 +28623,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder79 + }, { // dummy79 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -29116,7 +29116,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder80 + }, { // dummy80 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -30090,7 +30090,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder84 + }, { // dummy84 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -31054,7 +31054,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder85 + }, { // dummy85 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -31084,7 +31084,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder86 + }, { // dummy86 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -31577,7 +31577,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder87 + }, { // dummy87 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -32551,7 +32551,7 @@ 
.scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder91 + }, { // dummy91 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -33515,7 +33515,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder92 + }, { // dummy92 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -33545,7 +33545,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder93 + }, { // dummy93 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -34038,7 +34038,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder94 + }, { // dummy94 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -35012,7 +35012,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder98 + }, { // dummy98 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -35976,7 +35976,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder99 + }, { // dummy99 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -36006,7 +36006,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder100 + }, { // dummy100 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -36499,7 +36499,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder101 + }, { // dummy101 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -37473,7 +37473,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder105 + }, { // dummy105 .channelQuant = {}, .data = 
TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -38437,7 +38437,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder106 + }, { // dummy106 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -38467,7 +38467,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder107 + }, { // dummy107 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -38960,7 +38960,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder108 + }, { // dummy108 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -39934,7 +39934,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder112 + }, { // dummy112 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -40898,7 +40898,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder113 + }, { // dummy113 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -40928,7 +40928,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder114 + }, { // dummy114 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -41421,7 +41421,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder115 + }, { // dummy115 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -42395,7 +42395,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder119 + }, { // dummy119 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -43359,7 +43359,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, 
.zeroPoint = 0 - }, { // placeholder120 + }, { // dummy120 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -43389,7 +43389,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder121 + }, { // dummy121 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -43882,7 +43882,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder122 + }, { // dummy122 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -44856,7 +44856,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder126 + }, { // dummy126 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -45820,7 +45820,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder127 + }, { // dummy127 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -45850,7 +45850,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder128 + }, { // dummy128 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -46343,7 +46343,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder129 + }, { // dummy129 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -47317,7 +47317,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder133 + }, { // dummy133 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -48281,7 +48281,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder134 + }, { // dummy134 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), 
.dimensions = {1}, @@ -48311,7 +48311,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder135 + }, { // dummy135 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -48804,7 +48804,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder136 + }, { // dummy136 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -49778,7 +49778,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder140 + }, { // dummy140 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -50742,7 +50742,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder141 + }, { // dummy141 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -50772,7 +50772,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder142 + }, { // dummy142 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -51265,7 +51265,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder143 + }, { // dummy143 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -52239,7 +52239,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder147 + }, { // dummy147 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -53203,7 +53203,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder148 + }, { // dummy148 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -53233,7 +53233,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder149 
+ }, { // dummy149 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -53726,7 +53726,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder150 + }, { // dummy150 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -54700,7 +54700,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder154 + }, { // dummy154 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -55664,7 +55664,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder155 + }, { // dummy155 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -55694,7 +55694,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder156 + }, { // dummy156 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -56187,7 +56187,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder157 + }, { // dummy157 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -57161,7 +57161,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder161 + }, { // dummy161 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -58125,7 +58125,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder162 + }, { // dummy162 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -58155,7 +58155,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder163 + }, { // dummy163 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -58648,7 +58648,7 @@ .scale 
= 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder164 + }, { // dummy164 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -59622,7 +59622,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder168 + }, { // dummy168 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -60586,7 +60586,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder169 + }, { // dummy169 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -60616,7 +60616,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder170 + }, { // dummy170 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -61109,7 +61109,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder171 + }, { // dummy171 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -62083,7 +62083,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder175 + }, { // dummy175 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -63047,7 +63047,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder176 + }, { // dummy176 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -63077,7 +63077,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder177 + }, { // dummy177 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -63570,7 +63570,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder178 + }, { // dummy178 .channelQuant = {}, .data = 
TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -64544,7 +64544,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder182 + }, { // dummy182 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -65508,7 +65508,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder183 + }, { // dummy183 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -65538,7 +65538,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder184 + }, { // dummy184 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -66031,7 +66031,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder185 + }, { // dummy185 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -67005,7 +67005,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder189 + }, { // dummy189 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -67969,7 +67969,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder190 + }, { // dummy190 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -67999,7 +67999,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder191 + }, { // dummy191 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -68492,7 +68492,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder192 + }, { // dummy192 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -69466,7 +69466,7 @@ .scale = 0.0f, .type = 
TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder196 + }, { // dummy196 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -70430,7 +70430,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder197 + }, { // dummy197 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -70460,7 +70460,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder198 + }, { // dummy198 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -70953,7 +70953,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder199 + }, { // dummy199 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -71927,7 +71927,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder203 + }, { // dummy203 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -72891,7 +72891,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder204 + }, { // dummy204 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -72921,7 +72921,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder205 + }, { // dummy205 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -73414,7 +73414,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder206 + }, { // dummy206 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_3/while_sum_of_powers_quant8.example.cpp b/runtime/test/generated/spec_V1_3/while_sum_of_powers_quant8.example.cpp index 65f1621..27178d9 100644 --- a/runtime/test/generated/spec_V1_3/while_sum_of_powers_quant8.example.cpp +++ b/runtime/test/generated/spec_V1_3/while_sum_of_powers_quant8.example.cpp
@@ -592,7 +592,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -1624,7 +1624,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -1654,7 +1654,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -2181,7 +2181,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -3223,7 +3223,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -4255,7 +4255,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -4285,7 +4285,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -4812,7 +4812,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -5854,7 +5854,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder14 + }, { // 
dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -6886,7 +6886,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder15 + }, { // dummy15 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -6916,7 +6916,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder16 + }, { // dummy16 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -7443,7 +7443,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder17 + }, { // dummy17 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -8485,7 +8485,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder21 + }, { // dummy21 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -9517,7 +9517,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder22 + }, { // dummy22 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -9547,7 +9547,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder23 + }, { // dummy23 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -10074,7 +10074,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder24 + }, { // dummy24 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -11116,7 +11116,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder28 + }, { // dummy28 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -12148,7 
+12148,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder29 + }, { // dummy29 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -12178,7 +12178,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder30 + }, { // dummy30 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1}, @@ -12705,7 +12705,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 128 - }, { // placeholder31 + }, { // dummy31 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({128}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_3/while_sum_of_powers_quant8_signed.example.cpp b/runtime/test/generated/spec_V1_3/while_sum_of_powers_quant8_signed.example.cpp index d2b55db..5065e3a 100644 --- a/runtime/test/generated/spec_V1_3/while_sum_of_powers_quant8_signed.example.cpp +++ b/runtime/test/generated/spec_V1_3/while_sum_of_powers_quant8_signed.example.cpp
@@ -592,7 +592,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 12 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({12}), .dimensions = {1}, @@ -1624,7 +1624,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 12 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({12}), .dimensions = {1}, @@ -1654,7 +1654,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 12 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({12}), .dimensions = {1}, @@ -2181,7 +2181,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 12 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({12}), .dimensions = {1}, @@ -3223,7 +3223,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 12 - }, { // placeholder7 + }, { // dummy7 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({12}), .dimensions = {1}, @@ -4255,7 +4255,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 12 - }, { // placeholder8 + }, { // dummy8 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({12}), .dimensions = {1}, @@ -4285,7 +4285,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 12 - }, { // placeholder9 + }, { // dummy9 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({12}), .dimensions = {1}, @@ -4812,7 +4812,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 12 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({12}), .dimensions = {1}, @@ -5854,7 +5854,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 12 
- }, { // placeholder14 + }, { // dummy14 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({12}), .dimensions = {1}, @@ -6886,7 +6886,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 12 - }, { // placeholder15 + }, { // dummy15 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({12}), .dimensions = {1}, @@ -6916,7 +6916,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 12 - }, { // placeholder16 + }, { // dummy16 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({12}), .dimensions = {1}, @@ -7443,7 +7443,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 12 - }, { // placeholder17 + }, { // dummy17 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({12}), .dimensions = {1}, @@ -8485,7 +8485,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 12 - }, { // placeholder21 + }, { // dummy21 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({12}), .dimensions = {1}, @@ -9517,7 +9517,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 12 - }, { // placeholder22 + }, { // dummy22 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({12}), .dimensions = {1}, @@ -9547,7 +9547,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 12 - }, { // placeholder23 + }, { // dummy23 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({12}), .dimensions = {1}, @@ -10074,7 +10074,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 12 - }, { // placeholder24 + }, { // dummy24 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({12}), .dimensions = {1}, @@ -11116,7 +11116,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 12 - }, { // placeholder28 + }, { // dummy28 .channelQuant = {}, .data = 
TestBuffer::createFromVector<int8_t>({12}), .dimensions = {1}, @@ -12148,7 +12148,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 12 - }, { // placeholder29 + }, { // dummy29 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({12}), .dimensions = {1}, @@ -12178,7 +12178,7 @@ .scale = 0.5f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 12 - }, { // placeholder30 + }, { // dummy30 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({12}), .dimensions = {1}, @@ -12705,7 +12705,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 12 - }, { // placeholder31 + }, { // dummy31 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({12}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_3_cts_only/conv2d_v1_3_invalid_rank.example.cpp b/runtime/test/generated/spec_V1_3_cts_only/conv2d_v1_3_invalid_rank.example.cpp index 4d59d79..d0de7e1 100644 --- a/runtime/test/generated/spec_V1_3_cts_only/conv2d_v1_3_invalid_rank.example.cpp +++ b/runtime/test/generated/spec_V1_3_cts_only/conv2d_v1_3_invalid_rank.example.cpp
@@ -290,7 +290,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -618,7 +618,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -648,7 +648,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder2 + }, { // dummy2 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -678,7 +678,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder3 + }, { // dummy3 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_3_cts_only/if_simple_unknown_dimension.example.cpp b/runtime/test/generated/spec_V1_3_cts_only/if_simple_unknown_dimension.example.cpp index 4672d76..91b21f2 100644 --- a/runtime/test/generated/spec_V1_3_cts_only/if_simple_unknown_dimension.example.cpp +++ b/runtime/test/generated/spec_V1_3_cts_only/if_simple_unknown_dimension.example.cpp
@@ -248,7 +248,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -447,7 +447,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -841,7 +841,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1040,7 +1040,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1434,7 +1434,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1633,7 +1633,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -2202,7 +2202,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder15 + }, { // dummy15 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -2401,7 +2401,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder16 + }, { // dummy16 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -2795,7 +2795,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 100 - }, { // placeholder20 + }, { // dummy20 .channelQuant = {}, .data = 
TestBuffer::createFromVector<int8_t>({100}), .dimensions = {1}, @@ -2994,7 +2994,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 100 - }, { // placeholder21 + }, { // dummy21 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({100}), .dimensions = {1}, @@ -3388,7 +3388,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder25 + }, { // dummy25 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3587,7 +3587,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder26 + }, { // dummy26 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3981,7 +3981,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder30 + }, { // dummy30 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4180,7 +4180,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder31 + }, { // dummy31 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4574,7 +4574,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder35 + }, { // dummy35 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -4773,7 +4773,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder36 + }, { // dummy36 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -5342,7 +5342,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder40 + }, { // dummy40 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -5541,7 +5541,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, 
.zeroPoint = 100 - }, { // placeholder41 + }, { // dummy41 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -5935,7 +5935,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 100 - }, { // placeholder45 + }, { // dummy45 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({100}), .dimensions = {1}, @@ -6134,7 +6134,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 100 - }, { // placeholder46 + }, { // dummy46 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({100}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_3_cts_only/if_simple_unknown_rank.example.cpp b/runtime/test/generated/spec_V1_3_cts_only/if_simple_unknown_rank.example.cpp index 8921587..6c852fa 100644 --- a/runtime/test/generated/spec_V1_3_cts_only/if_simple_unknown_rank.example.cpp +++ b/runtime/test/generated/spec_V1_3_cts_only/if_simple_unknown_rank.example.cpp
@@ -248,7 +248,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -447,7 +447,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -841,7 +841,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1040,7 +1040,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1434,7 +1434,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -1633,7 +1633,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -2202,7 +2202,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder15 + }, { // dummy15 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -2401,7 +2401,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder16 + }, { // dummy16 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -2795,7 +2795,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 100 - }, { // placeholder20 + }, { // dummy20 .channelQuant = {}, .data = 
TestBuffer::createFromVector<int8_t>({100}), .dimensions = {1}, @@ -2994,7 +2994,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 100 - }, { // placeholder21 + }, { // dummy21 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({100}), .dimensions = {1}, @@ -3388,7 +3388,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder25 + }, { // dummy25 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3587,7 +3587,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder26 + }, { // dummy26 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3981,7 +3981,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder30 + }, { // dummy30 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4180,7 +4180,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder31 + }, { // dummy31 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -4574,7 +4574,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder35 + }, { // dummy35 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -4773,7 +4773,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder36 + }, { // dummy36 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -5342,7 +5342,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 100 - }, { // placeholder40 + }, { // dummy40 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -5541,7 +5541,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, 
.zeroPoint = 100 - }, { // placeholder41 + }, { // dummy41 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({100}), .dimensions = {1}, @@ -5935,7 +5935,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 100 - }, { // placeholder45 + }, { // dummy45 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({100}), .dimensions = {1}, @@ -6134,7 +6134,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 100 - }, { // placeholder46 + }, { // dummy46 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({100}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_3_cts_only/while_fib_unknown_dimension.example.cpp b/runtime/test/generated/spec_V1_3_cts_only/while_fib_unknown_dimension.example.cpp index b173fd5..df943e5 100644 --- a/runtime/test/generated/spec_V1_3_cts_only/while_fib_unknown_dimension.example.cpp +++ b/runtime/test/generated/spec_V1_3_cts_only/while_fib_unknown_dimension.example.cpp
@@ -843,7 +843,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1242,7 +1242,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_INT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2411,7 +2411,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2810,7 +2810,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_INT32, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3979,7 +3979,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -4378,7 +4378,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_INT32, .zeroPoint = 0 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -5547,7 +5547,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder15 + }, { // dummy15 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -5946,7 +5946,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_INT32, .zeroPoint = 0 - }, { // placeholder16 + }, { // dummy16 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -7115,7 +7115,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder20 + }, { // dummy20 .channelQuant = {}, .data = 
TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -7514,7 +7514,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_INT32, .zeroPoint = 0 - }, { // placeholder21 + }, { // dummy21 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -8683,7 +8683,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder25 + }, { // dummy25 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -9082,7 +9082,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_INT32, .zeroPoint = 0 - }, { // placeholder26 + }, { // dummy26 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -10251,7 +10251,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder30 + }, { // dummy30 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -10650,7 +10650,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_INT32, .zeroPoint = 0 - }, { // placeholder31 + }, { // dummy31 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -11819,7 +11819,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder35 + }, { // dummy35 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -12218,7 +12218,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_INT32, .zeroPoint = 0 - }, { // placeholder36 + }, { // dummy36 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -13387,7 +13387,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder40 + }, { // dummy40 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -13786,7 +13786,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_INT32, .zeroPoint = 0 - }, { // 
placeholder41 + }, { // dummy41 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -14955,7 +14955,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder45 + }, { // dummy45 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -15354,7 +15354,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_INT32, .zeroPoint = 0 - }, { // placeholder46 + }, { // dummy46 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -16523,7 +16523,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder50 + }, { // dummy50 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -16922,7 +16922,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_INT32, .zeroPoint = 0 - }, { // placeholder51 + }, { // dummy51 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -18091,7 +18091,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder55 + }, { // dummy55 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -18490,7 +18490,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_INT32, .zeroPoint = 0 - }, { // placeholder56 + }, { // dummy56 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -19659,7 +19659,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder60 + }, { // dummy60 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -20058,7 +20058,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_INT32, .zeroPoint = 0 - }, { // placeholder61 + }, { // dummy61 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -21227,7 +21227,7 @@ .scale = 1.0f, 
.type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder65 + }, { // dummy65 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -21626,7 +21626,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_INT32, .zeroPoint = 0 - }, { // placeholder66 + }, { // dummy66 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -22795,7 +22795,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder70 + }, { // dummy70 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -23194,7 +23194,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_INT32, .zeroPoint = 0 - }, { // placeholder71 + }, { // dummy71 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -24333,7 +24333,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder75 + }, { // dummy75 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -24722,7 +24722,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder76 + }, { // dummy76 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -25861,7 +25861,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder80 + }, { // dummy80 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -26250,7 +26250,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder81 + }, { // dummy81 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -27389,7 +27389,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder85 + }, { // dummy85 .channelQuant = {}, .data = 
TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -27778,7 +27778,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder86 + }, { // dummy86 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -28917,7 +28917,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder90 + }, { // dummy90 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -29306,7 +29306,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder91 + }, { // dummy91 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -30445,7 +30445,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder95 + }, { // dummy95 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -30834,7 +30834,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder96 + }, { // dummy96 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -31973,7 +31973,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder100 + }, { // dummy100 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -32362,7 +32362,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder101 + }, { // dummy101 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -33501,7 +33501,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder105 + }, { // dummy105 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -33890,7 +33890,7 @@ .scale = 0.0f, .type = 
TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder106 + }, { // dummy106 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -35029,7 +35029,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder110 + }, { // dummy110 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -35418,7 +35418,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder111 + }, { // dummy111 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -36557,7 +36557,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder115 + }, { // dummy115 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -36946,7 +36946,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder116 + }, { // dummy116 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -38085,7 +38085,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder120 + }, { // dummy120 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -38474,7 +38474,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder121 + }, { // dummy121 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/generated/spec_V1_3_cts_only/while_fib_unknown_rank.example.cpp b/runtime/test/generated/spec_V1_3_cts_only/while_fib_unknown_rank.example.cpp index 0f846fb..e8497bc 100644 --- a/runtime/test/generated/spec_V1_3_cts_only/while_fib_unknown_rank.example.cpp +++ b/runtime/test/generated/spec_V1_3_cts_only/while_fib_unknown_rank.example.cpp
@@ -843,7 +843,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder + }, { // dummy .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -1242,7 +1242,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_INT32, .zeroPoint = 0 - }, { // placeholder1 + }, { // dummy1 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2411,7 +2411,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder5 + }, { // dummy5 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -2810,7 +2810,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_INT32, .zeroPoint = 0 - }, { // placeholder6 + }, { // dummy6 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -3979,7 +3979,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder10 + }, { // dummy10 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -4378,7 +4378,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_INT32, .zeroPoint = 0 - }, { // placeholder11 + }, { // dummy11 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -5547,7 +5547,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder15 + }, { // dummy15 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -5946,7 +5946,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_INT32, .zeroPoint = 0 - }, { // placeholder16 + }, { // dummy16 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -7115,7 +7115,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder20 + }, { // dummy20 .channelQuant = {}, .data = 
TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -7514,7 +7514,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_INT32, .zeroPoint = 0 - }, { // placeholder21 + }, { // dummy21 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -8683,7 +8683,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder25 + }, { // dummy25 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -9082,7 +9082,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_INT32, .zeroPoint = 0 - }, { // placeholder26 + }, { // dummy26 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -10251,7 +10251,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder30 + }, { // dummy30 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -10650,7 +10650,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_INT32, .zeroPoint = 0 - }, { // placeholder31 + }, { // dummy31 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -11819,7 +11819,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder35 + }, { // dummy35 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -12218,7 +12218,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_INT32, .zeroPoint = 0 - }, { // placeholder36 + }, { // dummy36 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -13387,7 +13387,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder40 + }, { // dummy40 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -13786,7 +13786,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_INT32, .zeroPoint = 0 - }, { // 
placeholder41 + }, { // dummy41 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -14955,7 +14955,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder45 + }, { // dummy45 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -15354,7 +15354,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_INT32, .zeroPoint = 0 - }, { // placeholder46 + }, { // dummy46 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -16523,7 +16523,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder50 + }, { // dummy50 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -16922,7 +16922,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_INT32, .zeroPoint = 0 - }, { // placeholder51 + }, { // dummy51 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -18091,7 +18091,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder55 + }, { // dummy55 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -18490,7 +18490,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_INT32, .zeroPoint = 0 - }, { // placeholder56 + }, { // dummy56 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -19659,7 +19659,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder60 + }, { // dummy60 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -20058,7 +20058,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_INT32, .zeroPoint = 0 - }, { // placeholder61 + }, { // dummy61 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -21227,7 +21227,7 @@ .scale = 1.0f, 
.type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder65 + }, { // dummy65 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -21626,7 +21626,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_INT32, .zeroPoint = 0 - }, { // placeholder66 + }, { // dummy66 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -22795,7 +22795,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder70 + }, { // dummy70 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -23194,7 +23194,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_INT32, .zeroPoint = 0 - }, { // placeholder71 + }, { // dummy71 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -24333,7 +24333,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder75 + }, { // dummy75 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -24722,7 +24722,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder76 + }, { // dummy76 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -25861,7 +25861,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder80 + }, { // dummy80 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -26250,7 +26250,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder81 + }, { // dummy81 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -27389,7 +27389,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder85 + }, { // dummy85 .channelQuant = {}, .data = 
TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -27778,7 +27778,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder86 + }, { // dummy86 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -28917,7 +28917,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder90 + }, { // dummy90 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -29306,7 +29306,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder91 + }, { // dummy91 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -30445,7 +30445,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder95 + }, { // dummy95 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -30834,7 +30834,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder96 + }, { // dummy96 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -31973,7 +31973,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder100 + }, { // dummy100 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -32362,7 +32362,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder101 + }, { // dummy101 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -33501,7 +33501,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder105 + }, { // dummy105 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -33890,7 +33890,7 @@ .scale = 0.0f, .type = 
TestOperandType::TENSOR_FLOAT32, .zeroPoint = 0 - }, { // placeholder106 + }, { // dummy106 .channelQuant = {}, .data = TestBuffer::createFromVector<float>({0.0f}), .dimensions = {1}, @@ -35029,7 +35029,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder110 + }, { // dummy110 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -35418,7 +35418,7 @@ .scale = 0.0f, .type = TestOperandType::TENSOR_FLOAT16, .zeroPoint = 0 - }, { // placeholder111 + }, { // dummy111 .channelQuant = {}, .data = TestBuffer::createFromVector<_Float16>({0.0f}), .dimensions = {1}, @@ -36557,7 +36557,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder115 + }, { // dummy115 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -36946,7 +36946,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM, .zeroPoint = 0 - }, { // placeholder116 + }, { // dummy116 .channelQuant = {}, .data = TestBuffer::createFromVector<uint8_t>({0}), .dimensions = {1}, @@ -38085,7 +38085,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder120 + }, { // dummy120 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1}, @@ -38474,7 +38474,7 @@ .scale = 1.0f, .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, .zeroPoint = 0 - }, { // placeholder121 + }, { // dummy121 .channelQuant = {}, .data = TestBuffer::createFromVector<int8_t>({0}), .dimensions = {1},
diff --git a/runtime/test/shaders/TestGpuNnapi.comp b/runtime/test/shaders/TestGpuNnapi.comp deleted file mode 100644 index 47acd24..0000000 --- a/runtime/test/shaders/TestGpuNnapi.comp +++ /dev/null
@@ -1,36 +0,0 @@ -/* - * Copyright 2021 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// glslc TestGpuNnapi.comp -O --target-env=vulkan1.1 -mfmt=c -o TestGpuNnapi.comp.spv.inl - -#version 450 -#pragma shader_stage(compute) - -layout (local_size_x_id = 0, local_size_y_id = 1) in; - -// The 4-byte chunk to set for each output buffer entry. -layout (constant_id = 2) const uint CLEAR_DATA = 0; - -layout (binding = 0, std430) buffer Output { - uint data[]; -} outputBuffer; - -void main() { - uint size_x = gl_WorkGroupSize.x * gl_NumWorkGroups.x; - uint index = gl_GlobalInvocationID.x + gl_GlobalInvocationID.y * size_x; - outputBuffer.data[index] = CLEAR_DATA; -}
diff --git a/runtime/test/shaders/TestGpuNnapi.comp.spv.inl b/runtime/test/shaders/TestGpuNnapi.comp.spv.inl deleted file mode 100644 index 6478c28..0000000 --- a/runtime/test/shaders/TestGpuNnapi.comp.spv.inl +++ /dev/null
@@ -1,55 +0,0 @@ -{0x07230203,0x00010300,0x000d000a,0x00000029, -0x00000000,0x00020011,0x00000001,0x0006000b, -0x00000001,0x4c534c47,0x6474732e,0x3035342e, -0x00000000,0x0003000e,0x00000000,0x00000001, -0x0007000f,0x00000005,0x00000004,0x6e69616d, -0x00000000,0x00000011,0x00000017,0x00060010, -0x00000004,0x00000011,0x00000001,0x00000001, -0x00000001,0x00040047,0x00000009,0x00000001, -0x00000000,0x00040047,0x0000000a,0x00000001, -0x00000001,0x00040047,0x0000000d,0x0000000b, -0x00000019,0x00040047,0x00000011,0x0000000b, -0x00000018,0x00040047,0x00000017,0x0000000b, -0x0000001c,0x00040047,0x0000001f,0x00000006, -0x00000004,0x00050048,0x00000020,0x00000000, -0x00000023,0x00000000,0x00030047,0x00000020, -0x00000002,0x00040047,0x00000022,0x00000022, -0x00000000,0x00040047,0x00000022,0x00000021, -0x00000000,0x00040047,0x00000026,0x00000001, -0x00000002,0x00020013,0x00000002,0x00030021, -0x00000003,0x00000002,0x00040015,0x00000006, -0x00000020,0x00000000,0x00040032,0x00000006, -0x00000009,0x00000001,0x00040032,0x00000006, -0x0000000a,0x00000001,0x0004002b,0x00000006, -0x0000000b,0x00000001,0x00040017,0x0000000c, -0x00000006,0x00000003,0x00060033,0x0000000c, -0x0000000d,0x00000009,0x0000000a,0x0000000b, -0x0004002b,0x00000006,0x0000000e,0x00000000, -0x00040020,0x00000010,0x00000001,0x0000000c, -0x0004003b,0x00000010,0x00000011,0x00000001, -0x00040020,0x00000012,0x00000001,0x00000006, -0x0004003b,0x00000010,0x00000017,0x00000001, -0x0003001d,0x0000001f,0x00000006,0x0003001e, -0x00000020,0x0000001f,0x00040020,0x00000021, -0x0000000c,0x00000020,0x0004003b,0x00000021, -0x00000022,0x0000000c,0x00040015,0x00000023, -0x00000020,0x00000001,0x0004002b,0x00000023, -0x00000024,0x00000000,0x00040032,0x00000006, -0x00000026,0x00000000,0x00040020,0x00000027, -0x0000000c,0x00000006,0x00050036,0x00000002, -0x00000004,0x00000000,0x00000003,0x000200f8, -0x00000005,0x00050051,0x00000006,0x0000000f, -0x0000000d,0x00000000,0x00050041,0x00000012, -0x00000013,0x00000011,0x0000000e,0x0004003d, 
-0x00000006,0x00000014,0x00000013,0x00050084, -0x00000006,0x00000015,0x0000000f,0x00000014, -0x00050041,0x00000012,0x00000018,0x00000017, -0x0000000e,0x0004003d,0x00000006,0x00000019, -0x00000018,0x00050041,0x00000012,0x0000001a, -0x00000017,0x0000000b,0x0004003d,0x00000006, -0x0000001b,0x0000001a,0x00050084,0x00000006, -0x0000001d,0x0000001b,0x00000015,0x00050080, -0x00000006,0x0000001e,0x00000019,0x0000001d, -0x00060041,0x00000027,0x00000028,0x00000022, -0x00000024,0x0000001e,0x0003003e,0x00000028, -0x00000026,0x000100fd,0x00010038}
diff --git a/runtime/test/specs/V1_2/bbox_graph.mod.py b/runtime/test/specs/V1_2/bbox_graph.mod.py index 0834a92..253740b 100644 --- a/runtime/test/specs/V1_2/bbox_graph.mod.py +++ b/runtime/test/specs/V1_2/bbox_graph.mod.py
@@ -83,7 +83,7 @@ image: [32, 32], feature: [1], - # Placeholder outputs + # Dummy outputs scoresOut_1: [], scoresOut_6: [], roiOut_6: [],
diff --git a/runtime/test/specs/V1_3/bbox_graph_quant8_signed.mod.py b/runtime/test/specs/V1_3/bbox_graph_quant8_signed.mod.py index 83751f1..6afab65 100644 --- a/runtime/test/specs/V1_3/bbox_graph_quant8_signed.mod.py +++ b/runtime/test/specs/V1_3/bbox_graph_quant8_signed.mod.py
@@ -83,7 +83,7 @@ image: [32, 32], feature: [1], - # Placeholder outputs + # Dummy outputs scoresOut_1: [], scoresOut_6: [], roiOut_6: [],
diff --git a/runtime/test/specs/V1_3/while_infinite_loop.mod.py b/runtime/test/specs/V1_3/while_infinite_loop.mod.py index a0fbafe..9e97437 100644 --- a/runtime/test/specs/V1_3/while_infinite_loop.mod.py +++ b/runtime/test/specs/V1_3/while_infinite_loop.mod.py
@@ -57,4 +57,5 @@ example = Example({n: [0.0], i_out: [0.0]}, model=model) example.AddVariations("relaxed", "float16", quant8, quant8_signed) example.DisableLifeTimeVariation() +example.DisableDynamicOutputShapeVariation() example.ExpectFailure()
diff --git a/shim_and_sl/Android.bp b/shim_and_sl/Android.bp deleted file mode 100644 index b8d9264..0000000 --- a/shim_and_sl/Android.bp +++ /dev/null
@@ -1,132 +0,0 @@ -/* - * Copyright 2021 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -// package has name collision with other module, must use licenses in targets -// package { -// default_applicable_licenses: ["Android-Apache-2.0"], -// } - -cc_library_headers { - name: "neuralnetworks_supportlibrary_types_ndk", - export_include_dirs: ["public"], - licenses: ["Android-Apache-2.0"], - sdk_version: "current", - vendor_available: true, - min_sdk_version: "29", -} - -/** Version of the shim (Adapter between SL/Updatable Driver and sAIDL service) - * intended to be used by a non-updatable (without an OTA) NNAPI vendor drivers - * backed by a SL/Updatable Driver. 
- */ -package { - default_applicable_licenses: ["Android-Apache-2.0"], -} - -cc_library_static { - name: "libneuralnetworks_shim_static", - apex_available: [ - "//apex_available:platform", - "com.android.neuralnetworks", - "test_com.android.neuralnetworks", - ], - srcs: [ - "NeuralNetworksShim.cpp", - "ShimBufferTracker.cpp", - "ShimConverter.cpp", - "ShimDevice.cpp", - "ShimDeviceManager.cpp", - "ShimPreparedModel.cpp", - "ShimUtils.cpp", - ], - licenses: ["Android-Apache-2.0"], - vendor_available: true, - min_sdk_version: "30", - cflags: [ - // Needed by neuralnetworks_supportlibrary_loader - // Should be removed after doing b/117845862 - "-DNNTEST_SLTS", - "-DNN_COMPATIBILITY_LIBRARY_BUILD", - "-Wall", - "-Werror", - ], - header_libs: [ - "libneuralnetworks_headers", - ], - local_include_dirs: [ - "include", - ], - static_libs: [ - "android.hardware.common-V2-ndk_platform", - "android.hardware.neuralnetworks-V1-ndk_platform", - "libaidlcommonsupport", - "libarect", - "libcutils", - "libneuralnetworks_common", - "neuralnetworks_supportlibrary_loader", - "neuralnetworks_utils_hal_aidl", - "neuralnetworks_utils_hal_common", - ], - shared_libs: [ - "libbase", - "libbinder_ndk", - "libhidlbase", - "libhidlmemory", - "liblog", - "libnativewindow", - ], - export_include_dirs: [ - "public", - ], -} - -cc_library_static { - name: "neuralnetworks_supportlibrary_loader", - srcs: [ - "SupportLibrary.cpp", - "SupportLibraryWrapper.cpp", - ], - shared_libs: [ - "libnativewindow", - ], - cflags: [ - "-DNNTEST_COMPUTE_MODE", - "-DNNTEST_ONLY_PUBLIC_API", - "-DNNTEST_SLTS", - "-DNN_COMPATIBILITY_LIBRARY_BUILD", - "-Wall", - "-Werror", - ], - apex_available: [ - "//apex_available:platform", - "com.android.neuralnetworks", - "test_com.android.neuralnetworks", - ], - export_include_dirs: [ - "include", - "public", - ], - static_libs: [ - "libarect", - "libbase", - ], - licenses: ["Android-Apache-2.0"], - vendor_available: true, - min_sdk_version: "29", - header_libs: [ - 
"libneuralnetworks_headers", - ], -}
diff --git a/shim_and_sl/NeuralNetworksShim.cpp b/shim_and_sl/NeuralNetworksShim.cpp deleted file mode 100644 index 35697ce..0000000 --- a/shim_and_sl/NeuralNetworksShim.cpp +++ /dev/null
@@ -1,186 +0,0 @@ -/* - * Copyright (C) 2021 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#define LOG_TAG "NeuralNetworksShim" - -#include "NeuralNetworksShim.h" - -#include <android-base/logging.h> -#include <nnapi/Types.h> - -#include <limits> -#include <string> -#include <utility> -#include <vector> - -#include "ShimDevice.h" -#include "ShimDeviceManager.h" - -static_assert(offsetof(NnApiSLDriverImplFL5, base.implFeatureLevel) == 0, - ".base.implFeatureLevel is not at offset 0 of a NnApiSLDriverImplFL5 struct"); -static_assert(offsetof(NnApiSLDriverImpl, implFeatureLevel) == 0, - ".implFeatureLevel is not at offset 0 of a NnApiSLDriverImpl struct"); - -static_assert(sizeof(NnApiSLDriverImpl) == sizeof(int64_t), "NnApiSLDriverImpl size changed"); - -// 71 real ones and 1 synthetic placeholder to algin to 8 bytes on 32 bit archs -static_assert(sizeof(NnApiSLDriverImplFL5) == sizeof(int64_t) + 78 * sizeof(void*), - "NnApiSLDriverImplFL5 size changed"); - -static_assert(ANNSHIM_NO_ERROR == 0, "ANNSHIM_NO_ERROR has changed"); -static_assert(ANNSHIM_FAILED_TO_LOAD_SL == 1, "ANNSHIM_FAILED_TO_LOAD_SL has changed"); -static_assert(ANNSHIM_FAILED_TO_REGISTER_SERVICE == 2, - "ANNSHIM_FAILED_TO_REGISTER_SERVICE has changed"); -static_assert(ANNSHIM_GENERAL_ERROR == 3, "ANNSHIM_GENERAL_ERROR has changed"); -static_assert(ANNSHIM_INVALID_ARGUMENT == 4, "ANNSHIM_INVALID_ARGUMENT has changed"); - -using 
android::neuralnetworks::shim::registerDevices; -using android::neuralnetworks::shim::RegistrationParams; -using android::neuralnetworks::shim::ShimDeviceInfo; - -int ANeuralNetworksShim_registerSupportLibraryService( - const ANeuralNetworksShimRegistrationParams* registrationParams) { - if (registrationParams == nullptr) { - LOG(ERROR) << "Invalid arguments, registrationParams == nullptr "; - return ANNSHIM_INVALID_ARGUMENT; - } - const auto* params = reinterpret_cast<const RegistrationParams*>(registrationParams); - - NnApiSLDriverImpl* const nnapiImpl = params->nnapiSupportLibraryPackage; - const auto& deviceInfos = params->deviceInfos; - const uint32_t numberOfListenerThreads = params->numberOfListenerThreads; - const bool registerAsLazyService = params->registerAsLazyService; - const bool fallbackToMinimumSupportDevice = params->fallbackToMinimumSupportDevice; - - return static_cast<int>(registerDevices(nnapiImpl, deviceInfos, numberOfListenerThreads, - registerAsLazyService, fallbackToMinimumSupportDevice)); -} - -int ANeuralNetworksShimDeviceInfo_create(ANeuralNetworksShimDeviceInfo** deviceInfo, - const char* deviceName, const char* serviceName) { - if (deviceInfo != nullptr) { - *deviceInfo = nullptr; - } - - if (deviceName == nullptr) { - LOG(ERROR) << "Invalid arguments, deviceName passed a nullptr"; - return ANNSHIM_INVALID_ARGUMENT; - } - - auto result = new (std::nothrow) - ShimDeviceInfo{.deviceName = std::string(deviceName), - .serviceName = (serviceName == nullptr || strlen(serviceName) == 0) - ? 
std::string(deviceName) - : std::string(serviceName)}; - if (result == nullptr) { - return ANNSHIM_GENERAL_ERROR; - } - *deviceInfo = reinterpret_cast<ANeuralNetworksShimDeviceInfo*>(result); - return ANNSHIM_NO_ERROR; -} - -void ANeuralNetworksShimDeviceInfo_free(ANeuralNetworksShimDeviceInfo* deviceInfo) { - delete reinterpret_cast<ShimDeviceInfo*>(deviceInfo); -} - -int ANeuralNetworksShimRegistrationParams_create( - NnApiSLDriverImpl* nnapiSupportLibraryPackage, - ANeuralNetworksShimRegistrationParams** outRegistrationParams) { - if (outRegistrationParams != nullptr) { - *outRegistrationParams = nullptr; - } - - if (nnapiSupportLibraryPackage == nullptr) { - LOG(ERROR) << "Invalid arguments, nnapiSupportLibraryPackage == nullptr "; - return ANNSHIM_INVALID_ARGUMENT; - } - if (outRegistrationParams == nullptr) { - LOG(ERROR) << "Invalid arguments, outRegistrationParams == nullptr "; - return ANNSHIM_INVALID_ARGUMENT; - } - - auto result = new (std::nothrow) RegistrationParams{ - .nnapiSupportLibraryPackage = nnapiSupportLibraryPackage, - .registerAsLazyService = false, - .fallbackToMinimumSupportDevice = false, - }; - if (result == nullptr) { - return ANNSHIM_GENERAL_ERROR; - } - *outRegistrationParams = reinterpret_cast<ANeuralNetworksShimRegistrationParams*>(result); - return ANNSHIM_NO_ERROR; -} - -void ANeuralNetworksShimRegistrationParams_free( - ANeuralNetworksShimRegistrationParams* registrationParams) { - delete reinterpret_cast<RegistrationParams*>(registrationParams); -} - -int ANeuralNetworksShimRegistrationParams_addDeviceInfo( - ANeuralNetworksShimRegistrationParams* registrationParams, - const ANeuralNetworksShimDeviceInfo* deviceInfo) { - if (registrationParams == nullptr) { - LOG(ERROR) << "Invalid arguments, registrationParams == nullptr"; - return ANNSHIM_INVALID_ARGUMENT; - } - if (deviceInfo == nullptr) { - LOG(ERROR) << "Invalid arguments, deviceInfo == nullptr"; - return ANNSHIM_INVALID_ARGUMENT; - } - - auto params = 
reinterpret_cast<RegistrationParams*>(registrationParams); - auto info = reinterpret_cast<const ShimDeviceInfo*>(deviceInfo); - params->deviceInfos.push_back(*info); - return ANNSHIM_NO_ERROR; -} - -int ANeuralNetworksShimRegistrationParams_setNumberOfListenerThreads( - ANeuralNetworksShimRegistrationParams* registrationParams, - uint32_t numberOfListenerThreads) { - if (registrationParams == nullptr) { - LOG(ERROR) << "Invalid arguments, registrationParams == nullptr"; - return ANNSHIM_INVALID_ARGUMENT; - } - if (registrationParams == 0) { - LOG(ERROR) << "Invalid arguments, numberOfListenerThreads == 0"; - return ANNSHIM_INVALID_ARGUMENT; - } - auto params = reinterpret_cast<RegistrationParams*>(registrationParams); - params->numberOfListenerThreads = numberOfListenerThreads; - return ANNSHIM_NO_ERROR; -} - -int ANeuralNetworksShimRegistrationParams_registerAsLazyService( - ANeuralNetworksShimRegistrationParams* registrationParams, bool asLazy) { - if (registrationParams == nullptr) { - LOG(ERROR) << "Invalid arguments, registrationParams == nullptr"; - return ANNSHIM_INVALID_ARGUMENT; - } - auto params = reinterpret_cast<RegistrationParams*>(registrationParams); - params->registerAsLazyService = asLazy; - return ANNSHIM_NO_ERROR; -} - -int ANeuralNetworksShimRegistrationParams_fallbackToMinimumSupportDevice( - ANeuralNetworksShimRegistrationParams* registrationParams, bool fallback) { - if (registrationParams == nullptr) { - LOG(ERROR) << "Invalid arguments, registrationParams == nullptr"; - return ANNSHIM_INVALID_ARGUMENT; - } - auto params = reinterpret_cast<RegistrationParams*>(registrationParams); - params->fallbackToMinimumSupportDevice = fallback; - return ANNSHIM_NO_ERROR; -}
diff --git a/shim_and_sl/ShimBufferTracker.cpp b/shim_and_sl/ShimBufferTracker.cpp deleted file mode 100644 index 6abf1b4..0000000 --- a/shim_and_sl/ShimBufferTracker.cpp +++ /dev/null
@@ -1,69 +0,0 @@ -/* - * Copyright (C) 2021 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#define LOG_TAG "ShimBufferTracker" - -#include "ShimBufferTracker.h" -#include "ShimDevice.h" - -#include <android-base/logging.h> - -#include <algorithm> -#include <memory> -#include <string> -#include <utility> -#include <vector> - -using namespace ::android::nn::sl_wrapper; - -namespace aidl::android::hardware::neuralnetworks { - -std::unique_ptr<ShimBufferTracker::Token> ShimBufferTracker::add( - std::shared_ptr<::android::nn::sl_wrapper::Memory> buffer) { - if (buffer == nullptr) { - return nullptr; - } - std::lock_guard<std::mutex> guard(mMutex); - uint32_t token = 0; - if (mFreeTokens.empty()) { - token = mTokenToBuffers.size(); - mTokenToBuffers.push_back(std::move(buffer)); - } else { - token = mFreeTokens.top(); - mFreeTokens.pop(); - mTokenToBuffers[token] = std::move(buffer); - } - return std::make_unique<Token>(token, shared_from_this()); -} - -std::shared_ptr<::android::nn::sl_wrapper::Memory> ShimBufferTracker::get(uint32_t token) const { - std::lock_guard<std::mutex> guard(mMutex); - if (mTokenToBuffers.size() <= token || mTokenToBuffers[token] == nullptr) { - LOG(ERROR) << "ShimBufferTracker::get -- unknown token " << token; - return nullptr; - } - return mTokenToBuffers[token]; -} - -void ShimBufferTracker::free(uint32_t token) { - std::lock_guard<std::mutex> guard(mMutex); - CHECK_LT(token, 
mTokenToBuffers.size()); - CHECK(mTokenToBuffers[token] != nullptr); - mTokenToBuffers[token] = nullptr; - mFreeTokens.push(token); -} - -} // namespace aidl::android::hardware::neuralnetworks
diff --git a/shim_and_sl/ShimConverter.cpp b/shim_and_sl/ShimConverter.cpp deleted file mode 100644 index 398cc9b..0000000 --- a/shim_and_sl/ShimConverter.cpp +++ /dev/null
@@ -1,496 +0,0 @@ -/* - * Copyright (C) 2021 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#define LOG_TAG "ShimConverter" - -#include "ShimConverter.h" - -#include <aidlcommonsupport/NativeHandle.h> -#include <android-base/logging.h> -#include <android-base/mapped_file.h> -#include <android-base/scopeguard.h> -#include <android/hardware_buffer.h> -#include <cutils/native_handle.h> -#include <nnapi/TypeUtils.h> -#include <nnapi/hal/aidl/Conversions.h> -#include <nnapi/hal/aidl/Utils.h> -#include <sys/mman.h> -#include <vndk/hardware_buffer.h> - -#include <algorithm> -#include <memory> -#include <string> -#include <utility> -#include <vector> - -using namespace ::android::nn::sl_wrapper; - -namespace aidl::android::hardware::neuralnetworks { - -namespace { - -// Assumes that isValid(model) holds -ANeuralNetworksModel* convertSubgraphFromHAL( - const NnApiSupportLibrary* nnapi, - const std::vector<std::unique_ptr<::android::nn::sl_wrapper::Memory>>& memoryPools, - const neuralnetworks::Model& model, - std::vector<std::optional<::android::nn::sl_wrapper::Model>>* allModels, - size_t subgraphIndex, const std::vector<uint8_t>& copiedOperandValues, - ErrorStatus* errorStatus) { - *errorStatus = ErrorStatus::NONE; - if ((*allModels)[subgraphIndex].has_value()) { - return (*allModels)[subgraphIndex]->getHandle(); - } - - const auto& subgraph = subgraphIndex == 0 ? 
model.main : model.referenced[subgraphIndex - 1]; - ::android::nn::sl_wrapper::Model resultModel(nnapi); - - resultModel.relaxComputationFloat32toFloat16(model.relaxComputationFloat32toFloat16); - - auto getExtensionName = [&](uint16_t prefix) -> const std::string* { - for (const auto& nameToPrefix : model.extensionNameToPrefix) { - if (prefix == nameToPrefix.prefix) { - return &nameToPrefix.name; - } - } - return nullptr; - }; - - for (int i = 0; i < subgraph.operands.size(); ++i) { - const auto& operand = subgraph.operands[i]; - - const std::vector<uint32_t> dimensions = - ::android::nn::toUnsigned(operand.dimensions).value(); - - ::android::nn::wrapper::OperandType operandType( - static_cast<::android::nn::wrapper::Type>(operand.type), dimensions, operand.scale, - operand.zeroPoint); - - if (operand.type == OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL) { - const auto& params = operand.extraParams->get<OperandExtraParams::Tag::channelQuant>(); - operandType.channelQuant = ::android::nn::wrapper::SymmPerChannelQuantParams( - params.scales, static_cast<uint32_t>(params.channelDim)); - } - - if (::android::nn::isExtension(static_cast<::android::nn::OperandType>(operand.type))) { - uint16_t extensionPrefix = - ::android::nn::getExtensionPrefix(static_cast<uint32_t>(operand.type)); - uint16_t typeWithinExtension = - ::android::nn::getTypeWithinExtension(static_cast<uint32_t>(operand.type)); - - auto* extensionName = getExtensionName(extensionPrefix); - if (extensionName == nullptr) { - LOG(ERROR) << "Unknown extension prefix " << extensionPrefix; - *errorStatus = ErrorStatus::INVALID_ARGUMENT; - return nullptr; - } - resultModel.getExtensionOperandType(*extensionName, typeWithinExtension, - &operandType.operandType.type); - if (!resultModel.isValid()) { - LOG(ERROR) << "Failed to get extension operand with index " << i; - *errorStatus = ErrorStatus::INVALID_ARGUMENT; - return nullptr; - } - } - - uint32_t operandIndex = resultModel.addOperand(&operandType); - if 
(!resultModel.isValid()) { - LOG(ERROR) << "Failed to add operand with index " << i; - *errorStatus = ErrorStatus::INVALID_ARGUMENT; - return nullptr; - } - - if (operand.extraParams && - operand.extraParams->getTag() == OperandExtraParams::Tag::extension) { - const auto& extensionData = - operand.extraParams->get<OperandExtraParams::Tag::extension>(); - resultModel.setOperandExtensionData(operandIndex, extensionData.data(), - extensionData.size()); - if (!resultModel.isValid()) { - LOG(ERROR) << "Failed to add extension data for operand with index " << i; - *errorStatus = ErrorStatus::INVALID_ARGUMENT; - return nullptr; - } - } - - switch (operand.lifetime) { - case OperandLifeTime::CONSTANT_COPY: { - if (operand.location.length <= - ANEURALNETWORKS_MAX_SIZE_OF_IMMEDIATELY_COPIED_VALUES) { - resultModel.setOperandValue( - i, model.operandValues.data() + operand.location.offset, - operand.location.length); - } else { - // If length is larger than 128 bytes, we are responsible for making sure - // that value outlives the model. 
If this case exists, then we created - // an internal copy, that is used here: - resultModel.setOperandValue( - i, copiedOperandValues.data() + operand.location.offset, - operand.location.length); - } - break; - } - case OperandLifeTime::CONSTANT_POOL: { - resultModel.setOperandValueFromMemory( - i, memoryPools[operand.location.poolIndex].get(), operand.location.offset, - operand.location.length); - break; - } - case OperandLifeTime::SUBGRAPH: { - ErrorStatus otherErrorStatus = ErrorStatus::NONE; - auto subgraph = convertSubgraphFromHAL(nnapi, memoryPools, model, allModels, - operand.location.offset + 1, - copiedOperandValues, &otherErrorStatus); - if (subgraph) { - resultModel.setOperandValueFromModel(i, subgraph); - } else { - LOG(ERROR) << "Failed to set subgraph operand value"; - *errorStatus = otherErrorStatus; - return nullptr; - } - break; - } - case OperandLifeTime::NO_VALUE: { - resultModel.setOperandValue(i, nullptr, 0); - break; - } - case OperandLifeTime::TEMPORARY_VARIABLE: - case OperandLifeTime::SUBGRAPH_OUTPUT: - case OperandLifeTime::SUBGRAPH_INPUT: { - break; - } - default: - LOG(ERROR) << "Invalid operand type: " << static_cast<int>(operand.lifetime); - *errorStatus = ErrorStatus::INVALID_ARGUMENT; - return nullptr; - } - - if (!resultModel.isValid()) { - LOG(ERROR) << "Failed to add operand with index " << i; - *errorStatus = ErrorStatus::INVALID_ARGUMENT; - return nullptr; - } - } - - for (int i = 0; i < subgraph.operations.size(); ++i) { - const auto& operation = subgraph.operations[i]; - - std::vector<uint32_t> inputs(operation.inputs.begin(), operation.inputs.end()); - std::vector<uint32_t> outputs(operation.outputs.begin(), operation.outputs.end()); - - int operationType = static_cast<int>(operation.type); - if (::android::nn::isExtension(static_cast<::android::nn::OperationType>(operationType))) { - uint16_t extensionPrefix = - ::android::nn::getExtensionPrefix(static_cast<uint32_t>(operationType)); - uint16_t typeWithinExtension = - 
::android::nn::getTypeWithinExtension(static_cast<uint32_t>(operationType)); - auto* extensionName = getExtensionName(extensionPrefix); - if (extensionName == nullptr) { - LOG(ERROR) << "Unknown extension prefix " << extensionPrefix; - *errorStatus = ErrorStatus::INVALID_ARGUMENT; - return nullptr; - } - resultModel.getExtensionOperationType(*extensionName, typeWithinExtension, - &operationType); - if (!resultModel.isValid()) { - LOG(ERROR) << "Failed to get extension operation with index " << i; - *errorStatus = ErrorStatus::INVALID_ARGUMENT; - return nullptr; - } - } - - resultModel.addOperation(operationType, inputs, outputs); - - if (!resultModel.isValid()) { - LOG(ERROR) << "Failed to add operation with index " << i; - *errorStatus = ErrorStatus::INVALID_ARGUMENT; - return nullptr; - } - } - - std::vector<uint32_t> inputIndexes(subgraph.inputIndexes.begin(), subgraph.inputIndexes.end()); - std::vector<uint32_t> outputIndexes(subgraph.outputIndexes.begin(), - subgraph.outputIndexes.end()); - - resultModel.identifyInputsAndOutputs(inputIndexes, outputIndexes); - if (!resultModel.isValid()) { - LOG(ERROR) << "Model identifyInputsAndOutputs failed"; - *errorStatus = ErrorStatus::INVALID_ARGUMENT; - return nullptr; - } - - if (resultModel.finish() != Result::NO_ERROR) { - LOG(ERROR) << "Model finish failed"; - *errorStatus = ErrorStatus::INVALID_ARGUMENT; - return nullptr; - } - - if (!resultModel.isValid()) { - LOG(ERROR) << "Invalid model"; - *errorStatus = ErrorStatus::INVALID_ARGUMENT; - return nullptr; - } - - (*allModels)[subgraphIndex] = std::move(resultModel); - return (*allModels)[subgraphIndex]->getHandle(); -} - -// This is needed for CONSTANT_COPY operands > 128 bytes, we have to -// store them in intenal buffer -bool needsCopiedOperandValues(const neuralnetworks::Model& model) { - for (int sindex = 0; sindex < model.referenced.size() + 1; ++sindex) { - const auto& subgraph = sindex == 0 ? 
model.main : model.referenced[sindex - 1]; - for (int i = 0; i < subgraph.operands.size(); ++i) { - const auto& operand = subgraph.operands[i]; - - if (operand.lifetime == OperandLifeTime::CONSTANT_COPY) { - if (operand.location.length > - ANEURALNETWORKS_MAX_SIZE_OF_IMMEDIATELY_COPIED_VALUES) { - return true; - } - } - } - } - return false; -} - -bool isValid(const Subgraph& subgraph) { - // Either the operand has a known value before model execution begins, or we've seen a writer - // for this operand while walking operands in execution order. Initialize to known operands. - std::vector<bool> operandValueKnown; - operandValueKnown.reserve(subgraph.operands.size()); - std::transform(subgraph.operands.begin(), subgraph.operands.end(), - std::back_inserter(operandValueKnown), [](const Operand& operand) { - return operand.lifetime != OperandLifeTime::TEMPORARY_VARIABLE && - operand.lifetime != OperandLifeTime::SUBGRAPH_OUTPUT; - }); - - // Validate that operations are sorted into execution order. - // - // If there is a cycle in the graph, the operations will not - // appear to be sorted into execution order: Some operation will - // have an input for which operandValueKnown[] is false. - for (size_t i = 0; i < subgraph.operations.size(); ++i) { - const auto& operation = subgraph.operations[i]; - - for (size_t j = 0; j < operation.inputs.size(); ++j) { - const uint32_t k = operation.inputs[j]; - if (!operandValueKnown[k]) { - LOG(ERROR) << "Operation " << i << " input " << j << " (operand " << k - << ") is read before it is written"; - return false; - } - } - - for (size_t j = 0; j < operation.outputs.size(); ++j) { - const uint32_t k = operation.outputs[j]; - // Assuming validateOperations() has not returned an error, we know that this output is - // TEMPORARY_VARIABLE or MODEL_OUTPUT, and so the only way operandValueKnown[k] can be - // true is if we've already seen a writer for this operand. 
- if (operandValueKnown[k]) { - LOG(ERROR) << "Operation " << i << " output " << j << " (operand " << k - << ") has already been written"; - return false; - } - operandValueKnown[k] = true; - } - } - - // Verify all operands are written. - for (size_t i = 0; i < subgraph.operands.size(); ++i) { - if (!operandValueKnown[i]) { - LOG(ERROR) << "Operand " << i << " is never written"; - return false; - } - const auto& operand = subgraph.operands[i]; - - if (operand.lifetime == OperandLifeTime::SUBGRAPH_OUTPUT) { - if (std::find(subgraph.outputIndexes.begin(), subgraph.outputIndexes.end(), i) == - subgraph.outputIndexes.end()) { - LOG(ERROR) << "Op with output liftime, but not on output list: " << i; - return false; - } - } - } - - // Validate input and output lifetime - for (auto index : subgraph.inputIndexes) { - if (subgraph.operands[index].lifetime != OperandLifeTime::SUBGRAPH_INPUT) { - LOG(ERROR) << "Input with index" << index << " has invalid lifetime"; - return false; - } - } - for (auto index : subgraph.outputIndexes) { - if (subgraph.operands[index].lifetime != OperandLifeTime::SUBGRAPH_OUTPUT) { - LOG(ERROR) << "Output with index" << index << " has invalid lifetime"; - return false; - } - } - - // TODO(b/77871786): verify that every operation has at least one output operand that is read? - return true; -} - -} // namespace - -bool isValid(const neuralnetworks::Model& model) { - return (isValid(model.main) && - std::all_of(model.referenced.begin(), model.referenced.end(), - [](const Subgraph& subgraph) { return isValid(subgraph); })); -} - -std::optional<ShimConvertedModel> convertFromHAL(const NnApiSupportLibrary* nnapi, - const neuralnetworks::Model& model, - std::vector<uint8_t>* copiedOperandValues, - ErrorStatus* errorStatus) { - CHECK(copiedOperandValues != nullptr); - - *errorStatus = ErrorStatus::NONE; - - // Using this pulls in OperationResolver and huge chunk of dependencies. 
- // TODO(172925288): Replace as followup work - // if (!::aidl::android::hardware::neuralnetworks::utils::valid(model)) { - if (!isValid(model)) { - LOG(ERROR) << "Invalid HAL model, failed to convert into SL model"; - *errorStatus = ErrorStatus::INVALID_ARGUMENT; - return std::nullopt; - } - - std::vector<std::unique_ptr<::android::nn::sl_wrapper::Memory>> memoryPools; - memoryPools.reserve(model.pools.size()); - for (const auto& pool : model.pools) { - std::unique_ptr<::android::nn::sl_wrapper::Memory> memory = convertFromHAL(nnapi, pool); - if (!memory) { - LOG(ERROR) << "Failed to convert HAL memory into SL memory"; - *errorStatus = ErrorStatus::INVALID_ARGUMENT; - return std::nullopt; - } - memoryPools.push_back(std::move(memory)); - } - - std::vector<std::optional<::android::nn::sl_wrapper::Model>> allModels(model.referenced.size() + - 1); - - if (needsCopiedOperandValues(model)) { - *copiedOperandValues = model.operandValues; - } - - for (size_t i = 0; i < allModels.size(); ++i) { - if (convertSubgraphFromHAL(nnapi, memoryPools, model, &allModels, i, *copiedOperandValues, - errorStatus) == nullptr) { - LOG(ERROR) << "Failed to convert HAL subgraphs into SL subgraphs, index: " << i; - // Error status already set by convertSubgraphFromHAL - return std::nullopt; - } - } - - std::vector<::android::nn::sl_wrapper::Model> result; - result.reserve(allModels.size()); - for (size_t i = 0; i < allModels.size(); ++i) { - if (!allModels[i].has_value()) { - LOG(ERROR) << "Missing SL subgraph"; - *errorStatus = ErrorStatus::INVALID_ARGUMENT; - return std::nullopt; - } - result.push_back(std::move(*allModels[i])); - } - - return ShimConvertedModel{.memory = std::move(memoryPools), .models = std::move(result)}; -} - -std::unique_ptr<::android::nn::sl_wrapper::Memory> convertFromHAL( - const NnApiSupportLibrary* nnapi, const neuralnetworks::Memory& pool) { - using Tag = neuralnetworks::Memory::Tag; - switch (pool.getTag()) { - case Tag::ashmem: { - const auto& ashmem = 
pool.get<Tag::ashmem>(); - size_t size = ashmem.size; - int fd = ashmem.fd.get(); - - auto memory = std::make_unique<::android::nn::sl_wrapper::Memory>( - nnapi, size, PROT_READ | PROT_WRITE, fd, 0, /*ownsFd=*/false); - if (!memory->isValid()) { - return nullptr; - } - return memory; - } - case Tag::mappableFile: { - const auto& mappableFile = pool.get<Tag::mappableFile>(); - size_t size = mappableFile.length; - int fd = mappableFile.fd.get(); - int prot = mappableFile.prot & (PROT_READ | PROT_WRITE); - size_t offset = mappableFile.offset; - - auto memory = std::make_unique<::android::nn::sl_wrapper::Memory>( - nnapi, size, prot, fd, offset, /*ownsFd=*/false); - if (!memory->isValid()) { - return nullptr; - } - return memory; - } - case Tag::hardwareBuffer: { - const auto& hardwareBuffer = pool.get<Tag::hardwareBuffer>(); - - native_handle_t* handle = ::android::dupFromAidl(hardwareBuffer.handle); - if (handle == nullptr) { - LOG(ERROR) << "Dup of the hardware_buffer_blob memory pool failed"; - return nullptr; - } - const auto handleGuard = ::android::base::make_scope_guard([handle] { - native_handle_close(handle); - native_handle_delete(handle); - }); - for (size_t i = 0; i < handle->numFds; ++i) { - if (handle->data[i] == -1) { - LOG(ERROR) << "Dup of the hardware_buffer_blob memory pool failed"; - return nullptr; - } - } - - const AHardwareBuffer_Desc desc{ - .width = static_cast<uint32_t>(hardwareBuffer.description.width), - .height = static_cast<uint32_t>(hardwareBuffer.description.height), - .layers = static_cast<uint32_t>(hardwareBuffer.description.layers), - .format = static_cast<uint32_t>(hardwareBuffer.description.format), - .usage = static_cast<uint64_t>(hardwareBuffer.description.usage), - .stride = static_cast<uint32_t>(hardwareBuffer.description.stride), - }; - AHardwareBuffer* ahwb = nullptr; - const ::android::status_t status = AHardwareBuffer_createFromHandle( - &desc, handle, AHARDWAREBUFFER_CREATE_FROM_HANDLE_METHOD_CLONE, &ahwb); - if (status != 
::android::NO_ERROR) { - LOG(ERROR) << "createFromHandle failed"; - return nullptr; - } - - const bool isBlob = desc.format == AHARDWAREBUFFER_FORMAT_BLOB; - const size_t size = isBlob ? desc.width : 0; - - // Takes ownership of hardwareBuffer, handle gets closed - auto memory = - std::make_unique<::android::nn::sl_wrapper::Memory>(nnapi, ahwb, - /*ownAHB=*/true, size); - if (!memory->isValid()) { - return nullptr; - } - return memory; - } - } - LOG(ERROR) << "Can't convert to SL Memory, unknown pool tag: " << pool.getTag(); - return nullptr; -} - -} // namespace aidl::android::hardware::neuralnetworks
diff --git a/shim_and_sl/ShimDevice.cpp b/shim_and_sl/ShimDevice.cpp deleted file mode 100644 index 06e18d7..0000000 --- a/shim_and_sl/ShimDevice.cpp +++ /dev/null
@@ -1,615 +0,0 @@ -/* - * Copyright (C) 2021 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#define LOG_TAG "ShimDevice" - -#include "ShimDevice.h" - -#include <NeuralNetworks.h> -#include <aidl/android/hardware/neuralnetworks/DataLocation.h> -#include <aidl/android/hardware/neuralnetworks/ErrorStatus.h> -#include <aidl/android/hardware/neuralnetworks/Extension.h> -#include <aidl/android/hardware/neuralnetworks/ExtensionOperandTypeInformation.h> -#include <aidl/android/hardware/neuralnetworks/Memory.h> -#include <aidl/android/hardware/neuralnetworks/NumberOfCacheFiles.h> -#include <aidl/android/hardware/neuralnetworks/OperandLifeTime.h> -#include <aidl/android/hardware/neuralnetworks/OperandPerformance.h> -#include <android-base/logging.h> -#include <android-base/scopeguard.h> -#include <android/binder_auto_utils.h> -#include <android/binder_manager.h> -#include <android/binder_process.h> -#include <nnapi/TypeUtils.h> -#include <nnapi/hal/aidl/Conversions.h> - -#include <algorithm> -#include <limits> -#include <memory> -#include <optional> -#include <string> -#include <utility> -#include <vector> - -#include "ShimConverter.h" -#include "ShimPreparedModel.h" -#include "ShimUtils.h" -#include "SupportLibrary.h" - -using namespace ::android::nn::sl_wrapper; - -namespace aidl::android::hardware::neuralnetworks { - -namespace { - -constexpr std::optional<::android::nn::wrapper::ExecutePriority> convertToNDKPriority( - 
Priority priority) { - switch (priority) { - case Priority::LOW: - return ::android::nn::wrapper::ExecutePriority::LOW; - case Priority::MEDIUM: - return ::android::nn::wrapper::ExecutePriority::MEDIUM; - case Priority::HIGH: - return ::android::nn::wrapper::ExecutePriority::HIGH; - } - LOG(ERROR) << "unrecognized priority: " << static_cast<int32_t>(priority); - return std::nullopt; -} - -constexpr std::optional<::android::nn::wrapper::ExecutePreference> convertToNDKPreference( - ExecutionPreference preference) { - switch (preference) { - case ExecutionPreference::LOW_POWER: - return ::android::nn::wrapper::ExecutePreference::PREFER_LOW_POWER; - case ExecutionPreference::FAST_SINGLE_ANSWER: - return ::android::nn::wrapper::ExecutePreference::PREFER_FAST_SINGLE_ANSWER; - case ExecutionPreference::SUSTAINED_SPEED: - return ::android::nn::wrapper::ExecutePreference::PREFER_SUSTAINED_SPEED; - } - LOG(ERROR) << "unrecognized preference: " << static_cast<int32_t>(preference); - return std::nullopt; -} - -// Safely downcast an IPreparedModel object to ShimPreparedModel. -// This function will return nullptr if the IPreparedModel object is not originated from the -// shim process. -const ShimPreparedModel* castToShimPreparedModel(IPreparedModel* preparedModel) { - if (preparedModel->isRemote()) { - return nullptr; - } - // This static_cast is safe because ShimPreparedModel is the only class that implements - // the IPreparedModel interface in the sample driver process. 
- return static_cast<const ShimPreparedModel*>(preparedModel); -} - -static PerformanceInfo convertPerformanceInfo(const SL_ANeuralNetworksPerformanceInfo& info) { - return {.execTime = info.execTime, .powerUsage = info.powerUsage}; -} - -Capabilities getCapabilities(const NnApiSupportLibrary* nnapi, ANeuralNetworksDevice* device) { - Capabilities capabilities; - SL_ANeuralNetworksPerformanceInfo performanceInfo; - - nnapi->SL_ANeuralNetworksDevice_getPerformanceInfo( - device, SL_ANEURALNETWORKS_CAPABILITIES_PERFORMANCE_RELAXED_SCALAR, &performanceInfo); - capabilities.relaxedFloat32toFloat16PerformanceScalar = convertPerformanceInfo(performanceInfo); - - nnapi->SL_ANeuralNetworksDevice_getPerformanceInfo( - device, SL_ANEURALNETWORKS_CAPABILITIES_PERFORMANCE_RELAXED_TENSOR, &performanceInfo); - capabilities.relaxedFloat32toFloat16PerformanceTensor = convertPerformanceInfo(performanceInfo); - - nnapi->SL_ANeuralNetworksDevice_getPerformanceInfo( - device, SL_ANEURALNETWORKS_CAPABILITIES_PERFORMANCE_IF, &performanceInfo); - capabilities.ifPerformance = convertPerformanceInfo(performanceInfo); - - nnapi->SL_ANeuralNetworksDevice_getPerformanceInfo( - device, SL_ANEURALNETWORKS_CAPABILITIES_PERFORMANCE_WHILE, &performanceInfo); - capabilities.whilePerformance = convertPerformanceInfo(performanceInfo); - - constexpr auto fn = [](SL_ANeuralNetworksOperandPerformanceInfo info, void* context) { - auto* out = static_cast<std::vector<OperandPerformance>*>(context); - out->push_back(OperandPerformance{ - .type = static_cast<OperandType>(info.operandType), - .info = convertPerformanceInfo(info.performanceInfo), - }); - }; - - nnapi->SL_ANeuralNetworksDevice_forEachOperandPerformanceInfo( - device, static_cast<void*>(&capabilities.operandPerformance), fn); - - return capabilities; -} - -NumberOfCacheFiles getNumberOfCacheFilesNeeded(const NnApiSupportLibrary* nnapi, - ANeuralNetworksDevice* device) { - uint32_t numModelCacheFiles; - uint32_t numDataCacheFiles; - 
nnapi->SL_ANeuralNetworksDevice_getNumberOfCacheFilesNeeded(device, &numModelCacheFiles, - &numDataCacheFiles); - return { - .numModelCache = static_cast<int32_t>(numModelCacheFiles), - .numDataCache = static_cast<int32_t>(numDataCacheFiles), - }; -} - -std::vector<Extension> getVendorExtensions(const NnApiSupportLibrary* nnapi, - ANeuralNetworksDevice* device) { - uint32_t vendorExtensionCount; - nnapi->SL_ANeuralNetworksDevice_getVendorExtensionCount(device, &vendorExtensionCount); - - std::vector<Extension> extensions(vendorExtensionCount); - - for (uint32_t vendorExtensionIndex = 0; vendorExtensionIndex < vendorExtensionCount; - ++vendorExtensionIndex) { - auto& extension = extensions[vendorExtensionIndex]; - - const char* extensionName; - nnapi->SL_ANeuralNetworksDevice_getVendorExtensionName(device, vendorExtensionIndex, - &extensionName); - extension.name = extensionName; - - constexpr auto fn = [](SL_ANeuralNetworksExtensionOperandTypeInformation info, - void* context) { - auto* out = static_cast<std::vector<ExtensionOperandTypeInformation>*>(context); - out->push_back(ExtensionOperandTypeInformation{ - .type = info.type, - .isTensor = info.isTensor, - .byteSize = static_cast<int32_t>(info.byteSize), - }); - }; - nnapi->SL_ANeuralNetworksDevice_forEachVendorExtensionOperandTypeInformation( - device, vendorExtensionIndex, static_cast<void*>(&extension.operandTypes), fn); - } - - return extensions; -} - -} // namespace - -ShimDevice::ShimDevice(std::shared_ptr<const NnApiSupportLibrary> nnapi, - ANeuralNetworksDevice* device, std::string serviceName) - : mNnapi(std::move(nnapi)), - mBufferTracker(ShimBufferTracker::create()), - mServiceName(std::move(serviceName)), - mDevice(device), - mCapabilities(neuralnetworks::getCapabilities(mNnapi.get(), mDevice)), - mNumberOfCacheFiles(neuralnetworks::getNumberOfCacheFilesNeeded(mNnapi.get(), mDevice)), - mExtensions(neuralnetworks::getVendorExtensions(mNnapi.get(), mDevice)) {} - -// Manages the data buffer for an 
operand. -class ShimBuffer : public BnBuffer { - public: - ShimBuffer(const NnApiSupportLibrary* nnApi, const ::android::nn::Dimensions initialDimensions, - const ::android::nn::OperandType type, - std::shared_ptr<::android::nn::sl_wrapper::Memory> memory, - std::unique_ptr<ShimBufferTracker::Token> token) - : kInitialDimensions(initialDimensions), - kType(type), - mNnApi(nnApi), - mMemory(std::move(memory)), - kToken(std::move(token)) {} - - bool tensorHasUnspecifiedDimensions(::android::nn::OperandType type, - const ::android::nn::Dimensions& dimensions) { - if (!::android::nn::isExtension(type)) { - if (isNonExtensionScalar(type)) { - return false; - } - } - return dimensions.size() == 0 || std::any_of(dimensions.begin(), dimensions.end(), - [](int32_t dim) { return dim == 0; }); - } - - bool validateDimensions(const ::android::nn::Dimensions& dimensions) { - if (isNonExtensionScalar(kType)) { - if (!dimensions.empty()) { - LOG(ERROR) << "ShimBuffer::validateDimensions -- invalid dimensions for scalar " - "operand"; - return false; - } - return true; - } - - if (dimensions.empty()) { - if (tensorHasUnspecifiedDimensions(kType, kInitialDimensions)) { - LOG(ERROR) << "ShimBuffer::validateDimensions -- the initial dimensions are not " - "fully specified and no dimension update is provided: "; - - return false; - } - } else { - if (tensorHasUnspecifiedDimensions(kType, dimensions)) { - LOG(ERROR) << "ShimBuffer::validateDimensions -- the updated dimensions are not " - "fully specified: "; - - return false; - } - } - - const auto combined = ::android::nn::combineDimensions(kInitialDimensions, dimensions); - if (!combined.has_value()) { - LOG(ERROR) << "ShimBuffer::validateDimensions -- incompatible dimensions"; - return false; - } - return true; - } - - ndk::ScopedAStatus copyFrom(const aidl::android::hardware::neuralnetworks::Memory& src, - const std::vector<int32_t>& dimensions) override { - auto memory = convertFromHAL(mNnApi, src); - - if (!memory) { - LOG(ERROR) 
<< "Failed to convert HAL Memory to SL memory"; - return toAStatus(ErrorStatus::INVALID_ARGUMENT); - } - const auto unsignedDimensions = ::android::nn::toUnsigned(dimensions); - if (!unsignedDimensions.has_value()) { - return toAStatus(aidl_hal::ErrorStatus::INVALID_ARGUMENT, - unsignedDimensions.error().message); - } - - if (!validateDimensions(unsignedDimensions.value())) { - LOG(ERROR) << "Invalid dimensions"; - return toAStatus(ErrorStatus::INVALID_ARGUMENT); - } - Result result = memory->copyTo(*mMemory.get()); - - // Special case expected error status for uninitialized source memory - if (result == Result::BAD_DATA) { - // NNAPI Runtime reports both uninitialized memory - // and incompatible dimensions as BAD_DATA, but - // VTS expects to see INVALID_ARGUMENT for bad dimensions, - // and GENERAL_FAILURE for uninitialized memory. - if (memory->getSize() != mMemory->getSize()) { - return toAStatus(ErrorStatus::INVALID_ARGUMENT, "Incompatible sizes"); - } - - return toAStatus(ErrorStatus::GENERAL_FAILURE); - } - SLW2SAS_RETURN_IF_ERROR(result); - return ndk::ScopedAStatus::ok(); - } - - ndk::ScopedAStatus copyTo(const Memory& dst) override { - auto memory = convertFromHAL(mNnApi, dst); - - if (!memory) { - LOG(ERROR) << "Failed to convert HAL Memory to SL memory"; - return toAStatus(ErrorStatus::INVALID_ARGUMENT); - } - - Result result = mMemory->copyTo(*memory); - // Special case expected error status for uninitialized source memory - if (result == Result::BAD_DATA) { - // NNAPI Runtime reports both uninitialized memory - // and incompatible dimensions as BAD_DATA, but - // VTS expects to see INVALID_ARGUMENT for bad dimensions, - // and GENERAL_FAILURE for uninitialized memory. 
- if (memory->getSize() != mMemory->getSize()) { - return toAStatus(ErrorStatus::INVALID_ARGUMENT, "Incompatible sizes"); - } - return toAStatus(ErrorStatus::GENERAL_FAILURE); - } - SLW2SAS_RETURN_IF_ERROR(result); - return ndk::ScopedAStatus::ok(); - } - - private: - const ::android::nn::Dimensions kInitialDimensions; - const ::android::nn::OperandType kType; - - const NnApiSupportLibrary* mNnApi; - std::shared_ptr<::android::nn::sl_wrapper::Memory> mMemory; - const std::unique_ptr<ShimBufferTracker::Token> kToken; -}; - -::ndk::ScopedAStatus ShimDevice::allocate(const BufferDesc& desc, - const std::vector<IPreparedModelParcel>& preparedModels, - const std::vector<BufferRole>& inputRoles, - const std::vector<BufferRole>& outputRoles, - DeviceBuffer* buffer) { - if (!isValidDimension(desc.dimensions)) { - LOG(ERROR) << "ShimDriver::allocate -- passed invalid dimension values."; - return toAStatus(ErrorStatus::INVALID_ARGUMENT, - "ShimDriver::allocate -- passed invalid dimension values"); - } - ANeuralNetworksMemoryDesc* slDesc = nullptr; - mNnapi->ANeuralNetworksMemoryDesc_create(&slDesc); - const auto slDescGuard = ::android::base::make_scope_guard( - [this, slDesc] { mNnapi->ANeuralNetworksMemoryDesc_free(slDesc); }); - - auto unsignedDimensions = ::android::nn::toUnsigned(desc.dimensions).value(); - if (mNnapi->ANeuralNetworksMemoryDesc_setDimensions(slDesc, desc.dimensions.size(), - unsignedDimensions.data()) != - ANEURALNETWORKS_NO_ERROR) { - LOG(ERROR) << "ShimDriver::allocate -- ANeuralNetworksMemoryDesc_setDimensions fail."; - return toAStatus(ErrorStatus::INVALID_ARGUMENT, - "ShimDriver::allocate -- ANeuralNetworksMemoryDesc_setDimensions fail"); - } - - constexpr auto getCompilation = [](IPreparedModel* preparedModel) -> const ShimPreparedModel* { - const auto* samplePreparedModel = castToShimPreparedModel(preparedModel); - if (samplePreparedModel == nullptr) { - LOG(ERROR) << "ShimDriver::allocate -- unknown remote IPreparedModel."; - return nullptr; - } 
- return samplePreparedModel; - }; - - std::optional<::android::nn::OperandType> type; - std::vector<uint32_t> dimensions = ::android::nn::toUnsigned(desc.dimensions).value(); - - for (const auto& role : inputRoles) { - if (role.modelIndex < 0 || role.modelIndex >= preparedModels.size()) { - LOG(ERROR) << "Invalid modelIndex value " << role.modelIndex; - return toAStatus(ErrorStatus::INVALID_ARGUMENT, - "ShimDriver::allocate -- Input role modeIndex with invalid value"); - } - auto preparedModel = preparedModels[role.modelIndex]; - if (preparedModel.preparedModel == nullptr) { - return toAStatus(ErrorStatus::INVALID_ARGUMENT, - "ShimDriver::allocate -- nullptr model"); - } - - auto pmodel = getCompilation(preparedModel.preparedModel.get()); - if (pmodel == nullptr) { - return toAStatus(ErrorStatus::INVALID_ARGUMENT, - "ShimDriver::allocate -- nullptr model"); - } - - auto result = mNnapi->ANeuralNetworksMemoryDesc_addInputRole( - slDesc, pmodel->getCompilation().getHandle(), role.ioIndex, role.probability); - - if (result != ANEURALNETWORKS_NO_ERROR) { - LOG(ERROR) << "SampleDriver::allocate -- ANeuralNetworksMemoryDesc_addInputRole fail."; - return toAStatus(ErrorStatus::INVALID_ARGUMENT, - "ShimDriver::allocate -- ANeuralNetworksMemoryDesc_addInputRole fail"); - } - - const auto& model = pmodel->getMainModel(); - const auto& op = model.getOperands()[model.getInputs()[role.ioIndex]]; - auto operandType = static_cast<::android::nn::OperandType>(op.operandType.type); - if (!type) { - type = operandType; - } - if (dimensions.empty()) { - dimensions = op.dimensions; - } - } - - for (const auto& role : outputRoles) { - if (role.modelIndex < 0 || role.modelIndex >= preparedModels.size()) { - LOG(ERROR) << "Invalid modelIndex value " << role.modelIndex; - return toAStatus(ErrorStatus::INVALID_ARGUMENT, - "ShimDriver::allocate -- Ou0tput role modeIndex with invalid value"); - } - auto preparedModel = preparedModels[role.modelIndex]; - if (preparedModel.preparedModel == 
nullptr) { - return toAStatus(ErrorStatus::INVALID_ARGUMENT, - "ShimDriver::allocate -- nullptr model"); - } - - auto pmodel = getCompilation(preparedModel.preparedModel.get()); - if (pmodel == nullptr) { - return toAStatus(ErrorStatus::INVALID_ARGUMENT, - "ShimDriver::allocate -- nullptr model"); - } - - auto result = mNnapi->ANeuralNetworksMemoryDesc_addOutputRole( - slDesc, pmodel->getCompilation().getHandle(), role.ioIndex, role.probability); - - if (result != ANEURALNETWORKS_NO_ERROR) { - LOG(ERROR) << "SampleDriver::allocate -- ANeuralNetworksMemoryDesc_addInputRole fail."; - return toAStatus(ErrorStatus::INVALID_ARGUMENT, - "ShimDriver::allocate -- ANeuralNetworksMemoryDesc_addInputRole fail"); - } - const auto& model = pmodel->getMainModel(); - const auto& op = model.getOperands()[model.getInputs()[role.ioIndex]]; - auto operandType = static_cast<::android::nn::OperandType>(op.operandType.type); - if (!type) { - type = operandType; - } - if (dimensions.empty()) { - dimensions = op.dimensions; - } - } - - auto typeSize = ::android::nn::getNonExtensionSize(*type, dimensions); - if (!typeSize.has_value()) { - return toAStatus(ErrorStatus::INVALID_ARGUMENT, - "ShimDriver::allocate -- failed to get underlying type size, " - "possibly an extension type"); - } - - mNnapi->ANeuralNetworksMemoryDesc_finish(slDesc); - auto memory = - std::make_shared<::android::nn::sl_wrapper::Memory>(mNnapi.get(), slDesc, *typeSize); - - if (!memory->isValid()) { - LOG(ERROR) << "ShimDriver::allocate -- ANeuralNetworksMemory_createFromDesc failed."; - return toAStatus(ErrorStatus::GENERAL_FAILURE, - "ShimDriver::allocate -- ANeuralNetworksMemory_createFromDesc failed"); - } - - auto token = mBufferTracker->add(memory); - if (token == nullptr) { - LOG(ERROR) << "ShimDriver::allocate -- ShimBufferTracker returned invalid token."; - return toAStatus(ErrorStatus::GENERAL_FAILURE, - "ShimDriver::allocate -- ShimBufferTracker returned invalid token."); - } - const uint32_t tokenValue = 
token->get(); - auto shimbuffer = ndk::SharedRefBase::make<ShimBuffer>(mNnapi.get(), dimensions, *type, - std::move(memory), std::move(token)); - buffer->buffer = std::move(shimbuffer); - buffer->token = tokenValue; - - return ndk::ScopedAStatus::ok(); -} - -ndk::ScopedAStatus ShimDevice::getCapabilities(Capabilities* capabilities) { - *capabilities = mCapabilities; - return ndk::ScopedAStatus::ok(); -} - -ndk::ScopedAStatus ShimDevice::getNumberOfCacheFilesNeeded(NumberOfCacheFiles* numberOfCacheFiles) { - *numberOfCacheFiles = mNumberOfCacheFiles; - return ndk::ScopedAStatus::ok(); -} - -ndk::ScopedAStatus ShimDevice::getSupportedExtensions(std::vector<Extension>* extensions) { - *extensions = mExtensions; - return ndk::ScopedAStatus::ok(); -} - -ndk::ScopedAStatus ShimDevice::getSupportedOperations(const Model& model, - std::vector<bool>* supportedOperations) { - const auto numOperations = model.main.operations.size(); - supportedOperations->resize(numOperations); - - ErrorStatus convertErrorStatus = ErrorStatus::NONE; - std::vector<uint8_t> copiedOperandValues; - auto modelAndMemory = - convertFromHAL(mNnapi.get(), model, &copiedOperandValues, &convertErrorStatus); - if (!modelAndMemory || modelAndMemory->models.empty()) { - LOG(ERROR) << "Failed to convert HAL model to SL model"; - return toAStatus(convertErrorStatus); - } - - auto annModel = modelAndMemory->models[0].getHandle(); - auto supportedOps = std::make_unique<bool[]>(numOperations); - - auto result = mNnapi->ANeuralNetworksModel_getSupportedOperationsForDevices( - annModel, &mDevice, /*numDevices=*/1, supportedOps.get()); - SLW2SAS_RETURN_IF_ERROR(result); - - std::copy(supportedOps.get(), supportedOps.get() + numOperations, supportedOperations->begin()); - return ndk::ScopedAStatus::ok(); -} - -ndk::ScopedAStatus ShimDevice::getType(DeviceType* type) { - int32_t deviceType; - auto result = mNnapi->ANeuralNetworksDevice_getType(mDevice, &deviceType); - SLW2SAS_RETURN_IF_ERROR(result); - *type = 
static_cast<DeviceType>(deviceType); - return ndk::ScopedAStatus::ok(); -} - -ndk::ScopedAStatus ShimDevice::getVersionString(std::string* versionString) { - const char* buffer; - auto result = mNnapi->ANeuralNetworksDevice_getVersion(mDevice, &buffer); - SLW2SAS_RETURN_IF_ERROR(result); - - *versionString = std::string(buffer); - return ndk::ScopedAStatus::ok(); -} - -static std::vector<int> getIntFds(const std::vector<::ndk::ScopedFileDescriptor>& scopedFds) { - std::vector<int> fds; - fds.reserve(scopedFds.size()); - for (const auto& scopedFd : scopedFds) { - fds.push_back(scopedFd.get()); - } - return fds; -} - -ndk::ScopedAStatus ShimDevice::prepareModel( - const Model& model, ExecutionPreference preference, Priority priority, int64_t deadlineNs, - const std::vector<::ndk::ScopedFileDescriptor>& modelCache, - const std::vector<::ndk::ScopedFileDescriptor>& dataCache, - const std::vector<uint8_t>& token, - const std::shared_ptr<IPreparedModelCallback>& callback) { - // TODO(183398748): Run model preparation in detached thread. 
- if (callback == nullptr) { - return toAStatus(ErrorStatus::INVALID_ARGUMENT); - } - - auto ndkPreference = convertToNDKPreference(preference); - if (!ndkPreference) { - callback->notify(ErrorStatus::INVALID_ARGUMENT, nullptr); - return toAStatus(ErrorStatus::INVALID_ARGUMENT); - } - auto ndkPriority = convertToNDKPriority(priority); - if (!ndkPriority) { - callback->notify(ErrorStatus::INVALID_ARGUMENT, nullptr); - return toAStatus(ErrorStatus::INVALID_ARGUMENT); - } - - ErrorStatus convertErrorStatus = ErrorStatus::NONE; - std::vector<uint8_t> copiedOperandValues; - auto modelAndMemory = - convertFromHAL(mNnapi.get(), model, &copiedOperandValues, &convertErrorStatus); - - if (!modelAndMemory || modelAndMemory->models.empty()) { - callback->notify(ErrorStatus::INVALID_ARGUMENT, nullptr); - return toAStatus(convertErrorStatus); - } - - // b/185976051, past this point we pretend that compilation is asynchronous, and in - /// case of error we return OK status, but communicate the error through the callback. 
- auto compilation = ::android::nn::sl_wrapper::Compilation::createForDevice( - mNnapi.get(), &modelAndMemory->models[0], mDevice); - - SLW2SAS_OK_RETURN_AND_ERROR_CALLBACK_IF_ERROR(compilation.first, callback); - SLW2SAS_OK_RETURN_AND_ERROR_CALLBACK_IF_ERROR(compilation.second.setPreference(*ndkPreference), - callback); - SLW2SAS_OK_RETURN_AND_ERROR_CALLBACK_IF_ERROR(compilation.second.setPriority(*ndkPriority), - callback); - if (deadlineNs > -1) { - std::chrono::time_point<::android::base::boot_clock> deadlinePoint( - std::chrono::nanoseconds{deadlineNs}); - const auto currentTime = ::android::base::boot_clock::now(); - const auto timeoutDuration = std::chrono::nanoseconds(deadlinePoint - currentTime); - if (timeoutDuration <= std::chrono::nanoseconds::zero()) { - callback->notify(ErrorStatus::MISSED_DEADLINE_TRANSIENT, nullptr); - return ndk::ScopedAStatus::ok(); - } - SLW2SAS_OK_RETURN_AND_ERROR_CALLBACK_IF_ERROR( - compilation.second.setTimeout(std::max<uint64_t>(1, timeoutDuration.count())), - callback); - } - if (!modelCache.empty() || !dataCache.empty()) { - SLW2SAS_OK_RETURN_AND_ERROR_CALLBACK_IF_ERROR( - compilation.second.setCachingFromFds(getIntFds(modelCache), getIntFds(dataCache), - token), - callback); - } - SLW2SAS_OK_RETURN_AND_ERROR_CALLBACK_IF_ERROR(compilation.second.finish(), callback); - - const std::shared_ptr<ShimPreparedModel> preparedModel = - ndk::SharedRefBase::make<ShimPreparedModel>( - mNnapi, mBufferTracker, std::move(compilation.second), - std::move(modelAndMemory->models), std::move(modelAndMemory->memory), - std::move(copiedOperandValues)); - - callback->notify(ErrorStatus::NONE, preparedModel); - return ndk::ScopedAStatus::ok(); -} - -ndk::ScopedAStatus ShimDevice::prepareModelFromCache( - int64_t /*deadlineNs*/, const std::vector<::ndk::ScopedFileDescriptor>& /*modelCache*/, - const std::vector<::ndk::ScopedFileDescriptor>& /*dataCache*/, - const std::vector<uint8_t>& /*token*/, - const std::shared_ptr<IPreparedModelCallback>& 
callback) { - // The NNAPI runtime will attempt to call this before falling back to - // ShimDevice::prepareModel(). This is not a LOG(ERROR) to avoid producing - // misleading logcat messages on every compilation request because there is - // technically nothing wrong. - LOG(DEBUG) << "ShimDevice::prepareModelFromCache() is not supported. Use " - "ShimDevice::prepareModel() instead."; - const auto ret = callback->notify(ErrorStatus::GENERAL_FAILURE, nullptr); - return toAStatus(ErrorStatus::GENERAL_FAILURE); -} - -} // namespace aidl::android::hardware::neuralnetworks
diff --git a/shim_and_sl/ShimDeviceManager.cpp b/shim_and_sl/ShimDeviceManager.cpp deleted file mode 100644 index 6ea58de..0000000 --- a/shim_and_sl/ShimDeviceManager.cpp +++ /dev/null
@@ -1,203 +0,0 @@ -/* - * Copyright (C) 2021 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#define LOG_TAG "ShimDeviceManager" - -#include "ShimDeviceManager.h" - -#include <AndroidVersionUtil.h> -#include <aidl/android/hardware/neuralnetworks/IDevice.h> -#include <android-base/logging.h> -#include <android/binder_manager.h> -#include <android/binder_process.h> -#include <nnapi/hal/aidl/InvalidDevice.h> - -#include <algorithm> -#include <memory> -#include <string> -#include <unordered_map> -#include <utility> -#include <vector> - -#include "NeuralNetworksShim.h" -#include "ShimDevice.h" -#include "ShimUtils.h" -#include "SupportLibrary.h" - -namespace android::neuralnetworks::shim { -namespace { - -using aidl::android::hardware::neuralnetworks::IDevice; -using aidl::android::hardware::neuralnetworks::InvalidDevice; -using aidl::android::hardware::neuralnetworks::ShimDevice; - -ANeuralNetworksShimResultCode registerEagerService(const std::shared_ptr<IDevice>& device, - const std::string& name) { - const binder_exception_t status = - AServiceManager_addService(device->asBinder().get(), name.c_str()); - if (status != EX_NONE) { - LOG(ERROR) << "AServiceManager_addService failed for " << name << ", error code " << status; - return ANNSHIM_FAILED_TO_REGISTER_SERVICE; - } - return ANNSHIM_NO_ERROR; -} - -ANeuralNetworksShimResultCode registerLazyService(const std::shared_ptr<IDevice>& device, - const std::string& name) { 
- if (__builtin_available(android __NNAPI_AIDL_MIN_ANDROID_API__, *)) { - const binder_status_t status = - AServiceManager_registerLazyService(device->asBinder().get(), name.c_str()); - if (status != STATUS_OK) { - LOG(ERROR) << "Service registration failed for " << name << ", error code " << status; - return ANNSHIM_FAILED_TO_REGISTER_SERVICE; - } - return ANNSHIM_NO_ERROR; - } - LOG(ERROR) << "Service registration failed for " << name - << " because AServiceManager_registerLazyService is not supported until API " - "level 31"; - return ANNSHIM_FAILED_TO_REGISTER_SERVICE; -} - -ANeuralNetworksShimResultCode registerService(const std::shared_ptr<IDevice>& device, - const std::string& name, bool registerAsLazy) { - const std::string instance = std::string(ShimDevice::descriptor) + "/" + name; - LOG(INFO) << "Attempting service registration for " << instance; - return registerAsLazy ? registerLazyService(device, instance) - : registerEagerService(device, instance); -} - -std::unordered_map<std::string, ANeuralNetworksDevice*> getNamedDevices( - const std::shared_ptr<const NnApiSupportLibrary>& nnapi) { - uint32_t numDevices; - if (nnapi->ANeuralNetworks_getDeviceCount(&numDevices) != ANEURALNETWORKS_NO_ERROR) { - LOG(ERROR) << "Failed ANeuralNetworks_getDeviceCount"; - return {}; - } - - std::unordered_map<std::string, ANeuralNetworksDevice*> nameToDevice; - for (uint32_t i = 0; i < numDevices; ++i) { - ANeuralNetworksDevice* device; - if (nnapi->ANeuralNetworks_getDevice(i, &device) != ANEURALNETWORKS_NO_ERROR) { - LOG(ERROR) << "Failed ANeuralNetworks_getDevice"; - return {}; - } - - const char* name = nullptr; - if (nnapi->ANeuralNetworksDevice_getName(device, &name) != ANEURALNETWORKS_NO_ERROR) { - LOG(ERROR) << "Failed ANeuralNetworks_getName"; - return {}; - } - - nameToDevice.emplace(name, device); - } - - return nameToDevice; -} - -} // namespace - -ANeuralNetworksShimResultCode registerDevices(NnApiSLDriverImpl* nnapiSLImpl, - const 
std::vector<ShimDeviceInfo>& devicesToRegister, - uint32_t numberOfListenerThreads, - bool registerAsLazyService, - bool fallbackToMinimumSupportDevice) { - if (nnapiSLImpl == nullptr) { - LOG(ERROR) << "Invalid arguments, nnapiSLImpl == nullptr "; - return ANNSHIM_INVALID_ARGUMENT; - } - if (devicesToRegister.empty()) { - LOG(ERROR) << "Invalid arguments, devicesToRegister is empty"; - return ANNSHIM_INVALID_ARGUMENT; - } - - if (nnapiSLImpl->implFeatureLevel < ANEURALNETWORKS_FEATURE_LEVEL_5) { - LOG(ERROR) << "Invalid implStructFeatureLevel if NnApiSLDriverImpl, has to be at least " - "ANEURALNETWORKS_FEATURE_LEVEL_5"; - return ANNSHIM_FAILED_TO_LOAD_SL; - } - - if (nnapiSLImpl->implFeatureLevel > ANEURALNETWORKS_FEATURE_LEVEL_5) { - LOG(ERROR) << "Invalid implStructFeatureLevel if NnApiSLDriverImpl, latest supported " - "version is ANEURALNETWORKS_FEATURE_LEVEL_5"; - return ANNSHIM_FAILED_TO_LOAD_SL; - } - - const std::shared_ptr<const NnApiSupportLibrary> nnapi = - std::make_shared<const NnApiSupportLibrary>( - *reinterpret_cast<NnApiSLDriverImplFL5*>(nnapiSLImpl), nullptr); - - ABinderProcess_setThreadPoolMaxThreadCount(numberOfListenerThreads); - - const auto nameToDevice = getNamedDevices(nnapi); - std::vector<std::shared_ptr<IDevice>> devices; - devices.reserve(devicesToRegister.size()); - - // Convert all supplied devices to AIDL IDevice interfaces. 
- for (const auto& info : devicesToRegister) { - const auto& name = info.deviceName; - - if (const auto iter = nameToDevice.find(name); iter != nameToDevice.end()) { - ANeuralNetworksDevice* device = iter->second; - - auto shimDevice = ndk::SharedRefBase::make<ShimDevice>(nnapi, device, info.serviceName); - devices.push_back(std::move(shimDevice)); - continue; - } - - if (!fallbackToMinimumSupportDevice) { - LOG(ERROR) << "NNAPI device " << name - << " was not found in the support library package, and falling back to a " - "minimum support device was not specified"; - return ANNSHIM_FAILED_TO_REGISTER_SERVICE; - } - - // If the device was not found in the support library package, and falling back on a - // minimum support device is allowed, construct a minimum support device. - LOG(INFO) << "NNAPI device " << name - << " was not found in the support library package, and falling back to a " - "minimal support device is allowed, so a minimal support device " - "is being registered instead."; - devices.push_back(InvalidDevice::create()); - } - - CHECK_EQ(devices.size(), devicesToRegister.size()); - - // Register all AIDL IDevice interfaces. - for (size_t i = 0; i < devicesToRegister.size(); i++) { - const auto& info = devicesToRegister[i]; - const auto& device = devices[i]; - - const auto registrationResult = - registerService(device, info.serviceName, registerAsLazyService); - if (registrationResult != ANNSHIM_NO_ERROR) { - // This will only fail if there is a problem with Binder or if there is a mismatch - // between the service being registered and the service listed on the device manifest. - // Falling back to a minimum support device would not help resolve this whatever - // mismatch may exist, so there is no fallback path at this stage. - return registrationResult; - } - } - - LOG(INFO) << devices.size() << " NNAPI Devices/services registered, blocking"; - ABinderProcess_joinThreadPool(); - - // Shouldn't reach here. 
- LOG(ERROR) << "ABinderProcess_joinThreadPool unexpected returned in registerDevices."; - return ANNSHIM_GENERAL_ERROR; -} - -} // namespace android::neuralnetworks::shim
diff --git a/shim_and_sl/ShimPreparedModel.cpp b/shim_and_sl/ShimPreparedModel.cpp deleted file mode 100644 index f29804b..0000000 --- a/shim_and_sl/ShimPreparedModel.cpp +++ /dev/null
@@ -1,404 +0,0 @@ -/* - * Copyright (C) 2021 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "ShimPreparedModel.h" - -#include <aidl/android/hardware/neuralnetworks/BnBurst.h> -#include <aidl/android/hardware/neuralnetworks/BnFencedExecutionCallback.h> -#include <aidl/android/hardware/neuralnetworks/ErrorStatus.h> -#include <aidl/android/hardware/neuralnetworks/OutputShape.h> -#include <aidl/android/hardware/neuralnetworks/RequestMemoryPool.h> -#include <android-base/chrono_utils.h> -#include <android-base/logging.h> -#include <android-base/scopeguard.h> -#include <android/binder_auto_utils.h> -#include <nnapi/TypeUtils.h> -#include <nnapi/hal/aidl/Conversions.h> - -#include <algorithm> -#include <chrono> -#include <limits> -#include <memory> -#include <thread> -#include <utility> -#include <vector> - -#include "ShimConverter.h" -#include "ShimUtils.h" - -namespace aidl::android::hardware::neuralnetworks { - -ErrorStatus ShimPreparedModel::parseInputs( - const Request& request, bool measure, int64_t deadlineNs, int64_t loopTimeoutDurationNs, - ::android::nn::sl_wrapper::Execution* execution, - std::vector<std::shared_ptr<::android::nn::sl_wrapper::Memory>>* requestMemoryPools) { - for (const auto& requestPool : request.pools) { - switch (requestPool.getTag()) { - case RequestMemoryPool::pool: { - const auto& memoryPool = requestPool.get<RequestMemoryPool::pool>(); - 
std::shared_ptr<::android::nn::sl_wrapper::Memory> mem = - convertFromHAL(mNnapi.get(), memoryPool); - if (!mem) { - LOG(ERROR) << "Failed to convert request HAL memory pools into SL memory"; - return ErrorStatus::INVALID_ARGUMENT; - } - - requestMemoryPools->push_back(mem); - break; - } - case RequestMemoryPool::token: { - int token = requestPool.get<RequestMemoryPool::token>(); - - auto memory = mBufferTracker->get(static_cast<uint32_t>(token)); - if (memory == nullptr) { - return ErrorStatus::INVALID_ARGUMENT; - } - - requestMemoryPools->push_back(memory); - break; - } - } - } - - const auto& model = mMainAndReferencedModels[0]; - // set inputs - for (int i = 0; i < request.inputs.size(); ++i) { - const auto& input = request.inputs[i]; - ::android::nn::wrapper::OperandType operandType = model.getOperands()[model.getInputs()[i]]; - if (!input.hasNoValue) { - if (input.dimensions.size() > 0) { - operandType.updateDimensions(::android::nn::toUnsigned(input.dimensions).value()); - } - auto result = execution->setInputFromMemory( - i, requestMemoryPools->at(input.location.poolIndex).get(), - input.location.offset, input.location.length, &operandType.operandType); - if (result != Result::NO_ERROR) { - return convertResultToErrorStatus(result); - } - } else { - auto result = execution->setInput(i, nullptr, 0); - if (result != Result::NO_ERROR) { - return convertResultToErrorStatus(result); - } - } - } - - // set outputs - for (int i = 0; i < request.outputs.size(); ++i) { - const auto& output = request.outputs[i]; - ::android::nn::wrapper::OperandType operandType = - model.getOperands()[model.getOutputs()[i]]; - - if (!output.hasNoValue) { - if (output.dimensions.size() > 0) { - operandType.updateDimensions(::android::nn::toUnsigned(output.dimensions).value()); - } - auto result = execution->setOutputFromMemory( - i, requestMemoryPools->at(output.location.poolIndex).get(), - output.location.offset, output.location.length, &operandType.operandType); - if (result != 
Result::NO_ERROR) { - return convertResultToErrorStatus(result); - } - } else { - auto result = execution->setOutput(i, nullptr, 0); - if (result != Result::NO_ERROR) { - return convertResultToErrorStatus(result); - } - } - } - - if (measure) { - execution->setMeasureTiming(true); - } - - if (deadlineNs > -1) { - std::chrono::time_point<::android::base::boot_clock> deadlinePoint( - std::chrono::nanoseconds{deadlineNs}); - const auto currentTime = ::android::base::boot_clock::now(); - const auto timeoutDuration = std::chrono::nanoseconds(deadlinePoint - currentTime); - if (timeoutDuration <= std::chrono::nanoseconds::zero()) { - return ErrorStatus::MISSED_DEADLINE_TRANSIENT; - } else { - auto result = execution->setTimeout(std::max<uint64_t>(1, timeoutDuration.count())); - if (result != Result::NO_ERROR) { - return convertResultToErrorStatus(result); - } - } - } - - if (loopTimeoutDurationNs > 0) { - execution->setLoopTimeout(loopTimeoutDurationNs); - } - return ErrorStatus::NONE; -} - -class ShimFencedExecutionCallback : public BnFencedExecutionCallback { - public: - ShimFencedExecutionCallback( - ::android::nn::sl_wrapper::Execution execution, Event e, - std::vector<std::shared_ptr<::android::nn::sl_wrapper::Memory>> memoryPools, - bool measureTiming) - : mMemoryPools(std::move(memoryPools)), - mExecution(std::move(execution)), - mEvent(std::move(e)), - mMeasureTiming(measureTiming) {} - - ndk::ScopedAStatus getExecutionInfo(Timing* timingLaunched, Timing* timingFenced, - ErrorStatus* errorStatus) override { - auto status = mEvent.wait(); - *errorStatus = convertResultToErrorStatus(status); - - if (mMeasureTiming) { - uint64_t duration; - constexpr int64_t int64cap = std::numeric_limits<int64_t>::max(); - // Special value used for "no measurements" - constexpr uint64_t uint64cap = std::numeric_limits<uint64_t>::max(); - auto result = mExecution.getDuration(Duration::ON_HARDWARE, &duration); - SLW2SAS_RETURN_IF_ERROR(result); - timingLaunched->timeOnDeviceNs = - 
(duration == uint64cap) - ? -1 - : (duration > int64cap) ? int64cap : static_cast<int64_t>(duration); - - result = mExecution.getDuration(Duration::IN_DRIVER, &duration); - SLW2SAS_RETURN_IF_ERROR(result); - timingLaunched->timeInDriverNs = - (duration == uint64cap) - ? -1 - : (duration > int64cap) ? int64cap : static_cast<int64_t>(duration); - - result = mExecution.getDuration(Duration::FENCED_ON_HARDWARE, &duration); - SLW2SAS_RETURN_IF_ERROR(result); - timingFenced->timeOnDeviceNs = - (duration == uint64cap) - ? -1 - : (duration > int64cap) ? int64cap : static_cast<int64_t>(duration); - - result = mExecution.getDuration(Duration::FENCED_IN_DRIVER, &duration); - SLW2SAS_RETURN_IF_ERROR(result); - timingFenced->timeInDriverNs = - (duration == uint64cap) - ? -1 - : (duration > int64cap) ? int64cap : static_cast<int64_t>(duration); - } else { - timingFenced->timeOnDeviceNs = -1; - timingFenced->timeInDriverNs = -1; - timingLaunched->timeOnDeviceNs = -1; - timingLaunched->timeInDriverNs = -1; - } - - return ndk::ScopedAStatus::ok(); - } - - private: - std::vector<std::shared_ptr<::android::nn::sl_wrapper::Memory>> mMemoryPools; - ::android::nn::sl_wrapper::Execution mExecution; - ::android::nn::wrapper::Event mEvent; - bool mMeasureTiming; -}; - -::ndk::ScopedAStatus ShimPreparedModel::executeFenced( - const ::aidl::android::hardware::neuralnetworks::Request& request, - const std::vector<::ndk::ScopedFileDescriptor>& waitFor, bool measureTiming, - int64_t deadlineNs, int64_t loopTimeoutDurationNs, int64_t durationNs, - FencedExecutionResult* fencedExecutionResult) { - if (deadlineNs < -1) { - LOG(ERROR) << "Invalid deadline value, must be >= -1"; - return ndk::ScopedAStatus::fromServiceSpecificError( - static_cast<int>(ErrorStatus::INVALID_ARGUMENT)); - } - auto execution = ::android::nn::sl_wrapper::Execution(mNnapi.get(), &mCompilation); - std::vector<std::shared_ptr<::android::nn::sl_wrapper::Memory>> requestMemoryPools; - auto errorStatus = parseInputs(request, 
measureTiming, deadlineNs, loopTimeoutDurationNs, - &execution, &requestMemoryPools); - if (errorStatus != ErrorStatus::NONE) { - return toAStatus(errorStatus); - } - - std::vector<const ANeuralNetworksEvent*> deps(waitFor.size()); - auto createResult = Result::NO_ERROR; - std::transform(waitFor.begin(), waitFor.end(), deps.begin(), - [&](const ::ndk::ScopedFileDescriptor& e) { - ANeuralNetworksEvent* r = nullptr; - if (createResult == Result::NO_ERROR) { - createResult = static_cast<Result>( - mNnapi->ANeuralNetworksEvent_createFromSyncFenceFd(e.get(), &r)); - } - return r; - }); - - const auto guard = ::android::base::make_scope_guard([this, deps] { - for (auto& dep : deps) { - if (dep != nullptr) { - mNnapi->ANeuralNetworksEvent_free(const_cast<ANeuralNetworksEvent*>(dep)); - } - } - }); - - SLW2SAS_RETURN_IF_ERROR(createResult); - - Event e(mNnapi.get()); - auto result = execution.startComputeWithDependencies(deps, durationNs, &e); - SLW2SAS_RETURN_IF_ERROR(result); - - int syncFence = -1; - fencedExecutionResult->syncFence = ndk::ScopedFileDescriptor( - (e.getSyncFenceFd(&syncFence) == Result::NO_ERROR) ? 
syncFence : -1); - fencedExecutionResult->callback = ndk::SharedRefBase::make<ShimFencedExecutionCallback>( - std::move(execution), std::move(e), std::move(requestMemoryPools), measureTiming); - - return ndk::ScopedAStatus::ok(); -} - -::ndk::ScopedAStatus ShimPreparedModel::executeSynchronously( - const Request& request, bool measureTiming, int64_t deadlineNs, - int64_t loopTimeoutDurationNs, - ::aidl::android::hardware::neuralnetworks::ExecutionResult* executionResult) { - if (deadlineNs < -1) { - LOG(ERROR) << "Invalid deadline value, must be >= -1"; - return ndk::ScopedAStatus::fromServiceSpecificError( - static_cast<int>(ErrorStatus::INVALID_ARGUMENT)); - } - - auto execution = - std::make_unique<::android::nn::sl_wrapper::Execution>(mNnapi.get(), &mCompilation); - std::vector<std::shared_ptr<::android::nn::sl_wrapper::Memory>> requestMemoryPools; - auto errorStatus = parseInputs(request, measureTiming, deadlineNs, loopTimeoutDurationNs, - execution.get(), &requestMemoryPools); - if (errorStatus != ErrorStatus::NONE) { - return toAStatus(errorStatus); - } - - auto result = execution->compute(); - errorStatus = convertResultToErrorStatus(result); - - int numOutputs = request.outputs.size(); - std::vector<OutputShape> outputShapes; - outputShapes.reserve(numOutputs); - bool sufficientSize = true; - for (int i = 0; i < numOutputs; ++i) { - OutputShape outputShape; - std::vector<uint32_t> outputDims; - auto result = execution->getOutputOperandDimensions(i, &outputDims); - if (result == Result::NO_ERROR) { - outputShape.isSufficient = true; - outputShape.dimensions.assign(outputDims.begin(), outputDims.end()); - } else if (result == Result::OUTPUT_INSUFFICIENT_SIZE) { - sufficientSize = false; - outputShape.isSufficient = false; - outputShape.dimensions.assign(outputDims.begin(), outputDims.end()); - } else { - if (errorStatus == ErrorStatus::NONE) { - errorStatus = ErrorStatus::GENERAL_FAILURE; - } - } - outputShapes.push_back(std::move(outputShape)); - } - - 
int64_t timeOnDeviceNs = -1; - int64_t timeInDriverNs = -1; - if (measureTiming && errorStatus == ErrorStatus::NONE) { - uint64_t duration; - constexpr int64_t int64cap = std::numeric_limits<int64_t>::max(); - // Special value used for "no measurements" - constexpr uint64_t uint64cap = std::numeric_limits<uint64_t>::max(); - auto result = execution->getDuration(Duration::ON_HARDWARE, &duration); - SLW2SAS_RETURN_IF_ERROR(result); - timeOnDeviceNs = - (duration == uint64cap) - ? -1 - : (duration > int64cap) ? int64cap : static_cast<int64_t>(duration); - - result = execution->getDuration(Duration::IN_DRIVER, &duration); - SLW2SAS_RETURN_IF_ERROR(result); - timeInDriverNs = - (duration == uint64cap) - ? -1 - : (duration > int64cap) ? int64cap : static_cast<int64_t>(duration); - } - - *executionResult = - ExecutionResult{sufficientSize, - std::move(outputShapes), - {.timeOnDeviceNs = timeOnDeviceNs, .timeInDriverNs = timeInDriverNs}}; - if (errorStatus == ErrorStatus::NONE || errorStatus == ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) { - return ndk::ScopedAStatus::ok(); - } - return toAStatus(errorStatus); -} - -// TODO(183397380): make it use ANNBurst object -class ShimBurst : public BnBurst { - public: - // Precondition: preparedModel != nullptr - explicit ShimBurst(std::shared_ptr<ShimPreparedModel> preparedModel); - - ndk::ScopedAStatus executeSynchronously(const Request& request, - const std::vector<int64_t>& memoryIdentifierTokens, - bool measureTiming, int64_t deadlineNs, - int64_t loopTimeoutDurationNs, - ExecutionResult* executionResult) override; - ndk::ScopedAStatus releaseMemoryResource(int64_t memoryIdentifierToken) override; - - protected: - std::atomic_flag mExecutionInFlight = ATOMIC_FLAG_INIT; - const std::shared_ptr<ShimPreparedModel> kPreparedModel; -}; - -ndk::ScopedAStatus ShimPreparedModel::configureExecutionBurst(std::shared_ptr<IBurst>* burst) { - std::shared_ptr<ShimPreparedModel> self = this->template ref<ShimPreparedModel>(); - *burst = 
ndk::SharedRefBase::make<ShimBurst>(std::move(self)); - return ndk::ScopedAStatus::ok(); -} - -ShimBurst::ShimBurst(std::shared_ptr<ShimPreparedModel> preparedModel) - : kPreparedModel(std::move(preparedModel)) { - CHECK(kPreparedModel != nullptr); -} - -ndk::ScopedAStatus ShimBurst::executeSynchronously( - const Request& request, const std::vector<int64_t>& memoryIdentifierTokens, - bool measureTiming, int64_t deadlineNs, int64_t loopTimeoutDurationNs, - ExecutionResult* executionResult) { - if (request.pools.size() != memoryIdentifierTokens.size()) { - return toAStatus(ErrorStatus::INVALID_ARGUMENT, - "request.pools.size() != memoryIdentifierTokens.size()"); - } - if (!std::all_of(memoryIdentifierTokens.begin(), memoryIdentifierTokens.end(), - [](int64_t token) { return token >= -1; })) { - return toAStatus(ErrorStatus::INVALID_ARGUMENT, "Invalid memoryIdentifierTokens"); - } - - // Ensure at most one execution is in flight at a time. - const bool executionAlreadyInFlight = mExecutionInFlight.test_and_set(); - if (executionAlreadyInFlight) { - return toAStatus(ErrorStatus::GENERAL_FAILURE, - "Burst object supports at most one execution at a time"); - } - const auto guard = ::android::base::make_scope_guard([this] { mExecutionInFlight.clear(); }); - - return kPreparedModel->executeSynchronously(request, measureTiming, deadlineNs, - loopTimeoutDurationNs, executionResult); -} - -ndk::ScopedAStatus ShimBurst::releaseMemoryResource(int64_t memoryIdentifierToken) { - if (memoryIdentifierToken < -1) { - return toAStatus(ErrorStatus::INVALID_ARGUMENT, "Invalid memoryIdentifierToken"); - } - return ndk::ScopedAStatus::ok(); -} - -} // namespace aidl::android::hardware::neuralnetworks
diff --git a/shim_and_sl/ShimUtils.cpp b/shim_and_sl/ShimUtils.cpp deleted file mode 100644 index 792f1f9..0000000 --- a/shim_and_sl/ShimUtils.cpp +++ /dev/null
@@ -1,78 +0,0 @@ -/* - * Copyright (C) 2021 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#define LOG_TAG "ShimUtils" - -#include "ShimUtils.h" -#include <android-base/logging.h> - -#include <string> -#include <vector> - -namespace aidl::android::hardware::neuralnetworks { - -using ::aidl::android::hardware::neuralnetworks::ErrorStatus; -using ::android::nn::wrapper::Result; - -ndk::ScopedAStatus toAStatus(ErrorStatus errorStatus, const std::string& errorMessage) { - if (errorStatus == ErrorStatus::NONE) { - return ndk::ScopedAStatus::ok(); - } - return ndk::ScopedAStatus::fromServiceSpecificErrorWithMessage( - static_cast<int32_t>(errorStatus), errorMessage.c_str()); -} - -ndk::ScopedAStatus toAStatus(ErrorStatus errorStatus) { - if (errorStatus == ErrorStatus::NONE) { - return ndk::ScopedAStatus::ok(); - } - return ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(errorStatus)); -} - -ErrorStatus convertResultToErrorStatus(Result status) { - switch (status) { - case Result::NO_ERROR: - return ErrorStatus::NONE; - case Result::OUTPUT_INSUFFICIENT_SIZE: - return ErrorStatus::OUTPUT_INSUFFICIENT_SIZE; - case Result::UNAVAILABLE_DEVICE: - return ErrorStatus::DEVICE_UNAVAILABLE; - case Result::MISSED_DEADLINE_TRANSIENT: - return ErrorStatus::MISSED_DEADLINE_TRANSIENT; - case Result::MISSED_DEADLINE_PERSISTENT: - return ErrorStatus::MISSED_DEADLINE_PERSISTENT; - case Result::BAD_DATA: - case 
Result::INCOMPLETE: - case Result::UNEXPECTED_NULL: - case Result::UNMAPPABLE: - case Result::OUT_OF_MEMORY: - case Result::BAD_STATE: - return ErrorStatus::INVALID_ARGUMENT; - case Result::OP_FAILED: - return ErrorStatus::GENERAL_FAILURE; - case Result::FEATURE_LEVEL_TOO_LOW: - return ErrorStatus::GENERAL_FAILURE; - } - - LOG(FATAL) << "Invalid Result " << static_cast<std::underlying_type_t<Result>>(status); - return {}; -} - -bool isValidDimension(const std::vector<int32_t>& v) { - return std::all_of(v.begin(), v.end(), [](int32_t i) { return i >= 0; }); -} - -} // namespace aidl::android::hardware::neuralnetworks
diff --git a/shim_and_sl/SupportLibrary.cpp b/shim_and_sl/SupportLibrary.cpp deleted file mode 100644 index 035d5fc..0000000 --- a/shim_and_sl/SupportLibrary.cpp +++ /dev/null
@@ -1,75 +0,0 @@ -/* - * Copyright (C) 2021 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "SupportLibrary.h" - -#include <android-base/logging.h> - -#include <dlfcn.h> - -#include <cstring> -#include <memory> -#include <string> - -NnApiSupportLibrary::NnApiSupportLibrary(const NnApiSLDriverImplFL5& impl, void* libHandle) - : NnApiSLDriverImplFL5(impl), libHandle(libHandle) { - base.implFeatureLevel = ANEURALNETWORKS_FEATURE_LEVEL_5; -} - -NnApiSupportLibrary::~NnApiSupportLibrary() { - if (libHandle != nullptr) { - dlclose(libHandle); - libHandle = nullptr; - } -} - -std::unique_ptr<const NnApiSupportLibrary> loadNnApiSupportLibrary(const std::string& libName) { - void* libHandle = dlopen(libName.c_str(), RTLD_LAZY | RTLD_LOCAL); - if (libHandle == nullptr) { - LOG(ERROR) << "nnapi error: unable to open library " << libName.c_str() << " " << dlerror(); - return nullptr; - } - - auto result = loadNnApiSupportLibrary(libHandle); - if (!result) { - dlclose(libHandle); - } - return result; -} - -std::unique_ptr<const NnApiSupportLibrary> loadNnApiSupportLibrary(void* libHandle) { - NnApiSLDriverImpl* (*getSlDriverImpl)(); - getSlDriverImpl = reinterpret_cast<decltype(getSlDriverImpl)>( - dlsym(libHandle, "ANeuralNetworks_getSLDriverImpl")); - if (getSlDriverImpl == nullptr) { - LOG(ERROR) << "Failed to find ANeuralNetworks_getSLDriverImpl symbol"; - return nullptr; - } - - NnApiSLDriverImpl* impl = 
getSlDriverImpl(); - if (impl == nullptr) { - LOG(ERROR) << "ANeuralNetworks_getSLDriverImpl returned nullptr"; - return nullptr; - } - - if (impl->implFeatureLevel != ANEURALNETWORKS_FEATURE_LEVEL_5) { - LOG(ERROR) << "Unsupported NnApiSLDriverImpl->implFeatureLevel: " << impl->implFeatureLevel; - return nullptr; - } - - return std::make_unique<NnApiSupportLibrary>(*reinterpret_cast<NnApiSLDriverImplFL5*>(impl), - libHandle); -}
diff --git a/shim_and_sl/SupportLibraryWrapper.cpp b/shim_and_sl/SupportLibraryWrapper.cpp deleted file mode 100644 index c8c4924..0000000 --- a/shim_and_sl/SupportLibraryWrapper.cpp +++ /dev/null
@@ -1,27 +0,0 @@ -/* - * Copyright (C) 2020 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "SupportLibraryWrapper.h" - -namespace android { -namespace nn { -namespace sl_wrapper { - -Execution::ComputeMode Execution::mComputeMode = Execution::ComputeMode::SYNC; - -} // namespace sl_wrapper -} // namespace nn -} // namespace android
diff --git a/shim_and_sl/include/ShimBufferTracker.h b/shim_and_sl/include/ShimBufferTracker.h deleted file mode 100644 index 7555899..0000000 --- a/shim_and_sl/include/ShimBufferTracker.h +++ /dev/null
@@ -1,85 +0,0 @@ -/* - * Copyright (C) 2021 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#pragma once - -#include <aidl/android/hardware/neuralnetworks/BnBuffer.h> -#include <aidl/android/hardware/neuralnetworks/BnDevice.h> -#include <android/binder_auto_utils.h> - -#include <memory> -#include <stack> -#include <string> -#include <utility> -#include <vector> - -#include "NeuralNetworksShim.h" -#include "SupportLibrary.h" -#include "SupportLibraryWrapper.h" - -#include <android-base/macros.h> -#include <android-base/thread_annotations.h> - -namespace aidl::android::hardware::neuralnetworks { - -// TODO: This can be templetized. -// Keep track of all sl_rapper::Memory and assign each with a unique token. -class ShimBufferTracker : public std::enable_shared_from_this<ShimBufferTracker> { - DISALLOW_COPY_AND_ASSIGN(ShimBufferTracker); - - public: - // A RAII class to help manage the lifetime of the token. - // It is only supposed to be constructed in ShimBufferTracker::add. - class Token { - DISALLOW_COPY_AND_ASSIGN(Token); - - public: - Token(uint32_t token, std::shared_ptr<ShimBufferTracker> tracker) - : kToken(token), kBufferTracker(std::move(tracker)) {} - ~Token() { kBufferTracker->free(kToken); } - uint32_t get() const { return kToken; } - - private: - const uint32_t kToken; - const std::shared_ptr<ShimBufferTracker> kBufferTracker; - }; - - // The factory of ShimBufferTracker. 
This ensures that the ShimBufferTracker is always managed - // by a shared_ptr. - static std::shared_ptr<ShimBufferTracker> create() { - return std::make_shared<ShimBufferTracker>(); - } - - // Prefer ShimBufferTracker::create. - ShimBufferTracker() : mTokenToBuffers(1) {} - - std::unique_ptr<Token> add(std::shared_ptr<::android::nn::sl_wrapper::Memory> buffer); - std::shared_ptr<::android::nn::sl_wrapper::Memory> get(uint32_t token) const; - - private: - void free(uint32_t token); - - mutable std::mutex mMutex; - std::stack<uint32_t, std::vector<uint32_t>> mFreeTokens GUARDED_BY(mMutex); - - // Since the tokens are allocated in a non-sparse way, we use a vector to represent the mapping. - // The index of the vector is the token. When the token gets freed, the corresponding entry is - // set to nullptr. mTokenToBuffers[0] is always set to nullptr because 0 is an invalid token. - std::vector<std::shared_ptr<::android::nn::sl_wrapper::Memory>> mTokenToBuffers - GUARDED_BY(mMutex); -}; - -} // namespace aidl::android::hardware::neuralnetworks
diff --git a/shim_and_sl/include/ShimConverter.h b/shim_and_sl/include/ShimConverter.h deleted file mode 100644 index 7c206ea..0000000 --- a/shim_and_sl/include/ShimConverter.h +++ /dev/null
@@ -1,58 +0,0 @@ -/* - * Copyright (C) 2021 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#pragma once - -#include <memory> -#include <utility> -#include <vector> - -#include <aidl/android/hardware/neuralnetworks/ErrorStatus.h> -#include <aidl/android/hardware/neuralnetworks/Memory.h> -#include <aidl/android/hardware/neuralnetworks/Model.h> - -#include "SupportLibrary.h" -#include "SupportLibraryWrapper.h" - -namespace aidl::android::hardware::neuralnetworks { - -struct ShimConvertedModel { - std::vector<std::unique_ptr<::android::nn::sl_wrapper::Memory>> memory; - std::vector<::android::nn::sl_wrapper::Model> models; -}; - -bool isValid(const neuralnetworks::Model& model); - -/** - * Convert HAL model into Model ready to be consumed by SL Driver. - 8 - * @param nnapi NNAPI SL Driver implementation - * @param model HAL NNAPI Model - * @param copiedOperandValues If model requires it (contains CONSTANT_COPY operands larger - * then 128 bytes), this vector will be used to hold a copy of - * model operand values. Must be non-null. - * @param errorStatus Output error status in case of failure. - * @return ShimConvertedModel with all converted memories and models. 
- * - */ -std::optional<ShimConvertedModel> convertFromHAL(const NnApiSupportLibrary* nnapi, - const neuralnetworks::Model& model, - std::vector<uint8_t>* copiedOperandValues, - ErrorStatus* errorStatus); -std::unique_ptr<::android::nn::sl_wrapper::Memory> convertFromHAL( - const NnApiSupportLibrary* nnapi, const neuralnetworks::Memory& pool); - -} // namespace aidl::android::hardware::neuralnetworks
diff --git a/shim_and_sl/include/ShimDevice.h b/shim_and_sl/include/ShimDevice.h deleted file mode 100644 index 48e133f..0000000 --- a/shim_and_sl/include/ShimDevice.h +++ /dev/null
@@ -1,77 +0,0 @@ -/* - * Copyright (C) 2021 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#pragma once - -#include <aidl/android/hardware/neuralnetworks/BnBuffer.h> -#include <aidl/android/hardware/neuralnetworks/BnDevice.h> - -#include <memory> -#include <stack> -#include <string> -#include <utility> -#include <vector> - -#include "NeuralNetworksShim.h" -#include "ShimBufferTracker.h" -#include "SupportLibrary.h" -#include "SupportLibraryWrapper.h" - -#include <android-base/macros.h> -#include <android-base/thread_annotations.h> - -namespace aidl::android::hardware::neuralnetworks { - -class ShimDevice : public BnDevice { - public: - ShimDevice(std::shared_ptr<const NnApiSupportLibrary>, ANeuralNetworksDevice*, - std::string serviceName); - ::ndk::ScopedAStatus allocate(const BufferDesc& desc, - const std::vector<IPreparedModelParcel>& preparedModels, - const std::vector<BufferRole>& inputRoles, - const std::vector<BufferRole>& outputRoles, - DeviceBuffer* deviceBuffer) override; - ::ndk::ScopedAStatus getCapabilities(Capabilities* capabilities) override; - ::ndk::ScopedAStatus getNumberOfCacheFilesNeeded( - NumberOfCacheFiles* numberOfCacheFiles) override; - ::ndk::ScopedAStatus getSupportedExtensions(std::vector<Extension>* extensions) override; - ::ndk::ScopedAStatus getSupportedOperations(const Model& model, - std::vector<bool>* supportedOperations) override; - ::ndk::ScopedAStatus getType(DeviceType* 
deviceType) override; - ::ndk::ScopedAStatus getVersionString(std::string* versionString) override; - ::ndk::ScopedAStatus prepareModel( - const Model& model, ExecutionPreference preference, Priority priority, - int64_t deadlineNs, const std::vector<::ndk::ScopedFileDescriptor>& modelCache, - const std::vector<::ndk::ScopedFileDescriptor>& dataCache, - const std::vector<uint8_t>& token, - const std::shared_ptr<IPreparedModelCallback>& callback) override; - ::ndk::ScopedAStatus prepareModelFromCache( - int64_t deadlineNs, const std::vector<::ndk::ScopedFileDescriptor>& modelCache, - const std::vector<::ndk::ScopedFileDescriptor>& dataCache, - const std::vector<uint8_t>& token, - const std::shared_ptr<IPreparedModelCallback>& callback) override; - - private: - std::shared_ptr<const NnApiSupportLibrary> mNnapi; - std::shared_ptr<ShimBufferTracker> mBufferTracker; - std::string mServiceName; - ANeuralNetworksDevice* mDevice; - Capabilities mCapabilities; - NumberOfCacheFiles mNumberOfCacheFiles; - std::vector<Extension> mExtensions; -}; - -} // namespace aidl::android::hardware::neuralnetworks
diff --git a/shim_and_sl/include/ShimDeviceManager.h b/shim_and_sl/include/ShimDeviceManager.h deleted file mode 100644 index a1afb3f..0000000 --- a/shim_and_sl/include/ShimDeviceManager.h +++ /dev/null
@@ -1,56 +0,0 @@ -/* - * Copyright (C) 2021 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#pragma once - -#include <string> -#include <vector> - -#include "NeuralNetworksShim.h" -#include "ShimDevice.h" - -namespace android::neuralnetworks::shim { - -/** - * Information about an NNAPI Device to register. - */ -struct ShimDeviceInfo { - /** - * Name of the target device, as returned by SL ANeuralNetworksDevice_getName - */ - std::string deviceName; - - /** - * Name of HAL AIDL service backed by this SL NNAPI Driver device. - */ - std::string serviceName; -}; - -struct RegistrationParams { - NnApiSLDriverImpl* nnapiSupportLibraryPackage = nullptr; - std::vector<ShimDeviceInfo> deviceInfos; - uint32_t numberOfListenerThreads = 15; - bool registerAsLazyService = false; - bool fallbackToMinimumSupportDevice = false; -}; - -ANeuralNetworksShimResultCode registerDevices(NnApiSLDriverImpl* nnapiSLImpl, - const std::vector<ShimDeviceInfo>& devicesToRegister, - uint32_t numberOfListenerThreads, - bool registerAsLazyService, - bool fallbackToMinimumSupportDevice); - -} // namespace android::neuralnetworks::shim
diff --git a/shim_and_sl/include/ShimPreparedModel.h b/shim_and_sl/include/ShimPreparedModel.h deleted file mode 100644 index c17e815..0000000 --- a/shim_and_sl/include/ShimPreparedModel.h +++ /dev/null
@@ -1,78 +0,0 @@ -/* - * Copyright (C) 2021 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#pragma once - -#include <memory> -#include <utility> -#include <vector> - -#include <aidl/android/hardware/neuralnetworks/BnPreparedModel.h> -#include <android-base/logging.h> -#include "ShimDevice.h" -#include "SupportLibrary.h" -#include "SupportLibraryWrapper.h" - -namespace aidl::android::hardware::neuralnetworks { - -class ShimPreparedModel : public BnPreparedModel { - public: - ShimPreparedModel(std::shared_ptr<const NnApiSupportLibrary> nnapi, - std::shared_ptr<ShimBufferTracker> bufferTracker, - ::android::nn::sl_wrapper::Compilation compilation, - std::vector<::android::nn::sl_wrapper::Model> mainAndReferencedModels, - std::vector<std::unique_ptr<::android::nn::sl_wrapper::Memory>> memoryPools, - std::vector<uint8_t> copiedOperandValues) - : mNnapi(nnapi), - mBufferTracker(bufferTracker), - mCompilation(std::move(compilation)), - mMainAndReferencedModels(std::move(mainAndReferencedModels)), - mMemoryPools(std::move(memoryPools)), - mCopiedOperandValues(std::move(copiedOperandValues)) { - CHECK(mMainAndReferencedModels.size() > 0); - }; - - ::ndk::ScopedAStatus executeSynchronously(const Request& request, bool measureTiming, - int64_t deadlineNs, int64_t loopTimeoutDurationNs, - ExecutionResult* executionResults) override; - ::ndk::ScopedAStatus executeFenced(const Request& request, - const 
std::vector<::ndk::ScopedFileDescriptor>& waitFor, - bool measureTiming, int64_t deadlineNs, - int64_t loopTimeoutDurationNs, int64_t durationNs, - FencedExecutionResult* fencedExecutionResult) override; - ndk::ScopedAStatus configureExecutionBurst(std::shared_ptr<IBurst>* burst) override; - - const ::android::nn::sl_wrapper::Compilation& getCompilation() const { return mCompilation; } - const ::android::nn::sl_wrapper::Model& getMainModel() const { - return mMainAndReferencedModels[0]; - } - - private: - ErrorStatus parseInputs( - const Request& request, bool measure, int64_t deadlineNs, int64_t loopTimeoutDurationNs, - ::android::nn::sl_wrapper::Execution* execution, - std::vector<std::shared_ptr<::android::nn::sl_wrapper::Memory>>* requestMemoryPools); - - std::shared_ptr<const NnApiSupportLibrary> mNnapi; - std::shared_ptr<ShimBufferTracker> mBufferTracker; - - ::android::nn::sl_wrapper::Compilation mCompilation; - std::vector<::android::nn::sl_wrapper::Model> mMainAndReferencedModels; - std::vector<std::unique_ptr<::android::nn::sl_wrapper::Memory>> mMemoryPools; - std::vector<uint8_t> mCopiedOperandValues; -}; - -} // namespace aidl::android::hardware::neuralnetworks
diff --git a/shim_and_sl/include/ShimUtils.h b/shim_and_sl/include/ShimUtils.h deleted file mode 100644 index 7a3909a..0000000 --- a/shim_and_sl/include/ShimUtils.h +++ /dev/null
@@ -1,64 +0,0 @@ -/* - * Copyright (C) 2021 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#pragma once - -#include <aidl/android/hardware/neuralnetworks/ErrorStatus.h> -#include <android/binder_auto_utils.h> -#include <string> -#include <vector> -#include "SupportLibraryWrapper.h" - -namespace aidl::android::hardware::neuralnetworks { - -ErrorStatus convertResultToErrorStatus(::android::nn::wrapper::Result status); -bool isValidDimension(const std::vector<int32_t>& v); -ndk::ScopedAStatus toAStatus(ErrorStatus errorStatus, const std::string& errorMessage); -ndk::ScopedAStatus toAStatus(ErrorStatus errorStatus); - -#define SLW2SAS_RETURN_IF_ERROR(expr) \ - do { \ - const Result nnReturnIfErrorErrorCode = static_cast<Result>(expr); \ - if (nnReturnIfErrorErrorCode != Result::NO_ERROR) { \ - const auto nnReturnIfErrorErrorCodeConverted = \ - convertResultToErrorStatus(nnReturnIfErrorErrorCode); \ - return toAStatus(nnReturnIfErrorErrorCodeConverted); \ - } \ - } while (0) - -#define SLW2SAS_RETURN_AND_CALLBACK_IF_ERROR(expr, callback) \ - do { \ - const Result nnReturnIfErrorErrorCode = static_cast<Result>(expr); \ - if (nnReturnIfErrorErrorCode != Result::NO_ERROR) { \ - const auto nnReturnIfErrorErrorCodeConverted = \ - convertResultToErrorStatus(nnReturnIfErrorErrorCode); \ - callback->notify(nnReturnIfErrorErrorCodeConverted, nullptr); \ - return toAStatus(nnReturnIfErrorErrorCodeConverted); \ - } \ - } while (0) - 
-#define SLW2SAS_OK_RETURN_AND_ERROR_CALLBACK_IF_ERROR(expr, callback) \ - do { \ - const Result nnReturnIfErrorErrorCode = static_cast<Result>(expr); \ - if (nnReturnIfErrorErrorCode != Result::NO_ERROR) { \ - const auto nnReturnIfErrorErrorCodeConverted = \ - convertResultToErrorStatus(nnReturnIfErrorErrorCode); \ - callback->notify(nnReturnIfErrorErrorCodeConverted, nullptr); \ - return ndk::ScopedAStatus::ok(); \ - } \ - } while (0) - -} // namespace aidl::android::hardware::neuralnetworks
diff --git a/shim_and_sl/include/SupportLibrary.h b/shim_and_sl/include/SupportLibrary.h deleted file mode 100644 index eae98b1..0000000 --- a/shim_and_sl/include/SupportLibrary.h +++ /dev/null
@@ -1,65 +0,0 @@ -/* - * Copyright (C) 2021 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef ANDROID_PACKAGES_MODULES_NEURALNETWORKS_SL_SUPPORT_LIBRARY_H -#define ANDROID_PACKAGES_MODULES_NEURALNETWORKS_SL_SUPPORT_LIBRARY_H - -#include <memory> -#include <string> - -#include "NeuralNetworksSupportLibraryImpl.h" -#include "NeuralNetworksTypes.h" - -#ifndef __NNAPI_FL5_MIN_ANDROID_API__ -#define __NNAPI_FL5_MIN_ANDROID_API__ __ANDROID_API_S__ -#endif - -/** - * Helper struct, derived from the latest NnApiSLDriverImpl. - * - * Owns the .so handle, and will close it in destructor. - * Sets proper implStructFeatureLevel in constructor. - * - * It's derived from the latest NnApiSLDriverImplFL* struct, - * so it contains all possible functionality. - * - * When a new NnApiSLDriverImpl is introduced, this class - * has to switch base class to it and provide constructors for - * all existing NnApiSLDriverImplFL* structs. - * - * There's expectation that for M>N, NnApiSLDriverImplFL(M) is - * a strict superset of NnApiSLDriverImplFL(N), and *NnApiSLDriverImplFL(M) can - * be reinterpret_cast to *NnApiSLDriverImplFL(N) safely. 
- * - * The base->implFeatureLevel is set to the actual Feature Level - * implemented by the SLDriverImpl, - */ -struct NnApiSupportLibrary : NnApiSLDriverImplFL5 { - NnApiSupportLibrary(const NnApiSLDriverImplFL5& impl, void* libHandle); - ~NnApiSupportLibrary(); - - void* libHandle = nullptr; -}; - -/** - * Loads the NNAPI support library. - * The NnApiSupportLibrary structure is filled with all the pointers. If one - * function doesn't exist, a null pointer is stored. - */ -std::unique_ptr<const NnApiSupportLibrary> loadNnApiSupportLibrary(const std::string& libName); -std::unique_ptr<const NnApiSupportLibrary> loadNnApiSupportLibrary(void* libHandle); - -#endif // ANDROID_PACKAGES_MODULES_NEURALNETWORKS_SL_SUPPORT_LIBRARY_H
diff --git a/shim_and_sl/include/SupportLibrarySymbols.h b/shim_and_sl/include/SupportLibrarySymbols.h deleted file mode 100644 index a604db2..0000000 --- a/shim_and_sl/include/SupportLibrarySymbols.h +++ /dev/null
@@ -1,32 +0,0 @@ -/* - * Copyright (C) 2021 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#pragma once - -#include <stddef.h> -#include "NeuralNetworksSupportLibraryImpl.h" - -// If you are linking against SL driver implementation through DT_NEEDED, -// you can use this declaration to access its implementation instead -// of doing dlsym. - -__BEGIN_DECLS - -/** - * Get the NNAPI SL Driver NnApiSLDriverImpl with all - * driver functions. - */ -NnApiSLDriverImpl* ANeuralNetworks_getSLDriverImpl(); -__END_DECLS
diff --git a/shim_and_sl/include/SupportLibraryWrapper.h b/shim_and_sl/include/SupportLibraryWrapper.h deleted file mode 100644 index d66deac..0000000 --- a/shim_and_sl/include/SupportLibraryWrapper.h +++ /dev/null
@@ -1,664 +0,0 @@ -/* - * Copyright (C) 2020 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -// Provides C++ classes to more easily use the Neural Networks API. -// TODO(b/117845862): this should be auto generated from NeuralNetworksWrapper.h. - -#ifndef ANDROID_PACKAGES_MODULES_NEURALNETWORKS_SL_SUPPORT_LIBRARY_WRAPPER_H -#define ANDROID_PACKAGES_MODULES_NEURALNETWORKS_SL_SUPPORT_LIBRARY_WRAPPER_H - -#include <android-base/unique_fd.h> -#include <android/hardware_buffer.h> -#include <math.h> -#include <unistd.h> - -#include <algorithm> -#include <memory> -#include <optional> -#include <string> -#include <utility> -#include <vector> - -#include "NeuralNetworksWrapper.h" -#include "SupportLibrary.h" - -using namespace ::android::nn::wrapper; - -namespace android { -namespace nn { -namespace sl_wrapper { - -using ::android::nn::wrapper::Duration; -using ::android::nn::wrapper::OperandType; -using ::android::nn::wrapper::Result; - -class Memory { - public: - // Takes ownership of a ANeuralNetworksMemory - Memory(const NnApiSupportLibrary* nnapi, ANeuralNetworksMemory* memory) - : mNnApi(nnapi), mMemory(memory), mSize(0) {} - - // Create from a FD and may takes ownership of the fd. - Memory(const NnApiSupportLibrary* nnapi, size_t size, int protect, int fd, size_t offset, - bool ownsFd = false) - : mNnApi(nnapi), mOwnedFd(ownsFd ? 
std::optional<int>{fd} : std::nullopt), mSize(size) { - mValid = mNnApi->ANeuralNetworksMemory_createFromFd(size, protect, fd, offset, &mMemory) == - ANEURALNETWORKS_NO_ERROR; - } - - // Create from a buffer, may take ownership. - Memory(const NnApiSupportLibrary* nnapi, AHardwareBuffer* buffer, bool ownAHWB, size_t size) - : mNnApi(nnapi), mOwnedAHWB(ownAHWB ? buffer : nullptr), mSize(size) { - mValid = mNnApi->ANeuralNetworksMemory_createFromAHardwareBuffer(buffer, &mMemory) == - ANEURALNETWORKS_NO_ERROR; - } - - // Create from a desc - Memory(const NnApiSupportLibrary* nnapi, ANeuralNetworksMemoryDesc* desc, size_t size) - : mNnApi(nnapi), mSize(size) { - mValid = mNnApi->ANeuralNetworksMemory_createFromDesc(desc, &mMemory) == - ANEURALNETWORKS_NO_ERROR; - } - - virtual ~Memory() { - if (mMemory) { - mNnApi->ANeuralNetworksMemory_free(mMemory); - } - if (mOwnedFd) { - close(*mOwnedFd); - } - if (mOwnedAHWB) { - AHardwareBuffer_release(mOwnedAHWB); - } - } - - // Disallow copy semantics to ensure the runtime object can only be freed - // once. Copy semantics could be enabled if some sort of reference counting - // or deep-copy system for runtime objects is added later. - Memory(const Memory&) = delete; - Memory& operator=(const Memory&) = delete; - - // Move semantics to remove access to the runtime object from the wrapper - // object that is being moved. This ensures the runtime object will be - // freed only once. 
- Memory(Memory&& other) { *this = std::move(other); } - Memory& operator=(Memory&& other) { - if (this != &other) { - if (mMemory) { - mNnApi->ANeuralNetworksMemory_free(mMemory); - } - if (mOwnedFd) { - close(*mOwnedFd); - } - if (mOwnedAHWB) { - AHardwareBuffer_release(mOwnedAHWB); - } - - mMemory = other.mMemory; - mValid = other.mValid; - mNnApi = other.mNnApi; - mOwnedFd = other.mOwnedFd; - mOwnedAHWB = other.mOwnedAHWB; - other.mMemory = nullptr; - other.mValid = false; - other.mOwnedFd.reset(); - other.mOwnedAHWB = nullptr; - } - return *this; - } - - ANeuralNetworksMemory* get() const { return mMemory; } - bool isValid() const { return mValid; } - size_t getSize() const { return mSize; } - Result copyTo(Memory& other) { - return static_cast<Result>(mNnApi->ANeuralNetworksMemory_copy(mMemory, other.mMemory)); - } - - private: - const NnApiSupportLibrary* mNnApi = nullptr; - ANeuralNetworksMemory* mMemory = nullptr; - bool mValid = true; - std::optional<int> mOwnedFd; - AHardwareBuffer* mOwnedAHWB = nullptr; - size_t mSize; -}; - -class Model { - public: - Model(const NnApiSupportLibrary* nnapi) : mNnApi(nnapi) { - mValid = mNnApi->ANeuralNetworksModel_create(&mModel) == ANEURALNETWORKS_NO_ERROR; - } - ~Model() { - if (mModel) { - mNnApi->ANeuralNetworksModel_free(mModel); - } - } - - // Disallow copy semantics to ensure the runtime object can only be freed - // once. Copy semantics could be enabled if some sort of reference counting - // or deep-copy system for runtime objects is added later. - Model(const Model&) = delete; - Model& operator=(const Model&) = delete; - - // Move semantics to remove access to the runtime object from the wrapper - // object that is being moved. This ensures the runtime object will be - // freed only once. 
- Model(Model&& other) { *this = std::move(other); } - Model& operator=(Model&& other) { - if (this != &other) { - if (mModel != nullptr) { - mNnApi->ANeuralNetworksModel_free(mModel); - } - mNnApi = other.mNnApi; - mModel = other.mModel; - mNextOperandId = other.mNextOperandId; - mValid = other.mValid; - mRelaxed = other.mRelaxed; - mFinished = other.mFinished; - mOperands = std::move(other.mOperands); - mInputs = std::move(other.mInputs); - mOutputs = std::move(other.mOutputs); - other.mModel = nullptr; - other.mNextOperandId = 0; - other.mValid = false; - other.mRelaxed = false; - other.mFinished = false; - } - return *this; - } - - Result finish() { - if (mValid) { - auto result = static_cast<Result>(mNnApi->ANeuralNetworksModel_finish(mModel)); - if (result != Result::NO_ERROR) { - mValid = false; - } - mFinished = true; - return result; - } else { - return Result::BAD_STATE; - } - } - - uint32_t addOperand(const OperandType* type) { - if (mNnApi->ANeuralNetworksModel_addOperand(mModel, &type->operandType) != - ANEURALNETWORKS_NO_ERROR) { - mValid = false; - } else { - mOperands.push_back(*type); - } - - if (type->channelQuant) { - if (mNnApi->ANeuralNetworksModel_setOperandSymmPerChannelQuantParams( - mModel, mNextOperandId, &type->channelQuant.value().params) != - ANEURALNETWORKS_NO_ERROR) { - mValid = false; - } - } - - return mNextOperandId++; - } - - template <typename T> - uint32_t addConstantOperand(const OperandType* type, const T& value) { - static_assert(sizeof(T) <= ANEURALNETWORKS_MAX_SIZE_OF_IMMEDIATELY_COPIED_VALUES, - "Values larger than ANEURALNETWORKS_MAX_SIZE_OF_IMMEDIATELY_COPIED_VALUES " - "not supported"); - uint32_t index = addOperand(type); - setOperandValue(index, &value); - return index; - } - - uint32_t addModelOperand(const Model* value) { - OperandType operandType(Type::MODEL, {}); - uint32_t operand = addOperand(&operandType); - setOperandValueFromModel(operand, value); - return operand; - } - - void setOperandValue(uint32_t index, 
const void* buffer, size_t length) { - if (mNnApi->ANeuralNetworksModel_setOperandValue(mModel, index, buffer, length) != - ANEURALNETWORKS_NO_ERROR) { - mValid = false; - } - } - - template <typename T> - void setOperandValue(uint32_t index, const T* value) { - static_assert(!std::is_pointer<T>(), "No operand may have a pointer as its value"); - return setOperandValue(index, value, sizeof(T)); - } - - void setOperandValueFromMemory(uint32_t index, const Memory* memory, uint32_t offset, - size_t length) { - if (mNnApi->ANeuralNetworksModel_setOperandValueFromMemory( - mModel, index, memory->get(), offset, length) != ANEURALNETWORKS_NO_ERROR) { - mValid = false; - } - } - - void setOperandValueFromModel(uint32_t index, const Model* value) { - if (mNnApi->ANeuralNetworksModel_setOperandValueFromModel(mModel, index, value->mModel) != - ANEURALNETWORKS_NO_ERROR) { - mValid = false; - } - } - - void setOperandValueFromModel(uint32_t index, ANeuralNetworksModel* value) { - if (mNnApi->ANeuralNetworksModel_setOperandValueFromModel(mModel, index, value) != - ANEURALNETWORKS_NO_ERROR) { - mValid = false; - } - } - - void addOperation(ANeuralNetworksOperationType type, const std::vector<uint32_t>& inputs, - const std::vector<uint32_t>& outputs) { - if (mNnApi->ANeuralNetworksModel_addOperation( - mModel, type, static_cast<uint32_t>(inputs.size()), inputs.data(), - static_cast<uint32_t>(outputs.size()), - outputs.data()) != ANEURALNETWORKS_NO_ERROR) { - mValid = false; - } - } - void identifyInputsAndOutputs(const std::vector<uint32_t>& inputs, - const std::vector<uint32_t>& outputs) { - if (mNnApi->ANeuralNetworksModel_identifyInputsAndOutputs( - mModel, static_cast<uint32_t>(inputs.size()), inputs.data(), - static_cast<uint32_t>(outputs.size()), - outputs.data()) != ANEURALNETWORKS_NO_ERROR) { - mValid = false; - } else { - mInputs = inputs; - mOutputs = outputs; - } - } - - void relaxComputationFloat32toFloat16(bool isRelax) { - if 
(mNnApi->ANeuralNetworksModel_relaxComputationFloat32toFloat16(mModel, isRelax) == - ANEURALNETWORKS_NO_ERROR) { - mRelaxed = isRelax; - } - } - - void getExtensionOperandType(const std::string& extensionName, - uint16_t operandCodeWithinExtension, int32_t* type) { - if (mNnApi->ANeuralNetworksModel_getExtensionOperandType( - mModel, extensionName.c_str(), operandCodeWithinExtension, type) != - ANEURALNETWORKS_NO_ERROR) { - mValid = false; - } - } - - void getExtensionOperationType(const std::string& extensionName, - uint16_t operandCodeWithinExtension, - ANeuralNetworksOperationType* type) { - if (mNnApi->ANeuralNetworksModel_getExtensionOperationType( - mModel, extensionName.c_str(), operandCodeWithinExtension, type) != - ANEURALNETWORKS_NO_ERROR) { - mValid = false; - } - } - - void setOperandExtensionData(int32_t operandId, const void* data, size_t length) { - if (mNnApi->ANeuralNetworksModel_setOperandExtensionData(mModel, operandId, data, length) != - ANEURALNETWORKS_NO_ERROR) { - mValid = false; - } - } - - ANeuralNetworksModel* getHandle() const { return mModel; } - bool isValid() const { return mValid; } - bool isRelaxed() const { return mRelaxed; } - bool isFinished() const { return mFinished; } - - const std::vector<uint32_t>& getInputs() const { return mInputs; } - const std::vector<uint32_t>& getOutputs() const { return mOutputs; } - const std::vector<OperandType>& getOperands() const { return mOperands; } - - protected: - const NnApiSupportLibrary* mNnApi = nullptr; - ANeuralNetworksModel* mModel = nullptr; - // We keep track of the operand ID as a convenience to the caller. - uint32_t mNextOperandId = 0; - // We keep track of the operand datatypes/dimensions as a convenience to the caller. 
- std::vector<OperandType> mOperands; - std::vector<uint32_t> mInputs; - std::vector<uint32_t> mOutputs; - bool mValid = true; - bool mRelaxed = false; - bool mFinished = false; -}; - -class Compilation { - public: - // On success, createForDevice(s) will return Result::NO_ERROR and the created compilation; - // otherwise, it will return the error code and Compilation object wrapping a nullptr handle. - static std::pair<Result, Compilation> createForDevice(const NnApiSupportLibrary* nnapi, - const Model* model, - const ANeuralNetworksDevice* device) { - return createForDevices(nnapi, model, {device}); - } - static std::pair<Result, Compilation> createForDevices( - const NnApiSupportLibrary* nnapi, const Model* model, - const std::vector<const ANeuralNetworksDevice*>& devices) { - ANeuralNetworksCompilation* compilation = nullptr; - const Result result = - static_cast<Result>(nnapi->ANeuralNetworksCompilation_createForDevices( - model->getHandle(), devices.empty() ? nullptr : devices.data(), - devices.size(), &compilation)); - return {result, Compilation(nnapi, compilation)}; - } - - ~Compilation() { mNnApi->ANeuralNetworksCompilation_free(mCompilation); } - - // Disallow copy semantics to ensure the runtime object can only be freed - // once. Copy semantics could be enabled if some sort of reference counting - // or deep-copy system for runtime objects is added later. - Compilation(const Compilation&) = delete; - Compilation& operator=(const Compilation&) = delete; - - // Move semantics to remove access to the runtime object from the wrapper - // object that is being moved. This ensures the runtime object will be - // freed only once. 
- Compilation(Compilation&& other) { *this = std::move(other); } - Compilation& operator=(Compilation&& other) { - if (this != &other) { - mNnApi = other.mNnApi; - mNnApi->ANeuralNetworksCompilation_free(mCompilation); - mCompilation = other.mCompilation; - other.mCompilation = nullptr; - } - return *this; - } - - Result setPreference(ExecutePreference preference) { - return static_cast<Result>(mNnApi->ANeuralNetworksCompilation_setPreference( - mCompilation, static_cast<int32_t>(preference))); - } - - Result setPriority(ExecutePriority priority) { - return static_cast<Result>(mNnApi->ANeuralNetworksCompilation_setPriority( - mCompilation, static_cast<int32_t>(priority))); - } - - Result setTimeout(uint64_t durationNs) { - return static_cast<Result>( - mNnApi->ANeuralNetworksCompilation_setTimeout(mCompilation, durationNs)); - } - - Result setCaching(const std::string& cacheDir, const std::vector<uint8_t>& token) { - if (token.size() != ANEURALNETWORKS_BYTE_SIZE_OF_CACHE_TOKEN) { - return Result::BAD_DATA; - } - return static_cast<Result>(mNnApi->ANeuralNetworksCompilation_setCaching( - mCompilation, cacheDir.c_str(), token.data())); - } - - Result setCachingFromFds(const std::vector<int>& modelCacheFds, - const std::vector<int>& dataCacheFds, - const std::vector<uint8_t>& token) { - if (token.size() != ANEURALNETWORKS_BYTE_SIZE_OF_CACHE_TOKEN) { - return Result::BAD_DATA; - } - return static_cast<Result>(mNnApi->SL_ANeuralNetworksCompilation_setCachingFromFds( - mCompilation, modelCacheFds.data(), modelCacheFds.size(), dataCacheFds.data(), - dataCacheFds.size(), token.data())); - } - - Result setCachingFromFds(const std::vector<base::unique_fd>& modelCacheOwnedFds, - const std::vector<base::unique_fd>& dataCacheOwnedFds, - const std::vector<uint8_t>& token) { - std::vector<int> modelCacheFds, dataCacheFds; - for (const auto& fd : modelCacheOwnedFds) { - modelCacheFds.push_back(fd.get()); - } - for (const auto& fd : dataCacheOwnedFds) { - 
dataCacheFds.push_back(fd.get()); - } - return setCachingFromFds(modelCacheFds, dataCacheFds, token); - } - - Result finish() { - return static_cast<Result>(mNnApi->ANeuralNetworksCompilation_finish(mCompilation)); - } - - Result getPreferredMemoryAlignmentForInput(uint32_t index, uint32_t* alignment) const { - return static_cast<Result>( - mNnApi->ANeuralNetworksCompilation_getPreferredMemoryAlignmentForInput( - mCompilation, index, alignment)); - }; - - Result getPreferredMemoryPaddingForInput(uint32_t index, uint32_t* padding) const { - return static_cast<Result>( - mNnApi->ANeuralNetworksCompilation_getPreferredMemoryPaddingForInput( - mCompilation, index, padding)); - }; - - Result getPreferredMemoryAlignmentForOutput(uint32_t index, uint32_t* alignment) const { - return static_cast<Result>( - mNnApi->ANeuralNetworksCompilation_getPreferredMemoryAlignmentForOutput( - mCompilation, index, alignment)); - }; - - Result getPreferredMemoryPaddingForOutput(uint32_t index, uint32_t* padding) const { - return static_cast<Result>( - mNnApi->ANeuralNetworksCompilation_getPreferredMemoryPaddingForOutput( - mCompilation, index, padding)); - }; - - ANeuralNetworksCompilation* getHandle() const { return mCompilation; } - - protected: - // Takes the ownership of ANeuralNetworksCompilation. 
- Compilation(const NnApiSupportLibrary* nnapi, ANeuralNetworksCompilation* compilation) - : mNnApi(nnapi), mCompilation(compilation) {} - - const NnApiSupportLibrary* mNnApi = nullptr; - ANeuralNetworksCompilation* mCompilation = nullptr; -}; - -class Execution { - public: - Execution(const NnApiSupportLibrary* nnapi, const Compilation* compilation) - : mNnApi(nnapi), mCompilation(compilation->getHandle()) { - int result = mNnApi->ANeuralNetworksExecution_create(compilation->getHandle(), &mExecution); - if (result != 0) { - // TODO Handle the error - } - } - - ~Execution() { - if (mExecution) { - mNnApi->ANeuralNetworksExecution_free(mExecution); - } - } - - // Disallow copy semantics to ensure the runtime object can only be freed - // once. Copy semantics could be enabled if some sort of reference counting - // or deep-copy system for runtime objects is added later. - Execution(const Execution&) = delete; - Execution& operator=(const Execution&) = delete; - - // Move semantics to remove access to the runtime object from the wrapper - // object that is being moved. This ensures the runtime object will be - // freed only once. 
- Execution(Execution&& other) { *this = std::move(other); } - Execution& operator=(Execution&& other) { - if (this != &other) { - if (mExecution != nullptr) { - mNnApi->ANeuralNetworksExecution_free(mExecution); - } - mNnApi = other.mNnApi; - mCompilation = other.mCompilation; - mExecution = other.mExecution; - other.mCompilation = nullptr; - other.mExecution = nullptr; - } - return *this; - } - - Result setInput(uint32_t index, const void* buffer, size_t length, - const ANeuralNetworksOperandType* type = nullptr) { - return static_cast<Result>( - mNnApi->ANeuralNetworksExecution_setInput(mExecution, index, type, buffer, length)); - } - - template <typename T> - Result setInput(uint32_t index, const T* value, - const ANeuralNetworksOperandType* type = nullptr) { - static_assert(!std::is_pointer<T>(), "No operand may have a pointer as its value"); - return setInput(index, value, sizeof(T), type); - } - - Result setInputFromMemory(uint32_t index, const Memory* memory, uint32_t offset, - uint32_t length, const ANeuralNetworksOperandType* type = nullptr) { - return static_cast<Result>(mNnApi->ANeuralNetworksExecution_setInputFromMemory( - mExecution, index, type, memory->get(), offset, length)); - } - - Result setOutput(uint32_t index, void* buffer, size_t length, - const ANeuralNetworksOperandType* type = nullptr) { - return static_cast<Result>(mNnApi->ANeuralNetworksExecution_setOutput( - mExecution, index, type, buffer, length)); - } - - template <typename T> - Result setOutput(uint32_t index, T* value, const ANeuralNetworksOperandType* type = nullptr) { - static_assert(!std::is_pointer<T>(), "No operand may have a pointer as its value"); - return setOutput(index, value, sizeof(T), type); - } - - Result setOutputFromMemory(uint32_t index, const Memory* memory, uint32_t offset, - uint32_t length, const ANeuralNetworksOperandType* type = nullptr) { - return static_cast<Result>(mNnApi->ANeuralNetworksExecution_setOutputFromMemory( - mExecution, index, type, 
memory->get(), offset, length)); - } - - Result setLoopTimeout(uint64_t duration) { - return static_cast<Result>( - mNnApi->ANeuralNetworksExecution_setLoopTimeout(mExecution, duration)); - } - - Result setMeasureTiming(bool measure) { - return static_cast<Result>( - mNnApi->ANeuralNetworksExecution_setMeasureTiming(mExecution, measure)); - } - - Result setTimeout(uint64_t duration) { - return static_cast<Result>( - mNnApi->ANeuralNetworksExecution_setTimeout(mExecution, duration)); - } - - Result getDuration(Duration durationCode, uint64_t* duration) { - return static_cast<Result>(mNnApi->ANeuralNetworksExecution_getDuration( - mExecution, static_cast<int32_t>(durationCode), duration)); - } - - Result enableInputAndOutputPadding(bool enable) { - return static_cast<Result>( - mNnApi->ANeuralNetworksExecution_enableInputAndOutputPadding(mExecution, enable)); - } - - Result setReusable(bool reusable) { - return static_cast<Result>( - mNnApi->ANeuralNetworksExecution_setReusable(mExecution, reusable)); - } - - // By default, compute() uses the synchronous API. Either an argument or - // setComputeMode() can be used to change the behavior of compute() to - // use the burst API - // Returns the previous ComputeMode. 
- enum class ComputeMode { SYNC, BURST, FENCED }; - static ComputeMode setComputeMode(ComputeMode mode) { - ComputeMode oldComputeMode = mComputeMode; - mComputeMode = mode; - return oldComputeMode; - } - static ComputeMode getComputeMode() { return mComputeMode; } - - Result compute(ComputeMode computeMode = mComputeMode) { - switch (computeMode) { - case ComputeMode::SYNC: { - return static_cast<Result>(mNnApi->ANeuralNetworksExecution_compute(mExecution)); - } - case ComputeMode::BURST: { - ANeuralNetworksBurst* burst = nullptr; - Result result = static_cast<Result>( - mNnApi->ANeuralNetworksBurst_create(mCompilation, &burst)); - if (result != Result::NO_ERROR) { - return result; - } - result = static_cast<Result>( - mNnApi->ANeuralNetworksExecution_burstCompute(mExecution, burst)); - mNnApi->ANeuralNetworksBurst_free(burst); - return result; - } - case ComputeMode::FENCED: { - ANeuralNetworksEvent* event = nullptr; - Result result = static_cast<Result>( - mNnApi->ANeuralNetworksExecution_startComputeWithDependencies( - mExecution, nullptr, 0, 0, &event)); - if (result != Result::NO_ERROR) { - return result; - } - result = static_cast<Result>(mNnApi->ANeuralNetworksEvent_wait(event)); - mNnApi->ANeuralNetworksEvent_free(event); - return result; - } - } - return Result::BAD_DATA; - } - - Result startComputeWithDependencies(const std::vector<const ANeuralNetworksEvent*>& deps, - uint64_t duration, Event* event) { - ANeuralNetworksEvent* ev = nullptr; - Result result = static_cast<Result>( - NNAPI_CALL(ANeuralNetworksExecution_startComputeWithDependencies( - mExecution, deps.data(), deps.size(), duration, &ev))); - event->set(ev); - return result; - } - - Result getOutputOperandDimensions(uint32_t index, std::vector<uint32_t>* dimensions) { - uint32_t rank = 0; - Result result = static_cast<Result>( - mNnApi->ANeuralNetworksExecution_getOutputOperandRank(mExecution, index, &rank)); - dimensions->resize(rank); - if ((result != Result::NO_ERROR && result != 
Result::OUTPUT_INSUFFICIENT_SIZE) || - rank == 0) { - return result; - } - result = static_cast<Result>(mNnApi->ANeuralNetworksExecution_getOutputOperandDimensions( - mExecution, index, dimensions->data())); - return result; - } - - ANeuralNetworksExecution* getHandle() { return mExecution; }; - - private: - const NnApiSupportLibrary* mNnApi = nullptr; - ANeuralNetworksCompilation* mCompilation = nullptr; - ANeuralNetworksExecution* mExecution = nullptr; - - // Initialized to ComputeMode::SYNC in TestNeuralNetworksWrapper.cpp. - static ComputeMode mComputeMode; -}; - -} // namespace sl_wrapper -} // namespace nn -} // namespace android - -#endif // ANDROID_PACKAGES_MODULES_NEURALNETWORKS_SL_SUPPORT_LIBRARY_WRAPPER_H
diff --git a/shim_and_sl/public/NeuralNetworksShim.h b/shim_and_sl/public/NeuralNetworksShim.h deleted file mode 100644 index afc185b..0000000 --- a/shim_and_sl/public/NeuralNetworksShim.h +++ /dev/null
@@ -1,248 +0,0 @@ -/* - * Copyright (C) 2021 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * @addtogroup NeuralNetworks - * @{ - */ - -/** - * @file NeuralNetworksShim.h - */ - -#pragma once - -/****************************************************************** - * - * IMPORTANT NOTICE: - * - * This file is part of Android's set of stable system headers - * exposed by the Android NDK (Native Development Kit). - * - * Third-party source AND binary code relies on the definitions - * here to be FROZEN ON ALL UPCOMING PLATFORM RELEASES. - * - * - DO NOT MODIFY ENUMS (EXCEPT IF YOU ADD NEW 32-BIT VALUES) - * - DO NOT MODIFY CONSTANTS OR FUNCTIONAL MACROS - * - DO NOT CHANGE THE SIGNATURE OF FUNCTIONS IN ANY WAY - * - DO NOT CHANGE THE LAYOUT OR SIZE OF STRUCTURES - */ - -#include <stdbool.h> -#include <stddef.h> -#include <stdint.h> -#include <sys/cdefs.h> - -#include "NeuralNetworksSupportLibraryImpl.h" - -__BEGIN_DECLS - -/** - * Result codes. - */ -typedef enum { - ANNSHIM_NO_ERROR = 0, - - /** - * Failure caused by failure to load support library driver. - */ - ANNSHIM_FAILED_TO_LOAD_SL = 1, - - /** - * Failure caused by failure to register HAL service. - */ - ANNSHIM_FAILED_TO_REGISTER_SERVICE = 2, - - /** - * General failure. 
- */ - ANNSHIM_GENERAL_ERROR = 3, - - /** - * Invalid argument - */ - ANNSHIM_INVALID_ARGUMENT = 4, - -} ANeuralNetworksShimResultCode; - -/** - * Supplementary information required to expose NNAPI HAL Service on top of - * a NNAPI SL Driver. - */ -typedef struct ANeuralNetworksShimDeviceInfo ANeuralNetworksShimDeviceInfo; - -/** - * Additional parameters indicating how to devices should be registered. - */ -typedef struct ANeuralNetworksShimRegistrationParams ANeuralNetworksShimRegistrationParams; - -/** - * Allocate ANeuralNetworksShimDeviceInfo struct with a device name. - * - * Available since API level 31. - * - * @param deviceInfo The {@link ANeuralNetworksShimDeviceInfo} to be created. - * Set to NULL if unsuccessful. - * @param deviceName has to match NNAPI Device name exposed by SL Driver. - * @param serviceName name of the AIDL service backed by this SL Driver device. - * If null, the deviceName will be used as the service name. - * @return {@link ANeuralNetworksShimResultCode} enum values. - * Returns ANNSHIM_NO_ERROR if successful. - */ -int ANeuralNetworksShimDeviceInfo_create( - ANeuralNetworksShimDeviceInfo* _Nullable* _Nonnull deviceInfo, - const char* _Nonnull deviceName, const char* _Nullable serviceName) __INTRODUCED_IN(31); - -/** - * Free ANeuralNetworksShimDeviceInfo struct. - * - * Available since API level 31. - * - * @param deviceInfo The NNAPI shim device info to be destroyed. Passing NULL is acceptable and - * results in no operation. - */ -void ANeuralNetworksShimDeviceInfo_free(ANeuralNetworksShimDeviceInfo* _Nonnull deviceInfo) - __INTRODUCED_IN(31); - -/** - * Allocate ANeuralNetworksShimRegistrationParams struct. - * - * Available since API level 31. - * - * @param nnapiSupportLibraryPackage Handle to a NNAPI SL implementation. - * @param outRegistrationParams The {@link ANeuralNetworksShimRegistrationParams} to be created. - * Set to NULL if unsuccessful. - * @return {@link ANeuralNetworksShimResultCode} enum values. 
- * Returns ANNSHIM_NO_ERROR if successful. - */ -int ANeuralNetworksShimRegistrationParams_create( - NnApiSLDriverImpl* _Nonnull nnapiSupportLibraryPackage, - ANeuralNetworksShimRegistrationParams* _Nullable* _Nonnull outRegistrationParams) - __INTRODUCED_IN(31); - -/** - * Free ANeuralNetworksShimRegistrationParams struct. - * - * Available since API level 31. - * - * @param registrationParams The NNAPI shim registration parameters to be destroyed. Passing NULL is - * acceptable and results in no operation. - */ -void ANeuralNetworksShimRegistrationParams_free( - ANeuralNetworksShimRegistrationParams* _Nonnull registrationParams) __INTRODUCED_IN(31); - -/** - * Add device info to the registration parameters. - * - * Available since API level 31. - * - * @param registrationParams The NNAPI shim registration parameter struct to be modified. - * @param devicesToRegister ANeuralNetworksShimDeviceInfo struct, with name and supplementary info - * about NNAPI device to register. - * @return {@link ANeuralNetworksShimResultCode} enum values. - * Returns ANNSHIM_NO_ERROR if successful. - */ -int ANeuralNetworksShimRegistrationParams_addDeviceInfo( - ANeuralNetworksShimRegistrationParams* _Nonnull registrationParams, - const ANeuralNetworksShimDeviceInfo* _Nonnull deviceInfo) __INTRODUCED_IN(31); - -/** - * Set the number of listener threads for all registered services. - * - * By default, this value is 15, but this default may change in the future. The provided value must - * be non-zero. - * - * Available since API level 31. - * - * @param registrationParams The NNAPI shim registration parameter struct to be modified. - * @param numberOfListenerThreads Number of listener threads for the registered services. - * @return {@link ANeuralNetworksShimResultCode} enum values. - * Returns ANNSHIM_NO_ERROR if successful. 
- */ -int ANeuralNetworksShimRegistrationParams_setNumberOfListenerThreads( - ANeuralNetworksShimRegistrationParams* _Nonnull registrationParams, - uint32_t numberOfListenerThreads) __INTRODUCED_IN(31); - -/** - * Set whether to register the service eagerly or lazily. - * - * By default, the service is eagerly registered. - * - * Available since API level 31. - * - * @param registrationParams The NNAPI shim registration parameter struct to be modified. - * @param asLazy 'false' if the services should be registered with - * {@link AServiceManager_addService}, 'true' if the services should be registered - * with {@link AServiceManager_registerLazyService}. - * @return {@link ANeuralNetworksShimResultCode} enum values. - * Returns ANNSHIM_NO_ERROR if successful. - */ -int ANeuralNetworksShimRegistrationParams_registerAsLazyService( - ANeuralNetworksShimRegistrationParams* _Nonnull registrationParams, bool asLazy) - __INTRODUCED_IN(31); - -/** - * Specifies whether a minimum support device should be registered in the event a specified driver - * could not be registered from the NNAPI SL implementation. - * - * When called, {@link ANeuralNetworksShim_registerSupportLibraryService} will attempt to register - * all drivers specified by a {@link ANeuralNetworksShimDeviceInfo}. For some clients of this API - * (such as the legacy NNAPI vendor drivers), failing to register any driver should be considered - * an error, and {@link ANeuralNetworksShim_registerSupportLibraryService} should return with an - * error code. However, for other clients of this API (such as the NNAPI updatable vendor drivers), - * failing to register one driver should not prevent other drivers from being registered; instead, a - * driver with minimum support should instead be registered so that the devices registered with the - * Service Manager matches the service instances listed in the device manifest. 
- * - * By default, {@link ANeuralNetworksShim_registerSupportLibraryService} will immediately return - * with an error instead of registering a minimum support device. - * - * Available since API level 31. - * - * @param registrationParams The NNAPI shim registration parameter struct to be modified. - * @param fallback 'true' if a minimal device should be registered when the actual device is not - * able to be registered, 'false' if - * {@link ANeuralNetworksShim_registerSupportLibraryService} should instead - * immediately fail with an error. - * @return {@link ANeuralNetworksShimResultCode} enum values. - * Returns ANNSHIM_NO_ERROR if successful. - */ -int ANeuralNetworksShimRegistrationParams_fallbackToMinimumSupportDevice( - ANeuralNetworksShimRegistrationParams* _Nonnull registrationParams, bool fallback) - __INTRODUCED_IN(31); - -/** - * Register NNAPI support library driver as HAL services. - * - * Takes a NNAPI SL implementation and registers each NNAPI Device it exposes as a - * separate HAL/AIDL service. - * - * If loading SL driver is successful, it blocks and never returns. If there's - * any problem with the support library driver, it returns on error. - * - * Available since API level 31. - * - * @param registrationParams Additional arguments for how the devices should be registered. - * @return {@link ANeuralNetworksShimResultCode} enum values. - * Blocks forever if successful. - */ -int ANeuralNetworksShim_registerSupportLibraryService( - const ANeuralNetworksShimRegistrationParams* _Nonnull registrationParams) - __INTRODUCED_IN(31); - -__END_DECLS - -/** @} */
diff --git a/shim_and_sl/public/NeuralNetworksSupportLibraryImpl.h b/shim_and_sl/public/NeuralNetworksSupportLibraryImpl.h deleted file mode 100644 index ee3c529..0000000 --- a/shim_and_sl/public/NeuralNetworksSupportLibraryImpl.h +++ /dev/null
@@ -1,1030 +0,0 @@ -/* - * Copyright (C) 2021 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#pragma once - -/****************************************************************** - * - * IMPORTANT NOTICE: - * - * This file is part of Android's set of stable system headers - * exposed by the Android NDK (Native Development Kit). - * - * Third-party source AND binary code relies on the definitions - * here to be FROZEN ON ALL UPCOMING PLATFORM RELEASES. - * - * - DO NOT MODIFY ENUMS (EXCEPT IF YOU ADD NEW 32-BIT VALUES) - * - DO NOT MODIFY CONSTANTS OR FUNCTIONAL MACROS - * - DO NOT CHANGE THE SIGNATURE OF FUNCTIONS IN ANY WAY - * - DO NOT CHANGE THE LAYOUT OR SIZE OF STRUCTURES - */ - -#include <stdint.h> -#include <stdio.h> -#include <stdlib.h> - -#include <NeuralNetworksTypes.h> - -__BEGIN_DECLS - -/** - * Performance information for the reference workload. - * - * Used by a driver to report its performance characteristics. - */ -typedef struct { - /** - * Ratio of the time taken by the driver to execute the workload compared to the time the CPU - * would take for the same workload. A lower number is better. - */ - float execTime; - - /** - * Ratio of the energy used by the driver compared to what the CPU would use for doing the same - * workload. A lower number is better. - */ - float powerUsage; -} SL_ANeuralNetworksPerformanceInfo; - -/** - * Driver performance when operating on a particular data type. 
In the case of float32 data, this is - * used when the calculations are not relaxed. - */ -typedef struct { - int32_t operandType; - SL_ANeuralNetworksPerformanceInfo performanceInfo; -} SL_ANeuralNetworksOperandPerformanceInfo; - -/** - * Information about NNAPI Vendor extension operand type. - */ -typedef struct { - /** - * The byte size of the operand (if scalar) or of a single element (if tensor). - */ - uint32_t byteSize; - - /** - * The extension operand type. - */ - uint16_t type; - - /** - * Indicates whether the extension operand type represents a tensor or a scalar. - */ - bool isTensor; -} SL_ANeuralNetworksExtensionOperandTypeInformation; - -/** - * The different performance info kinds. - */ -typedef enum { - /** - * Driver performance when operating on float32 data but performing calculations with range - * and/or precision as low as that of the IEEE 754 16-bit floating-point format. - */ - SL_ANEURALNETWORKS_CAPABILITIES_PERFORMANCE_RELAXED_SCALAR = 0, - - /** - * Driver performance when operating on float32 data but performing calculations with range - * and/or precision as low as that of the IEEE 754 16-bit floating-point format. - */ - SL_ANEURALNETWORKS_CAPABILITIES_PERFORMANCE_RELAXED_TENSOR = 1, - - /** - * Performance of an {@link ANEURALNETWORKS_IF} operation is the sum of {@link - * ANEURALNETWORKS_IF}'s performance and the mean of performance for the two branch subgraphs, - * where performance for a subgraph is the sum of the performance of all operations within the - * subgraph. - */ - SL_ANEURALNETWORKS_CAPABILITIES_PERFORMANCE_IF = 2, - - /** - * Performance of a {@link ANEURALNETWORKS_WHILE} operation is the sum of {@link - * ANEURALNETWORKS_WHILE}'s performance, performance for the condition subgraph and performance - * for the body subgraph, where performance for a subgraph is the sum of the performance of all - * operations within the subgraph. 
- */ - SL_ANEURALNETWORKS_CAPABILITIES_PERFORMANCE_WHILE = 3, -} SL_ANeuralNetworksPerformanceInfoCode; - -/** - * Sets the compilation caching signature and file descriptors. - * - * Provides optional caching information to the support library driver for - * faster repeated compilation. - * - * See {@link ANeuralNetworksCompilation} for information on multithreaded usage. - * - * @param compilation The compilation to be modified. - * @param modelCacheFds An array of file descriptors for the security-sensitive cache. - * The file descriptors will be duplicated. - * @param numModelCacheFiles The number of the model cache files. - * @param dataCacheFds An array of file descriptors for the constants' cache. - * The file descriptors will be duplicated. - * @param numDataCacheFiles The number of the data cache files. - * @param token The token provided by the user to specify a model must be of length - * ANEURALNETWORKS_BYTE_SIZE_OF_CACHE_TOKEN. The user should ensure that - * the token is unique to a model within the application. The NNAPI - * runtime cannot detect token collisions; a collision will result in a - * failed execution or in a successful execution that produces incorrect - * output values. - * - * @return ANEURALNETWORKS_NO_ERROR if successful. - * - * Available in the compabibility library build only. - */ -int SL_ANeuralNetworksCompilation_setCachingFromFds(ANeuralNetworksCompilation* compilation, - const int* modelCacheFds, - const uint32_t numModelCacheFiles, - const int* dataCacheFds, - const uint32_t numDataCacheFiles, - const uint8_t* token); - -/** - * Gets the caching requirements of the driver implementation. - * - * There are two types of cache file descriptors provided to the driver: model cache and data cache. - * - * The data cache is for caching constant data, possibly including preprocessed and transformed - * tensor buffers. 
Any modification to the data cache should have no worse effect than generating - * bad output values at execution time. - * - * The model cache is for caching security-sensitive data such as compiled executable machine code - * in the device's native binary format. A modification to the model cache may affect the driver's - * execution behavior, and a malicious client could make use of this to execute beyond the granted - * permission. - * - * ANeuralNetworksDevice_getNumberOfCacheFilesNeeded returns how many of each type of cache files - * the driver implementation needs to cache a single compilation. Returning 0 for both types - * indicates compilation caching is not supported by this driver. The driver may still choose not to - * cache certain compiled models even if it reports that caching is supported. - * - * @param device The representation of the specified device. - * @param numModelCacheFiles The number of the model cache files. A value of 0 is returned on error. - * @param numDataCacheFiles The number of the data cache files. A value of 0 is returned on error. - * - * @return ANEURALNETWORKS_NO_ERROR if successful. - * - * Available in the compabibility library build only. - */ -int SL_ANeuralNetworksDevice_getNumberOfCacheFilesNeeded(const ANeuralNetworksDevice* device, - uint32_t* numModelCacheFiles, - uint32_t* numDataCacheFiles); - -/** - * Get NNAPI Device performance/power capabilities. - * - * This returns performance of non-extension operations. - * - * Performance of an operation other than {@link ANEURALNETWORKS_IF} and {@link - * ANEURALNETWORKS_WHILE} comes from the type of its first operand. - * - * @param device The representation of the specified device. - * @param performanceInfoKind The kind of performance info to be queried. Must be one of the values - * from {@link SL_ANeuralNetworksPerformanceInfoCode}. - * @return ANEURALNETWORKS_NO_ERROR if successful. - * - * Available in the compabibility library build only. 
- */ -int SL_ANeuralNetworksDevice_getPerformanceInfo(const ANeuralNetworksDevice* device, - int32_t performanceInfoKind, - SL_ANeuralNetworksPerformanceInfo* performanceInfo); - -/** - * Get NNAPI Device operand performance/power capabilities. - * - * This returns performance of non-extension operations. - * - * Performance of an operation other than {@link ANEURALNETWORKS_IF} and {@link - * ANEURALNETWORKS_WHILE} comes from the type of its first operand. - * - * @param device The representation of the specified device. - * @param context Context to pass to the callback. - * @param callback Callback taking operand performance and context. - * @return ANEURALNETWORKS_NO_ERROR if successful. - * - * Available in the compabibility library build only. - */ -int SL_ANeuralNetworksDevice_forEachOperandPerformanceInfo( - const ANeuralNetworksDevice* device, void* context, - void (*callback)(SL_ANeuralNetworksOperandPerformanceInfo, void*)); - -/** - * Get the number of extensions supported by the driver implementation. - * - * @param device The representation of the specified device. - * @param vendorExtensionCount The number of vendor extensions the device supports. To be used in - * {@link ANeuralNetworksDevice_getVendorExtensionName} and {@link - * ANeuralNetworksDevice_forEachVendorExtensionOperandTypeInformation}. - * @return ANEURALNETWORKS_NO_ERROR if successful. - * - * Available in the compabibility library build only. - */ -int SL_ANeuralNetworksDevice_getVendorExtensionCount(const ANeuralNetworksDevice* device, - uint32_t* vendorExtensionCount); - -/** - * Gets information about a specified extension supported by the driver implementation. - * - * @param device The representation of the specified device. - * @param vendorExtensionIndex The index of the specified vendor extension. Must be less than the - * number of available vendor extensions. - * @param extensionName Name of the NNAPI HAL Extension. - * @return ANEURALNETWORKS_NO_ERROR if successful. 
- * - * Available in the compabibility library build only. - */ -int SL_ANeuralNetworksDevice_getVendorExtensionName(const ANeuralNetworksDevice* device, - uint32_t vendorExtensionIndex, - const char** extensionName); - -/** - * Gets a specified extension's operand type information supported by the driver implementation. - * - * @param device The representation of the specified device. - * @param vendorExtensionIndex The index of the specified vendor extension. Must be less than the - * number of available vendor extensions. - * @param context Context to pass to the callback. - * @param callback Callback taking operand type information and context. - * @return ANEURALNETWORKS_NO_ERROR if successful. - * - * Available in the compabibility library build only. - */ -int SL_ANeuralNetworksDevice_forEachVendorExtensionOperandTypeInformation( - const ANeuralNetworksDevice* device, uint32_t vendorExtensionIndex, void* context, - void (*callback)(SL_ANeuralNetworksExtensionOperandTypeInformation, void*)); - -/** - * Base version of NnApiSLDriverImpl with version information. - * - * NnApiSLDriverImpl is non-opaque, versioning struct to make it possible to pass - * its instance straight from the SL Driver to the shim registration. The glue code - * that loads the SL and calls the shim is non-updatable. An opaque struct would require the - * glue code to be updated if we would like to use newer NNAPI Feature Level. - * - * There's expectation that for M>N, NnApiSLDriverImplFL(M) is - * a strict superset of NnApiSLDriverImplFL(N), and NnApiSLDriverImplFL(M)* can - * be reinterpret_cast to NnApiSLDriverImplFL(N)* safely. - */ -typedef struct { - /** - * Version of the NnApiSLDriverImpl struct. Uses {@link FeatureLevelCode} values - * for versioning. - */ - int64_t implFeatureLevel; -} NnApiSLDriverImpl; - -/** - * NnApiSLDriverImpl for an Updatable SL Driver implementing {@link - * ANEURALNETWORKS_FEATURE_LEVEL_5}. 
- * - * This struct must set its implFeatureLevel to {@link ANEURALNETWORKS_FEATURE_LEVEL_5}. - */ -typedef struct { - /** - * Base type with version information. Allows to cast a pointer of this type - * to NnApiSLDriverImpl* with valid version information. - * For this type, its .version fields should be always set to {@link - * ANEURALNETWORKS_FEATURE_LEVEL_5}. - */ - NnApiSLDriverImpl base; - - /** - * SL Driver implementation of {@link ANeuralNetworksBurst_create}. - * Behavior, arguments, and outputs match NNAPI Runtime function - * {@link ANeuralNetworksBurst_create}, - * at the feature level of this NnApiSLDriver struct. - */ - int (*ANeuralNetworksBurst_create)(ANeuralNetworksCompilation* compilation, - ANeuralNetworksBurst** burst); - - /** - * SL Driver implementation of {@link ANeuralNetworksBurst_free}. - * Behavior, arguments, and outputs match NNAPI Runtime function - * {@link ANeuralNetworksBurst_free}, - * at the feature level of this NnApiSLDriver struct. - */ - void (*ANeuralNetworksBurst_free)(ANeuralNetworksBurst* burst); - - /** - * SL Driver implementation of {@link ANeuralNetworksCompilation_createForDevices}. - * Behavior, arguments, and outputs match NNAPI Runtime function - * {@link ANeuralNetworksCompilation_createForDevices}, - * at the feature level of this NnApiSLDriver struct. - */ - int (*ANeuralNetworksCompilation_createForDevices)(ANeuralNetworksModel* model, - const ANeuralNetworksDevice* const* devices, - uint32_t numDevices, - ANeuralNetworksCompilation** compilation); - - /** - * SL Driver implementation of {@link ANeuralNetworksCompilation_finish}. - * Behavior, arguments, and outputs match NNAPI Runtime function - * {@link ANeuralNetworksCompilation_finish}, - * at the feature level of this NnApiSLDriver struct. - */ - int (*ANeuralNetworksCompilation_finish)(ANeuralNetworksCompilation* compilation); - - /** - * SL Driver implementation of {@link ANeuralNetworksCompilation_free}. 
- * Behavior, arguments, and outputs match NNAPI Runtime function - * {@link ANeuralNetworksCompilation_free}, - * at the feature level of this NnApiSLDriver struct. - */ - void (*ANeuralNetworksCompilation_free)(ANeuralNetworksCompilation* compilation); - - /** - * SL Driver implementation of {@link - * ANeuralNetworksCompilation_getPreferredMemoryAlignmentForInput}. Behavior, arguments, and - * outputs match NNAPI Runtime function - * {@link ANeuralNetworksCompilation_getPreferredMemoryAlignmentForInput}, - * at the feature level of this NnApiSLDriver struct. - */ - int (*ANeuralNetworksCompilation_getPreferredMemoryAlignmentForInput)( - const ANeuralNetworksCompilation* compilation, uint32_t index, uint32_t* alignment); - - /** - * SL Driver implementation of {@link - * ANeuralNetworksCompilation_getPreferredMemoryAlignmentForOutput}. Behavior, arguments, and - * outputs match NNAPI Runtime function - * {@link ANeuralNetworksCompilation_getPreferredMemoryAlignmentForOutput}, - * at the feature level of this NnApiSLDriver struct. - */ - int (*ANeuralNetworksCompilation_getPreferredMemoryAlignmentForOutput)( - const ANeuralNetworksCompilation* compilation, uint32_t index, uint32_t* alignment); - - /** - * SL Driver implementation of {@link - * ANeuralNetworksCompilation_getPreferredMemoryPaddingForInput}. Behavior, arguments, and - * outputs match NNAPI Runtime function - * {@link ANeuralNetworksCompilation_getPreferredMemoryPaddingForInput}, - * at the feature level of this NnApiSLDriver struct. - */ - int (*ANeuralNetworksCompilation_getPreferredMemoryPaddingForInput)( - const ANeuralNetworksCompilation* compilation, uint32_t index, uint32_t* padding); - - /** - * SL Driver implementation of {@link - * ANeuralNetworksCompilation_getPreferredMemoryPaddingForOutput}. Behavior, arguments, and - * outputs match NNAPI Runtime function - * {@link ANeuralNetworksCompilation_getPreferredMemoryPaddingForOutput}, - * at the feature level of this NnApiSLDriver struct. 
- */ - int (*ANeuralNetworksCompilation_getPreferredMemoryPaddingForOutput)( - const ANeuralNetworksCompilation* compilation, uint32_t index, uint32_t* padding); - - /** - * SL Driver implementation of {@link ANeuralNetworksCompilation_setCaching}. - * Behavior, arguments, and outputs match NNAPI Runtime function - * {@link ANeuralNetworksCompilation_setCaching}, - * at the feature level of this NnApiSLDriver struct. - */ - int (*ANeuralNetworksCompilation_setCaching)(ANeuralNetworksCompilation* compilation, - const char* cacheDir, const uint8_t* token); - - /** - * SL Driver implementation of {@link ANeuralNetworksCompilation_setPreference}. - * Behavior, arguments, and outputs match NNAPI Runtime function - * {@link ANeuralNetworksCompilation_setPreference}, - * at the feature level of this NnApiSLDriver struct. - */ - int (*ANeuralNetworksCompilation_setPreference)(ANeuralNetworksCompilation* compilation, - int32_t preference); - - /** - * SL Driver implementation of {@link ANeuralNetworksCompilation_setPriority}. - * Behavior, arguments, and outputs match NNAPI Runtime function - * {@link ANeuralNetworksCompilation_setPriority}, - * at the feature level of this NnApiSLDriver struct. - */ - int (*ANeuralNetworksCompilation_setPriority)(ANeuralNetworksCompilation* compilation, - int priority); - - /** - * SL Driver implementation of {@link ANeuralNetworksCompilation_setTimeout}. - * Behavior, arguments, and outputs match NNAPI Runtime function - * {@link ANeuralNetworksCompilation_setTimeout}, - * at the feature level of this NnApiSLDriver struct. - */ - int (*ANeuralNetworksCompilation_setTimeout)(ANeuralNetworksCompilation* compilation, - uint64_t duration); - - /** - * SL Driver implementation of {@link ANeuralNetworksDevice_getExtensionSupport}. - * Behavior, arguments, and outputs match NNAPI Runtime function - * {@link ANeuralNetworksDevice_getExtensionSupport}, - * at the feature level of this NnApiSLDriver struct. 
- */ - int (*ANeuralNetworksDevice_getExtensionSupport)(const ANeuralNetworksDevice* device, - const char* extensionName, - bool* isExtensionSupported); - - /** - * SL Driver implementation of {@link ANeuralNetworksDevice_getFeatureLevel}. - * Behavior, arguments, and outputs match NNAPI Runtime function - * {@link ANeuralNetworksDevice_getFeatureLevel}, - * at the feature level of this NnApiSLDriver struct. - */ - int (*ANeuralNetworksDevice_getFeatureLevel)(const ANeuralNetworksDevice* device, - int64_t* featureLevel); - - /** - * SL Driver implementation of {@link ANeuralNetworksDevice_getName}. - * Behavior, arguments, and outputs match NNAPI Runtime function - * {@link ANeuralNetworksDevice_getName}, - * at the feature level of this NnApiSLDriver struct. - */ - int (*ANeuralNetworksDevice_getName)(const ANeuralNetworksDevice* device, const char** name); - - /** - * SL Driver implementation of {@link ANeuralNetworksDevice_getType}. - * Behavior, arguments, and outputs match NNAPI Runtime function - * {@link ANeuralNetworksDevice_getType}, - * at the feature level of this NnApiSLDriver struct. - */ - int (*ANeuralNetworksDevice_getType)(const ANeuralNetworksDevice* device, int32_t* type); - - /** - * SL Driver implementation of {@link ANeuralNetworksDevice_getVersion}. - * Behavior, arguments, and outputs match NNAPI Runtime function - * {@link ANeuralNetworksDevice_getVersion}, - * at the feature level of this NnApiSLDriver struct. - */ - int (*ANeuralNetworksDevice_getVersion)(const ANeuralNetworksDevice* device, - const char** version); - - /** - * SL Driver implementation of {@link ANeuralNetworksDevice_wait}. - * Behavior, arguments, and outputs match NNAPI Runtime function - * {@link ANeuralNetworksDevice_wait}, - * at the feature level of this NnApiSLDriver struct. - */ - int (*ANeuralNetworksDevice_wait)(const ANeuralNetworksDevice* device); - - /** - * SL Driver implementation of {@link ANeuralNetworksEvent_createFromSyncFenceFd}. 
- * Behavior, arguments, and outputs match NNAPI Runtime function - * {@link ANeuralNetworksEvent_createFromSyncFenceFd}, - * at the feature level of this NnApiSLDriver struct. - */ - int (*ANeuralNetworksEvent_createFromSyncFenceFd)(int sync_fence_fd, - ANeuralNetworksEvent** event); - - /** - * SL Driver implementation of {@link ANeuralNetworksEvent_free}. - * Behavior, arguments, and outputs match NNAPI Runtime function - * {@link ANeuralNetworksEvent_free}, - * at the feature level of this NnApiSLDriver struct. - */ - void (*ANeuralNetworksEvent_free)(ANeuralNetworksEvent* event); - - /** - * SL Driver implementation of {@link ANeuralNetworksEvent_getSyncFenceFd}. - * Behavior, arguments, and outputs match NNAPI Runtime function - * {@link ANeuralNetworksEvent_getSyncFenceFd}, - * at the feature level of this NnApiSLDriver struct. - */ - int (*ANeuralNetworksEvent_getSyncFenceFd)(const ANeuralNetworksEvent* event, - int* sync_fence_fd); - - /** - * SL Driver implementation of {@link ANeuralNetworksEvent_wait}. - * Behavior, arguments, and outputs match NNAPI Runtime function - * {@link ANeuralNetworksEvent_wait}, - * at the feature level of this NnApiSLDriver struct. - */ - int (*ANeuralNetworksEvent_wait)(ANeuralNetworksEvent* event); - - /** - * SL Driver implementation of {@link ANeuralNetworksExecution_burstCompute}. - * Behavior, arguments, and outputs match NNAPI Runtime function - * {@link ANeuralNetworksExecution_burstCompute}, - * at the feature level of this NnApiSLDriver struct. - */ - int (*ANeuralNetworksExecution_burstCompute)(ANeuralNetworksExecution* execution, - ANeuralNetworksBurst* burst); - - /** - * SL Driver implementation of {@link ANeuralNetworksExecution_compute}. - * Behavior, arguments, and outputs match NNAPI Runtime function - * {@link ANeuralNetworksExecution_compute}, - * at the feature level of this NnApiSLDriver struct. 
- */ - int (*ANeuralNetworksExecution_compute)(ANeuralNetworksExecution* execution); - - /** - * SL Driver implementation of {@link ANeuralNetworksExecution_create}. - * Behavior, arguments, and outputs match NNAPI Runtime function - * {@link ANeuralNetworksExecution_create}, - * at the feature level of this NnApiSLDriver struct. - */ - int (*ANeuralNetworksExecution_create)(ANeuralNetworksCompilation* compilation, - ANeuralNetworksExecution** execution); - - /** - * SL Driver implementation of {@link ANeuralNetworksExecution_enableInputAndOutputPadding}. - * Behavior, arguments, and outputs match NNAPI Runtime function - * {@link ANeuralNetworksExecution_enableInputAndOutputPadding}, - * at the feature level of this NnApiSLDriver struct. - */ - int (*ANeuralNetworksExecution_enableInputAndOutputPadding)(ANeuralNetworksExecution* execution, - bool enable); - - /** - * SL Driver implementation of {@link ANeuralNetworksExecution_free}. - * Behavior, arguments, and outputs match NNAPI Runtime function - * {@link ANeuralNetworksExecution_free}, - * at the feature level of this NnApiSLDriver struct. - */ - void (*ANeuralNetworksExecution_free)(ANeuralNetworksExecution* execution); - - /** - * SL Driver implementation of {@link ANeuralNetworksExecution_getDuration}. - * Behavior, arguments, and outputs match NNAPI Runtime function - * {@link ANeuralNetworksExecution_getDuration}, - * at the feature level of this NnApiSLDriver struct. - */ - int (*ANeuralNetworksExecution_getDuration)(const ANeuralNetworksExecution* execution, - int32_t durationCode, uint64_t* duration); - - /** - * SL Driver implementation of {@link ANeuralNetworksExecution_getOutputOperandDimensions}. - * Behavior, arguments, and outputs match NNAPI Runtime function - * {@link ANeuralNetworksExecution_getOutputOperandDimensions}, - * at the feature level of this NnApiSLDriver struct. 
- */ - int (*ANeuralNetworksExecution_getOutputOperandDimensions)(ANeuralNetworksExecution* execution, - int32_t index, uint32_t* dimensions); - - /** - * SL Driver implementation of {@link ANeuralNetworksExecution_getOutputOperandRank}. - * Behavior, arguments, and outputs match NNAPI Runtime function - * {@link ANeuralNetworksExecution_getOutputOperandRank}, - * at the feature level of this NnApiSLDriver struct. - */ - int (*ANeuralNetworksExecution_getOutputOperandRank)(ANeuralNetworksExecution* execution, - int32_t index, uint32_t* rank); - - /** - * SL Driver implementation of {@link ANeuralNetworksExecution_setInput}. - * Behavior, arguments, and outputs match NNAPI Runtime function - * {@link ANeuralNetworksExecution_setInput}, - * at the feature level of this NnApiSLDriver struct. - */ - int (*ANeuralNetworksExecution_setInput)(ANeuralNetworksExecution* execution, int32_t index, - const ANeuralNetworksOperandType* type, - const void* buffer, size_t length); - - /** - * SL Driver implementation of {@link ANeuralNetworksExecution_setInputFromMemory}. - * Behavior, arguments, and outputs match NNAPI Runtime function - * {@link ANeuralNetworksExecution_setInputFromMemory}, - * at the feature level of this NnApiSLDriver struct. - */ - int (*ANeuralNetworksExecution_setInputFromMemory)(ANeuralNetworksExecution* execution, - int32_t index, - const ANeuralNetworksOperandType* type, - const ANeuralNetworksMemory* memory, - size_t offset, size_t length); - - /** - * SL Driver implementation of {@link ANeuralNetworksExecution_setLoopTimeout}. - * Behavior, arguments, and outputs match NNAPI Runtime function - * {@link ANeuralNetworksExecution_setLoopTimeout}, - * at the feature level of this NnApiSLDriver struct. - */ - int (*ANeuralNetworksExecution_setLoopTimeout)(ANeuralNetworksExecution* execution, - uint64_t duration); - - /** - * SL Driver implementation of {@link ANeuralNetworksExecution_setMeasureTiming}. 
- * Behavior, arguments, and outputs match NNAPI Runtime function - * {@link ANeuralNetworksExecution_setMeasureTiming}, - * at the feature level of this NnApiSLDriver struct. - */ - int (*ANeuralNetworksExecution_setMeasureTiming)(ANeuralNetworksExecution* execution, - bool measure); - - /** - * SL Driver implementation of {@link ANeuralNetworksExecution_setOutput}. - * Behavior, arguments, and outputs match NNAPI Runtime function - * {@link ANeuralNetworksExecution_setOutput}, - * at the feature level of this NnApiSLDriver struct. - */ - int (*ANeuralNetworksExecution_setOutput)(ANeuralNetworksExecution* execution, int32_t index, - const ANeuralNetworksOperandType* type, void* buffer, - size_t length); - - /** - * SL Driver implementation of {@link ANeuralNetworksExecution_setOutputFromMemory}. - * Behavior, arguments, and outputs match NNAPI Runtime function - * {@link ANeuralNetworksExecution_setOutputFromMemory}, - * at the feature level of this NnApiSLDriver struct. - */ - int (*ANeuralNetworksExecution_setOutputFromMemory)(ANeuralNetworksExecution* execution, - int32_t index, - const ANeuralNetworksOperandType* type, - const ANeuralNetworksMemory* memory, - size_t offset, size_t length); - - /** - * SL Driver implementation of {@link ANeuralNetworksExecution_setReusable}. - * Behavior, arguments, and outputs match NNAPI Runtime function - * {@link ANeuralNetworksExecution_setReusable}, - * at the feature level of this NnApiSLDriver struct. - */ - int (*ANeuralNetworksExecution_setReusable)(ANeuralNetworksExecution* execution, bool reusable); - - /** - * SL Driver implementation of {@link ANeuralNetworksExecution_setTimeout}. - * Behavior, arguments, and outputs match NNAPI Runtime function - * {@link ANeuralNetworksExecution_setTimeout}, - * at the feature level of this NnApiSLDriver struct. 
- */ - int (*ANeuralNetworksExecution_setTimeout)(ANeuralNetworksExecution* execution, - uint64_t duration); - - /** - * SL Driver implementation of {@link ANeuralNetworksExecution_startComputeWithDependencies}. - * Behavior, arguments, and outputs match NNAPI Runtime function - * {@link ANeuralNetworksExecution_startComputeWithDependencies}, - * at the feature level of this NnApiSLDriver struct. - */ - int (*ANeuralNetworksExecution_startComputeWithDependencies)( - ANeuralNetworksExecution* execution, const ANeuralNetworksEvent* const* dependencies, - uint32_t num_dependencies, uint64_t duration, ANeuralNetworksEvent** event); - - /** - * SL Driver implementation of {@link ANeuralNetworksMemoryDesc_addInputRole}. - * Behavior, arguments, and outputs match NNAPI Runtime function - * {@link ANeuralNetworksMemoryDesc_addInputRole}, - * at the feature level of this NnApiSLDriver struct. - */ - int (*ANeuralNetworksMemoryDesc_addInputRole)(ANeuralNetworksMemoryDesc* desc, - const ANeuralNetworksCompilation* compilation, - uint32_t index, float frequency); - - /** - * SL Driver implementation of {@link ANeuralNetworksMemoryDesc_addOutputRole}. - * Behavior, arguments, and outputs match NNAPI Runtime function - * {@link ANeuralNetworksMemoryDesc_addOutputRole}, - * at the feature level of this NnApiSLDriver struct. - */ - int (*ANeuralNetworksMemoryDesc_addOutputRole)(ANeuralNetworksMemoryDesc* desc, - const ANeuralNetworksCompilation* compilation, - uint32_t index, float frequency); - - /** - * SL Driver implementation of {@link ANeuralNetworksMemoryDesc_create}. - * Behavior, arguments, and outputs match NNAPI Runtime function - * {@link ANeuralNetworksMemoryDesc_create}, - * at the feature level of this NnApiSLDriver struct. - */ - int (*ANeuralNetworksMemoryDesc_create)(ANeuralNetworksMemoryDesc** desc); - - /** - * SL Driver implementation of {@link ANeuralNetworksMemoryDesc_finish}. 
- * Behavior, arguments, and outputs match NNAPI Runtime function - * {@link ANeuralNetworksMemoryDesc_finish}, - * at the feature level of this NnApiSLDriver struct. - */ - int (*ANeuralNetworksMemoryDesc_finish)(ANeuralNetworksMemoryDesc* desc); - - /** - * SL Driver implementation of {@link ANeuralNetworksMemoryDesc_free}. - * Behavior, arguments, and outputs match NNAPI Runtime function - * {@link ANeuralNetworksMemoryDesc_free}, - * at the feature level of this NnApiSLDriver struct. - */ - void (*ANeuralNetworksMemoryDesc_free)(ANeuralNetworksMemoryDesc* desc); - - /** - * SL Driver implementation of {@link ANeuralNetworksMemoryDesc_setDimensions}. - * Behavior, arguments, and outputs match NNAPI Runtime function - * {@link ANeuralNetworksMemoryDesc_setDimensions}, - * at the feature level of this NnApiSLDriver struct. - */ - int (*ANeuralNetworksMemoryDesc_setDimensions)(ANeuralNetworksMemoryDesc* desc, uint32_t rank, - const uint32_t* dimensions); - - /** - * SL Driver implementation of {@link ANeuralNetworksMemory_copy}. - * Behavior, arguments, and outputs match NNAPI Runtime function - * {@link ANeuralNetworksMemory_copy}, - * at the feature level of this NnApiSLDriver struct. - */ - int (*ANeuralNetworksMemory_copy)(const ANeuralNetworksMemory* src, - const ANeuralNetworksMemory* dst); - - /** - * SL Driver implementation of {@link ANeuralNetworksMemory_createFromAHardwareBuffer}. - * Behavior, arguments, and outputs match NNAPI Runtime function - * {@link ANeuralNetworksMemory_createFromAHardwareBuffer}, - * at the feature level of this NnApiSLDriver struct. - */ - int (*ANeuralNetworksMemory_createFromAHardwareBuffer)(const AHardwareBuffer* ahwb, - ANeuralNetworksMemory** memory); - - /** - * SL Driver implementation of {@link ANeuralNetworksMemory_createFromDesc}. - * Behavior, arguments, and outputs match NNAPI Runtime function - * {@link ANeuralNetworksMemory_createFromDesc}, - * at the feature level of this NnApiSLDriver struct. 
- */ - int (*ANeuralNetworksMemory_createFromDesc)(const ANeuralNetworksMemoryDesc* desc, - ANeuralNetworksMemory** memory); - - /** - * SL Driver implementation of {@link ANeuralNetworksMemory_createFromFd}. - * Behavior, arguments, and outputs match NNAPI Runtime function - * {@link ANeuralNetworksMemory_createFromFd}, - * at the feature level of this NnApiSLDriver struct. - */ - int (*ANeuralNetworksMemory_createFromFd)(size_t size, int protect, int fd, size_t offset, - ANeuralNetworksMemory** memory); - - /** - * SL Driver implementation of {@link ANeuralNetworksMemory_free}. - * Behavior, arguments, and outputs match NNAPI Runtime function - * {@link ANeuralNetworksMemory_free}, - * at the feature level of this NnApiSLDriver struct. - */ - void (*ANeuralNetworksMemory_free)(ANeuralNetworksMemory* memory); - - /** - * SL Driver implementation of {@link ANeuralNetworksModel_addOperand}. - * Behavior, arguments, and outputs match NNAPI Runtime function - * {@link ANeuralNetworksModel_addOperand}, - * at the feature level of this NnApiSLDriver struct. - */ - int (*ANeuralNetworksModel_addOperand)(ANeuralNetworksModel* model, - const ANeuralNetworksOperandType* type); - - /** - * SL Driver implementation of {@link ANeuralNetworksModel_addOperation}. - * Behavior, arguments, and outputs match NNAPI Runtime function - * {@link ANeuralNetworksModel_addOperation}, - * at the feature level of this NnApiSLDriver struct. - */ - int (*ANeuralNetworksModel_addOperation)(ANeuralNetworksModel* model, - ANeuralNetworksOperationType type, uint32_t inputCount, - const uint32_t* inputs, uint32_t outputCount, - const uint32_t* outputs); - - /** - * SL Driver implementation of {@link ANeuralNetworksModel_create}. - * Behavior, arguments, and outputs match NNAPI Runtime function - * {@link ANeuralNetworksModel_create}, - * at the feature level of this NnApiSLDriver struct. 
- */ - int (*ANeuralNetworksModel_create)(ANeuralNetworksModel** model); - - /** - * SL Driver implementation of {@link ANeuralNetworksModel_finish}. - * Behavior, arguments, and outputs match NNAPI Runtime function - * {@link ANeuralNetworksModel_finish}, - * at the feature level of this NnApiSLDriver struct. - */ - int (*ANeuralNetworksModel_finish)(ANeuralNetworksModel* model); - - /** - * SL Driver implementation of {@link ANeuralNetworksModel_free}. - * Behavior, arguments, and outputs match NNAPI Runtime function - * {@link ANeuralNetworksModel_free}, - * at the feature level of this NnApiSLDriver struct. - */ - void (*ANeuralNetworksModel_free)(ANeuralNetworksModel* model); - - /** - * SL Driver implementation of {@link ANeuralNetworksModel_getExtensionOperandType}. - * Behavior, arguments, and outputs match NNAPI Runtime function - * {@link ANeuralNetworksModel_getExtensionOperandType}, - * at the feature level of this NnApiSLDriver struct. - */ - int (*ANeuralNetworksModel_getExtensionOperandType)(ANeuralNetworksModel* model, - const char* extensionName, - uint16_t operandCodeWithinExtension, - int32_t* type); - - /** - * SL Driver implementation of {@link ANeuralNetworksModel_getExtensionOperationType}. - * Behavior, arguments, and outputs match NNAPI Runtime function - * {@link ANeuralNetworksModel_getExtensionOperationType}, - * at the feature level of this NnApiSLDriver struct. - */ - int (*ANeuralNetworksModel_getExtensionOperationType)(ANeuralNetworksModel* model, - const char* extensionName, - uint16_t operationCodeWithinExtension, - ANeuralNetworksOperationType* type); - - /** - * SL Driver implementation of {@link ANeuralNetworksModel_getSupportedOperationsForDevices}. - * Behavior, arguments, and outputs match NNAPI Runtime function - * {@link ANeuralNetworksModel_getSupportedOperationsForDevices}, - * at the feature level of this NnApiSLDriver struct. 
- */ - int (*ANeuralNetworksModel_getSupportedOperationsForDevices)( - const ANeuralNetworksModel* model, const ANeuralNetworksDevice* const* devices, - uint32_t numDevices, bool* supportedOps); - - /** - * SL Driver implementation of {@link ANeuralNetworksModel_identifyInputsAndOutputs}. - * Behavior, arguments, and outputs match NNAPI Runtime function - * {@link ANeuralNetworksModel_identifyInputsAndOutputs}, - * at the feature level of this NnApiSLDriver struct. - */ - int (*ANeuralNetworksModel_identifyInputsAndOutputs)(ANeuralNetworksModel* model, - uint32_t inputCount, - const uint32_t* inputs, - uint32_t outputCount, - const uint32_t* outputs); - - /** - * SL Driver implementation of {@link ANeuralNetworksModel_relaxComputationFloat32toFloat16}. - * Behavior, arguments, and outputs match NNAPI Runtime function - * {@link ANeuralNetworksModel_relaxComputationFloat32toFloat16}, - * at the feature level of this NnApiSLDriver struct. - */ - int (*ANeuralNetworksModel_relaxComputationFloat32toFloat16)(ANeuralNetworksModel* model, - bool allow); - - /** - * SL Driver implementation of {@link ANeuralNetworksModel_setOperandExtensionData}. - * Behavior, arguments, and outputs match NNAPI Runtime function - * {@link ANeuralNetworksModel_setOperandExtensionData}, - * at the feature level of this NnApiSLDriver struct. - */ - int (*ANeuralNetworksModel_setOperandExtensionData)(ANeuralNetworksModel* model, int32_t index, - const void* data, size_t length); - - /** - * SL Driver implementation of {@link ANeuralNetworksModel_setOperandSymmPerChannelQuantParams}. - * Behavior, arguments, and outputs match NNAPI Runtime function - * {@link ANeuralNetworksModel_setOperandSymmPerChannelQuantParams}, - * at the feature level of this NnApiSLDriver struct. 
- */ - int (*ANeuralNetworksModel_setOperandSymmPerChannelQuantParams)( - ANeuralNetworksModel* model, int32_t index, - const ANeuralNetworksSymmPerChannelQuantParams* channelQuant); - - /** - * SL Driver implementation of {@link ANeuralNetworksModel_setOperandValue}. - * Behavior, arguments, and outputs match NNAPI Runtime function - * {@link ANeuralNetworksModel_setOperandValue}, - * at the feature level of this NnApiSLDriver struct. - */ - int (*ANeuralNetworksModel_setOperandValue)(ANeuralNetworksModel* model, int32_t index, - const void* buffer, size_t length); - - /** - * SL Driver implementation of {@link ANeuralNetworksModel_setOperandValueFromMemory}. - * Behavior, arguments, and outputs match NNAPI Runtime function - * {@link ANeuralNetworksModel_setOperandValueFromMemory}, - * at the feature level of this NnApiSLDriver struct. - */ - int (*ANeuralNetworksModel_setOperandValueFromMemory)(ANeuralNetworksModel* model, - int32_t index, - const ANeuralNetworksMemory* memory, - size_t offset, size_t length); - - /** - * SL Driver implementation of {@link ANeuralNetworksModel_setOperandValueFromModel}. - * Behavior, arguments, and outputs match NNAPI Runtime function - * {@link ANeuralNetworksModel_setOperandValueFromModel}, - * at the feature level of this NnApiSLDriver struct. - */ - int (*ANeuralNetworksModel_setOperandValueFromModel)(ANeuralNetworksModel* model, int32_t index, - const ANeuralNetworksModel* value); - - /** - * SL Driver implementation of {@link ANeuralNetworks_getDefaultLoopTimeout}. - * Behavior, arguments, and outputs match NNAPI Runtime function - * {@link ANeuralNetworks_getDefaultLoopTimeout}, - * at the feature level of this NnApiSLDriver struct. - */ - uint64_t (*ANeuralNetworks_getDefaultLoopTimeout)(); - - /** - * SL Driver implementation of {@link ANeuralNetworks_getDevice}. - * Behavior, arguments, and outputs match NNAPI Runtime function - * {@link ANeuralNetworks_getDevice}, - * at the feature level of this NnApiSLDriver struct. 
- */ - int (*ANeuralNetworks_getDevice)(uint32_t devIndex, ANeuralNetworksDevice** device); - - /** - * SL Driver implementation of {@link ANeuralNetworks_getDeviceCount}. - * Behavior, arguments, and outputs match NNAPI Runtime function - * {@link ANeuralNetworks_getDeviceCount}, - * at the feature level of this NnApiSLDriver struct. - */ - int (*ANeuralNetworks_getDeviceCount)(uint32_t* numDevices); - - /** - * SL Driver implementation of {@link ANeuralNetworks_getMaximumLoopTimeout}. - * Behavior, arguments, and outputs match NNAPI Runtime function - * {@link ANeuralNetworks_getMaximumLoopTimeout}, - * at the feature level of this NnApiSLDriver struct. - */ - uint64_t (*ANeuralNetworks_getMaximumLoopTimeout)(); - - /** - * SL Driver implementation of {@link ANeuralNetworks_getRuntimeFeatureLevel}. - * Behavior, arguments, and outputs match NNAPI Runtime function - * {@link ANeuralNetworks_getRuntimeFeatureLevel}, - * at the feature level of this NnApiSLDriver struct. - */ - int64_t (*ANeuralNetworks_getRuntimeFeatureLevel)(); - - /** - * SL Driver implementation of a function similar to - * {@link ANeuralNetworksCompilation_setCaching} that takes file descriptors - * instead of a cache directory. - * Behavior and outputs match NNAPI Runtime function - * {@link ANeuralNetworksCompilation_setCaching}, - * at the feature level of this NnApiSLDriver struct. - */ - int (*SL_ANeuralNetworksCompilation_setCachingFromFds)(ANeuralNetworksCompilation* compilation, - const int* modelCacheFds, - const uint32_t numModelCacheFiles, - const int* dataCacheFds, - const uint32_t numDataCacheFiles, - const uint8_t* token); - - /** - * SL Driver implementation of {@link SL_ANeuralNetworksDevice_getNumberOfCacheFilesNeeded}. - * Behavior, arguments, and outputs match NNAPI Runtime function - * {@link SL_ANeuralNetworksDevice_getNumberOfCacheFilesNeeded}, - * at the feature level of this NnApiSLDriver struct. 
- */ - int (*SL_ANeuralNetworksDevice_getNumberOfCacheFilesNeeded)(const ANeuralNetworksDevice* device, - uint32_t* numModelCacheFiles, - uint32_t* numDataCacheFiles); - - /** - * SL Driver implementation of {@link SL_ANeuralNetworksDevice_getPerformanceInfo}. - * Behavior, arguments, and outputs match NNAPI Runtime function - * {@link SL_ANeuralNetworksDevice_getPerformanceInfo}, - * at the feature level of this NnApiSLDriver struct. - */ - int (*SL_ANeuralNetworksDevice_getPerformanceInfo)( - const ANeuralNetworksDevice* device, int32_t performanceInfoKind, - SL_ANeuralNetworksPerformanceInfo* performanceInfo); - - /** - * SL Driver implementation of {@link - * SL_ANeuralNetworksDevice_forEachOperandPerformanceInfo}. Behavior, arguments, and - * outputs match NNAPI Runtime function - * {@link SL_ANeuralNetworksDevice_forEachOperandPerformanceInfo}, - * at the feature level of this NnApiSLDriver struct. - */ - int (*SL_ANeuralNetworksDevice_forEachOperandPerformanceInfo)( - const ANeuralNetworksDevice* device, void* context, - void (*callback)(SL_ANeuralNetworksOperandPerformanceInfo, void*)); - - /** - * SL Driver implementation of {@link SL_ANeuralNetworksDevice_getVendorExtensionCount}. - * Behavior, arguments, and outputs match NNAPI Runtime function - * {@link SL_ANeuralNetworksDevice_getVendorExtensionCount}, - * at the feature level of this NnApiSLDriver struct. - */ - int (*SL_ANeuralNetworksDevice_getVendorExtensionCount)(const ANeuralNetworksDevice* device, - uint32_t* vendorExtensionCount); - - /** - * SL Driver implementation of {@link SL_ANeuralNetworksDevice_getVendorExtensionName}. - * Behavior, arguments, and outputs match NNAPI Runtime function - * {@link SL_ANeuralNetworksDevice_getVendorExtensionName}, - * at the feature level of this NnApiSLDriver struct. 
- */ - int (*SL_ANeuralNetworksDevice_getVendorExtensionName)(const ANeuralNetworksDevice* device, - uint32_t vendorExtensionIndex, - const char** extensionName); - - /** - * SL Driver implementation of {@link - * SL_ANeuralNetworksDevice_forEachVendorExtensionOperandTypeInformation}. Behavior, arguments, - * and outputs match NNAPI Runtime function - * {@link SL_ANeuralNetworksDevice_forEachVendorExtensionOperandTypeInformation}, - * at the feature level of this NnApiSLDriver struct. - */ - int (*SL_ANeuralNetworksDevice_forEachVendorExtensionOperandTypeInformation)( - const ANeuralNetworksDevice* device, uint32_t vendorExtensionIndex, void* context, - void (*callback)(SL_ANeuralNetworksExtensionOperandTypeInformation, void*)); -} NnApiSLDriverImplFL5; - -__END_DECLS
diff --git a/tools/api/NeuralNetworks.t b/tools/api/NeuralNetworks.t new file mode 100644 index 0000000..d7cf8b2 --- /dev/null +++ b/tools/api/NeuralNetworks.t
@@ -0,0 +1,2578 @@ +%% template file for generating NeuralNetworks.h. +%% see README.md. +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * @addtogroup NeuralNetworks + * @{ + */ + +/** + * @file NeuralNetworks.h + */ + +#ifndef ANDROID_FRAMEWORKS_ML_NN_RUNTIME_NEURAL_NETWORKS_H +#define ANDROID_FRAMEWORKS_ML_NN_RUNTIME_NEURAL_NETWORKS_H + +/****************************************************************** + * + * IMPORTANT NOTICE: + * + * This file is part of Android's set of stable system headers + * exposed by the Android NDK (Native Development Kit). + * + * Third-party source AND binary code relies on the definitions + * here to be FROZEN ON ALL UPCOMING PLATFORM RELEASES. + * + * - DO NOT MODIFY ENUMS (EXCEPT IF YOU ADD NEW 32-BIT VALUES) + * - DO NOT MODIFY CONSTANTS OR FUNCTIONAL MACROS + * - DO NOT CHANGE THE SIGNATURE OF FUNCTIONS IN ANY WAY + * - DO NOT CHANGE THE LAYOUT OR SIZE OF STRUCTURES + */ + +#include <android/hardware_buffer.h> +#include <stddef.h> +#include <stdint.h> +#include <sys/cdefs.h> + +__BEGIN_DECLS + +%insert Operand_1.0_Comment +typedef enum { +%insert Operand_1.0 +%insert Operand_1.2 +%insert Operand_1.3 +} OperandCode; + +%insert Operation_1.0_Comment +typedef enum { + // Operations below are available since API level 27. + +%insert Operation_1.0 + + // Operations below are available since API level 28. 
+ +%insert Operation_1.1 + + // Operations below are available since API level 29. + +%insert Operation_1.2 + + // Operations below are available since API level 30. + +%insert Operation_1.3 +} OperationCode; + +/** + * Fused activation function types. + * + * + * Available since API level 27. + */ +typedef enum { + /** NO fused activation function. */ + ANEURALNETWORKS_FUSED_NONE = 0, + /** Fused ReLU activation function. */ + ANEURALNETWORKS_FUSED_RELU = 1, + /** Fused ReLU1 activation function. */ + ANEURALNETWORKS_FUSED_RELU1 = 2, + /** Fused ReLU6 activation function. */ + ANEURALNETWORKS_FUSED_RELU6 = 3, +} FuseCode; + +/** + * Implicit padding algorithms. + * + * + * Available since API level 27. + */ +typedef enum { + /** + * SAME padding. + * Padding on both ends are the "same": + * padding_to_beginning = total_padding / 2 + * padding_to_end = (total_padding + 1)/2. + * i.e., for even number of padding, padding to both ends are exactly + * the same; for odd number of padding, padding to the ending is bigger + * than the padding to the beginning by 1. + * + * total_padding is a function of input, stride, dilation and filter size. + * It could be computed as follows: + * out_size = (input + stride - 1) / stride + * effective_filter_size = (filter_size - 1) * dilation + 1 + * needed_input = (out_size - 1) * stride + effective_filter_size + * total_padding = max(0, needed_input - input_size) + * The computation is the same for the horizontal and vertical directions. + */ + ANEURALNETWORKS_PADDING_SAME = 1, + + /** + * VALID padding. + * No padding. When the input size is not evenly divisible by + * the filter size, the input at the end that could not fill + * the whole filter tile will simply be ignored. + */ + ANEURALNETWORKS_PADDING_VALID = 2, +} PaddingCode; + +/** + * Execution preferences. + * + * Available since API level 27. + */ +typedef enum { + /** + * Prefer executing in a way that minimizes battery drain. 
+ * This is desirable for compilations that will be executed often. + */ + ANEURALNETWORKS_PREFER_LOW_POWER = 0, + /** + * Prefer returning a single answer as fast as possible, even if this causes + * more power consumption. + */ + ANEURALNETWORKS_PREFER_FAST_SINGLE_ANSWER = 1, + /** + * Prefer maximizing the throughput of successive frames, for example when + * processing successive frames coming from the camera. + */ + ANEURALNETWORKS_PREFER_SUSTAINED_SPEED = 2, +} PreferenceCode; + +/** + * Device types. + * + * The type of NNAPI device. + */ +typedef enum { + /** The device type cannot be provided. */ + ANEURALNETWORKS_DEVICE_UNKNOWN = 0, + /** The device does not fall into any category below. */ + ANEURALNETWORKS_DEVICE_OTHER = 1, + /** The device runs NNAPI models on single or multi-core CPU. */ + ANEURALNETWORKS_DEVICE_CPU = 2, + /** The device can run NNAPI models and also accelerate graphics APIs such + * as OpenGL ES and Vulkan. */ + ANEURALNETWORKS_DEVICE_GPU = 3, + /** Dedicated accelerator for Machine Learning workloads. */ + ANEURALNETWORKS_DEVICE_ACCELERATOR = 4, +} DeviceTypeCode; + +/** + * Result codes. + * + * <p>Any NNAPI function can return any result code, including result codes not + * currently documented. Any value other than {@link ANEURALNETWORKS_NO_ERROR} + * indicates a failure of some kind.</p> + * + * <p>Additional information about the nature of a failure can be obtained from + * the device log after enabling NNAPI debugging by setting the debug.nn.vlog + * property to 1, e.g., by calling "adb shell setprop debug.nn.vlog 1".</p> + * + * Available since API level 27. + */ +typedef enum { + /** + * Operation was successful. + */ + ANEURALNETWORKS_NO_ERROR = 0, + + /** + * Failure caused by not enough available memory. + */ + ANEURALNETWORKS_OUT_OF_MEMORY = 1, + + ANEURALNETWORKS_INCOMPLETE = 2, + + /** + * Failure caused by unexpected null argument. 
+ */ + ANEURALNETWORKS_UNEXPECTED_NULL = 3, + + /** + * Failure caused by invalid function arguments, invalid model definition, + * invalid execution definition or invalid data at execution time. + */ + ANEURALNETWORKS_BAD_DATA = 4, + + /** + * Failure caused by failed model execution. + */ + ANEURALNETWORKS_OP_FAILED = 5, + + /** + * Failure caused by object being in the wrong state. + */ + ANEURALNETWORKS_BAD_STATE = 6, + + /** + * Failure caused by not being able to map a file into memory. + * This may be caused by a file descriptor not being mappable, or an AHardwareBuffer + * not supported by the device. + * Mitigate by reading its content into memory. + */ + ANEURALNETWORKS_UNMAPPABLE = 7, + + /** + * Failure caused by insufficient buffer size provided to a model output. + */ + ANEURALNETWORKS_OUTPUT_INSUFFICIENT_SIZE = 8, + + /** + * Failure caused by a device not being available. + */ + ANEURALNETWORKS_UNAVAILABLE_DEVICE = 9, + + /** + * Failure because a deadline could not be met for a task, but future + * deadlines may still be met for the same task after a short delay. + * + * Available since API level 30. + */ + ANEURALNETWORKS_MISSED_DEADLINE_TRANSIENT = 10, + + /** + * Failure because a deadline could not be met for a task, and future + * deadlines will likely also not be met for the same task even after a + * short delay. + * + * Available since API level 30. + */ + ANEURALNETWORKS_MISSED_DEADLINE_PERSISTENT = 11, + + /** + * Failure because of a resource limitation within the driver, but future + * calls for the same task may still succeed after a short delay. + * + * Available since API level 30. + */ + ANEURALNETWORKS_RESOURCE_EXHAUSTED_TRANSIENT = 12, + + /** + * Failure because of a resource limitation within the driver, and future + * calls for the same task will likely also fail even after a short + * delay. + * + * Available since API level 30. 
+ */ + ANEURALNETWORKS_RESOURCE_EXHAUSTED_PERSISTENT = 13, + + /** + * Failure indicating an object is in a dead state. + * + * Available since API level 30. + */ + ANEURALNETWORKS_DEAD_OBJECT = 14, +} ResultCode; + +/** + * For {@link ANeuralNetworksModel_setOperandValue}, values with a + * length smaller or equal to this will be immediately copied into + * the model. The size is in bytes. + * + * Available since API level 27. + */ +enum { ANEURALNETWORKS_MAX_SIZE_OF_IMMEDIATELY_COPIED_VALUES = 128 }; + +/** + * For {@link ANeuralNetworksCompilation_setCaching}, specify the size + * of the cache token required from the application. The size is in bytes. + * + * Available since API level 29. + */ +enum { ANEURALNETWORKS_BYTE_SIZE_OF_CACHE_TOKEN = 32 }; + +/** + * Different duration measurements. + * + * Durations are measured in nanoseconds. + * + * Available since API level 29. + */ +typedef enum { + // Execution time on hardware (not driver, which runs on host processor). + ANEURALNETWORKS_DURATION_ON_HARDWARE = 0, + // Execution time in driver (including time on hardware). Excludes overhead + // such as that of the runtime itself and the IPC needed for the runtime to + // communicate with the driver. + ANEURALNETWORKS_DURATION_IN_DRIVER = 1, + // Execution time on hardware, after all dependencies have been signaled. + // If no dependencies specified (for example, if the execution was scheduled other + // than with {@link ANeuralNetworksExecution_startComputeWithDependencies}), the + // reported time will be the same as ANEURALNETWORKS_DURATION_ON_HARDWARE. + // Available since API level 30. + ANEURALNETWORKS_FENCED_DURATION_ON_HARDWARE = 2, + // Execution time in driver, after all dependencies have been signaled. Excludes + // overhead such as that of the runtime itself and the IPC needed for the runtime + // to communicate with the driver. 
+ // If no dependencies specified (for example, if the execution was scheduled other + // than with {@link ANeuralNetworksExecution_startComputeWithDependencies}), the + // reported time will be the same as ANEURALNETWORKS_DURATION_IN_DRIVER. + // Available since API level 30. + ANEURALNETWORKS_FENCED_DURATION_IN_DRIVER = 3, +} DurationCode; + +/** + * Relative execution priority. + * + * Available since API level 30. + */ +typedef enum { + ANEURALNETWORKS_PRIORITY_LOW = 90, + ANEURALNETWORKS_PRIORITY_MEDIUM = 100, + ANEURALNETWORKS_PRIORITY_HIGH = 110, + ANEURALNETWORKS_PRIORITY_DEFAULT = ANEURALNETWORKS_PRIORITY_MEDIUM, +} PriorityCode; + +/** + * ANeuralNetworksMemory is an opaque type that represents memory. + * + * This type is used to represent shared memory, memory mapped files, + * and similar memories. + * + * By using shared memory, a program can efficiently communicate to the + * runtime and drivers the tensors that define a model. See + * {@link ANeuralNetworksModel_setOperandValueFromMemory}. An application + * should typically create one shared memory object that contains every constant tensor + * needed to define a model. {@link ANeuralNetworksMemory_createFromFd} can be used to + * create shared memory from a file handle. + * {@link ANeuralNetworksMemory_createFromAHardwareBuffer} can be used to + * create shared memory from an AHardwareBuffer handle. + * + * Memory objects can also be used to specify the input and output arguments of + * an execution. See {@link ANeuralNetworksExecution_setInputFromMemory} + * and {@link ANeuralNetworksExecution_setOutputFromMemory}. 
+ * + * When calling {@link ANeuralNetworksModel_setOperandValueFromMemory}, + * {@link ANeuralNetworksExecution_setInputFromMemory} and + * {@link ANeuralNetworksExecution_setOutputFromMemory}, each operand in the shared + * memory object must be aligned on a boundary of a byte size that is a multiple + * of the element type byte size, e.g., a tensor with + * {@link ANEURALNETWORKS_TENSOR_FLOAT32} type must be aligned on 4-byte boundary. + * + * It is the application's responsibility to ensure that there are no uses of + * the memory after calling {@link ANeuralNetworksMemory_free}. This includes + * any model which references this memory because of a call to + * {@link ANeuralNetworksModel_setOperandValueFromMemory}, any compilation + * created using such a model, any execution object or burst object created + * using such a compilation, or any execution which references this memory + * because of a call to {@link ANeuralNetworksExecution_setInputFromMemory} or + * {@link ANeuralNetworksExecution_setOutputFromMemory}. + * + * Available since API level 27. + * + * Starting at API level 30, the application may request creation of device native memory from + * {@link ANeuralNetworksMemoryDesc} to avoid potential memory copying and transformation + * overhead between executions. See also {@link ANeuralNetworksMemoryDesc} and + * {@link ANeuralNetworksMemory_createFromDesc}. + */ +typedef struct ANeuralNetworksMemory ANeuralNetworksMemory; + +/** + * ANeuralNetworksModel is an opaque type that contains a description of the + * mathematical operations that constitute the model. 
+ * + * <p>Build the model by calling<ul> + * <li>{@link ANeuralNetworksModel_create}</li> + * <li>{@link ANeuralNetworksModel_addOperation}</li> + * <li>{@link ANeuralNetworksModel_addOperand}</li> + * </ul> + * + * This forms a graph in which each operation and operand is a node, a + * directed edge from an operand to an operation indicates that the + * operand is an input to the operation, and a directed edge from an + * operation to an operand indicates that the operand is an output + * from the operation. This graph must be acyclic. + * + * A model is completed by calling {@link ANeuralNetworksModel_finish}. + * A model is destroyed by calling {@link ANeuralNetworksModel_free}. + * + * <p>A model cannot be modified once {@link ANeuralNetworksModel_finish} + * has been called on it.</p> + * + * <p>It is the application's responsibility to make sure that only one thread + * modifies a model at a given time. It is however safe for more than one + * thread to use the model once {@link ANeuralNetworksModel_finish} has returned.</p> + * + * <p>It is also the application's responsibility to ensure that there are no + * other uses of the model after calling {@link ANeuralNetworksModel_free}. + * This includes any compilation, execution object or burst object created using + * the model.</p> + * + * Available since API level 27. + */ +typedef struct ANeuralNetworksModel ANeuralNetworksModel; + +/** + * ANeuralNetworksCompilation is an opaque type that can be used to compile + * a machine learning model. 
+ * + * <p>To use:<ul> + * <li>Create a new compilation instance by calling the + * {@link ANeuralNetworksCompilation_create} function or + * {@link ANeuralNetworksCompilation_createForDevices}.</li> + * <li>Set any desired properties on the compilation (for example, + * {@link ANeuralNetworksCompilation_setPreference}).</li> + * <li>Optionally, set the caching signature and the cache directory on the + * compilation by calling {@link ANeuralNetworksCompilation_setCaching}.</li> + * <li>Complete the compilation with {@link ANeuralNetworksCompilation_finish}.</li> + * <li>Use the compilation as many times as needed + * with {@link ANeuralNetworksExecution_create} and + * {@link ANeuralNetworksBurst_create}.</li> + * <li>Destroy the compilation with {@link ANeuralNetworksCompilation_free} + * once all executions using the compilation have completed.</li></ul></p> + * + * A compilation is completed by calling {@link ANeuralNetworksCompilation_finish}. + * A compilation is destroyed by calling {@link ANeuralNetworksCompilation_free}. + * + * <p>A compilation cannot be modified once {@link ANeuralNetworksCompilation_finish} + * has been called on it.</p> + * + * <p>It is the application's responsibility to make sure that only + * one thread modifies a compilation at a given time. It is however + * safe for more than one thread to use the compilation once + * {@link ANeuralNetworksCompilation_finish} has returned.</p> + * + * <p>It is also the application's responsibility to ensure that there are no other + * uses of the compilation after calling {@link ANeuralNetworksCompilation_free}. + * This includes any execution object or burst object created using the compilation, + * or any memory descriptor with the compilation as part of one of the roles specified by + * {@link ANeuralNetworksMemoryDesc_addInputRole} or + * {@link ANeuralNetworksMemoryDesc_addOutputRole}.</p> + * + * Available since API level 27. 
+ */ +typedef struct ANeuralNetworksCompilation ANeuralNetworksCompilation; + +/** + * ANeuralNetworksExecution is an opaque type that can be used to apply a machine + * learning model to a set of inputs. + * + * <p>To use:<ul> + * <li>Create a new execution instance by calling the + * {@link ANeuralNetworksExecution_create} function.</li> + * <li>Associate input buffers or memory regions to the model inputs with + * {@link ANeuralNetworksExecution_setInput} or + * {@link ANeuralNetworksExecution_setInputFromMemory}.</li> + * <li>Associate output buffers or memory regions to the model outputs with + * {@link ANeuralNetworksExecution_setOutput} or + * {@link ANeuralNetworksExecution_setOutputFromMemory}.</li> + * <li>Apply the model with one of the following:</li><ul> + * <li>Asynchronously with {@link ANeuralNetworksExecution_startCompute} + * or with {@link ANeuralNetworksExecution_startComputeWithDependencies}, + * waiting for the execution to complete with + * {@link ANeuralNetworksEvent_wait}.</li> + * <li>Synchronously with {@link ANeuralNetworksExecution_compute}.</li> + * <li>Synchronously as part of an execution burst with + * {@link ANeuralNetworksExecution_burstCompute}.</li></ul> + * <li>Destroy the execution with + * {@link ANeuralNetworksExecution_free}.</li></ul></p> + * + * <p>An output buffer or memory region must not overlap with any + * other output buffer or memory region, with an input buffer or + * memory region, or with an operand value in a memory object + * ({@link ANeuralNetworksModel_setOperandValueFromMemory}).</p> + * + * <p>An execution cannot be modified once + * {@link ANeuralNetworksExecution_burstCompute}, + * {@link ANeuralNetworksExecution_compute}, + * {@link ANeuralNetworksExecution_startCompute} or + * {@link ANeuralNetworksExecution_startComputeWithDependencies} has been called on it.</p> + * + * <p>An execution can be applied to a model with + * {@link ANeuralNetworksExecution_burstCompute}, + * {@link 
ANeuralNetworksExecution_compute}, + * {@link ANeuralNetworksExecution_startCompute} or + * {@link ANeuralNetworksExecution_startComputeWithDependencies} only once. Create new + * executions to do new evaluations of the model.</p> + * + * <p>It is the application's responsibility to make sure that only one thread + * modifies an execution at a given time. It is however safe for more than one + * thread to use {@link ANeuralNetworksEvent_wait} at the same time.</p> + * + * <p>It is also the application's responsibility to ensure that the execution + * either has never been scheduled or has completed (i.e., that + * {@link ANeuralNetworksExecution_burstCompute}, + * {@link ANeuralNetworksExecution_compute}, or + * {@link ANeuralNetworksEvent_wait} has returned) before calling + * {@link ANeuralNetworksExecution_free}.</p>. + * + * <p>It is also the application's responsibility to ensure that there are no other + * uses of the execution after calling {@link ANeuralNetworksExecution_free}.</p> + * + * <p>Multiple executions can be scheduled and evaluated concurrently, either by + * means of {@link ANeuralNetworksExecution_compute} or + * {@link ANeuralNetworksExecution_burstCompute} (which are synchronous) in + * different threads, or by means of + * {@link ANeuralNetworksExecution_startCompute} or + * {@link ANeuralNetworksExecution_startComputeWithDependencies} (which are asynchronous). + * (Concurrent uses of {@link ANeuralNetworksExecution_burstCompute} must be on + * different burst objects.) The runtime makes no guarantee on the ordering of + * completion of executions. 
If it's important to the application, the + * application should enforce the ordering by ensuring that one execution + * completes before the next is scheduled (for example, by scheduling all + * executions synchronously within a single thread, or by scheduling all + * executions asynchronously and using {@link ANeuralNetworksEvent_wait} between + * calls to {@link ANeuralNetworksExecution_startCompute}); or by using + * {@link ANeuralNetworksExecution_startComputeWithDependencies} to make the execution wait for a + * list of events to be signaled before starting the actual evaluation.</p> + * + * Available since API level 27. + */ +typedef struct ANeuralNetworksExecution ANeuralNetworksExecution; + +#if __ANDROID_API__ >= 29 +/** + * Parameters for ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL operand. + */ +typedef struct ANeuralNetworksSymmPerChannelQuantParams { + /* The index of the channel dimension. */ + uint32_t channelDim; + /** The size of the scale array. Should be equal to dimension[channelDim] of the Operand. */ + uint32_t scaleCount; + /** The array of scaling values for each channel. Each value must be greater than zero. */ + const float* scales; +} ANeuralNetworksSymmPerChannelQuantParams; + +/** + * ANeuralNetworksBurst is an opaque type that can be used to reduce the latency + * of a rapid sequence of executions. It will likely cause overhead if only used + * for a single execution. + * + * ANeuralNetworksBurst serves as a context object for any number of inferences + * using {@link ANeuralNetworksExecution} objects. An ANeuralNetworksBurst + * object and the {@link ANeuralNetworksExecution} objects used with it must all + * have been created from the same {@link ANeuralNetworksCompilation} object. + * + * This object is also used as a hint to drivers, providing insight to the + * lifetime of a rapid sequence of executions. 
For example, a driver may choose + * to increase the clock frequency of its accelerator for the lifetime of a + * burst object. + * + * <p>To use:<ul> + * <li>Create a new burst object by calling the + * {@link ANeuralNetworksBurst_create} function.</li> + * <li>For each execution:</li><ul> + * <li>Create {@link ANeuralNetworksExecution} and configure its + * properties (see {@link ANeuralNetworksExecution} for details).</li> + * <li>Apply the model synchronously with + * {@link ANeuralNetworksExecution_burstCompute}, reusing the same + * {@link ANeuralNetworksBurst} with the new + * {@link ANeuralNetworksExecution}.</li> + * <li>Use and free the {@link ANeuralNetworksExecution}.</li></ul> + * <li>Destroy the burst with + * {@link ANeuralNetworksBurst_free}.</li></ul></p> + * + * Available since API level 29. + */ +typedef struct ANeuralNetworksBurst ANeuralNetworksBurst; +#endif // __ANDROID_API__ >= 29 + +/** + * ANeuralNetworksOperandType describes the type of an operand. + * + * This structure is used to describe both scalars and tensors. + * + * A tensor operand type with all dimensions specified is "fully + * specified". Whenever possible (i.e., whenever the dimensions are + * known at model construction time), a tensor operand type should be + * (but is not required to be) fully specified, in order to enable the + * best possible performance. + * + * If a tensor operand's type is not fully specified, the dimensions + * of the operand are deduced from the operand types and values of the + * operation for which that operand is an output or from the corresponding + * {@link ANEURALNETWORKS_IF} or {@link ANEURALNETWORKS_WHILE} operation input + * operand type in the case of referenced model input operands. 
+ * + * <p>In the following situations, a tensor operand type must be fully + * specified:<ul> + * <li>The operand has a constant value, set by + * {@link ANeuralNetworksModel_setOperandValue} (with a + * non-nullptr buffer) or + * {@link ANeuralNetworksModel_setOperandValueFromMemory}.</li> + * <li>The operand is a model input (see + * {@link ANeuralNetworksModel_identifyInputsAndOutputs}) of the main + * model within a compilation. A fully specified tensor operand type + * must either be provided to {@link ANeuralNetworksModel_addOperand}; + * or it must be provided to the corresponding + * {@link ANeuralNetworksExecution_setInput}, or + * {@link ANeuralNetworksExecution_setInputFromMemory}. + * EXCEPTION: If the input is optional and omitted + * (by passing nullptr for buffer to + * {@link ANeuralNetworksExecution_setInput}) then it need + * not have a fully specified tensor operand type.</li> + * <li>The operand is a model output (see + * {@link ANeuralNetworksModel_identifyInputsAndOutputs}) of the main + * model within a compilation and is to be used with {@link + * ANeuralNetworksExecution_startComputeWithDependencies}. + * A fully specified tensor operand type must either be provided + * to {@link ANeuralNetworksModel_addOperand}; or it must be + * provided to the corresponding + * {@link ANeuralNetworksExecution_setOutput}, or + * {@link ANeuralNetworksExecution_setOutputFromMemory}.</li></ul> + * + * A tensor operand type of specified rank but some number of + * unspecified dimensions is represented by setting dimensionCount to + * the rank and each unspecified dimension to 0. + * + * Available since API level 27. + * + * Starting at API level 29, a tensor operand type of unspecified rank is + * represented by setting dimensionCount to 0 and dimensions to NULL (just as if + * it were a scalar operand type). + */ +typedef struct ANeuralNetworksOperandType { + /** + * The data type, e.g. ANEURALNETWORKS_FLOAT32. 
+ */ + int32_t type; + + /** + * The number of dimensions (rank). + * + * Must be 0 for scalars. + */ + uint32_t dimensionCount; + + /** + * The dimensions of the tensor. + * + * Must be nullptr for scalars. + */ + const uint32_t* dimensions; + + /** + * The quantization scale. + * + * Must be 0 when not applicable to an operand type. + * + * See {@link OperandCode}. + */ + float scale; + + /** + * The quantization zero point. + * + * Must be 0 when not applicable to an operand type. + * + * See {@link OperandCode}. + */ + int32_t zeroPoint; +} ANeuralNetworksOperandType; + +typedef int32_t ANeuralNetworksOperationType; + +/** + * ANeuralNetworksEvent is an opaque type that represents an event + * that will be signaled once an execution completes. + * + * Available since API level 27. + */ +typedef struct ANeuralNetworksEvent ANeuralNetworksEvent; + +#if __ANDROID_API__ >= 29 + +/** + * ANeuralNetworksDevice is an opaque type that represents a device. + * + * This type is used to query basic properties and supported operations of the corresponding + * device, and control which device(s) a model is to be run on. + * + * Available since API level 29. + */ +typedef struct ANeuralNetworksDevice ANeuralNetworksDevice; + +#endif // __ANDROID_API__ >= 29 + +#if __ANDROID_API__ >= 30 + +/** + * ANeuralNetworksMemoryDesc is an opaque type that represents a memory descriptor. + * + * A memory descriptor describes the properties of a memory object, and is used by + * {@link ANeuralNetworksMemory_createFromDesc}. + * + * To use: + * - Create a new memory descriptor by calling {@link ANeuralNetworksMemoryDesc_create}. + * - Specify all of the intended input and output roles by calling + * {@link ANeuralNetworksMemoryDesc_addInputRole} and + * {@link ANeuralNetworksMemoryDesc_addOutputRole}. + * - Optionally, specify the memory dimensions by calling + * {@link ANeuralNetworksMemoryDesc_setDimensions}. 
+ * - Complete the memory descriptor with {@link ANeuralNetworksMemoryDesc_finish}. + * - Use the memory descriptor as many times as needed with + * {@link ANeuralNetworksMemory_createFromDesc}. + * - Destroy the memory descriptor with {@link ANeuralNetworksMemoryDesc_free}. + * + * A memory descriptor is completed by calling {@link ANeuralNetworksMemoryDesc_finish}. + * A memory descriptor is destroyed by calling {@link ANeuralNetworksMemoryDesc_free}. + * + * A memory descriptor must not be modified once {@link ANeuralNetworksMemoryDesc_finish} + * has been called on it. + * + * It is the application's responsibility to make sure that only + * one thread modifies a memory descriptor at a given time. It is however + * safe for more than one thread to use the memory descriptor once + * {@link ANeuralNetworksMemoryDesc_finish} has returned. + * + * It is also the application's responsibility to ensure that there are no other + * uses of the memory descriptor after calling {@link ANeuralNetworksMemoryDesc_free}. + * It is however safe to continue using a {@link ANeuralNetworksMemory} object created + * from the memory descriptor. + * + * Available since API level 30. + */ +typedef struct ANeuralNetworksMemoryDesc ANeuralNetworksMemoryDesc; + +/** + * Create a {@link ANeuralNetworksMemoryDesc} with no properties. + * + * This only creates the memory descriptor. Its properties should be set with calls to + * {@link ANeuralNetworksMemoryDesc_addInputRole}, + * {@link ANeuralNetworksMemoryDesc_addOutputRole}, and + * {@link ANeuralNetworksMemoryDesc_setDimensions}. + * + * {@link ANeuralNetworksMemoryDesc_finish} must be called once all properties have been set. + * + * {@link ANeuralNetworksMemoryDesc_free} must be called once the memory descriptor + * is no longer needed. + * + * Available since API level 30. + * + * @param desc The {@link ANeuralNetworksMemoryDesc} to be created. + * Set to NULL if unsuccessful. + * + * @return ANEURALNETWORKS_NO_ERROR if successful. 
+ */ +int ANeuralNetworksMemoryDesc_create(ANeuralNetworksMemoryDesc** desc) __INTRODUCED_IN(30); + +/** + * Destroy a memory descriptor. + * + * The memory descriptor need not have been finished by a call to + * {@link ANeuralNetworksMemoryDesc_finish}. + * + * See {@link ANeuralNetworksMemoryDesc} for information on multithreaded usage. + * + * Available since API level 30. + * + * @param desc The memory descriptor to be destroyed. Passing NULL is acceptable and + * results in no operation. + */ +void ANeuralNetworksMemoryDesc_free(ANeuralNetworksMemoryDesc* desc) __INTRODUCED_IN(30); + +/** + * Specify that a memory object will be playing the role of an input to an execution created from a + * particular compilation. + * + * The compilation and the input index fully specify an input operand. This function + * may be invoked multiple times on the same memory descriptor with different input operands, + * and the same input operand may be specified on multiple memory descriptors. However, + * specifying the same input operand on the same memory descriptor more than once will + * return an error. + * + * The dimensions of the corresponding model operands of all the roles specified by + * {@link ANeuralNetworksMemoryDesc_addInputRole} and + * {@link ANeuralNetworksMemoryDesc_addOutputRole} must be compatible with each other. Two + * dimensions are incompatible if both ranks are fully specified but have different values, or if + * there is at least one axis that is fully specified in both but has different values. + * + * At least one of {@link ANeuralNetworksMemoryDesc_addInputRole} and + * {@link ANeuralNetworksMemoryDesc_addOutputRole} must be called on a memory descriptor + * before invoking {@link ANeuralNetworksMemoryDesc_finish}. + * + * Attempting to modify a memory descriptor once {@link ANeuralNetworksMemoryDesc_finish} has been + * called will return an error. + * + * See {@link ANeuralNetworksMemoryDesc} for information on multithreaded usage. 
+ * + * Available since API level 30. + * + * @param desc The memory descriptor to be modified. + * @param compilation The compilation object. It must already have been finished by calling + * {@link ANeuralNetworksCompilation_finish}, and must outlive the memory + * descriptor. + * @param index The index of the input argument we are referencing from the compilation. It is + * an index into the inputs list passed to + * {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not + * the index associated with {@link ANeuralNetworksModel_addOperand}. + * @param frequency A floating-point value within the range (0.0, 1.0]. Describes how likely the + * memory is to be used in the specified role. This is provided as a hint to + * optimize the case when different roles prefer different memory locations or data + * layouts. + * + * @return ANEURALNETWORKS_NO_ERROR if successful. + */ +int ANeuralNetworksMemoryDesc_addInputRole(ANeuralNetworksMemoryDesc* desc, + const ANeuralNetworksCompilation* compilation, + uint32_t index, float frequency) __INTRODUCED_IN(30); + +/** + * Specify that a memory object will be playing the role of an output to an execution created from a + * particular compilation. + * + * The compilation and the output index fully specify an output operand. This function + * may be invoked multiple times on the same memory descriptor with different output operands, + * and the same output operand may be specified on multiple memory descriptors. However, + * specifying the same output operand on the same memory descriptor object more than once will + * return an error. + * + * The dimensions of the corresponding model operands of all the roles specified by + * {@link ANeuralNetworksMemoryDesc_addInputRole} and + * {@link ANeuralNetworksMemoryDesc_addOutputRole} must be compatible with each other. 
Two + * dimensions are incompatible if both ranks are fully specified but have different values, or if + * there is at least one axis that is fully specified in both but has different values. + * + * At least one of {@link ANeuralNetworksMemoryDesc_addInputRole} and + * {@link ANeuralNetworksMemoryDesc_addOutputRole} must be called on the memory descriptor + * before invoking {@link ANeuralNetworksMemoryDesc_finish}. + * + * Attempting to modify a memory descriptor once {@link ANeuralNetworksMemoryDesc_finish} has been + * called will return an error. + * + * See {@link ANeuralNetworksMemoryDesc} for information on multithreaded usage. + * + * Available since API level 30. + * + * @param desc The memory descriptor to be modified. + * @param compilation The compilation object. It must already have been finished by calling + * {@link ANeuralNetworksCompilation_finish}, and must outlive the memory + * descriptor. + * @param index The index of the output argument we are referencing from the compilation. It is + * an index into the outputs list passed to + * {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not + * the index associated with {@link ANeuralNetworksModel_addOperand}. + * @param frequency A floating-point value within the range (0.0, 1.0]. Describes how likely the + * memory is to be used in the specified role. This is provided as a hint to + * optimize the case when multiple roles prefer different memory locations or data + * layouts. + * + * @return ANEURALNETWORKS_NO_ERROR if successful. + */ +int ANeuralNetworksMemoryDesc_addOutputRole(ANeuralNetworksMemoryDesc* desc, + const ANeuralNetworksCompilation* compilation, + uint32_t index, float frequency) __INTRODUCED_IN(30); + +/** + * Set the dimensional information of the memory descriptor. 
+ * + * The specified dimensions must be compatible with the dimensions of the corresponding model + * operands of all the roles specified by {@link ANeuralNetworksMemoryDesc_addInputRole} and + * {@link ANeuralNetworksMemoryDesc_addOutputRole}. Two dimensions are incompatible if both ranks + * are fully specified but have different values, or if there is at least one axis that is fully + * specified in both but has different values. + * + * Attempting to modify a memory descriptor once {@link ANeuralNetworksMemoryDesc_finish} has been + * called will return an error. + * + * See {@link ANeuralNetworksMemoryDesc} for information on multithreaded usage. + * + * Available since API level 30. + * + * @param desc The memory descriptor to be modified. + * @param rank The number of dimensions. Must be 0 for scalars. + * @param dimensions An array of dimensions. An entry with the value 0 indicates that the + * corresponding axis has an unknown size. + * + * @return ANEURALNETWORKS_NO_ERROR if successful. + */ +int ANeuralNetworksMemoryDesc_setDimensions(ANeuralNetworksMemoryDesc* desc, uint32_t rank, + const uint32_t* dimensions) __INTRODUCED_IN(30); + +/** + * Indicate that we have finished modifying a memory descriptor. Required before calling + * {@link ANeuralNetworksMemory_createFromDesc}. + * + * This function must only be called once for a given memory descriptor. + * + * See {@link ANeuralNetworksMemoryDesc} for information on multithreaded usage. + * + * Available since API level 30. + * + * @param desc The memory descriptor to be finished. + * + * @return ANEURALNETWORKS_NO_ERROR if successful. + */ +int ANeuralNetworksMemoryDesc_finish(ANeuralNetworksMemoryDesc* desc) __INTRODUCED_IN(30); + +/** + * Creates a memory object from a memory descriptor. + * + * The memory object is created with an uninitialized buffer. 
A memory object with an uninitialized + * buffer may only be used according to the roles specified by {@link + * ANeuralNetworksMemoryDesc_addOutputRole}, or as the destination memory in {@link + * ANeuralNetworksMemory_copy}. The buffer of a memory object is initialized after the memory object + * is used as an output in a successful execution, or used as the destination memory in a successful + * {@link ANeuralNetworksMemory_copy}. A memory object with an initialized buffer may be used + * according to all roles specified in {@link ANeuralNetworksMemoryDesc}, or as the source or + * destination memory in {@link ANeuralNetworksMemory_copy}. The buffer of a memory object will + * return to the uninitialized state if the memory object is used as an output in a failed + * execution, or used as the destination memory in a failed {@link ANeuralNetworksMemory_copy}. + * + * The dimensions of the memory descriptor are deduced from the dimensions of the corresponding + * model operands of all the roles specified by {@link ANeuralNetworksMemoryDesc_addInputRole} and + * {@link ANeuralNetworksMemoryDesc_addOutputRole}, as well as the dimensions set by the call to + * {@link ANeuralNetworksMemoryDesc_setDimensions}, if any. The memory descriptor may have + * unspecified dimensions or rank. In such a case, the same memory object may be used with different + * shapes of outputs in different executions. When the memory is used as an input, the input shape + * must be the same as the output shape from the last execution using this memory object as an + * output, or the last {@link ANeuralNetworksMemory_copy} using this memory object as the destination + * memory. Creating a memory object with unspecified dimensions or rank may fail for certain sets of + * roles. + * + * Using the memory in roles or shapes that are not compatible with the rules specified above will + * return an error. 
+ * + * When calling {@link ANeuralNetworksExecution_setInputFromMemory} or + * {@link ANeuralNetworksExecution_setOutputFromMemory} with the memory object, + * both offset and length must be set to zero and the entire memory region will be + * associated with the specified input or output operand. + * + * Calling {@link ANeuralNetworksModel_setOperandValueFromMemory} with the memory created from this + * function will return an error. + * + * {@link ANeuralNetworksMemory_free} must be called once the memory is no longer needed. + * + * Attempting to create memory from an unfinished memory descriptor will return an error. + * + * The provided {@link ANeuralNetworksMemoryDesc} need not outlive the {@link ANeuralNetworksMemory} + * object. + * + * Available since API level 30. + * + * @param desc The memory descriptor. + * @param memory The memory object to be created. + * Set to NULL if unsuccessful. + * + * @return ANEURALNETWORKS_NO_ERROR if successful; ANEURALNETWORKS_OP_FAILED if the memory is + * created with unspecified dimensions or rank and it is not supported for this set of + * roles. + */ +int ANeuralNetworksMemory_createFromDesc(const ANeuralNetworksMemoryDesc* desc, + ANeuralNetworksMemory** memory) __INTRODUCED_IN(30); + +/** + * Copies data from one memory object to another. + * + * If at most one of the src and dst is created from {@link ANeuralNetworksMemory_createFromDesc}, + * the src and dst must have the same logical size: + * - If the memory is created from {@link ANeuralNetworksMemory_createFromFd}, or if it is created + * from {@link ANeuralNetworksMemory_createFromAHardwareBuffer} with format of + * AHARDWAREBUFFER_FORMAT_BLOB, the logical size equals the size of the memory. + * - If the memory is created from {@link ANeuralNetworksMemory_createFromAHardwareBuffer} with a + * format other than AHARDWAREBUFFER_FORMAT_BLOB, the logical size equals the size when there is + * no padding and the data is tightly packed. 
This function may fail if the AHardwareBuffer + * cannot be accessed. + * - If the memory is created from {@link ANeuralNetworksMemory_createFromDesc}, the logical size + * equals the size indicated by the {@link OperandCode} multiplied by the number of elements. This + * function will fail if the number of elements is unknown. + * + * If both src and dst are created from {@link ANeuralNetworksMemory_createFromDesc}, they must have + * compatible dimensions. Two dimensions are incompatible if both ranks are fully specified but + * have different values, or if there is at least one axis that is fully specified in both but has + * different values. The dst may have unspecified dimensions or rank. In such a case, the dimensions + * of dst will get updated according to the dimensions of the src. + * + * In both cases, if the src is created from {@link ANeuralNetworksMemory_createFromDesc}, it must + * have been used as an output in a successful execution, or used as the destination memory in a + * successful {@link ANeuralNetworksMemory_copy}. + * + * The src and dst may have different data layout, in which case the data copying is performed + * logically with data layout transformation. + * + * Available since API level 30. + * + * @param src The source memory object. + * @param dst The destination memory object. + * + * @return ANEURALNETWORKS_NO_ERROR if successful. + */ +int ANeuralNetworksMemory_copy(const ANeuralNetworksMemory* src, const ANeuralNetworksMemory* dst) + __INTRODUCED_IN(30); + +#endif // __ANDROID_API__ >= 30 + +#if __ANDROID_API__ >= 29 + +/** + * Get the number of available devices. + * + * @param numDevices Used to return the number of devices. + * + * @return ANEURALNETWORKS_NO_ERROR if successful. + * + * Available since API level 29. + */ +int ANeuralNetworks_getDeviceCount(uint32_t* numDevices) __INTRODUCED_IN(29); + +/** + * Get the representation of the specified device. + * + * @param devIndex The index of the specified device. 
Must be less than the + number of available devices. + * @param device The representation of the specified device. + * The same representation will always be returned for the specified + * device. + * + * @return ANEURALNETWORKS_NO_ERROR if successful. + * + * Available since API level 29. + */ +int ANeuralNetworks_getDevice(uint32_t devIndex, ANeuralNetworksDevice** device) + __INTRODUCED_IN(29); + +/** + * Get the name of the specified device. + * + * @param device The representation of the specified device. + * @param name The returned name of the specified device. The name will be in UTF-8 + * and will be null-terminated. It will be recognizable as a known device name + * rather than a cryptic string. For devices with feature level reported by + * {@link ANeuralNetworksDevice_getFeatureLevel} that is 29 and above, the + * format of the name is {VENDOR}-{DEVICE}. For devices with feature level 28 + * or lower, the format of the name is undefined. + * The name will remain valid for the duration of the application. + * + * @return ANEURALNETWORKS_NO_ERROR if successful. + * + * Available since API level 29. + */ +int ANeuralNetworksDevice_getName(const ANeuralNetworksDevice* device, const char** name) + __INTRODUCED_IN(29); + +/** + * Get the type of a given device. + * + * The device type can be used to help application developers to distribute Machine Learning + * workloads and other workloads such as graphical rendering. + * E.g., for an app which renders AR scenes based on real time object detection results, + * the developer could choose an ACCELERATOR type device for ML workloads, and reserve GPU + * for graphical rendering. + * + * @param device The representation of the specified device. + * @param type The returned {@link DeviceTypeCode} of the specified device. + * + * @return ANEURALNETWORKS_NO_ERROR if successful. + * + * Available since API level 29. 
+ */ +int ANeuralNetworksDevice_getType(const ANeuralNetworksDevice* device, int32_t* type) + __INTRODUCED_IN(29); + +/** + * Get the version of the driver implementation of the specified device. + * + * It’s the responsibility of the driver implementor to ensure that this version string + * uniquely distinguishes this implementation from all previous implementations. + * + * This version string must not be confused with the feature level which is solely defined + * by {@link ANeuralNetworksDevice_getFeatureLevel}. There is no implicit ordering of the versions. + * For example, it is not possible to filter all drivers older than a certain version. + * + * Application developers may use this version string to avoid or prefer specific driver + * implementations. For example, an application may want to do so because: + * - A specific version of the driver does not provide the required performance, + * perhaps because of a performance regression. + * - A specific version of the driver has a bug or returns results that don’t match + * the minimum precision requirement for the application. + * + * @param device The representation of the specified device. + * @param version The returned version string of the driver for the specified device. The + * string will be in UTF-8 and will be null-terminated. For devices with feature + * level 28 or lower, "UNKNOWN" will be returned. The version string will remain + * valid for the duration of the application. + * + * @return ANEURALNETWORKS_NO_ERROR if successful. + * + * Available since API level 29. + */ +int ANeuralNetworksDevice_getVersion(const ANeuralNetworksDevice* device, const char** version) + __INTRODUCED_IN(29); + +/** + * Get the supported NNAPI version of the specified device. + * + * Each device has a supported feature level, which is the most advanced feature this driver + * implements. 
For example, if the driver implements the features introduced in Android P, + * but does not implement the features introduced after Android P, the value would be 28. + * Developers could decide whether or not the specified device should be used for a Model that + * has certain feature requirements. + * + * @param device The representation of the specified device. + * @param featureLevel The API level of the most advanced feature this driver implements. + * + * @return ANEURALNETWORKS_NO_ERROR if successful. + * + * Available since API level 29. + */ +int ANeuralNetworksDevice_getFeatureLevel(const ANeuralNetworksDevice* device, + int64_t* featureLevel) __INTRODUCED_IN(29); + +#if __ANDROID_API__ >= 30 + +/** + * Wait until the device is in a live state. + * + * A device may encounter internal errors and temporarily enter a dead state. A + * call that uses a device in such a state will return with the error + * {@link ANEURALNETWORKS_DEAD_OBJECT}. ANeuralNetworksDevice_wait will block until + * the device is in a live state. + * + * @param device The representation of the specified device. + * + * @return ANEURALNETWORKS_NO_ERROR if successful. + * + * Available since API level 30. + */ +int ANeuralNetworksDevice_wait(const ANeuralNetworksDevice* device) __INTRODUCED_IN(30); + +#endif // __ANDROID_API__ >= 30 + +/** + * Get the supported operations for a specified set of devices. If multiple devices + * are selected, the supported operation list is a union of supported operations of all + * selected devices. + * + * @param model The model to be queried. + * @param devices The set of devices. Must not contain duplicates. + * @param numDevices The number of devices in the set. + * @param supportedOps The boolean array to be filled. True means supported. The size of the + * boolean array must be at least as large as the number of operations + * in the model. 
The order of elements in the supportedOps array matches + * the order in which the corresponding operations were added to the model. + * + * @return ANEURALNETWORKS_NO_ERROR if successful. + * + * Available since API level 29. + */ +int ANeuralNetworksModel_getSupportedOperationsForDevices( + const ANeuralNetworksModel* model, const ANeuralNetworksDevice* const* devices, + uint32_t numDevices, bool* supportedOps) __INTRODUCED_IN(29); + +/** + * Create a {@link ANeuralNetworksCompilation} to compile the given model for a specified set + * of devices. If more than one device is specified, the compilation will + * distribute the workload automatically across the devices. The model must be fully + * supported by the specified set of devices. This means that + * ANeuralNetworksModel_getSupportedOperationsForDevices() must have returned true for every + * operation for that model/devices pair. + * + * The user must handle all compilation and execution failures from the + * specified set of devices. This is in contrast to a use of {@link + * ANeuralNetworksCompilation_create}, where the runtime will attempt to recover + * from such failures. + * + * The model passed to this function is termed the "main model" of the + * compilation, to distinguish it from other models referred to by an Operand + * of type {@link ANEURALNETWORKS_MODEL} within this compilation. + * + * @param model The {@link ANeuralNetworksModel} to be compiled. + * @param devices The set of devices. Must not contain duplicates. + * @param numDevices The number of devices in the set. + * @param compilation The newly created object or NULL if unsuccessful. + * + * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA + * if the model is invalid. + * + * Available since API level 29. 
+ */ +int ANeuralNetworksCompilation_createForDevices(ANeuralNetworksModel* model, + const ANeuralNetworksDevice* const* devices, + uint32_t numDevices, + ANeuralNetworksCompilation** compilation) + __INTRODUCED_IN(29); + +/** + * Sets the compilation caching signature and the cache directory. + * + * Provides optional caching information to the runtime for faster repeated + * compilation. + * + * See {@link ANeuralNetworksCompilation} for information on multithreaded usage. + * + * @param compilation The compilation to be modified. + * @param cacheDir The cache directory for the runtime to store and retrieve caching + * data. It is recommended to use the code cache directory provided + * by the Android runtime. If not using the code cache directory, the + * user should choose a directory local to the application, and is + * responsible for managing the cache entries. + * @param token The token provided by the user to specify a model must be of length + * ANEURALNETWORKS_BYTE_SIZE_OF_CACHE_TOKEN. The user should ensure that + * the token is unique to a model within the application. The NNAPI + * runtime cannot detect token collisions; a collision will result in a + * failed execution or in a successful execution that produces incorrect + * output values. + * + * @return ANEURALNETWORKS_NO_ERROR if successful. + * + * Available since API level 29. + */ +int ANeuralNetworksCompilation_setCaching(ANeuralNetworksCompilation* compilation, + const char* cacheDir, const uint8_t* token) + __INTRODUCED_IN(29); + +/** + * Schedule synchronous evaluation of the execution. + * + * <p>Schedules synchronous evaluation of the execution. Returns once the + * execution has completed and the outputs are ready to be consumed. 
+ * </p> + * + * If {@link ANeuralNetworksExecution_setTimeout} was called on this execution, + * and the execution is not able to complete before the timeout duration is + * exceeded, then execution may be aborted, in which case + * {@link ANEURALNETWORKS_MISSED_DEADLINE_*} will be returned. If the device has + * a feature level reported by {@link ANeuralNetworksDevice_getFeatureLevel} + * that is lower than 30, then the timeout duration hint will be ignored. + * + * If this execution contains a {@link ANEURALNETWORKS_WHILE} operation, and + * the condition model does not output false within the loop timeout duration, + * then execution will be aborted and {@link ANEURALNETWORKS_MISSED_DEADLINE_*} + * will be returned. + * + * See {@link ANeuralNetworksExecution} for information on multithreaded usage. + * + * See {@link ANeuralNetworksExecution_burstCompute} for burst synchronous execution. + * See {@link ANeuralNetworksExecution_startCompute} for regular asynchronous execution. + * See {@link ANeuralNetworksExecution_startComputeWithDependencies} for + * asynchronous execution with dependencies. + * + * Available since API level 29. + * + * @param execution The execution to be scheduled and executed. + * + * @return ANEURALNETWORKS_NO_ERROR if the execution completed normally. + * ANEURALNETWORKS_UNMAPPABLE if the execution input or output memory cannot + * be properly mapped. + */ +int ANeuralNetworksExecution_compute(ANeuralNetworksExecution* execution) __INTRODUCED_IN(29); + +/** + * Get the dimensional information of the specified output operand of the model of the + * {@link ANeuralNetworksExecution}. + * + * The execution must have completed. On asynchronous execution initiated by + * {@link ANeuralNetworksExecution_startCompute} or + * {@link ANeuralNetworksExecution_startComputeWithDependencies}, + * {@link ANeuralNetworksEvent_wait} must be called prior to this function. + * + * @param execution The execution to be queried. 
+ * @param index The index of the output argument we are querying. It is + * an index into the lists passed to + * {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not + * the index associated with {@link ANeuralNetworksModel_addOperand}. + * @param rank The rank of the output operand. + * + * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_OUTPUT_INSUFFICIENT_SIZE + * if the target output is provided an insufficient buffer at execution time, + * ANEURALNETWORKS_BAD_DATA if the index is invalid. + * + * Available since API level 29. + */ +int ANeuralNetworksExecution_getOutputOperandRank(ANeuralNetworksExecution* execution, + int32_t index, uint32_t* rank) + __INTRODUCED_IN(29); + +/** + * Get the dimensional information of the specified output operand of the model of the + * {@link ANeuralNetworksExecution}. The target output operand cannot be a scalar. + * + * The execution must have completed. On asynchronous execution initiated by + * {@link ANeuralNetworksExecution_startCompute} or + * {@link ANeuralNetworksExecution_startComputeWithDependencies}, + * {@link ANeuralNetworksEvent_wait} must be called prior to this function. + * + * @param execution The execution to be queried. + * @param index The index of the output argument we are querying. It is an index into the lists + * passed to {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not + * the index associated with {@link ANeuralNetworksModel_addOperand}. + * @param dimensions The dimension array to be filled. The size of the array must be exactly as + * large as the rank of the output operand to be queried in the model. + * + * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_OUTPUT_INSUFFICIENT_SIZE + * if the target output is provided an insufficient buffer at execution time, + * ANEURALNETWORKS_BAD_DATA if the index is invalid or if the target is a scalar. + * + * Available since API level 29. 
+ */ +int ANeuralNetworksExecution_getOutputOperandDimensions(ANeuralNetworksExecution* execution, + int32_t index, uint32_t* dimensions) + __INTRODUCED_IN(29); + +/** + * Create a {@link ANeuralNetworksBurst} to apply the given compilation. + * This only creates the burst object. Computation is only performed once + * {@link ANeuralNetworksExecution_burstCompute} is invoked with a valid + * {@link ANeuralNetworksExecution} and {@link ANeuralNetworksBurst}. + * + * <p>The provided compilation must outlive the burst object.</p> + * + * Available since API level 29. + * + * @param compilation The {@link ANeuralNetworksCompilation} to be evaluated. + * @param burst The newly created object or NULL if unsuccessful. + * + * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA + * if the compilation is invalid. + */ +int ANeuralNetworksBurst_create(ANeuralNetworksCompilation* compilation, + ANeuralNetworksBurst** burst) __INTRODUCED_IN(29); + +/** + * Destroys the burst object. + * + * Available since API level 29. + * + * @param burst The burst object to be destroyed. Passing NULL is acceptable and + * results in no operation. + */ +void ANeuralNetworksBurst_free(ANeuralNetworksBurst* burst) __INTRODUCED_IN(29); + +/** + * Schedule synchronous evaluation of the execution on a burst object. + * + * <p>Schedules synchronous evaluation of the execution. Returns once the + * execution has completed and the outputs are ready to be consumed.</p> + * + * If {@link ANeuralNetworksExecution_setTimeout} was called on the execution, + * and the execution is not able to complete before the timeout duration is + * exceeded, then execution may be aborted, in which case + * {@link ANEURALNETWORKS_MISSED_DEADLINE_*} will be returned. 
+ * + * If the execution contains a {@link ANEURALNETWORKS_WHILE} operation, and + * the condition model does not output false within the loop timeout duration, + * then execution will be aborted and {@link ANEURALNETWORKS_MISSED_DEADLINE_*} + * will be returned. If the device has a feature level reported by + * {@link ANeuralNetworksDevice_getFeatureLevel} that is lower than 30, then the + * timeout duration hint will be ignored. + * + * <p>There must be at most one {@link ANeuralNetworksExecution} processing at + * any given time for any given burst object. Any + * {@link ANeuralNetworksExecution} launched before the previous has finished + * will result in ANEURALNETWORKS_BAD_STATE.</p> + * + * See {@link ANeuralNetworksExecution_compute} for synchronous execution. + * See {@link ANeuralNetworksExecution_startCompute} for regular asynchronous execution. + * See {@link ANeuralNetworksExecution_startComputeWithDependencies} for + * asynchronous execution with dependencies. + * + * Available since API level 29. + * + * @param burst The burst object to execute on. + * @param execution The execution to be scheduled and executed. The execution + * must be created from the same {@link + * ANeuralNetworksCompilation} as the burst object. + * + * @return ANEURALNETWORKS_NO_ERROR if the execution completed normally. + */ +int ANeuralNetworksExecution_burstCompute(ANeuralNetworksExecution* execution, + ANeuralNetworksBurst* burst) __INTRODUCED_IN(29); + +/** + * Creates a shared memory object from an AHardwareBuffer handle. + * + * If the shared memory is backed by an AHardwareBuffer of AHARDWAREBUFFER_FORMAT_BLOB + * format, it can be used the same way as shared memory created from a file handle. See + * {@link ANeuralNetworksMemory} for a description on how to use this shared memory. + * + * If the shared memory is backed by an AHardwareBuffer of a format other than + * AHARDWAREBUFFER_FORMAT_BLOB, it can only be used for Model inputs and outputs. 
+ * When calling {@link ANeuralNetworksExecution_setInputFromMemory} or + * {@link ANeuralNetworksExecution_setOutputFromMemory} with the shared memory, both + * offset and length must be set to zero and the entire memory region will be + * associated with the specified input or output operand. There is no guarantee + * that an arbitrary AHardwareBuffer_Format and AHardwareBuffer_UsageFlags combination + * can be used by arbitrary devices. The execution will fail if the selected set of + * devices cannot consume the buffer. + * + * Calling {@link ANeuralNetworksModel_setOperandValueFromMemory} with shared memory + * backed by an AHardwareBuffer of a format other than AHARDWAREBUFFER_FORMAT_BLOB is + * disallowed. + * + * The provided AHardwareBuffer must outlive the ANeuralNetworksMemory object. + * + * Available since API level 29. + * + * @param ahwb The AHardwareBuffer handle. + * @param memory The memory object to be created. + * Set to NULL if unsuccessful. + * + * @return ANEURALNETWORKS_NO_ERROR if the request completed normally. + * + * @see AHardwareBuffer + */ +int ANeuralNetworksMemory_createFromAHardwareBuffer(const AHardwareBuffer* ahwb, + ANeuralNetworksMemory** memory) + __INTRODUCED_IN(29); + +/** + + * Specifies whether duration of the {@link ANeuralNetworksExecution} is to be + * measured. Evaluation of the execution must not have been scheduled. + * + * By default, duration is not measured. + * + * The {@link ANeuralNetworksExecution} must have been created from an + * {@link ANeuralNetworksCompilation} which in turn was created from + * {@link ANeuralNetworksCompilation_createForDevices} with numDevices = 1. + * If the device has a feature level reported by + * {@link ANeuralNetworksDevice_getFeatureLevel} that is lower than 29, then the + * duration will not be measured. + * + * See {@link ANeuralNetworksExecution} for information on multithreaded usage. + * + * Available since API level 29. 
+ * + * @param execution The execution to be modified. + * @param measure 'true' if duration is to be measured, 'false' if not. + * + * @return ANEURALNETWORKS_NO_ERROR if successful. + */ +int ANeuralNetworksExecution_setMeasureTiming(ANeuralNetworksExecution* execution, bool measure) + __INTRODUCED_IN(29); + +/** + * Get the time spent in the specified {@link ANeuralNetworksExecution}, in nanoseconds. + * + * The execution must have completed. On asynchronous execution initiated by + * {@link ANeuralNetworksExecution_startCompute} or + * {@link ANeuralNetworksExecution_startComputeWithDependencies}, + * {@link ANeuralNetworksEvent_wait} must be called prior to this function. + * + * @param execution The execution to be queried. + * @param durationCode The measurement to be queried, specified by {@link DurationCode}. + * @param duration The returned duration. If no measurement was requested by + * {@link ANeuralNetworksExecution_setMeasureTiming}, if the + * device has a feature level reported by + * {@link ANeuralNetworksDevice_getFeatureLevel} that is lower + * than 29, or for some other reason the duration is not + * available, UINT64_MAX will be returned. A particular device + * need not support any given measurement. + * + * @return ANEURALNETWORKS_NO_ERROR if successful. + * + * Available since API level 29. + */ +int ANeuralNetworksExecution_getDuration(const ANeuralNetworksExecution* execution, + int32_t durationCode, uint64_t* duration) + __INTRODUCED_IN(29); + +#endif // __ANDROID_API__ >= 29 + +#if __ANDROID_API__ >= 27 + +/** + * Creates a shared memory object from a file descriptor. + * + * The shared memory is backed by a file descriptor via mmap. + * See {@link ANeuralNetworksMemory} for a description on how to use + * this shared memory. + * + * Available since API level 27. + * + * @param size The requested size in bytes. + * Must not be larger than the file size. + * @param protect The desired memory protection for the mapping. 
+ * It is either PROT_NONE or the bitwise OR of one or + * more of the following flags: PROT_READ, PROT_WRITE. + * @param fd The requested file descriptor. + * The file descriptor has to be mmap-able. The file + * descriptor will be duplicated. + * @param offset The offset to the beginning of the file of the area to map. + * The offset has to be aligned to a page size. + * @param memory The memory object to be created. + * Set to NULL if unsuccessful. + * + * @return ANEURALNETWORKS_NO_ERROR if the request completed normally. + */ +int ANeuralNetworksMemory_createFromFd(size_t size, int protect, int fd, size_t offset, + ANeuralNetworksMemory** memory) __INTRODUCED_IN(27); + +/** + * Delete a memory object. + * + * Destroys the object used by the run time to keep track of the memory. + * This will free the underlying actual memory if no other code has open + * handles to this memory. + * + * Available since API level 27. + * + * @param memory The memory object to be freed. Passing NULL is acceptable and + * results in no operation. + */ +void ANeuralNetworksMemory_free(ANeuralNetworksMemory* memory) __INTRODUCED_IN(27); + +/** + * Create an empty {@link ANeuralNetworksModel}. + * + * <p>This only creates the object. Computation is performed once + * {@link ANeuralNetworksExecution_burstCompute}, + * {@link ANeuralNetworksExecution_compute}, + * {@link ANeuralNetworksExecution_startCompute} or + * {@link ANeuralNetworksExecution_startComputeWithDependencies} is invoked. + * + * The model should be constructed with calls to + * {@link ANeuralNetworksModel_addOperation} and + * {@link ANeuralNetworksModel_addOperand} + * + * <p>{@link ANeuralNetworksModel_finish} should be called once the model + * has been fully constructed.</p> + * + * <p>{@link ANeuralNetworksModel_free} should be called once the model + * is no longer needed.</p> + * + * Available since API level 27. + * + * @param model The {@link ANeuralNetworksModel} to be created. 
+ * Set to NULL if unsuccessful. + * + * @return ANEURALNETWORKS_NO_ERROR if successful. + */ +int ANeuralNetworksModel_create(ANeuralNetworksModel** model) __INTRODUCED_IN(27); + +/** + * Destroy a model. + * + * The model need not have been finished by a call to + * {@link ANeuralNetworksModel_finish}. + * + * See {@link ANeuralNetworksModel} for information on multithreaded usage. + * + * Available since API level 27. + * + * @param model The model to be destroyed. Passing NULL is acceptable and + * results in no operation. + */ +void ANeuralNetworksModel_free(ANeuralNetworksModel* model) __INTRODUCED_IN(27); + +/** + * Indicate that we have finished modifying a model. Required before + * calling {@link ANeuralNetworksCompilation_create} and + * {@link ANeuralNetworksCompilation_createForDevices}. + * + * An application must ensure that no other thread uses the model at the same + * time. + * + * This function must only be called once for a given model. + * + * See {@link ANeuralNetworksModel} for information on multithreaded usage. + * + * Available since API level 27. + * + * @param model The model to be finished. + * + * @return ANEURALNETWORKS_NO_ERROR if successful. + */ +int ANeuralNetworksModel_finish(ANeuralNetworksModel* model) __INTRODUCED_IN(27); + +/** + * Add an operand to a model. + * + * The order in which the operands are added is important. The first one added + * to a model will have the index value 0, the second 1, etc. 
These indexes are + * used as operand identifiers in + * {@link ANeuralNetworksModel_addOperation}, + * {@link ANeuralNetworksModel_identifyInputsAndOutputs}, + * {@link ANeuralNetworksModel_setOperandValue}, + * {@link ANeuralNetworksModel_setOperandValueFromMemory}, + * {@link ANeuralNetworksExecution_setInput}, + * {@link ANeuralNetworksExecution_setInputFromMemory}, + * {@link ANeuralNetworksExecution_setOutput} and + * {@link ANeuralNetworksExecution_setOutputFromMemory}. + * + * <p>Every operand must be referenced in exactly one of the following + * ways:<ul> + * <li>It is identified as a model input with + * {@link ANeuralNetworksModel_identifyInputsAndOutputs}.</li> + * <li>It is identified as a constant with + * {@link ANeuralNetworksModel_setOperandValue} or + * {@link ANeuralNetworksModel_setOperandValueFromMemory}.</li> + * <li>It is identified as an output of exactly one operation with + * {@link ANeuralNetworksModel_addOperation}.</li></ul></p> + * <p>An operand that is identified as a model input or as a constant + * must not also be identified as a model output with + * {@link ANeuralNetworksModel_identifyInputsAndOutputs}.</p> + * + * To build a model that can accommodate inputs of various sizes, as + * you may want to do for a CNN, leave unspecified the dimensions that + * will vary at run time. If you do so, fully specify dimensions + * when calling {@link ANeuralNetworksExecution_setInput} or + * {@link ANeuralNetworksExecution_setInputFromMemory}. + * + * Attempting to modify a model once {@link ANeuralNetworksModel_finish} has been + * called will return an error. + * + * See {@link ANeuralNetworksModel} for information on multithreaded usage. + * + * Available since API level 27. + * + * @param model The model to be modified. + * @param type The {@link ANeuralNetworksOperandType} that describes the shape + * of the operand. 
Neither the {@link ANeuralNetworksOperandType} + * nor the dimensions it points to need to outlive the call to + * {@link ANeuralNetworksModel_addOperand}. + * + * @return ANEURALNETWORKS_NO_ERROR if successful. + */ +int ANeuralNetworksModel_addOperand(ANeuralNetworksModel* model, + const ANeuralNetworksOperandType* type) __INTRODUCED_IN(27); + +/** + * Sets an operand to a constant value. + * + * Values of length smaller or equal to + * {@link ANEURALNETWORKS_MAX_SIZE_OF_IMMEDIATELY_COPIED_VALUES} + * are immediately copied into the model. + * + * For values of length greater than + * {@link ANEURALNETWORKS_MAX_SIZE_OF_IMMEDIATELY_COPIED_VALUES}, a pointer to + * the buffer is stored within the model. The application must not change the + * content of this region until all executions using this model have + * completed. As the data may be copied during processing, modifying the data + * after this call yields undefined results. The provided buffer must outlive + * this model. + * + * For large tensors, using {@link ANeuralNetworksModel_setOperandValueFromMemory} + * is likely to be more efficient. + * + * To indicate that an optional operand should be considered missing, + * pass nullptr for buffer and 0 for length. + * + * Attempting to modify a model once {@link ANeuralNetworksModel_finish} has been + * called will return an error. + * + * See {@link ANeuralNetworksModel} for information on multithreaded usage. + * + * Available since API level 27. + * + * @param model The model to be modified. + * @param index The index of the model operand we're setting. + * @param buffer A pointer to the data to use. + * @param length The size in bytes of the data value. + * + * @return ANEURALNETWORKS_NO_ERROR if successful. + */ +int ANeuralNetworksModel_setOperandValue(ANeuralNetworksModel* model, int32_t index, + const void* buffer, size_t length) __INTRODUCED_IN(27); + +#if __ANDROID_API__ >= 29 + +/** + * Sets an operand's per channel quantization parameters. 
+ * + * Sets parameters required by a tensor of type + * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL}. + * This function must be called for every tensor of type + * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} before + * calling {@link ANeuralNetworksModel_finish}. + * + * Available since API level 29. + * + * @param model The model to be modified. + * @param index The index of the model operand we're setting. + * @param channelQuant The per channel quantization parameters for the operand. + * No memory in this struct needs to outlive the call to + * this function. + * + * @return ANEURALNETWORKS_NO_ERROR if successful. + */ +int ANeuralNetworksModel_setOperandSymmPerChannelQuantParams( + ANeuralNetworksModel* model, int32_t index, + const ANeuralNetworksSymmPerChannelQuantParams* channelQuant) __INTRODUCED_IN(29); + +#endif // __ANDROID_API__ >= 29 + +/** + * Sets an operand to a value stored in a memory object. + * + * The content of the memory is not copied. A reference to that memory is stored + * inside the model. The application must not change the content of the memory + * region until all executions using this model have completed. As the data may + * be copied during processing, modifying the data after this call yields + * undefined results. + * + * <p>The provided memory must outlive this model.</p> + * + * To indicate that an optional operand should be considered missing, + * use {@link ANeuralNetworksModel_setOperandValue} instead, passing nullptr for buffer. + * + * It is disallowed to set an operand value with shared memory backed by an AHardwareBuffer + * of a format other than AHARDWAREBUFFER_FORMAT_BLOB. + * + * It is disallowed to set an operand value with memory created from + * {@link ANeuralNetworksMemory_createFromDesc}. + * + * Attempting to modify a model once {@link ANeuralNetworksModel_finish} has been + * called will return an error. + * + * See {@link ANeuralNetworksModel} for information on multithreaded usage. 
+ * See {@link ANeuralNetworksMemory_createFromAHardwareBuffer} for information on + * AHardwareBuffer usage. + * + * Available since API level 27. + * + * @param model The model to be modified. + * @param index The index of the model operand we're setting. + * @param memory The memory containing the data. + * @param offset This specifies the location of the data within the memory. + * The offset is in bytes from the start of memory. + * @param length The size in bytes of the data value. + * + * @return ANEURALNETWORKS_NO_ERROR if successful. + */ +int ANeuralNetworksModel_setOperandValueFromMemory(ANeuralNetworksModel* model, int32_t index, + const ANeuralNetworksMemory* memory, + size_t offset, size_t length) + __INTRODUCED_IN(27); + +#if __ANDROID_API__ >= 30 + +/** + * Sets an operand to a value that is a reference to another NNAPI model. + * + * The referenced model must already have been finished by a call to + * {@link ANeuralNetworksModel_finish}. + * + * The {@link ANeuralNetworksModel_relaxComputationFloat32toFloat16} setting of + * referenced models is overridden by that setting of the main model of a + * compilation. + * + * The referenced model must outlive the model referring to it. + * + * Attempting to modify a model once {@link ANeuralNetworksModel_finish} has + * been called will return an error. + * + * See {@link ANeuralNetworksModel} for information on multithreaded usage. + * + * Available since API level 30. + * + * @param model The model to be modified. + * @param index The index of the model operand we're setting. + * @param value The model to be referenced. + * + * @return ANEURALNETWORKS_NO_ERROR if successful. + */ +int ANeuralNetworksModel_setOperandValueFromModel(ANeuralNetworksModel* model, int32_t index, + const ANeuralNetworksModel* value) + __INTRODUCED_IN(30); + +#endif // __ANDROID_API__ >= 30 + +/** + * Add an operation to a model. + * + * @param model The model to be modified. 
+ * @param type The {@link ANeuralNetworksOperationType} of the operation. + * @param inputCount The number of entries in the inputs array. + * @param inputs An array of indexes identifying each operand. + * @param outputCount The number of entries in the outputs array. + * @param outputs An array of indexes identifying each operand. + * + * The operands specified by inputs and outputs must have been + * previously added by calls to {@link ANeuralNetworksModel_addOperand}. + * + * Attempting to modify a model once {@link ANeuralNetworksModel_finish} has been + * called will return an error. + * + * See {@link ANeuralNetworksModel} for information on multithreaded usage. + * + * Available since API level 27. + * + * @return ANEURALNETWORKS_NO_ERROR if successful. + */ +int ANeuralNetworksModel_addOperation(ANeuralNetworksModel* model, + ANeuralNetworksOperationType type, uint32_t inputCount, + const uint32_t* inputs, uint32_t outputCount, + const uint32_t* outputs) __INTRODUCED_IN(27); + +/** + * Specifies which operands will be the model's inputs and + * outputs. Every model must have at least one input and one output. + * + * An operand cannot be used for both input and output. Doing so will + * return an error. + * + * @param model The model to be modified. + * @param inputCount The number of entries in the inputs array. + * @param inputs An array of indexes identifying the input operands. + * @param outputCount The number of entries in the outputs array. + * @param outputs An array of indexes identifying the output operands. + * + * The operands specified by inputs and outputs must have been + * previously added by calls to {@link ANeuralNetworksModel_addOperand}. + * + * Attempting to modify a model once {@link ANeuralNetworksModel_finish} has been + * called will return an error. + * + * See {@link ANeuralNetworksModel} for information on multithreaded usage. + * + * Available since API level 27. 
+ * + */ +int ANeuralNetworksModel_identifyInputsAndOutputs(ANeuralNetworksModel* model, uint32_t inputCount, + const uint32_t* inputs, uint32_t outputCount, + const uint32_t* outputs) __INTRODUCED_IN(27); + +#if __ANDROID_API__ >= 28 + +/** + * Specifies whether {@link ANEURALNETWORKS_TENSOR_FLOAT32} is allowed to be + * calculated with range and/or precision as low as that of the IEEE 754 16-bit + * floating-point format. By default, {@link ANEURALNETWORKS_TENSOR_FLOAT32} + * must be calculated using at least the range and precision of the IEEE 754 + * 32-bit floating-point format. + * + * The relaxComputationFloat32toFloat16 setting of the main model of + * a compilation overrides the values of the referenced models. + * + * @param model The model to be modified. + * @param allow 'true' indicates {@link ANEURALNETWORKS_TENSOR_FLOAT32} may be + * calculated with range and/or precision as low as that of the + * IEEE 754 16-bit floating point format. 'false' indicates + * {@link ANEURALNETWORKS_TENSOR_FLOAT32} must be calculated using + * at least the range and precision of the IEEE 754 32-bit floating + * point format. + * + * Attempting to modify a model once {@link ANeuralNetworksModel_finish} has been + * called will return an error. + * + * Available since API level 28. + * + * See {@link ANeuralNetworksModel} for information on multithreaded usage. + */ +int ANeuralNetworksModel_relaxComputationFloat32toFloat16(ANeuralNetworksModel* model, bool allow) + __INTRODUCED_IN(28); + +#endif // __ANDROID_API__ >= 28 + +/** + * Create a {@link ANeuralNetworksCompilation} to compile the given model. + * + * The model passed to this function is termed the "main model" of the + * compilation, to distinguish it from other models referred to by an Operand + * of type {@link ANEURALNETWORKS_MODEL} within this compilation. + * + * <p>This function only creates the object. 
Compilation is only performed once + * {@link ANeuralNetworksCompilation_finish} is invoked.</p> + * + * <p>{@link ANeuralNetworksCompilation_finish} should be called once + * all desired properties have been set on the compilation.</p> + * + * <p>{@link ANeuralNetworksCompilation_free} should be called once the compilation + * is no longer needed.</p> + * + * <p>The provided model must outlive the compilation.</p> + * + * The model must already have been finished by a call to + * {@link ANeuralNetworksModel_finish}. + * + * See {@link ANeuralNetworksCompilation} for information on multithreaded usage. + * + * Available since API level 27. + * + * @param model The {@link ANeuralNetworksModel} to be compiled. + * @param compilation The newly created object or NULL if unsuccessful. + * + * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA + * if the model is invalid. + */ +int ANeuralNetworksCompilation_create(ANeuralNetworksModel* model, + ANeuralNetworksCompilation** compilation) __INTRODUCED_IN(27); + +/** + * Destroy a compilation. + * + * The compilation need not have been finished by a call to + * {@link ANeuralNetworksCompilation_finish}. + * + * See {@link ANeuralNetworksCompilation} for information on multithreaded usage. + * + * Available since API level 27. + * + * @param compilation The compilation to be destroyed. Passing NULL is acceptable and + * results in no operation. + */ +void ANeuralNetworksCompilation_free(ANeuralNetworksCompilation* compilation) __INTRODUCED_IN(27); + +/** + * Sets the execution preference. + * + * <p>Provides guidance to the runtime when trade-offs are possible. By default the runtime + * uses PREFER_SINGLE_FAST_ANSWER</p> + * + * See {@link ANeuralNetworksCompilation} for information on multithreaded usage. + * + * Available since API level 27. + * + * @param compilation The compilation to be modified. 
+ * @param preference Either {@link PREFER_LOW_POWER}, + * {@link PREFER_SINGLE_FAST_ANSWER}, or + * {@link PREFER_SUSTAINED_SPEED}. + * + * @return ANEURALNETWORKS_NO_ERROR if successful. + */ +int ANeuralNetworksCompilation_setPreference(ANeuralNetworksCompilation* compilation, + int32_t preference) __INTRODUCED_IN(27); + +/** + * Indicate that we have finished modifying a compilation. Required before + * calling {@link ANeuralNetworksBurst_create} or + * {@link ANeuralNetworksExecution_create}. + * + * An application must ensure that no other thread uses the compilation at the + * same time. + * + * This function must only be called once for a given compilation. + * + * If {@link ANeuralNetworksCompilation_setTimeout} was called on this + * compilation, and the compilation is not able to be finished before the + * timeout duration is exceeded, then compilation may be aborted, in which case + * {@link ANEURALNETWORKS_MISSED_DEADLINE_*} will be returned. + * + * See {@link ANeuralNetworksCompilation} for information on multithreaded usage. + * + * Available since API level 27. + * + * @param compilation The compilation to be finished. + * + * @return ANEURALNETWORKS_NO_ERROR if successful. + */ +int ANeuralNetworksCompilation_finish(ANeuralNetworksCompilation* compilation) __INTRODUCED_IN(27); + +#if __ANDROID_API__ >= 30 + +/** + * Set the execution priority. + * + * Execution priorities are relative to other executions created by the same + * application (specifically same uid) for the same device. Specifically, + * priorities of executions from one application will not affect executions from + * another application. Similarly, priorities of executions on one device will + * not affect executions on another device. + * + * Higher priority executions may use more compute resources than lower priority + * executions, and may preempt or starve lower priority executions. + * + * See {@link ANeuralNetworksCompilation} for information on multithreaded usage. 
+ * + * Available since API level 30. + * + * @param compilation The compilation to be modified. + * @param priority The relative priority of the execution compared to other + * executions created by the application. Must be one of + * ANEURALNETWORKS_PRIORITY_*. + * + * @return ANEURALNETWORKS_NO_ERROR if successful. + */ +int ANeuralNetworksCompilation_setPriority(ANeuralNetworksCompilation* compilation, int priority) + __INTRODUCED_IN(30); + +/** + * Set the maximum expected duration for compiling the model. + * + * If the device is not able to complete the compilation within the specified + * duration, the compilation may be aborted. The timeout duration begins at the + * call to {@link ANeuralNetworksCompilation_finish}. + * + * This timeout duration acts as a hint to drivers, and can be used to both free + * up compute resources within the driver and return control back to the + * application quicker than is possible without the hint. It enables drivers + * that are able to estimate how long a compilation will take to abort the + * compilation before it has even started if the driver believes the compilation + * cannot be completed within the timeout duration. Similarly, it enables + * drivers to abort an ongoing compilation if it is taking too long. However, + * this call does not guarantee that the compilation will complete or abort + * within the timeout duration. + * + * By default (i.e., unless ANeuralNetworksCompilation_setTimeout is called), + * the timeout duration for compiling the model is considered infinite. + * + * The {@link ANeuralNetworksCompilation} must have been created with + * {@link ANeuralNetworksCompilation_createForDevices} with numDevices = 1, + * otherwise this function will fail with ANEURALNETWORKS_BAD_DATA. If the + * device has a feature level reported by + * {@link ANeuralNetworksDevice_getFeatureLevel} that is lower than 30, then the + * timeout duration hint will be ignored. 
+ * + * See {@link ANeuralNetworksCompilation} for information on multithreaded usage. + * + * @param compilation The compilation to be modified. + * @param duration The maximum amount of time in nanoseconds that is expected to + * be spent finishing a compilation. If this duration is exceeded, the + * compilation may be aborted. If set to 0, the timeout duration is + * considered infinite. + * + * @return ANEURALNETWORKS_NO_ERROR if successful. + * + * Available since API level 30. + */ +int ANeuralNetworksCompilation_setTimeout(ANeuralNetworksCompilation* compilation, + uint64_t duration) __INTRODUCED_IN(30); + +#endif // __ANDROID_API__ >= 30 + +/** + * Create a {@link ANeuralNetworksExecution} to apply the given compilation. + * This only creates the object. Computation is only performed once + * {@link ANeuralNetworksExecution_burstCompute}, + * {@link ANeuralNetworksExecution_compute}, + * {@link ANeuralNetworksExecution_startCompute} or + * {@link ANeuralNetworksExecution_startComputeWithDependencies} is invoked. + * + * <p>The provided compilation must outlive the execution.</p> + * + * See {@link ANeuralNetworksExecution} for information on multithreaded usage. + * + * Available since API level 27. + * + * @param compilation The {@link ANeuralNetworksCompilation} to be evaluated. + * @param execution The newly created object or NULL if unsuccessful. + * + * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA + * if the compilation is invalid. + */ +int ANeuralNetworksExecution_create(ANeuralNetworksCompilation* compilation, + ANeuralNetworksExecution** execution) __INTRODUCED_IN(27); + +/** + * Destroy an execution. 
+ * + * <p>The execution need not have been scheduled by a call to + * {@link ANeuralNetworksExecution_burstCompute}, + * {@link ANeuralNetworksExecution_compute}, + * {@link ANeuralNetworksExecution_startCompute} or + * {@link ANeuralNetworksExecution_startComputeWithDependencies}; but if it has been scheduled, + * then the application must not call {@link ANeuralNetworksExecution_free} + * until the execution has completed (i.e., + * {@link ANeuralNetworksExecution_burstCompute}, + * {@link ANeuralNetworksExecution_compute}, or + * {@link ANeuralNetworksEvent_wait} has returned). + * + * See {@link ANeuralNetworksExecution} for information on multithreaded usage. + * + * Available since API level 27. + * + * @param execution The execution to be destroyed. Passing NULL is acceptable and + * results in no operation. + */ +void ANeuralNetworksExecution_free(ANeuralNetworksExecution* execution) __INTRODUCED_IN(27); + +/** + * Associate a user buffer with an input of the model of the + * {@link ANeuralNetworksExecution}. Evaluation of the execution must not have + * been scheduled. Once evaluation of the execution has been scheduled, the + * application must not change the content of the buffer until the execution has + * completed. Evaluation of the execution will not change the content of the + * buffer. + * + * <p>The provided buffer must outlive the execution.</p> + * + * If the input is optional, you can indicate that it is omitted by + * passing nullptr for buffer and 0 for length. + * + * See {@link ANeuralNetworksExecution} for information on multithreaded usage. + * + * Available since API level 27. + * + * @param execution The execution to be modified. + * @param index The index of the input argument we are setting. It is + * an index into the lists passed to + * {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not + * the index associated with + * {@link ANeuralNetworksModel_addOperand}. 
+ * @param type The {@link ANeuralNetworksOperandType} of the + * operand. Unless the input is omitted, this should be + * used to specify the dimensions that were left + * unspecified when the operand was added to the + * model. All other properties of the type must be the + * same as specified in the model. If the type is the same + * as specified when the model was built, NULL can be + * passed. Neither the {@link ANeuralNetworksOperandType} + * nor the dimensions it points to need to outlive the call + * to {@link ANeuralNetworksExecution_setInput}. + * @param buffer The buffer containing the data. + * @param length The length in bytes of the buffer. + * + * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA if the + * name is not recognized or the buffer is too small for the input. + */ +int ANeuralNetworksExecution_setInput(ANeuralNetworksExecution* execution, int32_t index, + const ANeuralNetworksOperandType* type, const void* buffer, + size_t length) __INTRODUCED_IN(27); + +/** + * Associate a region of a memory object with an input of the model of the + * {@link ANeuralNetworksExecution}. Evaluation of the execution must not have + * been scheduled. Once evaluation of the execution has been scheduled, the + * application must not change the content of the region until the execution has + * completed. Evaluation of the execution will not change the content of the + * region. + * + * <p>The provided memory must outlive the execution.</p> + * + * If the input is optional, you can indicate that it is omitted by + * using {@link ANeuralNetworksExecution_setInput} instead, passing nullptr for + * buffer and 0 for length. + * + * See {@link ANeuralNetworksExecution} for information on multithreaded usage. + * See {@link ANeuralNetworksMemory_createFromAHardwareBuffer} for information on + * AHardwareBuffer usage. 
+ * See {@link ANeuralNetworksMemory_createFromDesc} for information on usage of memory objects + * created from memory descriptors. + * + * Available since API level 27. + * + * @param execution The execution to be modified. + * @param index The index of the input argument we are setting. It is + * an index into the lists passed to + * {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not + * the index associated with {@link ANeuralNetworksModel_addOperand}. + * @param type The {@link ANeuralNetworksOperandType} of the + * operand. This should be used to specify the dimensions + * that were left unspecified when the operand was added + * to the model. All other properties of the type must be + * the same as specified in the model. If the type is the + * same as specified when the model was built, NULL can be + * passed. Neither the {@link ANeuralNetworksOperandType} + * nor the dimensions it points to need to outlive the call + * to {@link ANeuralNetworksExecution_setInputFromMemory}. + * @param memory The memory containing the data. + * @param offset This specifies the location of the data within the memory. + * The offset is in bytes from the start of memory. + * @param length The size in bytes of the data value. + * + * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA if the + * name is not recognized or the buffer is too small for the input. + */ +int ANeuralNetworksExecution_setInputFromMemory(ANeuralNetworksExecution* execution, int32_t index, + const ANeuralNetworksOperandType* type, + const ANeuralNetworksMemory* memory, size_t offset, + size_t length) __INTRODUCED_IN(27); + +/** + * Associate a user buffer with an output of the model of the + * {@link ANeuralNetworksExecution}. Evaluation of the execution must not have + * been scheduled. Once evaluation of the execution has been scheduled, the + * application must not change the content of the buffer until the execution has + * completed. 
+ * + * If the output is optional, you can indicate that it is omitted by + * passing nullptr for buffer and 0 for length. + * + * <p>The provided buffer must outlive the execution.</p> + * + * See {@link ANeuralNetworksExecution} for information on multithreaded usage. + * + * Available since API level 27. + * + * @param execution The execution to be modified. + * @param index The index of the output argument we are setting. It is + * an index into the lists passed to + * {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not + * the index associated with {@link ANeuralNetworksModel_addOperand}. + * @param type The {@link ANeuralNetworksOperandType} of the + * operand. Unless the output is omitted, this should be + * used to specify the dimensions that were left + * unspecified when the operand was added to the + * model. All other properties of the type must be the + * same as specified in the model. If the type is the same + * as specified when the model was built, NULL can be + * passed. Neither the {@link ANeuralNetworksOperandType} + * nor the dimensions it points to need to outlive the call + * to {@link ANeuralNetworksExecution_setOutput}. + * Since API level 29, the output operand can have unspecified + * dimensions or rank to be deduced dynamically during the execution. + * However, the user must provide a large enough buffer. The user + * can retrieve the output dimensional information after the execution + * by {@link ANeuralNetworksExecution_getOutputOperandRank} and + * {@link ANeuralNetworksExecution_getOutputOperandDimensions}. + * @param buffer The buffer where the data is to be written. + * @param length The length in bytes of the buffer. + * + * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA if the + * name is not recognized or the buffer is too small for the output. 
+ */ +int ANeuralNetworksExecution_setOutput(ANeuralNetworksExecution* execution, int32_t index, + const ANeuralNetworksOperandType* type, void* buffer, + size_t length) __INTRODUCED_IN(27); + +/** + * Associate a region of a memory object with an output of the model of the + * {@link ANeuralNetworksExecution}. Evaluation of the execution must not have + * been scheduled. Once evaluation of the execution has been scheduled, the + * application must not change the content of the region until the execution has + * completed. + * + * If the output is optional, you can indicate that it is omitted by + * using {@link ANeuralNetworksExecution_setOutput} instead, passing nullptr for + * buffer and 0 for length. + * + * <p>The provided memory must outlive the execution.</p> + * + * See {@link ANeuralNetworksExecution} for information on multithreaded usage. + * See {@link ANeuralNetworksMemory_createFromAHardwareBuffer} for information on + * AHardwareBuffer usage. + * See {@link ANeuralNetworksMemory_createFromDesc} for information on usage of memory objects + * created from memory descriptors. + * + * Available since API level 27. + * + * @param execution The execution to be modified. + * @param index The index of the output argument we are setting. It is + * an index into the lists passed to + * {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not + * the index associated with {@link ANeuralNetworksModel_addOperand}. + * @param type The {@link ANeuralNetworksOperandType} of the operand. This should be + * used to specify the dimensions that were left + * unspecified when the operand was added to the + * model. All other properties of the type must be the + * same as specified in the model. If the type is the same + * as specified when the model was built, NULL can be + * passed. Neither the {@link ANeuralNetworksOperandType} + * nor the dimensions it points to need to outlive the call + * to {@link ANeuralNetworksExecution_setOutputFromMemory}. 
+ * Since API level 29, the output operand can have unspecified + * dimensions or rank to be deduced dynamically during the execution. + * However, the user must provide a large enough memory. The user + * can retrieve the output dimensional information after the execution + * by {@link ANeuralNetworksExecution_getOutputOperandRank} and + * {@link ANeuralNetworksExecution_getOutputOperandDimensions}. + * @param memory The memory where the data is to be stored. + * @param offset This specifies the location of the data within the memory. + * The offset is in bytes from the start of memory. + * @param length The length in bytes of the data value. + * + * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA if the + * name is not recognized or the buffer is too small for the output. + */ +int ANeuralNetworksExecution_setOutputFromMemory(ANeuralNetworksExecution* execution, int32_t index, + const ANeuralNetworksOperandType* type, + const ANeuralNetworksMemory* memory, size_t offset, + size_t length) __INTRODUCED_IN(27); + +/** + * Schedule asynchronous evaluation of the execution. + * + * <p>Schedules asynchronous evaluation of the execution. Once the execution + * has completed and the outputs are ready to be consumed, the returned event + * will be signaled. Use {@link ANeuralNetworksEvent_wait} to wait for that + * event. + * </p> + * + * ANeuralNetworksEvent_wait must be called to recuperate the resources used + * by the execution. + * + * If {@link ANeuralNetworksExecution_setTimeout} was called on this execution, + * and the execution is not able to complete before the timeout duration is + * exceeded, then execution may be aborted, in which case + * {@link ANEURALNETWORKS_MISSED_DEADLINE_*} will be returned through + * {@link ANeuralNetworksExecution_startCompute} or + * {@link ANeuralNetworksEvent_wait} on the event object. 
If the device has a + * feature level reported by {@link ANeuralNetworksDevice_getFeatureLevel} that + * is lower than 30, then the timeout duration hint will be ignored. + * + * If this execution contains a {@link ANEURALNETWORKS_WHILE} operation, and + * the condition model does not output false within the loop timeout duration, + * then execution will be aborted and {@link ANEURALNETWORKS_MISSED_DEADLINE_*} + * will be returned through {@link ANeuralNetworksEvent_wait} on the event + * object. + * + * If the device can detect before the execution has started that the execution + * will not complete within the timeout duration, the device may choose to skip + * the execution and instead return {@link ANEURALNETWORKS_MISSED_DEADLINE_*}. + * + * See {@link ANeuralNetworksExecution} for information on multithreaded usage. + * + * See {@link ANeuralNetworksExecution_compute} for synchronous execution. + * See {@link ANeuralNetworksExecution_burstCompute} for burst synchronous execution. + * See {@link ANeuralNetworksExecution_startComputeWithDependencies} for + * asynchronous execution with dependencies. + * + * Available since API level 27. + * + * @param execution The execution to be scheduled and executed. + * @param event The event that will be signaled on completion. event is set to + * NULL if there's an error. + * + * @return ANEURALNETWORKS_NO_ERROR if the evaluation is successfully scheduled. + */ +int ANeuralNetworksExecution_startCompute(ANeuralNetworksExecution* execution, + ANeuralNetworksEvent** event) __INTRODUCED_IN(27); + +#if __ANDROID_API__ >= 30 + +/** + * Set the maximum expected duration of the specified execution. + * + * If the device is not able to complete the execution within the specified + * duration, the execution may be aborted. 
The timeout duration begins at a + * call to one of: + * - {@link ANeuralNetworksExecution_burstCompute} + * - {@link ANeuralNetworksExecution_compute} + * - {@link ANeuralNetworksExecution_startCompute} + * - {@link ANeuralNetworksExecution_startComputeWithDependencies} + * + * This timeout duration acts as a hint to drivers, and can be used to both free + * up compute resources within the driver and return control back to the + * application quicker than is possible without the hint. It enables drivers + * that are able to estimate how long an execution will take to abort the + * execution before it has even started if the driver believes the execution + * cannot be completed within the timeout duration. Similarly, it enables + * drivers to abort an ongoing execution if it is taking too long. However, this + * call does not guarantee that the execution will complete or abort within the + * timeout duration. + * + * By default (i.e., unless ANeuralNetworksExecution_setTimeout is called), + * the timeout duration for execution is considered infinite. + * + * The {@link ANeuralNetworksExecution} must have been created from an + * {@link ANeuralNetworksCompilation} which in turn was created from + * {@link ANeuralNetworksCompilation_createForDevices} with numDevices = 1, + * otherwise this function will fail with ANEURALNETWORKS_BAD_DATA. If the + * device has a feature level reported by + * {@link ANeuralNetworksDevice_getFeatureLevel} that is lower than 30, then the + * timeout duration hint will be ignored. + * + * See {@link ANeuralNetworksExecution} for information on multithreaded usage. + * + * @param execution The execution to be modified. + * @param duration The maximum amount of time in nanoseconds that is expected to + * be spent executing a model. If this duration is exceeded, the execution + * may be aborted. If set to 0, the timeout duration is considered infinite. + * + * @return ANEURALNETWORKS_NO_ERROR if successful. 
+ * + * Available since API level 30. + */ +int ANeuralNetworksExecution_setTimeout(ANeuralNetworksExecution* execution, uint64_t duration) + __INTRODUCED_IN(30); + +/** + * Set the maximum duration of WHILE loops in the specified execution. + * + * This is a fuzzy per-loop timeout intended to prevent infinite loops. + * + * If a WHILE loop condition model does not output false within the specified + * duration, the execution will be aborted. + * + * See {@link ANeuralNetworks_getDefaultLoopTimeout} and + * {@link ANeuralNetworks_getMaximumLoopTimeout} for the default + * and maximum timeout values. + * + * See {@link ANeuralNetworksExecution} for information on multithreaded usage. + * + * @param execution The execution to be modified. + * @param duration The maximum amount of time in nanoseconds that can be spent + * executing a WHILE loop. If the specified duration value exceeds the value + * produced by {@link ANeuralNetworks_getMaximumLoopTimeout}, it will be + * overridden by that value. + * + * @return ANEURALNETWORKS_NO_ERROR if successful. + * ANEURALNETWORKS_BAD_STATE if execution has started. + * ANEURALNETWORKS_UNEXPECTED_NULL if execution is NULL. + * + * Available since API level 30. + */ +int ANeuralNetworksExecution_setLoopTimeout(ANeuralNetworksExecution* execution, uint64_t duration) + __INTRODUCED_IN(30); + +/** + * Get the default timeout value for WHILE loops. + * + * @return The default timeout value in nanoseconds. + * + * Available since API level 30. + */ +uint64_t ANeuralNetworks_getDefaultLoopTimeout() __INTRODUCED_IN(30); + +/** + * Get the maximum timeout value for WHILE loops. + * + * @return The maximum timeout value in nanoseconds. + * + * Available since API level 30. + */ +uint64_t ANeuralNetworks_getMaximumLoopTimeout() __INTRODUCED_IN(30); + +#endif // __ANDROID_API__ >= 30 + +/** + * Waits until the execution completes. + * + * More than one thread can wait on an event. 
When the execution completes, + * all threads will be released. + * + * If {@link ANeuralNetworksExecution_setTimeout} was called on the execution + * corresponding to this event, and the execution is not able to complete + * before the duration is exceeded, the execution may be aborted, in which case + * {@link ANEURALNETWORKS_MISSED_DEADLINE_*} will be returned here. + * + * If the execution contains a {@link ANEURALNETWORKS_WHILE} operation, and + * the condition model does not output false within the loop timeout duration, + * the execution will be aborted, and {@link ANEURALNETWORKS_MISSED_DEADLINE_*} + * will be returned here. + * + * See {@link ANeuralNetworksExecution} for information on multithreaded usage. + * + * Available since API level 27. + * + * @param event The event that will be signaled on completion. + * @return ANEURALNETWORKS_NO_ERROR if the execution completed normally. + * ANEURALNETWORKS_UNMAPPABLE if the execution input or output memory cannot + * be properly mapped. + */ +int ANeuralNetworksEvent_wait(ANeuralNetworksEvent* event) __INTRODUCED_IN(27); + +/** + * Destroys the event. + * + * See {@link ANeuralNetworksExecution} for information on multithreaded usage. + * + * Available since API level 27. + * + * @param event The event object to be destroyed. Passing NULL is acceptable and + * results in no operation. + */ +void ANeuralNetworksEvent_free(ANeuralNetworksEvent* event) __INTRODUCED_IN(27); + +#endif // __ANDROID_API__ >= 27 + +#if __ANDROID_API__ >= 30 +/** + * Create a {@link ANeuralNetworksEvent} from a sync_fence file descriptor. + * + * The newly created ANeuralNetworksEvent does not take ownership of the provided sync_fence_fd, + * it will instead dup the provided sync_fence_fd and own the duplicate. + * + * @param sync_fence_fd The sync_fence file descriptor. + * @param event The newly created object or NULL if unsuccessful. + * + * @return ANEURALNETWORKS_NO_ERROR if successful. + * + * Available since API level 30. 
 + */ +int ANeuralNetworksEvent_createFromSyncFenceFd(int sync_fence_fd, ANeuralNetworksEvent** event) + __INTRODUCED_IN(30); + +/** + * Get sync_fence file descriptor from the event. + * + * If the ANeuralNetworksEvent is not backed by a sync fence, the sync_fence_fd + * will be set to -1, and ANEURALNETWORKS_BAD_DATA will be returned. + * + * See {@link ANeuralNetworksEvent_createFromSyncFenceFd} and + * {@link ANeuralNetworksExecution_startComputeWithDependencies} to see how to create + * an event backed by a sync fence. + * + * The user takes ownership of the returned fd, and must close the returned file descriptor when + * it is no longer needed. + * + * @param event An event that is backed by a sync fence. + * @param sync_fence_fd The sync_fence file descriptor. The file descriptor will + * be set to -1 if there is an error. + * + * @return ANEURALNETWORKS_NO_ERROR if successful. + * + * Available since API level 30. + */ +int ANeuralNetworksEvent_getSyncFenceFd(const ANeuralNetworksEvent* event, int* sync_fence_fd) + __INTRODUCED_IN(30); + +/** + * Schedule asynchronous evaluation of the execution with dependencies. + * + * The execution will wait for all the depending events to be signaled before + * starting the evaluation. Once the execution has completed and the outputs + * are ready to be consumed, the returned event will be signaled. Depending on which + * devices are handling the execution, the event could be backed by a sync fence. + * Use {@link ANeuralNetworksEvent_wait} to wait for that event. + * + * ANeuralNetworksEvent_wait must be called to recuperate the resources used + * by the execution. + * + * If parts of the execution are scheduled on devices that do not support fenced execution, + * the function call may wait for such parts to finish before returning. + * + * The function will return an error if any of the events in dependencies is already in a bad + * state. 
After the execution is scheduled, if any of the events in dependencies does not complete + * normally, the execution will fail, and {@link ANeuralNetworksEvent_wait} on the returned + * event will return an error. + * + * The function will return an error if any of the execution outputs has a tensor operand type + * that is not fully specified. + * + * The function can be passed a timeout duration in nanoseconds. This timeout + * duration acts as a hint to drivers in the same way that the timeout durations + * in {@link ANeuralNetworksCompilation_setTimeout} and {@link + * ANeuralNetworksExecution_setTimeout} act as hints to drivers. The duration + * begins when all waitFor sync fences have been signaled, and can be used + * together with {@link ANeuralNetworksExecution_setTimeout} which specifies the + * maximum timeout duration beginning at the call to + * {@link ANeuralNetworksExecution_startComputeWithDependencies}. + * If the duration is non-zero, the {@link ANeuralNetworksExecution} must have been created + * from an {@link ANeuralNetworksCompilation} which in turn was created from + * {@link ANeuralNetworksCompilation_createForDevices} with numDevices = 1, + * otherwise this function will fail with ANEURALNETWORKS_BAD_DATA. If either + * the timeout duration from {@link ANeuralNetworksExecution_setTimeout} or the + * timeout duration passed to this call is exceeded, the execution may be + * aborted, in which case {@link ANEURALNETWORKS_MISSED_DEADLINE_*} will be + * returned through {@link ANeuralNetworksExecution_startComputeWithDependencies} + * or {@link ANeuralNetworksEvent_wait} on the event object. If the device has a + * feature level reported by {@link ANeuralNetworksDevice_getFeatureLevel} that + * is lower than 30, then the timeout duration hints will be ignored. 
+ * + * If this execution contains a {@link ANEURALNETWORKS_WHILE} operation, and + * the condition model does not output false within the loop timeout duration, + * then execution will be aborted and {@link ANEURALNETWORKS_MISSED_DEADLINE_*} + * will be returned through {@link ANeuralNetworksEvent_wait} on the event + * object. + * + * See {@link ANeuralNetworksExecution} for information on multithreaded usage. + * + * See {@link ANeuralNetworksExecution_compute} for synchronous execution. + * See {@link ANeuralNetworksExecution_burstCompute} for burst synchronous execution. + * See {@link ANeuralNetworksExecution_startCompute} for regular asynchronous execution. + * + * @param execution The execution to be scheduled and executed. + * @param dependencies A set of depending events. The actual evaluation will not start + * until all the events are signaled. + * @param num_dependencies The number of events in the dependencies set. + * @param duration The maximum amount of time in nanoseconds that is expected to + * be spent executing the model after all dependencies are + * signaled. If set to 0, the timeout duration is considered + * infinite. + * @param event The event that will be signaled on completion. event is set to + * NULL if there's an error. + * + * @return ANEURALNETWORKS_NO_ERROR if the evaluation is successfully scheduled. + * + * Available since API level 30. + */ +int ANeuralNetworksExecution_startComputeWithDependencies( + ANeuralNetworksExecution* execution, const ANeuralNetworksEvent* const* dependencies, + uint32_t num_dependencies, uint64_t duration, ANeuralNetworksEvent** event) + __INTRODUCED_IN(30); + +#endif // __ANDROID_API__ >= 30 + +__END_DECLS + +#endif // ANDROID_FRAMEWORKS_ML_NN_RUNTIME_NEURAL_NETWORKS_H + +/** @} */
diff --git a/tools/api/NeuralNetworksTypes.t b/tools/api/NeuralNetworksTypes.t deleted file mode 100644 index a562fc7..0000000 --- a/tools/api/NeuralNetworksTypes.t +++ /dev/null
@@ -1,728 +0,0 @@ -%% template file for generating NeuralNetworksTypes.h. -%% see README.md. -/* - * Copyright (C) 2017 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * @addtogroup NeuralNetworks - * @{ - */ - -/** - * @file NeuralNetworksTypes.h - */ - -#ifndef ANDROID_FRAMEWORKS_ML_NN_RUNTIME_NEURAL_NETWORKS_TYPES_H -#define ANDROID_FRAMEWORKS_ML_NN_RUNTIME_NEURAL_NETWORKS_TYPES_H - -/****************************************************************** - * - * IMPORTANT NOTICE: - * - * This file is part of Android's set of stable system headers - * exposed by the Android NDK (Native Development Kit). - * - * Third-party source AND binary code relies on the definitions - * here to be FROZEN ON ALL UPCOMING PLATFORM RELEASES. - * - * - DO NOT MODIFY ENUMS (EXCEPT IF YOU ADD NEW 32-BIT VALUES) - * - DO NOT MODIFY CONSTANTS OR FUNCTIONAL MACROS - * - DO NOT CHANGE THE SIGNATURE OF FUNCTIONS IN ANY WAY - * - DO NOT CHANGE THE LAYOUT OR SIZE OF STRUCTURES - */ - -#include <android/hardware_buffer.h> -#include <stdbool.h> -#include <stddef.h> -#include <stdint.h> -#include <sys/cdefs.h> - -__BEGIN_DECLS - -%insert Operand_1.0_Comment -typedef enum { -%insert Operand_1.0 -%insert Operand_1.2 -%insert Operand_1.3 -} OperandCode; - -%insert Operation_1.0_Comment -typedef enum { - // Operations below are available since NNAPI feature level 1. 
- -%insert Operation_1.0 - - // Operations below are available since NNAPI feature level 2. - -%insert Operation_1.1 - - // Operations below are available since NNAPI feature level 3. - -%insert Operation_1.2 - - // Operations below are available since NNAPI feature level 4. - -%insert Operation_1.3 -} OperationCode; - -%insert FusedActivationFunc - -/** - * Implicit padding algorithms. - * - * - * Available since NNAPI feature level 1. - */ -typedef enum { - /** - * SAME padding. - * Padding on both ends are the "same": - * padding_to_beginning = total_padding / 2 - * padding_to_end = (total_padding + 1)/2. - * i.e., for even number of padding, padding to both ends are exactly - * the same; for odd number of padding, padding to the ending is bigger - * than the padding to the beginning by 1. - * - * total_padding is a function of input, stride, dilation and filter size. - * It could be computed as follows: - * out_size = (input + stride - 1) / stride - * effective_filter_size = (filter_size - 1) * dilation + 1 - * needed_input = (out_size - 1) * stride + effective_filter_size - * total_padding = max(0, needed_input - input_size) - * The computation is the same for the horizontal and vertical directions. - */ - ANEURALNETWORKS_PADDING_SAME = 1, - - /** - * VALID padding. - * No padding. When the input size is not evenly divisible by - * the filter size, the input at the end that could not fill - * the whole filter tile will simply be ignored. - */ - ANEURALNETWORKS_PADDING_VALID = 2, -} PaddingCode; - -%insert ExecutionPreference - -%insert DeviceType - -/** - * NNAPI feature levels. - * - * Each update of the NNAPI specification yields a new NNAPI feature level enum value. - * NNAPI feature level corrseponds to an NNAPI specification version that a driver - * and/or the NNAPI runtime can implement. 
- * - * A feature level up to and including "FEATURE_LEVEL_5" maps directly to - * the Android API level that introduced the corresponding update of the NNAPI - * specification. Feature levels after Android API level 31 have no association with - * API level because the NNAPI specification can be updated between Android API - * releases. Outputs of {@link ANeuralNetworksDevice_getFeatureLevel} and - * {@link ANeuralNetworks_getRuntimeFeatureLevel} must be compared against - * these enum values instead of the Android API level. - */ -typedef enum { - /** NNAPI specification available in Android O-MR1, Android NNAPI feature level 1 */ - ANEURALNETWORKS_FEATURE_LEVEL_1 = 27, - /** NNAPI specification available in Android P, Android NNAPI feature level 2 */ - ANEURALNETWORKS_FEATURE_LEVEL_2 = 28, - /** NNAPI specification available in Android Q, Android NNAPI feature level 3 */ - ANEURALNETWORKS_FEATURE_LEVEL_3 = 29, - /** NNAPI specification available in Android R, Android NNAPI feature level 4 */ - ANEURALNETWORKS_FEATURE_LEVEL_4 = 30, - /** - * NNAPI specification available in Android S, Android NNAPI feature level 5. - * After Android S, the NNAPI specification can be updated between Android - * API releases. - */ - ANEURALNETWORKS_FEATURE_LEVEL_5 = 31, -} FeatureLevelCode; - -/** - * Result codes. - * - * <p>Any NNAPI function can return any result code, including result codes not - * currently documented. Any value other than {@link ANEURALNETWORKS_NO_ERROR} - * indicates a failure of some kind.</p> - * - * <p>Additional information about the nature of a failure can be obtained from - * the device log after enabling NNAPI debugging by setting the debug.nn.vlog - * property to 1, e.g., by calling "adb shell setprop debug.nn.vlog 1".</p> - * - * Available since NNAPI feature level 1. - */ -typedef enum { - /** - * Operation was successful. - */ - ANEURALNETWORKS_NO_ERROR = 0, - - /** - * Failure caused by not enough available memory. 
- */ - ANEURALNETWORKS_OUT_OF_MEMORY = 1, - - ANEURALNETWORKS_INCOMPLETE = 2, - - /** - * Failure caused by unexpected null argument. - */ - ANEURALNETWORKS_UNEXPECTED_NULL = 3, - - /** - * Failure caused by invalid function arguments, invalid model definition, - * invalid execution definition or invalid data at execution time. - */ - ANEURALNETWORKS_BAD_DATA = 4, - - /** - * Failure caused by failed model execution. - */ - ANEURALNETWORKS_OP_FAILED = 5, - - /** - * Failure caused by object being in the wrong state. - */ - ANEURALNETWORKS_BAD_STATE = 6, - - /** - * Failure caused by not being able to map a file into memory. - * This may be caused by a file descriptor not being mappable, or an AHardwareBuffer - * not supported by the device. - * Mitigate by reading its content into memory. - */ - ANEURALNETWORKS_UNMAPPABLE = 7, - - /** - * Failure caused by insufficient buffer size provided to a model output. - */ - ANEURALNETWORKS_OUTPUT_INSUFFICIENT_SIZE = 8, - - /** - * Failure caused by a device not being available. - */ - ANEURALNETWORKS_UNAVAILABLE_DEVICE = 9, - - /** - * Failure because a deadline could not be met for a task, but future - * deadlines may still be met for the same task after a short delay. - * - * Available since NNAPI feature level 4. - */ - ANEURALNETWORKS_MISSED_DEADLINE_TRANSIENT = 10, - - /** - * Failure because a deadline could not be met for a task, and future - * deadlines will likely also not be met for the same task even after a - * short delay. - * - * Available since NNAPI feature level 4. - */ - ANEURALNETWORKS_MISSED_DEADLINE_PERSISTENT = 11, - - /** - * Failure because of a resource limitation within the driver, but future - * calls for the same task may still succeed after a short delay. - * - * Available since NNAPI feature level 4. 
- */ - ANEURALNETWORKS_RESOURCE_EXHAUSTED_TRANSIENT = 12, - - /** - * Failure because of a resource limitation within the driver, and future - * calls for the same task will likely also fail even after a short - * delay. - * - * Available since NNAPI feature level 4. - */ - ANEURALNETWORKS_RESOURCE_EXHAUSTED_PERSISTENT = 13, - - /** - * Failure indicating an object is in a dead state. - * - * Available since NNAPI feature level 4. - */ - ANEURALNETWORKS_DEAD_OBJECT = 14, -} ResultCode; - -/** - * For {@link ANeuralNetworksModel_setOperandValue}, values with a - * length smaller or equal to this will be immediately copied into - * the model. The size is in bytes. - * - * Available since NNAPI feature level 1. - */ -enum { ANEURALNETWORKS_MAX_SIZE_OF_IMMEDIATELY_COPIED_VALUES = 128 }; - -/** - * For {@link ANeuralNetworksCompilation_setCaching}, specify the size - * of the cache token required from the application. The size is in bytes. - * - * Available since NNAPI feature level 3. - */ -enum { ANEURALNETWORKS_BYTE_SIZE_OF_CACHE_TOKEN = 32 }; - -/** - * Different duration measurements. - * - * Durations are measured in nanoseconds. - * - * Available since NNAPI feature level 3. - */ -typedef enum { - // Execution time on hardware (not driver, which runs on host processor). - ANEURALNETWORKS_DURATION_ON_HARDWARE = 0, - // Execution time in driver (including time on hardware). Excludes overhead - // such as that of the runtime itself and the IPC needed for the runtime to - // communicate with the driver. - ANEURALNETWORKS_DURATION_IN_DRIVER = 1, - // Execution time on hardware, after all dependencies have been signaled. - // If no dependencies specified (for example, if the execution was scheduled other - // than with {@link ANeuralNetworksExecution_startComputeWithDependencies}), the - // reported time will be the same as ANEURALNETWORKS_DURATION_ON_HARDWARE. - // Available since NNAPI feature level 4. 
- ANEURALNETWORKS_FENCED_DURATION_ON_HARDWARE = 2, - // Execution time in driver, after all dependencies have been signaled. Excludes - // overhead such as that of the runtime itself and the IPC needed for the runtime - // to communicate with the driver. - // If no dependencies specified (for example, if the execution was scheduled other - // than with {@link ANeuralNetworksExecution_startComputeWithDependencies}), the - // reported time will be the same as ANEURALNETWORKS_DURATION_IN_DRIVER. - // Available since NNAPI feature level 4. - ANEURALNETWORKS_FENCED_DURATION_IN_DRIVER = 3, -} DurationCode; - -%insert Priority - -/** - * ANeuralNetworksMemory is an opaque type that represents memory. - * - * This type is used to represent shared memory, memory mapped files, - * and similar memories. - * - * By using shared memory, a program can efficiently communicate to the - * runtime and drivers the tensors that define a model. See - * {@link ANeuralNetworksModel_setOperandValueFromMemory}. An application - * should typically create one shared memory object that contains every constant tensor - * needed to define a model. {@link ANeuralNetworksMemory_createFromFd} can be used to - * create shared memory from a file handle. - * {@link ANeuralNetworksMemory_createFromAHardwareBuffer} can be used to - * create shared memory from an AHardwareBuffer handle. - * - * Memory objects can also be used to specify the input and output arguments of - * an execution. See {@link ANeuralNetworksExecution_setInputFromMemory} - * and {@link ANeuralNetworksExecution_setOutputFromMemory}. 
- * - * When calling {@link ANeuralNetworksModel_setOperandValueFromMemory}, - * {@link ANeuralNetworksExecution_setInputFromMemory} and - * {@link ANeuralNetworksExecution_setOutputFromMemory}, each operand in the shared - * memory object must be aligned on a boundary of a byte size that is a multiple - * of the element type byte size, e.g., a tensor with - * {@link ANEURALNETWORKS_TENSOR_FLOAT32} type must be aligned on 4-byte boundary. - * - * It is the application's responsibility to ensure that there are no uses of - * the memory after calling {@link ANeuralNetworksMemory_free}. This includes - * any model which references this memory because of a call to - * {@link ANeuralNetworksModel_setOperandValueFromMemory}, any compilation - * created using such a model, any execution object or burst object created - * using such a compilation, or any execution which references this memory - * because of a call to {@link ANeuralNetworksExecution_setInputFromMemory} or - * {@link ANeuralNetworksExecution_setOutputFromMemory}. - * - * Available since NNAPI feature level 1. - * - * Starting at NNAPI feature level 4, the application may request creation of device native memory - * from {@link ANeuralNetworksMemoryDesc} to avoid potential memory copying and transformation - * overhead between executions. See also {@link ANeuralNetworksMemoryDesc} and - * {@link ANeuralNetworksMemory_createFromDesc}. - */ -typedef struct ANeuralNetworksMemory ANeuralNetworksMemory; - -/** - * ANeuralNetworksModel is an opaque type that contains a description of the - * mathematical operations that constitute the model. 
- * - * <p>Build the model by calling<ul> - * <li>{@link ANeuralNetworksModel_create}</li> - * <li>{@link ANeuralNetworksModel_addOperation}</li> - * <li>{@link ANeuralNetworksModel_addOperand}</li> - * </ul> - * - * This forms a graph in which each operation and operand is a node, a - * directed edge from an operand to an operation indicates that the - * operand is an input to the operation, and a directed edge from an - * operation to an operand indicates that the operand is an output - * from the operation. This graph must be acyclic. - * - * A model is completed by calling {@link ANeuralNetworksModel_finish}. - * A model is destroyed by calling {@link ANeuralNetworksModel_free}. - * - * <p>A model cannot be modified once {@link ANeuralNetworksModel_finish} - * has been called on it.</p> - * - * <p>It is the application's responsibility to make sure that only one thread - * modifies a model at a given time. It is however safe for more than one - * thread to use the model once {@link ANeuralNetworksModel_finish} has returned.</p> - * - * <p>It is also the application's responsibility to ensure that there are no - * other uses of the model after calling {@link ANeuralNetworksModel_free}. - * This includes any compilation, execution object or burst object created using - * the model.</p> - * - * Available since NNAPI feature level 1. - */ -typedef struct ANeuralNetworksModel ANeuralNetworksModel; - -/** - * ANeuralNetworksCompilation is an opaque type that can be used to compile - * a machine learning model. 
- * - * <p>To use:<ul> - * <li>Create a new compilation instance by calling the - * {@link ANeuralNetworksCompilation_create} function or - * {@link ANeuralNetworksCompilation_createForDevices}.</li> - * <li>Set any desired properties on the compilation (for example, - * {@link ANeuralNetworksCompilation_setPreference}).</li> - * <li>Optionally, set the caching signature and the cache directory on the - * compilation by calling {@link ANeuralNetworksCompilation_setCaching}.</li> - * <li>Complete the compilation with {@link ANeuralNetworksCompilation_finish}.</li> - * <li>Use the compilation as many times as needed - * with {@link ANeuralNetworksExecution_create} and - * {@link ANeuralNetworksBurst_create}.</li> - * <li>Destroy the compilation with {@link ANeuralNetworksCompilation_free} - * once all executions using the compilation have completed.</li></ul></p> - * - * A compilation is completed by calling {@link ANeuralNetworksCompilation_finish}. - * A compilation is destroyed by calling {@link ANeuralNetworksCompilation_free}. - * - * <p>A compilation cannot be modified once {@link ANeuralNetworksCompilation_finish} - * has been called on it.</p> - * - * <p>It is the application's responsibility to make sure that only - * one thread modifies a compilation at a given time. It is however - * safe for more than one thread to use the compilation once - * {@link ANeuralNetworksCompilation_finish} has returned.</p> - * - * <p>It is also the application's responsibility to ensure that there are no other - * uses of the compilation after calling {@link ANeuralNetworksCompilation_free}. - * This includes any execution object or burst object created using the compilation, - * or any memory descriptor with the compilation as part of one of the roles specified by - * {@link ANeuralNetworksMemoryDesc_addInputRole} or - * {@link ANeuralNetworksMemoryDesc_addOutputRole}.</p> - * - * Available since NNAPI feature level 1. 
- */ -typedef struct ANeuralNetworksCompilation ANeuralNetworksCompilation; - -/** - * ANeuralNetworksExecution is an opaque type that can be used to apply a machine - * learning model to a set of inputs. - * - * <p>To use:<ul> - * <li>Create a new execution instance by calling the - * {@link ANeuralNetworksExecution_create} function.</li> - * <li>Associate input buffers or memory regions to the model inputs with - * {@link ANeuralNetworksExecution_setInput} or - * {@link ANeuralNetworksExecution_setInputFromMemory}.</li> - * <li>Associate output buffers or memory regions to the model outputs with - * {@link ANeuralNetworksExecution_setOutput} or - * {@link ANeuralNetworksExecution_setOutputFromMemory}.</li> - * <li>Optionally, configure the execution with - * {@link ANeuralNetworksExecution_setLoopTimeout}, - * {@link ANeuralNetworksExecution_setMeasureTiming}, - * {@link ANeuralNetworksExecution_setReusable}, or - * {@link ANeuralNetworksExecution_setTimeout}. - * <li>Apply the model with one of the following:</li><ul> - * <li>Asynchronously with {@link ANeuralNetworksExecution_startCompute} - * or with {@link ANeuralNetworksExecution_startComputeWithDependencies}, - * waiting for the execution to complete with - * {@link ANeuralNetworksEvent_wait}.</li> - * <li>Synchronously with {@link ANeuralNetworksExecution_compute}.</li> - * <li>Synchronously as part of an execution burst with - * {@link ANeuralNetworksExecution_burstCompute}.</li></ul> - * If the execution has been marked as reusable, then you can - * apply the model more than once. 
- * <li>Destroy the execution with - * {@link ANeuralNetworksExecution_free}.</li></ul></p> - * - * <p>An output buffer or memory region must not overlap with any - * other output buffer or memory region, with an input buffer or - * memory region, or with an operand value in a memory object - * ({@link ANeuralNetworksModel_setOperandValueFromMemory}).</p> - * - * <p>An execution is in the preparation state after it is created by - * {@link ANeuralNetworksExecution_create}. An execution may only be modified in the preparation - * state. Scheduling a computation by calling {@link ANeuralNetworksExecution_burstCompute}, - * {@link ANeuralNetworksExecution_compute}, {@link ANeuralNetworksExecution_startCompute}, - * or {@link ANeuralNetworksExecution_startComputeWithDependencies} will change the state of - * the execution object to the computation state. When the computation completes, the state of - * the execution object will change from the computation state to the completed state. - * The computation is completed when {@link ANeuralNetworksExecution_compute}, - * {@link ANeuralNetworksExecution_burstCompute}, or {@link ANeuralNetworksEvent_wait} - * has returned.</p> - * - * <p>An execution can be applied to a model with - * {@link ANeuralNetworksExecution_burstCompute}, - * {@link ANeuralNetworksExecution_compute}, - * {@link ANeuralNetworksExecution_startCompute} or - * {@link ANeuralNetworksExecution_startComputeWithDependencies} only once. Create new - * executions to do new evaluations of the model.</p> - * - * <p>Starting at NNAPI feature level 5, the application may call - * {@link ANeuralNetworksExecution_setReusable} to set an execution to be reusable for multiple - * computations. The application may schedule and evaluate a computation again from the completed - * state of a reusable execution. 
The execution cannot be modified between computations.</p> - * - * <p>It is the application's responsibility to make sure that only one thread - * modifies an execution at a given time. It is however safe for more than one - * thread to use {@link ANeuralNetworksEvent_wait} at the same time.</p> - * - * <p>It is also the application's responsibility to ensure that the execution - * either has never been scheduled or has completed (i.e., that - * {@link ANeuralNetworksExecution_burstCompute}, - * {@link ANeuralNetworksExecution_compute}, or - * {@link ANeuralNetworksEvent_wait} has returned) before calling - * {@link ANeuralNetworksExecution_free}.</p>. - * - * <p>It is also the application's responsibility to ensure that there are no other - * uses of the execution after calling {@link ANeuralNetworksExecution_free}.</p> - * - * <p>It is the application's responsibility to ensure that there are no concurrent computations - * scheduled and evaluated on the same execution, either by means of - * {@link ANeuralNetworksExecution_compute} or - * {@link ANeuralNetworksExecution_burstCompute} (which are synchronous) - * in different threads, or by means of - * {@link ANeuralNetworksExecution_startCompute} or - * {@link ANeuralNetworksExecution_startComputeWithDependencies} (which are asynchronous). - * It is however safe to schedule and evaluate multiple computations on different executions - * concurrently. (Concurrent uses of {@link ANeuralNetworksExecution_burstCompute} must be on - * different burst objects.) The runtime makes no guarantee on the ordering of - * completion of executions. 
If it's important to the application, the - * application should enforce the ordering by ensuring that one execution - * completes before the next is scheduled (for example, by scheduling all - * executions synchronously within a single thread, or by scheduling all - * executions asynchronously and using {@link ANeuralNetworksEvent_wait} between - * calls to {@link ANeuralNetworksExecution_startCompute}); or by using - * {@link ANeuralNetworksExecution_startComputeWithDependencies} to make the execution wait for a - * list of events to be signaled before starting the actual evaluation.</p> - * - * Available since NNAPI feature level 1. - */ -typedef struct ANeuralNetworksExecution ANeuralNetworksExecution; - -%insert SymmPerChannelQuantParams_Comment -typedef struct ANeuralNetworksSymmPerChannelQuantParams { - /** The index of the channel dimension. */ - uint32_t channelDim; - /** The size of the scale array. Should be equal to dimension[channelDim] of the Operand. */ - uint32_t scaleCount; - /** The array of scaling values for each channel. Each value must be greater than zero. */ - const float* scales; -} ANeuralNetworksSymmPerChannelQuantParams; - -/** - * ANeuralNetworksBurst is an opaque type that can be used to reduce the latency - * of a rapid sequence of executions. It will likely cause overhead if only used - * for a single execution. - * - * ANeuralNetworksBurst serves as a context object for any number of inferences - * using {@link ANeuralNetworksExecution} objects. An ANeuralNetworksBurst - * object and the {@link ANeuralNetworksExecution} objects used with it must all - * have been created from the same {@link ANeuralNetworksCompilation} object. - * - * This object is also used as a hint to drivers, providing insight to the - * lifetime of a rapid sequence of executions. For example, a driver may choose - * to increase the clock frequency of its accelerator for the lifetime of a - * burst object. 
- * - * <p>To use:<ul> - * <li>Create a new burst object by calling the - * {@link ANeuralNetworksBurst_create} function.</li> - * <li>For each execution:</li><ul> - * <li>Create {@link ANeuralNetworksExecution} and configure its - * properties (see {@link ANeuralNetworksExecution} for details).</li> - * <li>Apply the model synchronously with - * {@link ANeuralNetworksExecution_burstCompute}, reusing the same - * {@link ANeuralNetworksBurst} with the new - * {@link ANeuralNetworksExecution}.</li> - * <li>Use and free the {@link ANeuralNetworksExecution}.</li></ul> - * <li>Destroy the burst with - * {@link ANeuralNetworksBurst_free}.</li></ul></p> - * - * Available since NNAPI feature level 3. - */ -typedef struct ANeuralNetworksBurst ANeuralNetworksBurst; - -/** - * ANeuralNetworksOperandType describes the type of an operand. - * - * This structure is used to describe both scalars and tensors. - * - * A tensor operand type with all dimensions specified is "fully - * specified". Whenever possible (i.e., whenever the dimensions are - * known at model construction time), a tensor operand type should be - * (but is not required to be) fully specified, in order to enable the - * best possible performance. - * - * If a tensor operand's type is not fully specified, the dimensions - * of the operand are deduced from the operand types and values of the - * operation for which that operand is an output or from the corresponding - * {@link ANEURALNETWORKS_IF} or {@link ANEURALNETWORKS_WHILE} operation input - * operand type in the case of referenced model input operands. 
- * - * <p>In the following situations, a tensor operand type must be fully - * specified:<ul> - * <li>The operand has a constant value, set by - * {@link ANeuralNetworksModel_setOperandValue} (with a - * non-nullptr buffer) or - * {@link ANeuralNetworksModel_setOperandValueFromMemory}.</li> - * <li>The operand is a model input (see - * {@link ANeuralNetworksModel_identifyInputsAndOutputs}) of the main - * model within a compilation. A fully specified tensor operand type - * must either be provided to {@link ANeuralNetworksModel_addOperand}; - * or it must be provided to the corresponding - * {@link ANeuralNetworksExecution_setInput}, or - * {@link ANeuralNetworksExecution_setInputFromMemory}. - * EXCEPTION: If the input is optional and omitted - * (by passing nullptr for buffer to - * {@link ANeuralNetworksExecution_setInput}) then it need - * not have a fully specified tensor operand type.</li> - * <li>The operand is a model output (see - * {@link ANeuralNetworksModel_identifyInputsAndOutputs}) of the main - * model within a compilation and is to be used with {@link - * ANeuralNetworksExecution_startComputeWithDependencies}. - * A fully specified tensor operand type must either be provided - * to {@link ANeuralNetworksModel_addOperand}; or it must be - * provided to the corresponding - * {@link ANeuralNetworksExecution_setOutput}, or - * {@link ANeuralNetworksExecution_setOutputFromMemory}.</li></ul> - * - * A tensor operand type of specified rank but some number of - * unspecified dimensions is represented by setting dimensionCount to - * the rank and each unspecified dimension to 0. - * - * Available since NNAPI feature level 1. - * - * Starting at NNAPI feature level 3, a tensor operand type of unspecified rank is - * represented by setting dimensionCount to 0 and dimensions to NULL (just as if - * it were a scalar operand type). - */ -typedef struct ANeuralNetworksOperandType { - /** - * The data type, e.g ANEURALNETWORKS_FLOAT32. 
- */ - int32_t type; - - /** - * The number of dimensions (rank). - * - * Must be 0 for scalars. - */ - uint32_t dimensionCount; - - /** - * The dimensions of the tensor. - * - * Must be nullptr for scalars. - */ - const uint32_t* dimensions; - - /** - * The quantization scale. - * - * Must be 0 when not applicable to an operand type. - * - * See {@link OperandCode}. - */ - float scale; - - /** - * The quantization zero point. - * - * Must be 0 when not applicable to an operand type. - * - * See {@link OperandCode}. - */ - int32_t zeroPoint; -} ANeuralNetworksOperandType; - -/** - * Aliasing to {@link OperationCode}, used in function - * {@link ANeuralNetworksModel_addOperation}. - */ -typedef int32_t ANeuralNetworksOperationType; - -/** - * ANeuralNetworksEvent is an opaque type that represents an event - * that will be signaled once an execution completes. - * - * Available since NNAPI feature level 1. - */ -typedef struct ANeuralNetworksEvent ANeuralNetworksEvent; - -/** - * ANeuralNetworksDevice is an opaque type that represents a device. - * - * This type is used to query basic properties and supported operations of the corresponding - * device, and control which device(s) a model is to be run on. - * - * Available since NNAPI feature level 3. - */ -typedef struct ANeuralNetworksDevice ANeuralNetworksDevice; - -/** - * ANeuralNetworksMemoryDesc is an opaque type that represents a memory descriptor. - * - * A memory descriptor describes the properties of a memory object, and is used by - * {@link ANeuralNetworksMemory_createFromDesc}. - * - * To use: - * - Create a new memory descriptor by calling {@link ANeuralNetworksMemoryDesc_create}. - * - Specify all of the intended input and output roles by calling - * {@link ANeuralNetworksMemoryDesc_addInputRole} and - * {@link ANeuralNetworksMemoryDesc_addOutputRole}. - * - Optionally, specify the memory dimensions by calling - * {@link ANeuralNetworksMemoryDesc_setDimensions}. 
- * - Complete the memory descriptor with {@link ANeuralNetworksMemoryDesc_finish}. - * - Use the memory descriptor as many times as needed with - * {@link ANeuralNetworksMemory_createFromDesc}. - * - Destroy the memory descriptor with {@link ANeuralNetworksMemoryDesc_free}. - * - * A memory descriptor is completed by calling {@link ANeuralNetworksMemoryDesc_finish}. - * A memory descriptor is destroyed by calling {@link ANeuralNetworksMemoryDesc_free}. - * - * A memory descriptor must not be modified once {@link ANeuralNetworksMemoryDesc_finish} - * has been called on it. - * - * It is the application's responsibility to make sure that only - * one thread modifies a memory descriptor at a given time. It is however - * safe for more than one thread to use the memory descriptor once - * {@link ANeuralNetworksMemoryDesc_finish} has returned. - * - * It is also the application's responsibility to ensure that there are no other - * uses of the memory descriptor after calling {@link ANeuralNetworksMemoryDesc_free}. - * It is however safe to continue using a {@link ANeuralNetworksMemory} object created - * from the memory descriptor. - * - * Available since NNAPI feature level 4. - */ -typedef struct ANeuralNetworksMemoryDesc ANeuralNetworksMemoryDesc; - -__END_DECLS - -#endif // ANDROID_FRAMEWORKS_ML_NN_RUNTIME_NEURAL_NETWORKS_TYPES_H - -/** @} */
diff --git a/tools/api/OperandTypes.t b/tools/api/OperandTypes.t deleted file mode 100644 index e106d08..0000000 --- a/tools/api/OperandTypes.t +++ /dev/null
@@ -1,58 +0,0 @@ -%% template file for generating OperandTypes.h. -%% see README.md. -/* - * Copyright (C) 2020 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef ANDROID_FRAMEWORKS_ML_NN_COMMON_NNAPI_OPERAND_TYPES_H -#define ANDROID_FRAMEWORKS_ML_NN_COMMON_NNAPI_OPERAND_TYPES_H - -namespace android::nn { - -%% -%% The function `getAlignmentForLength` assumes that required (or optimal) -%% alignment is a function of length (not data type), and assumes that there -%% is a maximum alignment requirement. If a new operand type is added with a -%% stricter alignment requirement, then `getAlignmentForLength` may need to -%% be modified. -%% -%insert Operand_1.0_Comment -enum class OperandType { -%insert Operand_1.0 - -%insert Operand_1.2 - -%insert Operand_1.3 - - /** - * DEPRECATED. Since HAL version 1.2, extensions are the preferred - * alternative to OEM operation and data types. - * - * OEM specific scalar value. - */ - OEM = 10000, - - /** - * DEPRECATED. Since HAL version 1.2, extensions are the preferred - * alternative to OEM operation and data types. - * - * A tensor of OEM specific values. - */ - TENSOR_OEM_BYTE = 10001, -}; - -} // namespace android::nn - -#endif // ANDROID_FRAMEWORKS_ML_NN_COMMON_NNAPI_OPERAND_TYPES_H
diff --git a/tools/api/OperationTypes.t b/tools/api/OperationTypes.t deleted file mode 100644 index 73d4a28..0000000 --- a/tools/api/OperationTypes.t +++ /dev/null
@@ -1,46 +0,0 @@ -%% template file for generating OperationTypes.h. -%% see README.md. -/* - * Copyright (C) 2020 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef ANDROID_FRAMEWORKS_ML_NN_COMMON_NNAPI_OPERATION_TYPES_H -#define ANDROID_FRAMEWORKS_ML_NN_COMMON_NNAPI_OPERATION_TYPES_H - -namespace android::nn { - -%insert Operation_1.0_Comment -enum class OperationType { -%insert Operation_1.0 - -%insert Operation_1.1 - -%insert Operation_1.2 - -%insert Operation_1.3 - - /** - * DEPRECATED. Since HAL version 1.2, extensions are the preferred - * alternative to OEM operation and data types. - * - * This operation is OEM specific. It should only be used for OEM - * applications. - */ - OEM_OPERATION = 10000, -}; - -} // namespace android::nn - -#endif // ANDROID_FRAMEWORKS_ML_NN_COMMON_NNAPI_OPERATION_TYPES_H
diff --git a/tools/api/README.md b/tools/api/README.md index e73d841..fe4dd48 100644 --- a/tools/api/README.md +++ b/tools/api/README.md
@@ -1,37 +1,31 @@ # API File Generation -There are certain pieces of `NeuralNetworksTypes.h`, `Types.h`, -`OperandTypes.h`, `OperationTypes.h`, and of our various `*.hal` files that -ought to be kept in sync -- most notably the operand type and operation type -definitions and descriptions. To avoid having to do this manually, a tool -`generate_api.py` is employed to combine a single *specification file* with one -*template file* per API file (`NeuralNetworksTypes.h`, `Types.h`, -`OperandTypes.h`, `OperationTypes.h`, or `types.hal`) to produce that API file. -The script `generate_api.sh` invokes `generate_api.py` once per API file, -passing appropriate arguments. +There are certain pieces of `NeuralNetworks.h` and of our various `*.hal` files +that ought to be kept in sync -- most notably the operand type and operation +type definitions and descriptions in our `NeuralNetworks.h` and `types.hal` +files. To avoid having to do this manually, a tool `generate_api.py` is +employed to combine a single *specification file* with one *template file* per +API file (`NeuralNetworks.h` or `types.hal`) to produce that API file. The +script `generate_api.sh` invokes `generate_api.py` once per API file, passing +appropriate arguments. ## `generate_api.sh` The environment variable `ANDROID_BUILD_TOP` must be set. -Invoked with no arguments or with the `--mode=update` argument, this script -regenerates each API file in place, by invoking `generate_api.py` once per +Invoked with no arguments, this script regenerates the `NeuralNetworks.h` file +and every `types.hal` file in place, by invoking `generate_api.py` once per generated file. -Invoked with the `--mode=hook` argument, this script checks whether -`NeuralNetworksTypes.h`, `Types.h`, `OperandTypes.h`, or `OperationTypes.h` -needs to be regenerated. 
- -When the `--dryrun` argument is present, this script shows how it would invoke -`generate_api.py` but does not actually regenerate files or check whether they -need to be regenerated. +Invoked with the `--dryrun` argument, this script instead shows how it would +invoke `generate_api.py`. ## `generate_api.py` This tool generates a single output file from an input specification file and an input template file. It takes the following mandatory arguments: -* `--output OUTPUT` path to generated output file (such as `Types.h`) +* `--output OUTPUT` path to generated output file (such as `NeuralNetworks.h`) * `--specification SPECIFICATION` path to input specification file * `--template TEMPLATE` path to input template file * `--kind KIND` token identifying kind of file to generate @@ -39,9 +33,8 @@ The "kind" is an arbitrary token that the specification file can reference with the `%kind` directive to help generate different text in different situations. It has no meaning to the tool itself. Today, the following kinds are used: -`ndk` (when generating `NeuralNetworksTypes.h`), `canonical` (when generating -`Types.h`, `OperandTypes.h`, and `OperationTypes.h`), `hal_1.0` (when generating -`1.0/types.hal`), `hal_1.1`, `hal_1.2`, and `hal_1.3`. +`ndk` (when generating `NeuralNetworks.h`), `hal_1.0` (when generating +`1.0/types.hal`), `hal_1.1`, `hal_1.2` and `hal_1.3`. ## Template File Syntax @@ -60,11 +53,6 @@ output file. The section is defined by a `%section` directive in the specification file. -#### `%insert-indented *count* *name*` - -Similar to `%insert *name*`, but each non-empty copied line is prefixed with -*count* space characters. *count* must be a non-negative integer. - ## Specification File Syntax The specification file consists of comments, *directives*, and other text. @@ -83,14 +71,15 @@ Certain regions can enclose certain other regions, but this is very limited: -* A conditional region can enclose a section region. 
-* A section region can enclose a conditional region. +* A conditional region can enclose a definition region. +* A section region can enclose a conditional region or a definition region. Equivalently: * A conditional region can be enclosed by a section region. -* A section region can be enclosed by a conditional region. - +* A definition region can be enclosed by a conditional region or a section + region. + #### null region A *null region* is a sequence of lines that is not part of any other region. @@ -112,16 +101,23 @@ directive are ignored *except* that even ignored directives undergo some level of syntactic and semantic checking. +#### definition region + +A *definition region* is a sequence of lines immediately preceded by the +`%define-lines *name*` directive and immediately followed by the +`%/define-lines` directive. Every non-comment line in the sequence undergoes +macro substitution, and the resulting lines are associated with the region name. +They can later be added to a section region with the `%insert-lines` directive. + +This can be thought of as a multi-line macro facility. + #### section region A *section region* is a sequence of lines immediately preceded by the `%section *name*` directive and immediately followed by the `%/section` directive. Every -line in the sequence that doesn't begin with `%` undergoes macro substitution, -and the resulting lines are associated with the section name. They can be -inserted into the generated output file as directed by the template file's -`%insert` and `%insert-indented` directives. They can be added to another -section region with the with the specification file's `%insert` and -`%insert-indented` directives. +non-comment line in the sequence undergoes macro substitution, and the resulting +lines are associated with the section name. They can be inserted into the +generated output file as directed by the template file's `%insert` directive. 
This is the mechanism by which a specification file contributes text to the generated output file. @@ -136,10 +132,10 @@ %define test this body begins and ends with a space character -Macro substitution occurs within a section region: a substring `%{*name*}` is -replaced with the corresponding *body*. Macro substitution is *not* recursive: -A substring `%{*name2*}` in *body* will not undergo macro substitution, except -as discussed for *macro arguments* below. +Macro substitution occurs within a definition region or a section region: a +substring `%{*name*}` is replaced with the corresponding *body*. Macro +substitution is *not* recursive: A substring `%{*name2*}` in *body* will not +undergo macro substitution, except as discussed for *macro arguments* below. Permitted in regions: null, conditional, section @@ -150,37 +146,31 @@ substring of the form `%{argnum}` will be replaced by the corresponding argument from *arglist*. For example, if the definition is -``` -%define test second is %{2}, first is %{1} -``` - + %define test second is %{2}, first is %{1} + then the macro invocation -``` -%{test alpha beta} -``` - + %{test alpha beta} + is expanded to -``` -second is beta, first is alpha -``` + second is beta, first is alpha The only check on the number of arguments supplied at macro invocation time is that there must be at least as many arguments as the highest `%{argnum}` reference in the macro body. In the example above, `%{test alpha}` would be an error, but `%{test alpha beta gamma}` would not. -#### `%insert *name*` +#### `%define-lines *name*`, `%/define-lines` -Adds all lines from the named section region to the current section region. +`%define-lines *name*` creates a *definition region* terminated by +`%/define-lines`. 
-Permitted in regions: section +Permitted in regions: null, conditional, section -#### `%insert-indented *count* *name*` +#### `%insert-lines *name*` -Similar to `%insert *name*`, but each non-empty added line is prefixed -with *count* space characters. *count* must be a non-negative integer. +Adds all lines from the named definition region to the current section region. Permitted in regions: section @@ -209,7 +199,7 @@ This directive has two purposes: -* Validity-checking. If the "kind" is not on the space-delimited *list* of tokens, +* Sanity-checking. If the "kind" is not on the space-delimited *list* of tokens, `generate_api.py` terminates with an error. * Ordering the possible kinds for the *lowest version pattern* (see the section above for the explanation of the pattern). @@ -222,4 +212,4 @@ `%section *name*` creates a *section region* terminated by `%/section`. -Permitted in regions: null, conditional +Permitted in regions: null
diff --git a/tools/api/Types.t b/tools/api/Types.t deleted file mode 100644 index f36f577..0000000 --- a/tools/api/Types.t +++ /dev/null
@@ -1,257 +0,0 @@ -%% template file for generating Types.h. -%% see README.md. -/* - * Copyright (C) 2020 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef ANDROID_FRAMEWORKS_ML_NN_COMMON_NNAPI_TYPES_H -#define ANDROID_FRAMEWORKS_ML_NN_COMMON_NNAPI_TYPES_H - -#include <android-base/chrono_utils.h> -#include <android-base/expected.h> -#include <android-base/unique_fd.h> - -#include <array> -#include <chrono> -#include <limits> -#include <memory> -#include <optional> -#include <string> -#include <type_traits> -#include <utility> -#include <variant> -#include <vector> - -#include "nnapi/OperandTypes.h" -#include "nnapi/OperationTypes.h" -#include "nnapi/Result.h" - -// Forward declare AHardwareBuffer -extern "C" typedef struct AHardwareBuffer AHardwareBuffer; - -namespace android::nn { - -// Forward declarations - -class IBuffer; -class IBurst; -class IDevice; -class IExecution; -class IPreparedModel; -struct Memory; - -// Constants - -constexpr float kDefaultExecTime = std::numeric_limits<float>::max(); -constexpr float kDefaultPowerUsage = std::numeric_limits<float>::max(); -constexpr uint32_t kByteSizeOfCacheToken = 32; -constexpr uint32_t kMaxNumberOfCacheFiles = 32; - -%insert ExtensionTypeEncoding - -constexpr uint32_t kDefaultRequestMemoryAlignment = 64; -constexpr uint32_t kDefaultRequestMemoryPadding = 64; -constexpr uint32_t kMinMemoryAlignment = alignof(std::max_align_t); -constexpr uint32_t 
kMinMemoryPadding = 1; -constexpr auto kLoopTimeoutDefault = std::chrono::seconds{2}; -constexpr auto kLoopTimeoutMaximum = std::chrono::seconds{15}; - -// Aliases - -using SharedBuffer = std::shared_ptr<const IBuffer>; -using SharedBurst = std::shared_ptr<const IBurst>; -using SharedDevice = std::shared_ptr<const IDevice>; -using SharedExecution = std::shared_ptr<const IExecution>; -using SharedMemory = std::shared_ptr<const Memory>; -using SharedPreparedModel = std::shared_ptr<const IPreparedModel>; - -// Canonical types - -%insert DeviceStatus - -%insert ExecutionPreference - -%insert DeviceType - -%insert MeasureTiming - -%insert Priority - -// TODO: Should more errors from NeuralNetworks.h be incorporated? The left name shows errors that -// appear in NeuralNetworks.h but not in the HAL, and the right column shows what these values could -// map to: -// * OUT_OF_MEMORY ==> GENERAL_FAILURE / RESOURCE_EXHAUSTED_* -// * INCOMPLETE ==> GENERAL_FAILURE -// * UNEXPECTED_NULL ==> INVALID_ARGUMENT -// * UNMAPPABLE ==> GENERAL_FAILURE -// * BAD_STATE ==> INVALID_ARGUMENT -enum class ErrorStatus { - NONE = 0, - DEVICE_UNAVAILABLE = 1, - GENERAL_FAILURE = 2, - OUTPUT_INSUFFICIENT_SIZE = 3, - INVALID_ARGUMENT = 4, - MISSED_DEADLINE_TRANSIENT = 5, - MISSED_DEADLINE_PERSISTENT = 6, - RESOURCE_EXHAUSTED_TRANSIENT = 7, - RESOURCE_EXHAUSTED_PERSISTENT = 8, - DEAD_OBJECT = 10000, -}; - -struct GeneralError { - std::string message; - ErrorStatus code = ErrorStatus::GENERAL_FAILURE; -}; - -template <typename Type> -using GeneralResult = base::expected<Type, GeneralError>; - -%insert FusedActivationFunc - -using Dimension = uint32_t; -using Dimensions = std::vector<Dimension>; - -using CacheToken = std::array<uint8_t, kByteSizeOfCacheToken>; - -%insert OutputShape - -struct ExecutionError { - std::string message; - ErrorStatus code = ErrorStatus::GENERAL_FAILURE; - // OutputShapes for code == OUTPUT_INSUFFICIENT_SIZE - std::vector<OutputShape> outputShapes = {}; -}; - -template 
<typename Type> -using ExecutionResult = base::expected<Type, ExecutionError>; - -%insert Capabilities - -%insert Extension - -%insert Operation - -%insert DataLocation - -%insert Operand - -struct Handle { - std::vector<base::unique_fd> fds; - std::vector<int> ints; -}; - -using SharedHandle = std::shared_ptr<const Handle>; - -struct Memory { - struct Ashmem { - base::unique_fd fd; - size_t size; - }; - - struct Fd { - size_t size; - int prot; - base::unique_fd fd; - size_t offset; - }; - - // RAII wrapper for AHardwareBuffer - struct HardwareBuffer { - using Deleter = std::add_pointer_t<void(AHardwareBuffer*)>; - using Handle = std::unique_ptr<AHardwareBuffer, Deleter>; - Handle handle; - }; - - struct Unknown { - Handle handle; - size_t size; - std::string name; - }; - - std::variant<Ashmem, Fd, HardwareBuffer, Unknown> handle; -}; - -%insert Model - -%insert BufferDesc - -%insert BufferRole - -%insert Request - -// Representation of sync_fence. -class SyncFence { - public: - static SyncFence createAsSignaled(); - static SyncFence create(base::unique_fd fd); - static Result<SyncFence> create(SharedHandle syncFence); - - // The function syncWait() has the same semantics as the system function - // ::sync_wait(), except that the syncWait() return value is semantically - // richer. 
- enum class FenceState { - ACTIVE, // fence has not been signaled - SIGNALED, // fence has been signaled - ERROR, // fence has been placed in the error state - UNKNOWN, // either bad argument passed to syncWait(), or internal error - }; - using Timeout = std::chrono::duration<int, std::milli>; - using OptionalTimeout = std::optional<Timeout>; - - FenceState syncWait(OptionalTimeout optionalTimeout) const; - - SharedHandle getSharedHandle() const; - bool hasFd() const; - int getFd() const; - - private: - explicit SyncFence(SharedHandle syncFence); - - SharedHandle mSyncFence; -}; - -using Clock = base::boot_clock; - -using Duration = std::chrono::nanoseconds; -using OptionalDuration = std::optional<Duration>; - -using TimePoint = std::chrono::time_point<Clock, Duration>; -using OptionalTimePoint = std::optional<TimePoint>; - -%insert Timing - -// Returns status, timingLaunched, timingFenced -using ExecuteFencedInfoCallback = std::function<GeneralResult<std::pair<Timing, Timing>>()>; - -enum class Version { ANDROID_OC_MR1, ANDROID_P, ANDROID_Q, ANDROID_R, ANDROID_S, CURRENT_RUNTIME }; - -// Describes the memory preference of an operand. -struct MemoryPreference { - // Must be a power of 2. - // For pointer buffers, the alignment is satisfied if the address of the pointer is a multiple - // of the "alignment" value. For memory pools, the alignment is satisfied if the offset of the - // sub-region specified by DataLocation is a multiple of the "alignment" value. - uint32_t alignment; - // Must be a power of 2. - // For both pointer buffers and memory pools, the padding is satisfied if the padded length is - // greater than or equal to the raw size of the operand (i.e. the size of an element multiplied - // by the number of elements) rounding up to a multiple of the "padding" value. In DataLocation, - // the padded length equals to the sum of the length and padding fields. 
- uint32_t padding; -}; - -} // namespace android::nn - -#endif // ANDROID_FRAMEWORKS_ML_NN_COMMON_NNAPI_TYPES_H
diff --git a/tools/api/generate_api.py b/tools/api/generate_api.py index 4603a06..00081bc 100755 --- a/tools/api/generate_api.py +++ b/tools/api/generate_api.py
@@ -1,5 +1,5 @@ #!/usr/bin/python3 -""" Generate an output file from a specification file and a template file. +""" Generate NeuralNetworks.h or types.hal from a specification file and a template file. See README.md for more details. """ @@ -48,16 +48,18 @@ super(Specification, self).__init__(filename) self.sections = dict() # key is section name, value is array of strings (lines) in the section self.section = None # name of current %section - self.section_start = None # first line number of current %section self.defmacro = dict() # key is macro name, value is string (body of macro) + self.deflines = dict() # key is definition name, value is array of strings (lines) in the definition + self.deflines_key = None # name of current %define-lines self.kind = kind self.kinds = None # remember %define-kinds self.conditional = self.UNCONDITIONAL - self.conditional_start = None # first line number of current %kind def finish(self): assert self.section is None, "\"%section " + self.section + \ "\" not terminated by end of specification file" + assert self.deflines_key is None, "\"%define-lines " + self.deflines_key + \ + "\" not terminated by end of specification file" assert self.conditional is self.UNCONDITIONAL, "%kind not terminated by end of specification file" def macro_substitution(self): @@ -117,8 +119,7 @@ " wasn't defined in %define-kinds" ) lowest_pos = self.kinds.index(pattern[:-1]) - if self.kind in self.kinds[lowest_pos:]: - return True + return self.kind in self.kinds[lowest_pos:] else: # An ordinary pattern: See if it matches self.kind. if not self.kinds is None and not pattern in self.kinds: @@ -135,10 +136,9 @@ definition, etc. 
""" - DIRECTIVES = [ - "%define", "%define-kinds", "%else", "%insert", "%insert-indented", - "%kind", "%/kind", "%section", "%/section" - ] + DIRECTIVES = ["%define", "%define-kinds", "%define-lines", "%/define-lines", + "%else", "%insert-lines", "%kind", "%/kind", "%section", + "%/section"] # Common typos: /%directive, \%directive matchbad = re.search("^[/\\\]%(\S*)", self.line) @@ -157,32 +157,49 @@ if not directive in DIRECTIVES: assert False, "Unknown directive \"" + directive + "\" on " + self.context() - # Check for insert - match = re.search("^%insert(?:-indented\s+(\S+))?\s+(\S+)\s*$", self.line) + # Check for end of multiline macro + match = re.search("^%/define-lines\s*(\S*)", self.line) if match: - directive = self.line.split(" ", 1)[0] - assert not self.section is None, directive + " outside %section at " + self.context() - count = match[1] or "0" - key = match[2] - assert re.match("^\d+$", count), "Bad count \"" + count + "\" on " + self.context() - assert key in self.sections, "Unknown section \"" + key + "\" on " + self.context() - assert key != self.section, "Cannot insert section \"" + key + "\" into itself on " + self.context() + assert match[1] == "", "Malformed directive \"%/define-lines\" on " + self.context() + assert not self.deflines_key is None, "%/define-lines with no matching %define-lines on " + \ + self.context() + self.deflines_key = None + return + + # Directives are forbidden within multiline macros + assert self.deflines_key is None, "Directive is not permitted in definition of \"" + \ + self.deflines_key + "\" at " + self.context() + + # Check for define (multi line) + match = re.search("^%define-lines\s+(\S+)\s*$", self.line) + if match: + key = match[1] + if self.conditional is self.CONDITIONAL_OFF: + self.deflines_key = "" + return + assert not key in self.deflines, "Duplicate definition of \"" + key + "\" on " + self.context() + self.deflines[key] = [] + self.deflines_key = key + # Non-directive lines will be added to 
self.deflines[key] as they are read + # until we see %/define-lines + return + + # Check for insert + match = re.search("^%insert-lines\s+(\S+)\s*$", self.line) + if match: + assert not self.section is None, "%insert-lines outside %section at " + self.context() + key = match[1] + assert key in self.deflines, "Missing definition of lines \"" + key + "\" at " + self.context() if self.conditional is self.CONDITIONAL_OFF: return - indent = " " * int(count) - self.sections[self.section].extend( - (indent + line if line.rstrip("\n") else line) - for line in self.sections[key]) + self.sections[self.section].extend(self.deflines[key]); return # Check for start of section match = re.search("^%section\s+(\S+)\s*$", self.line) if match: assert self.section is None, "Nested %section is forbidden at " + self.context() - self.section_start = self.lineno - if self.conditional is self.CONDITIONAL_OFF: - self.section = "" - return + assert self.conditional is self.UNCONDITIONAL, "%section within %kind is forbidden at " + self.context() key = match[1] assert not key in self.sections, "Duplicate definition of \"" + key + "\" on " + self.context() self.sections[key] = [] @@ -194,30 +211,24 @@ # Check for end of section if re.search("^%/section\s*$", self.line): assert not self.section is None, "%/section with no matching %section on " + self.context() - assert self.conditional_start is None or self.conditional_start < self.section_start, \ - "%kind not terminated by end of %section on " + self.context() + assert self.conditional is self.UNCONDITIONAL # can't actually happen self.section = None - self.section_start = None return # Check for start of kind match = re.search("^%kind\s+((\S+)(\s+\S+)*)\s*$", self.line) if match: - assert self.conditional is self.UNCONDITIONAL, \ - "Nested %kind is forbidden at " + self.context() + assert self.conditional is self.UNCONDITIONAL, "%kind is nested at " + self.context() patterns = match[1] if self.match_kind(patterns): self.conditional = 
self.CONDITIONAL_ON else: self.conditional = self.CONDITIONAL_OFF - self.conditional_start = self.lineno return # Check for complement of kind (else) if re.search("^%else\s*$", self.line): assert not self.conditional is self.UNCONDITIONAL, "%else without matching %kind on " + self.context() - assert self.section_start is None or self.section_start < self.conditional_start, \ - "%section not terminated by %else on " + self.context() if self.conditional == self.CONDITIONAL_ON: self.conditional = self.CONDITIONAL_OFF else: @@ -244,10 +255,7 @@ # Check for end of kind if re.search("^%/kind\s*$", self.line): assert not self.conditional is self.UNCONDITIONAL, "%/kind without matching %kind on " + self.context() - assert self.section_start is None or self.section_start < self.conditional_start, \ - "%section not terminated by end of %kind on " + self.context() self.conditional = self.UNCONDITIONAL - self.conditional_start = None return # Check for kinds definition @@ -281,6 +289,8 @@ if self.conditional is self.CONDITIONAL_OFF: pass + elif not self.deflines_key is None: + self.deflines[self.deflines_key].append(self.macro_substitution()) elif self.section is None: # Treat as comment pass @@ -308,17 +318,14 @@ return # Check for insertion - match = re.search("^%insert(?:-indented\s+(\S+))?\s+(\S+)\s*$", self.line) + match = re.search("^%insert\s+(\S+)\s*$", self.line) if match: - count = match[1] or "0" - key = match[2] - assert re.match("^\d+$", count), "Bad count \"" + count + "\" on " + self.context() + key = match[1] assert key in specification.sections, "Unknown section \"" + key + "\" on " + self.context() - indent = " " * int(count) for line in specification.sections[key]: if re.search("TODO", line, re.IGNORECASE): print("WARNING: \"TODO\" at " + self.context()) - self.lines.append(indent + line if line.rstrip("\n") else line) + self.lines.append(line) return # Bad directive @@ -351,6 +358,7 @@ specification.read() if (args.verbose): print(specification.defmacro) + 
print(specification.deflines) # Read the template template = Template(args.template, specification) @@ -363,5 +371,6 @@ # TODO: Write test cases for malformed specification and template files # TODO: Find a cleaner way to handle conditionals (%kind) or nesting in general; # maybe add support for more nesting -# TODO: Could we do away with the distinction between a specification file and a -# template file and add a %include directive? +# TODO: Unify section/define-lines, rather than having two kinds of text regions? +# Could we take this further and do away with the distinction between a +# specification file and a template file, and add a %include directive?
diff --git a/tools/api/generate_api.sh b/tools/api/generate_api.sh index b1f66b5..5581fcc 100755 --- a/tools/api/generate_api.sh +++ b/tools/api/generate_api.sh
@@ -18,7 +18,7 @@ shift ;; *) - echo >&2 "*** USAGE: $(basename $0) [--dryrun] [--mode={update|hook}]" + echo >&2 "*** USAGE: $(basename $0) [--dryrun] [--mode={update|ndk_hook}]" exit 1 ;; esac @@ -28,7 +28,6 @@ SPECFILE=$(dirname $0)/types.spec HALDIR=${ANDROID_BUILD_TOP}/hardware/interfaces/neuralnetworks NDKDIR=${ANDROID_BUILD_TOP}/packages/modules/NeuralNetworks/runtime/include -CANONICALDIR=${ANDROID_BUILD_TOP}/packages/modules/NeuralNetworks/common/include/nnapi RET=0 function doit { @@ -38,34 +37,23 @@ if [[ $? -ne 0 ]] ; then RET=1 ; fi } -function check { - typeset -r kind="$1" in="$2" out="$3" - TEMPFILE=$(mktemp) - doit ${kind} ${in} ${TEMPFILE} - if [[ ${RET} -eq 0 ]] ; then - ${DRYRUN} cmp -s ${out} ${TEMPFILE} || { - RET=1 - echo >&2 "Error: $(basename ${out}) is out of sync with $(basename ${in}) or types.spec. Please run generate_api.sh before uploading." - } - fi -} - case "${MODE}" in update) - doit canonical $(dirname $0)/Types.t ${CANONICALDIR}/Types.h - doit canonical $(dirname $0)/OperandTypes.t ${CANONICALDIR}/OperandTypes.h - doit canonical $(dirname $0)/OperationTypes.t ${CANONICALDIR}/OperationTypes.h - doit ndk $(dirname $0)/NeuralNetworksTypes.t ${NDKDIR}/NeuralNetworksTypes.h + doit ndk $(dirname $0)/NeuralNetworks.t ${NDKDIR}/NeuralNetworks.h doit hal_1.0 ${HALDIR}/1.0/types.t ${HALDIR}/1.0/types.hal doit hal_1.1 ${HALDIR}/1.1/types.t ${HALDIR}/1.1/types.hal doit hal_1.2 ${HALDIR}/1.2/types.t ${HALDIR}/1.2/types.hal doit hal_1.3 ${HALDIR}/1.3/types.t ${HALDIR}/1.3/types.hal ;; - hook) - check canonical $(dirname $0)/Types.t ${CANONICALDIR}/Types.h - check canonical $(dirname $0)/OperandTypes.t ${CANONICALDIR}/OperandTypes.h - check canonical $(dirname $0)/OperationTypes.t ${CANONICALDIR}/OperationTypes.h - check ndk $(dirname $0)/NeuralNetworksTypes.t ${NDKDIR}/NeuralNetworksTypes.h + ndk_hook) + TEMPDIR=$(mktemp -d) + doit ndk $(dirname $0)/NeuralNetworks.t ${TEMPDIR}/NeuralNetworks.h + if [[ ${RET} -eq 0 ]] ; then + ${DRYRUN} cmp -s 
${NDKDIR}/NeuralNetworks.h ${TEMPDIR}/NeuralNetworks.h || { + RET=1 + echo >&2 "Error: NeuralNetworks.h is out of sync with NeuralNetworks.t or types.spec. Please run generate_api.sh before uploading." + } + fi ;; *) echo >&2 "*** Unknown mode: ${MODE}"
diff --git a/tools/api/types.spec b/tools/api/types.spec index d715ce9..82e4872 100644 --- a/tools/api/types.spec +++ b/tools/api/types.spec
@@ -1,6 +1,6 @@ %% -*-Fundamental-*- -%define-kinds canonical ndk hal_1.0 hal_1.1 hal_1.2 hal_1.3 +%define-kinds ndk hal_1.0 hal_1.1 hal_1.2 hal_1.3 %kind ndk %define ANN ANEURALNETWORKS_ @@ -9,100 +9,98 @@ %define DeclareOperation_1.2 ANEURALNETWORKS_%{1} = %{2} %define DeclareOperation_1.3 ANEURALNETWORKS_%{1} = %{2} %define FusedActivationFunc FuseCode -%define DeclareFusedActivationFunc ANEURALNETWORKS_FUSED_%{1} = %{2} -%define DeclareExecutionPreference ANEURALNETWORKS_PREFER_%{1} = %{2} -%define DeclareDeviceType ANEURALNETWORKS_DEVICE_%{1} = %{2} %define OperandType OperandCode %define OperandTypeLinkPfx ANEURALNETWORKS_ %define OperationTypeLinkPfx ANEURALNETWORKS_ %define runtime_or_driver runtime -%define NNAPILevel3 NNAPI feature level 3 -%define NNAPILevel4 NNAPI feature level 4 -%define BeforeNNAPILevel3For Before NNAPI feature level 3, for +%define model_or_subgraph model +%define MODEL_or_SUBGRAPH MODEL +%define APILevel29 API level 29 +%define APILevel30 API level 30 +%define BeforeAPILevel29For Before API level 29, for %define or_1.2 or {@link ANEURALNETWORKS_%{1}} %define NDK_if_specified (if specified) %define otherOperandParameters other operand parameters -%section AVAIL1 +%define-lines AVAIL27 * - * Available since NNAPI feature level 1. -%/section -%section AVAIL1Short + * Available since API level 27. +%/define-lines +%define-lines AVAIL27Short * - * Available since NNAPI feature level 1. -%/section -%section AVAIL2 + * Available since API level 27. +%/define-lines +%define-lines AVAIL28 * - * Available since NNAPI feature level 2. -%/section -%section AVAIL3 + * Available since API level 28. +%/define-lines +%define-lines AVAIL29 * - * Available since NNAPI feature level 3. -%/section -%section AVAIL4 + * Available since API level 29. +%/define-lines +%define-lines AVAIL30 * - * Available since NNAPI feature level 4. -%/section -%section OutputState + * Available since API level 30. 
+%/define-lines +%define-lines OutputState * - * Important: As of NNAPI feature level 3, there is no way to get the output state tensors out - * and NNAPI does not maintain internal states. This operator does not support the usage pattern - * in which multiple cells are chained and state tensors are propagated. -%/section -%section PaddingCodeValues + * Important: As of API level 29, there is no way to get the output state tensors out and NNAPI + * does not maintain internal states. This operator does not support the usage pattern in which + * multiple cells are chained and state tensors are propagated. +%/define-lines +%define-lines PaddingCodeValues * {@link PaddingCode} values. -%/section +%/define-lines %/kind -%kind canonical hal* +%kind hal* %define ANN %define Ann %define FusedActivationFunc FusedActivationFunc -%define DeclareFusedActivationFunc %{1} = %{2} -%define DeclareExecutionPreference %{1} = %{2} -%define DeclareDeviceType %{1} = %{2} %define OperandType OperandType %define OperandTypeLinkPfx OperandType:: %define OperationTypeLinkPfx OperationType:: %define runtime_or_driver driver -%define NNAPILevel3 HAL version 1.2 -%define NNAPILevel4 HAL version 1.3 +%define model_or_subgraph subgraph +%define MODEL_or_SUBGRAPH SUBGRAPH +%define APILevel29 HAL version 1.2 +%define APILevel30 HAL version 1.3 %define NDK_if_specified %define otherOperandParameters extraParams -%section AVAIL1 -%/section -%section AVAIL1Short -%/section -%section AVAIL2 -%/section -%section AVAIL3 -%/section -%section AVAIL4 -%/section -%section PaddingCodeValues +%define-lines AVAIL27 +%/define-lines +%define-lines AVAIL27Short +%/define-lines +%define-lines AVAIL28 +%/define-lines +%define-lines AVAIL29 +%/define-lines +%define-lines AVAIL30 +%/define-lines +%define-lines PaddingCodeValues * following values: {0 (NONE), 1 (SAME), 2 (VALID)}. 
-%/section -%section OutputState -%/section +%/define-lines +%define-lines OutputState +%/define-lines %/kind %kind hal_1.0 hal_1.1 %define DeclareOperation %{1} = %{2} -%define BeforeNNAPILevel3For For +%define BeforeAPILevel29For For %define or_1.2 -%section NHWC_NCHW +%define-lines NHWC_NCHW * Supported tensor rank: 4, with "NHWC" (i.e., Num_samples, Height, Width, * and Channels) data layout. -%/section -%section GenericZero -%/section -%section ZeroBatchesNNAPILevel3 -%/section +%/define-lines +%define-lines GenericZero +%/define-lines +%define-lines ZeroBatchesAPI29 +%/define-lines %define DeclareOperation_1.2 @@@NOT_DEFINED@@@ %define DeclareOperation_1.3 @@@NOT_DEFINED@@@ %/kind -%kind canonical hal_1.2 hal_1.3 -%define BeforeNNAPILevel3For Before HAL version 1.2, for +%kind hal_1.2 hal_1.3 +%define BeforeAPILevel29For Before HAL version 1.2, for %define or_1.2 or {@link OperandType::%{1}} %/kind @@ -118,114 +116,32 @@ %define DeclareOperation_1.3 %{1} = %{2} %/kind -%kind canonical -%define DeclareOperation %{1} = %{2} -%define DeclareOperation_1.2 %{1} = %{2} -%define DeclareOperation_1.3 %{1} = %{2} -%define DeclareEnumValue %{1} = %{2} -%define OperandLifeTime Operand::LifeTime -%define :: :: -%define vec std::vector -%define string std::string -%define init_bool = false -%define init_float = 0.0f -%define init_int = 0 -%define init_pod {} -%define Dimensions Dimensions -%define concat_or_skip_first %{2} -%/kind - -%kind hal* -%define DeclareEnumValue %{1} -%define OperandLifeTime OperandLifeTime -%define :: . 
-%define vec vec -%define string string -%define init_bool -%define init_float -%define init_int -%define init_pod -%define Dimensions vec<uint32_t> -%define concat_or_skip_first %{1}%{2} -%/kind - -%kind ndk -%define DeclareEnumValue @@@NOT_DEFINED@@@ -%define OperandLifeTime @@@NOT_DEFINED@@@ -%define :: @@@NOT_DEFINED@@@ -%define vec @@@NOT_DEFINED@@@ -%define string @@@NOT_DEFINED@@@ -%define init_bool @@@NOT_DEFINED@@@ -%define init_float @@@NOT_DEFINED@@@ -%define init_int @@@NOT_DEFINED@@@ -%define init_pod @@@NOT_DEFINED@@@ -%define Dimensions @@@NOT_DEFINED@@@ -%define concat_or_skip_first @@@NOT_DEFINED@@@ -%/kind - -%kind canonical ndk hal_1.2 hal_1.3 -%section NHWC_NCHW +%kind ndk hal_1.2 hal_1.3 +%define-lines NHWC_NCHW * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout. * With the default data layout NHWC, the data is stored in the order of: * [batch, height, width, channels]. Alternatively, the data layout could * be NCHW, the data storage order of: [batch, channels, height, width]. - * NCHW is supported since %{NNAPILevel3}. -%/section -%section GenericZero - * Since %{NNAPILevel3}, generic zero-sized input tensor is supported. Zero + * NCHW is supported since %{APILevel29}. +%/define-lines +%define-lines GenericZero + * Since %{APILevel29}, generic zero-sized input tensor is supported. Zero * dimension is only compatible with 0 or 1. The size of the output * dimension is zero if either of corresponding input dimension is zero. * -%/section -%section ZeroBatchesNNAPILevel3 - * Since %{NNAPILevel3}, zero batches is supported for this tensor. -%/section +%/define-lines +%define-lines ZeroBatchesAPI29 + * Since %{APILevel29}, zero batches is supported for this tensor. 
+%/define-lines %/kind -%kind canonical ndk hal_1.3 +%kind ndk hal_1.3 %define AndQuant8Signed %/kind %kind hal_1.0 hal_1.1 hal_1.2 %define AndQuant8Signed %/kind -%kind ndk hal_1.0 hal_1.1 hal_1.2 -%define model_or_subgraph model -%define MODEL_or_SUBGRAPH MODEL -%define the_model_or_a_subgraph the model -%/kind - -%kind canonical hal_1.3+ -%define model_or_subgraph subgraph -%define MODEL_or_SUBGRAPH SUBGRAPH -%define the_model_or_a_subgraph a subgraph -%/kind - -%% Declaring enums that work across all kinds: -%% -%% %{enum X underlying_hal_type} { -%% %{DeclareX ...}, -%% ... -%% }%{ndk_enum_name X}; -%% -%% Note that %{ndk_enum_name X} can be omitted for non-NDK enums because the -%% macro definition is empty for all other kinds. -%kind canonical -%define enum enum class %{1} -%define ndk_enum_name -%define DeclarePriority %{1} = %{2} -%/kind -%kind ndk -%define enum typedef enum -%define ndk_enum_name %{1} -%define DeclarePriority ANEURALNETWORKS_PRIORITY_%{1} = %{3} -%/kind -%kind hal* -%define enum enum %{1} : %{2} -%define ndk_enum_name -%define DeclarePriority %{1} -%/kind - %section OEMDeprecationAndOperandTypeRangeMaxComment /* @@ -256,12 +172,6 @@ %% HAL OperandType for 1.0 %% NDK OperandCode for API 27 -%section canonical_empty_line -%kind canonical - -%/kind -%/section - %section Operand_1.0_Comment /** * Operand types. @@ -276,26 +186,21 @@ * types. Most used are {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}, * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM}, * and {@link %{OperandTypeLinkPfx}INT32}. -%insert AVAIL1Short +%insert-lines AVAIL27Short */ %/section %section Operand_1.0 /** A 32 bit floating point scalar value. */ %{ANN}FLOAT32 = 0, -%insert canonical_empty_line /** A signed 32 bit integer scalar value. */ %{ANN}INT32 = 1, -%insert canonical_empty_line /** An unsigned 32 bit integer scalar value. */ %{ANN}UINT32 = 2, -%insert canonical_empty_line /** A tensor of 32 bit floating point values. 
*/ %{ANN}TENSOR_FLOAT32 = 3, -%insert canonical_empty_line /** A tensor of 32 bit integer values. */ %{ANN}TENSOR_INT32 = 4, -%insert canonical_empty_line /** * A tensor of 8 bit unsigned integers that represent real numbers. * @@ -320,7 +225,7 @@ * Operation types. * * The type of an operation in a model. -%insert AVAIL1Short +%insert-lines AVAIL27Short */ %/section @@ -346,16 +251,16 @@ * input2.dimension = {5, 4, 3, 1} * output.dimension = {5, 4, 3, 2} * -%insert GenericZero +%insert-lines GenericZero * Supported tensor {@link %{OperandType}}: -%kind canonical ndk hal_1.2+ - * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} (since %{NNAPILevel3}) +%kind ndk hal_1.2+ + * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} (since %{APILevel29}) %/kind * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32} * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} -%kind canonical ndk hal_1.3+ - * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4}) - * * {@link %{OperandTypeLinkPfx}TENSOR_INT32} (since %{NNAPILevel4}) +%kind ndk hal_1.3+ + * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{APILevel30}) + * * {@link %{OperandTypeLinkPfx}TENSOR_INT32} (since %{APILevel30}) %/kind * * Supported tensor rank: up to 4 @@ -364,7 +269,7 @@ * * 0: A tensor. * * 1: A tensor of the same {@link %{OperandType}}, and compatible dimensions * as input0. -%kind canonical ndk hal_1.3+ +%kind ndk hal_1.3+ * For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} and * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} tensor, * the scales and zeroPoint can be different from input0 scale and zeroPoint. @@ -375,14 +280,14 @@ * * 2: An {@link %{OperandTypeLinkPfx}INT32} scalar, and has to be one of the * {@link %{FusedActivationFunc}} values. Specifies the activation to * invoke on the result. -%kind canonical ndk hal_1.3+ +%kind ndk hal_1.3+ * For a {@link %{OperandTypeLinkPfx}TENSOR_INT32} tensor, * the {@link %{FusedActivationFunc}} must be "NONE". 
%/kind * * Outputs: * * 0: The sum, a tensor of the same {@link %{OperandType}} as input0. -%kind canonical ndk hal_1.3+ +%kind ndk hal_1.3+ * For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} and * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} tensor, * the scale and zeroPoint can be different from inputs' scale and zeroPoint. @@ -390,7 +295,7 @@ * For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} tensor, * the scale and zeroPoint can be different from inputs' scale and zeroPoint. %/kind -%insert AVAIL1 +%insert-lines AVAIL27 */ %{DeclareOperation ADD 0}, @@ -408,23 +313,23 @@ * ) / sum(1) * * Supported tensor {@link %{OperandType}}: -%kind canonical ndk hal_1.2+ - * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} (since %{NNAPILevel3}) +%kind ndk hal_1.2+ + * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} (since %{APILevel29}) %/kind * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32} * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} -%kind canonical ndk hal_1.3+ - * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4}) +%kind ndk hal_1.3+ + * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{APILevel30}) %/kind * -%insert NHWC_NCHW +%insert-lines NHWC_NCHW * * Both explicit padding and implicit padding are supported. * * Inputs (explicit padding): * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying * the input. -%insert ZeroBatchesNNAPILevel3 +%insert-lines ZeroBatchesAPI29 * * 1: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the padding on * the left, in the ‘width’ dimension. * * 2: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the padding on @@ -444,19 +349,19 @@ * * 9: An {@link %{OperandTypeLinkPfx}INT32} scalar, and has to be one of the * {@link %{FusedActivationFunc}} values. Specifies the activation to * invoke on the result. 
-%kind canonical ndk hal_1.2+ +%kind ndk hal_1.2+ * * 10: An optional {@link %{OperandTypeLinkPfx}BOOL} scalar, default to false. * Set to true to specify NCHW data layout for input0 and output0. - * Available since %{NNAPILevel3}. + * Available since %{APILevel29}. %/kind * * Inputs (implicit padding): * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying * the input. -%insert ZeroBatchesNNAPILevel3 +%insert-lines ZeroBatchesAPI29 * * 1: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the implicit * padding scheme, has to be one of the -%insert PaddingCodeValues +%insert-lines PaddingCodeValues * * 2: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the stride when * walking through input in the ‘width’ dimension. * * 3: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the stride when @@ -468,16 +373,16 @@ * * 6: An {@link %{OperandTypeLinkPfx}INT32} scalar, and has to be one of the * {@link %{FusedActivationFunc}} values. Specifies the activation to * invoke on the result. -%kind canonical ndk hal_1.2+ +%kind ndk hal_1.2+ * * 7: An optional {@link %{OperandTypeLinkPfx}BOOL} scalar, default to false. * Set to true to specify NCHW data layout for input0 and output0. - * Available since %{NNAPILevel3}. + * Available since %{APILevel29}. %/kind * * Outputs: * * 0: The output 4-D tensor, of shape * [batches, out_height, out_width, depth]. -%kind canonical ndk hal_1.3+ +%kind ndk hal_1.3+ * For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} and * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} tensor, * the scale and zeroPoint must be the same as input0. @@ -485,7 +390,7 @@ * For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} tensor, * the scale and zeroPoint must be the same as input0. %/kind -%insert AVAIL1 +%insert-lines AVAIL27 */ %{DeclareOperation AVERAGE_POOL_2D 1}, @@ -496,16 +401,16 @@ * dimensions except the dimension along the concatenation axis. 
* * Supported tensor {@link %{OperandType}}: -%kind canonical ndk hal_1.2+ - * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} (since %{NNAPILevel3}) +%kind ndk hal_1.2+ + * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} (since %{APILevel29}) %/kind * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32} * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} -%kind canonical ndk hal_1.2+ - * (full support since %{NNAPILevel3}, see the input section) +%kind ndk hal_1.2+ + * (full support since %{APILevel29}, see the input section) %/kind -%kind canonical ndk hal_1.3+ - * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4}) +%kind ndk hal_1.3+ + * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{APILevel30}) %/kind * * Supported tensor rank: up to 4 @@ -513,20 +418,20 @@ * Inputs: * * 0 ~ n-1: The list of n input tensors, of shape * [D0, D1, ..., Daxis(i), ..., Dm]. -%kind canonical ndk hal_1.2+ - * Before %{NNAPILevel3}, all input tensors of +%kind ndk hal_1.2+ + * Before %{APILevel29}, all input tensors of %else * All input tensors of %/kind * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} * must have the same scale and zeroPoint as the output tensor. -%kind canonical ndk hal_1.3+ +%kind ndk hal_1.3+ * Input tensors of * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} * are allowed to have different scale and zeroPoint. %/kind -%kind canonical ndk hal_1.2+ - * Since %{NNAPILevel3}, zero-sized tensors are supported. +%kind ndk hal_1.2+ + * Since %{APILevel29}, zero-sized tensors are supported. %/kind * * n: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the * concatenation axis. @@ -534,20 +439,19 @@ * Outputs: * * 0: The output, a tensor of the same {@link %{OperandType}} as the input * tensors. The output shape is [D0, D1, ..., sum(Daxis(i)), ..., Dm]. 
-%kind canonical ndk hal_1.2+ - * Since %{NNAPILevel3}, for a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} tensor, +%kind ndk hal_1.2+ + * Since %{APILevel29}, for a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} tensor, * the scale and zeroPoint values can be different from - * input tensors. Before %{NNAPILevel3} they have to be the same as for the - * input tensors. + * input tensors. Before %{APILevel29} they have to be the same as for the input tensors. %else * For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} tensor, the scale and zeroPoint * values must be the same as the input tensors'. %/kind -%kind canonical hal_1.3+ +%kind hal_1.3+ * For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} tensor, * the scale and zeroPoint values can be different from input tensors. %/kind -%insert AVAIL1 +%insert-lines AVAIL27 */ %{DeclareOperation CONCATENATION 2}, @@ -578,8 +482,8 @@ * * * {@link %{OperandTypeLinkPfx}TENSOR_INT32} for bias (with scale set to * * * input.scale * filter.scale). * -%kind canonical ndk hal_1.2+ - * Available since %{NNAPILevel3}: +%kind ndk hal_1.2+ + * Available since %{APILevel29}: * * 16 bit floating point: * * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} for input, filter, output, and bias. * @@ -591,32 +495,31 @@ * %/kind %kind ndk hal_1.3+ - * Available since %{NNAPILevel4}: - * * Quantized signed (since %{NNAPILevel4}): + * Available since %{APILevel30}: + * * Quantized signed (since %{APILevel30}): * * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} for input, filter, and output. * * * {@link %{OperandTypeLinkPfx}TENSOR_INT32} for bias (with scale set to * * * input.scale * filter.scale). * - * * Quantized signed with filter symmetric per channel quantization - * (since %{NNAPILevel4}): + * * Quantized signed with filter symmetric per channel quantization (since %{APILevel30}): * * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} for input, and output. 
* * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter. * * * {@link %{OperandTypeLinkPfx}TENSOR_INT32} for bias (scale set to 0.0, * * * each value scaling is separate and equal to input.scale * filter.scales[channel]). * %/kind -%insert NHWC_NCHW +%insert-lines NHWC_NCHW * * Both explicit padding and implicit padding are supported. * * Inputs (explicit padding): * * 0: A 4-D tensor, of shape [batches, height, width, depth_in], * specifying the input. -%insert ZeroBatchesNNAPILevel3 +%insert-lines ZeroBatchesAPI29 * * 1: A 4-D tensor, of shape * [depth_out, filter_height, filter_width, depth_in], specifying the * filter. -%kind canonical ndk hal_1.2+ +%kind ndk hal_1.2+ * For tensor of type {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_SYMM_PER_CHANNEL} * the channel dimension (%{Ann}SymmPerChannelQuantParams::channelDim) * must be set to 0. @@ -624,7 +527,7 @@ * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input * tensor of type {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32} * %{or_1.2 TENSOR_FLOAT16} the bias must be of the same type. -%kind canonical ndk hal_1.3+ +%kind ndk hal_1.3+ * For filter tensor of {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} * and {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED}, %else @@ -632,7 +535,7 @@ %/kind * the bias should be of {@link %{OperandTypeLinkPfx}TENSOR_INT32}, with zeroPoint * of 0 and bias_scale == input_scale * filter_scale. -%kind canonical ndk hal_1.2+ +%kind ndk hal_1.2+ * For filter tensor of {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_SYMM_PER_CHANNEL}, * the bias should be of {@link %{OperandTypeLinkPfx}TENSOR_INT32}, with zeroPoint of 0 * and bias_scale of 0. The actual scale of each value 'i' is equal to @@ -653,30 +556,30 @@ * * 9: An {@link %{OperandTypeLinkPfx}INT32} scalar, and has to be one of the * {@link %{FusedActivationFunc}} values. Specifies the activation to * invoke on the result. 
-%kind canonical ndk hal_1.2+ +%kind ndk hal_1.2+ * * 10: An optional {@link %{OperandTypeLinkPfx}BOOL} scalar, default to false. * Set to true to specify NCHW data layout for input0 and output0. - * Available since %{NNAPILevel3}. + * Available since %{APILevel29}. * * 11: An optional {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the dilation * factor for width. Defaults to 1. If set to k > 1, there will be k-1 skipped * cells between each filter element on width dimension. If this input is set, * input 12 (dilation factor for height) must be specified as well. - * Available since %{NNAPILevel3}. + * Available since %{APILevel29}. * * 12: An optional {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the dilation * factor for height. Defaults to 1. If set to k > 1, there will be k-1 skipped * cells between each filter element on height dimension. If this input is set, * input 11 (dilation factor for width) must be specified as well. - * Available since %{NNAPILevel3}. + * Available since %{APILevel29}. %/kind * * Inputs (implicit padding): * * 0: A 4-D tensor, of shape [batches, height, width, depth_in], * specifying the input. -%insert ZeroBatchesNNAPILevel3 +%insert-lines ZeroBatchesAPI29 * * 1: A 4-D tensor, of shape * [depth_out, filter_height, filter_width, depth_in], specifying the * filter. -%kind canonical ndk hal_1.2+ +%kind ndk hal_1.2+ * For tensor of type {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_SYMM_PER_CHANNEL} * the channel dimension (%{Ann}SymmPerChannelQuantParams::channelDim) * must be set to 0. @@ -685,7 +588,7 @@ * tensor of type {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32} * %{or_1.2 TENSOR_FLOAT16} the bias must be of the same * type. 
-%kind canonical ndk hal_1.3+ +%kind ndk hal_1.3+ * For filter tensor of {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} * and {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED}, %else @@ -693,7 +596,7 @@ %/kind * the bias should be of {@link %{OperandTypeLinkPfx}TENSOR_INT32}, with zeroPoint * of 0 and bias_scale == input_scale * filter_scale. -%kind canonical ndk hal_1.2+ +%kind ndk hal_1.2+ * For filter tensor of {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_SYMM_PER_CHANNEL}, * the bias should be of {@link %{OperandTypeLinkPfx}TENSOR_INT32}, with zeroPoint of 0 * and bias_scale of 0. The actual scale of each value 'i' is equal to @@ -701,7 +604,7 @@ %/kind * * 3: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the implicit * padding scheme, has to be one of the -%insert PaddingCodeValues +%insert-lines PaddingCodeValues * * 4: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the stride when * walking through input in the ‘width’ dimension. * * 5: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the stride when @@ -709,29 +612,28 @@ * * 6: An {@link %{OperandTypeLinkPfx}INT32} scalar, and has to be one of the * {@link %{FusedActivationFunc}} values. Specifies the activation to * invoke on the result. -%kind canonical ndk hal_1.2+ +%kind ndk hal_1.2+ * * 7: An optional {@link %{OperandTypeLinkPfx}BOOL} scalar, default to false. * Set to true to specify NCHW data layout for input0 and output0. - * Available since %{NNAPILevel3}. + * Available since %{APILevel29}. * * 8: An optional {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the dilation * factor for width. Defaults to 1. If set to k > 1, there will be k-1 skipped * cells between each filter element on width dimension. If this input is set, * input 9 (dilation factor for height) must be specified as well. - * Available since %{NNAPILevel3}. + * Available since %{APILevel29}. 
* * 9: An optional {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the dilation * factor for height. Defaults to 1. If set to k > 1, there will be k-1 skipped * cells between each filter element on height dimension. If this input is set, * input 8 (dilation factor for width) must be specified as well. - * Available since %{NNAPILevel3}. + * Available since %{APILevel29}. %/kind * * Outputs: * * 0: The output 4-D tensor, of shape * [batches, out_height, out_width, depth_out]. - * %{BeforeNNAPILevel3For} output tensor of - * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM}, the following condition must - * be satisfied: output_scale > input_scale * filter_scale -%insert AVAIL1 + * %{BeforeAPILevel29For} output tensor of {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM}, + * the following condition must be satisfied: output_scale > input_scale * filter_scale +%insert-lines AVAIL27 */ %{DeclareOperation CONV_2D 3}, @@ -766,8 +668,8 @@ * * * {@link %{OperandTypeLinkPfx}TENSOR_INT32} for bias (with scale set to * * * input.scale * filter.scale). * -%kind canonical ndk hal_1.2+ - * Available since %{NNAPILevel3}: +%kind ndk hal_1.2+ + * Available since %{APILevel29}: * * 16 bit floating point: * * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} for input, filter, output, and bias. * @@ -778,22 +680,21 @@ * * * each value scaling is separate and equal to input.scale * filter.scales[channel]). * %/kind -%kind canonical ndk hal_1.3+ - * Available since %{NNAPILevel4}: - * * Quantized signed (since %{NNAPILevel4}): +%kind ndk hal_1.3+ + * Available since %{APILevel30}: + * * Quantized signed (since %{APILevel30}): * * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} for input, filter, and output. * * * {@link %{OperandTypeLinkPfx}TENSOR_INT32} for bias (with scale set to * * * input.scale * filter.scale). 
* - * * Quantized signed with filter symmetric per channel quantization - * (since %{NNAPILevel4}): + * * Quantized signed with filter symmetric per channel quantization (since %{APILevel30}): * * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} for input, and output. * * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter. * * * {@link %{OperandTypeLinkPfx}TENSOR_INT32} for bias (scale set to 0.0, * * * each value scaling is separate and equal to input.scale * filter.scales[channel]). * %/kind -%insert NHWC_NCHW +%insert-lines NHWC_NCHW * * Both explicit padding and implicit padding are supported. * @@ -802,7 +703,7 @@ * specifying the input. * * 1: A 4-D tensor, of shape [1, filter_height, filter_width, depth_out], * specifying the filter. -%kind canonical ndk hal_1.2+ +%kind ndk hal_1.2+ * For tensor of type {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_SYMM_PER_CHANNEL} * the channel dimension (%{Ann}SymmPerChannelQuantParams::channelDim) * must be set to 3. @@ -810,7 +711,7 @@ * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input * tensor of type {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32} * %{or_1.2 TENSOR_FLOAT16} the bias must be of the same type. -%kind canonical ndk hal_1.3+ +%kind ndk hal_1.3+ * For filter tensor of {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} * and {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED}, %else @@ -818,7 +719,7 @@ %/kind * the bias should be of {@link %{OperandTypeLinkPfx}TENSOR_INT32}, with zeroPoint * of 0 and bias_scale == input_scale * filter_scale. -%kind canonical ndk hal_1.2+ +%kind ndk hal_1.2+ * For filter tensor of {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_SYMM_PER_CHANNEL}, * the bias should be of {@link %{OperandTypeLinkPfx}TENSOR_INT32}, with zeroPoint of 0 * and bias_scale of 0. 
The actual scale of each value 'i' is equal to @@ -841,20 +742,20 @@ * * 10: An {@link %{OperandTypeLinkPfx}INT32} scalar, and has to be one of the * {@link %{FusedActivationFunc}} values. Specifies the activation to * invoke on the result. -%kind canonical ndk hal_1.2+ +%kind ndk hal_1.2+ * * 11: An optional {@link %{OperandTypeLinkPfx}BOOL} scalar, default to false. * Set to true to specify NCHW data layout for input0 and output0. - * Available since %{NNAPILevel3}. + * Available since %{APILevel29}. * * 12: An optional {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the dilation * factor for width. Defaults to 1. If set to k > 1, there will be k-1 skipped * cells between each filter element on width dimension. If this input is set, * input 13 (dilation factor for height) must be specified as well. - * Available since %{NNAPILevel3}. + * Available since %{APILevel29}. * * 13: An optional {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the dilation * factor for height. Defaults to 1. If set to k > 1, there will be k-1 skipped * cells between each filter element on height dimension. If this input is set, * input 12 (dilation factor for width) must be specified as well. - * Available since %{NNAPILevel3}. + * Available since %{APILevel29}. %/kind * * Inputs (implicit padding): @@ -865,7 +766,7 @@ * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input * tensor of type {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32} * %{or_1.2 TENSOR_FLOAT16} the bias must be of the same type. -%kind canonical ndk hal_1.3+ +%kind ndk hal_1.3+ * For filter tensor of {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} * and {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED}, %else @@ -873,7 +774,7 @@ %/kind * the bias should be of {@link %{OperandTypeLinkPfx}TENSOR_INT32}, with zeroPoint * of 0 and bias_scale == input_scale * filter_scale. 
-%kind canonical ndk hal_1.2+ +%kind ndk hal_1.2+ * For filter tensor of {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_SYMM_PER_CHANNEL}, * the bias should be of {@link %{OperandTypeLinkPfx}TENSOR_INT32}, with zeroPoint of 0 * and bias_scale of 0. The actual scale of each value 'i' is equal to @@ -881,7 +782,7 @@ %/kind * * 3: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the implicit * padding scheme, has to be one of the -%insert PaddingCodeValues +%insert-lines PaddingCodeValues * * 4: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the stride when * walking through input in the ‘width’ dimension. * * 5: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the stride when @@ -891,29 +792,29 @@ * * 7: An {@link %{OperandTypeLinkPfx}INT32} scalar, and has to be one of the * {@link %{FusedActivationFunc}} values. Specifies the activation to * invoke on the result. -%kind canonical ndk hal_1.2+ +%kind ndk hal_1.2+ * * 8: An optional {@link %{OperandTypeLinkPfx}BOOL} scalar, default to false. * Set to true to specify NCHW data layout for input0 and output0. - * Available since %{NNAPILevel3}. + * Available since %{APILevel29}. * * 9: An optional {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the dilation * factor for width. Defaults to 1. If set to k > 1, there will be k-1 skipped * cells between each filter element on width dimension. If this input is set, * input 10 (dilation factor for height) must be specified as well. - * Available since %{NNAPILevel3}. + * Available since %{APILevel29}. * * 10: An optional {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the dilation * factor for height. Defaults to 1. If set to k > 1, there will be k-1 skipped * cells between each filter element on height dimension. If this input is set, * input 9 (dilation factor for width) must be specified as well. - * Available since %{NNAPILevel3}. + * Available since %{APILevel29}. 
%/kind * * Outputs: * * 0: The output 4-D tensor, of shape - * [batches, out_height, out_width, depth_out]. %{BeforeNNAPILevel3For} + * [batches, out_height, out_width, depth_out]. %{BeforeAPILevel29For} * output tensor of {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM}, * the following condition must be satisfied: * output_scale > input_scale * filter_scale -%insert AVAIL1 +%insert-lines AVAIL27 */ %{DeclareOperation DEPTHWISE_CONV_2D 4}, @@ -933,16 +834,16 @@ * be divisible by block_size * block_size * * Supported tensor {@link %{OperandType}}: -%kind canonical ndk hal_1.2+ - * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} (since %{NNAPILevel3}) +%kind ndk hal_1.2+ + * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} (since %{APILevel29}) %/kind * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32} * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} -%kind canonical ndk hal_1.3+ - * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4}) +%kind ndk hal_1.3+ + * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{APILevel30}) %/kind * -%insert NHWC_NCHW +%insert-lines NHWC_NCHW * * Inputs: * * 0: A 4-D tensor, of shape [batches, height, width, depth_in], @@ -950,16 +851,16 @@ * * 1: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the block_size. * block_size must be >=1 and block_size * block_size must be a divisor * of the input depth. -%kind canonical ndk hal_1.2+ +%kind ndk hal_1.2+ * * 2: An optional {@link %{OperandTypeLinkPfx}BOOL} scalar, default to false. * Set to true to specify NCHW data layout for input0 and output0. - * Available since %{NNAPILevel3}. + * Available since %{APILevel29}. %/kind * * Outputs: * * 0: The output 4-D tensor, of shape [batch, height*block_size, * width*block_size, depth/(block_size*block_size)]. 
-%kind canonical ndk hal_1.3+ +%kind ndk hal_1.3+ * For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} and * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} tensor, * the scale and zeroPoint must be the same as input0. @@ -967,7 +868,7 @@ * For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} tensor, * the scale and zeroPoint must be the same as input0. %/kind -%insert AVAIL1 +%insert-lines AVAIL27 */ %{DeclareOperation DEPTH_TO_SPACE 5}, @@ -980,17 +881,17 @@ * * Supported input tensor {@link %{OperandType}}: * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} -%kind canonical ndk hal_1.2+ - * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_SYMM} (since %{NNAPILevel3}) - * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_SYMM_PER_CHANNEL} (since %{NNAPILevel3}) +%kind ndk hal_1.2+ + * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_SYMM} (since %{APILevel29}) + * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_SYMM_PER_CHANNEL} (since %{APILevel29}) %/kind -%kind canonical ndk hal_1.3+ - * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4}) +%kind ndk hal_1.3+ + * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{APILevel30}) %/kind * * Supported output tensor {@link %{OperandType}}: -%kind canonical ndk hal_1.2+ - * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} (since %{NNAPILevel3}) +%kind ndk hal_1.2+ + * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} (since %{APILevel29}) %/kind * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}. * @@ -998,13 +899,13 @@ * * Inputs: * * 0: A tensor. -%kind canonical ndk hal_1.2+ - * Since %{NNAPILevel3}, this tensor may be zero-sized. +%kind ndk hal_1.2+ + * Since %{APILevel29}, this tensor may be zero-sized. %/kind * * Outputs: * * 0: A tensor with the same shape as input0. -%insert AVAIL1 +%insert-lines AVAIL27 */ %{DeclareOperation DEQUANTIZE 6}, @@ -1029,16 +930,16 @@ * and an error must be reported. 
* * Supported value tensor {@link %{OperandType}}: -%kind canonical ndk hal_1.3+ - * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} (since %{NNAPILevel4}) +%kind ndk hal_1.3+ + * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} (since %{APILevel30}) %/kind * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32} -%kind canonical ndk hal_1.2+ - * * {@link %{OperandTypeLinkPfx}TENSOR_INT32} (since %{NNAPILevel3}) - * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} (since %{NNAPILevel3}) +%kind ndk hal_1.2+ + * * {@link %{OperandTypeLinkPfx}TENSOR_INT32} (since %{APILevel29}) + * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} (since %{APILevel29}) %/kind -%kind canonical ndk hal_1.3+ - * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4}) +%kind ndk hal_1.3+ + * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{APILevel30}) %/kind * * Supported value tensor rank: from 2 @@ -1053,14 +954,14 @@ * * 0: A n-D tensor with the same rank and shape as the Values * tensor, except for the first dimension which has the same size * as Lookups' only dimension. -%kind canonical ndk hal_1.3+ +%kind ndk hal_1.3+ * For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} and * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} tensor, %else * For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} tensor, %/kind * the scale and zeroPoint must be the same as input1. -%insert AVAIL1 +%insert-lines AVAIL27 */ %{DeclareOperation EMBEDDING_LOOKUP 7}, @@ -1068,8 +969,8 @@ * Computes element-wise floor() on the input tensor. * * Supported tensor {@link %{OperandType}}: -%kind canonical ndk hal_1.2+ - * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} (since %{NNAPILevel3}) +%kind ndk hal_1.2+ + * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} (since %{APILevel29}) %/kind * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32} * @@ -1081,7 +982,7 @@ * Outputs: * * 0: The output tensor, of the same {@link %{OperandType}} and dimensions as * the input tensor. 
-%insert AVAIL1 +%insert-lines AVAIL27 */ %{DeclareOperation FLOOR 8}, @@ -1094,13 +995,13 @@ * outputs = activation(inputs * weights’ + bias) * * Supported tensor {@link %{OperandType}}: -%kind canonical ndk hal_1.2+ - * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} (since %{NNAPILevel3}) +%kind ndk hal_1.2+ + * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} (since %{APILevel29}) %/kind * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32} * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} -%kind canonical ndk hal_1.3+ - * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4}) +%kind ndk hal_1.3+ + * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{APILevel30}) %/kind * * Supported tensor rank: up to 4. @@ -1113,8 +1014,8 @@ * number of inputs to the layer, matching the second dimension of * weights, and "batch_size" is calculated by dividing the number of * elements by "input_size". -%kind canonical ndk hal_1.2+ - * Since %{NNAPILevel3}, zero batch_size is supported for this tensor. +%kind ndk hal_1.2+ + * Since %{APILevel29}, zero batch_size is supported for this tensor. %/kind * * 1: A 2-D tensor, specifying the weights, of shape * [num_units, input_size], where "num_units" corresponds to the number @@ -1122,7 +1023,7 @@ * * 2: A 1-D tensor, of shape [num_units], specifying the bias. For input * tensor of {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}, the bias should * also be of {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}. -%kind canonical ndk hal_1.3+ +%kind ndk hal_1.3+ * For input tensor of {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} * and {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED}, %else @@ -1135,10 +1036,10 @@ * invoke on the result. * * Outputs: - * * 0: The output tensor, of shape [batch_size, num_units]. %{BeforeNNAPILevel3For} + * * 0: The output tensor, of shape [batch_size, num_units]. 
%{BeforeAPILevel29For} * output tensor of {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM}, the following * condition must be satisfied: output_scale > input_scale * filter_scale. -%insert AVAIL1 +%insert-lines AVAIL27 */ %{DeclareOperation FULLY_CONNECTED 9}, @@ -1196,7 +1097,7 @@ * Stored as {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} with offset 0 * and scale 1.0f. * A non-zero byte represents True, a hit. A zero indicates otherwise. -%insert AVAIL1 +%insert-lines AVAIL27 */ %{DeclareOperation HASHTABLE_LOOKUP 10}, @@ -1209,59 +1110,59 @@ * input[batch, row, col, channel] / * sqrt(sum_{c} pow(input[batch, row, col, c], 2)) * -%kind canonical ndk hal_1.2+ +%kind ndk hal_1.2+ * By default the axis dimension is the last dimension of the input tensor. * %/kind * Supported tensor {@link %{OperandType}}: -%kind canonical ndk hal_1.2+ - * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} (since %{NNAPILevel3}) +%kind ndk hal_1.2+ + * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} (since %{APILevel29}) %/kind * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32} -%kind canonical ndk hal_1.2+ - * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} (since %{NNAPILevel3}) +%kind ndk hal_1.2+ + * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} (since %{APILevel29}) %/kind -%kind canonical ndk hal_1.3+ - * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4}) +%kind ndk hal_1.3+ + * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{APILevel30}) %/kind * -%kind canonical ndk hal_1.2+ +%kind ndk hal_1.2+ * Supported tensor rank: up to 4 - * Tensors with rank less than 4 are only supported since %{NNAPILevel3}. + * Tensors with rank less than 4 are only supported since %{APILevel29}. %else * Supported tensor rank: 4, with "NHWC" data layout (i.e., Num_samples, * Height, Width, and Channels). %/kind * * Inputs: -%kind canonical ndk hal_1.2+ +%kind ndk hal_1.2+ * * 0: An n-D tensor, specifying the tensor to be normalized. 
%else * * 0: A 4-D tensor, specifying the tensor to be normalized. %/kind -%kind canonical ndk hal_1.2+ +%kind ndk hal_1.2+ * * 1: An optional {@link %{OperandTypeLinkPfx}INT32} scalar, default to -1, * specifying the dimension normalization would be performed on. * Negative index is used to specify axis from the end (e.g. -1 for * the last axis). Must be in the range [-n, n). - * Available since %{NNAPILevel3}. + * Available since %{APILevel29}. %/kind * * Outputs: * * 0: A tensor of the same {@link %{OperandType}} and same shape as input0. -%kind canonical ndk hal_1.2+ +%kind ndk hal_1.2+ * For {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM}, * the scale must be 1.f / 128 and the zeroPoint must be 128. %/kind -%kind canonical ndk hal_1.3+ +%kind ndk hal_1.3+ * For {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED}, * the scale must be 1.f / 128 and the zeroPoint must be 0. * - * NOTE: Before %{NNAPILevel4}, if the elements along an axis are all zeros, - * the result is undefined. Since %{NNAPILevel4}, if the elements along an axis + * NOTE: Before %{APILevel30}, if the elements along an axis are all zeros, + * the result is undefined. Since %{APILevel30}, if the elements along an axis * are all zeros, the result is logical zero. %/kind -%insert AVAIL1 +%insert-lines AVAIL27 */ %{DeclareOperation L2_NORMALIZATION 11}, @@ -1278,19 +1179,19 @@ * sum(1)) * * Supported tensor {@link %{OperandType}}: -%kind canonical ndk hal_1.2+ - * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} (since %{NNAPILevel3}) +%kind ndk hal_1.2+ + * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} (since %{APILevel29}) %/kind * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32} * -%insert NHWC_NCHW +%insert-lines NHWC_NCHW * * Both explicit padding and implicit padding are supported. * * Inputs (explicit padding): * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying * the input. 
-%insert ZeroBatchesNNAPILevel3 +%insert-lines ZeroBatchesAPI29 * * 1: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the padding on * the left, in the ‘width’ dimension. * * 2: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the padding on @@ -1310,19 +1211,19 @@ * * 9: An {@link %{OperandTypeLinkPfx}INT32} scalar, and has to be one of the * {@link %{FusedActivationFunc}} values. Specifies the activation to * invoke on the result. -%kind canonical ndk hal_1.2+ +%kind ndk hal_1.2+ * * 10: An optional {@link %{OperandTypeLinkPfx}BOOL} scalar, default to false. * Set to true to specify NCHW data layout for input0 and output0. - * Available since %{NNAPILevel3}. + * Available since %{APILevel29}. %/kind * * Inputs (implicit padding): * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying * the input. -%insert ZeroBatchesNNAPILevel3 +%insert-lines ZeroBatchesAPI29 * * 1: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the implicit * padding scheme, has to be one of the -%insert PaddingCodeValues +%insert-lines PaddingCodeValues * * 2: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the stride when * walking through input in the ‘width’ dimension. * * 3: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the stride when @@ -1334,16 +1235,16 @@ * * 6: An {@link %{OperandTypeLinkPfx}INT32} scalar, and has to be one of the * {@link %{FusedActivationFunc}} values. Specifies the activation to * invoke on the result. -%kind canonical ndk hal_1.2+ +%kind ndk hal_1.2+ * * 7: An optional {@link %{OperandTypeLinkPfx}BOOL} scalar, default to false. * Set to true to specify NCHW data layout for input0 and output0. - * Available since %{NNAPILevel3}. + * Available since %{APILevel29}. %/kind * * Outputs: * * 0: The output 4-D tensor, of shape * [batches, out_height, out_width, depth]. 
-%insert AVAIL1 +%insert-lines AVAIL27 */ %{DeclareOperation L2_POOL_2D 12}, @@ -1361,20 +1262,20 @@ * pow(input[a, b, c, d - depth_radius : d + depth_radius + 1], 2)) * output = input / pow((bias + alpha * sqr_sum), beta) * -%kind canonical ndk hal_1.2+ +%kind ndk hal_1.2+ * For input tensor with rank less than 4, independently normalizes each * 1-D slice along specified dimension. * %/kind * Supported tensor {@link %{OperandType}}: -%kind canonical ndk hal_1.2+ - * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} (since %{NNAPILevel3}) +%kind ndk hal_1.2+ + * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} (since %{APILevel29}) %/kind * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32} * -%kind canonical ndk hal_1.2+ +%kind ndk hal_1.2+ * Supported tensor rank: up to 4 - * Tensors with rank less than 4 are only supported since %{NNAPILevel3}. + * Tensors with rank less than 4 are only supported since %{APILevel29}. %else * Supported tensor rank: 4, with "NHWC" data layout. %/kind @@ -1385,37 +1286,37 @@ * * 1: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the radius of * the normalization window. * * 2: A scalar, specifying the bias, must not be zero. -%kind canonical ndk hal_1.2+ +%kind ndk hal_1.2+ * For input tensor of {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16}, the bias * value must be of {@link %{OperandTypeLinkPfx}FLOAT16}. %/kind * For input tensor of {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}, the bias * value must be of {@link %{OperandTypeLinkPfx}FLOAT32}. * * 3: A scalar, specifying the scale factor, alpha. -%kind canonical ndk hal_1.2+ +%kind ndk hal_1.2+ * For input tensor of {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16}, the * alpha value must be of {@link %{OperandTypeLinkPfx}FLOAT16}. %/kind * For input tensor of {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}, the * alpha value must be of {@link %{OperandTypeLinkPfx}FLOAT32}. * * 4: A scalar, specifying the exponent, beta. 
-%kind canonical ndk hal_1.2+ +%kind ndk hal_1.2+ * For input tensor of {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16}, the beta * value must be of {@link %{OperandTypeLinkPfx}FLOAT16}. %/kind * For input tensor of {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}, the beta * value must be of {@link %{OperandTypeLinkPfx}FLOAT32}. -%kind canonical ndk hal_1.2+ +%kind ndk hal_1.2+ * * 5: An optional {@link %{OperandTypeLinkPfx}INT32} scalar, default to -1, * specifying the dimension normalization would be performed on. * Negative index is used to specify axis from the end (e.g. -1 for * the last axis). Must be in the range [-n, n). - * Available since %{NNAPILevel3}. + * Available since %{APILevel29}. %/kind * * Outputs: * * 0: The output tensor of same shape as input0. -%insert AVAIL1 +%insert-lines AVAIL27 */ %{DeclareOperation LOCAL_RESPONSE_NORMALIZATION 13}, @@ -1427,32 +1328,32 @@ * output = 1 / (1 + exp(-input)) * * Supported tensor {@link %{OperandType}}: -%kind canonical ndk hal_1.2+ - * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} (since %{NNAPILevel3}) +%kind ndk hal_1.2+ + * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} (since %{APILevel29}) %/kind * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32} * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} -%kind canonical ndk hal_1.3+ - * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4}) +%kind ndk hal_1.3+ + * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{APILevel30}) %/kind * * Supported tensor rank: up to 4. * * Inputs: * * 0: A tensor, specifying the input. -%kind canonical ndk hal_1.2+ - * Since %{NNAPILevel3}, this tensor may be zero-sized. +%kind ndk hal_1.2+ + * Since %{APILevel29}, this tensor may be zero-sized. %/kind * * Outputs: * * 0: The output tensor of same shape as input0. * For {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM}, * the scale must be 1.f / 256 and the zeroPoint must be 0. 
-%kind canonical ndk hal_1.3+ +%kind ndk hal_1.3+ * For {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED}, * the scale must be 1.f / 256 and the zeroPoint must be -128. %/kind -%insert AVAIL1 +%insert-lines AVAIL27 */ %{DeclareOperation LOGISTIC 14}, @@ -1460,8 +1361,8 @@ * Projects an input to a bit vector via locality senstive hashing. * * Supported input tensor {@link %{OperandType}}: -%kind canonical ndk hal_1.2+ - * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} (since %{NNAPILevel3}) +%kind ndk hal_1.2+ + * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} (since %{APILevel29}) %/kind * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32} * * {@link %{OperandTypeLinkPfx}TENSOR_INT32} @@ -1484,8 +1385,8 @@ * Tensor[1].Dim[0] == Tensor[2].Dim[0] * * 3: Type: * Sparse: -%kind canonical ndk hal_1.2+ - * Value LSHProjectionType_SPARSE(=3) (since %{NNAPILevel3}). +%kind ndk hal_1.2+ + * Value LSHProjectionType_SPARSE(=3) (since %{APILevel29}). %else * Value LSHProjectionType_SPARSE(=1). %/kind @@ -1493,7 +1394,7 @@ * Each output element is an int32 made up of multiple bits * computed from hash functions. * -%kind canonical ndk hal_1.2+ +%kind ndk hal_1.2+ * NOTE: To avoid collisions across hash functions, an offset value * of k * (1 << Tensor[0].Dim[1]) will be added to each signature, * where k is the index of the hash function. @@ -1516,9 +1417,9 @@ * If the projection type is Dense: * Output.Dim == { Tensor[0].Dim[0] * Tensor[0].Dim[1] } * A flattened tensor that represents projected bit vectors. -%insert AVAIL1 -%kind canonical ndk hal_1.2+ - * The offset value for sparse projections was added in %{NNAPILevel3}. +%insert-lines AVAIL27 +%kind ndk hal_1.2+ + * The offset value for sparse projections was added in %{APILevel29}. %/kind */ %{DeclareOperation LSH_PROJECTION 15}, @@ -1575,8 +1476,8 @@ * matrix, each element of which is the product of the corresponding * elements of the input matrices. 
* -%kind canonical ndk hal_1.2+ - * Since %{NNAPILevel3} LSTM supports layer normalization. +%kind ndk hal_1.2+ + * Since %{APILevel29} LSTM supports layer normalization. * In case layer normalization is used, the inputs to internal activation * functions (sigmoid and \f$g\f$) are normalized, rescaled and recentered * following an approach from section 3.1 from @@ -1604,8 +1505,8 @@ * * The projection bias (\f$b_{proj}\f$) may (but not required to) have a * value if the recurrent projection layer exists, and should otherwise * have no value. -%kind canonical ndk hal_1.2+ - * * (%{NNAPILevel3} or later) The four layer normalization weights either all have +%kind ndk hal_1.2+ + * * (%{APILevel29} or later) The four layer normalization weights either all have * values or none of them have values. Additionally, if CIFG is used, * input layer normalization weights tensor is omitted and the other layer * normalization weights either all have values or none of them have @@ -1632,15 +1533,15 @@ * http://arxiv.org/pdf/1503.04069.pdf * Greff et al. "LSTM: A Search Space Odyssey" * -%kind canonical ndk hal_1.2+ +%kind ndk hal_1.2+ * The layer normalization is based on: * https://arxiv.org/pdf/1607.06450.pdf * Jimmy Ba et al. "Layer Normalization" * %/kind * Supported tensor {@link %{OperandType}}: -%kind canonical ndk hal_1.2+ - * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} (since %{NNAPILevel3}) +%kind ndk hal_1.2+ + * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} (since %{APILevel29}) %/kind * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32} * @@ -1704,9 +1605,9 @@ * * 21:The clipping threshold (\f$t_{cell}\f$) for the cell state, such * that values are bound within [-cell_clip, cell_clip]. If set to 0.0 * then clipping is disabled. -%kind canonical ndk hal_1.2+ - * Until %{NNAPILevel3} this scalar must be of type {@link - * %{OperandTypeLinkPfx}FLOAT32}. 
Since %{NNAPILevel3}, if all the input +%kind ndk hal_1.2+ + * Until %{APILevel29} this scalar must be of type {@link + * %{OperandTypeLinkPfx}FLOAT32}. Since %{APILevel29}, if all the input * tensors have type {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}, this * scalar must be of the type {@link %{OperandTypeLinkPfx}FLOAT32}, * otherwise if all the input tensors have the type {@link @@ -1716,15 +1617,15 @@ * * 22:The clipping threshold (\f$t_{proj}\f$) for the output from the * projection layer, such that values are bound within * [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled. -%kind canonical ndk hal_1.2+ - * Until %{NNAPILevel3} this scalar must be of type {@link - * %{OperandTypeLinkPfx}FLOAT32}. Since %{NNAPILevel3}, if all the input +%kind ndk hal_1.2+ + * Until %{APILevel29} this scalar must be of type {@link + * %{OperandTypeLinkPfx}FLOAT32}. Since %{APILevel29}, if all the input * tensors have type {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}, this * scalar must be of the type {@link %{OperandTypeLinkPfx}FLOAT32}, * otherwise if all the input tensors have the type {@link * %{OperandTypeLinkPfx}TENSOR_FLOAT16}, this scalar must be of type {@link * %{OperandTypeLinkPfx}FLOAT16}. - * Since %{NNAPILevel3} there are additional inputs to this op: + * Since %{APILevel29} there are additional inputs to this op: * * 23:The input layer normalization weights. * A 1-D tensor of shape [num_units]. Used to rescale normalized inputs * to activation at input gate. @@ -1750,7 +1651,7 @@ * * 3: The output (\f$o_t\f$). * A 2-D tensor of shape [batch_size, output_size]. This is effectively * the same as the current “output state (out)” value. 
-%insert AVAIL1 +%insert-lines AVAIL27 */ %{DeclareOperation LSTM 16}, @@ -1768,23 +1669,23 @@ * ) * * Supported tensor {@link %{OperandType}}: -%kind canonical ndk hal_1.2+ - * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} (since %{NNAPILevel3}) +%kind ndk hal_1.2+ + * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} (since %{APILevel29}) %/kind * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32} * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} -%kind canonical ndk hal_1.3+ - * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4}) +%kind ndk hal_1.3+ + * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{APILevel30}) %/kind * -%insert NHWC_NCHW +%insert-lines NHWC_NCHW * * Both explicit padding and implicit padding are supported. * * Inputs (explicit padding): * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying * the input. -%insert ZeroBatchesNNAPILevel3 +%insert-lines ZeroBatchesAPI29 * * 1: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the padding on * the left, in the ‘width’ dimension. * * 2: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the padding on @@ -1804,19 +1705,19 @@ * * 9: An {@link %{OperandTypeLinkPfx}INT32} scalar, and has to be one of the * {@link %{FusedActivationFunc}} values. Specifies the activation to * invoke on the result. -%kind canonical ndk hal_1.2+ +%kind ndk hal_1.2+ * * 10: An optional {@link %{OperandTypeLinkPfx}BOOL} scalar, default to false. * Set to true to specify NCHW data layout for input0 and output0. - * Available since %{NNAPILevel3}. + * Available since %{APILevel29}. %/kind * * Inputs (implicit padding): * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying * the input. 
-%insert ZeroBatchesNNAPILevel3 +%insert-lines ZeroBatchesAPI29 * * 1: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the implicit * padding scheme, has to be one of the -%insert PaddingCodeValues +%insert-lines PaddingCodeValues * * 2: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the stride when * walking through input in the ‘width’ dimension. * * 3: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the stride when @@ -1828,16 +1729,16 @@ * * 6: An {@link %{OperandTypeLinkPfx}INT32} scalar, and has to be one of the * {@link %{FusedActivationFunc}} values. Specifies the activation to * invoke on the result. -%kind canonical ndk hal_1.2+ +%kind ndk hal_1.2+ * * 7: An optional {@link %{OperandTypeLinkPfx}BOOL} scalar, default to false. * Set to true to specify NCHW data layout for input0 and output0. - * Available since %{NNAPILevel3}. + * Available since %{APILevel29}. %/kind * * Outputs: * * 0: The output 4-D tensor, of shape * [batches, out_height, out_width, depth]. -%kind canonical ndk hal_1.3+ +%kind ndk hal_1.3+ * For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} and * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} tensor, * the scale and zeroPoint must be the same as input0. @@ -1845,7 +1746,7 @@ * For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} tensor, * the scale and zeroPoint must be the same as input0. %/kind -%insert AVAIL1 +%insert-lines AVAIL27 */ %{DeclareOperation MAX_POOL_2D 17}, @@ -1864,16 +1765,16 @@ * of the input operands. It starts with the trailing dimensions, and works * its way forward. 
* -%insert GenericZero +%insert-lines GenericZero * Supported tensor {@link %{OperandType}}: -%kind canonical ndk hal_1.2+ - * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} (since %{NNAPILevel3}) +%kind ndk hal_1.2+ + * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} (since %{APILevel29}) %/kind * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32} * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} -%kind canonical ndk hal_1.3+ - * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4}) - * * {@link %{OperandTypeLinkPfx}TENSOR_INT32} (since %{NNAPILevel4}) +%kind ndk hal_1.3+ + * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{APILevel30}) + * * {@link %{OperandTypeLinkPfx}TENSOR_INT32} (since %{APILevel30}) %/kind * * Supported tensor rank: up to 4 @@ -1885,14 +1786,14 @@ * * 2: An {@link %{OperandTypeLinkPfx}INT32} scalar, and has to be one of the * {@link %{FusedActivationFunc}} values. Specifies the activation to * invoke on the result. -%kind canonical ndk hal_1.3+ +%kind ndk hal_1.3+ * For a {@link %{OperandTypeLinkPfx}TENSOR_INT32} tensor, * the {@link %{FusedActivationFunc}} must be "NONE". %/kind * * Outputs: * * 0: The product, a tensor of the same {@link %{OperandType}} as input0. -%kind canonical ndk hal_1.3+ +%kind ndk hal_1.3+ * For output tensor of {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} * and {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED}, * the following condition must be satisfied: @@ -1902,7 +1803,7 @@ * the following condition must be satisfied: * output_scale > input1_scale * input2_scale. 
%/kind -%insert AVAIL1 +%insert-lines AVAIL27 */ %{DeclareOperation MUL 18}, @@ -1914,26 +1815,26 @@ * output = max(0, input) * * Supported tensor {@link %{OperandType}}: -%kind canonical ndk hal_1.2+ - * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} (since %{NNAPILevel3}) +%kind ndk hal_1.2+ + * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} (since %{APILevel29}) %/kind * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32} * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} -%kind canonical ndk hal_1.3+ - * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4}) +%kind ndk hal_1.3+ + * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{APILevel30}) %/kind * * Supported tensor rank: up to 4. * * Inputs: * * 0: A tensor, specifying the input. -%kind canonical ndk hal_1.2+ - * Since %{NNAPILevel3}, this tensor may be zero-sized. +%kind ndk hal_1.2+ + * Since %{APILevel29}, this tensor may be zero-sized. %/kind * * Outputs: * * 0: The output tensor of same shape as input0. -%kind canonical ndk hal_1.3+ +%kind ndk hal_1.3+ * For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} and * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} tensor, * the scale and zeroPoint must be the same as input0. @@ -1941,7 +1842,7 @@ * For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} tensor, * the scale and zeroPoint must be the same as input0. 
%/kind -%insert AVAIL1 +%insert-lines AVAIL27 */ %{DeclareOperation RELU 19}, @@ -1953,26 +1854,26 @@ * output = min(1.f, max(-1.f, input)) * * Supported tensor {@link %{OperandType}}: -%kind canonical ndk hal_1.2+ - * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} (since %{NNAPILevel3}) +%kind ndk hal_1.2+ + * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} (since %{APILevel29}) %/kind * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32} * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} -%kind canonical ndk hal_1.3+ - * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4}) +%kind ndk hal_1.3+ + * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{APILevel30}) %/kind * * Supported tensor rank: up to 4. * * Inputs: * * 0: A tensor, specifying the input. -%kind canonical ndk hal_1.2+ - * Since %{NNAPILevel3}, this tensor may be zero-sized. +%kind ndk hal_1.2+ + * Since %{APILevel29}, this tensor may be zero-sized. %/kind * * Outputs: * * 0: The output tensor of the same shape as input0. -%kind canonical ndk hal_1.3+ +%kind ndk hal_1.3+ * For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} and * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} tensor, * the scale and zeroPoint must be the same as input0. @@ -1980,7 +1881,7 @@ * For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} tensor, * the scale and zeroPoint must be the same as input0. 
%/kind -%insert AVAIL1 +%insert-lines AVAIL27 */ %{DeclareOperation RELU1 20}, @@ -1992,26 +1893,26 @@ * output = min(6, max(0, input)) * * Supported tensor {@link %{OperandType}}: -%kind canonical ndk hal_1.2+ - * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} (since %{NNAPILevel3}) +%kind ndk hal_1.2+ + * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} (since %{APILevel29}) %/kind * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32} * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} -%kind canonical ndk hal_1.3+ - * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4}) +%kind ndk hal_1.3+ + * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{APILevel30}) %/kind * * Supported tensor rank: up to 4. * * Inputs: * * 0: A tensor, specifying the input. -%kind canonical ndk hal_1.2+ - * Since %{NNAPILevel3}, this tensor may be zero-sized. +%kind ndk hal_1.2+ + * Since %{APILevel29}, this tensor may be zero-sized. %/kind * * Outputs: * * 0: The output tensor of same shape as input0. -%kind canonical ndk hal_1.3+ +%kind ndk hal_1.3+ * For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} and * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} tensor, * the scale and zeroPoint must be the same as input0. @@ -2019,7 +1920,7 @@ * For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} tensor, * the scale and zeroPoint must be the same as input0. %/kind -%insert AVAIL1 +%insert-lines AVAIL27 */ %{DeclareOperation RELU6 21}, @@ -2030,13 +1931,13 @@ * tensor, but with a newly specified shape. 
* * Supported tensor {@link %{OperandType}}: -%kind canonical ndk hal_1.2+ - * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} (since %{NNAPILevel3}) +%kind ndk hal_1.2+ + * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} (since %{APILevel29}) %/kind * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32} * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} -%kind canonical ndk hal_1.3+ - * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4}) +%kind ndk hal_1.3+ + * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{APILevel30}) %/kind * * Supported tensor rank: up to 4. @@ -2054,7 +1955,7 @@ * * Outputs: * * 0: The output tensor, of shape specified by the input shape. -%kind canonical ndk hal_1.3+ +%kind ndk hal_1.3+ * For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} and * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} tensor, * the scale and zeroPoint must be the same as input0. @@ -2062,7 +1963,7 @@ * For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} tensor, * the scale and zeroPoint must be the same as input0. %/kind -%insert AVAIL1 +%insert-lines AVAIL27 */ %{DeclareOperation RESHAPE 22}, @@ -2074,52 +1975,52 @@ * same as corner pixels of input. 
* * Supported tensor {@link %{OperandType}}: -%kind canonical ndk hal_1.2+ - * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} (since %{NNAPILevel3}) +%kind ndk hal_1.2+ + * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} (since %{APILevel29}) %/kind * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32} -%kind canonical ndk hal_1.2+ - * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} (since %{NNAPILevel3}) +%kind ndk hal_1.2+ + * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} (since %{APILevel29}) %/kind -%kind canonical ndk hal_1.3+ - * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4}) +%kind ndk hal_1.3+ + * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{APILevel30}) %/kind * -%insert NHWC_NCHW +%insert-lines NHWC_NCHW * -%kind canonical ndk hal_1.2+ +%kind ndk hal_1.2+ * Both resizing by shape and resizing by scale are supported. * %/kind * Inputs (resizing by shape): * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying * the input. -%insert ZeroBatchesNNAPILevel3 +%insert-lines ZeroBatchesAPI29 * * 1: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the output * width of the output tensor. * * 2: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the output * height of the output tensor. -%kind canonical ndk hal_1.2+ +%kind ndk hal_1.2+ * * 3: An optional {@link %{OperandTypeLinkPfx}BOOL} scalar, default to false. * Set to true to specify NCHW data layout for input0 and output0. - * Available since %{NNAPILevel3}. + * Available since %{APILevel29}. %/kind -%kind canonical ndk hal_1.3+ +%kind ndk hal_1.3+ * * 4: Align corners. An optional {@link %{OperandTypeLinkPfx}BOOL} * scalar, default to false. If True, the centers of the 4 corner * pixels of the input and output tensors are aligned, preserving the * values at the corner pixels. - * Available since %{NNAPILevel4}. + * Available since %{APILevel30}. * * 5: Half pixel centers. 
An optional {@link %{OperandTypeLinkPfx}BOOL} * scalar, default to false. If True, the pixel centers are assumed to * be at (0.5, 0.5). This is the default behavior of image.resize in * TF 2.0. If this parameter is True, then align_corners parameter * must be False. - * Available since %{NNAPILevel4}. + * Available since %{APILevel30}. %/kind -%kind canonical ndk hal_1.2+ +%kind ndk hal_1.2+ * - * Inputs (resizing by scale, since %{NNAPILevel3}): + * Inputs (resizing by scale, since %{APILevel29}): * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying * the input. Zero batches is supported for this tensor. * * 1: A scalar, specifying width_scale, the scaling factor of the width @@ -2137,33 +2038,33 @@ * * 3: An optional {@link %{OperandTypeLinkPfx}BOOL} scalar, default to false. * Set to true to specify NCHW data layout for input0 and output0. %/kind -%kind canonical ndk hal_1.3+ +%kind ndk hal_1.3+ * * 4: Align corners. An optional {@link %{OperandTypeLinkPfx}BOOL} * scalar, default to false. If True, the centers of the 4 corner * pixels of the input and output tensors are aligned, preserving the * values at the corner pixels. - * Available since %{NNAPILevel4}. + * Available since %{APILevel30}. * * 5: Half pixel centers. An optional {@link %{OperandTypeLinkPfx}BOOL} * scalar, default to false. If True, the pixel centers are assumed to * be at (0.5, 0.5). This is the default behavior of image.resize in * TF 2.0. If this parameter is True, then align_corners parameter * must be False. - * Available since %{NNAPILevel4}. + * Available since %{APILevel30}. %/kind * * Outputs: * * 0: The output 4-D tensor, of shape * [batches, new_height, new_width, depth]. -%kind canonical ndk hal_1.3+ +%kind ndk hal_1.3+ * For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} and * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} tensor, * the scale and zeroPoint must be the same as input0. 
%/kind -%kind canonical ndk hal_1.2 +%kind ndk hal_1.2 * For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} tensor, * the scale and zeroPoint must be the same as input0. %/kind -%insert AVAIL1 +%insert-lines AVAIL27 */ %{DeclareOperation RESIZE_BILINEAR 23}, @@ -2184,8 +2085,8 @@ * argument (if not “NONE”). * * Supported tensor {@link %{OperandType}}: -%kind canonical ndk hal_1.2+ - * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} (since %{NNAPILevel3}) +%kind ndk hal_1.2+ + * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} (since %{APILevel29}) %/kind * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32} * @@ -2218,7 +2119,7 @@ * * 1: output. * A 2-D tensor of shape [batch_size, num_units]. This is effectively * the same as the current state value. -%insert AVAIL1 +%insert-lines AVAIL27 */ %{DeclareOperation RNN 24}, @@ -2237,29 +2138,29 @@ * independently on each 1-D slice along specified dimension. * * Supported tensor {@link %{OperandType}}: -%kind canonical ndk hal_1.2+ - * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} (since %{NNAPILevel3}) +%kind ndk hal_1.2+ + * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} (since %{APILevel29}) %/kind * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32} * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} -%kind canonical ndk hal_1.3+ - * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4}) +%kind ndk hal_1.3+ + * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{APILevel30}) %/kind * %kind hal_1.0 hal_1.1 * Supported tensor rank: 2 or 4. %/kind -%kind canonical ndk hal_1.2+ +%kind ndk hal_1.2+ * Supported tensor rank: up to 4. - * Tensors with rank other than 2 or 4 are only supported since %{NNAPILevel3}. + * Tensors with rank other than 2 or 4 are only supported since %{APILevel29}. %/kind * * Inputs: * * 0: A 2-D or 4-D tensor, specifying the tensor to be reshaped. -%kind canonical ndk hal_1.2+ - * Since %{NNAPILevel3}, this tensor may be zero-sized. 
+%kind ndk hal_1.2+ + * Since %{APILevel29}, this tensor may be zero-sized. %/kind -%kind canonical ndk hal_1.3+ +%kind ndk hal_1.3+ * * 1: A scalar, specifying the positive scaling factor for the exponent, * beta. If input0 is of {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}, * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} or @@ -2271,27 +2172,27 @@ * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM}, the scalar must be of * {@link %{OperandTypeLinkPfx}FLOAT32}. %/kind -%kind canonical ndk hal_1.2+ +%kind ndk hal_1.2+ * If input0 is of {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16}, then the * scalar must be of {@link %{OperandTypeLinkPfx}FLOAT16}. %/kind -%kind canonical ndk hal_1.2+ +%kind ndk hal_1.2+ * * 2: An optional {@link %{OperandTypeLinkPfx}INT32} scalar, default to -1, * specifying the dimension the activation would be performed on. * Negative index is used to specify axis from the end (e.g. -1 for * the last axis). Must be in the range [-n, n). - * Available since %{NNAPILevel3}. + * Available since %{APILevel29}. %/kind * * Outputs: * * 0: The output tensor of same shape as input0. * For {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM}, * the scale must be 1.f / 256 and the zeroPoint must be 0. -%kind canonical ndk hal_1.3+ +%kind ndk hal_1.3+ * For {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED}, * the scale must be 1.f / 256 and the zeroPoint must be -128. %/kind -%insert AVAIL1 +%insert-lines AVAIL27 */ %{DeclareOperation SOFTMAX 25}, @@ -2310,16 +2211,16 @@ * The input tensor's height and width must be divisible by block_size. 
* * Supported tensor {@link %{OperandType}}: -%kind canonical ndk hal_1.2+ - * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} (since %{NNAPILevel3}) +%kind ndk hal_1.2+ + * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} (since %{APILevel29}) %/kind * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32} * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} -%kind canonical ndk hal_1.3+ - * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4}) +%kind ndk hal_1.3+ + * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{APILevel30}) %/kind * -%insert NHWC_NCHW +%insert-lines NHWC_NCHW * * Inputs: * * 0: A 4-D tensor, of shape [batches, height, width, depth_in], @@ -2327,16 +2228,16 @@ * * 1: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the block_size. * block_size must be >=1 and block_size must be a divisor of both the * input height and width. -%kind canonical ndk hal_1.2+ +%kind ndk hal_1.2+ * * 2: An optional {@link %{OperandTypeLinkPfx}BOOL} scalar, default to false. * Set to true to specify NCHW data layout for input0 and output0. - * Available since %{NNAPILevel3}. + * Available since %{APILevel29}. %/kind * * Outputs: * * 0: The output 4-D tensor, of shape [batches, height/block_size, * width/block_size, depth_in*block_size*block_size]. -%kind canonical ndk hal_1.3+ +%kind ndk hal_1.3+ * For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} and * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} tensor, * the scale and zeroPoint must be the same as input0. @@ -2344,7 +2245,7 @@ * For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} tensor, * the scale and zeroPoint must be the same as input0. %/kind -%insert AVAIL1 +%insert-lines AVAIL27 */ %{DeclareOperation SPACE_TO_DEPTH 26}, @@ -2388,8 +2289,8 @@ * the filters. 
* * Supported tensor {@link %{OperandType}}: -%kind canonical ndk hal_1.2+ - * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} (since %{NNAPILevel3}) +%kind ndk hal_1.2+ + * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} (since %{APILevel29}) %/kind * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32} * @@ -2424,7 +2325,7 @@ * * 1: output. * A 2-D tensor of the same {@link %{OperandType}} as the inputs, with shape * [batch_size, num_units]. -%insert AVAIL1 +%insert-lines AVAIL27 */ %{DeclareOperation SVDF 27}, @@ -2436,36 +2337,36 @@ * output = tanh(input) * * Supported tensor {@link %{OperandType}}: -%kind canonical ndk hal_1.2+ - * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} (since %{NNAPILevel3}) +%kind ndk hal_1.2+ + * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} (since %{APILevel29}) %/kind * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32} -%kind canonical ndk hal_1.2+ - * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} (since %{NNAPILevel3}) +%kind ndk hal_1.2+ + * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} (since %{APILevel29}) %/kind -%kind canonical ndk hal_1.3+ - * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4}) +%kind ndk hal_1.3+ + * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{APILevel30}) %/kind * * Supported tensor rank: up to 4. * * Inputs: * * 0: A tensor, specifying the input. -%kind canonical ndk hal_1.2+ - * Since %{NNAPILevel3}, this tensor may be zero-sized. +%kind ndk hal_1.2+ + * Since %{APILevel29}, this tensor may be zero-sized. %/kind * * Outputs: * * 0: The output tensor of same shape as input0. -%kind canonical ndk hal_1.2+ +%kind ndk hal_1.2+ * For {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM}, * the scale must be 1.f / 128 and the zeroPoint must be 128. %/kind -%kind canonical ndk hal_1.3+ +%kind ndk hal_1.3+ * For {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED}, * the scale must be 1.f / 128 and the zeroPoint must be 0. 
%/kind -%insert AVAIL1 +%insert-lines AVAIL27 */ %{DeclareOperation TANH 28}, %/section @@ -2487,23 +2388,23 @@ * This is the reverse of SpaceToBatch. * * Supported tensor {@link %{OperandType}}: -%kind canonical ndk hal_1.2+ - * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} (since %{NNAPILevel3}) +%kind ndk hal_1.2+ + * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} (since %{APILevel29}) %/kind * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32} * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} -%kind canonical ndk hal_1.3+ - * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4}) +%kind ndk hal_1.3+ + * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{APILevel30}) %/kind * -%insert NHWC_NCHW +%insert-lines NHWC_NCHW * * Inputs: * * 0: An n-D tensor, specifying the tensor to be reshaped * * 1: A 1-D Tensor of {@link %{OperandTypeLinkPfx}TENSOR_INT32}, the block * sizes for each spatial dimension of the input tensor. All values * must be >= 1. -%kind canonical ndk hal_1.2+ +%kind ndk hal_1.2+ * * 2: An optional {@link %{OperandTypeLinkPfx}BOOL} scalar, default to false. * Set to true to specify NCHW data layout for input0 and output0. * Available since API level 29. @@ -2511,7 +2412,7 @@ * * Outputs: * * 0: A tensor of the same {@link %{OperandType}} as input0. -%kind canonical ndk hal_1.3+ +%kind ndk hal_1.3+ * For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} and * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} tensor, * the scale and zeroPoint must be the same as input0. @@ -2519,7 +2420,7 @@ * For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} tensor, * the scale and zeroPoint must be the same as input0. %/kind -%insert AVAIL2 +%insert-lines AVAIL28 */ %{DeclareOperation BATCH_TO_SPACE_ND 29}, @@ -2530,7 +2431,7 @@ * dimensions. The output is the result of dividing the first input tensor * by the second, optionally modified by an activation function. 
* -%kind canonical ndk hal_1.3+ +%kind ndk hal_1.3+ * For inputs of {@link %{OperandTypeLinkPfx}TENSOR_INT32}, performs * "floor division" ("//" in Python). For example, * 5 // 2 = 2 @@ -2550,14 +2451,14 @@ * input2.dimension = {5, 4, 3, 1} * output.dimension = {5, 4, 3, 2} * -%insert GenericZero +%insert-lines GenericZero * Supported tensor {@link %{OperandType}}: -%kind canonical ndk hal_1.2+ - * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} (since %{NNAPILevel3}) +%kind ndk hal_1.2+ + * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} (since %{APILevel29}) %/kind * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32} -%kind canonical ndk hal_1.3+ - * * {@link %{OperandTypeLinkPfx}TENSOR_INT32} (since %{NNAPILevel4}) +%kind ndk hal_1.3+ + * * {@link %{OperandTypeLinkPfx}TENSOR_INT32} (since %{APILevel30}) %/kind * * Supported tensor rank: up to 4 @@ -2569,14 +2470,14 @@ * * 2: An {@link %{OperandTypeLinkPfx}INT32} scalar, and has to be one of the * {@link %{FusedActivationFunc}} values. Specifies the activation to * invoke on the result. -%kind canonical ndk hal_1.3+ +%kind ndk hal_1.3+ * For a {@link %{OperandTypeLinkPfx}TENSOR_INT32} tensor, * the {@link %{FusedActivationFunc}} must be "NONE". %/kind * * Outputs: * * 0: A tensor of the same {@link %{OperandType}} as input0. -%insert AVAIL2 +%insert-lines AVAIL28 */ %{DeclareOperation DIV 30}, @@ -2589,13 +2490,13 @@ * length 1. 
* * Supported tensor {@link %{OperandType}}: -%kind canonical ndk hal_1.2+ - * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} (since %{NNAPILevel3}) +%kind ndk hal_1.2+ + * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} (since %{APILevel29}) %/kind * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32} * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} -%kind canonical ndk hal_1.3+ - * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4}) +%kind ndk hal_1.3+ + * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{APILevel30}) %/kind * * Supported tensor rank: up to 4 @@ -2616,7 +2517,7 @@ * * Outputs: * * 0: A tensor of the same {@link %{OperandType}} as input0. -%kind canonical ndk hal_1.3+ +%kind ndk hal_1.3+ * For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} and * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} tensor, * the scale and zeroPoint must be the same as input0. @@ -2626,7 +2527,7 @@ %/kind * If all dimensions are reduced and keep_dims is false, the output * shape is [1]. -%insert AVAIL2 +%insert-lines AVAIL28 */ %{DeclareOperation MEAN 31}, @@ -2636,16 +2537,16 @@ * This operation pads a tensor according to the specified paddings. 
* * Supported tensor {@link %{OperandType}}: -%kind canonical ndk hal_1.2+ - * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} (since %{NNAPILevel3}) +%kind ndk hal_1.2+ + * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} (since %{APILevel29}) %/kind * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32} * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} -%kind canonical ndk hal_1.3+ - * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4}) +%kind ndk hal_1.3+ + * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{APILevel30}) %/kind -%kind canonical ndk hal_1.2+ - * (full support since %{NNAPILevel3}, see the output section) +%kind ndk hal_1.2+ + * (full support since %{APILevel29}, see the output section) %else * (the pad value is undefined) %/kind @@ -2670,7 +2571,7 @@ * of the padding: * output0.dimension[i] = * padding[i, 0] + input0.dimension[i] + padding[i, 1] -%kind canonical ndk hal_1.3+ +%kind ndk hal_1.3+ * For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} and * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} tensor, * the scale and zeroPoint must be the same as input0. @@ -2678,13 +2579,13 @@ * For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} tensor, * the scale and zeroPoint must be the same as input0. %/kind -%kind canonical ndk hal_1.2+ +%kind ndk hal_1.2+ * - * NOTE: Before %{NNAPILevel3}, the pad value for + * NOTE: Before %{APILevel29}, the pad value for * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} is undefined. - * Since %{NNAPILevel3}, the pad value is always the logical zero. + * Since %{APILevel29}, the pad value is always the logical zero. %/kind -%insert AVAIL2 +%insert-lines AVAIL28 */ %{DeclareOperation PAD 32}, @@ -2700,21 +2601,21 @@ * dimensions of the input are optionally zero padded according to paddings. 
* * Supported tensor {@link %{OperandType}}: -%kind canonical ndk hal_1.2+ - * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} (since %{NNAPILevel3}) +%kind ndk hal_1.2+ + * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} (since %{APILevel29}) %/kind * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32} * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} -%kind canonical ndk hal_1.3+ - * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4}) +%kind ndk hal_1.3+ + * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{APILevel30}) %/kind -%kind canonical ndk hal_1.2+ - * (full support since %{NNAPILevel3}, see the output section) +%kind ndk hal_1.2+ + * (full support since %{APILevel29}, see the output section) %else * (the pad value is undefined) %/kind * -%insert NHWC_NCHW +%insert-lines NHWC_NCHW * * Inputs: * * 0: An n-D tensor, specifying the input. @@ -2729,15 +2630,15 @@ * front of dimension i. * padding[i, 1] specifies the number of element to be padded after the * end of dimension i. -%kind canonical ndk hal_1.2+ +%kind ndk hal_1.2+ * * 3: An optional {@link %{OperandTypeLinkPfx}BOOL} scalar, default to false. * Set to true to specify NCHW data layout for input0 and output0. - * Available since %{NNAPILevel3}. + * Available since %{APILevel29}. %/kind * * Outputs: * * 0: A tensor of the same {@link %{OperandType}} as input0. -%kind canonical ndk hal_1.3+ +%kind ndk hal_1.3+ * For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} and * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} tensor, * the scale and zeroPoint must be the same as input0. @@ -2745,13 +2646,13 @@ * For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} tensor, * the scale and zeroPoint must be the same as input0. %/kind -%kind canonical ndk hal_1.2+ +%kind ndk hal_1.2+ * - * NOTE: Before %{NNAPILevel3}, the pad value for + * NOTE: Before %{APILevel29}, the pad value for * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} is undefined. 
- * Since %{NNAPILevel3}, the pad value is always the logical zero. + * Since %{APILevel29}, the pad value is always the logical zero. %/kind -%insert AVAIL2 +%insert-lines AVAIL28 */ %{DeclareOperation SPACE_TO_BATCH_ND 33}, @@ -2764,13 +2665,13 @@ * dimensions by specifying the axes (input1). * * Supported tensor {@link %{OperandType}}: -%kind canonical ndk hal_1.2+ - * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} (since %{NNAPILevel3}) +%kind ndk hal_1.2+ + * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} (since %{APILevel29}) %/kind * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32} * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} -%kind canonical ndk hal_1.3+ - * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4}) +%kind ndk hal_1.3+ + * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{APILevel30}) %/kind * * Supported tensor rank: up to 4 @@ -2787,7 +2688,7 @@ * * 0: A tensor of the same {@link %{OperandType}} as input0. Contains the * same data as input, but has one or more dimensions of size 1 * removed. -%kind canonical ndk hal_1.3+ +%kind ndk hal_1.3+ * For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} and * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} tensor, * the scale and zeroPoint must be the same as input0. @@ -2797,7 +2698,7 @@ %/kind * If all input dimensions are equal to 1 and are to be squeezed, the * output shape is [1]. -%insert AVAIL2 +%insert-lines AVAIL28 */ %{DeclareOperation SQUEEZE 34}, @@ -2811,13 +2712,13 @@ * reverse slice. 
* * Supported tensor {@link %{OperandType}}: -%kind canonical ndk hal_1.2+ - * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} (since %{NNAPILevel3}) +%kind ndk hal_1.2+ + * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} (since %{APILevel29}) %/kind * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32} * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} -%kind canonical ndk hal_1.3+ - * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4}) +%kind ndk hal_1.3+ + * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{APILevel30}) %/kind * * Supported tensor rank: up to 4 @@ -2848,7 +2749,7 @@ * Outputs: * * 0: A tensor of the same {@link %{OperandType}} as input0 and rank (n - k), * where k is the number of bits set in shrink_axis_mask. -%kind canonical ndk hal_1.3+ +%kind ndk hal_1.3+ * For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} and * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} tensor, * the scale and zeroPoint must be the same as input0. @@ -2858,7 +2759,7 @@ %/kind * If shrink_axis_mask is true for all input dimensions, the output * shape is [1]. 
-%insert AVAIL2 +%insert-lines AVAIL28 */ %{DeclareOperation STRIDED_SLICE 35}, @@ -2882,18 +2783,18 @@ * input2.dimension = {5, 4, 3, 1} * output.dimension = {5, 4, 3, 2} * -%insert GenericZero +%insert-lines GenericZero * Supported tensor {@link %{OperandType}}: -%kind canonical ndk hal_1.2+ - * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} (since %{NNAPILevel3}) +%kind ndk hal_1.2+ + * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} (since %{APILevel29}) %/kind * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32} -%kind canonical ndk hal_1.2+ - * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} (since %{NNAPILevel3}) +%kind ndk hal_1.2+ + * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} (since %{APILevel29}) %/kind -%kind canonical ndk hal_1.3+ - * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4}) - * * {@link %{OperandTypeLinkPfx}TENSOR_INT32} (since %{NNAPILevel4}) +%kind ndk hal_1.3+ + * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{APILevel30}) + * * {@link %{OperandTypeLinkPfx}TENSOR_INT32} (since %{APILevel30}) %/kind * * Supported tensor rank: up to 4 @@ -2905,7 +2806,7 @@ * * 2: An {@link %{OperandTypeLinkPfx}INT32} scalar, and has to be one of the * {@link %{FusedActivationFunc}} values. Specifies the activation to * invoke on the result. -%kind canonical ndk hal_1.3+ +%kind ndk hal_1.3+ * For a {@link %{OperandTypeLinkPfx}TENSOR_INT32} tensor, * the {@link %{FusedActivationFunc}} must be "NONE". %/kind @@ -2916,12 +2817,12 @@ * For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} tensor, * the scale and zeroPoint can be different from inputs' scale and zeroPoint. %/kind -%kind canonical ndk hal_1.3+ +%kind ndk hal_1.3+ * For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} and * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} tensor, * the scale and zeroPoint can be different from inputs' scale and zeroPoint. 
%/kind -%insert AVAIL2 +%insert-lines AVAIL28 */ %{DeclareOperation SUB 36}, @@ -2935,28 +2836,28 @@ * regular matrix transpose on 2-D input Tensors. * * Supported tensor {@link %{OperandType}}: -%kind canonical ndk hal_1.2+ - * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} (since %{NNAPILevel3}) +%kind ndk hal_1.2+ + * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} (since %{APILevel29}) %/kind * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32} * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} -%kind canonical ndk hal_1.3+ - * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4}) +%kind ndk hal_1.3+ + * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{APILevel30}) %/kind * * Supported tensor rank: up to 4 * * Inputs: * * 0: An n-D tensor, specifying the tensor to be transposed. -%kind canonical ndk hal_1.2+ - * Since %{NNAPILevel3}, this tensor may be zero-sized. +%kind ndk hal_1.2+ + * Since %{APILevel29}, this tensor may be zero-sized. %/kind * * 1: An optional 1-D Tensor of {@link %{OperandTypeLinkPfx}TENSOR_INT32}, * the permutation of the dimensions of the input tensor. * * Outputs: * * 0: A tensor of the same {@link %{OperandType}} as input0. -%kind canonical ndk hal_1.3+ +%kind ndk hal_1.3+ * For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} and * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} tensor, * the scale and zeroPoint must be the same as input0. @@ -2964,7 +2865,7 @@ * For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} tensor, * the scale and zeroPoint must be the same as input0. %/kind -%insert AVAIL2 +%insert-lines AVAIL28 */ %{DeclareOperation TRANSPOSE 37}, %/section @@ -2980,10 +2881,9 @@ * * Values of this operand type are either true or false. A zero value * represents false; any other value represents true. -%insert AVAIL3 +%insert-lines AVAIL29 */ %{ANN}BOOL = 6, -%insert canonical_empty_line /** * A tensor of 16 bit signed integers that represent real numbers. 
* @@ -2992,31 +2892,27 @@ * realValue = integerValue * scale. * * scale is a 32 bit floating point with value greater than zero. -%insert AVAIL3 +%insert-lines AVAIL29 */ %{ANN}TENSOR_QUANT16_SYMM = 7, -%insert canonical_empty_line /** * A tensor of IEEE 754 16 bit floating point values. -%insert AVAIL3 +%insert-lines AVAIL29 */ %{ANN}TENSOR_FLOAT16 = 8, -%insert canonical_empty_line /** * A tensor of 8 bit boolean values. * * Values of this operand type are either true or false. A zero value * represents false; any other value represents true. -%insert AVAIL3 +%insert-lines AVAIL29 */ %{ANN}TENSOR_BOOL8 = 9, -%insert canonical_empty_line /** * An IEEE 754 16 bit floating point scalar value. -%insert AVAIL3 +%insert-lines AVAIL29 */ %{ANN}FLOAT16 = 10, -%insert canonical_empty_line /** * A tensor of 8 bit signed integers that represent real numbers. * @@ -3032,7 +2928,7 @@ * to set the parameters for an Operand of this type. * %/kind -%kind canonical hal_1.2+ +%kind hal_1.2+ *{@link %{Ann}SymmPerChannelQuantParams} must hold the parameters for an Operand of this type. %/kind * The channel dimension of this tensor must not be unknown (dimensions[channelDim] != 0). @@ -3041,10 +2937,9 @@ * realValue[..., C, ...] = * integerValue[..., C, ...] * scales[C] * where C is an index in the Channel dimension. -%insert AVAIL3 +%insert-lines AVAIL29 */ %{ANN}TENSOR_QUANT8_SYMM_PER_CHANNEL = 11, -%insert canonical_empty_line /** * A tensor of 16 bit unsigned integers that represent real numbers. * @@ -3055,10 +2950,9 @@ * * The formula is: * real_value = (integer_value - zeroPoint) * scale. -%insert AVAIL3 +%insert-lines AVAIL29 */ %{ANN}TENSOR_QUANT16_ASYMM = 12, -%insert canonical_empty_line /** * A tensor of 8 bit signed integers that represent real numbers. * @@ -3067,7 +2961,7 @@ * realValue = integerValue * scale. * * scale is a 32 bit floating point with value greater than zero. 
-%insert AVAIL3 +%insert-lines AVAIL29 */ %{ANN}TENSOR_QUANT8_SYMM = 13, %/section @@ -3088,8 +2982,8 @@ * Supported tensor {@link %{OperandType}}: * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32} -%kind canonical ndk hal_1.3+ - * * {@link %{OperandTypeLinkPfx}TENSOR_INT32} (since %{NNAPILevel4}) +%kind ndk hal_1.3+ + * * {@link %{OperandTypeLinkPfx}TENSOR_INT32} (since %{APILevel30}) %/kind * * Supported tensor rank: from 1. @@ -3099,7 +2993,7 @@ * * Outputs: * * 0: The output tensor of same shape as input0. -%insert AVAIL3 +%insert-lines AVAIL29 */ %{DeclareOperation_1.2 ABS 38}, @@ -3111,8 +3005,8 @@ * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32} * * {@link %{OperandTypeLinkPfx}TENSOR_INT32} * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} -%kind canonical ndk hal_1.3+ - * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4}) +%kind ndk hal_1.3+ + * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{APILevel30}) %/kind * * Supported tensor rank: from 1 @@ -3126,7 +3020,7 @@ * Outputs: * * 0: An (n - 1)-D {@link %{OperandTypeLinkPfx}TENSOR_INT32} tensor. * If input is 1-dimensional, the output shape is [1]. -%insert AVAIL3 +%insert-lines AVAIL29 */ // There is no underscore in ARG_MAX to avoid name conflict with // the macro defined in libc/kernel/uapi/linux/limits.h. @@ -3140,8 +3034,8 @@ * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32} * * {@link %{OperandTypeLinkPfx}TENSOR_INT32} * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} -%kind canonical ndk hal_1.3+ - * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4}) +%kind ndk hal_1.3+ + * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{APILevel30}) %/kind * * Supported tensor rank: from 1 @@ -3155,7 +3049,7 @@ * Outputs: * * 0: An (n - 1)-D {@link %{OperandTypeLinkPfx}TENSOR_INT32} tensor. * If input is 1-dimensional, the output shape is [1]. 
-%insert AVAIL3 +%insert-lines AVAIL29 */ %{DeclareOperation_1.2 ARGMIN 40}, // See ARGMAX for naming discussion. @@ -3186,7 +3080,7 @@ * and height, dw and dh is the log-scale relative correction factor * for the width and height. For input0 of type * {@link %{OperandTypeLinkPfx}TENSOR_QUANT16_ASYMM}, this tensor should be -%kind canonical ndk hal_1.3+ +%kind ndk hal_1.3+ * of {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} or * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED}. Zero num_rois is %else @@ -3207,7 +3101,7 @@ * output bounding box for each class, with format [x1, y1, x2, y2]. * For type of {@link %{OperandTypeLinkPfx}TENSOR_QUANT16_ASYMM}, the * scale must be 0.125 and the zero point must be 0. -%insert AVAIL3 +%insert-lines AVAIL29 */ %{DeclareOperation_1.2 AXIS_ALIGNED_BBOX_TRANSFORM 41}, @@ -3245,8 +3139,8 @@ * allows to connect both forward and backward outputs from previous cell * to the next cell's input. * -%kind canonical ndk hal_1.3+ - * Since %{NNAPILevel4} parallel linking mode is supported. The mode is +%kind ndk hal_1.3+ + * Since %{APILevel30} parallel linking mode is supported. The mode is * enabled if auxiliary input is present but auxiliary weights are omitted. * In this case, the cell feeds inputs into the RNN in the following way: * @@ -3361,7 +3255,7 @@ * A 2-D tensor of shape [batch_size, bw_output_size]. * * 38: The backward input cell state. * A 2-D tensor of shape [batch_size, bw_num_units]. -%kind canonical ndk hal_1.3+ +%kind ndk hal_1.3+ * * 39: The auxiliary input. Optional. * A 3-D tensor of shape [max_time, batch_size, aux_input_size], * where “batch_size” corresponds to the batching dimension, and @@ -3484,34 +3378,34 @@ * A 3-D tensor of shape: * If time-major: [max_time, batch_size, bw_output_size] * If batch-major: [batch_size, max_time, bw_output_size] -%kind canonical ndk hal_1.3+ +%kind ndk hal_1.3+ * * 2: The forward activation state output. 
* A 2-D tensor of shape [batch_size, fw_output_size] containing an * activation state from the last time step in the sequence. This * output is optional and can be omitted. If this output is present * then outputs 3-5 must be present as well. - * Available since %{NNAPILevel4}. + * Available since %{APILevel30}. * * 3: The forward cell state output. * A tensor of shape [batch_size, fw_cell_size] containing a cell state * from the last time step in the sequence. This output is optional * and can be omitted. If this output is present * then outputs 2, 4, 5 must be present as well. - * Available since %{NNAPILevel4}. + * Available since %{APILevel30}. * * 4: The backward activation state output. * A 2-D tensor of shape [batch_size, bw_output_size] containing an * activation state from the last time step in the sequence. This * output is optional and can be omitted. If this output is present * then outputs 2, 3, 5 must be present as well. - * Available since %{NNAPILevel4}. + * Available since %{APILevel30}. * * 5: The backward cell state output. * A tensor of shape [batch_size, bw_cell_size] containing a cell state * from the last time step in the sequence. This output is optional * and can be omitted. If this output is present * then outputs 2-4 must be present as well. - * Available since %{NNAPILevel4}. + * Available since %{APILevel30}. %/kind -%insert AVAIL3 -%insert OutputState +%insert-lines AVAIL29 +%insert-lines OutputState */ %{DeclareOperation_1.2 BIDIRECTIONAL_SEQUENCE_LSTM 42}, @@ -3569,8 +3463,8 @@ * allows to connect both forward and backward outputs from previous cell * to the next cell's input. * -%kind canonical ndk hal_1.3+ - * Since %{NNAPILevel4} parallel linking mode is supported. The mode is +%kind ndk hal_1.3+ + * Since %{APILevel30} parallel linking mode is supported. The mode is * enabled if auxiliary input is present but auxiliary weights are omitted. 
* In this case, the cell feeds inputs into the RNN in the following way: * @@ -3617,7 +3511,7 @@ * * 8: bwHiddenState * A 2-D tensor of shape [batchSize, bwNumUnits]. Specifies a hidden * state input for the first time step of the computation. -%kind canonical ndk hal_1.3+ +%kind ndk hal_1.3+ * * 9: auxInput. * A 3-D tensor. The shape is defined by the input 6 (timeMajor). If * it is set to true, then the input has a shape [maxTime, batchSize, @@ -3663,22 +3557,22 @@ * (timeMajor). If it is set to true, then the shape is set to * [maxTime, batchSize, bwNumUnits], otherwise the shape is set to * [batchSize, maxTime, bwNumUnits]. -%kind canonical ndk hal_1.3+ +%kind ndk hal_1.3+ * * 2: The forward hidden state output. * A 2-D tensor of shape [batchSize, fwNumUnits] containing a hidden * state from the last time step in the sequence. This output is * optional and can be omitted. If this output is present then output * 3 must be present as well. - * Available since %{NNAPILevel4}. + * Available since %{APILevel30}. * * 3: The backward hidden state output. * A 2-D tensor of shape [batchSize, bwNumUnits] containing a hidden * state from the last time step in the sequence. This output is * optional and can be omitted. If this output is present then output * 2 must be present as well. - * Available since %{NNAPILevel4}. + * Available since %{APILevel30}. 
%/kind -%insert AVAIL3 -%insert OutputState +%insert-lines AVAIL29 +%insert-lines OutputState */ %{DeclareOperation_1.2 BIDIRECTIONAL_SEQUENCE_RNN 43}, @@ -3705,8 +3599,8 @@ * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32} * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} -%kind canonical ndk hal_1.3+ - * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4}) +%kind ndk hal_1.3+ + * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{APILevel30}) %/kind * * Inputs: @@ -3720,7 +3614,7 @@ * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM}, this tensor should be of * {@link %{OperandTypeLinkPfx}TENSOR_QUANT16_ASYMM}, with zeroPoint of 0 and * scale of 0.125. -%kind canonical ndk hal_1.3+ +%kind ndk hal_1.3+ * For input0 of type {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED}, * this tensor should be of {@link %{OperandTypeLinkPfx}TENSOR_QUANT16_ASYMM}, * with zeroPoint of -128 and scale of 0.125. @@ -3752,7 +3646,7 @@ * [num_output_rois], specifying the score of each output box. The boxes * are grouped by batches, but the sequential order in each batch is not * guaranteed. For type of {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM}, -%kind canonical ndk hal_1.3+ +%kind ndk hal_1.3+ * guaranteed. For type of {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} * or {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED}, %else @@ -3771,7 +3665,7 @@ * * 3: A 1-D {@link %{OperandTypeLinkPfx}TENSOR_INT32} tensor, of shape * [num_output_rois], specifying the batch index of each box. Boxes * with the same batch index are grouped together. 
-%insert AVAIL3 +%insert-lines AVAIL29 */ %{DeclareOperation_1.2 BOX_WITH_NMS_LIMIT 44}, @@ -3787,8 +3681,8 @@ * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32} * * {@link %{OperandTypeLinkPfx}TENSOR_INT32} * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} -%kind canonical ndk hal_1.3+ - * Since %{NNAPILevel4}, casting tensors of the following +%kind ndk hal_1.3+ + * Since %{APILevel30}, casting tensors of the following * {@link %{OperandType}} to the same {@link %{OperandType}} is supported: * * {@link %{OperandTypeLinkPfx}TENSOR_BOOL8} * * {@link %{OperandTypeLinkPfx}TENSOR_INT32} @@ -3805,7 +3699,7 @@ * * Outputs: * * 0: A tensor with the same shape as input0. -%insert AVAIL3 +%insert-lines AVAIL29 */ %{DeclareOperation_1.2 CAST 45}, @@ -3828,8 +3722,8 @@ * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32} * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} -%kind canonical ndk hal_1.3+ - * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4}) +%kind ndk hal_1.3+ + * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{APILevel30}) %/kind * * Supported tensor rank: up to 4 @@ -3845,7 +3739,7 @@ * * Outputs: * * 0: A tensor of the same {@link %{OperandType}} and same shape as input0. -%kind canonical ndk hal_1.3+ +%kind ndk hal_1.3+ * For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} and * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} tensor, * the scale and zeroPoint must be the same as input0. @@ -3853,7 +3747,7 @@ * For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} tensor, * the scale and zeroPoint must be the same as input0. %/kind -%insert AVAIL3 +%insert-lines AVAIL29 */ %{DeclareOperation_1.2 CHANNEL_SHUFFLE 46}, @@ -3934,7 +3828,7 @@ * output detection. * * 3: An 1-D {@link %{OperandTypeLinkPfx}TENSOR_INT32} tensor, of shape [batches], * specifying the number of valid output detections for each batch. 
-%insert AVAIL3 +%insert-lines AVAIL29 */ %{DeclareOperation_1.2 DETECTION_POSTPROCESSING 47}, @@ -3947,8 +3841,8 @@ * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32} * * {@link %{OperandTypeLinkPfx}TENSOR_INT32} * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} -%kind canonical ndk hal_1.3+ - * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4}) +%kind ndk hal_1.3+ + * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{APILevel30}) %/kind * * Supported tensor rank: from 1 @@ -3962,7 +3856,7 @@ * * Outputs: * * 0: A tensor of {@link %{OperandTypeLinkPfx}TENSOR_BOOL8}. -%insert AVAIL3 +%insert-lines AVAIL29 */ %{DeclareOperation_1.2 EQUAL 48}, @@ -3980,7 +3874,7 @@ * * Outputs: * * 0: The output tensor of same shape as input0. -%insert AVAIL3 +%insert-lines AVAIL29 */ %{DeclareOperation_1.2 EXP 49}, @@ -3997,8 +3891,8 @@ * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32} * * {@link %{OperandTypeLinkPfx}TENSOR_INT32} * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} -%kind canonical ndk hal_1.3+ - * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4}) +%kind ndk hal_1.3+ + * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{APILevel30}) %/kind * * Supported tensor rank: from 1 @@ -4011,14 +3905,14 @@ * Outputs: * * 0: An (n + 1)-D tensor with the same {@link %{OperandType}} and data as * input0. -%kind canonical ndk hal_1.3+ +%kind ndk hal_1.3+ * For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} and * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} tensor, %else * For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} tensor, %/kind * the scale and zeroPoint must be the same as input0. 
-%insert AVAIL3 +%insert-lines AVAIL29 */ %{DeclareOperation_1.2 EXPAND_DIMS 50}, @@ -4041,8 +3935,8 @@ * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32} * * {@link %{OperandTypeLinkPfx}TENSOR_INT32} * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} -%kind canonical ndk hal_1.3+ - * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4}) +%kind ndk hal_1.3+ + * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{APILevel30}) %/kind * * Supported tensor rank: from 1 @@ -4058,14 +3952,14 @@ * * Outputs: * * 0: An (n + k - 1)-D tensor with the same {@link %{OperandType}} as input0. -%kind canonical ndk hal_1.3+ +%kind ndk hal_1.3+ * For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} and * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} tensor, %else * For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} tensor, %/kind * the scale and zeroPoint must be the same as input0. -%insert AVAIL3 +%insert-lines AVAIL29 */ %{DeclareOperation_1.2 GATHER 51}, @@ -4085,8 +3979,8 @@ * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32} * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} -%kind canonical ndk hal_1.3+ - * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4}) +%kind ndk hal_1.3+ + * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{APILevel30}) %/kind * * Inputs: @@ -4105,7 +3999,7 @@ * dimensions is the channel dimension. * * 2: A 2-D Tensor of shape [num_anchors, 4], specifying the shape of each * predefined anchor, with format [x1, y1, x2, y2]. For input0 of type -%kind canonical ndk hal_1.3+ +%kind ndk hal_1.3+ * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} or * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED}, this tensor should be of %else @@ -4114,7 +4008,7 @@ * {@link %{OperandTypeLinkPfx}TENSOR_QUANT16_SYMM}, with scale of 0.125. 
* * 3: A 2-D Tensor of shape [batches, 2], specifying the size of * each image in the batch, with format [image_height, image_width]. -%kind canonical ndk hal_1.3+ +%kind ndk hal_1.3+ * For input0 of type {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} or * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED}, this %else @@ -4146,7 +4040,7 @@ * [num_output_rois], specifying the score of each output box. * The boxes are grouped by batches, but the sequential order in * each batch is not guaranteed. For type of -%kind canonical ndk hal_1.3+ +%kind ndk hal_1.3+ * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} or * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED}, the scale and zero %else @@ -4162,7 +4056,7 @@ * * 2: A 1-D {@link %{OperandTypeLinkPfx}TENSOR_INT32} tensor, of shape * [num_output_rois], specifying the batch index of each box. Boxes * with the same batch index are grouped together. -%insert AVAIL3 +%insert-lines AVAIL29 */ %{DeclareOperation_1.2 GENERATE_PROPOSALS 52}, @@ -4175,8 +4069,8 @@ * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32} * * {@link %{OperandTypeLinkPfx}TENSOR_INT32} * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} -%kind canonical ndk hal_1.3+ - * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4}) +%kind ndk hal_1.3+ + * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{APILevel30}) %/kind * * Supported tensor rank: from 1 @@ -4190,7 +4084,7 @@ * * Outputs: * * 0: A tensor of {@link %{OperandTypeLinkPfx}TENSOR_BOOL8}. 
-%insert AVAIL3 +%insert-lines AVAIL29 */ %{DeclareOperation_1.2 GREATER 53}, /** @@ -4202,8 +4096,8 @@ * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32} * * {@link %{OperandTypeLinkPfx}TENSOR_INT32} * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} -%kind canonical ndk hal_1.3+ - * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4}) +%kind ndk hal_1.3+ + * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{APILevel30}) %/kind * * Supported tensor rank: from 1 @@ -4217,7 +4111,7 @@ * * Outputs: * * 0: A tensor of {@link %{OperandTypeLinkPfx}TENSOR_BOOL8}. -%insert AVAIL3 +%insert-lines AVAIL29 */ %{DeclareOperation_1.2 GREATER_EQUAL 54}, @@ -4261,9 +4155,9 @@ * * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} for input, filter, and output. * * * {@link %{OperandTypeLinkPfx}TENSOR_INT32} for bias (with scale set to * * * input.scale * filter.scale). -%kind canonical ndk hal_1.3+ +%kind ndk hal_1.3+ * - * * Quantized signed (since %{NNAPILevel4}): + * * Quantized signed (since %{APILevel30}): * * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} for input, filter, and output. * * * {@link %{OperandTypeLinkPfx}TENSOR_INT32} for bias (with scale set to * * * input.scale * filter.scale). @@ -4274,10 +4168,9 @@ * * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter. * * * {@link %{OperandTypeLinkPfx}TENSOR_INT32} for bias (scale set to 0.0, * * * each value scaling is separate and equal to input.scale * filter.scales[channel]). -%kind canonical ndk hal_1.3+ +%kind ndk hal_1.3+ * - * * Quantized signed with filter symmetric per channel quantization - * (since %{NNAPILevel4}): + * * Quantized signed with filter symmetric per channel quantization (since %{APILevel30}): * * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} for input, and output. * * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter. 
* * * {@link %{OperandTypeLinkPfx}TENSOR_INT32} for bias (scale set to 0.0, @@ -4303,7 +4196,7 @@ * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input * tensor of type {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32} or * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16}, the bias must be of the same type. -%kind canonical ndk hal_1.3+ +%kind ndk hal_1.3+ * For filter tensor of {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} and * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} %else @@ -4348,7 +4241,7 @@ * tensor of type {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32} or * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16}, the bias must be of the same * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16}, the bias must be of the same type. -%kind canonical ndk hal_1.3+ +%kind ndk hal_1.3+ * For filter tensor of {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} and * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} %else @@ -4362,7 +4255,7 @@ * bias_scale[i] = input_scale * filter_scale[i]. * * 3: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the implicit * padding scheme, has to be one of the -%insert PaddingCodeValues +%insert-lines PaddingCodeValues * * 4: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the stride when * walking through input in the ‘width’ dimension. * * 5: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the stride when @@ -4378,7 +4271,7 @@ * Outputs: * * 0: The output 4-D tensor, of shape * [batches, out_height, out_width, depth_out]. -%kind canonical ndk hal_1.3+ +%kind ndk hal_1.3+ * For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} and * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} tensor, * the scale and zeroPoint can be different from inputs' scale and zeroPoint. @@ -4386,7 +4279,7 @@ * For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} tensor, * the scale and zeroPoint can be different from inputs' scale and zeroPoint. 
%/kind -%insert AVAIL3 +%insert-lines AVAIL29 */ %{DeclareOperation_1.2 GROUPED_CONV_2D 55}, @@ -4405,8 +4298,8 @@ * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32} * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} -%kind canonical ndk hal_1.3+ - * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4}) +%kind ndk hal_1.3+ + * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{APILevel30}) %/kind * * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout. @@ -4424,7 +4317,7 @@ * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM}, this tensor should * be of {@link %{OperandTypeLinkPfx}TENSOR_QUANT16_ASYMM}, with zeroPoint * of 0 and scale of 0.125. -%kind canonical ndk hal_1.3+ +%kind ndk hal_1.3+ * For input0 of type * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED}, this tensor * should be of {@link %{OperandTypeLinkPfx}TENSOR_QUANT16_ASYMM}, with @@ -4436,7 +4329,7 @@ * Outputs: * * 0: A tensor of the same {@link %{OperandType}} as input0, with shape * [num_boxes, num_keypoints], specifying score of the keypoints. -%kind canonical ndk hal_1.3+ +%kind ndk hal_1.3+ * For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} or * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} tensor, * the scale and zeroPoint can be different from input0 scale and zeroPoint. @@ -4450,7 +4343,7 @@ * [keypoint_x, keypoint_y]. * For type of {@link %{OperandTypeLinkPfx}TENSOR_QUANT16_ASYMM}, the * scale must be 0.125 and the zero point must be 0. -%insert AVAIL3 +%insert-lines AVAIL29 */ %{DeclareOperation_1.2 HEATMAP_MAX_KEYPOINT 56}, @@ -4502,7 +4395,7 @@ * * Outputs: * * 0: A tensor of the same {@link %{OperandType}} and same shape as input0. 
-%insert AVAIL3 +%insert-lines AVAIL29 */ %{DeclareOperation_1.2 INSTANCE_NORMALIZATION 57}, @@ -4515,8 +4408,8 @@ * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32} * * {@link %{OperandTypeLinkPfx}TENSOR_INT32} * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} -%kind canonical ndk hal_1.3+ - * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4}) +%kind ndk hal_1.3+ + * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{APILevel30}) %/kind * * Supported tensor rank: from 1 @@ -4530,7 +4423,7 @@ * * Outputs: * * 0: A tensor of {@link %{OperandTypeLinkPfx}TENSOR_BOOL8}. -%insert AVAIL3 +%insert-lines AVAIL29 */ %{DeclareOperation_1.2 LESS 58}, @@ -4543,8 +4436,8 @@ * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32} * * {@link %{OperandTypeLinkPfx}TENSOR_INT32} * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} -%kind canonical ndk hal_1.3+ - * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4}) +%kind ndk hal_1.3+ + * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{APILevel30}) %/kind * * Supported tensor rank: from 1 @@ -4558,7 +4451,7 @@ * * Outputs: * * 0: A tensor of {@link %{OperandTypeLinkPfx}TENSOR_BOOL8}. -%insert AVAIL3 +%insert-lines AVAIL29 */ %{DeclareOperation_1.2 LESS_EQUAL 59}, @@ -4576,7 +4469,7 @@ * * Outputs: * * 0: The output tensor of same shape as input0. -%insert AVAIL3 +%insert-lines AVAIL29 */ %{DeclareOperation_1.2 LOG 60}, @@ -4597,7 +4490,7 @@ * * Outputs: * * 0: A tensor of {@link %{OperandTypeLinkPfx}TENSOR_BOOL8}. -%insert AVAIL3 +%insert-lines AVAIL29 */ %{DeclareOperation_1.2 LOGICAL_AND 61}, @@ -4614,7 +4507,7 @@ * * Outputs: * * 0: The output tensor of same shape as input0. -%insert AVAIL3 +%insert-lines AVAIL29 */ %{DeclareOperation_1.2 LOGICAL_NOT 62}, @@ -4635,7 +4528,7 @@ * * Outputs: * * 0: A tensor of {@link %{OperandTypeLinkPfx}TENSOR_BOOL8}. 
-%insert AVAIL3 +%insert-lines AVAIL29 */ %{DeclareOperation_1.2 LOGICAL_OR 63}, @@ -4667,7 +4560,7 @@ * Outputs: * * 0: The output tensor of the same {@link %{OperandType}} and shape as * input0. -%insert AVAIL3 +%insert-lines AVAIL29 */ %{DeclareOperation_1.2 LOG_SOFTMAX 64}, @@ -4679,8 +4572,8 @@ * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32} * * {@link %{OperandTypeLinkPfx}TENSOR_INT32} * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} -%kind canonical ndk hal_1.3+ - * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4}) +%kind ndk hal_1.3+ + * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{APILevel30}) %/kind * * Supported tensor rank: from 1. @@ -4694,7 +4587,7 @@ * * Outputs: * * 0: A tensor of the same {@link %{OperandType}} as input0. -%kind canonical ndk hal_1.3+ +%kind ndk hal_1.3+ * For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} and * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} tensor, * the scale and zeroPoint can be different from inputs' scale and zeroPoint. @@ -4702,7 +4595,7 @@ * For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} tensor, * the scale and zeroPoint can be different from inputs' scale and zeroPoint. %/kind -%insert AVAIL3 +%insert-lines AVAIL29 */ %{DeclareOperation_1.2 MAXIMUM 65}, @@ -4714,8 +4607,8 @@ * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32} * * {@link %{OperandTypeLinkPfx}TENSOR_INT32} * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} -%kind canonical ndk hal_1.3+ - * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4}) +%kind ndk hal_1.3+ + * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{APILevel30}) %/kind * * Supported tensor rank: from 1. @@ -4729,7 +4622,7 @@ * * Outputs: * * 0: A tensor of the same {@link %{OperandType}} as input0. 
-%kind canonical ndk hal_1.3+ +%kind ndk hal_1.3+ * For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} and * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} tensor, * the scale and zeroPoint can be different from inputs' scale and zeroPoint. @@ -4737,7 +4630,7 @@ * For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} tensor, * the scale and zeroPoint can be different from inputs' scale and zeroPoint. %/kind -%insert AVAIL3 +%insert-lines AVAIL29 */ %{DeclareOperation_1.2 MINIMUM 66}, @@ -4756,7 +4649,7 @@ * * Outputs: * * 0: The output tensor of same shape as input0. -%insert AVAIL3 +%insert-lines AVAIL29 */ %{DeclareOperation_1.2 NEG 67}, @@ -4769,8 +4662,8 @@ * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32} * * {@link %{OperandTypeLinkPfx}TENSOR_INT32} * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} -%kind canonical ndk hal_1.3+ - * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4}) +%kind ndk hal_1.3+ + * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{APILevel30}) %/kind * * Supported tensor rank: from 1 @@ -4784,7 +4677,7 @@ * * Outputs: * * 0: A tensor of {@link %{OperandTypeLinkPfx}TENSOR_BOOL8}. -%insert AVAIL3 +%insert-lines AVAIL29 */ %{DeclareOperation_1.2 NOT_EQUAL 68}, @@ -4796,8 +4689,8 @@ * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32} * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} -%kind canonical ndk hal_1.3+ - * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4}) +%kind ndk hal_1.3+ + * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{APILevel30}) %/kind * * Supported tensor rank: up to 4 @@ -4811,12 +4704,12 @@ * front of dimension i. * padding[i, 1] specifies the number of elements to be padded after * the end of dimension i. - * * 2: A scalar specifying the value to use for padding input0. + * * 2: An scalar specifying the value to use for padding input0. 
* For input tensor of {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16}, the * pad value must be of {@link %{OperandTypeLinkPfx}FLOAT16}. * For input tensor of {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32}, the * pad value must be of {@link %{OperandTypeLinkPfx}FLOAT32}. -%kind canonical ndk hal_1.3+ +%kind ndk hal_1.3+ * For input tensor of {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} and * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED}, %else @@ -4833,7 +4726,7 @@ * of the padding: * output0.dimension[i] = * padding[i, 0] + input0.dimension[i] + padding[i, 1] -%kind canonical ndk hal_1.3+ +%kind ndk hal_1.3+ * For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} and * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} tensor, * the scale and zeroPoint must be the same as input0. @@ -4841,7 +4734,7 @@ * For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} tensor, * the scale and zeroPoint must be the same as input0. %/kind -%insert AVAIL3 +%insert-lines AVAIL29 */ %{DeclareOperation_1.2 PAD_V2 69}, @@ -4872,7 +4765,7 @@ * * Outputs: * * 0: An output tensor. -%insert AVAIL3 +%insert-lines AVAIL29 */ %{DeclareOperation_1.2 POW 70}, @@ -4900,8 +4793,8 @@ * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32} * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} -%kind canonical ndk hal_1.3+ - * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4}) +%kind ndk hal_1.3+ + * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{APILevel30}) %/kind * * Supported tensor rank: from 1 @@ -4913,7 +4806,7 @@ * * Outputs: * * 0: A tensor of the same {@link %{OperandType}} as input0. -%kind canonical ndk hal_1.3+ +%kind ndk hal_1.3+ * For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} and * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} tensor, * the scales and zeroPoint can be different from input0 scale and zeroPoint. 
@@ -4921,7 +4814,7 @@ * For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} tensor, * the scales and zeroPoint can be different from input0 scale and zeroPoint. %/kind -%insert AVAIL3 +%insert-lines AVAIL29 */ %{DeclareOperation_1.2 PRELU 71}, @@ -4932,7 +4825,7 @@ * * output = max(0, min(255, round(input / scale) + zeroPoint) * -%kind canonical ndk hal_1.3+ +%kind ndk hal_1.3+ * The formula for {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} output * tensor is: * @@ -4945,8 +4838,8 @@ * * Supported output tensor {@link %{OperandType}}: * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} -%kind canonical ndk hal_1.3+ - * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4}) +%kind ndk hal_1.3+ + * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{APILevel30}) %/kind * * Supported tensor rank: from 1 @@ -4956,13 +4849,13 @@ * * Outputs: * * 0: The output tensor of same shape as input0, but with -%kind canonical ndk hal_1.3+ +%kind ndk hal_1.3+ * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} or. * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED}. %else * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM}. %/kind -%insert AVAIL3 +%insert-lines AVAIL29 */ %{DeclareOperation_1.2 QUANTIZE 72}, @@ -5089,7 +4982,7 @@ * Outputs: * * 0: A 2-D {@link %{OperandTypeLinkPfx}TENSOR_INT32} tensor with shape * [batches, samples], containing the drawn samples. -%insert AVAIL3 +%insert-lines AVAIL29 */ %{DeclareOperation_1.2 RANDOM_MULTINOMIAL 74}, @@ -5117,7 +5010,7 @@ * * 0: A tensor of the same {@link %{OperandType}} as input0. * If all dimensions are reduced and keep_dims is false, the output * shape is [1]. -%insert AVAIL3 +%insert-lines AVAIL29 */ %{DeclareOperation_1.2 REDUCE_ALL 75}, @@ -5145,7 +5038,7 @@ * * 0: A tensor of the same {@link %{OperandType}} as input0. * If all dimensions are reduced and keep_dims is false, the output * shape is [1]. 
-%insert AVAIL3 +%insert-lines AVAIL29 */ %{DeclareOperation_1.2 REDUCE_ANY 76}, @@ -5161,8 +5054,8 @@ * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32} * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} -%kind canonical ndk hal_1.3+ - * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4}) +%kind ndk hal_1.3+ + * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{APILevel30}) %/kind * * Supported tensor rank: up to 4 @@ -5178,7 +5071,7 @@ * * 0: A tensor of the same {@link %{OperandType}} as input0. * If all dimensions are reduced and keep_dims is false, the output * shape is [1]. -%kind canonical ndk hal_1.3+ +%kind ndk hal_1.3+ * For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} and * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} tensor, * the scale and zeroPoint must be the same as input0. @@ -5186,7 +5079,7 @@ * For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} tensor, * the scale and zeroPoint must be the same as input0. %/kind -%insert AVAIL3 +%insert-lines AVAIL29 */ %{DeclareOperation_1.2 REDUCE_MAX 77}, @@ -5202,8 +5095,8 @@ * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32} * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} -%kind canonical ndk hal_1.3+ - * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4}) +%kind ndk hal_1.3+ + * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{APILevel30}) %/kind * * Supported tensor rank: up to 4 @@ -5219,7 +5112,7 @@ * * 0: A tensor of the same {@link %{OperandType}} as input0. * If all dimensions are reduced and keep_dims is false, the output * shape is [1]. -%kind canonical ndk hal_1.3+ +%kind ndk hal_1.3+ * For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} and * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} tensor, * the scale and zeroPoint must be the same as input0. 
@@ -5227,7 +5120,7 @@ * For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} tensor, * the scale and zeroPoint must be the same as input0. %/kind -%insert AVAIL3 +%insert-lines AVAIL29 */ %{DeclareOperation_1.2 REDUCE_MIN 78}, @@ -5255,7 +5148,7 @@ * * 0: A tensor of the same {@link %{OperandType}} as input0. * If all dimensions are reduced and keep_dims is false, the output * shape is [1]. -%insert AVAIL3 +%insert-lines AVAIL29 */ %{DeclareOperation_1.2 REDUCE_PROD 79}, @@ -5283,7 +5176,7 @@ * * 0: A tensor of the same {@link %{OperandType}} as input0. * If all dimensions are reduced and keep_dims is false, the output * shape is [1]. -%insert AVAIL3 +%insert-lines AVAIL29 */ %{DeclareOperation_1.2 REDUCE_SUM 80}, @@ -5304,8 +5197,8 @@ * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32} * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} -%kind canonical ndk hal_1.3+ - * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4}) +%kind ndk hal_1.3+ + * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{APILevel30}) %/kind * * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout. @@ -5345,14 +5238,14 @@ * Outputs: * * 0: A tensor of the same {@link %{OperandType}} as input0. The output * shape is [num_rois, out_height, out_width, depth]. -%kind canonical ndk hal_1.3+ +%kind ndk hal_1.3+ * For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} and * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} tensor, %else * For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} tensor, %/kind * the scale and zeroPoint can be different from the input0 scale and zeroPoint. 
-%insert AVAIL3 +%insert-lines AVAIL29 */ %{DeclareOperation_1.2 ROI_ALIGN 81}, @@ -5372,8 +5265,8 @@ * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16} * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32} * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} -%kind canonical ndk hal_1.3+ - * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4}) +%kind ndk hal_1.3+ + * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{APILevel30}) %/kind * * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout. @@ -5385,7 +5278,7 @@ * * 0: A 4-D tensor, specifying the feature map. * * 1: A 2-D Tensor of shape [num_rois, 4], specifying the locations of * the regions of interest, each line with format [x1, y1, x2, y2]. -%kind canonical ndk hal_1.3+ +%kind ndk hal_1.3+ * For input0 of type {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} and * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} tensor, %else @@ -5410,14 +5303,14 @@ * Outputs: * * 0: A tensor of the same {@link %{OperandType}} as input0. The output * shape is [num_rois, out_height, out_width, depth]. -%kind canonical ndk hal_1.3+ +%kind ndk hal_1.3+ * For input0 of type {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} and * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} tensor, %else * For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} tensor, %/kind * the scale and zeroPoint must be the same as input0. -%insert AVAIL3 +%insert-lines AVAIL29 */ %{DeclareOperation_1.2 ROI_POOLING 82}, @@ -5435,7 +5328,7 @@ * * Outputs: * * 0: The output tensor of same shape as input0. 
-%insert AVAIL3 +%insert-lines AVAIL29 */ %{DeclareOperation_1.2 RSQRT 83}, @@ -5450,8 +5343,8 @@ * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32} * * {@link %{OperandTypeLinkPfx}TENSOR_INT32} * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} -%kind canonical ndk hal_1.3 - * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4}) +%kind ndk hal_1.3 + * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{APILevel30}) %/kind * * Supported tensor rank: from 1 @@ -5477,7 +5370,7 @@ * * 0: A tensor of the same type and shape as input1 and input2. * For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} tensor, * the scale and zeroPoint can be different from inputs' scale and zeroPoint. -%insert AVAIL3 +%insert-lines AVAIL29 */ %{DeclareOperation_1.2 SELECT 84}, @@ -5495,7 +5388,7 @@ * * Outputs: * * 0: The output tensor of same shape as input0. -%insert AVAIL3 +%insert-lines AVAIL29 */ %{DeclareOperation_1.2 SIN 85}, @@ -5516,8 +5409,8 @@ * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32} * * {@link %{OperandTypeLinkPfx}TENSOR_INT32} * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} -%kind canonical ndk hal_1.3+ - * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4}) +%kind ndk hal_1.3+ + * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{APILevel30}) %/kind * * Supported tensor rank: from 1 @@ -5531,14 +5424,14 @@ * * Outputs: * * 0: An n-D tensor of the same type as the input containing the slice. -%kind canonical ndk hal_1.3+ +%kind ndk hal_1.3+ * For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} and * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} tensor, %else * For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} tensor, %/kind * its scale and zeroPoint has to be same as the input0 scale and zeroPoint. 
-%insert AVAIL3 +%insert-lines AVAIL29 */ %{DeclareOperation_1.2 SLICE 86}, @@ -5550,8 +5443,8 @@ * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32} * * {@link %{OperandTypeLinkPfx}TENSOR_INT32} * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} -%kind canonical ndk hal_1.3+ - * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4}) +%kind ndk hal_1.3+ + * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{APILevel30}) %/kind * * Supported tensor rank: from 1 @@ -5565,7 +5458,7 @@ * * Outputs: * * 0 ~ (num_splits - 1): Resulting subtensors. -%kind canonical ndk hal_1.3+ +%kind ndk hal_1.3+ * For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} and * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} tensor, * the scale and zeroPoint must be the same as input0. @@ -5573,7 +5466,7 @@ * For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} tensor, * the scale and zeroPoint must be the same as input0. %/kind -%insert AVAIL3 +%insert-lines AVAIL29 */ %{DeclareOperation_1.2 SPLIT 87}, @@ -5591,7 +5484,7 @@ * * Outputs: * * 0: The output tensor of same shape as input0. -%insert AVAIL3 +%insert-lines AVAIL29 */ %{DeclareOperation_1.2 SQRT 88}, @@ -5609,8 +5502,8 @@ * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32} * * {@link %{OperandTypeLinkPfx}TENSOR_INT32} * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} -%kind canonical ndk hal_1.3+ - * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4}) +%kind ndk hal_1.3+ + * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{APILevel30}) %/kind * * Supported tensor rank: from 1 @@ -5622,7 +5515,7 @@ * * Outputs: * * 0: A tiled tensor of the same {@link %{OperandType}} and rank as `input`. -%kind canonical ndk hal_1.3+ +%kind ndk hal_1.3+ * For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} and * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} tensor, * the scale and zeroPoint must be the same as input0. 
@@ -5630,7 +5523,7 @@ * For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} tensor, * the scale and zeroPoint must be the same as input0. %/kind -%insert AVAIL3 +%insert-lines AVAIL29 */ %{DeclareOperation_1.2 TILE 89}, @@ -5645,8 +5538,8 @@ * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32} * * {@link %{OperandTypeLinkPfx}TENSOR_INT32} * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} -%kind canonical ndk hal_1.3+ - * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4}) +%kind ndk hal_1.3+ + * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{APILevel30}) %/kind * * Supported tensor rank: from 1 @@ -5659,7 +5552,7 @@ * Outputs: * * 0: An n-D tensor of the same type as the input, containing the k * largest elements along each last dimensional slice. -%kind canonical ndk hal_1.3+ +%kind ndk hal_1.3+ * For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} and * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} tensor, * the scale and zeroPoint must be the same as input0. @@ -5669,7 +5562,7 @@ %/kind * * 1: An n-D tensor of type {@link %{OperandTypeLinkPfx}TENSOR_INT32} * containing the indices of values within the last dimension of input. -%insert AVAIL3 +%insert-lines AVAIL29 */ %{DeclareOperation_1.2 TOPK_V2 90}, @@ -5700,16 +5593,15 @@ * * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter. * * * {@link %{OperandTypeLinkPfx}TENSOR_INT32} for bias (scale set to 0.0, * * * each value scaling is separate and equal to input.scale * filter.scales[channel]). -%kind canonical ndk hal_1.3+ +%kind ndk hal_1.3+ * - * Available since %{NNAPILevel4}: - * * Quantized signed (since %{NNAPILevel4}): + * Available since %{APILevel30}: + * * Quantized signed (since %{APILevel30}): * * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} for input, filter, and output. * * * {@link %{OperandTypeLinkPfx}TENSOR_INT32} for bias (with scale set to * * * input.scale * filter.scale). 
* - * * Quantized signed with filter symmetric per channel quantization - * (since %{NNAPILevel4}): + * * Quantized signed with filter symmetric per channel quantization (since %{APILevel30}): * * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} for input, and output. * * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter. * * * {@link %{OperandTypeLinkPfx}TENSOR_INT32} for bias (scale set to 0.0, @@ -5738,7 +5630,7 @@ * tensor of type {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32} or * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16}, the bias must be of the * same type. -%kind canonical ndk hal_1.3+ +%kind ndk hal_1.3+ * For filter tensor of {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} * and {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED}, %else @@ -5783,7 +5675,7 @@ * tensor of type {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32} or * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT16}, the bias should be of the * same type. -%kind canonical ndk hal_1.3+ +%kind ndk hal_1.3+ * For filter tensor of {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} * and {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED}, %else @@ -5799,7 +5691,7 @@ * tensor shape. * * 4: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the implicit * padding scheme, has to be one of the -%insert PaddingCodeValues +%insert-lines PaddingCodeValues * * 5: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the stride when * walking through input in the ‘width’ dimension. * * 6: An {@link %{OperandTypeLinkPfx}INT32} scalar, specifying the stride when @@ -5813,14 +5705,14 @@ * Outputs: * * 0: The output 4-D tensor, of shape * [batches, out_height, out_width, depth_out]. 
-%kind canonical ndk hal_1.3+ +%kind ndk hal_1.3+ * For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} and * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} tensor, %else * For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} tensor, %/kind * the scale and zeroPoint can be different from inputs' scale and zeroPoint. -%insert AVAIL3 +%insert-lines AVAIL29 */ %{DeclareOperation_1.2 TRANSPOSE_CONV_2D 91}, @@ -5930,19 +5822,19 @@ * A 3-D tensor of shape: * If time-major: [max_time, batch_size, output_size] * If batch-major: [batch_size, max_time, output_size] -%kind canonical ndk hal_1.3+ +%kind ndk hal_1.3+ * * 1: A tensor of shape [batch_size, output_size] containing a hidden * state from the last time step in the sequence. This output is * optional and can be omitted. If this output is present then * output #2 must be present as well. - * Available since %{NNAPILevel4}. + * Available since %{APILevel30}. * * 2: A tensor of shape [batch_size, cell_size] containing a cell state * from the last time step in the sequence. This output is optional * and can be omitted. - * Available since %{NNAPILevel4}. + * Available since %{APILevel30}. %/kind -%insert AVAIL3 -%insert OutputState +%insert-lines AVAIL29 +%insert-lines OutputState */ %{DeclareOperation_1.2 UNIDIRECTIONAL_SEQUENCE_LSTM 92}, @@ -6002,10 +5894,10 @@ * * 1: A tensor of shape [batchSize, numUnits] containing hidden state * from the last time step in the sequence. This output is optional * and can be omitted. - * Available since %{NNAPILevel4}. + * Available since %{APILevel30}. 
%/kind -%insert AVAIL3 -%insert OutputState +%insert-lines AVAIL29 +%insert-lines OutputState */ %{DeclareOperation_1.2 UNIDIRECTIONAL_SEQUENCE_RNN 93}, @@ -6021,7 +5913,7 @@ * * {@link %{OperandTypeLinkPfx}TENSOR_FLOAT32} * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} %kind ndk hal_1.3+ - * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{NNAPILevel4}) + * * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} (since %{APILevel30}) %/kind * * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout. @@ -6040,18 +5932,18 @@ * height of the output tensor. * * 3: An {@link %{OperandTypeLinkPfx}BOOL} scalar, default to false. * Set to true to specify NCHW data layout for input0 and output0. -%kind canonical ndk hal_1.3+ +%kind ndk hal_1.3+ * * 4: Align corners. An optional {@link %{OperandTypeLinkPfx}BOOL} * scalar, default to false. If True, the centers of the 4 corner * pixels of the input and output tensors are aligned, preserving the * values at the corner pixels. - * Available since %{NNAPILevel4}. + * Available since %{APILevel30}. * * 5: Half pixel centers. An optional {@link %{OperandTypeLinkPfx}BOOL} * scalar, default to false. If True, the pixel centers are assumed to * be at (0.5, 0.5). This is the default behavior of image.resize in * TF 2.0. If this parameter is True, then align_corners parameter * must be False. - * Available since %{NNAPILevel4}. + * Available since %{APILevel30}. %/kind * * Inputs (resizing by scale): @@ -6071,24 +5963,24 @@ * {@link %{OperandTypeLinkPfx}FLOAT32} otherwise. * * 3: An {@link %{OperandTypeLinkPfx}BOOL} scalar, default to false. * Set to true to specify NCHW data layout for input0 and output0. -%kind canonical ndk hal_1.3+ +%kind ndk hal_1.3+ * * 4: Align corners. An optional {@link %{OperandTypeLinkPfx}BOOL} * scalar, default to false. If True, the centers of the 4 corner * pixels of the input and output tensors are aligned, preserving the * values at the corner pixels. 
- * Available since %{NNAPILevel4}. + * Available since %{APILevel30}. * * 5: Half pixel centers. An optional {@link %{OperandTypeLinkPfx}BOOL} * scalar, default to false. If True, the pixel centers are assumed to * be at (0.5, 0.5). This is the default behavior of image.resize in * TF 2.0. If this parameter is True, then align_corners parameter * must be False. - * Available since %{NNAPILevel4}. + * Available since %{APILevel30}. %/kind * * Outputs: * * 0: The output 4-D tensor, of shape * [batches, new_height, new_width, depth]. -%kind canonical ndk hal_1.3+ +%kind ndk hal_1.3+ * For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} and * {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} tensor, * the scale and zeroPoint must be the same as input0. @@ -6096,7 +5988,7 @@ * For a {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM} tensor, * the scale and zeroPoint must be the same as input0. %/kind -%insert AVAIL3 +%insert-lines AVAIL29 */ %{DeclareOperation_1.2 RESIZE_NEAREST_NEIGHBOR 94}, %/section @@ -6121,7 +6013,7 @@ * * The formula is: * real_value = (integer_value - zeroPoint) * scale. -%insert AVAIL4 +%insert-lines AVAIL30 */ %{ANN}TENSOR_QUANT8_ASYMM_SIGNED = 14, @@ -6132,11 +6024,11 @@ * {@link ANeuralNetworksModel_setOperandValueFromModel} must be used to set * the value for an Operand of this type. %/kind -%kind canonical hal* +%kind hal* * - * Must have the lifetime {@link %{OperandLifeTime}::SUBGRAPH}. + * Must have the lifetime {@link OperandLifeTime::SUBGRAPH}. %/kind -%insert AVAIL4 +%insert-lines AVAIL30 */ %{ANN}%{MODEL_or_SUBGRAPH} = 15, %/section @@ -6277,7 +6169,7 @@ * "output state (out)" value. 
* Type: {@link %{OperandTypeLinkPfx}TENSOR_QUANT8_ASYMM_SIGNED} * Shape: [batchSize, outputSize] -%insert AVAIL4 +%insert-lines AVAIL30 */ %{DeclareOperation_1.3 QUANTIZED_LSTM 95}, @@ -6291,7 +6183,7 @@ * types, ranks%{NDK_if_specified}, dimensions%{NDK_if_specified}, scales, * zeroPoints, and %{otherOperandParameters} as the corresponding operation * inputs and outputs. -%kind canonical hal* +%kind hal* * All of the operands mentioned must have fully specified dimensions. %/kind * @@ -6307,7 +6199,7 @@ * * Outputs: * * 0 ~ (m - 1): Outputs produced by the selected %{model_or_subgraph}. -%insert AVAIL4 +%insert-lines AVAIL30 */ %{DeclareOperation_1.3 IF 96}, @@ -6371,7 +6263,7 @@ %kind ndk * The output operand must have fully specified dimensions. %/kind -%kind canonical hal* +%kind hal* * All of the operands mentioned must have fully specified dimensions. %/kind * * 1: A {@link %{OperandTypeLinkPfx}%{MODEL_or_SUBGRAPH}} reference to the body %{model_or_subgraph}. @@ -6379,7 +6271,7 @@ * the same types, ranks%{NDK_if_specified}, dimensions%{NDK_if_specified}, * scales, zeroPoints, and %{otherOperandParameters} as the * corresponding inputs and outputs of the WHILE operation. -%kind canonical hal* +%kind hal* * All of the operands mentioned must have fully specified dimensions. %/kind * * (m inputs): Initial values for input-output operands. @@ -6388,7 +6280,7 @@ * * Outputs: * * 0 ~ (m - 1): Outputs produced by the loop. -%insert AVAIL4 +%insert-lines AVAIL30 */ %{DeclareOperation_1.3 WHILE 97}, @@ -6415,7 +6307,7 @@ * * Outputs: * * 0: The output tensor of same shape and type as input0. -%insert AVAIL4 +%insert-lines AVAIL30 */ %{DeclareOperation_1.3 ELU 98}, @@ -6444,7 +6336,7 @@ * * 0: The output tensor of same shape and type as input0. * Scale and zero point of this tensor may be different from the input * tensor's parameters. 
-%insert AVAIL4 +%insert-lines AVAIL30 */ %{DeclareOperation_1.3 HARD_SWISH 99}, @@ -6470,7 +6362,7 @@ * * Outputs: * * 0: The output tensor. -%insert AVAIL4 +%insert-lines AVAIL30 */ %{DeclareOperation_1.3 FILL 100}, @@ -6500,7 +6392,7 @@ * Outputs: * * 0: A scalar of {@link %{OperandTypeLinkPfx}INT32}, specifying the rank * of the input tensor. -%insert AVAIL4 +%insert-lines AVAIL30 */ %{DeclareOperation_1.3 RANK 101}, %/section @@ -6510,1316 +6402,3 @@ %/section %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% - -%% Misc HAL types - -%section OperandLifeTime -/** - * How an operand is used. - */ -%kind canonical -enum class LifeTime { -%else -%{enum OperandLifeTime int32_t} { -%/kind - /** - * The operand is internal to the model. It's created by an operation and - * consumed by other operations. It must be an output operand of - * exactly one operation. - */ - %{DeclareEnumValue TEMPORARY_VARIABLE 0}, - - /** - * The operand is an input of %{the_model_or_a_subgraph}. It must not be an output - * operand of any operation. - * - * An operand can't be both input and output of a %{model_or_subgraph}. - */ -%kind hal_1.0 - %{DeclareEnumValue MODEL_INPUT 1}, -%else - %{DeclareEnumValue SUBGRAPH_INPUT 1}, -%/kind - - /** - * The operand is an output of %{the_model_or_a_subgraph}. It must be an output - * operand of exactly one operation. - * - * An operand can't be both input and output of a %{model_or_subgraph}. - */ -%kind hal_1.0 - %{DeclareEnumValue MODEL_OUTPUT 2}, -%else - %{DeclareEnumValue SUBGRAPH_OUTPUT 2}, -%/kind - - /** - * The operand is a constant found in Model%{::}operandValues. It must - * not be an output operand of any operation. - */ - %{DeclareEnumValue CONSTANT_COPY 3}, - - /** - * The operand is a constant that was specified via a Memory - * object. It must not be an output operand of any operation. - */ - %{DeclareEnumValue CONSTANT_REFERENCE 4}, - - /** - * The operand does not have a value. 
This is valid only for optional - * arguments of operations. - */ - %{DeclareEnumValue NO_VALUE 5}, -%kind canonical hal_1.3+ - - /** - * The operand is a reference to a subgraph. It must be an input to one - * or more {@link OperationType::IF} or {@link OperationType::WHILE} - * operations. - */ - %{DeclareEnumValue SUBGRAPH 6}, -%/kind -%kind canonical - - /** - * This operand is a constant found in a user buffer. It must not be an - * output operand of any operation. - */ - %{DeclareEnumValue POINTER 7}, -%/kind -}; -%/section - -%section DeviceStatus -/** - * Status of a device. - */ -%{enum DeviceStatus int32_t} { - %{DeclareEnumValue AVAILABLE 0}, - %{DeclareEnumValue BUSY 1}, - %{DeclareEnumValue OFFLINE 2}, - %{DeclareEnumValue UNKNOWN 3}, -}; -%/section - -%kind canonical -%define init_execTime = kDefaultExecTime -%define init_powerUsage = kDefaultPowerUsage -%else -%define init_execTime -%define init_powerUsage -%/kind - -%section PerformanceInfo -/** - * Performance information for the reference workload. - * - * Used by a driver to report its performance characteristics. - */ -struct PerformanceInfo { - /** - * Ratio of the time taken by the driver to execute the - * workload compared to the time the CPU would take for the - * same workload. A lower number is better. - */ - float execTime%{init_execTime}; - - /** - * Ratio of the energy used by the driver compared to what - * the CPU would use for doing the same workload. A lower number - * is better. - */ - float powerUsage%{init_powerUsage}; -}; -%/section - -%section OutputShape -/** - * Describes the shape information of an output operand after execution. - */ -struct OutputShape { - /** - * Dimensions of the operand. - */ - %{vec}<uint32_t> dimensions; - - /** - * Whether the provided buffer size is sufficient for the output. - */ - bool isSufficient%{init_bool}; -}; -%/section - -%section MeasureTiming -/** - * Specifies whether or not to measure timing information during execution. 
- */ -%{enum MeasureTiming int32_t} { - NO = 0, - YES = 1, -}; -%/section - -%section ExecutionPreference -/** - * Execution preferences. -%insert AVAIL1Short - */ -%{enum ExecutionPreference int32_t} { - /** - * Prefer executing in a way that minimizes battery drain. - * This is desirable for compilations that will be executed often. - */ - %{DeclareExecutionPreference LOW_POWER 0}, - /** - * Prefer returning a single answer as fast as possible, even if this causes - * more power consumption. - */ - %{DeclareExecutionPreference FAST_SINGLE_ANSWER 1}, - /** - * Prefer maximizing the throughput of successive frames, for example when - * processing successive frames coming from the camera. - */ - %{DeclareExecutionPreference SUSTAINED_SPEED 2}, -%kind canonical - DEFAULT = FAST_SINGLE_ANSWER, -%/kind -}%{ndk_enum_name PreferenceCode}; -%/section - -%section DeviceType -/** - * Device types. - * - * The type of NNAPI device. - */ -%{enum DeviceType int32_t} { -%kind hal* - // Leaving 0 unused as it means unknown type in NDK NNAPI. There is no - // HAL equivalent of unknown type and a 1.2 HAL implementation must belong - // to one of the categories below. -%else - /** The device type cannot be provided. */ - %{DeclareDeviceType UNKNOWN 0}, -%/kind - /** The device does not fall into any category below. */ - %{DeclareDeviceType OTHER 1}, - /** The device runs NNAPI models on single or multi-core CPU. */ - %{DeclareDeviceType CPU 2}, - /** The device can run NNAPI models and also accelerate graphics APIs such - * as OpenGL ES and Vulkan. */ - %{DeclareDeviceType GPU 3}, - /** Dedicated accelerator for Machine Learning workloads. */ - %{DeclareDeviceType ACCELERATOR 4}, -}%{ndk_enum_name DeviceTypeCode}; -%/section - -%% NOTE: This is different from the NDK PriorityCode. -%section Priority -/** -%kind ndk - * Relative execution priority. - * - * Available since NNAPI feature level 4. -%else - * Priority given to a prepared model for execution. 
-%/kind - */ -%{enum Priority int32_t} { - %{DeclarePriority LOW 0 90}, - %{DeclarePriority MEDIUM 1 100}, - %{DeclarePriority HIGH 2 110}, -%kind canonical ndk - %{DeclarePriority DEFAULT MEDIUM ANEURALNETWORKS_PRIORITY_MEDIUM}, -%/kind -}%{ndk_enum_name PriorityCode}; -%/section - -%kind canonical -%define OptionalDuration OptionalDuration -%else -%define OptionalDuration uint64_t -%/kind - -%section Timing -/** -%kind hal_1.2 - -%/kind - * Timing information measured during execution. Each time is a duration from - * the beginning of some task to the end of that task, including time when that - * task is not active (for example, preempted by some other task, or - * waiting for some resource to become available). - * -%kind hal* - * Times are measured in microseconds. - * When a time is not available, it must be reported as UINT64_MAX. -%else - * Times are measured in nanoseconds. -%/kind - */ -struct Timing { - /** Execution time on device (not driver, which runs on host processor). */ - %{OptionalDuration} timeOnDevice; - /** Execution time in driver (including time on device). */ - %{OptionalDuration} timeInDriver; -}; -%/section - -%section Capabilities_float_quant_performance - /** - * Driver performance when operating on float32 data. - */ - PerformanceInfo float32Performance; - - /** - * Driver performance when operating on asymmetric 8-bit quantized data. - */ - PerformanceInfo quantized8Performance; -%/section - -%kind canonical -%define OperandPerformanceTable OperandPerformanceTable -%else -%define OperandPerformanceTable vec<OperandPerformance> -%/kind - -%section Capabilities_relaxedPerformance - /** - * Driver performance when operating on float32 data but performing - * calculations with range and/or precision as low as that of the IEEE - * 754 16-bit floating-point format. 
- */ -%kind hal_1.1 - PerformanceInfo relaxedFloat32toFloat16Performance; -%else - PerformanceInfo relaxedFloat32toFloat16PerformanceScalar; - PerformanceInfo relaxedFloat32toFloat16PerformanceTensor; -%/kind -%/section - -%section Capabilities_operandPerformance - /** - * Performance by operand type. Must be sorted by OperandType. -%kind hal_1.2 - * If a particular OperandType is not present in operandPerformance, - * its performance is treated as { .execTime = FLT_MAX, .powerUsage = FLT_MAX }. -%else - * - * If a particular {@link OperandType} is not present in operandPerformance, - * its performance is treated as - * { .execTime = FLT_MAX, .powerUsage = FLT_MAX }. - * - * Performance does not apply to {@link OperandType::SUBGRAPH}, and a driver - * must not report operand performance for {@link OperandType::SUBGRAPH}. -%/kind - */ - %{OperandPerformanceTable} operandPerformance; -%/section - -%section Capabilities_if_while_performance - /** - * Performance of an {@link OperationType::IF} operation is the sum of - * {@link Capabilities::ifPerformance} and the mean of performance for the - * two branch subgraphs, where performance for a subgraph is the sum of the - * performance of all operations within the subgraph. - */ - PerformanceInfo ifPerformance; - - /** - * Performance of a {@link OperationType::WHILE} operation is the sum of - * {@link Capabilities::whilePerformance}, performance for the condition - * subgraph and performance for the body subgraph, where performance for a - * subgraph is the sum of the performance of all operations within the - * subgraph. - */ - PerformanceInfo whilePerformance; -%/section - -%section OperandPerformance -/** - * Driver performance when operating on a particular data type. - * In the case of float32 data, this is used when the calculations - * are not relaxed. 
- */ -struct OperandPerformance { - OperandType type%{init_pod}; - PerformanceInfo info; -}; -%/section - -%section Capabilities -/** - * The capabilities of a driver. -%kind hal_1.2 - * - * Performance of an operation comes from the type of its first operand. - * This represents performance for non extension operand types. -%/kind -%kind canonical hal_1.3+ - * - * This represents performance of non-extension operations. - * - * Performance of an operation other than {@link OperationType::IF} and - * {@link OperationType::WHILE} comes from the type of its first operand. -%/kind - */ -struct Capabilities { -%kind canonical -%insert-indented 4 PerformanceInfo - -%insert-indented 4 OperandPerformance - - class OperandPerformanceTable { - public: - static Result<OperandPerformanceTable> create( - std::vector<OperandPerformance> operandPerformances); - - PerformanceInfo lookup(OperandType type) const; - const std::vector<OperandPerformance>& asVector() const; - - private: - explicit OperandPerformanceTable(std::vector<OperandPerformance> operandPerformances); - std::vector<OperandPerformance> mSorted; - }; - -%insert Capabilities_relaxedPerformance - -%insert Capabilities_operandPerformance - -%insert Capabilities_if_while_performance -%/kind -%kind hal_1.0 -%insert Capabilities_float_quant_performance -%/kind -%kind hal_1.1 -%insert Capabilities_float_quant_performance - -%insert Capabilities_relaxedPerformance -%/kind -%kind hal_1.2 -%insert Capabilities_relaxedPerformance - -%insert-indented 4 OperandPerformance - -%insert Capabilities_operandPerformance -%/kind -%kind hal_1.3 -%insert Capabilities_relaxedPerformance - -%insert-indented 4 OperandPerformance - -%insert Capabilities_operandPerformance - -%insert Capabilities_if_while_performance -%/kind -}; -%/section - -%section DataLocation -/** - * Describes the location of a data object. - */ -struct DataLocation { -%kind canonical - /** - * The address of the memory where the data is found. 
- * - * This field is only active when lifetime is POINTER. - */ - std::variant<const void*, void*> pointer; - -%/kind - /** - * The index of the memory pool where this location is found. - */ - uint32_t poolIndex%{init_int}; - - /** - * Offset in bytes from the start of the pool. - */ - uint32_t offset%{init_int}; - - /** - * The length of the data in bytes. - */ - uint32_t length%{init_int}; -%kind canonical - - /** - * The end padding of the specified memory region in bytes. - */ - uint32_t padding%{init_int}; -%/kind -}; -%/section - -%section Extension_name - /** - * The extension name. - * - * The name must consist of lowercase latin letters, numbers, periods, and - * underscore signs. The name must contain at least one period. - * - * The name must start with the reverse domain name of the vendor. - * - * Example: com.google.test_extension - */ - %{string} name; -%/section - -%section Extension -/** - * Information about an extension. - */ -struct Extension { -%kind hal* -%insert Extension_name - -%/kind - /** - * Information about an extension operand type. - */ - struct OperandTypeInformation { - /** - * The extension operand type. - */ - uint16_t type%{init_int}; - - /** - * Indicates whether the extension operand type represents a tensor or - * a scalar. - */ - bool isTensor%{init_bool}; - - /** - * The byte size of the operand (if scalar) or of a single element (if - * tensor). - */ - uint32_t byteSize%{init_int}; - }; - -%kind canonical -%insert Extension_name - -%/kind - /** - * Information about operand types defined by the extension. - */ - %{vec}<OperandTypeInformation> operandTypes; -}; -%/section - -%section Operation -/** - * Describes one operation of the model's graph. - */ -struct Operation { - /** - * The operation type. 
-%kind hal_1.2+ - * - * Besides the values listed in {@link OperationType}, any value above - * {@link OperationTypeRange::BASE_MAX} is possible and should be interpreted - * as an extension type according to {@link Model::extensionNameToPrefix}. -%/kind - */ - OperationType type%{init_pod}; - - /** - * Describes the table that contains the indexes of the inputs of the - * operation. The offset is the index in the operandIndexes table. - */ - %{vec}<uint32_t> inputs; - - /** - * Describes the table that contains the indexes of the outputs of the - * operation. The offset is the index in the operandIndexes table. - */ - %{vec}<uint32_t> outputs; -}; -%/section - -%section FusedActivationFunc -/** - * Fused activation function types. -%insert AVAIL1Short - */ -%kind canonical -enum class FusedActivationFunc : int32_t { -%else -%{enum FusedActivationFunc int32_t} { -%/kind - /** NO fused activation function. */ - %{DeclareFusedActivationFunc NONE 0}, - /** Fused ReLU activation function. */ - %{DeclareFusedActivationFunc RELU 1}, - /** Fused ReLU1 activation function. */ - %{DeclareFusedActivationFunc RELU1 2}, - /** Fused ReLU6 activation function. */ - %{DeclareFusedActivationFunc RELU6 3}, -}%{ndk_enum_name FuseCode}; -%/section - -%section ExtraParams_Comment -/** - * Additional parameters specific to a particular operand type. - */ -%/section - -%section ExtraParams_none_Comment -/** - * No additional parameters. - */ -%/section - -%section ExtraParams_channelQuant_Comment -/** - * Symmetric per-channel quantization parameters. - * - * Only applicable to operands of type %{ANN}TENSOR_QUANT8_SYMM_PER_CHANNEL. - */ -%/section - -%section ExtraParams_extension_Comment -/** - * Extension operand parameters. - * - * The framework treats this as an opaque data blob. - * The format is up to individual extensions. - */ -%/section - -%section SymmPerChannelQuantParams_Comment -/** - * Parameters for %{ANN}TENSOR_QUANT8_SYMM_PER_CHANNEL operand. 
- */ -%/section - -%section SymmPerChannelQuantParams -%insert SymmPerChannelQuantParams_Comment -struct SymmPerChannelQuantParams { - /** Array of scaling values for each channel. Each value must be greater than zero. */ - %{vec}<float> scales; - /** Index of the channel dimension */ - uint32_t channelDim%{init_int}; -}; -%/section - -%kind canonical -%section location_pointer_is_null - * - location.pointer is null. -%/section -%else -%section location_pointer_is_null -%/section -%/kind - -%% List item symbol -%kind hal* -%define li . -%else -%define li - -%/kind - -%section Operand -/** - * Describes one operand of the model's graph. - */ -struct Operand { -%kind canonical -%insert-indented 4 OperandLifeTime - -%insert-indented 4 ExtraParams_none_Comment - using NoParams = std::monostate; - -%insert-indented 4 SymmPerChannelQuantParams - -%insert-indented 4 ExtraParams_extension_Comment - using ExtensionParams = std::vector<uint8_t>; - -%insert-indented 4 ExtraParams_Comment - using ExtraParams = std::variant<NoParams, SymmPerChannelQuantParams, ExtensionParams>; - -%/kind - /** -%kind canonical - * The data type. - * - * Besides the values listed in {@link OperationType}, any value equal or over - * (1 << kExtensionTypeBits) is possible and should be interpreted - * as an extension type according to {@link Model::extensionNameToPrefix}. -%/kind -%kind hal_1.2+ - * The data type. - * - * Besides the values listed in {@link OperandType}, any value above - * {@link OperandTypeRange::BASE_MAX} is possible and should be interpreted - * as an extension type according to {@link Model::extensionNameToPrefix}. -%/kind -%kind hal_1.0 - * Data type of the operand. -%/kind - */ - OperandType type%{init_pod}; - - /** - * Dimensions of the operand. - * - * For a scalar operand, dimensions.size() must be 0. -%kind hal_1.0 - * - * For a tensor operand, dimensions.size() must be at least 1; - * however, any of the dimensions may be unspecified. 
-%/kind - * - * A tensor operand with all dimensions specified has "fully - * specified" dimensions. Whenever possible (i.e., whenever the - * dimensions are known at model construction time), a tensor - * operand should have (but is not required to have) fully - * specified dimensions, in order to enable the best possible - * performance. - * - * If a tensor operand's dimensions are not fully specified, the - * dimensions of the operand are deduced from the operand - * dimensions and values of the operation for which that operand -%kind hal_1.0 hal_1.1 hal_1.2 - * is an output. -%else - * is an output or from the corresponding {@link OperationType::IF} or - * {@link OperationType::WHILE} operation input operand dimensions in the - * case of referenced subgraph input operands. -%/kind - * - * In the following situations, a tensor operand's dimensions must - * be fully specified: - * -%kind canonical - * %{li} The operand has lifetime CONSTANT_COPY, CONSTANT_REFERENCE, or - * POINTER. -%else - * %{li} The operand has lifetime CONSTANT_COPY or - * CONSTANT_REFERENCE. -%/kind - * -%kind hal_1.0 - * %{li} The operand has lifetime MODEL_INPUT or MODEL_OUTPUT. Fully - * specified dimensions must either be present in the -%/kind -%kind hal_1.2 - * %{li} The operand has lifetime MODEL_INPUT. Fully - * specified dimensions must either be present in the -%/kind -%kind canonical hal_1.3+ - * %{li} The operand has lifetime SUBGRAPH_INPUT and belongs to the main - * subgraph. Fully specified dimensions must either be present in the -%/kind - * Operand or they must be provided in the corresponding - * RequestArgument. -%kind hal_1.0 - * EXCEPTION: If the input or output is optional and omitted -%else - * EXCEPTION: If the input is optional and omitted -%/kind - * (by setting the hasNoValue field of the corresponding - * RequestArgument to true) then it need not have fully - * specified dimensions. 
- * - * A tensor operand with some number of unspecified dimensions is - * represented by setting each unspecified dimension to 0. -%kind canonical hal_1.2+ - * - * A tensor operand with unspecified rank is represented by providing - * an empty dimensions vector. -%/kind - */ - %{Dimensions} dimensions; -%kind hal* - - /** - * The number of times this operand appears as an operation input. - * - * (For example, if this operand appears once in one operation's - * input list, and three times in another operation's input list, - * then numberOfConsumers = 4.) - */ - uint32_t numberOfConsumers; -%/kind - - /** - * Quantized scale of the operand. - * -%kind hal_1.0 - * Only applicable if the operand is of type TENSOR_QUANT8_ASYMM or - * TENSOR_INT32. -%else - * Must be 0 when not applicable to an operand type. - * - * See {@link OperandType}. -%/kind - */ - float scale%{init_float}; - - /** - * Quantized zero-point offset of the operand. - * -%kind hal_1.0 - * Only applicable if the operand is of type TENSOR_QUANT8_ASYMM. -%else - * Must be 0 when not applicable to an operand type. - * - * See {@link OperandType}. -%/kind - */ - int32_t zeroPoint%{init_int}; - - /** - * How the operand is used. - */ - %{concat_or_skip_first Operand LifeTime} lifetime%{init_pod}; - - /** - * Where to find the data for this operand. -%kind hal_1.0 hal_1.1 hal_1.2 - * If the lifetime is TEMPORARY_VARIABLE, MODEL_INPUT, MODEL_OUTPUT, or - * NO_VALUE: -%else - * If the lifetime is TEMPORARY_VARIABLE, SUBGRAPH_INPUT, SUBGRAPH_OUTPUT, - * or NO_VALUE: -%/kind - * - All the fields must be 0. - * If the lifetime is CONSTANT_COPY: -%insert location_pointer_is_null - * - location.poolIndex is 0. - * - location.offset is the offset in bytes into Model%{::}operandValues. - * - location.length is set. -%kind canonical - * - location.padding is 0. -%/kind - * If the lifetime is CONSTANT_REFERENCE: -%insert location_pointer_is_null - * - location.poolIndex is set. 
- * - location.offset is the offset in bytes into the specified pool. - * - location.length is set. -%kind canonical - * - location.padding is set. -%/kind -%kind canonical hal_1.3+ - * If the lifetime is SUBGRAPH: -%insert location_pointer_is_null - * - location.poolIndex is 0. - * - location.offset is the index of the referenced subgraph in - * {@link Model::referenced}. - * - location.length is 0. -%/kind -%kind canonical - * - location.padding is 0. -%/kind -%kind canonical - * If the lifetime is POINTER: - * - location.pointer is non-null. - * - location.poolIndex is 0. - * - location.offset is 0. - * - location.length is set. - * - location.padding is 0. -%/kind - */ - DataLocation location; -%kind hal_1.2 - -%insert-indented 4 ExtraParams_Comment - safe_union ExtraParams { -%insert-indented 8 ExtraParams_none_Comment - Monostate none; - -%insert-indented 8 ExtraParams_channelQuant_Comment - SymmPerChannelQuantParams channelQuant; - -%insert-indented 8 ExtraParams_extension_Comment - vec<uint8_t> extension; - } extraParams; -%/kind -%kind canonical hal_1.3 - -%insert-indented 4 ExtraParams_Comment - %{concat_or_skip_first @1.2::Operand. ExtraParams} extraParams; -%/kind -}; -%/section - -%kind canonical -%define OperandValues OperandValues -%define SharedMemory SharedMemory -%else -%define OperandValues vec<uint8_t> -%define SharedMemory memory -%/kind - -%section Model_1.0 - /** - * A byte buffer containing operand data that were copied into the model. - * - * An operand's value must be located here if and only if Operand::lifetime - * equals %{OperandLifeTime}::CONSTANT_COPY. - */ - %{OperandValues} operandValues; - - /** - * A collection of shared memory pools containing operand values. - * - * An operand's value must be located here if and only if Operand::lifetime - * equals %{OperandLifeTime}::CONSTANT_REFERENCE. 
- */ - %{vec}<%{SharedMemory}> pools; -%/section - -%section Model_1.1 - /** - * 'true' indicates TENSOR_FLOAT32 may be calculated with range and/or - * precision as low as that of the IEEE 754 16-bit floating-point format. - * 'false' indicates TENSOR_FLOAT32 must be calculated using at least the - * range and precision of the IEEE 754 32-bit floating-point format. - */ - bool relaxComputationFloat32toFloat16%{init_bool}; -%/section - -%section Model_1.2 - /** - * The mapping between extension names and prefixes of operand and - * operation type values. - * -%kind canonical - * An operand or operation whose numeric type value is equal to or greater - * than (1 << kExtensionTypeBits) should be interpreted -%/kind -%kind hal* - * An operand or operation whose numeric type value is above - * {@link OperandTypeRange::BASE_MAX} or - * {@link OperationTypeRange::BASE_MAX} respectively should be interpreted -%/kind - * as an extension operand. The low -%kind hal_1.2 - * {@link Model::ExtensionTypeEncoding::LOW_BITS_TYPE} bits of the value - * correspond to the type ID within the extension and the high - * {@link Model::ExtensionTypeEncoding::HIGH_BITS_PREFIX} bits encode -%/kind -%kind hal_1.3 - * {@link @1.2::Model::ExtensionTypeEncoding::LOW_BITS_TYPE} bits of the - * value correspond to the type ID within the extension and the high - * {@link @1.2::Model::ExtensionTypeEncoding::HIGH_BITS_PREFIX} bits encode -%/kind -%kind canonical - * {@link kExtensionTypeBits} bits of the value correspond to the type ID - * within the extension and the high {@link kExtensionPrefixBits} bits encode -%/kind - * the "prefix", which maps uniquely to the extension name. - * - * For example, if a model contains an operation whose value is - * 0xAAAABBBB and extensionNameToPrefix contains an entry with - * prefix=0xAAAA and name="vendor.test.test_extension", then - * the operation should be interpreted as the operation 0xBBBB - * of the extension named vendor.test.test_extension. 
- * - * This is a one-to-one correspondence. That is, there must be at most one - * prefix corresponding to each extension name and at most one extension - * name corresponding to each prefix. - */ -%kind hal_1.3 - %{vec}<@1.2::Model.ExtensionNameAndPrefix> extensionNameToPrefix; -%else - %{vec}<ExtensionNameAndPrefix> extensionNameToPrefix; -%/kind -%/section - -%section Model_1.3_main_and_referenced_subgraphs - /** - * The top-level subgraph. - */ - Subgraph main; - - /** - * Referenced subgraphs. - * - * Each subgraph is referenced by the main subgraph or at least one other - * referenced subgraph. - * - * There must be no reference cycles. - */ - %{vec}<Subgraph> referenced; -%/section - -%section Subgraph_fields - /** - * All operands included in the %{model_or_subgraph}. - */ - %{vec}<Operand> operands; - - /** - * All operations included in the %{model_or_subgraph}. - * - * The operations are sorted into execution order. Every operand - * with lifetime %{MODEL_or_SUBGRAPH}_OUTPUT or TEMPORARY_VARIABLE must be - * written before it is read. - */ - %{vec}<Operation> operations; - - /** - * Input indexes of the %{model_or_subgraph}. There must be at least one. - * - * Each value corresponds to the index of the operand in "operands". - */ - %{vec}<uint32_t> inputIndexes; - - /** - * Output indexes of the %{model_or_subgraph}. There must be at least one. - * - * Each value corresponds to the index of the operand in "operands". - */ - %{vec}<uint32_t> outputIndexes; -%/section - -%section Subgraph -/** - * An excerpt of the execution graph. - */ -struct Subgraph { -%insert Subgraph_fields -}; -%/section - -%section ExtensionNameAndPrefix -/** - * A correspondence between an extension name and a prefix of operand and - * operation type values. - */ -struct ExtensionNameAndPrefix { - /** - * The extension name. - * - * See {@link Extension::name} for the format specification. - */ - %{string} name; - - /** - * The unique extension identifier within the model. 
- * - * See {@link Model::extensionNameToPrefix}. - */ - uint16_t prefix%{init_int}; -}; -%/section - -%section ExtensionTypeEncoding -/** - * Numeric values of extension operand and operation types have the - * following structure: - * - 16 high bits represent the "prefix", which corresponds uniquely to the - * extension name. - * - 16 low bits represent the type ID within the extension. - */ -%kind canonical -constexpr uint8_t kExtensionTypeBits = 16; -constexpr uint8_t kExtensionPrefixBits = 16; -constexpr uint32_t kTypeWithinExtensionMask = 0xFFFF; -%else -enum ExtensionTypeEncoding : uint8_t { - HIGH_BITS_PREFIX = 16, - LOW_BITS_TYPE = 16, -}; -%/kind -%/section - -%section Model -/** - * A Neural Network Model. - * - * This includes not only the execution graph, but also constant data such as - * weights or scalars added at construction time. The only information that -%kind hal_1.0 - * might not be known is the shape of the input tensors. -%else - * may not be known is the shape of the input tensors. -%/kind - */ -struct Model { -%kind canonical -%insert-indented 4 Subgraph - - class OperandValues { - public: - OperandValues(); - OperandValues(const uint8_t* data, size_t length); - - // Append a segment of memory (starting at `data` with `length` number of bytes) to the back - // of `OperandValues`, adding padding as necessary so that the appended data is aligned. - // Refer to `getAlignmentForLength` for more information on alignment (such as what the - // current alignments are for different data lengths). 
- DataLocation append(const uint8_t* data, size_t length); - - const uint8_t* data() const; - size_t size() const; - - private: - std::vector<uint8_t> mData; - }; - -%insert-indented 4 ExtensionNameAndPrefix - -%insert Model_1.3_main_and_referenced_subgraphs - -%insert Model_1.0 - -%insert Model_1.1 - -%insert Model_1.2 -%/kind -%kind hal_1.0 -%insert Subgraph_fields - -%insert Model_1.0 -%/kind -%kind hal_1.1 -%insert Subgraph_fields - -%insert Model_1.0 - -%insert Model_1.1 -%/kind -%kind hal_1.2 -%insert Subgraph_fields - -%insert Model_1.0 - -%insert Model_1.1 - -%insert Model_1.2 - -%insert-indented 4 ExtensionNameAndPrefix - -%insert-indented 4 ExtensionTypeEncoding -%/kind -%kind hal_1.3 -%insert Model_1.3_main_and_referenced_subgraphs - -%insert Model_1.0 - -%insert Model_1.1 - -%insert Model_1.2 -%/kind -}; -%/section - -%section BufferDesc -/** - * A buffer descriptor. Describes the properties of a buffer. - */ -struct BufferDesc { - /** - * Dimensions of the buffer. May have unknown dimensions or rank. A buffer with some number - * of unspecified dimensions is represented by setting each unspecified dimension to 0. A - * buffer with unspecified rank is represented by providing an empty dimensions vector. - */ - %{Dimensions} dimensions; -}; -%/section - -%section BufferRole -/** - * Describes a role of an input or output to a prepared model. - */ -struct BufferRole { - /** - * The index of the IPreparedModel within the "preparedModel" argument passed in - * IDevice::allocate. - */ - uint32_t modelIndex%{init_int}; - - /** - * The index of the input or output operand. - */ - uint32_t ioIndex%{init_int}; - - /** - * A floating-point value within the range (0.0, 1.0]. Describes how likely the - * buffer is to be used in the specified role. This is provided as a hint to - * optimize the case when multiple roles prefer different buffer locations or data - * layouts. 
- */ -%kind canonical - float probability%{init_float}; -%else - float frequency%{init_float}; -%/kind -}; -%/section - -%kind canonical -%define inputIndexes Model::main::inputIndexes -%define outputIndexes Model::main::outputIndexes -%/kind -%kind hal_1.3 -%define inputIndexes Model.main.inputIndexes -%define outputIndexes Model.main.outputIndexes -%/kind -%kind hal_1.0 hal_1.1 hal_1.2 -%define inputIndexes Model.inputIndexes -%define outputIndexes Model.outputIndexes -%/kind -%kind ndk -%define inputIndexes @@@NOT_DEFINED@@@ -%define outputIndexes @@@NOT_DEFINED@@@ -%/kind - -%kind canonical -%define inputs inputs -%define outputs outputs -%else -%define inputs input -%define outputs output -%/kind - -%section Request_inputs_and_outputs - /** - * Input data and information to be used in the execution of a prepared - * model. - * - * The index of the input corresponds to the index in %{inputIndexes}. - * E.g., %{inputs}[i] corresponds to %{inputIndexes}[i]. - */ - %{vec}<%{concat_or_skip_first Request Argument}> inputs; - - /** - * Output data and information to be used in the execution of a prepared - * model. - * - * The index of the output corresponds to the index in %{outputIndexes}. - * E.g., %{outputs}[i] corresponds to %{outputIndexes}[i]. - */ - %{vec}<%{concat_or_skip_first Request Argument}> outputs; -%/section - -%section Request_pools - /** -%kind hal_1.0 - * A collection of shared memory pools containing operand data for both the -%else - * A collection of memory pools containing operand data for both the -%/kind - * inputs and the outputs to a model. - */ -%kind hal_1.0 - vec<memory> pools; -%else - %{vec}<MemoryPool> pools; -%/kind -%/section - -%section Request_MemoryPool_Comment -/** - * A memory pool. - */ -%/section - -%section RequestArgument -/** - * Metadata information specifying the location of the input or output data and - * any updates to the input or output operand. 
- */ -struct %{concat_or_skip_first Request Argument} { -%kind canonical - enum class LifeTime { - POOL = 0, - NO_VALUE = 1, - POINTER = 2, - }; - -%/kind -%kind hal_1.0 - /** - * If true, the argument does not have a value. This can be used for - * operations that take optional arguments. If true, the fields of location - * are set to 0 and the dimensions vector is left empty. - */ - bool hasNoValue; -%/kind -%kind canonical - LifeTime lifetime%{init_pod}; -%/kind - - /** - * The location within one of the memory pools passed in the Request. - */ - DataLocation location; - - /** - * Updated dimension information. - * - * If dimensions.size() > 0, dimension information was provided - * along with the argument. This can be the case for models that - * accept inputs of varying size. This can't change the rank, just - * the value of the dimensions that were unspecified in the - * model. If dimensions.size() > 0, then all dimensions must be - * specified here; and any dimension that was specified in the - * model must have the same value here. - * - * If the dimensions in the model are not fully specified, then - * they must be fully specified here, unless hasNoValue is set to - * true. If the dimensions in the model are fully specified, then - * either dimensions.size() may be 0, or the dimensions in the - * model must be identical to the dimensions here. - */ - %{Dimensions} dimensions; -}; -%/section - -%section Request -/** - * Inputs to be sent to and outputs to be retrieved from a prepared model. - * - * A Request serves two primary tasks: - * 1) Provides the input and output data to be used when executing the model. - * 2) Specifies any updates to the input operand metadata that were left - * unspecified at model preparation time. - * - * An output must not overlap with any other output, with an input, or - * with an operand of lifetime CONSTANT_REFERENCE. 
- */ -struct Request { -%kind canonical -%insert-indented 4 RequestArgument - - /** - * Specifies a driver-managed buffer. It is the token corresponding to an - * IBuffer returned from IDevice::allocate, and is specific to the IDevice - * object. - */ - enum class MemoryDomainToken : uint32_t {}; - -%insert-indented 4 Request_MemoryPool_Comment - using MemoryPool = std::variant<SharedMemory, MemoryDomainToken, SharedBuffer>; - -%/kind -%insert Request_inputs_and_outputs -%kind hal_1.3 - -%insert-indented 4 Request_MemoryPool_Comment - safe_union MemoryPool { - /** - * Specifies a client-managed shared memory pool. - */ - memory hidlMemory; - - /** - * Specifies a driver-managed buffer. It is the token returned from IDevice::allocate, - * and is specific to the IDevice object. - */ - uint32_t token; - }; -%/kind - -%insert Request_pools -}; -%/section - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
diff --git a/tools/ion_watcher/Android.bp b/tools/ion_watcher/Android.bp index c8c014b..bceb227 100644 --- a/tools/ion_watcher/Android.bp +++ b/tools/ion_watcher/Android.bp
@@ -14,10 +14,6 @@ * limitations under the License. */ -package { - default_applicable_licenses: ["Android-Apache-2.0"], -} - cc_binary { name: "ion_watcher", srcs: [
diff --git a/tools/ion_watcher/ion_watcher.cpp b/tools/ion_watcher/ion_watcher.cpp index cf1ecbf..1a79b38 100644 --- a/tools/ion_watcher/ion_watcher.cpp +++ b/tools/ion_watcher/ion_watcher.cpp
@@ -16,14 +16,14 @@ #define LOG_TAG "IonWatcher" -#include <android/log.h> #include <stdio.h> #include <unistd.h> - #include <fstream> #include <iostream> #include <sstream> #include <string> + +#include <android/log.h> #define ATRACE_TAG ATRACE_TAG_NNAPI #include <utils/Trace.h>
diff --git a/tools/systrace_parser/parser/test/cpu.html b/tools/systrace_parser/parser/test/cpu.html index 2c7fbc0..8428d16 100644 --- a/tools/systrace_parser/parser/test/cpu.html +++ b/tools/systrace_parser/parser/test/cpu.html
@@ -4734,7 +4734,7 @@ visitedDomainIds.add(current.domainId);const outgoingTransformers=this.transformerMapByDomainId_[current.domainId];if(!outgoingTransformers)continue;for(const outgoingDomainId in outgoingTransformers){const toNextDomainTransformer=outgoingTransformers[outgoingDomainId];const toCurrentDomainTransformer=current.transformer;queue.push({domainId:outgoingDomainId,transformer:Transformer.compose(toNextDomainTransformer,toCurrentDomainTransformer)});}} return undefined;},selectModelDomainId_(){this.ensureAllDomainsAreConnected_();for(const chromeDomainId of POSSIBLE_CHROME_CLOCK_DOMAINS){if(this.domainsSeen_.has(chromeDomainId)){this.modelDomainId_=chromeDomainId;return;}} const domainsSeenArray=Array.from(this.domainsSeen_);domainsSeenArray.sort();this.modelDomainId_=domainsSeenArray[0];},ensureAllDomainsAreConnected_(){let firstDomainId=undefined;for(const domainId of this.domainsSeen_){if(!firstDomainId){firstDomainId=domainId;continue;} -if(!this.getTransformerBetween_(firstDomainId,domainId)){throw new Error('Unable to select a primary clock domain because no '+'path can be found from "'+firstDomainId+'" to "'+domainId+'".');}} +if(!this.getTransformerBetween_(firstDomainId,domainId)){throw new Error('Unable to select a master clock domain because no '+'path can be found from "'+firstDomainId+'" to "'+domainId+'".');}} return true;},onDomainSeen_(domainId){if(domainId===ClockDomainId.UNKNOWN_CHROME_LEGACY&&!this.domainsSeen_.has(ClockDomainId.UNKNOWN_CHROME_LEGACY)){for(const chromeDomainId of POSSIBLE_CHROME_CLOCK_DOMAINS){if(chromeDomainId===ClockDomainId.UNKNOWN_CHROME_LEGACY){continue;} this.collapseDomains_(ClockDomainId.UNKNOWN_CHROME_LEGACY,chromeDomainId);}} this.domainsSeen_.add(domainId);},onSyncCompleted_(marker1,marker2){const forwardTransformer=Transformer.fromMarkers(marker1,marker2);const backwardTransformer=Transformer.fromMarkers(marker2,marker1);const 
existingTransformer=this.getOrCreateTransformerMap_(marker1.domainId)[marker2.domainId];if(!existingTransformer||forwardTransformer.error<existingTransformer.error){this.getOrCreateTransformerMap_(marker1.domainId)[marker2.domainId]=forwardTransformer;this.getOrCreateTransformerMap_(marker2.domainId)[marker1.domainId]=backwardTransformer;}},collapseDomains_(domain1Id,domain2Id){this.getOrCreateTransformerMap_(domain1Id)[domain2Id]=this.getOrCreateTransformerMap_(domain2Id)[domain1Id]=Transformer.IDENTITY;},getOrCreateTransformerMap_(domainId){if(!this.transformerMapByDomainId_[domainId]){this.transformerMapByDomainId_[domainId]={};} @@ -5152,7 +5152,7 @@ static uint8ArrayToString_(arr){if(typeof TextDecoder!=='undefined'){const decoder=new TextDecoder('utf-8');return decoder.decode(arr);} const c=[];for(let i=0;i<arr.length;i+=MAX_FUNCTION_ARGS_COUNT){c.push(String.fromCharCode(...arr.subarray(i,i+MAX_FUNCTION_ARGS_COUNT)));} return c.join('');}} -return{InMemoryTraceStream,};});!function(t){if("object"==typeof exports&&"undefined"!=typeof module)module.exports=t();else if("function"==typeof define&&define.amd)define([],t);else{("undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:this).pako=t()}}(function(){return function t(e,a,i){function n(s,o){if(!a[s]){if(!e[s]){var l="function"==typeof require&&require;if(!o&&l)return l(s,!0);if(r)return r(s,!0);var h=new Error("Cannot find module '"+s+"'");throw h.code="MODULE_NOT_FOUND",h}var d=a[s]={exports:{}};e[s][0].call(d.exports,function(t){var a=e[s][1][t];return n(a||t)},d,d.exports,t,e,a,i)}return a[s].exports}for(var r="function"==typeof require&&require,s=0;s<i.length;s++)n(i[s]);return n}({1:[function(t,e,a){"use strict";function i(t){if(!(this instanceof i))return new i(t);this.options=s.assign({level:_,method:c,chunkSize:16384,windowBits:15,memLevel:8,strategy:u,to:""},t||{});var 
e=this.options;e.raw&&e.windowBits>0?e.windowBits=-e.windowBits:e.gzip&&e.windowBits>0&&e.windowBits<16&&(e.windowBits+=16),this.err=0,this.msg="",this.ended=!1,this.chunks=[],this.strm=new h,this.strm.avail_out=0;var a=r.deflateInit2(this.strm,e.level,e.method,e.windowBits,e.memLevel,e.strategy);if(a!==f)throw new Error(l[a]);if(e.header&&r.deflateSetHeader(this.strm,e.header),e.dictionary){var n;if(n="string"==typeof e.dictionary?o.string2buf(e.dictionary):"[object ArrayBuffer]"===d.call(e.dictionary)?new Uint8Array(e.dictionary):e.dictionary,(a=r.deflateSetDictionary(this.strm,n))!==f)throw new Error(l[a]);this._dict_set=!0}}function n(t,e){var a=new i(e);if(a.push(t,!0),a.err)throw a.msg||l[a.err];return a.result}var r=t("./zlib/deflate"),s=t("./utils/common"),o=t("./utils/strings"),l=t("./zlib/messages"),h=t("./zlib/zstream"),d=Object.prototype.toString,f=0,_=-1,u=0,c=8;i.prototype.push=function(t,e){var a,i,n=this.strm,l=this.options.chunkSize;if(this.ended)return!1;i=e===~~e?e:!0===e?4:0,"string"==typeof t?n.input=o.string2buf(t):"[object ArrayBuffer]"===d.call(t)?n.input=new Uint8Array(t):n.input=t,n.next_in=0,n.avail_in=n.input.length;do{if(0===n.avail_out&&(n.output=new s.Buf8(l),n.next_out=0,n.avail_out=l),1!==(a=r.deflate(n,i))&&a!==f)return this.onEnd(a),this.ended=!0,!1;0!==n.avail_out&&(0!==n.avail_in||4!==i&&2!==i)||("string"===this.options.to?this.onData(o.buf2binstring(s.shrinkBuf(n.output,n.next_out))):this.onData(s.shrinkBuf(n.output,n.next_out)))}while((n.avail_in>0||0===n.avail_out)&&1!==a);return 4===i?(a=r.deflateEnd(this.strm),this.onEnd(a),this.ended=!0,a===f):2!==i||(this.onEnd(f),n.avail_out=0,!0)},i.prototype.onData=function(t){this.chunks.push(t)},i.prototype.onEnd=function(t){t===f&&("string"===this.options.to?this.result=this.chunks.join(""):this.result=s.flattenChunks(this.chunks)),this.chunks=[],this.err=t,this.msg=this.strm.msg},a.Deflate=i,a.deflate=n,a.deflateRaw=function(t,e){return 
e=e||{},e.raw=!0,n(t,e)},a.gzip=function(t,e){return e=e||{},e.gzip=!0,n(t,e)}},{"./utils/common":3,"./utils/strings":4,"./zlib/deflate":8,"./zlib/messages":13,"./zlib/zstream":15}],2:[function(t,e,a){"use strict";function i(t){if(!(this instanceof i))return new i(t);this.options=s.assign({chunkSize:16384,windowBits:0,to:""},t||{});var e=this.options;e.raw&&e.windowBits>=0&&e.windowBits<16&&(e.windowBits=-e.windowBits,0===e.windowBits&&(e.windowBits=-15)),!(e.windowBits>=0&&e.windowBits<16)||t&&t.windowBits||(e.windowBits+=32),e.windowBits>15&&e.windowBits<48&&0==(15&e.windowBits)&&(e.windowBits|=15),this.err=0,this.msg="",this.ended=!1,this.chunks=[],this.strm=new d,this.strm.avail_out=0;var a=r.inflateInit2(this.strm,e.windowBits);if(a!==l.Z_OK)throw new Error(h[a]);this.header=new f,r.inflateGetHeader(this.strm,this.header)}function n(t,e){var a=new i(e);if(a.push(t,!0),a.err)throw a.msg||h[a.err];return a.result}var r=t("./zlib/inflate"),s=t("./utils/common"),o=t("./utils/strings"),l=t("./zlib/constants"),h=t("./zlib/messages"),d=t("./zlib/zstream"),f=t("./zlib/gzheader"),_=Object.prototype.toString;i.prototype.push=function(t,e){var a,i,n,h,d,f,u=this.strm,c=this.options.chunkSize,b=this.options.dictionary,g=!1;if(this.ended)return!1;i=e===~~e?e:!0===e?l.Z_FINISH:l.Z_NO_FLUSH,"string"==typeof t?u.input=o.binstring2buf(t):"[object ArrayBuffer]"===_.call(t)?u.input=new Uint8Array(t):u.input=t,u.next_in=0,u.avail_in=u.input.length;do{if(0===u.avail_out&&(u.output=new s.Buf8(c),u.next_out=0,u.avail_out=c),(a=r.inflate(u,l.Z_NO_FLUSH))===l.Z_NEED_DICT&&b&&(f="string"==typeof b?o.string2buf(b):"[object ArrayBuffer]"===_.call(b)?new Uint8Array(b):b,a=r.inflateSetDictionary(this.strm,f)),a===l.Z_BUF_ERROR&&!0===g&&(a=l.Z_OK,g=!1),a!==l.Z_STREAM_END&&a!==l.Z_OK)return 
this.onEnd(a),this.ended=!0,!1;u.next_out&&(0!==u.avail_out&&a!==l.Z_STREAM_END&&(0!==u.avail_in||i!==l.Z_FINISH&&i!==l.Z_SYNC_FLUSH)||("string"===this.options.to?(n=o.utf8border(u.output,u.next_out),h=u.next_out-n,d=o.buf2string(u.output,n),u.next_out=h,u.avail_out=c-h,h&&s.arraySet(u.output,u.output,n,h,0),this.onData(d)):this.onData(s.shrinkBuf(u.output,u.next_out)))),0===u.avail_in&&0===u.avail_out&&(g=!0)}while((u.avail_in>0||0===u.avail_out)&&a!==l.Z_STREAM_END);return a===l.Z_STREAM_END&&(i=l.Z_FINISH),i===l.Z_FINISH?(a=r.inflateEnd(this.strm),this.onEnd(a),this.ended=!0,a===l.Z_OK):i!==l.Z_SYNC_FLUSH||(this.onEnd(l.Z_OK),u.avail_out=0,!0)},i.prototype.onData=function(t){this.chunks.push(t)},i.prototype.onEnd=function(t){t===l.Z_OK&&("string"===this.options.to?this.result=this.chunks.join(""):this.result=s.flattenChunks(this.chunks)),this.chunks=[],this.err=t,this.msg=this.strm.msg},a.Inflate=i,a.inflate=n,a.inflateRaw=function(t,e){return e=e||{},e.raw=!0,n(t,e)},a.ungzip=n},{"./utils/common":3,"./utils/strings":4,"./zlib/constants":6,"./zlib/gzheader":9,"./zlib/inflate":11,"./zlib/messages":13,"./zlib/zstream":15}],3:[function(t,e,a){"use strict";function i(t,e){return Object.prototype.hasOwnProperty.call(t,e)}var n="undefined"!=typeof Uint8Array&&"undefined"!=typeof Uint16Array&&"undefined"!=typeof Int32Array;a.assign=function(t){for(var e=Array.prototype.slice.call(arguments,1);e.length;){var a=e.shift();if(a){if("object"!=typeof a)throw new TypeError(a+"must be non-object");for(var n in a)i(a,n)&&(t[n]=a[n])}}return t},a.shrinkBuf=function(t,e){return t.length===e?t:t.subarray?t.subarray(0,e):(t.length=e,t)};var r={arraySet:function(t,e,a,i,n){if(e.subarray&&t.subarray)t.set(e.subarray(a,a+i),n);else for(var r=0;r<i;r++)t[n+r]=e[a+r]},flattenChunks:function(t){var e,a,i,n,r,s;for(i=0,e=0,a=t.length;e<a;e++)i+=t[e].length;for(s=new Uint8Array(i),n=0,e=0,a=t.length;e<a;e++)r=t[e],s.set(r,n),n+=r.length;return s}},s={arraySet:function(t,e,a,i,n){for(var 
r=0;r<i;r++)t[n+r]=e[a+r]},flattenChunks:function(t){return[].concat.apply([],t)}};a.setTyped=function(t){t?(a.Buf8=Uint8Array,a.Buf16=Uint16Array,a.Buf32=Int32Array,a.assign(a,r)):(a.Buf8=Array,a.Buf16=Array,a.Buf32=Array,a.assign(a,s))},a.setTyped(n)},{}],4:[function(t,e,a){"use strict";function i(t,e){if(e<65537&&(t.subarray&&s||!t.subarray&&r))return String.fromCharCode.apply(null,n.shrinkBuf(t,e));for(var a="",i=0;i<e;i++)a+=String.fromCharCode(t[i]);return a}var n=t("./common"),r=!0,s=!0;try{String.fromCharCode.apply(null,[0])}catch(t){r=!1}try{String.fromCharCode.apply(null,new Uint8Array(1))}catch(t){s=!1}for(var o=new n.Buf8(256),l=0;l<256;l++)o[l]=l>=252?6:l>=248?5:l>=240?4:l>=224?3:l>=192?2:1;o[254]=o[254]=1,a.string2buf=function(t){var e,a,i,r,s,o=t.length,l=0;for(r=0;r<o;r++)55296==(64512&(a=t.charCodeAt(r)))&&r+1<o&&56320==(64512&(i=t.charCodeAt(r+1)))&&(a=65536+(a-55296<<10)+(i-56320),r++),l+=a<128?1:a<2048?2:a<65536?3:4;for(e=new n.Buf8(l),s=0,r=0;s<l;r++)55296==(64512&(a=t.charCodeAt(r)))&&r+1<o&&56320==(64512&(i=t.charCodeAt(r+1)))&&(a=65536+(a-55296<<10)+(i-56320),r++),a<128?e[s++]=a:a<2048?(e[s++]=192|a>>>6,e[s++]=128|63&a):a<65536?(e[s++]=224|a>>>12,e[s++]=128|a>>>6&63,e[s++]=128|63&a):(e[s++]=240|a>>>18,e[s++]=128|a>>>12&63,e[s++]=128|a>>>6&63,e[s++]=128|63&a);return e},a.buf2binstring=function(t){return i(t,t.length)},a.binstring2buf=function(t){for(var e=new n.Buf8(t.length),a=0,i=e.length;a<i;a++)e[a]=t.charCodeAt(a);return e},a.buf2string=function(t,e){var a,n,r,s,l=e||t.length,h=new Array(2*l);for(n=0,a=0;a<l;)if((r=t[a++])<128)h[n++]=r;else if((s=o[r])>4)h[n++]=65533,a+=s-1;else{for(r&=2===s?31:3===s?15:7;s>1&&a<l;)r=r<<6|63&t[a++],s--;s>1?h[n++]=65533:r<65536?h[n++]=r:(r-=65536,h[n++]=55296|r>>10&1023,h[n++]=56320|1023&r)}return i(h,n)},a.utf8border=function(t,e){var a;for((e=e||t.length)>t.length&&(e=t.length),a=e-1;a>=0&&128==(192&t[a]);)a--;return a<0?e:0===a?e:a+o[t[a]]>e?a:e}},{"./common":3}],5:[function(t,e,a){"use 
strict";e.exports=function(t,e,a,i){for(var n=65535&t|0,r=t>>>16&65535|0,s=0;0!==a;){a-=s=a>2e3?2e3:a;do{r=r+(n=n+e[i++]|0)|0}while(--s);n%=65521,r%=65521}return n|r<<16|0}},{}],6:[function(t,e,a){"use strict";e.exports={Z_NO_FLUSH:0,Z_PARTIAL_FLUSH:1,Z_SYNC_FLUSH:2,Z_FULL_FLUSH:3,Z_FINISH:4,Z_BLOCK:5,Z_TREES:6,Z_OK:0,Z_STREAM_END:1,Z_NEED_DICT:2,Z_ERRNO:-1,Z_STREAM_ERROR:-2,Z_DATA_ERROR:-3,Z_BUF_ERROR:-5,Z_NO_COMPRESSION:0,Z_BEST_SPEED:1,Z_BEST_COMPRESSION:9,Z_DEFAULT_COMPRESSION:-1,Z_FILTERED:1,Z_HUFFMAN_ONLY:2,Z_RLE:3,Z_FIXED:4,Z_DEFAULT_STRATEGY:0,Z_BINARY:0,Z_TEXT:1,Z_UNKNOWN:2,Z_DEFLATED:8}},{}],7:[function(t,e,a){"use strict";var i=function(){for(var t,e=[],a=0;a<256;a++){t=a;for(var i=0;i<8;i++)t=1&t?3988292384^t>>>1:t>>>1;e[a]=t}return e}();e.exports=function(t,e,a,n){var r=i,s=n+a;t^=-1;for(var o=n;o<s;o++)t=t>>>8^r[255&(t^e[o])];return-1^t}},{}],8:[function(t,e,a){"use strict";function i(t,e){return t.msg=A[e],e}function n(t){return(t<<1)-(t>4?9:0)}function r(t){for(var e=t.length;--e>=0;)t[e]=0}function s(t){var e=t.state,a=e.pending;a>t.avail_out&&(a=t.avail_out),0!==a&&(z.arraySet(t.output,e.pending_buf,e.pending_out,a,t.next_out),t.next_out+=a,e.pending_out+=a,t.total_out+=a,t.avail_out-=a,e.pending-=a,0===e.pending&&(e.pending_out=0))}function o(t,e){B._tr_flush_block(t,t.block_start>=0?t.block_start:-1,t.strstart-t.block_start,e),t.block_start=t.strstart,s(t.strm)}function l(t,e){t.pending_buf[t.pending++]=e}function h(t,e){t.pending_buf[t.pending++]=e>>>8&255,t.pending_buf[t.pending++]=255&e}function d(t,e,a,i){var n=t.avail_in;return n>i&&(n=i),0===n?0:(t.avail_in-=n,z.arraySet(e,t.input,t.next_in,n,a),1===t.state.wrap?t.adler=S(t.adler,e,n,a):2===t.state.wrap&&(t.adler=E(t.adler,e,n,a)),t.next_in+=n,t.total_in+=n,n)}function f(t,e){var 
a,i,n=t.max_chain_length,r=t.strstart,s=t.prev_length,o=t.nice_match,l=t.strstart>t.w_size-it?t.strstart-(t.w_size-it):0,h=t.window,d=t.w_mask,f=t.prev,_=t.strstart+at,u=h[r+s-1],c=h[r+s];t.prev_length>=t.good_match&&(n>>=2),o>t.lookahead&&(o=t.lookahead);do{if(a=e,h[a+s]===c&&h[a+s-1]===u&&h[a]===h[r]&&h[++a]===h[r+1]){r+=2,a++;do{}while(h[++r]===h[++a]&&h[++r]===h[++a]&&h[++r]===h[++a]&&h[++r]===h[++a]&&h[++r]===h[++a]&&h[++r]===h[++a]&&h[++r]===h[++a]&&h[++r]===h[++a]&&r<_);if(i=at-(_-r),r=_-at,i>s){if(t.match_start=e,s=i,i>=o)break;u=h[r+s-1],c=h[r+s]}}}while((e=f[e&d])>l&&0!=--n);return s<=t.lookahead?s:t.lookahead}function _(t){var e,a,i,n,r,s=t.w_size;do{if(n=t.window_size-t.lookahead-t.strstart,t.strstart>=s+(s-it)){z.arraySet(t.window,t.window,s,s,0),t.match_start-=s,t.strstart-=s,t.block_start-=s,e=a=t.hash_size;do{i=t.head[--e],t.head[e]=i>=s?i-s:0}while(--a);e=a=s;do{i=t.prev[--e],t.prev[e]=i>=s?i-s:0}while(--a);n+=s}if(0===t.strm.avail_in)break;if(a=d(t.strm,t.window,t.strstart+t.lookahead,n),t.lookahead+=a,t.lookahead+t.insert>=et)for(r=t.strstart-t.insert,t.ins_h=t.window[r],t.ins_h=(t.ins_h<<t.hash_shift^t.window[r+1])&t.hash_mask;t.insert&&(t.ins_h=(t.ins_h<<t.hash_shift^t.window[r+et-1])&t.hash_mask,t.prev[r&t.w_mask]=t.head[t.ins_h],t.head[t.ins_h]=r,r++,t.insert--,!(t.lookahead+t.insert<et)););}while(t.lookahead<it&&0!==t.strm.avail_in)}function u(t,e){for(var a,i;;){if(t.lookahead<it){if(_(t),t.lookahead<it&&e===Z)return 
_t;if(0===t.lookahead)break}if(a=0,t.lookahead>=et&&(t.ins_h=(t.ins_h<<t.hash_shift^t.window[t.strstart+et-1])&t.hash_mask,a=t.prev[t.strstart&t.w_mask]=t.head[t.ins_h],t.head[t.ins_h]=t.strstart),0!==a&&t.strstart-a<=t.w_size-it&&(t.match_length=f(t,a)),t.match_length>=et)if(i=B._tr_tally(t,t.strstart-t.match_start,t.match_length-et),t.lookahead-=t.match_length,t.match_length<=t.max_lazy_match&&t.lookahead>=et){t.match_length--;do{t.strstart++,t.ins_h=(t.ins_h<<t.hash_shift^t.window[t.strstart+et-1])&t.hash_mask,a=t.prev[t.strstart&t.w_mask]=t.head[t.ins_h],t.head[t.ins_h]=t.strstart}while(0!=--t.match_length);t.strstart++}else t.strstart+=t.match_length,t.match_length=0,t.ins_h=t.window[t.strstart],t.ins_h=(t.ins_h<<t.hash_shift^t.window[t.strstart+1])&t.hash_mask;else i=B._tr_tally(t,0,t.window[t.strstart]),t.lookahead--,t.strstart++;if(i&&(o(t,!1),0===t.strm.avail_out))return _t}return t.insert=t.strstart<et-1?t.strstart:et-1,e===N?(o(t,!0),0===t.strm.avail_out?ct:bt):t.last_lit&&(o(t,!1),0===t.strm.avail_out)?_t:ut}function c(t,e){for(var a,i,n;;){if(t.lookahead<it){if(_(t),t.lookahead<it&&e===Z)return 
_t;if(0===t.lookahead)break}if(a=0,t.lookahead>=et&&(t.ins_h=(t.ins_h<<t.hash_shift^t.window[t.strstart+et-1])&t.hash_mask,a=t.prev[t.strstart&t.w_mask]=t.head[t.ins_h],t.head[t.ins_h]=t.strstart),t.prev_length=t.match_length,t.prev_match=t.match_start,t.match_length=et-1,0!==a&&t.prev_length<t.max_lazy_match&&t.strstart-a<=t.w_size-it&&(t.match_length=f(t,a),t.match_length<=5&&(t.strategy===H||t.match_length===et&&t.strstart-t.match_start>4096)&&(t.match_length=et-1)),t.prev_length>=et&&t.match_length<=t.prev_length){n=t.strstart+t.lookahead-et,i=B._tr_tally(t,t.strstart-1-t.prev_match,t.prev_length-et),t.lookahead-=t.prev_length-1,t.prev_length-=2;do{++t.strstart<=n&&(t.ins_h=(t.ins_h<<t.hash_shift^t.window[t.strstart+et-1])&t.hash_mask,a=t.prev[t.strstart&t.w_mask]=t.head[t.ins_h],t.head[t.ins_h]=t.strstart)}while(0!=--t.prev_length);if(t.match_available=0,t.match_length=et-1,t.strstart++,i&&(o(t,!1),0===t.strm.avail_out))return _t}else if(t.match_available){if((i=B._tr_tally(t,0,t.window[t.strstart-1]))&&o(t,!1),t.strstart++,t.lookahead--,0===t.strm.avail_out)return _t}else t.match_available=1,t.strstart++,t.lookahead--}return t.match_available&&(i=B._tr_tally(t,0,t.window[t.strstart-1]),t.match_available=0),t.insert=t.strstart<et-1?t.strstart:et-1,e===N?(o(t,!0),0===t.strm.avail_out?ct:bt):t.last_lit&&(o(t,!1),0===t.strm.avail_out)?_t:ut}function b(t,e){for(var a,i,n,r,s=t.window;;){if(t.lookahead<=at){if(_(t),t.lookahead<=at&&e===Z)return 
_t;if(0===t.lookahead)break}if(t.match_length=0,t.lookahead>=et&&t.strstart>0&&(n=t.strstart-1,(i=s[n])===s[++n]&&i===s[++n]&&i===s[++n])){r=t.strstart+at;do{}while(i===s[++n]&&i===s[++n]&&i===s[++n]&&i===s[++n]&&i===s[++n]&&i===s[++n]&&i===s[++n]&&i===s[++n]&&n<r);t.match_length=at-(r-n),t.match_length>t.lookahead&&(t.match_length=t.lookahead)}if(t.match_length>=et?(a=B._tr_tally(t,1,t.match_length-et),t.lookahead-=t.match_length,t.strstart+=t.match_length,t.match_length=0):(a=B._tr_tally(t,0,t.window[t.strstart]),t.lookahead--,t.strstart++),a&&(o(t,!1),0===t.strm.avail_out))return _t}return t.insert=0,e===N?(o(t,!0),0===t.strm.avail_out?ct:bt):t.last_lit&&(o(t,!1),0===t.strm.avail_out)?_t:ut}function g(t,e){for(var a;;){if(0===t.lookahead&&(_(t),0===t.lookahead)){if(e===Z)return _t;break}if(t.match_length=0,a=B._tr_tally(t,0,t.window[t.strstart]),t.lookahead--,t.strstart++,a&&(o(t,!1),0===t.strm.avail_out))return _t}return t.insert=0,e===N?(o(t,!0),0===t.strm.avail_out?ct:bt):t.last_lit&&(o(t,!1),0===t.strm.avail_out)?_t:ut}function m(t,e,a,i,n){this.good_length=t,this.max_lazy=e,this.nice_length=a,this.max_chain=i,this.func=n}function w(t){t.window_size=2*t.w_size,r(t.head),t.max_lazy_match=x[t.level].max_lazy,t.good_match=x[t.level].good_length,t.nice_match=x[t.level].nice_length,t.max_chain_length=x[t.level].max_chain,t.strstart=0,t.block_start=0,t.lookahead=0,t.insert=0,t.match_length=t.prev_length=et-1,t.match_available=0,t.ins_h=0}function 
p(){this.strm=null,this.status=0,this.pending_buf=null,this.pending_buf_size=0,this.pending_out=0,this.pending=0,this.wrap=0,this.gzhead=null,this.gzindex=0,this.method=q,this.last_flush=-1,this.w_size=0,this.w_bits=0,this.w_mask=0,this.window=null,this.window_size=0,this.prev=null,this.head=null,this.ins_h=0,this.hash_size=0,this.hash_bits=0,this.hash_mask=0,this.hash_shift=0,this.block_start=0,this.match_length=0,this.prev_match=0,this.match_available=0,this.strstart=0,this.match_start=0,this.lookahead=0,this.prev_length=0,this.max_chain_length=0,this.max_lazy_match=0,this.level=0,this.strategy=0,this.good_match=0,this.nice_match=0,this.dyn_ltree=new z.Buf16(2*$),this.dyn_dtree=new z.Buf16(2*(2*Q+1)),this.bl_tree=new z.Buf16(2*(2*V+1)),r(this.dyn_ltree),r(this.dyn_dtree),r(this.bl_tree),this.l_desc=null,this.d_desc=null,this.bl_desc=null,this.bl_count=new z.Buf16(tt+1),this.heap=new z.Buf16(2*J+1),r(this.heap),this.heap_len=0,this.heap_max=0,this.depth=new z.Buf16(2*J+1),r(this.depth),this.l_buf=0,this.lit_bufsize=0,this.last_lit=0,this.d_buf=0,this.opt_len=0,this.static_len=0,this.matches=0,this.insert=0,this.bi_buf=0,this.bi_valid=0}function v(t){var e;return t&&t.state?(t.total_in=t.total_out=0,t.data_type=Y,e=t.state,e.pending=0,e.pending_out=0,e.wrap<0&&(e.wrap=-e.wrap),e.status=e.wrap?rt:dt,t.adler=2===e.wrap?0:1,e.last_flush=Z,B._tr_init(e),D):i(t,U)}function k(t){var e=v(t);return e===D&&w(t.state),e}function y(t,e,a,n,r,s){if(!t)return U;var o=1;if(e===L&&(e=6),n<0?(o=0,n=-n):n>15&&(o=2,n-=16),r<1||r>G||a!==q||n<8||n>15||e<0||e>9||s<0||s>M)return i(t,U);8===n&&(n=9);var l=new p;return t.state=l,l.strm=t,l.wrap=o,l.gzhead=null,l.w_bits=n,l.w_size=1<<l.w_bits,l.w_mask=l.w_size-1,l.hash_bits=r+7,l.hash_size=1<<l.hash_bits,l.hash_mask=l.hash_size-1,l.hash_shift=~~((l.hash_bits+et-1)/et),l.window=new z.Buf8(2*l.w_size),l.head=new z.Buf16(l.hash_size),l.prev=new z.Buf16(l.w_size),l.lit_bufsize=1<<r+6,l.pending_buf_size=4*l.lit_bufsize,l.pending_buf=new 
z.Buf8(l.pending_buf_size),l.d_buf=1*l.lit_bufsize,l.l_buf=3*l.lit_bufsize,l.level=e,l.strategy=s,l.method=a,k(t)}var x,z=t("../utils/common"),B=t("./trees"),S=t("./adler32"),E=t("./crc32"),A=t("./messages"),Z=0,R=1,C=3,N=4,O=5,D=0,I=1,U=-2,T=-3,F=-5,L=-1,H=1,j=2,K=3,M=4,P=0,Y=2,q=8,G=9,X=15,W=8,J=286,Q=30,V=19,$=2*J+1,tt=15,et=3,at=258,it=at+et+1,nt=32,rt=42,st=69,ot=73,lt=91,ht=103,dt=113,ft=666,_t=1,ut=2,ct=3,bt=4,gt=3;x=[new m(0,0,0,0,function(t,e){var a=65535;for(a>t.pending_buf_size-5&&(a=t.pending_buf_size-5);;){if(t.lookahead<=1){if(_(t),0===t.lookahead&&e===Z)return _t;if(0===t.lookahead)break}t.strstart+=t.lookahead,t.lookahead=0;var i=t.block_start+a;if((0===t.strstart||t.strstart>=i)&&(t.lookahead=t.strstart-i,t.strstart=i,o(t,!1),0===t.strm.avail_out))return _t;if(t.strstart-t.block_start>=t.w_size-it&&(o(t,!1),0===t.strm.avail_out))return _t}return t.insert=0,e===N?(o(t,!0),0===t.strm.avail_out?ct:bt):(t.strstart>t.block_start&&(o(t,!1),t.strm.avail_out),_t)}),new m(4,4,8,4,u),new m(4,5,16,8,u),new m(4,6,32,32,u),new m(4,4,16,16,c),new m(8,16,32,32,c),new m(8,16,128,128,c),new m(8,32,128,256,c),new m(32,128,258,1024,c),new m(32,258,258,4096,c)],a.deflateInit=function(t,e){return y(t,e,q,X,W,P)},a.deflateInit2=y,a.deflateReset=k,a.deflateResetKeep=v,a.deflateSetHeader=function(t,e){return t&&t.state?2!==t.state.wrap?U:(t.state.gzhead=e,D):U},a.deflate=function(t,e){var a,o,d,f;if(!t||!t.state||e>O||e<0)return t?i(t,U):U;if(o=t.state,!t.output||!t.input&&0!==t.avail_in||o.status===ft&&e!==N)return 
i(t,0===t.avail_out?F:U);if(o.strm=t,a=o.last_flush,o.last_flush=e,o.status===rt)if(2===o.wrap)t.adler=0,l(o,31),l(o,139),l(o,8),o.gzhead?(l(o,(o.gzhead.text?1:0)+(o.gzhead.hcrc?2:0)+(o.gzhead.extra?4:0)+(o.gzhead.name?8:0)+(o.gzhead.comment?16:0)),l(o,255&o.gzhead.time),l(o,o.gzhead.time>>8&255),l(o,o.gzhead.time>>16&255),l(o,o.gzhead.time>>24&255),l(o,9===o.level?2:o.strategy>=j||o.level<2?4:0),l(o,255&o.gzhead.os),o.gzhead.extra&&o.gzhead.extra.length&&(l(o,255&o.gzhead.extra.length),l(o,o.gzhead.extra.length>>8&255)),o.gzhead.hcrc&&(t.adler=E(t.adler,o.pending_buf,o.pending,0)),o.gzindex=0,o.status=st):(l(o,0),l(o,0),l(o,0),l(o,0),l(o,0),l(o,9===o.level?2:o.strategy>=j||o.level<2?4:0),l(o,gt),o.status=dt);else{var _=q+(o.w_bits-8<<4)<<8;_|=(o.strategy>=j||o.level<2?0:o.level<6?1:6===o.level?2:3)<<6,0!==o.strstart&&(_|=nt),_+=31-_%31,o.status=dt,h(o,_),0!==o.strstart&&(h(o,t.adler>>>16),h(o,65535&t.adler)),t.adler=1}if(o.status===st)if(o.gzhead.extra){for(d=o.pending;o.gzindex<(65535&o.gzhead.extra.length)&&(o.pending!==o.pending_buf_size||(o.gzhead.hcrc&&o.pending>d&&(t.adler=E(t.adler,o.pending_buf,o.pending-d,d)),s(t),d=o.pending,o.pending!==o.pending_buf_size));)l(o,255&o.gzhead.extra[o.gzindex]),o.gzindex++;o.gzhead.hcrc&&o.pending>d&&(t.adler=E(t.adler,o.pending_buf,o.pending-d,d)),o.gzindex===o.gzhead.extra.length&&(o.gzindex=0,o.status=ot)}else o.status=ot;if(o.status===ot)if(o.gzhead.name){d=o.pending;do{if(o.pending===o.pending_buf_size&&(o.gzhead.hcrc&&o.pending>d&&(t.adler=E(t.adler,o.pending_buf,o.pending-d,d)),s(t),d=o.pending,o.pending===o.pending_buf_size)){f=1;break}f=o.gzindex<o.gzhead.name.length?255&o.gzhead.name.charCodeAt(o.gzindex++):0,l(o,f)}while(0!==f);o.gzhead.hcrc&&o.pending>d&&(t.adler=E(t.adler,o.pending_buf,o.pending-d,d)),0===f&&(o.gzindex=0,o.status=lt)}else 
o.status=lt;if(o.status===lt)if(o.gzhead.comment){d=o.pending;do{if(o.pending===o.pending_buf_size&&(o.gzhead.hcrc&&o.pending>d&&(t.adler=E(t.adler,o.pending_buf,o.pending-d,d)),s(t),d=o.pending,o.pending===o.pending_buf_size)){f=1;break}f=o.gzindex<o.gzhead.comment.length?255&o.gzhead.comment.charCodeAt(o.gzindex++):0,l(o,f)}while(0!==f);o.gzhead.hcrc&&o.pending>d&&(t.adler=E(t.adler,o.pending_buf,o.pending-d,d)),0===f&&(o.status=ht)}else o.status=ht;if(o.status===ht&&(o.gzhead.hcrc?(o.pending+2>o.pending_buf_size&&s(t),o.pending+2<=o.pending_buf_size&&(l(o,255&t.adler),l(o,t.adler>>8&255),t.adler=0,o.status=dt)):o.status=dt),0!==o.pending){if(s(t),0===t.avail_out)return o.last_flush=-1,D}else if(0===t.avail_in&&n(e)<=n(a)&&e!==N)return i(t,F);if(o.status===ft&&0!==t.avail_in)return i(t,F);if(0!==t.avail_in||0!==o.lookahead||e!==Z&&o.status!==ft){var u=o.strategy===j?g(o,e):o.strategy===K?b(o,e):x[o.level].func(o,e);if(u!==ct&&u!==bt||(o.status=ft),u===_t||u===ct)return 0===t.avail_out&&(o.last_flush=-1),D;if(u===ut&&(e===R?B._tr_align(o):e!==O&&(B._tr_stored_block(o,0,0,!1),e===C&&(r(o.head),0===o.lookahead&&(o.strstart=0,o.block_start=0,o.insert=0))),s(t),0===t.avail_out))return o.last_flush=-1,D}return e!==N?D:o.wrap<=0?I:(2===o.wrap?(l(o,255&t.adler),l(o,t.adler>>8&255),l(o,t.adler>>16&255),l(o,t.adler>>24&255),l(o,255&t.total_in),l(o,t.total_in>>8&255),l(o,t.total_in>>16&255),l(o,t.total_in>>24&255)):(h(o,t.adler>>>16),h(o,65535&t.adler)),s(t),o.wrap>0&&(o.wrap=-o.wrap),0!==o.pending?D:I)},a.deflateEnd=function(t){var e;return t&&t.state?(e=t.state.status)!==rt&&e!==st&&e!==ot&&e!==lt&&e!==ht&&e!==dt&&e!==ft?i(t,U):(t.state=null,e===dt?i(t,T):D):U},a.deflateSetDictionary=function(t,e){var a,i,n,s,o,l,h,d,f=e.length;if(!t||!t.state)return U;if(a=t.state,2===(s=a.wrap)||1===s&&a.status!==rt||a.lookahead)return U;for(1===s&&(t.adler=S(t.adler,e,f,0)),a.wrap=0,f>=a.w_size&&(0===s&&(r(a.head),a.strstart=0,a.block_start=0,a.insert=0),d=new 
z.Buf8(a.w_size),z.arraySet(d,e,f-a.w_size,a.w_size,0),e=d,f=a.w_size),o=t.avail_in,l=t.next_in,h=t.input,t.avail_in=f,t.next_in=0,t.input=e,_(a);a.lookahead>=et;){i=a.strstart,n=a.lookahead-(et-1);do{a.ins_h=(a.ins_h<<a.hash_shift^a.window[i+et-1])&a.hash_mask,a.prev[i&a.w_mask]=a.head[a.ins_h],a.head[a.ins_h]=i,i++}while(--n);a.strstart=i,a.lookahead=et-1,_(a)}return a.strstart+=a.lookahead,a.block_start=a.strstart,a.insert=a.lookahead,a.lookahead=0,a.match_length=a.prev_length=et-1,a.match_available=0,t.next_in=l,t.input=h,t.avail_in=o,a.wrap=s,D},a.deflateInfo="pako deflate (from Nodeca project)"},{"../utils/common":3,"./adler32":5,"./crc32":7,"./messages":13,"./trees":14}],9:[function(t,e,a){"use strict";e.exports=function(){this.text=0,this.time=0,this.xflags=0,this.os=0,this.extra=null,this.extra_len=0,this.name="",this.comment="",this.hcrc=0,this.done=!1}},{}],10:[function(t,e,a){"use strict";e.exports=function(t,e){var a,i,n,r,s,o,l,h,d,f,_,u,c,b,g,m,w,p,v,k,y,x,z,B,S;a=t.state,i=t.next_in,B=t.input,n=i+(t.avail_in-5),r=t.next_out,S=t.output,s=r-(e-t.avail_out),o=r+(t.avail_out-257),l=a.dmax,h=a.wsize,d=a.whave,f=a.wnext,_=a.window,u=a.hold,c=a.bits,b=a.lencode,g=a.distcode,m=(1<<a.lenbits)-1,w=(1<<a.distbits)-1;t:do{c<15&&(u+=B[i++]<<c,c+=8,u+=B[i++]<<c,c+=8),p=b[u&m];e:for(;;){if(v=p>>>24,u>>>=v,c-=v,0===(v=p>>>16&255))S[r++]=65535&p;else{if(!(16&v)){if(0==(64&v)){p=b[(65535&p)+(u&(1<<v)-1)];continue e}if(32&v){a.mode=12;break t}t.msg="invalid literal/length code",a.mode=30;break t}k=65535&p,(v&=15)&&(c<v&&(u+=B[i++]<<c,c+=8),k+=u&(1<<v)-1,u>>>=v,c-=v),c<15&&(u+=B[i++]<<c,c+=8,u+=B[i++]<<c,c+=8),p=g[u&w];a:for(;;){if(v=p>>>24,u>>>=v,c-=v,!(16&(v=p>>>16&255))){if(0==(64&v)){p=g[(65535&p)+(u&(1<<v)-1)];continue a}t.msg="invalid distance code",a.mode=30;break t}if(y=65535&p,v&=15,c<v&&(u+=B[i++]<<c,(c+=8)<v&&(u+=B[i++]<<c,c+=8)),(y+=u&(1<<v)-1)>l){t.msg="invalid distance too far back",a.mode=30;break 
t}if(u>>>=v,c-=v,v=r-s,y>v){if((v=y-v)>d&&a.correct){t.msg="invalid distance too far back",a.mode=30;break t}if(x=0,z=_,0===f){if(x+=h-v,v<k){k-=v;do{S[r++]=_[x++]}while(--v);x=r-y,z=S}}else if(f<v){if(x+=h+f-v,(v-=f)<k){k-=v;do{S[r++]=_[x++]}while(--v);if(x=0,f<k){k-=v=f;do{S[r++]=_[x++]}while(--v);x=r-y,z=S}}}else if(x+=f-v,v<k){k-=v;do{S[r++]=_[x++]}while(--v);x=r-y,z=S}for(;k>2;)S[r++]=z[x++],S[r++]=z[x++],S[r++]=z[x++],k-=3;k&&(S[r++]=z[x++],k>1&&(S[r++]=z[x++]))}else{x=r-y;do{S[r++]=S[x++],S[r++]=S[x++],S[r++]=S[x++],k-=3}while(k>2);k&&(S[r++]=S[x++],k>1&&(S[r++]=S[x++]))}break}}break}}while(i<n&&r<o);i-=k=c>>3,u&=(1<<(c-=k<<3))-1,t.next_in=i,t.next_out=r,t.avail_in=i<n?n-i+5:5-(i-n),t.avail_out=r<o?o-r+257:257-(r-o),a.hold=u,a.bits=c}},{}],11:[function(t,e,a){"use strict";function i(t){return(t>>>24&255)+(t>>>8&65280)+((65280&t)<<8)+((255&t)<<24)}function n(){this.mode=0,this.last=!1,this.wrap=0,this.havedict=!1,this.flags=0,this.dmax=0,this.check=0,this.total=0,this.head=null,this.wbits=0,this.wsize=0,this.whave=0,this.wnext=0,this.window=null,this.hold=0,this.bits=0,this.length=0,this.offset=0,this.extra=0,this.lencode=null,this.distcode=null,this.lenbits=0,this.distbits=0,this.ncode=0,this.nlen=0,this.ndist=0,this.have=0,this.next=null,this.lens=new u.Buf16(320),this.work=new u.Buf16(288),this.lendyn=null,this.distdyn=null,this.correct=0,this.back=0,this.was=0}function r(t){var e;return t&&t.state?(e=t.state,t.total_in=t.total_out=e.total=0,t.msg="",e.wrap&&(t.adler=1&e.wrap),e.mode=N,e.last=0,e.havedict=0,e.dmax=32768,e.head=null,e.hold=0,e.bits=0,e.lencode=e.lendyn=new u.Buf32(dt),e.distcode=e.distdyn=new u.Buf32(ft),e.correct=1,e.back=-1,z):E}function s(t){var e;return t&&t.state?(e=t.state,e.wsize=0,e.whave=0,e.wnext=0,r(t)):E}function o(t,e){var a,i;return t&&t.state?(i=t.state,e<0?(a=0,e=-e):(a=1+(e>>4),e<48&&(e&=15)),e&&(e<8||e>15)?E:(null!==i.window&&i.wbits!==e&&(i.window=null),i.wrap=a,i.wbits=e,s(t))):E}function l(t,e){var a,i;return t?(i=new 
n,t.state=i,i.window=null,(a=o(t,e))!==z&&(t.state=null),a):E}function h(t){if(ut){var e;for(f=new u.Buf32(512),_=new u.Buf32(32),e=0;e<144;)t.lens[e++]=8;for(;e<256;)t.lens[e++]=9;for(;e<280;)t.lens[e++]=7;for(;e<288;)t.lens[e++]=8;for(m(p,t.lens,0,288,f,0,t.work,{bits:9}),e=0;e<32;)t.lens[e++]=5;m(v,t.lens,0,32,_,0,t.work,{bits:5}),ut=!1}t.lencode=f,t.lenbits=9,t.distcode=_,t.distbits=5}function d(t,e,a,i){var n,r=t.state;return null===r.window&&(r.wsize=1<<r.wbits,r.wnext=0,r.whave=0,r.window=new u.Buf8(r.wsize)),i>=r.wsize?(u.arraySet(r.window,e,a-r.wsize,r.wsize,0),r.wnext=0,r.whave=r.wsize):((n=r.wsize-r.wnext)>i&&(n=i),u.arraySet(r.window,e,a-i,n,r.wnext),(i-=n)?(u.arraySet(r.window,e,a-i,i,0),r.wnext=i,r.whave=r.wsize):(r.wnext+=n,r.wnext===r.wsize&&(r.wnext=0),r.whave<r.wsize&&(r.whave+=n))),0}var f,_,u=t("../utils/common"),c=t("./adler32"),b=t("./crc32"),g=t("./inffast"),m=t("./inftrees"),w=0,p=1,v=2,k=4,y=5,x=6,z=0,B=1,S=2,E=-2,A=-3,Z=-4,R=-5,C=8,N=1,O=2,D=3,I=4,U=5,T=6,F=7,L=8,H=9,j=10,K=11,M=12,P=13,Y=14,q=15,G=16,X=17,W=18,J=19,Q=20,V=21,$=22,tt=23,et=24,at=25,it=26,nt=27,rt=28,st=29,ot=30,lt=31,ht=32,dt=852,ft=592,_t=15,ut=!0;a.inflateReset=s,a.inflateReset2=o,a.inflateResetKeep=r,a.inflateInit=function(t){return l(t,_t)},a.inflateInit2=l,a.inflate=function(t,e){var a,n,r,s,o,l,f,_,dt,ft,_t,ut,ct,bt,gt,mt,wt,pt,vt,kt,yt,xt,zt,Bt,St=0,Et=new u.Buf8(4),At=[16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15];if(!t||!t.state||!t.output||!t.input&&0!==t.avail_in)return E;(a=t.state).mode===M&&(a.mode=P),o=t.next_out,r=t.output,f=t.avail_out,s=t.next_in,n=t.input,l=t.avail_in,_=a.hold,dt=a.bits,ft=l,_t=f,xt=z;t:for(;;)switch(a.mode){case N:if(0===a.wrap){a.mode=P;break}for(;dt<16;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}if(2&a.wrap&&35615===_){a.check=0,Et[0]=255&_,Et[1]=_>>>8&255,a.check=b(a.check,Et,2,0),_=0,dt=0,a.mode=O;break}if(a.flags=0,a.head&&(a.head.done=!1),!(1&a.wrap)||(((255&_)<<8)+(_>>8))%31){t.msg="incorrect header 
check",a.mode=ot;break}if((15&_)!==C){t.msg="unknown compression method",a.mode=ot;break}if(_>>>=4,dt-=4,yt=8+(15&_),0===a.wbits)a.wbits=yt;else if(yt>a.wbits){t.msg="invalid window size",a.mode=ot;break}a.dmax=1<<yt,t.adler=a.check=1,a.mode=512&_?j:M,_=0,dt=0;break;case O:for(;dt<16;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}if(a.flags=_,(255&a.flags)!==C){t.msg="unknown compression method",a.mode=ot;break}if(57344&a.flags){t.msg="unknown header flags set",a.mode=ot;break}a.head&&(a.head.text=_>>8&1),512&a.flags&&(Et[0]=255&_,Et[1]=_>>>8&255,a.check=b(a.check,Et,2,0)),_=0,dt=0,a.mode=D;case D:for(;dt<32;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}a.head&&(a.head.time=_),512&a.flags&&(Et[0]=255&_,Et[1]=_>>>8&255,Et[2]=_>>>16&255,Et[3]=_>>>24&255,a.check=b(a.check,Et,4,0)),_=0,dt=0,a.mode=I;case I:for(;dt<16;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}a.head&&(a.head.xflags=255&_,a.head.os=_>>8),512&a.flags&&(Et[0]=255&_,Et[1]=_>>>8&255,a.check=b(a.check,Et,2,0)),_=0,dt=0,a.mode=U;case U:if(1024&a.flags){for(;dt<16;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}a.length=_,a.head&&(a.head.extra_len=_),512&a.flags&&(Et[0]=255&_,Et[1]=_>>>8&255,a.check=b(a.check,Et,2,0)),_=0,dt=0}else a.head&&(a.head.extra=null);a.mode=T;case T:if(1024&a.flags&&((ut=a.length)>l&&(ut=l),ut&&(a.head&&(yt=a.head.extra_len-a.length,a.head.extra||(a.head.extra=new Array(a.head.extra_len)),u.arraySet(a.head.extra,n,s,ut,yt)),512&a.flags&&(a.check=b(a.check,n,ut,s)),l-=ut,s+=ut,a.length-=ut),a.length))break t;a.length=0,a.mode=F;case F:if(2048&a.flags){if(0===l)break t;ut=0;do{yt=n[s+ut++],a.head&&yt&&a.length<65536&&(a.head.name+=String.fromCharCode(yt))}while(yt&&ut<l);if(512&a.flags&&(a.check=b(a.check,n,ut,s)),l-=ut,s+=ut,yt)break t}else a.head&&(a.head.name=null);a.length=0,a.mode=L;case L:if(4096&a.flags){if(0===l)break t;ut=0;do{yt=n[s+ut++],a.head&&yt&&a.length<65536&&(a.head.comment+=String.fromCharCode(yt))}while(yt&&ut<l);if(512&a.flags&&(a.check=b(a.check,n,ut,s)),l-=ut,s+=ut,yt)break 
t}else a.head&&(a.head.comment=null);a.mode=H;case H:if(512&a.flags){for(;dt<16;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}if(_!==(65535&a.check)){t.msg="header crc mismatch",a.mode=ot;break}_=0,dt=0}a.head&&(a.head.hcrc=a.flags>>9&1,a.head.done=!0),t.adler=a.check=0,a.mode=M;break;case j:for(;dt<32;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}t.adler=a.check=i(_),_=0,dt=0,a.mode=K;case K:if(0===a.havedict)return t.next_out=o,t.avail_out=f,t.next_in=s,t.avail_in=l,a.hold=_,a.bits=dt,S;t.adler=a.check=1,a.mode=M;case M:if(e===y||e===x)break t;case P:if(a.last){_>>>=7&dt,dt-=7&dt,a.mode=nt;break}for(;dt<3;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}switch(a.last=1&_,_>>>=1,dt-=1,3&_){case 0:a.mode=Y;break;case 1:if(h(a),a.mode=Q,e===x){_>>>=2,dt-=2;break t}break;case 2:a.mode=X;break;case 3:t.msg="invalid block type",a.mode=ot}_>>>=2,dt-=2;break;case Y:for(_>>>=7&dt,dt-=7&dt;dt<32;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}if((65535&_)!=(_>>>16^65535)){t.msg="invalid stored block lengths",a.mode=ot;break}if(a.length=65535&_,_=0,dt=0,a.mode=q,e===x)break t;case q:a.mode=G;case G:if(ut=a.length){if(ut>l&&(ut=l),ut>f&&(ut=f),0===ut)break t;u.arraySet(r,n,s,ut,o),l-=ut,s+=ut,f-=ut,o+=ut,a.length-=ut;break}a.mode=M;break;case X:for(;dt<14;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}if(a.nlen=257+(31&_),_>>>=5,dt-=5,a.ndist=1+(31&_),_>>>=5,dt-=5,a.ncode=4+(15&_),_>>>=4,dt-=4,a.nlen>286||a.ndist>30){t.msg="too many length or distance symbols",a.mode=ot;break}a.have=0,a.mode=W;case W:for(;a.have<a.ncode;){for(;dt<3;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}a.lens[At[a.have++]]=7&_,_>>>=3,dt-=3}for(;a.have<19;)a.lens[At[a.have++]]=0;if(a.lencode=a.lendyn,a.lenbits=7,zt={bits:a.lenbits},xt=m(w,a.lens,0,19,a.lencode,0,a.work,zt),a.lenbits=zt.bits,xt){t.msg="invalid code lengths set",a.mode=ot;break}a.have=0,a.mode=J;case J:for(;a.have<a.nlen+a.ndist;){for(;St=a.lencode[_&(1<<a.lenbits)-1],gt=St>>>24,mt=St>>>16&255,wt=65535&St,!(gt<=dt);){if(0===l)break 
t;l--,_+=n[s++]<<dt,dt+=8}if(wt<16)_>>>=gt,dt-=gt,a.lens[a.have++]=wt;else{if(16===wt){for(Bt=gt+2;dt<Bt;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}if(_>>>=gt,dt-=gt,0===a.have){t.msg="invalid bit length repeat",a.mode=ot;break}yt=a.lens[a.have-1],ut=3+(3&_),_>>>=2,dt-=2}else if(17===wt){for(Bt=gt+3;dt<Bt;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}dt-=gt,yt=0,ut=3+(7&(_>>>=gt)),_>>>=3,dt-=3}else{for(Bt=gt+7;dt<Bt;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}dt-=gt,yt=0,ut=11+(127&(_>>>=gt)),_>>>=7,dt-=7}if(a.have+ut>a.nlen+a.ndist){t.msg="invalid bit length repeat",a.mode=ot;break}for(;ut--;)a.lens[a.have++]=yt}}if(a.mode===ot)break;if(0===a.lens[256]){t.msg="invalid code -- missing end-of-block",a.mode=ot;break}if(a.lenbits=9,zt={bits:a.lenbits},xt=m(p,a.lens,0,a.nlen,a.lencode,0,a.work,zt),a.lenbits=zt.bits,xt){t.msg="invalid literal/lengths set",a.mode=ot;break}if(a.distbits=6,a.distcode=a.distdyn,zt={bits:a.distbits},xt=m(v,a.lens,a.nlen,a.ndist,a.distcode,0,a.work,zt),a.distbits=zt.bits,xt){t.msg="invalid distances set",a.mode=ot;break}if(a.mode=Q,e===x)break t;case Q:a.mode=V;case V:if(l>=6&&f>=258){t.next_out=o,t.avail_out=f,t.next_in=s,t.avail_in=l,a.hold=_,a.bits=dt,g(t,_t),o=t.next_out,r=t.output,f=t.avail_out,s=t.next_in,n=t.input,l=t.avail_in,_=a.hold,dt=a.bits,a.mode===M&&(a.back=-1);break}for(a.back=0;St=a.lencode[_&(1<<a.lenbits)-1],gt=St>>>24,mt=St>>>16&255,wt=65535&St,!(gt<=dt);){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}if(mt&&0==(240&mt)){for(pt=gt,vt=mt,kt=wt;St=a.lencode[kt+((_&(1<<pt+vt)-1)>>pt)],gt=St>>>24,mt=St>>>16&255,wt=65535&St,!(pt+gt<=dt);){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}_>>>=pt,dt-=pt,a.back+=pt}if(_>>>=gt,dt-=gt,a.back+=gt,a.length=wt,0===mt){a.mode=it;break}if(32&mt){a.back=-1,a.mode=M;break}if(64&mt){t.msg="invalid literal/length code",a.mode=ot;break}a.extra=15&mt,a.mode=$;case $:if(a.extra){for(Bt=a.extra;dt<Bt;){if(0===l)break 
t;l--,_+=n[s++]<<dt,dt+=8}a.length+=_&(1<<a.extra)-1,_>>>=a.extra,dt-=a.extra,a.back+=a.extra}a.was=a.length,a.mode=tt;case tt:for(;St=a.distcode[_&(1<<a.distbits)-1],gt=St>>>24,mt=St>>>16&255,wt=65535&St,!(gt<=dt);){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}if(0==(240&mt)){for(pt=gt,vt=mt,kt=wt;St=a.distcode[kt+((_&(1<<pt+vt)-1)>>pt)],gt=St>>>24,mt=St>>>16&255,wt=65535&St,!(pt+gt<=dt);){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}_>>>=pt,dt-=pt,a.back+=pt}if(_>>>=gt,dt-=gt,a.back+=gt,64&mt){t.msg="invalid distance code",a.mode=ot;break}a.offset=wt,a.extra=15&mt,a.mode=et;case et:if(a.extra){for(Bt=a.extra;dt<Bt;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}a.offset+=_&(1<<a.extra)-1,_>>>=a.extra,dt-=a.extra,a.back+=a.extra}if(a.offset>a.dmax){t.msg="invalid distance too far back",a.mode=ot;break}a.mode=at;case at:if(0===f)break t;if(ut=_t-f,a.offset>ut){if((ut=a.offset-ut)>a.whave&&a.correct){t.msg="invalid distance too far back",a.mode=ot;break}ut>a.wnext?(ut-=a.wnext,ct=a.wsize-ut):ct=a.wnext-ut,ut>a.length&&(ut=a.length),bt=a.window}else bt=r,ct=o-a.offset,ut=a.length;ut>f&&(ut=f),f-=ut,a.length-=ut;do{r[o++]=bt[ct++]}while(--ut);0===a.length&&(a.mode=V);break;case it:if(0===f)break t;r[o++]=a.length,f--,a.mode=V;break;case nt:if(a.wrap){for(;dt<32;){if(0===l)break t;l--,_|=n[s++]<<dt,dt+=8}if(_t-=f,t.total_out+=_t,a.total+=_t,_t&&(t.adler=a.check=a.flags?b(a.check,r,_t,o-_t):c(a.check,r,_t,o-_t)),_t=f,(a.flags?_:i(_))!==a.check){t.msg="incorrect data check",a.mode=ot;break}_=0,dt=0}a.mode=rt;case rt:if(a.wrap&&a.flags){for(;dt<32;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}if(_!==(4294967295&a.total)){t.msg="incorrect length check",a.mode=ot;break}_=0,dt=0}a.mode=st;case st:xt=B;break t;case ot:xt=A;break t;case lt:return Z;case ht:default:return E}return 
t.next_out=o,t.avail_out=f,t.next_in=s,t.avail_in=l,a.hold=_,a.bits=dt,(a.wsize||_t!==t.avail_out&&a.mode<ot&&(a.mode<nt||e!==k))&&d(t,t.output,t.next_out,_t-t.avail_out)?(a.mode=lt,Z):(ft-=t.avail_in,_t-=t.avail_out,t.total_in+=ft,t.total_out+=_t,a.total+=_t,a.wrap&&_t&&(t.adler=a.check=a.flags?b(a.check,r,_t,t.next_out-_t):c(a.check,r,_t,t.next_out-_t)),t.data_type=a.bits+(a.last?64:0)+(a.mode===M?128:0)+(a.mode===Q||a.mode===q?256:0),(0===ft&&0===_t||e===k)&&xt===z&&(xt=R),xt)},a.inflateEnd=function(t){if(!t||!t.state)return E;var e=t.state;return e.window&&(e.window=null),t.state=null,z},a.inflateGetHeader=function(t,e){var a;return t&&t.state?0==(2&(a=t.state).wrap)?E:(a.head=e,e.done=!1,z):E},a.inflateSetDictionary=function(t,e){var a,i,n=e.length;return t&&t.state?0!==(a=t.state).wrap&&a.mode!==K?E:a.mode===K&&(i=1,(i=c(i,e,n,0))!==a.check)?A:d(t,e,n,n)?(a.mode=lt,Z):(a.havedict=1,z):E},a.inflateInfo="pako inflate (from Nodeca project)"},{"../utils/common":3,"./adler32":5,"./crc32":7,"./inffast":10,"./inftrees":12}],12:[function(t,e,a){"use strict";var i=t("../utils/common"),n=[3,4,5,6,7,8,9,10,11,13,15,17,19,23,27,31,35,43,51,59,67,83,99,115,131,163,195,227,258,0,0],r=[16,16,16,16,16,16,16,16,17,17,17,17,18,18,18,18,19,19,19,19,20,20,20,20,21,21,21,21,16,72,78],s=[1,2,3,4,5,7,9,13,17,25,33,49,65,97,129,193,257,385,513,769,1025,1537,2049,3073,4097,6145,8193,12289,16385,24577,0,0],o=[16,16,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,24,24,25,25,26,26,27,27,28,28,29,29,64,64];e.exports=function(t,e,a,l,h,d,f,_){var u,c,b,g,m,w,p,v,k,y=_.bits,x=0,z=0,B=0,S=0,E=0,A=0,Z=0,R=0,C=0,N=0,O=null,D=0,I=new i.Buf16(16),U=new i.Buf16(16),T=null,F=0;for(x=0;x<=15;x++)I[x]=0;for(z=0;z<l;z++)I[e[a+z]]++;for(E=y,S=15;S>=1&&0===I[S];S--);if(E>S&&(E=S),0===S)return 
h[d++]=20971520,h[d++]=20971520,_.bits=1,0;for(B=1;B<S&&0===I[B];B++);for(E<B&&(E=B),R=1,x=1;x<=15;x++)if(R<<=1,(R-=I[x])<0)return-1;if(R>0&&(0===t||1!==S))return-1;for(U[1]=0,x=1;x<15;x++)U[x+1]=U[x]+I[x];for(z=0;z<l;z++)0!==e[a+z]&&(f[U[e[a+z]]++]=z);if(0===t?(O=T=f,w=19):1===t?(O=n,D-=257,T=r,F-=257,w=256):(O=s,T=o,w=-1),N=0,z=0,x=B,m=d,A=E,Z=0,b=-1,C=1<<E,g=C-1,1===t&&C>852||2===t&&C>592)return 1;for(;;){p=x-Z,f[z]<w?(v=0,k=f[z]):f[z]>w?(v=T[F+f[z]],k=O[D+f[z]]):(v=96,k=0),u=1<<x-Z,B=c=1<<A;do{h[m+(N>>Z)+(c-=u)]=p<<24|v<<16|k|0}while(0!==c);for(u=1<<x-1;N&u;)u>>=1;if(0!==u?(N&=u-1,N+=u):N=0,z++,0==--I[x]){if(x===S)break;x=e[a+f[z]]}if(x>E&&(N&g)!==b){for(0===Z&&(Z=E),m+=B,R=1<<(A=x-Z);A+Z<S&&!((R-=I[A+Z])<=0);)A++,R<<=1;if(C+=1<<A,1===t&&C>852||2===t&&C>592)return 1;h[b=N&g]=E<<24|A<<16|m-d|0}}return 0!==N&&(h[m+N]=x-Z<<24|64<<16|0),_.bits=E,0}},{"../utils/common":3}],13:[function(t,e,a){"use strict";e.exports={2:"need dictionary",1:"stream end",0:"","-1":"file error","-2":"stream error","-3":"data error","-4":"insufficient memory","-5":"buffer error","-6":"incompatible version"}},{}],14:[function(t,e,a){"use strict";function i(t){for(var e=t.length;--e>=0;)t[e]=0}function n(t,e,a,i,n){this.static_tree=t,this.extra_bits=e,this.extra_base=a,this.elems=i,this.max_length=n,this.has_stree=t&&t.length}function r(t,e){this.dyn_tree=t,this.max_code=0,this.stat_desc=e}function s(t){return t<256?et[t]:et[256+(t>>>7)]}function o(t,e){t.pending_buf[t.pending++]=255&e,t.pending_buf[t.pending++]=e>>>8&255}function l(t,e,a){t.bi_valid>M-a?(t.bi_buf|=e<<t.bi_valid&65535,o(t,t.bi_buf),t.bi_buf=e>>M-t.bi_valid,t.bi_valid+=a-M):(t.bi_buf|=e<<t.bi_valid&65535,t.bi_valid+=a)}function h(t,e,a){l(t,a[2*e],a[2*e+1])}function d(t,e){var a=0;do{a|=1&t,t>>>=1,a<<=1}while(--e>0);return a>>>1}function f(t){16===t.bi_valid?(o(t,t.bi_buf),t.bi_buf=0,t.bi_valid=0):t.bi_valid>=8&&(t.pending_buf[t.pending++]=255&t.bi_buf,t.bi_buf>>=8,t.bi_valid-=8)}function _(t,e){var 
a,i,n,r,s,o,l=e.dyn_tree,h=e.max_code,d=e.stat_desc.static_tree,f=e.stat_desc.has_stree,_=e.stat_desc.extra_bits,u=e.stat_desc.extra_base,c=e.stat_desc.max_length,b=0;for(r=0;r<=K;r++)t.bl_count[r]=0;for(l[2*t.heap[t.heap_max]+1]=0,a=t.heap_max+1;a<j;a++)(r=l[2*l[2*(i=t.heap[a])+1]+1]+1)>c&&(r=c,b++),l[2*i+1]=r,i>h||(t.bl_count[r]++,s=0,i>=u&&(s=_[i-u]),o=l[2*i],t.opt_len+=o*(r+s),f&&(t.static_len+=o*(d[2*i+1]+s)));if(0!==b){do{for(r=c-1;0===t.bl_count[r];)r--;t.bl_count[r]--,t.bl_count[r+1]+=2,t.bl_count[c]--,b-=2}while(b>0);for(r=c;0!==r;r--)for(i=t.bl_count[r];0!==i;)(n=t.heap[--a])>h||(l[2*n+1]!==r&&(t.opt_len+=(r-l[2*n+1])*l[2*n],l[2*n+1]=r),i--)}}function u(t,e,a){var i,n,r=new Array(K+1),s=0;for(i=1;i<=K;i++)r[i]=s=s+a[i-1]<<1;for(n=0;n<=e;n++){var o=t[2*n+1];0!==o&&(t[2*n]=d(r[o]++,o))}}function c(){var t,e,a,i,r,s=new Array(K+1);for(a=0,i=0;i<U-1;i++)for(it[i]=a,t=0;t<1<<W[i];t++)at[a++]=i;for(at[a-1]=i,r=0,i=0;i<16;i++)for(nt[i]=r,t=0;t<1<<J[i];t++)et[r++]=i;for(r>>=7;i<L;i++)for(nt[i]=r<<7,t=0;t<1<<J[i]-7;t++)et[256+r++]=i;for(e=0;e<=K;e++)s[e]=0;for(t=0;t<=143;)$[2*t+1]=8,t++,s[8]++;for(;t<=255;)$[2*t+1]=9,t++,s[9]++;for(;t<=279;)$[2*t+1]=7,t++,s[7]++;for(;t<=287;)$[2*t+1]=8,t++,s[8]++;for(u($,F+1,s),t=0;t<L;t++)tt[2*t+1]=5,tt[2*t]=d(t,5);rt=new n($,W,T+1,F,K),st=new n(tt,J,0,L,K),ot=new n(new Array(0),Q,0,H,P)}function b(t){var e;for(e=0;e<F;e++)t.dyn_ltree[2*e]=0;for(e=0;e<L;e++)t.dyn_dtree[2*e]=0;for(e=0;e<H;e++)t.bl_tree[2*e]=0;t.dyn_ltree[2*Y]=1,t.opt_len=t.static_len=0,t.last_lit=t.matches=0}function g(t){t.bi_valid>8?o(t,t.bi_buf):t.bi_valid>0&&(t.pending_buf[t.pending++]=t.bi_buf),t.bi_buf=0,t.bi_valid=0}function m(t,e,a,i){g(t),i&&(o(t,a),o(t,~a)),A.arraySet(t.pending_buf,t.window,e,a,t.pending),t.pending+=a}function w(t,e,a,i){var n=2*e,r=2*a;return t[n]<t[r]||t[n]===t[r]&&i[e]<=i[a]}function p(t,e,a){for(var 
i=t.heap[a],n=a<<1;n<=t.heap_len&&(n<t.heap_len&&w(e,t.heap[n+1],t.heap[n],t.depth)&&n++,!w(e,i,t.heap[n],t.depth));)t.heap[a]=t.heap[n],a=n,n<<=1;t.heap[a]=i}function v(t,e,a){var i,n,r,o,d=0;if(0!==t.last_lit)do{i=t.pending_buf[t.d_buf+2*d]<<8|t.pending_buf[t.d_buf+2*d+1],n=t.pending_buf[t.l_buf+d],d++,0===i?h(t,n,e):(h(t,(r=at[n])+T+1,e),0!==(o=W[r])&&l(t,n-=it[r],o),h(t,r=s(--i),a),0!==(o=J[r])&&l(t,i-=nt[r],o))}while(d<t.last_lit);h(t,Y,e)}function k(t,e){var a,i,n,r=e.dyn_tree,s=e.stat_desc.static_tree,o=e.stat_desc.has_stree,l=e.stat_desc.elems,h=-1;for(t.heap_len=0,t.heap_max=j,a=0;a<l;a++)0!==r[2*a]?(t.heap[++t.heap_len]=h=a,t.depth[a]=0):r[2*a+1]=0;for(;t.heap_len<2;)r[2*(n=t.heap[++t.heap_len]=h<2?++h:0)]=1,t.depth[n]=0,t.opt_len--,o&&(t.static_len-=s[2*n+1]);for(e.max_code=h,a=t.heap_len>>1;a>=1;a--)p(t,r,a);n=l;do{a=t.heap[1],t.heap[1]=t.heap[t.heap_len--],p(t,r,1),i=t.heap[1],t.heap[--t.heap_max]=a,t.heap[--t.heap_max]=i,r[2*n]=r[2*a]+r[2*i],t.depth[n]=(t.depth[a]>=t.depth[i]?t.depth[a]:t.depth[i])+1,r[2*a+1]=r[2*i+1]=n,t.heap[1]=n++,p(t,r,1)}while(t.heap_len>=2);t.heap[--t.heap_max]=t.heap[1],_(t,e),u(r,h,t.bl_count)}function y(t,e,a){var i,n,r=-1,s=e[1],o=0,l=7,h=4;for(0===s&&(l=138,h=3),e[2*(a+1)+1]=65535,i=0;i<=a;i++)n=s,s=e[2*(i+1)+1],++o<l&&n===s||(o<h?t.bl_tree[2*n]+=o:0!==n?(n!==r&&t.bl_tree[2*n]++,t.bl_tree[2*q]++):o<=10?t.bl_tree[2*G]++:t.bl_tree[2*X]++,o=0,r=n,0===s?(l=138,h=3):n===s?(l=6,h=3):(l=7,h=4))}function x(t,e,a){var i,n,r=-1,s=e[1],o=0,d=7,f=4;for(0===s&&(d=138,f=3),i=0;i<=a;i++)if(n=s,s=e[2*(i+1)+1],!(++o<d&&n===s)){if(o<f)do{h(t,n,t.bl_tree)}while(0!=--o);else 0!==n?(n!==r&&(h(t,n,t.bl_tree),o--),h(t,q,t.bl_tree),l(t,o-3,2)):o<=10?(h(t,G,t.bl_tree),l(t,o-3,3)):(h(t,X,t.bl_tree),l(t,o-11,7));o=0,r=n,0===s?(d=138,f=3):n===s?(d=6,f=3):(d=7,f=4)}}function z(t){var e;for(y(t,t.dyn_ltree,t.l_desc.max_code),y(t,t.dyn_dtree,t.d_desc.max_code),k(t,t.bl_desc),e=H-1;e>=3&&0===t.bl_tree[2*V[e]+1];e--);return 
t.opt_len+=3*(e+1)+5+5+4,e}function B(t,e,a,i){var n;for(l(t,e-257,5),l(t,a-1,5),l(t,i-4,4),n=0;n<i;n++)l(t,t.bl_tree[2*V[n]+1],3);x(t,t.dyn_ltree,e-1),x(t,t.dyn_dtree,a-1)}function S(t){var e,a=4093624447;for(e=0;e<=31;e++,a>>>=1)if(1&a&&0!==t.dyn_ltree[2*e])return R;if(0!==t.dyn_ltree[18]||0!==t.dyn_ltree[20]||0!==t.dyn_ltree[26])return C;for(e=32;e<T;e++)if(0!==t.dyn_ltree[2*e])return C;return R}function E(t,e,a,i){l(t,(O<<1)+(i?1:0),3),m(t,e,a,!0)}var A=t("../utils/common"),Z=4,R=0,C=1,N=2,O=0,D=1,I=2,U=29,T=256,F=T+1+U,L=30,H=19,j=2*F+1,K=15,M=16,P=7,Y=256,q=16,G=17,X=18,W=[0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,0],J=[0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13],Q=[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,3,7],V=[16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15],$=new Array(2*(F+2));i($);var tt=new Array(2*L);i(tt);var et=new Array(512);i(et);var at=new Array(256);i(at);var it=new Array(U);i(it);var nt=new Array(L);i(nt);var rt,st,ot,lt=!1;a._tr_init=function(t){lt||(c(),lt=!0),t.l_desc=new r(t.dyn_ltree,rt),t.d_desc=new r(t.dyn_dtree,st),t.bl_desc=new r(t.bl_tree,ot),t.bi_buf=0,t.bi_valid=0,b(t)},a._tr_stored_block=E,a._tr_flush_block=function(t,e,a,i){var n,r,s=0;t.level>0?(t.strm.data_type===N&&(t.strm.data_type=S(t)),k(t,t.l_desc),k(t,t.d_desc),s=z(t),n=t.opt_len+3+7>>>3,(r=t.static_len+3+7>>>3)<=n&&(n=r)):n=r=a+5,a+4<=n&&-1!==e?E(t,e,a,i):t.strategy===Z||r===n?(l(t,(D<<1)+(i?1:0),3),v(t,$,tt)):(l(t,(I<<1)+(i?1:0),3),B(t,t.l_desc.max_code+1,t.d_desc.max_code+1,s+1),v(t,t.dyn_ltree,t.dyn_dtree)),b(t),i&&g(t)},a._tr_tally=function(t,e,a){return t.pending_buf[t.d_buf+2*t.last_lit]=e>>>8&255,t.pending_buf[t.d_buf+2*t.last_lit+1]=255&e,t.pending_buf[t.l_buf+t.last_lit]=255&a,t.last_lit++,0===e?t.dyn_ltree[2*a]++:(t.matches++,e--,t.dyn_ltree[2*(at[a]+T+1)]++,t.dyn_dtree[2*s(e)]++),t.last_lit===t.lit_bufsize-1},a._tr_align=function(t){l(t,D<<1,3),h(t,Y,$),f(t)}},{"../utils/common":3}],15:[function(t,e,a){"use 
strict";e.exports=function(){this.input=null,this.next_in=0,this.avail_in=0,this.total_in=0,this.output=null,this.next_out=0,this.avail_out=0,this.total_out=0,this.msg="",this.state=null,this.data_type=2,this.adler=0}},{}],"/":[function(t,e,a){"use strict";var i={};(0,t("./lib/utils/common").assign)(i,t("./lib/deflate"),t("./lib/inflate"),t("./lib/zlib/constants")),e.exports=i},{"./lib/deflate":1,"./lib/inflate":2,"./lib/utils/common":3,"./lib/zlib/constants":6}]},{},[])("/")});'use strict';tr.exportTo('tr.e.importer',function(){const GZIP_MEMBER_HEADER_ID_SIZE=3;const GZIP_HEADER_ID1=0x1f;const GZIP_HEADER_ID2=0x8b;const GZIP_DEFLATE_COMPRESSION=8;function _stringToUInt8Array(str){const array=new Uint8Array(str.length);for(let i=0;i<str.length;++i){array[i]=str.charCodeAt(i);} +return{InMemoryTraceStream,};});!function(t){if("object"==typeof exports&&"undefined"!=typeof module)module.exports=t();else if("function"==typeof define&&define.amd)define([],t);else{("undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:this).pako=t()}}(function(){return function t(e,a,i){function n(s,o){if(!a[s]){if(!e[s]){var l="function"==typeof require&&require;if(!o&&l)return l(s,!0);if(r)return r(s,!0);var h=new Error("Cannot find module '"+s+"'");throw h.code="MODULE_NOT_FOUND",h}var d=a[s]={exports:{}};e[s][0].call(d.exports,function(t){var a=e[s][1][t];return n(a||t)},d,d.exports,t,e,a,i)}return a[s].exports}for(var r="function"==typeof require&&require,s=0;s<i.length;s++)n(i[s]);return n}({1:[function(t,e,a){"use strict";function i(t){if(!(this instanceof i))return new i(t);this.options=s.assign({level:_,method:c,chunkSize:16384,windowBits:15,memLevel:8,strategy:u,to:""},t||{});var e=this.options;e.raw&&e.windowBits>0?e.windowBits=-e.windowBits:e.gzip&&e.windowBits>0&&e.windowBits<16&&(e.windowBits+=16),this.err=0,this.msg="",this.ended=!1,this.chunks=[],this.strm=new h,this.strm.avail_out=0;var 
a=r.deflateInit2(this.strm,e.level,e.method,e.windowBits,e.memLevel,e.strategy);if(a!==f)throw new Error(l[a]);if(e.header&&r.deflateSetHeader(this.strm,e.header),e.dictionary){var n;if(n="string"==typeof e.dictionary?o.string2buf(e.dictionary):"[object ArrayBuffer]"===d.call(e.dictionary)?new Uint8Array(e.dictionary):e.dictionary,(a=r.deflateSetDictionary(this.strm,n))!==f)throw new Error(l[a]);this._dict_set=!0}}function n(t,e){var a=new i(e);if(a.push(t,!0),a.err)throw a.msg||l[a.err];return a.result}var r=t("./zlib/deflate"),s=t("./utils/common"),o=t("./utils/strings"),l=t("./zlib/messages"),h=t("./zlib/zstream"),d=Object.prototype.toString,f=0,_=-1,u=0,c=8;i.prototype.push=function(t,e){var a,i,n=this.strm,l=this.options.chunkSize;if(this.ended)return!1;i=e===~~e?e:!0===e?4:0,"string"==typeof t?n.input=o.string2buf(t):"[object ArrayBuffer]"===d.call(t)?n.input=new Uint8Array(t):n.input=t,n.next_in=0,n.avail_in=n.input.length;do{if(0===n.avail_out&&(n.output=new s.Buf8(l),n.next_out=0,n.avail_out=l),1!==(a=r.deflate(n,i))&&a!==f)return this.onEnd(a),this.ended=!0,!1;0!==n.avail_out&&(0!==n.avail_in||4!==i&&2!==i)||("string"===this.options.to?this.onData(o.buf2binstring(s.shrinkBuf(n.output,n.next_out))):this.onData(s.shrinkBuf(n.output,n.next_out)))}while((n.avail_in>0||0===n.avail_out)&&1!==a);return 4===i?(a=r.deflateEnd(this.strm),this.onEnd(a),this.ended=!0,a===f):2!==i||(this.onEnd(f),n.avail_out=0,!0)},i.prototype.onData=function(t){this.chunks.push(t)},i.prototype.onEnd=function(t){t===f&&("string"===this.options.to?this.result=this.chunks.join(""):this.result=s.flattenChunks(this.chunks)),this.chunks=[],this.err=t,this.msg=this.strm.msg},a.Deflate=i,a.deflate=n,a.deflateRaw=function(t,e){return e=e||{},e.raw=!0,n(t,e)},a.gzip=function(t,e){return e=e||{},e.gzip=!0,n(t,e)}},{"./utils/common":3,"./utils/strings":4,"./zlib/deflate":8,"./zlib/messages":13,"./zlib/zstream":15}],2:[function(t,e,a){"use strict";function i(t){if(!(this instanceof i))return new 
i(t);this.options=s.assign({chunkSize:16384,windowBits:0,to:""},t||{});var e=this.options;e.raw&&e.windowBits>=0&&e.windowBits<16&&(e.windowBits=-e.windowBits,0===e.windowBits&&(e.windowBits=-15)),!(e.windowBits>=0&&e.windowBits<16)||t&&t.windowBits||(e.windowBits+=32),e.windowBits>15&&e.windowBits<48&&0==(15&e.windowBits)&&(e.windowBits|=15),this.err=0,this.msg="",this.ended=!1,this.chunks=[],this.strm=new d,this.strm.avail_out=0;var a=r.inflateInit2(this.strm,e.windowBits);if(a!==l.Z_OK)throw new Error(h[a]);this.header=new f,r.inflateGetHeader(this.strm,this.header)}function n(t,e){var a=new i(e);if(a.push(t,!0),a.err)throw a.msg||h[a.err];return a.result}var r=t("./zlib/inflate"),s=t("./utils/common"),o=t("./utils/strings"),l=t("./zlib/constants"),h=t("./zlib/messages"),d=t("./zlib/zstream"),f=t("./zlib/gzheader"),_=Object.prototype.toString;i.prototype.push=function(t,e){var a,i,n,h,d,f,u=this.strm,c=this.options.chunkSize,b=this.options.dictionary,g=!1;if(this.ended)return!1;i=e===~~e?e:!0===e?l.Z_FINISH:l.Z_NO_FLUSH,"string"==typeof t?u.input=o.binstring2buf(t):"[object ArrayBuffer]"===_.call(t)?u.input=new Uint8Array(t):u.input=t,u.next_in=0,u.avail_in=u.input.length;do{if(0===u.avail_out&&(u.output=new s.Buf8(c),u.next_out=0,u.avail_out=c),(a=r.inflate(u,l.Z_NO_FLUSH))===l.Z_NEED_DICT&&b&&(f="string"==typeof b?o.string2buf(b):"[object ArrayBuffer]"===_.call(b)?new Uint8Array(b):b,a=r.inflateSetDictionary(this.strm,f)),a===l.Z_BUF_ERROR&&!0===g&&(a=l.Z_OK,g=!1),a!==l.Z_STREAM_END&&a!==l.Z_OK)return 
this.onEnd(a),this.ended=!0,!1;u.next_out&&(0!==u.avail_out&&a!==l.Z_STREAM_END&&(0!==u.avail_in||i!==l.Z_FINISH&&i!==l.Z_SYNC_FLUSH)||("string"===this.options.to?(n=o.utf8border(u.output,u.next_out),h=u.next_out-n,d=o.buf2string(u.output,n),u.next_out=h,u.avail_out=c-h,h&&s.arraySet(u.output,u.output,n,h,0),this.onData(d)):this.onData(s.shrinkBuf(u.output,u.next_out)))),0===u.avail_in&&0===u.avail_out&&(g=!0)}while((u.avail_in>0||0===u.avail_out)&&a!==l.Z_STREAM_END);return a===l.Z_STREAM_END&&(i=l.Z_FINISH),i===l.Z_FINISH?(a=r.inflateEnd(this.strm),this.onEnd(a),this.ended=!0,a===l.Z_OK):i!==l.Z_SYNC_FLUSH||(this.onEnd(l.Z_OK),u.avail_out=0,!0)},i.prototype.onData=function(t){this.chunks.push(t)},i.prototype.onEnd=function(t){t===l.Z_OK&&("string"===this.options.to?this.result=this.chunks.join(""):this.result=s.flattenChunks(this.chunks)),this.chunks=[],this.err=t,this.msg=this.strm.msg},a.Inflate=i,a.inflate=n,a.inflateRaw=function(t,e){return e=e||{},e.raw=!0,n(t,e)},a.ungzip=n},{"./utils/common":3,"./utils/strings":4,"./zlib/constants":6,"./zlib/gzheader":9,"./zlib/inflate":11,"./zlib/messages":13,"./zlib/zstream":15}],3:[function(t,e,a){"use strict";function i(t,e){return Object.prototype.hasOwnProperty.call(t,e)}var n="undefined"!=typeof Uint8Array&&"undefined"!=typeof Uint16Array&&"undefined"!=typeof Int32Array;a.assign=function(t){for(var e=Array.prototype.slice.call(arguments,1);e.length;){var a=e.shift();if(a){if("object"!=typeof a)throw new TypeError(a+"must be non-object");for(var n in a)i(a,n)&&(t[n]=a[n])}}return t},a.shrinkBuf=function(t,e){return t.length===e?t:t.subarray?t.subarray(0,e):(t.length=e,t)};var r={arraySet:function(t,e,a,i,n){if(e.subarray&&t.subarray)t.set(e.subarray(a,a+i),n);else for(var r=0;r<i;r++)t[n+r]=e[a+r]},flattenChunks:function(t){var e,a,i,n,r,s;for(i=0,e=0,a=t.length;e<a;e++)i+=t[e].length;for(s=new Uint8Array(i),n=0,e=0,a=t.length;e<a;e++)r=t[e],s.set(r,n),n+=r.length;return s}},s={arraySet:function(t,e,a,i,n){for(var 
r=0;r<i;r++)t[n+r]=e[a+r]},flattenChunks:function(t){return[].concat.apply([],t)}};a.setTyped=function(t){t?(a.Buf8=Uint8Array,a.Buf16=Uint16Array,a.Buf32=Int32Array,a.assign(a,r)):(a.Buf8=Array,a.Buf16=Array,a.Buf32=Array,a.assign(a,s))},a.setTyped(n)},{}],4:[function(t,e,a){"use strict";function i(t,e){if(e<65537&&(t.subarray&&s||!t.subarray&&r))return String.fromCharCode.apply(null,n.shrinkBuf(t,e));for(var a="",i=0;i<e;i++)a+=String.fromCharCode(t[i]);return a}var n=t("./common"),r=!0,s=!0;try{String.fromCharCode.apply(null,[0])}catch(t){r=!1}try{String.fromCharCode.apply(null,new Uint8Array(1))}catch(t){s=!1}for(var o=new n.Buf8(256),l=0;l<256;l++)o[l]=l>=252?6:l>=248?5:l>=240?4:l>=224?3:l>=192?2:1;o[254]=o[254]=1,a.string2buf=function(t){var e,a,i,r,s,o=t.length,l=0;for(r=0;r<o;r++)55296==(64512&(a=t.charCodeAt(r)))&&r+1<o&&56320==(64512&(i=t.charCodeAt(r+1)))&&(a=65536+(a-55296<<10)+(i-56320),r++),l+=a<128?1:a<2048?2:a<65536?3:4;for(e=new n.Buf8(l),s=0,r=0;s<l;r++)55296==(64512&(a=t.charCodeAt(r)))&&r+1<o&&56320==(64512&(i=t.charCodeAt(r+1)))&&(a=65536+(a-55296<<10)+(i-56320),r++),a<128?e[s++]=a:a<2048?(e[s++]=192|a>>>6,e[s++]=128|63&a):a<65536?(e[s++]=224|a>>>12,e[s++]=128|a>>>6&63,e[s++]=128|63&a):(e[s++]=240|a>>>18,e[s++]=128|a>>>12&63,e[s++]=128|a>>>6&63,e[s++]=128|63&a);return e},a.buf2binstring=function(t){return i(t,t.length)},a.binstring2buf=function(t){for(var e=new n.Buf8(t.length),a=0,i=e.length;a<i;a++)e[a]=t.charCodeAt(a);return e},a.buf2string=function(t,e){var a,n,r,s,l=e||t.length,h=new Array(2*l);for(n=0,a=0;a<l;)if((r=t[a++])<128)h[n++]=r;else if((s=o[r])>4)h[n++]=65533,a+=s-1;else{for(r&=2===s?31:3===s?15:7;s>1&&a<l;)r=r<<6|63&t[a++],s--;s>1?h[n++]=65533:r<65536?h[n++]=r:(r-=65536,h[n++]=55296|r>>10&1023,h[n++]=56320|1023&r)}return i(h,n)},a.utf8border=function(t,e){var a;for((e=e||t.length)>t.length&&(e=t.length),a=e-1;a>=0&&128==(192&t[a]);)a--;return a<0?e:0===a?e:a+o[t[a]]>e?a:e}},{"./common":3}],5:[function(t,e,a){"use 
strict";e.exports=function(t,e,a,i){for(var n=65535&t|0,r=t>>>16&65535|0,s=0;0!==a;){a-=s=a>2e3?2e3:a;do{r=r+(n=n+e[i++]|0)|0}while(--s);n%=65521,r%=65521}return n|r<<16|0}},{}],6:[function(t,e,a){"use strict";e.exports={Z_NO_FLUSH:0,Z_PARTIAL_FLUSH:1,Z_SYNC_FLUSH:2,Z_FULL_FLUSH:3,Z_FINISH:4,Z_BLOCK:5,Z_TREES:6,Z_OK:0,Z_STREAM_END:1,Z_NEED_DICT:2,Z_ERRNO:-1,Z_STREAM_ERROR:-2,Z_DATA_ERROR:-3,Z_BUF_ERROR:-5,Z_NO_COMPRESSION:0,Z_BEST_SPEED:1,Z_BEST_COMPRESSION:9,Z_DEFAULT_COMPRESSION:-1,Z_FILTERED:1,Z_HUFFMAN_ONLY:2,Z_RLE:3,Z_FIXED:4,Z_DEFAULT_STRATEGY:0,Z_BINARY:0,Z_TEXT:1,Z_UNKNOWN:2,Z_DEFLATED:8}},{}],7:[function(t,e,a){"use strict";var i=function(){for(var t,e=[],a=0;a<256;a++){t=a;for(var i=0;i<8;i++)t=1&t?3988292384^t>>>1:t>>>1;e[a]=t}return e}();e.exports=function(t,e,a,n){var r=i,s=n+a;t^=-1;for(var o=n;o<s;o++)t=t>>>8^r[255&(t^e[o])];return-1^t}},{}],8:[function(t,e,a){"use strict";function i(t,e){return t.msg=A[e],e}function n(t){return(t<<1)-(t>4?9:0)}function r(t){for(var e=t.length;--e>=0;)t[e]=0}function s(t){var e=t.state,a=e.pending;a>t.avail_out&&(a=t.avail_out),0!==a&&(z.arraySet(t.output,e.pending_buf,e.pending_out,a,t.next_out),t.next_out+=a,e.pending_out+=a,t.total_out+=a,t.avail_out-=a,e.pending-=a,0===e.pending&&(e.pending_out=0))}function o(t,e){B._tr_flush_block(t,t.block_start>=0?t.block_start:-1,t.strstart-t.block_start,e),t.block_start=t.strstart,s(t.strm)}function l(t,e){t.pending_buf[t.pending++]=e}function h(t,e){t.pending_buf[t.pending++]=e>>>8&255,t.pending_buf[t.pending++]=255&e}function d(t,e,a,i){var n=t.avail_in;return n>i&&(n=i),0===n?0:(t.avail_in-=n,z.arraySet(e,t.input,t.next_in,n,a),1===t.state.wrap?t.adler=S(t.adler,e,n,a):2===t.state.wrap&&(t.adler=E(t.adler,e,n,a)),t.next_in+=n,t.total_in+=n,n)}function f(t,e){var 
a,i,n=t.max_chain_length,r=t.strstart,s=t.prev_length,o=t.nice_match,l=t.strstart>t.w_size-it?t.strstart-(t.w_size-it):0,h=t.window,d=t.w_mask,f=t.prev,_=t.strstart+at,u=h[r+s-1],c=h[r+s];t.prev_length>=t.good_match&&(n>>=2),o>t.lookahead&&(o=t.lookahead);do{if(a=e,h[a+s]===c&&h[a+s-1]===u&&h[a]===h[r]&&h[++a]===h[r+1]){r+=2,a++;do{}while(h[++r]===h[++a]&&h[++r]===h[++a]&&h[++r]===h[++a]&&h[++r]===h[++a]&&h[++r]===h[++a]&&h[++r]===h[++a]&&h[++r]===h[++a]&&h[++r]===h[++a]&&r<_);if(i=at-(_-r),r=_-at,i>s){if(t.match_start=e,s=i,i>=o)break;u=h[r+s-1],c=h[r+s]}}}while((e=f[e&d])>l&&0!=--n);return s<=t.lookahead?s:t.lookahead}function _(t){var e,a,i,n,r,s=t.w_size;do{if(n=t.window_size-t.lookahead-t.strstart,t.strstart>=s+(s-it)){z.arraySet(t.window,t.window,s,s,0),t.match_start-=s,t.strstart-=s,t.block_start-=s,e=a=t.hash_size;do{i=t.head[--e],t.head[e]=i>=s?i-s:0}while(--a);e=a=s;do{i=t.prev[--e],t.prev[e]=i>=s?i-s:0}while(--a);n+=s}if(0===t.strm.avail_in)break;if(a=d(t.strm,t.window,t.strstart+t.lookahead,n),t.lookahead+=a,t.lookahead+t.insert>=et)for(r=t.strstart-t.insert,t.ins_h=t.window[r],t.ins_h=(t.ins_h<<t.hash_shift^t.window[r+1])&t.hash_mask;t.insert&&(t.ins_h=(t.ins_h<<t.hash_shift^t.window[r+et-1])&t.hash_mask,t.prev[r&t.w_mask]=t.head[t.ins_h],t.head[t.ins_h]=r,r++,t.insert--,!(t.lookahead+t.insert<et)););}while(t.lookahead<it&&0!==t.strm.avail_in)}function u(t,e){for(var a,i;;){if(t.lookahead<it){if(_(t),t.lookahead<it&&e===Z)return 
_t;if(0===t.lookahead)break}if(a=0,t.lookahead>=et&&(t.ins_h=(t.ins_h<<t.hash_shift^t.window[t.strstart+et-1])&t.hash_mask,a=t.prev[t.strstart&t.w_mask]=t.head[t.ins_h],t.head[t.ins_h]=t.strstart),0!==a&&t.strstart-a<=t.w_size-it&&(t.match_length=f(t,a)),t.match_length>=et)if(i=B._tr_tally(t,t.strstart-t.match_start,t.match_length-et),t.lookahead-=t.match_length,t.match_length<=t.max_lazy_match&&t.lookahead>=et){t.match_length--;do{t.strstart++,t.ins_h=(t.ins_h<<t.hash_shift^t.window[t.strstart+et-1])&t.hash_mask,a=t.prev[t.strstart&t.w_mask]=t.head[t.ins_h],t.head[t.ins_h]=t.strstart}while(0!=--t.match_length);t.strstart++}else t.strstart+=t.match_length,t.match_length=0,t.ins_h=t.window[t.strstart],t.ins_h=(t.ins_h<<t.hash_shift^t.window[t.strstart+1])&t.hash_mask;else i=B._tr_tally(t,0,t.window[t.strstart]),t.lookahead--,t.strstart++;if(i&&(o(t,!1),0===t.strm.avail_out))return _t}return t.insert=t.strstart<et-1?t.strstart:et-1,e===N?(o(t,!0),0===t.strm.avail_out?ct:bt):t.last_lit&&(o(t,!1),0===t.strm.avail_out)?_t:ut}function c(t,e){for(var a,i,n;;){if(t.lookahead<it){if(_(t),t.lookahead<it&&e===Z)return 
_t;if(0===t.lookahead)break}if(a=0,t.lookahead>=et&&(t.ins_h=(t.ins_h<<t.hash_shift^t.window[t.strstart+et-1])&t.hash_mask,a=t.prev[t.strstart&t.w_mask]=t.head[t.ins_h],t.head[t.ins_h]=t.strstart),t.prev_length=t.match_length,t.prev_match=t.match_start,t.match_length=et-1,0!==a&&t.prev_length<t.max_lazy_match&&t.strstart-a<=t.w_size-it&&(t.match_length=f(t,a),t.match_length<=5&&(t.strategy===H||t.match_length===et&&t.strstart-t.match_start>4096)&&(t.match_length=et-1)),t.prev_length>=et&&t.match_length<=t.prev_length){n=t.strstart+t.lookahead-et,i=B._tr_tally(t,t.strstart-1-t.prev_match,t.prev_length-et),t.lookahead-=t.prev_length-1,t.prev_length-=2;do{++t.strstart<=n&&(t.ins_h=(t.ins_h<<t.hash_shift^t.window[t.strstart+et-1])&t.hash_mask,a=t.prev[t.strstart&t.w_mask]=t.head[t.ins_h],t.head[t.ins_h]=t.strstart)}while(0!=--t.prev_length);if(t.match_available=0,t.match_length=et-1,t.strstart++,i&&(o(t,!1),0===t.strm.avail_out))return _t}else if(t.match_available){if((i=B._tr_tally(t,0,t.window[t.strstart-1]))&&o(t,!1),t.strstart++,t.lookahead--,0===t.strm.avail_out)return _t}else t.match_available=1,t.strstart++,t.lookahead--}return t.match_available&&(i=B._tr_tally(t,0,t.window[t.strstart-1]),t.match_available=0),t.insert=t.strstart<et-1?t.strstart:et-1,e===N?(o(t,!0),0===t.strm.avail_out?ct:bt):t.last_lit&&(o(t,!1),0===t.strm.avail_out)?_t:ut}function b(t,e){for(var a,i,n,r,s=t.window;;){if(t.lookahead<=at){if(_(t),t.lookahead<=at&&e===Z)return 
_t;if(0===t.lookahead)break}if(t.match_length=0,t.lookahead>=et&&t.strstart>0&&(n=t.strstart-1,(i=s[n])===s[++n]&&i===s[++n]&&i===s[++n])){r=t.strstart+at;do{}while(i===s[++n]&&i===s[++n]&&i===s[++n]&&i===s[++n]&&i===s[++n]&&i===s[++n]&&i===s[++n]&&i===s[++n]&&n<r);t.match_length=at-(r-n),t.match_length>t.lookahead&&(t.match_length=t.lookahead)}if(t.match_length>=et?(a=B._tr_tally(t,1,t.match_length-et),t.lookahead-=t.match_length,t.strstart+=t.match_length,t.match_length=0):(a=B._tr_tally(t,0,t.window[t.strstart]),t.lookahead--,t.strstart++),a&&(o(t,!1),0===t.strm.avail_out))return _t}return t.insert=0,e===N?(o(t,!0),0===t.strm.avail_out?ct:bt):t.last_lit&&(o(t,!1),0===t.strm.avail_out)?_t:ut}function g(t,e){for(var a;;){if(0===t.lookahead&&(_(t),0===t.lookahead)){if(e===Z)return _t;break}if(t.match_length=0,a=B._tr_tally(t,0,t.window[t.strstart]),t.lookahead--,t.strstart++,a&&(o(t,!1),0===t.strm.avail_out))return _t}return t.insert=0,e===N?(o(t,!0),0===t.strm.avail_out?ct:bt):t.last_lit&&(o(t,!1),0===t.strm.avail_out)?_t:ut}function m(t,e,a,i,n){this.good_length=t,this.max_lazy=e,this.nice_length=a,this.max_chain=i,this.func=n}function w(t){t.window_size=2*t.w_size,r(t.head),t.max_lazy_match=x[t.level].max_lazy,t.good_match=x[t.level].good_length,t.nice_match=x[t.level].nice_length,t.max_chain_length=x[t.level].max_chain,t.strstart=0,t.block_start=0,t.lookahead=0,t.insert=0,t.match_length=t.prev_length=et-1,t.match_available=0,t.ins_h=0}function 
p(){this.strm=null,this.status=0,this.pending_buf=null,this.pending_buf_size=0,this.pending_out=0,this.pending=0,this.wrap=0,this.gzhead=null,this.gzindex=0,this.method=q,this.last_flush=-1,this.w_size=0,this.w_bits=0,this.w_mask=0,this.window=null,this.window_size=0,this.prev=null,this.head=null,this.ins_h=0,this.hash_size=0,this.hash_bits=0,this.hash_mask=0,this.hash_shift=0,this.block_start=0,this.match_length=0,this.prev_match=0,this.match_available=0,this.strstart=0,this.match_start=0,this.lookahead=0,this.prev_length=0,this.max_chain_length=0,this.max_lazy_match=0,this.level=0,this.strategy=0,this.good_match=0,this.nice_match=0,this.dyn_ltree=new z.Buf16(2*$),this.dyn_dtree=new z.Buf16(2*(2*Q+1)),this.bl_tree=new z.Buf16(2*(2*V+1)),r(this.dyn_ltree),r(this.dyn_dtree),r(this.bl_tree),this.l_desc=null,this.d_desc=null,this.bl_desc=null,this.bl_count=new z.Buf16(tt+1),this.heap=new z.Buf16(2*J+1),r(this.heap),this.heap_len=0,this.heap_max=0,this.depth=new z.Buf16(2*J+1),r(this.depth),this.l_buf=0,this.lit_bufsize=0,this.last_lit=0,this.d_buf=0,this.opt_len=0,this.static_len=0,this.matches=0,this.insert=0,this.bi_buf=0,this.bi_valid=0}function v(t){var e;return t&&t.state?(t.total_in=t.total_out=0,t.data_type=Y,e=t.state,e.pending=0,e.pending_out=0,e.wrap<0&&(e.wrap=-e.wrap),e.status=e.wrap?rt:dt,t.adler=2===e.wrap?0:1,e.last_flush=Z,B._tr_init(e),D):i(t,U)}function k(t){var e=v(t);return e===D&&w(t.state),e}function y(t,e,a,n,r,s){if(!t)return U;var o=1;if(e===L&&(e=6),n<0?(o=0,n=-n):n>15&&(o=2,n-=16),r<1||r>G||a!==q||n<8||n>15||e<0||e>9||s<0||s>M)return i(t,U);8===n&&(n=9);var l=new p;return t.state=l,l.strm=t,l.wrap=o,l.gzhead=null,l.w_bits=n,l.w_size=1<<l.w_bits,l.w_mask=l.w_size-1,l.hash_bits=r+7,l.hash_size=1<<l.hash_bits,l.hash_mask=l.hash_size-1,l.hash_shift=~~((l.hash_bits+et-1)/et),l.window=new z.Buf8(2*l.w_size),l.head=new z.Buf16(l.hash_size),l.prev=new z.Buf16(l.w_size),l.lit_bufsize=1<<r+6,l.pending_buf_size=4*l.lit_bufsize,l.pending_buf=new 
z.Buf8(l.pending_buf_size),l.d_buf=1*l.lit_bufsize,l.l_buf=3*l.lit_bufsize,l.level=e,l.strategy=s,l.method=a,k(t)}var x,z=t("../utils/common"),B=t("./trees"),S=t("./adler32"),E=t("./crc32"),A=t("./messages"),Z=0,R=1,C=3,N=4,O=5,D=0,I=1,U=-2,T=-3,F=-5,L=-1,H=1,j=2,K=3,M=4,P=0,Y=2,q=8,G=9,X=15,W=8,J=286,Q=30,V=19,$=2*J+1,tt=15,et=3,at=258,it=at+et+1,nt=32,rt=42,st=69,ot=73,lt=91,ht=103,dt=113,ft=666,_t=1,ut=2,ct=3,bt=4,gt=3;x=[new m(0,0,0,0,function(t,e){var a=65535;for(a>t.pending_buf_size-5&&(a=t.pending_buf_size-5);;){if(t.lookahead<=1){if(_(t),0===t.lookahead&&e===Z)return _t;if(0===t.lookahead)break}t.strstart+=t.lookahead,t.lookahead=0;var i=t.block_start+a;if((0===t.strstart||t.strstart>=i)&&(t.lookahead=t.strstart-i,t.strstart=i,o(t,!1),0===t.strm.avail_out))return _t;if(t.strstart-t.block_start>=t.w_size-it&&(o(t,!1),0===t.strm.avail_out))return _t}return t.insert=0,e===N?(o(t,!0),0===t.strm.avail_out?ct:bt):(t.strstart>t.block_start&&(o(t,!1),t.strm.avail_out),_t)}),new m(4,4,8,4,u),new m(4,5,16,8,u),new m(4,6,32,32,u),new m(4,4,16,16,c),new m(8,16,32,32,c),new m(8,16,128,128,c),new m(8,32,128,256,c),new m(32,128,258,1024,c),new m(32,258,258,4096,c)],a.deflateInit=function(t,e){return y(t,e,q,X,W,P)},a.deflateInit2=y,a.deflateReset=k,a.deflateResetKeep=v,a.deflateSetHeader=function(t,e){return t&&t.state?2!==t.state.wrap?U:(t.state.gzhead=e,D):U},a.deflate=function(t,e){var a,o,d,f;if(!t||!t.state||e>O||e<0)return t?i(t,U):U;if(o=t.state,!t.output||!t.input&&0!==t.avail_in||o.status===ft&&e!==N)return 
i(t,0===t.avail_out?F:U);if(o.strm=t,a=o.last_flush,o.last_flush=e,o.status===rt)if(2===o.wrap)t.adler=0,l(o,31),l(o,139),l(o,8),o.gzhead?(l(o,(o.gzhead.text?1:0)+(o.gzhead.hcrc?2:0)+(o.gzhead.extra?4:0)+(o.gzhead.name?8:0)+(o.gzhead.comment?16:0)),l(o,255&o.gzhead.time),l(o,o.gzhead.time>>8&255),l(o,o.gzhead.time>>16&255),l(o,o.gzhead.time>>24&255),l(o,9===o.level?2:o.strategy>=j||o.level<2?4:0),l(o,255&o.gzhead.os),o.gzhead.extra&&o.gzhead.extra.length&&(l(o,255&o.gzhead.extra.length),l(o,o.gzhead.extra.length>>8&255)),o.gzhead.hcrc&&(t.adler=E(t.adler,o.pending_buf,o.pending,0)),o.gzindex=0,o.status=st):(l(o,0),l(o,0),l(o,0),l(o,0),l(o,0),l(o,9===o.level?2:o.strategy>=j||o.level<2?4:0),l(o,gt),o.status=dt);else{var _=q+(o.w_bits-8<<4)<<8;_|=(o.strategy>=j||o.level<2?0:o.level<6?1:6===o.level?2:3)<<6,0!==o.strstart&&(_|=nt),_+=31-_%31,o.status=dt,h(o,_),0!==o.strstart&&(h(o,t.adler>>>16),h(o,65535&t.adler)),t.adler=1}if(o.status===st)if(o.gzhead.extra){for(d=o.pending;o.gzindex<(65535&o.gzhead.extra.length)&&(o.pending!==o.pending_buf_size||(o.gzhead.hcrc&&o.pending>d&&(t.adler=E(t.adler,o.pending_buf,o.pending-d,d)),s(t),d=o.pending,o.pending!==o.pending_buf_size));)l(o,255&o.gzhead.extra[o.gzindex]),o.gzindex++;o.gzhead.hcrc&&o.pending>d&&(t.adler=E(t.adler,o.pending_buf,o.pending-d,d)),o.gzindex===o.gzhead.extra.length&&(o.gzindex=0,o.status=ot)}else o.status=ot;if(o.status===ot)if(o.gzhead.name){d=o.pending;do{if(o.pending===o.pending_buf_size&&(o.gzhead.hcrc&&o.pending>d&&(t.adler=E(t.adler,o.pending_buf,o.pending-d,d)),s(t),d=o.pending,o.pending===o.pending_buf_size)){f=1;break}f=o.gzindex<o.gzhead.name.length?255&o.gzhead.name.charCodeAt(o.gzindex++):0,l(o,f)}while(0!==f);o.gzhead.hcrc&&o.pending>d&&(t.adler=E(t.adler,o.pending_buf,o.pending-d,d)),0===f&&(o.gzindex=0,o.status=lt)}else 
o.status=lt;if(o.status===lt)if(o.gzhead.comment){d=o.pending;do{if(o.pending===o.pending_buf_size&&(o.gzhead.hcrc&&o.pending>d&&(t.adler=E(t.adler,o.pending_buf,o.pending-d,d)),s(t),d=o.pending,o.pending===o.pending_buf_size)){f=1;break}f=o.gzindex<o.gzhead.comment.length?255&o.gzhead.comment.charCodeAt(o.gzindex++):0,l(o,f)}while(0!==f);o.gzhead.hcrc&&o.pending>d&&(t.adler=E(t.adler,o.pending_buf,o.pending-d,d)),0===f&&(o.status=ht)}else o.status=ht;if(o.status===ht&&(o.gzhead.hcrc?(o.pending+2>o.pending_buf_size&&s(t),o.pending+2<=o.pending_buf_size&&(l(o,255&t.adler),l(o,t.adler>>8&255),t.adler=0,o.status=dt)):o.status=dt),0!==o.pending){if(s(t),0===t.avail_out)return o.last_flush=-1,D}else if(0===t.avail_in&&n(e)<=n(a)&&e!==N)return i(t,F);if(o.status===ft&&0!==t.avail_in)return i(t,F);if(0!==t.avail_in||0!==o.lookahead||e!==Z&&o.status!==ft){var u=o.strategy===j?g(o,e):o.strategy===K?b(o,e):x[o.level].func(o,e);if(u!==ct&&u!==bt||(o.status=ft),u===_t||u===ct)return 0===t.avail_out&&(o.last_flush=-1),D;if(u===ut&&(e===R?B._tr_align(o):e!==O&&(B._tr_stored_block(o,0,0,!1),e===C&&(r(o.head),0===o.lookahead&&(o.strstart=0,o.block_start=0,o.insert=0))),s(t),0===t.avail_out))return o.last_flush=-1,D}return e!==N?D:o.wrap<=0?I:(2===o.wrap?(l(o,255&t.adler),l(o,t.adler>>8&255),l(o,t.adler>>16&255),l(o,t.adler>>24&255),l(o,255&t.total_in),l(o,t.total_in>>8&255),l(o,t.total_in>>16&255),l(o,t.total_in>>24&255)):(h(o,t.adler>>>16),h(o,65535&t.adler)),s(t),o.wrap>0&&(o.wrap=-o.wrap),0!==o.pending?D:I)},a.deflateEnd=function(t){var e;return t&&t.state?(e=t.state.status)!==rt&&e!==st&&e!==ot&&e!==lt&&e!==ht&&e!==dt&&e!==ft?i(t,U):(t.state=null,e===dt?i(t,T):D):U},a.deflateSetDictionary=function(t,e){var a,i,n,s,o,l,h,d,f=e.length;if(!t||!t.state)return U;if(a=t.state,2===(s=a.wrap)||1===s&&a.status!==rt||a.lookahead)return U;for(1===s&&(t.adler=S(t.adler,e,f,0)),a.wrap=0,f>=a.w_size&&(0===s&&(r(a.head),a.strstart=0,a.block_start=0,a.insert=0),d=new 
z.Buf8(a.w_size),z.arraySet(d,e,f-a.w_size,a.w_size,0),e=d,f=a.w_size),o=t.avail_in,l=t.next_in,h=t.input,t.avail_in=f,t.next_in=0,t.input=e,_(a);a.lookahead>=et;){i=a.strstart,n=a.lookahead-(et-1);do{a.ins_h=(a.ins_h<<a.hash_shift^a.window[i+et-1])&a.hash_mask,a.prev[i&a.w_mask]=a.head[a.ins_h],a.head[a.ins_h]=i,i++}while(--n);a.strstart=i,a.lookahead=et-1,_(a)}return a.strstart+=a.lookahead,a.block_start=a.strstart,a.insert=a.lookahead,a.lookahead=0,a.match_length=a.prev_length=et-1,a.match_available=0,t.next_in=l,t.input=h,t.avail_in=o,a.wrap=s,D},a.deflateInfo="pako deflate (from Nodeca project)"},{"../utils/common":3,"./adler32":5,"./crc32":7,"./messages":13,"./trees":14}],9:[function(t,e,a){"use strict";e.exports=function(){this.text=0,this.time=0,this.xflags=0,this.os=0,this.extra=null,this.extra_len=0,this.name="",this.comment="",this.hcrc=0,this.done=!1}},{}],10:[function(t,e,a){"use strict";e.exports=function(t,e){var a,i,n,r,s,o,l,h,d,f,_,u,c,b,g,m,w,p,v,k,y,x,z,B,S;a=t.state,i=t.next_in,B=t.input,n=i+(t.avail_in-5),r=t.next_out,S=t.output,s=r-(e-t.avail_out),o=r+(t.avail_out-257),l=a.dmax,h=a.wsize,d=a.whave,f=a.wnext,_=a.window,u=a.hold,c=a.bits,b=a.lencode,g=a.distcode,m=(1<<a.lenbits)-1,w=(1<<a.distbits)-1;t:do{c<15&&(u+=B[i++]<<c,c+=8,u+=B[i++]<<c,c+=8),p=b[u&m];e:for(;;){if(v=p>>>24,u>>>=v,c-=v,0===(v=p>>>16&255))S[r++]=65535&p;else{if(!(16&v)){if(0==(64&v)){p=b[(65535&p)+(u&(1<<v)-1)];continue e}if(32&v){a.mode=12;break t}t.msg="invalid literal/length code",a.mode=30;break t}k=65535&p,(v&=15)&&(c<v&&(u+=B[i++]<<c,c+=8),k+=u&(1<<v)-1,u>>>=v,c-=v),c<15&&(u+=B[i++]<<c,c+=8,u+=B[i++]<<c,c+=8),p=g[u&w];a:for(;;){if(v=p>>>24,u>>>=v,c-=v,!(16&(v=p>>>16&255))){if(0==(64&v)){p=g[(65535&p)+(u&(1<<v)-1)];continue a}t.msg="invalid distance code",a.mode=30;break t}if(y=65535&p,v&=15,c<v&&(u+=B[i++]<<c,(c+=8)<v&&(u+=B[i++]<<c,c+=8)),(y+=u&(1<<v)-1)>l){t.msg="invalid distance too far back",a.mode=30;break 
t}if(u>>>=v,c-=v,v=r-s,y>v){if((v=y-v)>d&&a.sane){t.msg="invalid distance too far back",a.mode=30;break t}if(x=0,z=_,0===f){if(x+=h-v,v<k){k-=v;do{S[r++]=_[x++]}while(--v);x=r-y,z=S}}else if(f<v){if(x+=h+f-v,(v-=f)<k){k-=v;do{S[r++]=_[x++]}while(--v);if(x=0,f<k){k-=v=f;do{S[r++]=_[x++]}while(--v);x=r-y,z=S}}}else if(x+=f-v,v<k){k-=v;do{S[r++]=_[x++]}while(--v);x=r-y,z=S}for(;k>2;)S[r++]=z[x++],S[r++]=z[x++],S[r++]=z[x++],k-=3;k&&(S[r++]=z[x++],k>1&&(S[r++]=z[x++]))}else{x=r-y;do{S[r++]=S[x++],S[r++]=S[x++],S[r++]=S[x++],k-=3}while(k>2);k&&(S[r++]=S[x++],k>1&&(S[r++]=S[x++]))}break}}break}}while(i<n&&r<o);i-=k=c>>3,u&=(1<<(c-=k<<3))-1,t.next_in=i,t.next_out=r,t.avail_in=i<n?n-i+5:5-(i-n),t.avail_out=r<o?o-r+257:257-(r-o),a.hold=u,a.bits=c}},{}],11:[function(t,e,a){"use strict";function i(t){return(t>>>24&255)+(t>>>8&65280)+((65280&t)<<8)+((255&t)<<24)}function n(){this.mode=0,this.last=!1,this.wrap=0,this.havedict=!1,this.flags=0,this.dmax=0,this.check=0,this.total=0,this.head=null,this.wbits=0,this.wsize=0,this.whave=0,this.wnext=0,this.window=null,this.hold=0,this.bits=0,this.length=0,this.offset=0,this.extra=0,this.lencode=null,this.distcode=null,this.lenbits=0,this.distbits=0,this.ncode=0,this.nlen=0,this.ndist=0,this.have=0,this.next=null,this.lens=new u.Buf16(320),this.work=new u.Buf16(288),this.lendyn=null,this.distdyn=null,this.sane=0,this.back=0,this.was=0}function r(t){var e;return t&&t.state?(e=t.state,t.total_in=t.total_out=e.total=0,t.msg="",e.wrap&&(t.adler=1&e.wrap),e.mode=N,e.last=0,e.havedict=0,e.dmax=32768,e.head=null,e.hold=0,e.bits=0,e.lencode=e.lendyn=new u.Buf32(dt),e.distcode=e.distdyn=new u.Buf32(ft),e.sane=1,e.back=-1,z):E}function s(t){var e;return t&&t.state?(e=t.state,e.wsize=0,e.whave=0,e.wnext=0,r(t)):E}function o(t,e){var a,i;return t&&t.state?(i=t.state,e<0?(a=0,e=-e):(a=1+(e>>4),e<48&&(e&=15)),e&&(e<8||e>15)?E:(null!==i.window&&i.wbits!==e&&(i.window=null),i.wrap=a,i.wbits=e,s(t))):E}function l(t,e){var a,i;return t?(i=new 
n,t.state=i,i.window=null,(a=o(t,e))!==z&&(t.state=null),a):E}function h(t){if(ut){var e;for(f=new u.Buf32(512),_=new u.Buf32(32),e=0;e<144;)t.lens[e++]=8;for(;e<256;)t.lens[e++]=9;for(;e<280;)t.lens[e++]=7;for(;e<288;)t.lens[e++]=8;for(m(p,t.lens,0,288,f,0,t.work,{bits:9}),e=0;e<32;)t.lens[e++]=5;m(v,t.lens,0,32,_,0,t.work,{bits:5}),ut=!1}t.lencode=f,t.lenbits=9,t.distcode=_,t.distbits=5}function d(t,e,a,i){var n,r=t.state;return null===r.window&&(r.wsize=1<<r.wbits,r.wnext=0,r.whave=0,r.window=new u.Buf8(r.wsize)),i>=r.wsize?(u.arraySet(r.window,e,a-r.wsize,r.wsize,0),r.wnext=0,r.whave=r.wsize):((n=r.wsize-r.wnext)>i&&(n=i),u.arraySet(r.window,e,a-i,n,r.wnext),(i-=n)?(u.arraySet(r.window,e,a-i,i,0),r.wnext=i,r.whave=r.wsize):(r.wnext+=n,r.wnext===r.wsize&&(r.wnext=0),r.whave<r.wsize&&(r.whave+=n))),0}var f,_,u=t("../utils/common"),c=t("./adler32"),b=t("./crc32"),g=t("./inffast"),m=t("./inftrees"),w=0,p=1,v=2,k=4,y=5,x=6,z=0,B=1,S=2,E=-2,A=-3,Z=-4,R=-5,C=8,N=1,O=2,D=3,I=4,U=5,T=6,F=7,L=8,H=9,j=10,K=11,M=12,P=13,Y=14,q=15,G=16,X=17,W=18,J=19,Q=20,V=21,$=22,tt=23,et=24,at=25,it=26,nt=27,rt=28,st=29,ot=30,lt=31,ht=32,dt=852,ft=592,_t=15,ut=!0;a.inflateReset=s,a.inflateReset2=o,a.inflateResetKeep=r,a.inflateInit=function(t){return l(t,_t)},a.inflateInit2=l,a.inflate=function(t,e){var a,n,r,s,o,l,f,_,dt,ft,_t,ut,ct,bt,gt,mt,wt,pt,vt,kt,yt,xt,zt,Bt,St=0,Et=new u.Buf8(4),At=[16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15];if(!t||!t.state||!t.output||!t.input&&0!==t.avail_in)return E;(a=t.state).mode===M&&(a.mode=P),o=t.next_out,r=t.output,f=t.avail_out,s=t.next_in,n=t.input,l=t.avail_in,_=a.hold,dt=a.bits,ft=l,_t=f,xt=z;t:for(;;)switch(a.mode){case N:if(0===a.wrap){a.mode=P;break}for(;dt<16;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}if(2&a.wrap&&35615===_){a.check=0,Et[0]=255&_,Et[1]=_>>>8&255,a.check=b(a.check,Et,2,0),_=0,dt=0,a.mode=O;break}if(a.flags=0,a.head&&(a.head.done=!1),!(1&a.wrap)||(((255&_)<<8)+(_>>8))%31){t.msg="incorrect header 
check",a.mode=ot;break}if((15&_)!==C){t.msg="unknown compression method",a.mode=ot;break}if(_>>>=4,dt-=4,yt=8+(15&_),0===a.wbits)a.wbits=yt;else if(yt>a.wbits){t.msg="invalid window size",a.mode=ot;break}a.dmax=1<<yt,t.adler=a.check=1,a.mode=512&_?j:M,_=0,dt=0;break;case O:for(;dt<16;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}if(a.flags=_,(255&a.flags)!==C){t.msg="unknown compression method",a.mode=ot;break}if(57344&a.flags){t.msg="unknown header flags set",a.mode=ot;break}a.head&&(a.head.text=_>>8&1),512&a.flags&&(Et[0]=255&_,Et[1]=_>>>8&255,a.check=b(a.check,Et,2,0)),_=0,dt=0,a.mode=D;case D:for(;dt<32;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}a.head&&(a.head.time=_),512&a.flags&&(Et[0]=255&_,Et[1]=_>>>8&255,Et[2]=_>>>16&255,Et[3]=_>>>24&255,a.check=b(a.check,Et,4,0)),_=0,dt=0,a.mode=I;case I:for(;dt<16;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}a.head&&(a.head.xflags=255&_,a.head.os=_>>8),512&a.flags&&(Et[0]=255&_,Et[1]=_>>>8&255,a.check=b(a.check,Et,2,0)),_=0,dt=0,a.mode=U;case U:if(1024&a.flags){for(;dt<16;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}a.length=_,a.head&&(a.head.extra_len=_),512&a.flags&&(Et[0]=255&_,Et[1]=_>>>8&255,a.check=b(a.check,Et,2,0)),_=0,dt=0}else a.head&&(a.head.extra=null);a.mode=T;case T:if(1024&a.flags&&((ut=a.length)>l&&(ut=l),ut&&(a.head&&(yt=a.head.extra_len-a.length,a.head.extra||(a.head.extra=new Array(a.head.extra_len)),u.arraySet(a.head.extra,n,s,ut,yt)),512&a.flags&&(a.check=b(a.check,n,ut,s)),l-=ut,s+=ut,a.length-=ut),a.length))break t;a.length=0,a.mode=F;case F:if(2048&a.flags){if(0===l)break t;ut=0;do{yt=n[s+ut++],a.head&&yt&&a.length<65536&&(a.head.name+=String.fromCharCode(yt))}while(yt&&ut<l);if(512&a.flags&&(a.check=b(a.check,n,ut,s)),l-=ut,s+=ut,yt)break t}else a.head&&(a.head.name=null);a.length=0,a.mode=L;case L:if(4096&a.flags){if(0===l)break t;ut=0;do{yt=n[s+ut++],a.head&&yt&&a.length<65536&&(a.head.comment+=String.fromCharCode(yt))}while(yt&&ut<l);if(512&a.flags&&(a.check=b(a.check,n,ut,s)),l-=ut,s+=ut,yt)break 
t}else a.head&&(a.head.comment=null);a.mode=H;case H:if(512&a.flags){for(;dt<16;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}if(_!==(65535&a.check)){t.msg="header crc mismatch",a.mode=ot;break}_=0,dt=0}a.head&&(a.head.hcrc=a.flags>>9&1,a.head.done=!0),t.adler=a.check=0,a.mode=M;break;case j:for(;dt<32;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}t.adler=a.check=i(_),_=0,dt=0,a.mode=K;case K:if(0===a.havedict)return t.next_out=o,t.avail_out=f,t.next_in=s,t.avail_in=l,a.hold=_,a.bits=dt,S;t.adler=a.check=1,a.mode=M;case M:if(e===y||e===x)break t;case P:if(a.last){_>>>=7&dt,dt-=7&dt,a.mode=nt;break}for(;dt<3;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}switch(a.last=1&_,_>>>=1,dt-=1,3&_){case 0:a.mode=Y;break;case 1:if(h(a),a.mode=Q,e===x){_>>>=2,dt-=2;break t}break;case 2:a.mode=X;break;case 3:t.msg="invalid block type",a.mode=ot}_>>>=2,dt-=2;break;case Y:for(_>>>=7&dt,dt-=7&dt;dt<32;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}if((65535&_)!=(_>>>16^65535)){t.msg="invalid stored block lengths",a.mode=ot;break}if(a.length=65535&_,_=0,dt=0,a.mode=q,e===x)break t;case q:a.mode=G;case G:if(ut=a.length){if(ut>l&&(ut=l),ut>f&&(ut=f),0===ut)break t;u.arraySet(r,n,s,ut,o),l-=ut,s+=ut,f-=ut,o+=ut,a.length-=ut;break}a.mode=M;break;case X:for(;dt<14;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}if(a.nlen=257+(31&_),_>>>=5,dt-=5,a.ndist=1+(31&_),_>>>=5,dt-=5,a.ncode=4+(15&_),_>>>=4,dt-=4,a.nlen>286||a.ndist>30){t.msg="too many length or distance symbols",a.mode=ot;break}a.have=0,a.mode=W;case W:for(;a.have<a.ncode;){for(;dt<3;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}a.lens[At[a.have++]]=7&_,_>>>=3,dt-=3}for(;a.have<19;)a.lens[At[a.have++]]=0;if(a.lencode=a.lendyn,a.lenbits=7,zt={bits:a.lenbits},xt=m(w,a.lens,0,19,a.lencode,0,a.work,zt),a.lenbits=zt.bits,xt){t.msg="invalid code lengths set",a.mode=ot;break}a.have=0,a.mode=J;case J:for(;a.have<a.nlen+a.ndist;){for(;St=a.lencode[_&(1<<a.lenbits)-1],gt=St>>>24,mt=St>>>16&255,wt=65535&St,!(gt<=dt);){if(0===l)break 
t;l--,_+=n[s++]<<dt,dt+=8}if(wt<16)_>>>=gt,dt-=gt,a.lens[a.have++]=wt;else{if(16===wt){for(Bt=gt+2;dt<Bt;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}if(_>>>=gt,dt-=gt,0===a.have){t.msg="invalid bit length repeat",a.mode=ot;break}yt=a.lens[a.have-1],ut=3+(3&_),_>>>=2,dt-=2}else if(17===wt){for(Bt=gt+3;dt<Bt;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}dt-=gt,yt=0,ut=3+(7&(_>>>=gt)),_>>>=3,dt-=3}else{for(Bt=gt+7;dt<Bt;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}dt-=gt,yt=0,ut=11+(127&(_>>>=gt)),_>>>=7,dt-=7}if(a.have+ut>a.nlen+a.ndist){t.msg="invalid bit length repeat",a.mode=ot;break}for(;ut--;)a.lens[a.have++]=yt}}if(a.mode===ot)break;if(0===a.lens[256]){t.msg="invalid code -- missing end-of-block",a.mode=ot;break}if(a.lenbits=9,zt={bits:a.lenbits},xt=m(p,a.lens,0,a.nlen,a.lencode,0,a.work,zt),a.lenbits=zt.bits,xt){t.msg="invalid literal/lengths set",a.mode=ot;break}if(a.distbits=6,a.distcode=a.distdyn,zt={bits:a.distbits},xt=m(v,a.lens,a.nlen,a.ndist,a.distcode,0,a.work,zt),a.distbits=zt.bits,xt){t.msg="invalid distances set",a.mode=ot;break}if(a.mode=Q,e===x)break t;case Q:a.mode=V;case V:if(l>=6&&f>=258){t.next_out=o,t.avail_out=f,t.next_in=s,t.avail_in=l,a.hold=_,a.bits=dt,g(t,_t),o=t.next_out,r=t.output,f=t.avail_out,s=t.next_in,n=t.input,l=t.avail_in,_=a.hold,dt=a.bits,a.mode===M&&(a.back=-1);break}for(a.back=0;St=a.lencode[_&(1<<a.lenbits)-1],gt=St>>>24,mt=St>>>16&255,wt=65535&St,!(gt<=dt);){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}if(mt&&0==(240&mt)){for(pt=gt,vt=mt,kt=wt;St=a.lencode[kt+((_&(1<<pt+vt)-1)>>pt)],gt=St>>>24,mt=St>>>16&255,wt=65535&St,!(pt+gt<=dt);){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}_>>>=pt,dt-=pt,a.back+=pt}if(_>>>=gt,dt-=gt,a.back+=gt,a.length=wt,0===mt){a.mode=it;break}if(32&mt){a.back=-1,a.mode=M;break}if(64&mt){t.msg="invalid literal/length code",a.mode=ot;break}a.extra=15&mt,a.mode=$;case $:if(a.extra){for(Bt=a.extra;dt<Bt;){if(0===l)break 
t;l--,_+=n[s++]<<dt,dt+=8}a.length+=_&(1<<a.extra)-1,_>>>=a.extra,dt-=a.extra,a.back+=a.extra}a.was=a.length,a.mode=tt;case tt:for(;St=a.distcode[_&(1<<a.distbits)-1],gt=St>>>24,mt=St>>>16&255,wt=65535&St,!(gt<=dt);){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}if(0==(240&mt)){for(pt=gt,vt=mt,kt=wt;St=a.distcode[kt+((_&(1<<pt+vt)-1)>>pt)],gt=St>>>24,mt=St>>>16&255,wt=65535&St,!(pt+gt<=dt);){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}_>>>=pt,dt-=pt,a.back+=pt}if(_>>>=gt,dt-=gt,a.back+=gt,64&mt){t.msg="invalid distance code",a.mode=ot;break}a.offset=wt,a.extra=15&mt,a.mode=et;case et:if(a.extra){for(Bt=a.extra;dt<Bt;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}a.offset+=_&(1<<a.extra)-1,_>>>=a.extra,dt-=a.extra,a.back+=a.extra}if(a.offset>a.dmax){t.msg="invalid distance too far back",a.mode=ot;break}a.mode=at;case at:if(0===f)break t;if(ut=_t-f,a.offset>ut){if((ut=a.offset-ut)>a.whave&&a.sane){t.msg="invalid distance too far back",a.mode=ot;break}ut>a.wnext?(ut-=a.wnext,ct=a.wsize-ut):ct=a.wnext-ut,ut>a.length&&(ut=a.length),bt=a.window}else bt=r,ct=o-a.offset,ut=a.length;ut>f&&(ut=f),f-=ut,a.length-=ut;do{r[o++]=bt[ct++]}while(--ut);0===a.length&&(a.mode=V);break;case it:if(0===f)break t;r[o++]=a.length,f--,a.mode=V;break;case nt:if(a.wrap){for(;dt<32;){if(0===l)break t;l--,_|=n[s++]<<dt,dt+=8}if(_t-=f,t.total_out+=_t,a.total+=_t,_t&&(t.adler=a.check=a.flags?b(a.check,r,_t,o-_t):c(a.check,r,_t,o-_t)),_t=f,(a.flags?_:i(_))!==a.check){t.msg="incorrect data check",a.mode=ot;break}_=0,dt=0}a.mode=rt;case rt:if(a.wrap&&a.flags){for(;dt<32;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}if(_!==(4294967295&a.total)){t.msg="incorrect length check",a.mode=ot;break}_=0,dt=0}a.mode=st;case st:xt=B;break t;case ot:xt=A;break t;case lt:return Z;case ht:default:return E}return 
t.next_out=o,t.avail_out=f,t.next_in=s,t.avail_in=l,a.hold=_,a.bits=dt,(a.wsize||_t!==t.avail_out&&a.mode<ot&&(a.mode<nt||e!==k))&&d(t,t.output,t.next_out,_t-t.avail_out)?(a.mode=lt,Z):(ft-=t.avail_in,_t-=t.avail_out,t.total_in+=ft,t.total_out+=_t,a.total+=_t,a.wrap&&_t&&(t.adler=a.check=a.flags?b(a.check,r,_t,t.next_out-_t):c(a.check,r,_t,t.next_out-_t)),t.data_type=a.bits+(a.last?64:0)+(a.mode===M?128:0)+(a.mode===Q||a.mode===q?256:0),(0===ft&&0===_t||e===k)&&xt===z&&(xt=R),xt)},a.inflateEnd=function(t){if(!t||!t.state)return E;var e=t.state;return e.window&&(e.window=null),t.state=null,z},a.inflateGetHeader=function(t,e){var a;return t&&t.state?0==(2&(a=t.state).wrap)?E:(a.head=e,e.done=!1,z):E},a.inflateSetDictionary=function(t,e){var a,i,n=e.length;return t&&t.state?0!==(a=t.state).wrap&&a.mode!==K?E:a.mode===K&&(i=1,(i=c(i,e,n,0))!==a.check)?A:d(t,e,n,n)?(a.mode=lt,Z):(a.havedict=1,z):E},a.inflateInfo="pako inflate (from Nodeca project)"},{"../utils/common":3,"./adler32":5,"./crc32":7,"./inffast":10,"./inftrees":12}],12:[function(t,e,a){"use strict";var i=t("../utils/common"),n=[3,4,5,6,7,8,9,10,11,13,15,17,19,23,27,31,35,43,51,59,67,83,99,115,131,163,195,227,258,0,0],r=[16,16,16,16,16,16,16,16,17,17,17,17,18,18,18,18,19,19,19,19,20,20,20,20,21,21,21,21,16,72,78],s=[1,2,3,4,5,7,9,13,17,25,33,49,65,97,129,193,257,385,513,769,1025,1537,2049,3073,4097,6145,8193,12289,16385,24577,0,0],o=[16,16,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,24,24,25,25,26,26,27,27,28,28,29,29,64,64];e.exports=function(t,e,a,l,h,d,f,_){var u,c,b,g,m,w,p,v,k,y=_.bits,x=0,z=0,B=0,S=0,E=0,A=0,Z=0,R=0,C=0,N=0,O=null,D=0,I=new i.Buf16(16),U=new i.Buf16(16),T=null,F=0;for(x=0;x<=15;x++)I[x]=0;for(z=0;z<l;z++)I[e[a+z]]++;for(E=y,S=15;S>=1&&0===I[S];S--);if(E>S&&(E=S),0===S)return 
h[d++]=20971520,h[d++]=20971520,_.bits=1,0;for(B=1;B<S&&0===I[B];B++);for(E<B&&(E=B),R=1,x=1;x<=15;x++)if(R<<=1,(R-=I[x])<0)return-1;if(R>0&&(0===t||1!==S))return-1;for(U[1]=0,x=1;x<15;x++)U[x+1]=U[x]+I[x];for(z=0;z<l;z++)0!==e[a+z]&&(f[U[e[a+z]]++]=z);if(0===t?(O=T=f,w=19):1===t?(O=n,D-=257,T=r,F-=257,w=256):(O=s,T=o,w=-1),N=0,z=0,x=B,m=d,A=E,Z=0,b=-1,C=1<<E,g=C-1,1===t&&C>852||2===t&&C>592)return 1;for(;;){p=x-Z,f[z]<w?(v=0,k=f[z]):f[z]>w?(v=T[F+f[z]],k=O[D+f[z]]):(v=96,k=0),u=1<<x-Z,B=c=1<<A;do{h[m+(N>>Z)+(c-=u)]=p<<24|v<<16|k|0}while(0!==c);for(u=1<<x-1;N&u;)u>>=1;if(0!==u?(N&=u-1,N+=u):N=0,z++,0==--I[x]){if(x===S)break;x=e[a+f[z]]}if(x>E&&(N&g)!==b){for(0===Z&&(Z=E),m+=B,R=1<<(A=x-Z);A+Z<S&&!((R-=I[A+Z])<=0);)A++,R<<=1;if(C+=1<<A,1===t&&C>852||2===t&&C>592)return 1;h[b=N&g]=E<<24|A<<16|m-d|0}}return 0!==N&&(h[m+N]=x-Z<<24|64<<16|0),_.bits=E,0}},{"../utils/common":3}],13:[function(t,e,a){"use strict";e.exports={2:"need dictionary",1:"stream end",0:"","-1":"file error","-2":"stream error","-3":"data error","-4":"insufficient memory","-5":"buffer error","-6":"incompatible version"}},{}],14:[function(t,e,a){"use strict";function i(t){for(var e=t.length;--e>=0;)t[e]=0}function n(t,e,a,i,n){this.static_tree=t,this.extra_bits=e,this.extra_base=a,this.elems=i,this.max_length=n,this.has_stree=t&&t.length}function r(t,e){this.dyn_tree=t,this.max_code=0,this.stat_desc=e}function s(t){return t<256?et[t]:et[256+(t>>>7)]}function o(t,e){t.pending_buf[t.pending++]=255&e,t.pending_buf[t.pending++]=e>>>8&255}function l(t,e,a){t.bi_valid>M-a?(t.bi_buf|=e<<t.bi_valid&65535,o(t,t.bi_buf),t.bi_buf=e>>M-t.bi_valid,t.bi_valid+=a-M):(t.bi_buf|=e<<t.bi_valid&65535,t.bi_valid+=a)}function h(t,e,a){l(t,a[2*e],a[2*e+1])}function d(t,e){var a=0;do{a|=1&t,t>>>=1,a<<=1}while(--e>0);return a>>>1}function f(t){16===t.bi_valid?(o(t,t.bi_buf),t.bi_buf=0,t.bi_valid=0):t.bi_valid>=8&&(t.pending_buf[t.pending++]=255&t.bi_buf,t.bi_buf>>=8,t.bi_valid-=8)}function _(t,e){var 
a,i,n,r,s,o,l=e.dyn_tree,h=e.max_code,d=e.stat_desc.static_tree,f=e.stat_desc.has_stree,_=e.stat_desc.extra_bits,u=e.stat_desc.extra_base,c=e.stat_desc.max_length,b=0;for(r=0;r<=K;r++)t.bl_count[r]=0;for(l[2*t.heap[t.heap_max]+1]=0,a=t.heap_max+1;a<j;a++)(r=l[2*l[2*(i=t.heap[a])+1]+1]+1)>c&&(r=c,b++),l[2*i+1]=r,i>h||(t.bl_count[r]++,s=0,i>=u&&(s=_[i-u]),o=l[2*i],t.opt_len+=o*(r+s),f&&(t.static_len+=o*(d[2*i+1]+s)));if(0!==b){do{for(r=c-1;0===t.bl_count[r];)r--;t.bl_count[r]--,t.bl_count[r+1]+=2,t.bl_count[c]--,b-=2}while(b>0);for(r=c;0!==r;r--)for(i=t.bl_count[r];0!==i;)(n=t.heap[--a])>h||(l[2*n+1]!==r&&(t.opt_len+=(r-l[2*n+1])*l[2*n],l[2*n+1]=r),i--)}}function u(t,e,a){var i,n,r=new Array(K+1),s=0;for(i=1;i<=K;i++)r[i]=s=s+a[i-1]<<1;for(n=0;n<=e;n++){var o=t[2*n+1];0!==o&&(t[2*n]=d(r[o]++,o))}}function c(){var t,e,a,i,r,s=new Array(K+1);for(a=0,i=0;i<U-1;i++)for(it[i]=a,t=0;t<1<<W[i];t++)at[a++]=i;for(at[a-1]=i,r=0,i=0;i<16;i++)for(nt[i]=r,t=0;t<1<<J[i];t++)et[r++]=i;for(r>>=7;i<L;i++)for(nt[i]=r<<7,t=0;t<1<<J[i]-7;t++)et[256+r++]=i;for(e=0;e<=K;e++)s[e]=0;for(t=0;t<=143;)$[2*t+1]=8,t++,s[8]++;for(;t<=255;)$[2*t+1]=9,t++,s[9]++;for(;t<=279;)$[2*t+1]=7,t++,s[7]++;for(;t<=287;)$[2*t+1]=8,t++,s[8]++;for(u($,F+1,s),t=0;t<L;t++)tt[2*t+1]=5,tt[2*t]=d(t,5);rt=new n($,W,T+1,F,K),st=new n(tt,J,0,L,K),ot=new n(new Array(0),Q,0,H,P)}function b(t){var e;for(e=0;e<F;e++)t.dyn_ltree[2*e]=0;for(e=0;e<L;e++)t.dyn_dtree[2*e]=0;for(e=0;e<H;e++)t.bl_tree[2*e]=0;t.dyn_ltree[2*Y]=1,t.opt_len=t.static_len=0,t.last_lit=t.matches=0}function g(t){t.bi_valid>8?o(t,t.bi_buf):t.bi_valid>0&&(t.pending_buf[t.pending++]=t.bi_buf),t.bi_buf=0,t.bi_valid=0}function m(t,e,a,i){g(t),i&&(o(t,a),o(t,~a)),A.arraySet(t.pending_buf,t.window,e,a,t.pending),t.pending+=a}function w(t,e,a,i){var n=2*e,r=2*a;return t[n]<t[r]||t[n]===t[r]&&i[e]<=i[a]}function p(t,e,a){for(var 
i=t.heap[a],n=a<<1;n<=t.heap_len&&(n<t.heap_len&&w(e,t.heap[n+1],t.heap[n],t.depth)&&n++,!w(e,i,t.heap[n],t.depth));)t.heap[a]=t.heap[n],a=n,n<<=1;t.heap[a]=i}function v(t,e,a){var i,n,r,o,d=0;if(0!==t.last_lit)do{i=t.pending_buf[t.d_buf+2*d]<<8|t.pending_buf[t.d_buf+2*d+1],n=t.pending_buf[t.l_buf+d],d++,0===i?h(t,n,e):(h(t,(r=at[n])+T+1,e),0!==(o=W[r])&&l(t,n-=it[r],o),h(t,r=s(--i),a),0!==(o=J[r])&&l(t,i-=nt[r],o))}while(d<t.last_lit);h(t,Y,e)}function k(t,e){var a,i,n,r=e.dyn_tree,s=e.stat_desc.static_tree,o=e.stat_desc.has_stree,l=e.stat_desc.elems,h=-1;for(t.heap_len=0,t.heap_max=j,a=0;a<l;a++)0!==r[2*a]?(t.heap[++t.heap_len]=h=a,t.depth[a]=0):r[2*a+1]=0;for(;t.heap_len<2;)r[2*(n=t.heap[++t.heap_len]=h<2?++h:0)]=1,t.depth[n]=0,t.opt_len--,o&&(t.static_len-=s[2*n+1]);for(e.max_code=h,a=t.heap_len>>1;a>=1;a--)p(t,r,a);n=l;do{a=t.heap[1],t.heap[1]=t.heap[t.heap_len--],p(t,r,1),i=t.heap[1],t.heap[--t.heap_max]=a,t.heap[--t.heap_max]=i,r[2*n]=r[2*a]+r[2*i],t.depth[n]=(t.depth[a]>=t.depth[i]?t.depth[a]:t.depth[i])+1,r[2*a+1]=r[2*i+1]=n,t.heap[1]=n++,p(t,r,1)}while(t.heap_len>=2);t.heap[--t.heap_max]=t.heap[1],_(t,e),u(r,h,t.bl_count)}function y(t,e,a){var i,n,r=-1,s=e[1],o=0,l=7,h=4;for(0===s&&(l=138,h=3),e[2*(a+1)+1]=65535,i=0;i<=a;i++)n=s,s=e[2*(i+1)+1],++o<l&&n===s||(o<h?t.bl_tree[2*n]+=o:0!==n?(n!==r&&t.bl_tree[2*n]++,t.bl_tree[2*q]++):o<=10?t.bl_tree[2*G]++:t.bl_tree[2*X]++,o=0,r=n,0===s?(l=138,h=3):n===s?(l=6,h=3):(l=7,h=4))}function x(t,e,a){var i,n,r=-1,s=e[1],o=0,d=7,f=4;for(0===s&&(d=138,f=3),i=0;i<=a;i++)if(n=s,s=e[2*(i+1)+1],!(++o<d&&n===s)){if(o<f)do{h(t,n,t.bl_tree)}while(0!=--o);else 0!==n?(n!==r&&(h(t,n,t.bl_tree),o--),h(t,q,t.bl_tree),l(t,o-3,2)):o<=10?(h(t,G,t.bl_tree),l(t,o-3,3)):(h(t,X,t.bl_tree),l(t,o-11,7));o=0,r=n,0===s?(d=138,f=3):n===s?(d=6,f=3):(d=7,f=4)}}function z(t){var e;for(y(t,t.dyn_ltree,t.l_desc.max_code),y(t,t.dyn_dtree,t.d_desc.max_code),k(t,t.bl_desc),e=H-1;e>=3&&0===t.bl_tree[2*V[e]+1];e--);return 
t.opt_len+=3*(e+1)+5+5+4,e}function B(t,e,a,i){var n;for(l(t,e-257,5),l(t,a-1,5),l(t,i-4,4),n=0;n<i;n++)l(t,t.bl_tree[2*V[n]+1],3);x(t,t.dyn_ltree,e-1),x(t,t.dyn_dtree,a-1)}function S(t){var e,a=4093624447;for(e=0;e<=31;e++,a>>>=1)if(1&a&&0!==t.dyn_ltree[2*e])return R;if(0!==t.dyn_ltree[18]||0!==t.dyn_ltree[20]||0!==t.dyn_ltree[26])return C;for(e=32;e<T;e++)if(0!==t.dyn_ltree[2*e])return C;return R}function E(t,e,a,i){l(t,(O<<1)+(i?1:0),3),m(t,e,a,!0)}var A=t("../utils/common"),Z=4,R=0,C=1,N=2,O=0,D=1,I=2,U=29,T=256,F=T+1+U,L=30,H=19,j=2*F+1,K=15,M=16,P=7,Y=256,q=16,G=17,X=18,W=[0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,0],J=[0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13],Q=[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,3,7],V=[16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15],$=new Array(2*(F+2));i($);var tt=new Array(2*L);i(tt);var et=new Array(512);i(et);var at=new Array(256);i(at);var it=new Array(U);i(it);var nt=new Array(L);i(nt);var rt,st,ot,lt=!1;a._tr_init=function(t){lt||(c(),lt=!0),t.l_desc=new r(t.dyn_ltree,rt),t.d_desc=new r(t.dyn_dtree,st),t.bl_desc=new r(t.bl_tree,ot),t.bi_buf=0,t.bi_valid=0,b(t)},a._tr_stored_block=E,a._tr_flush_block=function(t,e,a,i){var n,r,s=0;t.level>0?(t.strm.data_type===N&&(t.strm.data_type=S(t)),k(t,t.l_desc),k(t,t.d_desc),s=z(t),n=t.opt_len+3+7>>>3,(r=t.static_len+3+7>>>3)<=n&&(n=r)):n=r=a+5,a+4<=n&&-1!==e?E(t,e,a,i):t.strategy===Z||r===n?(l(t,(D<<1)+(i?1:0),3),v(t,$,tt)):(l(t,(I<<1)+(i?1:0),3),B(t,t.l_desc.max_code+1,t.d_desc.max_code+1,s+1),v(t,t.dyn_ltree,t.dyn_dtree)),b(t),i&&g(t)},a._tr_tally=function(t,e,a){return t.pending_buf[t.d_buf+2*t.last_lit]=e>>>8&255,t.pending_buf[t.d_buf+2*t.last_lit+1]=255&e,t.pending_buf[t.l_buf+t.last_lit]=255&a,t.last_lit++,0===e?t.dyn_ltree[2*a]++:(t.matches++,e--,t.dyn_ltree[2*(at[a]+T+1)]++,t.dyn_dtree[2*s(e)]++),t.last_lit===t.lit_bufsize-1},a._tr_align=function(t){l(t,D<<1,3),h(t,Y,$),f(t)}},{"../utils/common":3}],15:[function(t,e,a){"use 
strict";e.exports=function(){this.input=null,this.next_in=0,this.avail_in=0,this.total_in=0,this.output=null,this.next_out=0,this.avail_out=0,this.total_out=0,this.msg="",this.state=null,this.data_type=2,this.adler=0}},{}],"/":[function(t,e,a){"use strict";var i={};(0,t("./lib/utils/common").assign)(i,t("./lib/deflate"),t("./lib/inflate"),t("./lib/zlib/constants")),e.exports=i},{"./lib/deflate":1,"./lib/inflate":2,"./lib/utils/common":3,"./lib/zlib/constants":6}]},{},[])("/")});'use strict';tr.exportTo('tr.e.importer',function(){const GZIP_MEMBER_HEADER_ID_SIZE=3;const GZIP_HEADER_ID1=0x1f;const GZIP_HEADER_ID2=0x8b;const GZIP_DEFLATE_COMPRESSION=8;function _stringToUInt8Array(str){const array=new Uint8Array(str.length);for(let i=0;i<str.length;++i){array[i]=str.charCodeAt(i);} return array;} function GzipImporter(model,eventData){this.inflateAsTraceStream=false;if(typeof(eventData)==='string'||eventData instanceof String){eventData=_stringToUInt8Array(eventData);}else if(eventData instanceof ArrayBuffer){eventData=new Uint8Array(eventData);}else if(eventData instanceof tr.b.InMemoryTraceStream){eventData=eventData.data;this.inflateAsTraceStream_=true;}else{throw new Error('Unknown gzip data format');} this.model_=model;this.gzipData_=eventData;} @@ -5251,7 +5251,7 @@ this.model_.samples.push(new tr.model.Sample(startInMs,'V8 PC',node,this.v8_thread_,undefined,1));},processDistortion_(distortionInPicoseconds){},processPlotRange_(start,end){},processV8Version_(major,minor,build,patch,candidate){},importEvents(){const logreader=new 
tr.e.importer.v8.LogReader({'timer-event':{parsers:[null,parseInt,parseInt],processor:this.processTimerEvent_.bind(this)},'shared-library':{parsers:[null,parseInt,parseInt],processor:this.processSharedLibrary_.bind(this)},'timer-event-start':{parsers:[null,parseInt],processor:this.processTimerEventStart_.bind(this)},'timer-event-end':{parsers:[null,parseInt],processor:this.processTimerEventEnd_.bind(this)},'code-creation':{parsers:[null,parseInt,parseInt,parseInt,null,'var-args'],processor:this.processCodeCreateEvent_.bind(this)},'code-move':{parsers:[parseInt,parseInt],processor:this.processCodeMoveEvent_.bind(this)},'code-delete':{parsers:[parseInt],processor:this.processCodeDeleteEvent_.bind(this)},'cpp':{parsers:[parseInt,parseInt,null],processor:this.processCppSymbol_.bind(this)},'tick':{parsers:[parseInt,parseInt,parseInt,parseInt,parseInt,'var-args'],processor:this.processTickEvent_.bind(this)},'distortion':{parsers:[parseInt],processor:this.processDistortion_.bind(this)},'plot-range':{parsers:[parseInt,parseInt],processor:this.processPlotRange_.bind(this)},'v8-version':{parsers:[parseInt,parseInt,parseInt,parseInt,parseInt],processor:this.processV8Version_.bind(this)}});this.v8_timer_thread_=this.model_.getOrCreateProcess(-32).getOrCreateThread(1);this.v8_timer_thread_.name='V8 Timers';this.v8_thread_=this.model_.getOrCreateProcess(-32).getOrCreateThread(2);this.v8_thread_.name='V8';const lines=this.logData_.split('\n');for(let i=0;i<lines.length;i++){logreader.processLogLine(lines[i]);} function addSlices(slices,thread){for(let i=0;i<slices.length;i++){const duration=slices[i].end-slices[i].start;const slice=new tr.model.ThreadSlice('v8',slices[i].name,ColorScheme.getColorIdForGeneralPurposeString(slices[i].name),slices[i].start,{},duration);thread.sliceGroup.pushSlice(slice);addSlices(slices[i].children,thread);}} addSlices(this.v8_stack_timeline_,this.v8_thread_);}};tr.importer.Importer.register(V8LogImporter);return{V8LogImporter,};});'use 
strict';if(tr.isVinn){global.window={};} -!function(a){if("object"==typeof exports&&"undefined"!=typeof module)module.exports=a();else if("function"==typeof define&&define.amd)define([],a);else{var b;"undefined"!=typeof window?b=window:"undefined"!=typeof global?b=global:"undefined"!=typeof self&&(b=self),b.JSZip=a()}}(function(){return function a(b,c,d){function e(g,h){if(!c[g]){if(!b[g]){var i="function"==typeof require&&require;if(!h&&i)return i(g,!0);if(f)return f(g,!0);throw new Error("Cannot find module '"+g+"'")}var j=c[g]={exports:{}};b[g][0].call(j.exports,function(a){var c=b[g][1][a];return e(c?c:a)},j,j.exports,a,b,c,d)}return c[g].exports}for(var f="function"==typeof require&&require,g=0;g<d.length;g++)e(d[g]);return e}({1:[function(a,b,c){"use strict";var d="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/=";c.encode=function(a){for(var b,c,e,f,g,h,i,j="",k=0;k<a.length;)b=a.charCodeAt(k++),c=a.charCodeAt(k++),e=a.charCodeAt(k++),f=b>>2,g=(3&b)<<4|c>>4,h=(15&c)<<2|e>>6,i=63&e,isNaN(c)?h=i=64:isNaN(e)&&(i=64),j=j+d.charAt(f)+d.charAt(g)+d.charAt(h)+d.charAt(i);return j},c.decode=function(a){var b,c,e,f,g,h,i,j="",k=0;for(a=a.replace(/[^A-Za-z0-9\+\/\=]/g,"");k<a.length;)f=d.indexOf(a.charAt(k++)),g=d.indexOf(a.charAt(k++)),h=d.indexOf(a.charAt(k++)),i=d.indexOf(a.charAt(k++)),b=f<<2|g>>4,c=(15&g)<<4|h>>2,e=(3&h)<<6|i,j+=String.fromCharCode(b),64!=h&&(j+=String.fromCharCode(c)),64!=i&&(j+=String.fromCharCode(e));return j}},{}],2:[function(a,b){"use strict";function c(){this.compressedSize=0,this.uncompressedSize=0,this.crc32=0,this.compressionMethod=null,this.compressedContent=null}c.prototype={getContent:function(){return null},getCompressedContent:function(){return null}},b.exports=c},{}],3:[function(a,b,c){"use strict";c.STORE={magic:"\x00\x00",compress:function(a){return a},uncompress:function(a){return a},compressInputType:null,uncompressInputType:null},c.DEFLATE=a("./flate")},{"./flate":8}],4:[function(a,b){"use strict";var 
c=a("./utils"),d=[0,1996959894,3993919788,2567524794,124634137,1886057615,3915621685,2657392035,249268274,2044508324,3772115230,2547177864,162941995,2125561021,3887607047,2428444049,498536548,1789927666,4089016648,2227061214,450548861,1843258603,4107580753,2211677639,325883990,1684777152,4251122042,2321926636,335633487,1661365465,4195302755,2366115317,997073096,1281953886,3579855332,2724688242,1006888145,1258607687,3524101629,2768942443,901097722,1119000684,3686517206,2898065728,853044451,1172266101,3705015759,2882616665,651767980,1373503546,3369554304,3218104598,565507253,1454621731,3485111705,3099436303,671266974,1594198024,3322730930,2970347812,795835527,1483230225,3244367275,3060149565,1994146192,31158534,2563907772,4023717930,1907459465,112637215,2680153253,3904427059,2013776290,251722036,2517215374,3775830040,2137656763,141376813,2439277719,3865271297,1802195444,476864866,2238001368,4066508878,1812370925,453092731,2181625025,4111451223,1706088902,314042704,2344532202,4240017532,1658658271,366619977,2362670323,4224994405,1303535960,984961486,2747007092,3569037538,1256170817,1037604311,2765210733,3554079995,1131014506,879679996,2909243462,3663771856,1141124467,855842277,2852801631,3708648649,1342533948,654459306,3188396048,3373015174,1466479909,544179635,3110523913,3462522015,1591671054,702138776,2966460450,3352799412,1504918807,783551873,3082640443,3233442989,3988292384,2596254646,62317068,1957810842,3939845945,2647816111,81470997,1943803523,3814918930,2489596804,225274430,2053790376,3826175755,2466906013,167816743,2097651377,4027552580,2265490386,503444072,1762050814,4150417245,2154129355,426522225,1852507879,4275313526,2312317920,282753626,1742555852,4189708143,2394877945,397917763,1622183637,3604390888,2714866558,953729732,1340076626,3518719985,2797360999,1068828381,1219638859,3624741850,2936675148,906185462,1090812512,3747672003,2825379669,829329135,1181335161,3412177804,3160834842,628085408,1382605366,3423369109,3138078467,570562233,1426400815,3317316542,2
998733608,733239954,1555261956,3268935591,3050360625,752459403,1541320221,2607071920,3965973030,1969922972,40735498,2617837225,3943577151,1913087877,83908371,2512341634,3803740692,2075208622,213261112,2463272603,3855990285,2094854071,198958881,2262029012,4057260610,1759359992,534414190,2176718541,4139329115,1873836001,414664567,2282248934,4279200368,1711684554,285281116,2405801727,4167216745,1634467795,376229701,2685067896,3608007406,1308918612,956543938,2808555105,3495958263,1231636301,1047427035,2932959818,3654703836,1088359270,936918e3,2847714899,3736837829,1202900863,817233897,3183342108,3401237130,1404277552,615818150,3134207493,3453421203,1423857449,601450431,3009837614,3294710456,1567103746,711928724,3020668471,3272380065,1510334235,755167117];b.exports=function(a,b){if("undefined"==typeof a||!a.length)return 0;var e="string"!==c.getTypeOf(a);"undefined"==typeof b&&(b=0);var f=0,g=0,h=0;b=-1^b;for(var i=0,j=a.length;j>i;i++)h=e?a[i]:a.charCodeAt(i),g=255&(b^h),f=d[g],b=b>>>8^f;return-1^b}},{"./utils":21}],5:[function(a,b){"use strict";function c(){this.data=null,this.length=0,this.index=0}var d=a("./utils");c.prototype={checkOffset:function(a){this.checkIndex(this.index+a)},checkIndex:function(a){if(this.length<a||0>a)throw new Error("End of data reached (data length = "+this.length+", asked index = "+a+"). 
Corrupted zip ?")},setIndex:function(a){this.checkIndex(a),this.index=a},skip:function(a){this.setIndex(this.index+a)},byteAt:function(){},readInt:function(a){var b,c=0;for(this.checkOffset(a),b=this.index+a-1;b>=this.index;b--)c=(c<<8)+this.byteAt(b);return this.index+=a,c},readString:function(a){return d.transformTo("string",this.readData(a))},readData:function(){},lastIndexOfSignature:function(){},readDate:function(){var a=this.readInt(4);return new Date((a>>25&127)+1980,(a>>21&15)-1,a>>16&31,a>>11&31,a>>5&63,(31&a)<<1)}},b.exports=c},{"./utils":21}],6:[function(a,b,c){"use strict";c.base64=!1,c.binary=!1,c.dir=!1,c.createFolders=!1,c.date=null,c.compression=null,c.comment=null},{}],7:[function(a,b,c){"use strict";var d=a("./utils");c.string2binary=function(a){return d.string2binary(a)},c.string2Uint8Array=function(a){return d.transformTo("uint8array",a)},c.uint8Array2String=function(a){return d.transformTo("string",a)},c.string2Blob=function(a){var b=d.transformTo("arraybuffer",a);return d.arrayBuffer2Blob(b)},c.arrayBuffer2Blob=function(a){return d.arrayBuffer2Blob(a)},c.transformTo=function(a,b){return d.transformTo(a,b)},c.getTypeOf=function(a){return d.getTypeOf(a)},c.checkSupport=function(a){return d.checkSupport(a)},c.MAX_VALUE_16BITS=d.MAX_VALUE_16BITS,c.MAX_VALUE_32BITS=d.MAX_VALUE_32BITS,c.pretty=function(a){return d.pretty(a)},c.findCompression=function(a){return d.findCompression(a)},c.isRegExp=function(a){return d.isRegExp(a)}},{"./utils":21}],8:[function(a,b,c){"use strict";var d="undefined"!=typeof Uint8Array&&"undefined"!=typeof Uint16Array&&"undefined"!=typeof Uint32Array,e=a("pako");c.uncompressInputType=d?"uint8array":"array",c.compressInputType=d?"uint8array":"array",c.magic="\b\x00",c.compress=function(a){return e.deflateRaw(a)},c.uncompress=function(a){return e.inflateRaw(a)}},{pako:24}],9:[function(a,b){"use strict";function c(a,b){return this instanceof 
c?(this.files={},this.comment=null,this.root="",a&&this.load(a,b),void(this.clone=function(){var a=new c;for(var b in this)"function"!=typeof this[b]&&(a[b]=this[b]);return a})):new c(a,b)}var d=a("./base64");c.prototype=a("./object"),c.prototype.load=a("./load"),c.support=a("./support"),c.defaults=a("./defaults"),c.utils=a("./deprecatedPublicUtils"),c.base64={encode:function(a){return d.encode(a)},decode:function(a){return d.decode(a)}},c.compressions=a("./compressions"),b.exports=c},{"./base64":1,"./compressions":3,"./defaults":6,"./deprecatedPublicUtils":7,"./load":10,"./object":13,"./support":17}],10:[function(a,b){"use strict";var c=a("./base64"),d=a("./zipEntries");b.exports=function(a,b){var e,f,g,h;for(b=b||{},b.base64&&(a=c.decode(a)),f=new d(a,b),e=f.files,g=0;g<e.length;g++)h=e[g],this.file(h.fileName,h.decompressed,{binary:!0,optimizedBinaryString:!0,date:h.date,dir:h.dir,comment:h.fileComment.length?h.fileComment:null,createFolders:b.createFolders});return f.zipComment.length&&(this.comment=f.zipComment),this}},{"./base64":1,"./zipEntries":22}],11:[function(a,b){(function(a){"use strict";b.exports=function(b,c){return new a(b,c)},b.exports.test=function(b){return a.isBuffer(b)}}).call(this,"undefined"!=typeof Buffer?Buffer:void 0)},{}],12:[function(a,b){"use strict";function c(a){this.data=a,this.length=this.data.length,this.index=0}var d=a("./uint8ArrayReader");c.prototype=new d,c.prototype.readData=function(a){this.checkOffset(a);var b=this.data.slice(this.index,this.index+a);return this.index+=a,b},b.exports=c},{"./uint8ArrayReader":18}],13:[function(a,b){"use strict";var c=a("./support"),d=a("./utils"),e=a("./crc32"),f=a("./signature"),g=a("./defaults"),h=a("./base64"),i=a("./compressions"),j=a("./compressedObject"),k=a("./nodeBuffer"),l=a("./utf8"),m=a("./stringWriter"),n=a("./uint8ArrayWriter"),o=function(a){if(a._data instanceof j&&(a._data=a._data.getContent(),a.options.binary=!0,a.options.base64=!1,"uint8array"===d.getTypeOf(a._data))){var 
b=a._data;a._data=new Uint8Array(b.length),0!==b.length&&a._data.set(b,0)}return a._data},p=function(a){var b=o(a),e=d.getTypeOf(b);return"string"===e?!a.options.binary&&c.nodebuffer?k(b,"utf-8"):a.asBinary():b},q=function(a){var b=o(this);return null===b||"undefined"==typeof b?"":(this.options.base64&&(b=h.decode(b)),b=a&&this.options.binary?A.utf8decode(b):d.transformTo("string",b),a||this.options.binary||(b=d.transformTo("string",A.utf8encode(b))),b)},r=function(a,b,c){this.name=a,this.dir=c.dir,this.date=c.date,this.comment=c.comment,this._data=b,this.options=c,this._initialMetadata={dir:c.dir,date:c.date}};r.prototype={asText:function(){return q.call(this,!0)},asBinary:function(){return q.call(this,!1)},asNodeBuffer:function(){var a=p(this);return d.transformTo("nodebuffer",a)},asUint8Array:function(){var a=p(this);return d.transformTo("uint8array",a)},asArrayBuffer:function(){return this.asUint8Array().buffer}};var s=function(a,b){var c,d="";for(c=0;b>c;c++)d+=String.fromCharCode(255&a),a>>>=8;return d},t=function(){var a,b,c={};for(a=0;a<arguments.length;a++)for(b in arguments[a])arguments[a].hasOwnProperty(b)&&"undefined"==typeof c[b]&&(c[b]=arguments[a][b]);return c},u=function(a){return a=a||{},a.base64!==!0||null!==a.binary&&void 0!==a.binary||(a.binary=!0),a=t(a,g),a.date=a.date||new Date,null!==a.compression&&(a.compression=a.compression.toUpperCase()),a},v=function(a,b,c){var e,f=d.getTypeOf(b);if(c=u(c),c.createFolders&&(e=w(a))&&x.call(this,e,!0),c.dir||null===b||"undefined"==typeof b)c.base64=!1,c.binary=!1,b=null;else if("string"===f)c.binary&&!c.base64&&c.optimizedBinaryString!==!0&&(b=d.string2binary(b));else{if(c.base64=!1,c.binary=!0,!(f||b instanceof j))throw new Error("The data of '"+a+"' is in an unsupported format !");"arraybuffer"===f&&(b=d.transformTo("uint8array",b))}var g=new r(a,b,c);return this.files[a]=g,g},w=function(a){"/"==a.slice(-1)&&(a=a.substring(0,a.length-1));var b=a.lastIndexOf("/");return 
b>0?a.substring(0,b):""},x=function(a,b){return"/"!=a.slice(-1)&&(a+="/"),b="undefined"!=typeof b?b:!1,this.files[a]||v.call(this,a,null,{dir:!0,createFolders:b}),this.files[a]},y=function(a,b){var c,f=new j;return a._data instanceof j?(f.uncompressedSize=a._data.uncompressedSize,f.crc32=a._data.crc32,0===f.uncompressedSize||a.dir?(b=i.STORE,f.compressedContent="",f.crc32=0):a._data.compressionMethod===b.magic?f.compressedContent=a._data.getCompressedContent():(c=a._data.getContent(),f.compressedContent=b.compress(d.transformTo(b.compressInputType,c)))):(c=p(a),(!c||0===c.length||a.dir)&&(b=i.STORE,c=""),f.uncompressedSize=c.length,f.crc32=e(c),f.compressedContent=b.compress(d.transformTo(b.compressInputType,c))),f.compressedSize=f.compressedContent.length,f.compressionMethod=b.magic,f},z=function(a,b,c,g){var h,i,j,k,m=(c.compressedContent,d.transformTo("string",l.utf8encode(b.name))),n=b.comment||"",o=d.transformTo("string",l.utf8encode(n)),p=m.length!==b.name.length,q=o.length!==n.length,r=b.options,t="",u="",v="";j=b._initialMetadata.dir!==b.dir?b.dir:r.dir,k=b._initialMetadata.date!==b.date?b.date:r.date,h=k.getHours(),h<<=6,h|=k.getMinutes(),h<<=5,h|=k.getSeconds()/2,i=k.getFullYear()-1980,i<<=4,i|=k.getMonth()+1,i<<=5,i|=k.getDate(),p&&(u=s(1,1)+s(e(m),4)+m,t+="up"+s(u.length,2)+u),q&&(v=s(1,1)+s(this.crc32(o),4)+o,t+="uc"+s(v.length,2)+v);var w="";w+="\n\x00",w+=p||q?"\x00\b":"\x00\x00",w+=c.compressionMethod,w+=s(h,2),w+=s(i,2),w+=s(c.crc32,4),w+=s(c.compressedSize,4),w+=s(c.uncompressedSize,4),w+=s(m.length,2),w+=s(t.length,2);var x=f.LOCAL_FILE_HEADER+w+m+t,y=f.CENTRAL_FILE_HEADER+"\x00"+w+s(o.length,2)+"\x00\x00\x00\x00"+(j===!0?"\x00\x00\x00":"\x00\x00\x00\x00")+s(g,4)+m+t+o;return{fileRecord:x,dirRecord:y,compressedObject:c}},A={load:function(){throw new Error("Load method is not defined. 
Is the file jszip-load.js included ?")},filter:function(a){var b,c,d,e,f=[];for(b in this.files)this.files.hasOwnProperty(b)&&(d=this.files[b],e=new r(d.name,d._data,t(d.options)),c=b.slice(this.root.length,b.length),b.slice(0,this.root.length)===this.root&&a(c,e)&&f.push(e));return f},file:function(a,b,c){if(1===arguments.length){if(d.isRegExp(a)){var e=a;return this.filter(function(a,b){return!b.dir&&e.test(a)})}return this.filter(function(b,c){return!c.dir&&b===a})[0]||null}return a=this.root+a,v.call(this,a,b,c),this},folder:function(a){if(!a)return this;if(d.isRegExp(a))return this.filter(function(b,c){return c.dir&&a.test(b)});var b=this.root+a,c=x.call(this,b),e=this.clone();return e.root=c.name,e},remove:function(a){a=this.root+a;var b=this.files[a];if(b||("/"!=a.slice(-1)&&(a+="/"),b=this.files[a]),b&&!b.dir)delete this.files[a];else for(var c=this.filter(function(b,c){return c.name.slice(0,a.length)===a}),d=0;d<c.length;d++)delete this.files[c[d].name];return this},generate:function(a){a=t(a||{},{base64:!0,compression:"STORE",type:"base64",comment:null}),d.checkSupport(a.type);var b,c,e=[],g=0,j=0,k=d.transformTo("string",this.utf8encode(a.comment||this.comment||""));for(var l in this.files)if(this.files.hasOwnProperty(l)){var o=this.files[l],p=o.options.compression||a.compression.toUpperCase(),q=i[p];if(!q)throw new Error(p+" is not a valid compression method !");var r=y.call(this,o,q),u=z.call(this,l,o,r,g);g+=u.fileRecord.length+r.compressedSize,j+=u.dirRecord.length,e.push(u)}var v="";v=f.CENTRAL_DIRECTORY_END+"\x00\x00\x00\x00"+s(e.length,2)+s(e.length,2)+s(j,4)+s(g,4)+s(k.length,2)+k;var w=a.type.toLowerCase();for(b="uint8array"===w||"arraybuffer"===w||"blob"===w||"nodebuffer"===w?new n(g+j+v.length):new m(g+j+v.length),c=0;c<e.length;c++)b.append(e[c].fileRecord),b.append(e[c].compressedObject.compressedContent);for(c=0;c<e.length;c++)b.append(e[c].dirRecord);b.append(v);var 
x=b.finalize();switch(a.type.toLowerCase()){case"uint8array":case"arraybuffer":case"nodebuffer":return d.transformTo(a.type.toLowerCase(),x);case"blob":return d.arrayBuffer2Blob(d.transformTo("arraybuffer",x));case"base64":return a.base64?h.encode(x):x;default:return x}},crc32:function(a,b){return e(a,b)},utf8encode:function(a){return d.transformTo("string",l.utf8encode(a))},utf8decode:function(a){return l.utf8decode(a)}};b.exports=A},{"./base64":1,"./compressedObject":2,"./compressions":3,"./crc32":4,"./defaults":6,"./nodeBuffer":11,"./signature":14,"./stringWriter":16,"./support":17,"./uint8ArrayWriter":19,"./utf8":20,"./utils":21}],14:[function(a,b,c){"use strict";c.LOCAL_FILE_HEADER="PK",c.CENTRAL_FILE_HEADER="PK",c.CENTRAL_DIRECTORY_END="PK",c.ZIP64_CENTRAL_DIRECTORY_LOCATOR="PK",c.ZIP64_CENTRAL_DIRECTORY_END="PK",c.DATA_DESCRIPTOR="PK\b"},{}],15:[function(a,b){"use strict";function c(a,b){this.data=a,b||(this.data=e.string2binary(this.data)),this.length=this.data.length,this.index=0}var d=a("./dataReader"),e=a("./utils");c.prototype=new d,c.prototype.byteAt=function(a){return this.data.charCodeAt(a)},c.prototype.lastIndexOfSignature=function(a){return this.data.lastIndexOf(a)},c.prototype.readData=function(a){this.checkOffset(a);var b=this.data.slice(this.index,this.index+a);return this.index+=a,b},b.exports=c},{"./dataReader":5,"./utils":21}],16:[function(a,b){"use strict";var c=a("./utils"),d=function(){this.data=[]};d.prototype={append:function(a){a=c.transformTo("string",a),this.data.push(a)},finalize:function(){return this.data.join("")}},b.exports=d},{"./utils":21}],17:[function(a,b,c){(function(a){"use strict";if(c.base64=!0,c.array=!0,c.string=!0,c.arraybuffer="undefined"!=typeof ArrayBuffer&&"undefined"!=typeof Uint8Array,c.nodebuffer="undefined"!=typeof a,c.uint8array="undefined"!=typeof Uint8Array,"undefined"==typeof ArrayBuffer)c.blob=!1;else{var b=new ArrayBuffer(0);try{c.blob=0===new Blob([b],{type:"application/zip"}).size}catch(d){try{var 
e=window.BlobBuilder||window.WebKitBlobBuilder||window.MozBlobBuilder||window.MSBlobBuilder,f=new e;f.append(b),c.blob=0===f.getBlob("application/zip").size}catch(d){c.blob=!1}}}}).call(this,"undefined"!=typeof Buffer?Buffer:void 0)},{}],18:[function(a,b){"use strict";function c(a){a&&(this.data=a,this.length=this.data.length,this.index=0)}var d=a("./dataReader");c.prototype=new d,c.prototype.byteAt=function(a){return this.data[a]},c.prototype.lastIndexOfSignature=function(a){for(var b=a.charCodeAt(0),c=a.charCodeAt(1),d=a.charCodeAt(2),e=a.charCodeAt(3),f=this.length-4;f>=0;--f)if(this.data[f]===b&&this.data[f+1]===c&&this.data[f+2]===d&&this.data[f+3]===e)return f;return-1},c.prototype.readData=function(a){if(this.checkOffset(a),0===a)return new Uint8Array(0);var b=this.data.subarray(this.index,this.index+a);return this.index+=a,b},b.exports=c},{"./dataReader":5}],19:[function(a,b){"use strict";var c=a("./utils"),d=function(a){this.data=new Uint8Array(a),this.index=0};d.prototype={append:function(a){0!==a.length&&(a=c.transformTo("uint8array",a),this.data.set(a,this.index),this.index+=a.length)},finalize:function(){return this.data}},b.exports=d},{"./utils":21}],20:[function(a,b,c){"use strict";for(var d=a("./utils"),e=a("./support"),f=a("./nodeBuffer"),g=new Array(256),h=0;256>h;h++)g[h]=h>=252?6:h>=248?5:h>=240?4:h>=224?3:h>=192?2:1;g[254]=g[254]=1;var i=function(a){var b,c,d,f,g,h=a.length,i=0;for(f=0;h>f;f++)c=a.charCodeAt(f),55296===(64512&c)&&h>f+1&&(d=a.charCodeAt(f+1),56320===(64512&d)&&(c=65536+(c-55296<<10)+(d-56320),f++)),i+=128>c?1:2048>c?2:65536>c?3:4;for(b=e.uint8array?new Uint8Array(i):new Array(i),g=0,f=0;i>g;f++)c=a.charCodeAt(f),55296===(64512&c)&&h>f+1&&(d=a.charCodeAt(f+1),56320===(64512&d)&&(c=65536+(c-55296<<10)+(d-56320),f++)),128>c?b[g++]=c:2048>c?(b[g++]=192|c>>>6,b[g++]=128|63&c):65536>c?(b[g++]=224|c>>>12,b[g++]=128|c>>>6&63,b[g++]=128|63&c):(b[g++]=240|c>>>18,b[g++]=128|c>>>12&63,b[g++]=128|c>>>6&63,b[g++]=128|63&c);return 
b},j=function(a,b){var c;for(b=b||a.length,b>a.length&&(b=a.length),c=b-1;c>=0&&128===(192&a[c]);)c--;return 0>c?b:0===c?b:c+g[a[c]]>b?c:b},k=function(a){var b,c,e,f,h=a.length,i=new Array(2*h);for(c=0,b=0;h>b;)if(e=a[b++],128>e)i[c++]=e;else if(f=g[e],f>4)i[c++]=65533,b+=f-1;else{for(e&=2===f?31:3===f?15:7;f>1&&h>b;)e=e<<6|63&a[b++],f--;f>1?i[c++]=65533:65536>e?i[c++]=e:(e-=65536,i[c++]=55296|e>>10&1023,i[c++]=56320|1023&e)}return i.length!==c&&(i.subarray?i=i.subarray(0,c):i.length=c),d.applyFromCharCode(i)};c.utf8encode=function(a){return e.nodebuffer?f(a,"utf-8"):i(a)},c.utf8decode=function(a){if(e.nodebuffer)return d.transformTo("nodebuffer",a).toString("utf-8");a=d.transformTo(e.uint8array?"uint8array":"array",a);for(var b=[],c=0,f=a.length,g=65536;f>c;){var h=j(a,Math.min(c+g,f));b.push(e.uint8array?k(a.subarray(c,h)):k(a.slice(c,h))),c=h}return b.join("")}},{"./nodeBuffer":11,"./support":17,"./utils":21}],21:[function(a,b,c){"use strict";function d(a){return a}function e(a,b){for(var c=0;c<a.length;++c)b[c]=255&a.charCodeAt(c);return b}function f(a){var b=65536,d=[],e=a.length,f=c.getTypeOf(a),g=0,h=!0;try{switch(f){case"uint8array":String.fromCharCode.apply(null,new Uint8Array(0));break;case"nodebuffer":String.fromCharCode.apply(null,j(0))}}catch(i){h=!1}if(!h){for(var k="",l=0;l<a.length;l++)k+=String.fromCharCode(a[l]);return k}for(;e>g&&b>1;)try{d.push("array"===f||"nodebuffer"===f?String.fromCharCode.apply(null,a.slice(g,Math.min(g+b,e))):String.fromCharCode.apply(null,a.subarray(g,Math.min(g+b,e)))),g+=b}catch(i){b=Math.floor(b/2)}return d.join("")}function g(a,b){for(var c=0;c<a.length;c++)b[c]=a[c];return b}var h=a("./support"),i=a("./compressions"),j=a("./nodeBuffer");c.string2binary=function(a){for(var b="",c=0;c<a.length;c++)b+=String.fromCharCode(255&a.charCodeAt(c));return b},c.arrayBuffer2Blob=function(a){c.checkSupport("blob");try{return new Blob([a],{type:"application/zip"})}catch(b){try{var 
d=window.BlobBuilder||window.WebKitBlobBuilder||window.MozBlobBuilder||window.MSBlobBuilder,e=new d;return e.append(a),e.getBlob("application/zip")}catch(b){throw new Error("Bug : can't construct the Blob.")}}},c.applyFromCharCode=f;var k={};k.string={string:d,array:function(a){return e(a,new Array(a.length))},arraybuffer:function(a){return k.string.uint8array(a).buffer},uint8array:function(a){return e(a,new Uint8Array(a.length))},nodebuffer:function(a){return e(a,j(a.length))}},k.array={string:f,array:d,arraybuffer:function(a){return new Uint8Array(a).buffer},uint8array:function(a){return new Uint8Array(a)},nodebuffer:function(a){return j(a)}},k.arraybuffer={string:function(a){return f(new Uint8Array(a))},array:function(a){return g(new Uint8Array(a),new Array(a.byteLength))},arraybuffer:d,uint8array:function(a){return new Uint8Array(a)},nodebuffer:function(a){return j(new Uint8Array(a))}},k.uint8array={string:f,array:function(a){return g(a,new Array(a.length))},arraybuffer:function(a){return a.buffer},uint8array:d,nodebuffer:function(a){return j(a)}},k.nodebuffer={string:f,array:function(a){return g(a,new Array(a.length))},arraybuffer:function(a){return k.nodebuffer.uint8array(a).buffer},uint8array:function(a){return g(a,new Uint8Array(a.length))},nodebuffer:d},c.transformTo=function(a,b){if(b||(b=""),!a)return b;c.checkSupport(a);var d=c.getTypeOf(b),e=k[d][a](b);return e},c.getTypeOf=function(a){return"string"==typeof a?"string":"[object Array]"===Object.prototype.toString.call(a)?"array":h.nodebuffer&&j.test(a)?"nodebuffer":h.uint8array&&a instanceof Uint8Array?"uint8array":h.arraybuffer&&a instanceof ArrayBuffer?"arraybuffer":void 0},c.checkSupport=function(a){var b=h[a.toLowerCase()];if(!b)throw new Error(a+" is not supported by this browser")},c.MAX_VALUE_16BITS=65535,c.MAX_VALUE_32BITS=-1,c.pretty=function(a){var b,c,d="";for(c=0;c<(a||"").length;c++)b=a.charCodeAt(c),d+="\\x"+(16>b?"0":"")+b.toString(16).toUpperCase();return 
d},c.findCompression=function(a){for(var b in i)if(i.hasOwnProperty(b)&&i[b].magic===a)return i[b];return null},c.isRegExp=function(a){return"[object RegExp]"===Object.prototype.toString.call(a)}},{"./compressions":3,"./nodeBuffer":11,"./support":17}],22:[function(a,b){"use strict";function c(a,b){this.files=[],this.loadOptions=b,a&&this.load(a)}var d=a("./stringReader"),e=a("./nodeBufferReader"),f=a("./uint8ArrayReader"),g=a("./utils"),h=a("./signature"),i=a("./zipEntry"),j=a("./support"),k=a("./object");c.prototype={checkSignature:function(a){var b=this.reader.readString(4);if(b!==a)throw new Error("Corrupted zip or bug : unexpected signature ("+g.pretty(b)+", expected "+g.pretty(a)+")")},readBlockEndOfCentral:function(){this.diskNumber=this.reader.readInt(2),this.diskWithCentralDirStart=this.reader.readInt(2),this.centralDirRecordsOnThisDisk=this.reader.readInt(2),this.centralDirRecords=this.reader.readInt(2),this.centralDirSize=this.reader.readInt(4),this.centralDirOffset=this.reader.readInt(4),this.zipCommentLength=this.reader.readInt(2),this.zipComment=this.reader.readString(this.zipCommentLength),this.zipComment=k.utf8decode(this.zipComment)},readBlockZip64EndOfCentral:function(){this.zip64EndOfCentralSize=this.reader.readInt(8),this.versionMadeBy=this.reader.readString(2),this.versionNeeded=this.reader.readInt(2),this.diskNumber=this.reader.readInt(4),this.diskWithCentralDirStart=this.reader.readInt(4),this.centralDirRecordsOnThisDisk=this.reader.readInt(8),this.centralDirRecords=this.reader.readInt(8),this.centralDirSize=this.reader.readInt(8),this.centralDirOffset=this.reader.readInt(8),this.zip64ExtensibleData={};for(var 
a,b,c,d=this.zip64EndOfCentralSize-44,e=0;d>e;)a=this.reader.readInt(2),b=this.reader.readInt(4),c=this.reader.readString(b),this.zip64ExtensibleData[a]={id:a,length:b,value:c}},readBlockZip64EndOfCentralLocator:function(){if(this.diskWithZip64CentralDirStart=this.reader.readInt(4),this.relativeOffsetEndOfZip64CentralDir=this.reader.readInt(8),this.disksCount=this.reader.readInt(4),this.disksCount>1)throw new Error("Multi-volumes zip are not supported")},readLocalFiles:function(){var a,b;for(a=0;a<this.files.length;a++)b=this.files[a],this.reader.setIndex(b.localHeaderOffset),this.checkSignature(h.LOCAL_FILE_HEADER),b.readLocalPart(this.reader),b.handleUTF8()},readCentralDir:function(){var a;for(this.reader.setIndex(this.centralDirOffset);this.reader.readString(4)===h.CENTRAL_FILE_HEADER;)a=new i({zip64:this.zip64},this.loadOptions),a.readCentralPart(this.reader),this.files.push(a)},readEndOfCentral:function(){var a=this.reader.lastIndexOfSignature(h.CENTRAL_DIRECTORY_END);if(-1===a)throw new Error("Corrupted zip : can't find end of central directory");if(this.reader.setIndex(a),this.checkSignature(h.CENTRAL_DIRECTORY_END),this.readBlockEndOfCentral(),this.diskNumber===g.MAX_VALUE_16BITS||this.diskWithCentralDirStart===g.MAX_VALUE_16BITS||this.centralDirRecordsOnThisDisk===g.MAX_VALUE_16BITS||this.centralDirRecords===g.MAX_VALUE_16BITS||this.centralDirSize===g.MAX_VALUE_32BITS||this.centralDirOffset===g.MAX_VALUE_32BITS){if(this.zip64=!0,a=this.reader.lastIndexOfSignature(h.ZIP64_CENTRAL_DIRECTORY_LOCATOR),-1===a)throw new Error("Corrupted zip : can't find the ZIP64 end of central directory locator");this.reader.setIndex(a),this.checkSignature(h.ZIP64_CENTRAL_DIRECTORY_LOCATOR),this.readBlockZip64EndOfCentralLocator(),this.reader.setIndex(this.relativeOffsetEndOfZip64CentralDir),this.checkSignature(h.ZIP64_CENTRAL_DIRECTORY_END),this.readBlockZip64EndOfCentral()}},prepareReader:function(a){var 
b=g.getTypeOf(a);this.reader="string"!==b||j.uint8array?"nodebuffer"===b?new e(a):new f(g.transformTo("uint8array",a)):new d(a,this.loadOptions.optimizedBinaryString)},load:function(a){this.prepareReader(a),this.readEndOfCentral(),this.readCentralDir(),this.readLocalFiles()}},b.exports=c},{"./nodeBufferReader":12,"./object":13,"./signature":14,"./stringReader":15,"./support":17,"./uint8ArrayReader":18,"./utils":21,"./zipEntry":23}],23:[function(a,b){"use strict";function c(a,b){this.options=a,this.loadOptions=b}var d=a("./stringReader"),e=a("./utils"),f=a("./compressedObject"),g=a("./object");c.prototype={isEncrypted:function(){return 1===(1&this.bitFlag)},useUTF8:function(){return 2048===(2048&this.bitFlag)},prepareCompressedContent:function(a,b,c){return function(){var d=a.index;a.setIndex(b);var e=a.readData(c);return a.setIndex(d),e}},prepareContent:function(a,b,c,d,f){return function(){var a=e.transformTo(d.uncompressInputType,this.getCompressedContent()),b=d.uncompress(a);if(b.length!==f)throw new Error("Bug : uncompressed data size mismatch");return b}},readLocalPart:function(a){var b,c;if(a.skip(22),this.fileNameLength=a.readInt(2),c=a.readInt(2),this.fileName=a.readString(this.fileNameLength),a.skip(c),-1==this.compressedSize||-1==this.uncompressedSize)throw new Error("Bug or corrupted zip : didn't get enough informations from the central directory (compressedSize == -1 || uncompressedSize == -1)");if(b=e.findCompression(this.compressionMethod),null===b)throw new Error("Corrupted zip : compression "+e.pretty(this.compressionMethod)+" unknown (inner file : "+this.fileName+")");if(this.decompressed=new 
f,this.decompressed.compressedSize=this.compressedSize,this.decompressed.uncompressedSize=this.uncompressedSize,this.decompressed.crc32=this.crc32,this.decompressed.compressionMethod=this.compressionMethod,this.decompressed.getCompressedContent=this.prepareCompressedContent(a,a.index,this.compressedSize,b),this.decompressed.getContent=this.prepareContent(a,a.index,this.compressedSize,b,this.uncompressedSize),this.loadOptions.checkCRC32&&(this.decompressed=e.transformTo("string",this.decompressed.getContent()),g.crc32(this.decompressed)!==this.crc32))throw new Error("Corrupted zip : CRC32 mismatch")},readCentralPart:function(a){if(this.versionMadeBy=a.readString(2),this.versionNeeded=a.readInt(2),this.bitFlag=a.readInt(2),this.compressionMethod=a.readString(2),this.date=a.readDate(),this.crc32=a.readInt(4),this.compressedSize=a.readInt(4),this.uncompressedSize=a.readInt(4),this.fileNameLength=a.readInt(2),this.extraFieldsLength=a.readInt(2),this.fileCommentLength=a.readInt(2),this.diskNumberStart=a.readInt(2),this.internalFileAttributes=a.readInt(2),this.externalFileAttributes=a.readInt(4),this.localHeaderOffset=a.readInt(4),this.isEncrypted())throw new Error("Encrypted zip are not supported");this.fileName=a.readString(this.fileNameLength),this.readExtraFields(a),this.parseZIP64ExtraField(a),this.fileComment=a.readString(this.fileCommentLength),this.dir=16&this.externalFileAttributes?!0:!1},parseZIP64ExtraField:function(){if(this.extraFields[1]){var a=new d(this.extraFields[1].value);this.uncompressedSize===e.MAX_VALUE_32BITS&&(this.uncompressedSize=a.readInt(8)),this.compressedSize===e.MAX_VALUE_32BITS&&(this.compressedSize=a.readInt(8)),this.localHeaderOffset===e.MAX_VALUE_32BITS&&(this.localHeaderOffset=a.readInt(8)),this.diskNumberStart===e.MAX_VALUE_32BITS&&(this.diskNumberStart=a.readInt(4))}},readExtraFields:function(a){var 
b,c,d,e=a.index;for(this.extraFields=this.extraFields||{};a.index<e+this.extraFieldsLength;)b=a.readInt(2),c=a.readInt(2),d=a.readString(c),this.extraFields[b]={id:b,length:c,value:d}},handleUTF8:function(){if(this.useUTF8())this.fileName=g.utf8decode(this.fileName),this.fileComment=g.utf8decode(this.fileComment);else{var a=this.findExtraFieldUnicodePath();null!==a&&(this.fileName=a);var b=this.findExtraFieldUnicodeComment();null!==b&&(this.fileComment=b)}},findExtraFieldUnicodePath:function(){var a=this.extraFields[28789];if(a){var b=new d(a.value);return 1!==b.readInt(1)?null:g.crc32(this.fileName)!==b.readInt(4)?null:g.utf8decode(b.readString(a.length-5))}return null},findExtraFieldUnicodeComment:function(){var a=this.extraFields[25461];if(a){var b=new d(a.value);return 1!==b.readInt(1)?null:g.crc32(this.fileComment)!==b.readInt(4)?null:g.utf8decode(b.readString(a.length-5))}return null}},b.exports=c},{"./compressedObject":2,"./object":13,"./stringReader":15,"./utils":21}],24:[function(a,b){"use strict";var c=a("./lib/utils/common").assign,d=a("./lib/deflate"),e=a("./lib/inflate"),f=a("./lib/zlib/constants"),g={};c(g,d,e,f),b.exports=g},{"./lib/deflate":25,"./lib/inflate":26,"./lib/utils/common":27,"./lib/zlib/constants":30}],25:[function(a,b,c){"use strict";function d(a,b){var c=new s(b);if(c.push(a,!0),c.err)throw c.msg;return c.result}function e(a,b){return b=b||{},b.raw=!0,d(a,b)}function f(a,b){return b=b||{},b.gzip=!0,d(a,b)}var g=a("./zlib/deflate.js"),h=a("./utils/common"),i=a("./utils/strings"),j=a("./zlib/messages"),k=a("./zlib/zstream"),l=0,m=4,n=0,o=1,p=-1,q=0,r=8,s=function(a){this.options=h.assign({level:p,method:r,chunkSize:16384,windowBits:15,memLevel:8,strategy:q,to:""},a||{});var b=this.options;b.raw&&b.windowBits>0?b.windowBits=-b.windowBits:b.gzip&&b.windowBits>0&&b.windowBits<16&&(b.windowBits+=16),this.err=0,this.msg="",this.ended=!1,this.chunks=[],this.strm=new k,this.strm.avail_out=0;var 
c=g.deflateInit2(this.strm,b.level,b.method,b.windowBits,b.memLevel,b.strategy);if(c!==n)throw new Error(j[c]);b.header&&g.deflateSetHeader(this.strm,b.header)};s.prototype.push=function(a,b){var c,d,e=this.strm,f=this.options.chunkSize;if(this.ended)return!1;d=b===~~b?b:b===!0?m:l,e.input="string"==typeof a?i.string2buf(a):a,e.next_in=0,e.avail_in=e.input.length;do{if(0===e.avail_out&&(e.output=new h.Buf8(f),e.next_out=0,e.avail_out=f),c=g.deflate(e,d),c!==o&&c!==n)return this.onEnd(c),this.ended=!0,!1;(0===e.avail_out||0===e.avail_in&&d===m)&&this.onData("string"===this.options.to?i.buf2binstring(h.shrinkBuf(e.output,e.next_out)):h.shrinkBuf(e.output,e.next_out))}while((e.avail_in>0||0===e.avail_out)&&c!==o);return d===m?(c=g.deflateEnd(this.strm),this.onEnd(c),this.ended=!0,c===n):!0},s.prototype.onData=function(a){this.chunks.push(a)},s.prototype.onEnd=function(a){a===n&&(this.result="string"===this.options.to?this.chunks.join(""):h.flattenChunks(this.chunks)),this.chunks=[],this.err=a,this.msg=this.strm.msg},c.Deflate=s,c.deflate=d,c.deflateRaw=e,c.gzip=f},{"./utils/common":27,"./utils/strings":28,"./zlib/deflate.js":32,"./zlib/messages":37,"./zlib/zstream":39}],26:[function(a,b,c){"use strict";function d(a,b){var c=new m(b);if(c.push(a,!0),c.err)throw c.msg;return c.result}function e(a,b){return b=b||{},b.raw=!0,d(a,b)}var f=a("./zlib/inflate.js"),g=a("./utils/common"),h=a("./utils/strings"),i=a("./zlib/constants"),j=a("./zlib/messages"),k=a("./zlib/zstream"),l=a("./zlib/gzheader"),m=function(a){this.options=g.assign({chunkSize:16384,windowBits:0,to:""},a||{});var b=this.options;b.raw&&b.windowBits>=0&&b.windowBits<16&&(b.windowBits=-b.windowBits,0===b.windowBits&&(b.windowBits=-15)),!(b.windowBits>=0&&b.windowBits<16)||a&&a.windowBits||(b.windowBits+=32),b.windowBits>15&&b.windowBits<48&&0===(15&b.windowBits)&&(b.windowBits|=15),this.err=0,this.msg="",this.ended=!1,this.chunks=[],this.strm=new k,this.strm.avail_out=0;var 
c=f.inflateInit2(this.strm,b.windowBits);if(c!==i.Z_OK)throw new Error(j[c]);this.header=new l,f.inflateGetHeader(this.strm,this.header)};m.prototype.push=function(a,b){var c,d,e,j,k,l=this.strm,m=this.options.chunkSize;if(this.ended)return!1;d=b===~~b?b:b===!0?i.Z_FINISH:i.Z_NO_FLUSH,l.input="string"==typeof a?h.binstring2buf(a):a,l.next_in=0,l.avail_in=l.input.length;do{if(0===l.avail_out&&(l.output=new g.Buf8(m),l.next_out=0,l.avail_out=m),c=f.inflate(l,i.Z_NO_FLUSH),c!==i.Z_STREAM_END&&c!==i.Z_OK)return this.onEnd(c),this.ended=!0,!1;l.next_out&&(0===l.avail_out||c===i.Z_STREAM_END||0===l.avail_in&&d===i.Z_FINISH)&&("string"===this.options.to?(e=h.utf8border(l.output,l.next_out),j=l.next_out-e,k=h.buf2string(l.output,e),l.next_out=j,l.avail_out=m-j,j&&g.arraySet(l.output,l.output,e,j,0),this.onData(k)):this.onData(g.shrinkBuf(l.output,l.next_out)))}while(l.avail_in>0&&c!==i.Z_STREAM_END);return c===i.Z_STREAM_END&&(d=i.Z_FINISH),d===i.Z_FINISH?(c=f.inflateEnd(this.strm),this.onEnd(c),this.ended=!0,c===i.Z_OK):!0},m.prototype.onData=function(a){this.chunks.push(a)},m.prototype.onEnd=function(a){a===i.Z_OK&&(this.result="string"===this.options.to?this.chunks.join(""):g.flattenChunks(this.chunks)),this.chunks=[],this.err=a,this.msg=this.strm.msg},c.Inflate=m,c.inflate=d,c.inflateRaw=e,c.ungzip=d},{"./utils/common":27,"./utils/strings":28,"./zlib/constants":30,"./zlib/gzheader":33,"./zlib/inflate.js":35,"./zlib/messages":37,"./zlib/zstream":39}],27:[function(a,b,c){"use strict";var d="undefined"!=typeof Uint8Array&&"undefined"!=typeof Uint16Array&&"undefined"!=typeof Int32Array;c.assign=function(a){for(var b=Array.prototype.slice.call(arguments,1);b.length;){var c=b.shift();if(c){if("object"!=typeof c)throw new TypeError(c+"must be non-object");for(var d in c)c.hasOwnProperty(d)&&(a[d]=c[d])}}return a},c.shrinkBuf=function(a,b){return a.length===b?a:a.subarray?a.subarray(0,b):(a.length=b,a)};var e={arraySet:function(a,b,c,d,e){if(b.subarray&&a.subarray)return void 
a.set(b.subarray(c,c+d),e);for(var f=0;d>f;f++)a[e+f]=b[c+f]},flattenChunks:function(a){var b,c,d,e,f,g;for(d=0,b=0,c=a.length;c>b;b++)d+=a[b].length;for(g=new Uint8Array(d),e=0,b=0,c=a.length;c>b;b++)f=a[b],g.set(f,e),e+=f.length;return g}},f={arraySet:function(a,b,c,d,e){for(var f=0;d>f;f++)a[e+f]=b[c+f]},flattenChunks:function(a){return[].concat.apply([],a)}};c.setTyped=function(a){a?(c.Buf8=Uint8Array,c.Buf16=Uint16Array,c.Buf32=Int32Array,c.assign(c,e)):(c.Buf8=Array,c.Buf16=Array,c.Buf32=Array,c.assign(c,f))},c.setTyped(d)},{}],28:[function(a,b,c){"use strict";function d(a,b){if(65537>b&&(a.subarray&&g||!a.subarray&&f))return String.fromCharCode.apply(null,e.shrinkBuf(a,b));for(var c="",d=0;b>d;d++)c+=String.fromCharCode(a[d]);return c}var e=a("./common"),f=!0,g=!0;try{String.fromCharCode.apply(null,[0])}catch(h){f=!1}try{String.fromCharCode.apply(null,new Uint8Array(1))}catch(h){g=!1}for(var i=new e.Buf8(256),j=0;256>j;j++)i[j]=j>=252?6:j>=248?5:j>=240?4:j>=224?3:j>=192?2:1;i[254]=i[254]=1,c.string2buf=function(a){var b,c,d,f,g,h=a.length,i=0;for(f=0;h>f;f++)c=a.charCodeAt(f),55296===(64512&c)&&h>f+1&&(d=a.charCodeAt(f+1),56320===(64512&d)&&(c=65536+(c-55296<<10)+(d-56320),f++)),i+=128>c?1:2048>c?2:65536>c?3:4;for(b=new e.Buf8(i),g=0,f=0;i>g;f++)c=a.charCodeAt(f),55296===(64512&c)&&h>f+1&&(d=a.charCodeAt(f+1),56320===(64512&d)&&(c=65536+(c-55296<<10)+(d-56320),f++)),128>c?b[g++]=c:2048>c?(b[g++]=192|c>>>6,b[g++]=128|63&c):65536>c?(b[g++]=224|c>>>12,b[g++]=128|c>>>6&63,b[g++]=128|63&c):(b[g++]=240|c>>>18,b[g++]=128|c>>>12&63,b[g++]=128|c>>>6&63,b[g++]=128|63&c);return b},c.buf2binstring=function(a){return d(a,a.length)},c.binstring2buf=function(a){for(var b=new e.Buf8(a.length),c=0,d=b.length;d>c;c++)b[c]=a.charCodeAt(c);return b},c.buf2string=function(a,b){var c,e,f,g,h=b||a.length,j=new Array(2*h);for(e=0,c=0;h>c;)if(f=a[c++],128>f)j[e++]=f;else 
if(g=i[f],g>4)j[e++]=65533,c+=g-1;else{for(f&=2===g?31:3===g?15:7;g>1&&h>c;)f=f<<6|63&a[c++],g--;g>1?j[e++]=65533:65536>f?j[e++]=f:(f-=65536,j[e++]=55296|f>>10&1023,j[e++]=56320|1023&f)}return d(j,e)},c.utf8border=function(a,b){var c;for(b=b||a.length,b>a.length&&(b=a.length),c=b-1;c>=0&&128===(192&a[c]);)c--;return 0>c?b:0===c?b:c+i[a[c]]>b?c:b}},{"./common":27}],29:[function(a,b){"use strict";function c(a,b,c,d){for(var e=65535&a|0,f=a>>>16&65535|0,g=0;0!==c;){g=c>2e3?2e3:c,c-=g;do e=e+b[d++]|0,f=f+e|0;while(--g);e%=65521,f%=65521}return e|f<<16|0}b.exports=c},{}],30:[function(a,b){b.exports={Z_NO_FLUSH:0,Z_PARTIAL_FLUSH:1,Z_SYNC_FLUSH:2,Z_FULL_FLUSH:3,Z_FINISH:4,Z_BLOCK:5,Z_TREES:6,Z_OK:0,Z_STREAM_END:1,Z_NEED_DICT:2,Z_ERRNO:-1,Z_STREAM_ERROR:-2,Z_DATA_ERROR:-3,Z_BUF_ERROR:-5,Z_NO_COMPRESSION:0,Z_BEST_SPEED:1,Z_BEST_COMPRESSION:9,Z_DEFAULT_COMPRESSION:-1,Z_FILTERED:1,Z_HUFFMAN_ONLY:2,Z_RLE:3,Z_FIXED:4,Z_DEFAULT_STRATEGY:0,Z_BINARY:0,Z_TEXT:1,Z_UNKNOWN:2,Z_DEFLATED:8}},{}],31:[function(a,b){"use strict";function c(){for(var a,b=[],c=0;256>c;c++){a=c;for(var d=0;8>d;d++)a=1&a?3988292384^a>>>1:a>>>1;b[c]=a}return b}function d(a,b,c,d){var f=e,g=d+c;a=-1^a;for(var h=d;g>h;h++)a=a>>>8^f[255&(a^b[h])];return-1^a}var e=c();b.exports=d},{}],32:[function(a,b,c){"use strict";function d(a,b){return a.msg=G[b],b}function e(a){return(a<<1)-(a>4?9:0)}function f(a){for(var b=a.length;--b>=0;)a[b]=0}function g(a){var b=a.state,c=b.pending;c>a.avail_out&&(c=a.avail_out),0!==c&&(C.arraySet(a.output,b.pending_buf,b.pending_out,c,a.next_out),a.next_out+=c,b.pending_out+=c,a.total_out+=c,a.avail_out-=c,b.pending-=c,0===b.pending&&(b.pending_out=0))}function h(a,b){D._tr_flush_block(a,a.block_start>=0?a.block_start:-1,a.strstart-a.block_start,b),a.block_start=a.strstart,g(a.strm)}function i(a,b){a.pending_buf[a.pending++]=b}function j(a,b){a.pending_buf[a.pending++]=b>>>8&255,a.pending_buf[a.pending++]=255&b}function k(a,b,c,d){var e=a.avail_in;return 
e>d&&(e=d),0===e?0:(a.avail_in-=e,C.arraySet(b,a.input,a.next_in,e,c),1===a.state.wrap?a.adler=E(a.adler,b,e,c):2===a.state.wrap&&(a.adler=F(a.adler,b,e,c)),a.next_in+=e,a.total_in+=e,e)}function l(a,b){var c,d,e=a.max_chain_length,f=a.strstart,g=a.prev_length,h=a.nice_match,i=a.strstart>a.w_size-jb?a.strstart-(a.w_size-jb):0,j=a.window,k=a.w_mask,l=a.prev,m=a.strstart+ib,n=j[f+g-1],o=j[f+g];a.prev_length>=a.good_match&&(e>>=2),h>a.lookahead&&(h=a.lookahead);do if(c=b,j[c+g]===o&&j[c+g-1]===n&&j[c]===j[f]&&j[++c]===j[f+1]){f+=2,c++;do;while(j[++f]===j[++c]&&j[++f]===j[++c]&&j[++f]===j[++c]&&j[++f]===j[++c]&&j[++f]===j[++c]&&j[++f]===j[++c]&&j[++f]===j[++c]&&j[++f]===j[++c]&&m>f);if(d=ib-(m-f),f=m-ib,d>g){if(a.match_start=b,g=d,d>=h)break;n=j[f+g-1],o=j[f+g]}}while((b=l[b&k])>i&&0!==--e);return g<=a.lookahead?g:a.lookahead}function m(a){var b,c,d,e,f,g=a.w_size;do{if(e=a.window_size-a.lookahead-a.strstart,a.strstart>=g+(g-jb)){C.arraySet(a.window,a.window,g,g,0),a.match_start-=g,a.strstart-=g,a.block_start-=g,c=a.hash_size,b=c;do d=a.head[--b],a.head[b]=d>=g?d-g:0;while(--c);c=g,b=c;do d=a.prev[--b],a.prev[b]=d>=g?d-g:0;while(--c);e+=g}if(0===a.strm.avail_in)break;if(c=k(a.strm,a.window,a.strstart+a.lookahead,e),a.lookahead+=c,a.lookahead+a.insert>=hb)for(f=a.strstart-a.insert,a.ins_h=a.window[f],a.ins_h=(a.ins_h<<a.hash_shift^a.window[f+1])&a.hash_mask;a.insert&&(a.ins_h=(a.ins_h<<a.hash_shift^a.window[f+hb-1])&a.hash_mask,a.prev[f&a.w_mask]=a.head[a.ins_h],a.head[a.ins_h]=f,f++,a.insert--,!(a.lookahead+a.insert<hb)););}while(a.lookahead<jb&&0!==a.strm.avail_in)}function n(a,b){var c=65535;for(c>a.pending_buf_size-5&&(c=a.pending_buf_size-5);;){if(a.lookahead<=1){if(m(a),0===a.lookahead&&b===H)return sb;if(0===a.lookahead)break}a.strstart+=a.lookahead,a.lookahead=0;var d=a.block_start+c;if((0===a.strstart||a.strstart>=d)&&(a.lookahead=a.strstart-d,a.strstart=d,h(a,!1),0===a.strm.avail_out))return 
sb;if(a.strstart-a.block_start>=a.w_size-jb&&(h(a,!1),0===a.strm.avail_out))return sb}return a.insert=0,b===K?(h(a,!0),0===a.strm.avail_out?ub:vb):a.strstart>a.block_start&&(h(a,!1),0===a.strm.avail_out)?sb:sb}function o(a,b){for(var c,d;;){if(a.lookahead<jb){if(m(a),a.lookahead<jb&&b===H)return sb;if(0===a.lookahead)break}if(c=0,a.lookahead>=hb&&(a.ins_h=(a.ins_h<<a.hash_shift^a.window[a.strstart+hb-1])&a.hash_mask,c=a.prev[a.strstart&a.w_mask]=a.head[a.ins_h],a.head[a.ins_h]=a.strstart),0!==c&&a.strstart-c<=a.w_size-jb&&(a.match_length=l(a,c)),a.match_length>=hb)if(d=D._tr_tally(a,a.strstart-a.match_start,a.match_length-hb),a.lookahead-=a.match_length,a.match_length<=a.max_lazy_match&&a.lookahead>=hb){a.match_length--;do a.strstart++,a.ins_h=(a.ins_h<<a.hash_shift^a.window[a.strstart+hb-1])&a.hash_mask,c=a.prev[a.strstart&a.w_mask]=a.head[a.ins_h],a.head[a.ins_h]=a.strstart;while(0!==--a.match_length);a.strstart++}else a.strstart+=a.match_length,a.match_length=0,a.ins_h=a.window[a.strstart],a.ins_h=(a.ins_h<<a.hash_shift^a.window[a.strstart+1])&a.hash_mask;else d=D._tr_tally(a,0,a.window[a.strstart]),a.lookahead--,a.strstart++;if(d&&(h(a,!1),0===a.strm.avail_out))return sb}return a.insert=a.strstart<hb-1?a.strstart:hb-1,b===K?(h(a,!0),0===a.strm.avail_out?ub:vb):a.last_lit&&(h(a,!1),0===a.strm.avail_out)?sb:tb}function p(a,b){for(var c,d,e;;){if(a.lookahead<jb){if(m(a),a.lookahead<jb&&b===H)return 
sb;if(0===a.lookahead)break}if(c=0,a.lookahead>=hb&&(a.ins_h=(a.ins_h<<a.hash_shift^a.window[a.strstart+hb-1])&a.hash_mask,c=a.prev[a.strstart&a.w_mask]=a.head[a.ins_h],a.head[a.ins_h]=a.strstart),a.prev_length=a.match_length,a.prev_match=a.match_start,a.match_length=hb-1,0!==c&&a.prev_length<a.max_lazy_match&&a.strstart-c<=a.w_size-jb&&(a.match_length=l(a,c),a.match_length<=5&&(a.strategy===S||a.match_length===hb&&a.strstart-a.match_start>4096)&&(a.match_length=hb-1)),a.prev_length>=hb&&a.match_length<=a.prev_length){e=a.strstart+a.lookahead-hb,d=D._tr_tally(a,a.strstart-1-a.prev_match,a.prev_length-hb),a.lookahead-=a.prev_length-1,a.prev_length-=2;do++a.strstart<=e&&(a.ins_h=(a.ins_h<<a.hash_shift^a.window[a.strstart+hb-1])&a.hash_mask,c=a.prev[a.strstart&a.w_mask]=a.head[a.ins_h],a.head[a.ins_h]=a.strstart);while(0!==--a.prev_length);if(a.match_available=0,a.match_length=hb-1,a.strstart++,d&&(h(a,!1),0===a.strm.avail_out))return sb}else if(a.match_available){if(d=D._tr_tally(a,0,a.window[a.strstart-1]),d&&h(a,!1),a.strstart++,a.lookahead--,0===a.strm.avail_out)return sb}else a.match_available=1,a.strstart++,a.lookahead--}return a.match_available&&(d=D._tr_tally(a,0,a.window[a.strstart-1]),a.match_available=0),a.insert=a.strstart<hb-1?a.strstart:hb-1,b===K?(h(a,!0),0===a.strm.avail_out?ub:vb):a.last_lit&&(h(a,!1),0===a.strm.avail_out)?sb:tb}function q(a,b){for(var c,d,e,f,g=a.window;;){if(a.lookahead<=ib){if(m(a),a.lookahead<=ib&&b===H)return 
sb;if(0===a.lookahead)break}if(a.match_length=0,a.lookahead>=hb&&a.strstart>0&&(e=a.strstart-1,d=g[e],d===g[++e]&&d===g[++e]&&d===g[++e])){f=a.strstart+ib;do;while(d===g[++e]&&d===g[++e]&&d===g[++e]&&d===g[++e]&&d===g[++e]&&d===g[++e]&&d===g[++e]&&d===g[++e]&&f>e);a.match_length=ib-(f-e),a.match_length>a.lookahead&&(a.match_length=a.lookahead)}if(a.match_length>=hb?(c=D._tr_tally(a,1,a.match_length-hb),a.lookahead-=a.match_length,a.strstart+=a.match_length,a.match_length=0):(c=D._tr_tally(a,0,a.window[a.strstart]),a.lookahead--,a.strstart++),c&&(h(a,!1),0===a.strm.avail_out))return sb}return a.insert=0,b===K?(h(a,!0),0===a.strm.avail_out?ub:vb):a.last_lit&&(h(a,!1),0===a.strm.avail_out)?sb:tb}function r(a,b){for(var c;;){if(0===a.lookahead&&(m(a),0===a.lookahead)){if(b===H)return sb;break}if(a.match_length=0,c=D._tr_tally(a,0,a.window[a.strstart]),a.lookahead--,a.strstart++,c&&(h(a,!1),0===a.strm.avail_out))return sb}return a.insert=0,b===K?(h(a,!0),0===a.strm.avail_out?ub:vb):a.last_lit&&(h(a,!1),0===a.strm.avail_out)?sb:tb}function s(a){a.window_size=2*a.w_size,f(a.head),a.max_lazy_match=B[a.level].max_lazy,a.good_match=B[a.level].good_length,a.nice_match=B[a.level].nice_length,a.max_chain_length=B[a.level].max_chain,a.strstart=0,a.block_start=0,a.lookahead=0,a.insert=0,a.match_length=a.prev_length=hb-1,a.match_available=0,a.ins_h=0}function 
t(){this.strm=null,this.status=0,this.pending_buf=null,this.pending_buf_size=0,this.pending_out=0,this.pending=0,this.wrap=0,this.gzhead=null,this.gzindex=0,this.method=Y,this.last_flush=-1,this.w_size=0,this.w_bits=0,this.w_mask=0,this.window=null,this.window_size=0,this.prev=null,this.head=null,this.ins_h=0,this.hash_size=0,this.hash_bits=0,this.hash_mask=0,this.hash_shift=0,this.block_start=0,this.match_length=0,this.prev_match=0,this.match_available=0,this.strstart=0,this.match_start=0,this.lookahead=0,this.prev_length=0,this.max_chain_length=0,this.max_lazy_match=0,this.level=0,this.strategy=0,this.good_match=0,this.nice_match=0,this.dyn_ltree=new C.Buf16(2*fb),this.dyn_dtree=new C.Buf16(2*(2*db+1)),this.bl_tree=new C.Buf16(2*(2*eb+1)),f(this.dyn_ltree),f(this.dyn_dtree),f(this.bl_tree),this.l_desc=null,this.d_desc=null,this.bl_desc=null,this.bl_count=new C.Buf16(gb+1),this.heap=new C.Buf16(2*cb+1),f(this.heap),this.heap_len=0,this.heap_max=0,this.depth=new C.Buf16(2*cb+1),f(this.depth),this.l_buf=0,this.lit_bufsize=0,this.last_lit=0,this.d_buf=0,this.opt_len=0,this.static_len=0,this.matches=0,this.insert=0,this.bi_buf=0,this.bi_valid=0}function u(a){var b;return a&&a.state?(a.total_in=a.total_out=0,a.data_type=X,b=a.state,b.pending=0,b.pending_out=0,b.wrap<0&&(b.wrap=-b.wrap),b.status=b.wrap?lb:qb,a.adler=2===b.wrap?0:1,b.last_flush=H,D._tr_init(b),M):d(a,O)}function v(a){var b=u(a);return b===M&&s(a.state),b}function w(a,b){return a&&a.state?2!==a.state.wrap?O:(a.state.gzhead=b,M):O}function x(a,b,c,e,f,g){if(!a)return O;var h=1;if(b===R&&(b=6),0>e?(h=0,e=-e):e>15&&(h=2,e-=16),1>f||f>Z||c!==Y||8>e||e>15||0>b||b>9||0>g||g>V)return d(a,O);8===e&&(e=9);var i=new t;return a.state=i,i.strm=a,i.wrap=h,i.gzhead=null,i.w_bits=e,i.w_size=1<<i.w_bits,i.w_mask=i.w_size-1,i.hash_bits=f+7,i.hash_size=1<<i.hash_bits,i.hash_mask=i.hash_size-1,i.hash_shift=~~((i.hash_bits+hb-1)/hb),i.window=new C.Buf8(2*i.w_size),i.head=new C.Buf16(i.hash_size),i.prev=new 
C.Buf16(i.w_size),i.lit_bufsize=1<<f+6,i.pending_buf_size=4*i.lit_bufsize,i.pending_buf=new C.Buf8(i.pending_buf_size),i.d_buf=i.lit_bufsize>>1,i.l_buf=3*i.lit_bufsize,i.level=b,i.strategy=g,i.method=c,v(a)}function y(a,b){return x(a,b,Y,$,_,W)}function z(a,b){var c,h,k,l;if(!a||!a.state||b>L||0>b)return a?d(a,O):O;if(h=a.state,!a.output||!a.input&&0!==a.avail_in||h.status===rb&&b!==K)return d(a,0===a.avail_out?Q:O);if(h.strm=a,c=h.last_flush,h.last_flush=b,h.status===lb)if(2===h.wrap)a.adler=0,i(h,31),i(h,139),i(h,8),h.gzhead?(i(h,(h.gzhead.text?1:0)+(h.gzhead.hcrc?2:0)+(h.gzhead.extra?4:0)+(h.gzhead.name?8:0)+(h.gzhead.comment?16:0)),i(h,255&h.gzhead.time),i(h,h.gzhead.time>>8&255),i(h,h.gzhead.time>>16&255),i(h,h.gzhead.time>>24&255),i(h,9===h.level?2:h.strategy>=T||h.level<2?4:0),i(h,255&h.gzhead.os),h.gzhead.extra&&h.gzhead.extra.length&&(i(h,255&h.gzhead.extra.length),i(h,h.gzhead.extra.length>>8&255)),h.gzhead.hcrc&&(a.adler=F(a.adler,h.pending_buf,h.pending,0)),h.gzindex=0,h.status=mb):(i(h,0),i(h,0),i(h,0),i(h,0),i(h,0),i(h,9===h.level?2:h.strategy>=T||h.level<2?4:0),i(h,wb),h.status=qb);else{var m=Y+(h.w_bits-8<<4)<<8,n=-1;n=h.strategy>=T||h.level<2?0:h.level<6?1:6===h.level?2:3,m|=n<<6,0!==h.strstart&&(m|=kb),m+=31-m%31,h.status=qb,j(h,m),0!==h.strstart&&(j(h,a.adler>>>16),j(h,65535&a.adler)),a.adler=1}if(h.status===mb)if(h.gzhead.extra){for(k=h.pending;h.gzindex<(65535&h.gzhead.extra.length)&&(h.pending!==h.pending_buf_size||(h.gzhead.hcrc&&h.pending>k&&(a.adler=F(a.adler,h.pending_buf,h.pending-k,k)),g(a),k=h.pending,h.pending!==h.pending_buf_size));)i(h,255&h.gzhead.extra[h.gzindex]),h.gzindex++;h.gzhead.hcrc&&h.pending>k&&(a.adler=F(a.adler,h.pending_buf,h.pending-k,k)),h.gzindex===h.gzhead.extra.length&&(h.gzindex=0,h.status=nb)}else 
h.status=nb;if(h.status===nb)if(h.gzhead.name){k=h.pending;do{if(h.pending===h.pending_buf_size&&(h.gzhead.hcrc&&h.pending>k&&(a.adler=F(a.adler,h.pending_buf,h.pending-k,k)),g(a),k=h.pending,h.pending===h.pending_buf_size)){l=1;break}l=h.gzindex<h.gzhead.name.length?255&h.gzhead.name.charCodeAt(h.gzindex++):0,i(h,l)}while(0!==l);h.gzhead.hcrc&&h.pending>k&&(a.adler=F(a.adler,h.pending_buf,h.pending-k,k)),0===l&&(h.gzindex=0,h.status=ob)}else h.status=ob;if(h.status===ob)if(h.gzhead.comment){k=h.pending;do{if(h.pending===h.pending_buf_size&&(h.gzhead.hcrc&&h.pending>k&&(a.adler=F(a.adler,h.pending_buf,h.pending-k,k)),g(a),k=h.pending,h.pending===h.pending_buf_size)){l=1;break}l=h.gzindex<h.gzhead.comment.length?255&h.gzhead.comment.charCodeAt(h.gzindex++):0,i(h,l)}while(0!==l);h.gzhead.hcrc&&h.pending>k&&(a.adler=F(a.adler,h.pending_buf,h.pending-k,k)),0===l&&(h.status=pb)}else h.status=pb;if(h.status===pb&&(h.gzhead.hcrc?(h.pending+2>h.pending_buf_size&&g(a),h.pending+2<=h.pending_buf_size&&(i(h,255&a.adler),i(h,a.adler>>8&255),a.adler=0,h.status=qb)):h.status=qb),0!==h.pending){if(g(a),0===a.avail_out)return h.last_flush=-1,M}else if(0===a.avail_in&&e(b)<=e(c)&&b!==K)return d(a,Q);if(h.status===rb&&0!==a.avail_in)return d(a,Q);if(0!==a.avail_in||0!==h.lookahead||b!==H&&h.status!==rb){var o=h.strategy===T?r(h,b):h.strategy===U?q(h,b):B[h.level].func(h,b);if((o===ub||o===vb)&&(h.status=rb),o===sb||o===ub)return 0===a.avail_out&&(h.last_flush=-1),M;if(o===tb&&(b===I?D._tr_align(h):b!==L&&(D._tr_stored_block(h,0,0,!1),b===J&&(f(h.head),0===h.lookahead&&(h.strstart=0,h.block_start=0,h.insert=0))),g(a),0===a.avail_out))return h.last_flush=-1,M}return b!==K?M:h.wrap<=0?N:(2===h.wrap?(i(h,255&a.adler),i(h,a.adler>>8&255),i(h,a.adler>>16&255),i(h,a.adler>>24&255),i(h,255&a.total_in),i(h,a.total_in>>8&255),i(h,a.total_in>>16&255),i(h,a.total_in>>24&255)):(j(h,a.adler>>>16),j(h,65535&a.adler)),g(a),h.wrap>0&&(h.wrap=-h.wrap),0!==h.pending?M:N)}function A(a){var b;return 
a&&a.state?(b=a.state.status,b!==lb&&b!==mb&&b!==nb&&b!==ob&&b!==pb&&b!==qb&&b!==rb?d(a,O):(a.state=null,b===qb?d(a,P):M)):O}var B,C=a("../utils/common"),D=a("./trees"),E=a("./adler32"),F=a("./crc32"),G=a("./messages"),H=0,I=1,J=3,K=4,L=5,M=0,N=1,O=-2,P=-3,Q=-5,R=-1,S=1,T=2,U=3,V=4,W=0,X=2,Y=8,Z=9,$=15,_=8,ab=29,bb=256,cb=bb+1+ab,db=30,eb=19,fb=2*cb+1,gb=15,hb=3,ib=258,jb=ib+hb+1,kb=32,lb=42,mb=69,nb=73,ob=91,pb=103,qb=113,rb=666,sb=1,tb=2,ub=3,vb=4,wb=3,xb=function(a,b,c,d,e){this.good_length=a,this.max_lazy=b,this.nice_length=c,this.max_chain=d,this.func=e};B=[new xb(0,0,0,0,n),new xb(4,4,8,4,o),new xb(4,5,16,8,o),new xb(4,6,32,32,o),new xb(4,4,16,16,p),new xb(8,16,32,32,p),new xb(8,16,128,128,p),new xb(8,32,128,256,p),new xb(32,128,258,1024,p),new xb(32,258,258,4096,p)],c.deflateInit=y,c.deflateInit2=x,c.deflateReset=v,c.deflateResetKeep=u,c.deflateSetHeader=w,c.deflate=z,c.deflateEnd=A,c.deflateInfo="pako deflate (from Nodeca project)"},{"../utils/common":27,"./adler32":29,"./crc32":31,"./messages":37,"./trees":38}],33:[function(a,b){"use strict";function c(){this.text=0,this.time=0,this.xflags=0,this.os=0,this.extra=null,this.extra_len=0,this.name="",this.comment="",this.hcrc=0,this.done=!1}b.exports=c},{}],34:[function(a,b){"use strict";var c=30,d=12;b.exports=function(a,b){var e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u,v,w,x,y,z,A,B,C;e=a.state,f=a.next_in,B=a.input,g=f+(a.avail_in-5),h=a.next_out,C=a.output,i=h-(b-a.avail_out),j=h+(a.avail_out-257),k=e.dmax,l=e.wsize,m=e.whave,n=e.wnext,o=e.window,p=e.hold,q=e.bits,r=e.lencode,s=e.distcode,t=(1<<e.lenbits)-1,u=(1<<e.distbits)-1;a:do{15>q&&(p+=B[f++]<<q,q+=8,p+=B[f++]<<q,q+=8),v=r[p&t];b:for(;;){if(w=v>>>24,p>>>=w,q-=w,w=v>>>16&255,0===w)C[h++]=65535&v;else{if(!(16&w)){if(0===(64&w)){v=r[(65535&v)+(p&(1<<w)-1)];continue b}if(32&w){e.mode=d;break a}a.msg="invalid literal/length code",e.mode=c;break 
a}x=65535&v,w&=15,w&&(w>q&&(p+=B[f++]<<q,q+=8),x+=p&(1<<w)-1,p>>>=w,q-=w),15>q&&(p+=B[f++]<<q,q+=8,p+=B[f++]<<q,q+=8),v=s[p&u];c:for(;;){if(w=v>>>24,p>>>=w,q-=w,w=v>>>16&255,!(16&w)){if(0===(64&w)){v=s[(65535&v)+(p&(1<<w)-1)];continue c}a.msg="invalid distance code",e.mode=c;break a}if(y=65535&v,w&=15,w>q&&(p+=B[f++]<<q,q+=8,w>q&&(p+=B[f++]<<q,q+=8)),y+=p&(1<<w)-1,y>k){a.msg="invalid distance too far back",e.mode=c;break a}if(p>>>=w,q-=w,w=h-i,y>w){if(w=y-w,w>m&&e.correct){a.msg="invalid distance too far back",e.mode=c;break a}if(z=0,A=o,0===n){if(z+=l-w,x>w){x-=w;do C[h++]=o[z++];while(--w);z=h-y,A=C}}else if(w>n){if(z+=l+n-w,w-=n,x>w){x-=w;do C[h++]=o[z++];while(--w);if(z=0,x>n){w=n,x-=w;do C[h++]=o[z++];while(--w);z=h-y,A=C}}}else if(z+=n-w,x>w){x-=w;do C[h++]=o[z++];while(--w);z=h-y,A=C}for(;x>2;)C[h++]=A[z++],C[h++]=A[z++],C[h++]=A[z++],x-=3;x&&(C[h++]=A[z++],x>1&&(C[h++]=A[z++]))}else{z=h-y;do C[h++]=C[z++],C[h++]=C[z++],C[h++]=C[z++],x-=3;while(x>2);x&&(C[h++]=C[z++],x>1&&(C[h++]=C[z++]))}break}}break}}while(g>f&&j>h);x=q>>3,f-=x,q-=x<<3,p&=(1<<q)-1,a.next_in=f,a.next_out=h,a.avail_in=g>f?5+(g-f):5-(f-g),a.avail_out=j>h?257+(j-h):257-(h-j),e.hold=p,e.bits=q}},{}],35:[function(a,b,c){"use strict";function d(a){return(a>>>24&255)+(a>>>8&65280)+((65280&a)<<8)+((255&a)<<24)}function e(){this.mode=0,this.last=!1,this.wrap=0,this.havedict=!1,this.flags=0,this.dmax=0,this.check=0,this.total=0,this.head=null,this.wbits=0,this.wsize=0,this.whave=0,this.wnext=0,this.window=null,this.hold=0,this.bits=0,this.length=0,this.offset=0,this.extra=0,this.lencode=null,this.distcode=null,this.lenbits=0,this.distbits=0,this.ncode=0,this.nlen=0,this.ndist=0,this.have=0,this.next=null,this.lens=new r.Buf16(320),this.work=new r.Buf16(288),this.lendyn=null,this.distdyn=null,this.correct=0,this.back=0,this.was=0}function f(a){var b;return 
a&&a.state?(b=a.state,a.total_in=a.total_out=b.total=0,a.msg="",b.wrap&&(a.adler=1&b.wrap),b.mode=K,b.last=0,b.havedict=0,b.dmax=32768,b.head=null,b.hold=0,b.bits=0,b.lencode=b.lendyn=new r.Buf32(ob),b.distcode=b.distdyn=new r.Buf32(pb),b.correct=1,b.back=-1,C):F}function g(a){var b;return a&&a.state?(b=a.state,b.wsize=0,b.whave=0,b.wnext=0,f(a)):F}function h(a,b){var c,d;return a&&a.state?(d=a.state,0>b?(c=0,b=-b):(c=(b>>4)+1,48>b&&(b&=15)),b&&(8>b||b>15)?F:(null!==d.window&&d.wbits!==b&&(d.window=null),d.wrap=c,d.wbits=b,g(a))):F}function i(a,b){var c,d;return a?(d=new e,a.state=d,d.window=null,c=h(a,b),c!==C&&(a.state=null),c):F}function j(a){return i(a,rb)}function k(a){if(sb){var b;for(p=new r.Buf32(512),q=new r.Buf32(32),b=0;144>b;)a.lens[b++]=8;for(;256>b;)a.lens[b++]=9;for(;280>b;)a.lens[b++]=7;for(;288>b;)a.lens[b++]=8;for(v(x,a.lens,0,288,p,0,a.work,{bits:9}),b=0;32>b;)a.lens[b++]=5;v(y,a.lens,0,32,q,0,a.work,{bits:5}),sb=!1}a.lencode=p,a.lenbits=9,a.distcode=q,a.distbits=5}function l(a,b,c,d){var e,f=a.state;return null===f.window&&(f.wsize=1<<f.wbits,f.wnext=0,f.whave=0,f.window=new r.Buf8(f.wsize)),d>=f.wsize?(r.arraySet(f.window,b,c-f.wsize,f.wsize,0),f.wnext=0,f.whave=f.wsize):(e=f.wsize-f.wnext,e>d&&(e=d),r.arraySet(f.window,b,c-d,e,f.wnext),d-=e,d?(r.arraySet(f.window,b,c-d,d,0),f.wnext=d,f.whave=f.wsize):(f.wnext+=e,f.wnext===f.wsize&&(f.wnext=0),f.whave<f.wsize&&(f.whave+=e))),0}function m(a,b){var c,e,f,g,h,i,j,m,n,o,p,q,ob,pb,qb,rb,sb,tb,ub,vb,wb,xb,yb,zb,Ab=0,Bb=new r.Buf8(4),Cb=[16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15];if(!a||!a.state||!a.output||!a.input&&0!==a.avail_in)return F;c=a.state,c.mode===V&&(c.mode=W),h=a.next_out,f=a.output,j=a.avail_out,g=a.next_in,e=a.input,i=a.avail_in,m=c.hold,n=c.bits,o=i,p=j,xb=C;a:for(;;)switch(c.mode){case K:if(0===c.wrap){c.mode=W;break}for(;16>n;){if(0===i)break 
a;i--,m+=e[g++]<<n,n+=8}if(2&c.wrap&&35615===m){c.check=0,Bb[0]=255&m,Bb[1]=m>>>8&255,c.check=t(c.check,Bb,2,0),m=0,n=0,c.mode=L;break}if(c.flags=0,c.head&&(c.head.done=!1),!(1&c.wrap)||(((255&m)<<8)+(m>>8))%31){a.msg="incorrect header check",c.mode=lb;break}if((15&m)!==J){a.msg="unknown compression method",c.mode=lb;break}if(m>>>=4,n-=4,wb=(15&m)+8,0===c.wbits)c.wbits=wb;else if(wb>c.wbits){a.msg="invalid window size",c.mode=lb;break}c.dmax=1<<wb,a.adler=c.check=1,c.mode=512&m?T:V,m=0,n=0;break;case L:for(;16>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(c.flags=m,(255&c.flags)!==J){a.msg="unknown compression method",c.mode=lb;break}if(57344&c.flags){a.msg="unknown header flags set",c.mode=lb;break}c.head&&(c.head.text=m>>8&1),512&c.flags&&(Bb[0]=255&m,Bb[1]=m>>>8&255,c.check=t(c.check,Bb,2,0)),m=0,n=0,c.mode=M;case M:for(;32>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}c.head&&(c.head.time=m),512&c.flags&&(Bb[0]=255&m,Bb[1]=m>>>8&255,Bb[2]=m>>>16&255,Bb[3]=m>>>24&255,c.check=t(c.check,Bb,4,0)),m=0,n=0,c.mode=N;case N:for(;16>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}c.head&&(c.head.xflags=255&m,c.head.os=m>>8),512&c.flags&&(Bb[0]=255&m,Bb[1]=m>>>8&255,c.check=t(c.check,Bb,2,0)),m=0,n=0,c.mode=O;case O:if(1024&c.flags){for(;16>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}c.length=m,c.head&&(c.head.extra_len=m),512&c.flags&&(Bb[0]=255&m,Bb[1]=m>>>8&255,c.check=t(c.check,Bb,2,0)),m=0,n=0}else c.head&&(c.head.extra=null);c.mode=P;case P:if(1024&c.flags&&(q=c.length,q>i&&(q=i),q&&(c.head&&(wb=c.head.extra_len-c.length,c.head.extra||(c.head.extra=new Array(c.head.extra_len)),r.arraySet(c.head.extra,e,g,q,wb)),512&c.flags&&(c.check=t(c.check,e,q,g)),i-=q,g+=q,c.length-=q),c.length))break a;c.length=0,c.mode=Q;case Q:if(2048&c.flags){if(0===i)break a;q=0;do wb=e[g+q++],c.head&&wb&&c.length<65536&&(c.head.name+=String.fromCharCode(wb));while(wb&&i>q);if(512&c.flags&&(c.check=t(c.check,e,q,g)),i-=q,g+=q,wb)break a}else c.head&&(c.head.name=null);c.length=0,c.mode=R;case 
R:if(4096&c.flags){if(0===i)break a;q=0;do wb=e[g+q++],c.head&&wb&&c.length<65536&&(c.head.comment+=String.fromCharCode(wb));while(wb&&i>q);if(512&c.flags&&(c.check=t(c.check,e,q,g)),i-=q,g+=q,wb)break a}else c.head&&(c.head.comment=null);c.mode=S;case S:if(512&c.flags){for(;16>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(m!==(65535&c.check)){a.msg="header crc mismatch",c.mode=lb;break}m=0,n=0}c.head&&(c.head.hcrc=c.flags>>9&1,c.head.done=!0),a.adler=c.check=0,c.mode=V;break;case T:for(;32>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}a.adler=c.check=d(m),m=0,n=0,c.mode=U;case U:if(0===c.havedict)return a.next_out=h,a.avail_out=j,a.next_in=g,a.avail_in=i,c.hold=m,c.bits=n,E;a.adler=c.check=1,c.mode=V;case V:if(b===A||b===B)break a;case W:if(c.last){m>>>=7&n,n-=7&n,c.mode=ib;break}for(;3>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}switch(c.last=1&m,m>>>=1,n-=1,3&m){case 0:c.mode=X;break;case 1:if(k(c),c.mode=bb,b===B){m>>>=2,n-=2;break a}break;case 2:c.mode=$;break;case 3:a.msg="invalid block type",c.mode=lb}m>>>=2,n-=2;break;case X:for(m>>>=7&n,n-=7&n;32>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if((65535&m)!==(m>>>16^65535)){a.msg="invalid stored block lengths",c.mode=lb;break}if(c.length=65535&m,m=0,n=0,c.mode=Y,b===B)break a;case Y:c.mode=Z;case Z:if(q=c.length){if(q>i&&(q=i),q>j&&(q=j),0===q)break a;r.arraySet(f,e,g,q,h),i-=q,g+=q,j-=q,h+=q,c.length-=q;break}c.mode=V;break;case $:for(;14>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(c.nlen=(31&m)+257,m>>>=5,n-=5,c.ndist=(31&m)+1,m>>>=5,n-=5,c.ncode=(15&m)+4,m>>>=4,n-=4,c.nlen>286||c.ndist>30){a.msg="too many length or distance symbols",c.mode=lb;break}c.have=0,c.mode=_;case _:for(;c.have<c.ncode;){for(;3>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}c.lens[Cb[c.have++]]=7&m,m>>>=3,n-=3}for(;c.have<19;)c.lens[Cb[c.have++]]=0;if(c.lencode=c.lendyn,c.lenbits=7,yb={bits:c.lenbits},xb=v(w,c.lens,0,19,c.lencode,0,c.work,yb),c.lenbits=yb.bits,xb){a.msg="invalid code lengths set",c.mode=lb;break}c.have=0,c.mode=ab;case 
ab:for(;c.have<c.nlen+c.ndist;){for(;Ab=c.lencode[m&(1<<c.lenbits)-1],qb=Ab>>>24,rb=Ab>>>16&255,sb=65535&Ab,!(n>=qb);){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(16>sb)m>>>=qb,n-=qb,c.lens[c.have++]=sb;else{if(16===sb){for(zb=qb+2;zb>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(m>>>=qb,n-=qb,0===c.have){a.msg="invalid bit length repeat",c.mode=lb;break}wb=c.lens[c.have-1],q=3+(3&m),m>>>=2,n-=2}else if(17===sb){for(zb=qb+3;zb>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}m>>>=qb,n-=qb,wb=0,q=3+(7&m),m>>>=3,n-=3}else{for(zb=qb+7;zb>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}m>>>=qb,n-=qb,wb=0,q=11+(127&m),m>>>=7,n-=7}if(c.have+q>c.nlen+c.ndist){a.msg="invalid bit length repeat",c.mode=lb;break}for(;q--;)c.lens[c.have++]=wb}}if(c.mode===lb)break;if(0===c.lens[256]){a.msg="invalid code -- missing end-of-block",c.mode=lb;break}if(c.lenbits=9,yb={bits:c.lenbits},xb=v(x,c.lens,0,c.nlen,c.lencode,0,c.work,yb),c.lenbits=yb.bits,xb){a.msg="invalid literal/lengths set",c.mode=lb;break}if(c.distbits=6,c.distcode=c.distdyn,yb={bits:c.distbits},xb=v(y,c.lens,c.nlen,c.ndist,c.distcode,0,c.work,yb),c.distbits=yb.bits,xb){a.msg="invalid distances set",c.mode=lb;break}if(c.mode=bb,b===B)break a;case bb:c.mode=cb;case cb:if(i>=6&&j>=258){a.next_out=h,a.avail_out=j,a.next_in=g,a.avail_in=i,c.hold=m,c.bits=n,u(a,p),h=a.next_out,f=a.output,j=a.avail_out,g=a.next_in,e=a.input,i=a.avail_in,m=c.hold,n=c.bits,c.mode===V&&(c.back=-1);break}for(c.back=0;Ab=c.lencode[m&(1<<c.lenbits)-1],qb=Ab>>>24,rb=Ab>>>16&255,sb=65535&Ab,!(n>=qb);){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(rb&&0===(240&rb)){for(tb=qb,ub=rb,vb=sb;Ab=c.lencode[vb+((m&(1<<tb+ub)-1)>>tb)],qb=Ab>>>24,rb=Ab>>>16&255,sb=65535&Ab,!(n>=tb+qb);){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}m>>>=tb,n-=tb,c.back+=tb}if(m>>>=qb,n-=qb,c.back+=qb,c.length=sb,0===rb){c.mode=hb;break}if(32&rb){c.back=-1,c.mode=V;break}if(64&rb){a.msg="invalid literal/length code",c.mode=lb;break}c.extra=15&rb,c.mode=db;case 
db:if(c.extra){for(zb=c.extra;zb>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}c.length+=m&(1<<c.extra)-1,m>>>=c.extra,n-=c.extra,c.back+=c.extra}c.was=c.length,c.mode=eb;case eb:for(;Ab=c.distcode[m&(1<<c.distbits)-1],qb=Ab>>>24,rb=Ab>>>16&255,sb=65535&Ab,!(n>=qb);){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(0===(240&rb)){for(tb=qb,ub=rb,vb=sb;Ab=c.distcode[vb+((m&(1<<tb+ub)-1)>>tb)],qb=Ab>>>24,rb=Ab>>>16&255,sb=65535&Ab,!(n>=tb+qb);){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}m>>>=tb,n-=tb,c.back+=tb}if(m>>>=qb,n-=qb,c.back+=qb,64&rb){a.msg="invalid distance code",c.mode=lb;break}c.offset=sb,c.extra=15&rb,c.mode=fb;case fb:if(c.extra){for(zb=c.extra;zb>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}c.offset+=m&(1<<c.extra)-1,m>>>=c.extra,n-=c.extra,c.back+=c.extra}if(c.offset>c.dmax){a.msg="invalid distance too far back",c.mode=lb;break}c.mode=gb;case gb:if(0===j)break a;if(q=p-j,c.offset>q){if(q=c.offset-q,q>c.whave&&c.correct){a.msg="invalid distance too far back",c.mode=lb;break}q>c.wnext?(q-=c.wnext,ob=c.wsize-q):ob=c.wnext-q,q>c.length&&(q=c.length),pb=c.window}else pb=f,ob=h-c.offset,q=c.length;q>j&&(q=j),j-=q,c.length-=q;do f[h++]=pb[ob++];while(--q);0===c.length&&(c.mode=cb);break;case hb:if(0===j)break a;f[h++]=c.length,j--,c.mode=cb;break;case ib:if(c.wrap){for(;32>n;){if(0===i)break a;i--,m|=e[g++]<<n,n+=8}if(p-=j,a.total_out+=p,c.total+=p,p&&(a.adler=c.check=c.flags?t(c.check,f,p,h-p):s(c.check,f,p,h-p)),p=j,(c.flags?m:d(m))!==c.check){a.msg="incorrect data check",c.mode=lb;break}m=0,n=0}c.mode=jb;case jb:if(c.wrap&&c.flags){for(;32>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(m!==(4294967295&c.total)){a.msg="incorrect length check",c.mode=lb;break}m=0,n=0}c.mode=kb;case kb:xb=D;break a;case lb:xb=G;break a;case mb:return H;case nb:default:return F}return 
a.next_out=h,a.avail_out=j,a.next_in=g,a.avail_in=i,c.hold=m,c.bits=n,(c.wsize||p!==a.avail_out&&c.mode<lb&&(c.mode<ib||b!==z))&&l(a,a.output,a.next_out,p-a.avail_out)?(c.mode=mb,H):(o-=a.avail_in,p-=a.avail_out,a.total_in+=o,a.total_out+=p,c.total+=p,c.wrap&&p&&(a.adler=c.check=c.flags?t(c.check,f,p,a.next_out-p):s(c.check,f,p,a.next_out-p)),a.data_type=c.bits+(c.last?64:0)+(c.mode===V?128:0)+(c.mode===bb||c.mode===Y?256:0),(0===o&&0===p||b===z)&&xb===C&&(xb=I),xb)}function n(a){if(!a||!a.state)return F;var b=a.state;return b.window&&(b.window=null),a.state=null,C}function o(a,b){var c;return a&&a.state?(c=a.state,0===(2&c.wrap)?F:(c.head=b,b.done=!1,C)):F}var p,q,r=a("../utils/common"),s=a("./adler32"),t=a("./crc32"),u=a("./inffast"),v=a("./inftrees"),w=0,x=1,y=2,z=4,A=5,B=6,C=0,D=1,E=2,F=-2,G=-3,H=-4,I=-5,J=8,K=1,L=2,M=3,N=4,O=5,P=6,Q=7,R=8,S=9,T=10,U=11,V=12,W=13,X=14,Y=15,Z=16,$=17,_=18,ab=19,bb=20,cb=21,db=22,eb=23,fb=24,gb=25,hb=26,ib=27,jb=28,kb=29,lb=30,mb=31,nb=32,ob=852,pb=592,qb=15,rb=qb,sb=!0;c.inflateReset=g,c.inflateReset2=h,c.inflateResetKeep=f,c.inflateInit=j,c.inflateInit2=i,c.inflate=m,c.inflateEnd=n,c.inflateGetHeader=o,c.inflateInfo="pako inflate (from Nodeca project)"},{"../utils/common":27,"./adler32":29,"./crc32":31,"./inffast":34,"./inftrees":36}],36:[function(a,b){"use strict";var c=a("../utils/common"),d=15,e=852,f=592,g=0,h=1,i=2,j=[3,4,5,6,7,8,9,10,11,13,15,17,19,23,27,31,35,43,51,59,67,83,99,115,131,163,195,227,258,0,0],k=[16,16,16,16,16,16,16,16,17,17,17,17,18,18,18,18,19,19,19,19,20,20,20,20,21,21,21,21,16,72,78],l=[1,2,3,4,5,7,9,13,17,25,33,49,65,97,129,193,257,385,513,769,1025,1537,2049,3073,4097,6145,8193,12289,16385,24577,0,0],m=[16,16,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,24,24,25,25,26,26,27,27,28,28,29,29,64,64];b.exports=function(a,b,n,o,p,q,r,s){var t,u,v,w,x,y,z,A,B,C=s.bits,D=0,E=0,F=0,G=0,H=0,I=0,J=0,K=0,L=0,M=0,N=null,O=0,P=new c.Buf16(d+1),Q=new 
c.Buf16(d+1),R=null,S=0;for(D=0;d>=D;D++)P[D]=0;for(E=0;o>E;E++)P[b[n+E]]++;for(H=C,G=d;G>=1&&0===P[G];G--);if(H>G&&(H=G),0===G)return p[q++]=20971520,p[q++]=20971520,s.bits=1,0;for(F=1;G>F&&0===P[F];F++);for(F>H&&(H=F),K=1,D=1;d>=D;D++)if(K<<=1,K-=P[D],0>K)return-1;if(K>0&&(a===g||1!==G))return-1;for(Q[1]=0,D=1;d>D;D++)Q[D+1]=Q[D]+P[D];for(E=0;o>E;E++)0!==b[n+E]&&(r[Q[b[n+E]]++]=E);if(a===g?(N=R=r,y=19):a===h?(N=j,O-=257,R=k,S-=257,y=256):(N=l,R=m,y=-1),M=0,E=0,D=F,x=q,I=H,J=0,v=-1,L=1<<H,w=L-1,a===h&&L>e||a===i&&L>f)return 1;for(var T=0;;){T++,z=D-J,r[E]<y?(A=0,B=r[E]):r[E]>y?(A=R[S+r[E]],B=N[O+r[E]]):(A=96,B=0),t=1<<D-J,u=1<<I,F=u;do u-=t,p[x+(M>>J)+u]=z<<24|A<<16|B|0;while(0!==u);for(t=1<<D-1;M&t;)t>>=1;if(0!==t?(M&=t-1,M+=t):M=0,E++,0===--P[D]){if(D===G)break;D=b[n+r[E]]}if(D>H&&(M&w)!==v){for(0===J&&(J=H),x+=F,I=D-J,K=1<<I;G>I+J&&(K-=P[I+J],!(0>=K));)I++,K<<=1;if(L+=1<<I,a===h&&L>e||a===i&&L>f)return 1;v=M&w,p[v]=H<<24|I<<16|x-q|0}}return 0!==M&&(p[x+M]=D-J<<24|64<<16|0),s.bits=H,0}},{"../utils/common":27}],37:[function(a,b){"use strict";b.exports={2:"need dictionary",1:"stream end",0:"","-1":"file error","-2":"stream error","-3":"data error","-4":"insufficient memory","-5":"buffer error","-6":"incompatible version"}},{}],38:[function(a,b,c){"use strict";function d(a){for(var b=a.length;--b>=0;)a[b]=0}function e(a){return 256>a?gb[a]:gb[256+(a>>>7)]}function f(a,b){a.pending_buf[a.pending++]=255&b,a.pending_buf[a.pending++]=b>>>8&255}function g(a,b,c){a.bi_valid>V-c?(a.bi_buf|=b<<a.bi_valid&65535,f(a,a.bi_buf),a.bi_buf=b>>V-a.bi_valid,a.bi_valid+=c-V):(a.bi_buf|=b<<a.bi_valid&65535,a.bi_valid+=c)}function h(a,b,c){g(a,c[2*b],c[2*b+1])}function i(a,b){var c=0;do c|=1&a,a>>>=1,c<<=1;while(--b>0);return c>>>1}function j(a){16===a.bi_valid?(f(a,a.bi_buf),a.bi_buf=0,a.bi_valid=0):a.bi_valid>=8&&(a.pending_buf[a.pending++]=255&a.bi_buf,a.bi_buf>>=8,a.bi_valid-=8)}function k(a,b){var 
c,d,e,f,g,h,i=b.dyn_tree,j=b.max_code,k=b.stat_desc.static_tree,l=b.stat_desc.has_stree,m=b.stat_desc.extra_bits,n=b.stat_desc.extra_base,o=b.stat_desc.max_length,p=0;for(f=0;U>=f;f++)a.bl_count[f]=0;for(i[2*a.heap[a.heap_max]+1]=0,c=a.heap_max+1;T>c;c++)d=a.heap[c],f=i[2*i[2*d+1]+1]+1,f>o&&(f=o,p++),i[2*d+1]=f,d>j||(a.bl_count[f]++,g=0,d>=n&&(g=m[d-n]),h=i[2*d],a.opt_len+=h*(f+g),l&&(a.static_len+=h*(k[2*d+1]+g)));if(0!==p){do{for(f=o-1;0===a.bl_count[f];)f--;a.bl_count[f]--,a.bl_count[f+1]+=2,a.bl_count[o]--,p-=2}while(p>0);for(f=o;0!==f;f--)for(d=a.bl_count[f];0!==d;)e=a.heap[--c],e>j||(i[2*e+1]!==f&&(a.opt_len+=(f-i[2*e+1])*i[2*e],i[2*e+1]=f),d--)}}function l(a,b,c){var d,e,f=new Array(U+1),g=0;for(d=1;U>=d;d++)f[d]=g=g+c[d-1]<<1;for(e=0;b>=e;e++){var h=a[2*e+1];0!==h&&(a[2*e]=i(f[h]++,h))}}function m(){var a,b,c,d,e,f=new Array(U+1);for(c=0,d=0;O-1>d;d++)for(ib[d]=c,a=0;a<1<<_[d];a++)hb[c++]=d;for(hb[c-1]=d,e=0,d=0;16>d;d++)for(jb[d]=e,a=0;a<1<<ab[d];a++)gb[e++]=d;for(e>>=7;R>d;d++)for(jb[d]=e<<7,a=0;a<1<<ab[d]-7;a++)gb[256+e++]=d;for(b=0;U>=b;b++)f[b]=0;for(a=0;143>=a;)eb[2*a+1]=8,a++,f[8]++;for(;255>=a;)eb[2*a+1]=9,a++,f[9]++;for(;279>=a;)eb[2*a+1]=7,a++,f[7]++;for(;287>=a;)eb[2*a+1]=8,a++,f[8]++;for(l(eb,Q+1,f),a=0;R>a;a++)fb[2*a+1]=5,fb[2*a]=i(a,5);kb=new nb(eb,_,P+1,Q,U),lb=new nb(fb,ab,0,R,U),mb=new nb(new Array(0),bb,0,S,W)}function n(a){var b;for(b=0;Q>b;b++)a.dyn_ltree[2*b]=0;for(b=0;R>b;b++)a.dyn_dtree[2*b]=0;for(b=0;S>b;b++)a.bl_tree[2*b]=0;a.dyn_ltree[2*X]=1,a.opt_len=a.static_len=0,a.last_lit=a.matches=0}function o(a){a.bi_valid>8?f(a,a.bi_buf):a.bi_valid>0&&(a.pending_buf[a.pending++]=a.bi_buf),a.bi_buf=0,a.bi_valid=0}function p(a,b,c,d){o(a),d&&(f(a,c),f(a,~c)),E.arraySet(a.pending_buf,a.window,b,c,a.pending),a.pending+=c}function q(a,b,c,d){var e=2*b,f=2*c;return a[e]<a[f]||a[e]===a[f]&&d[b]<=d[c]}function r(a,b,c){for(var 
d=a.heap[c],e=c<<1;e<=a.heap_len&&(e<a.heap_len&&q(b,a.heap[e+1],a.heap[e],a.depth)&&e++,!q(b,d,a.heap[e],a.depth));)a.heap[c]=a.heap[e],c=e,e<<=1;a.heap[c]=d}function s(a,b,c){var d,f,i,j,k=0;if(0!==a.last_lit)do d=a.pending_buf[a.d_buf+2*k]<<8|a.pending_buf[a.d_buf+2*k+1],f=a.pending_buf[a.l_buf+k],k++,0===d?h(a,f,b):(i=hb[f],h(a,i+P+1,b),j=_[i],0!==j&&(f-=ib[i],g(a,f,j)),d--,i=e(d),h(a,i,c),j=ab[i],0!==j&&(d-=jb[i],g(a,d,j)));while(k<a.last_lit);h(a,X,b)}function t(a,b){var c,d,e,f=b.dyn_tree,g=b.stat_desc.static_tree,h=b.stat_desc.has_stree,i=b.stat_desc.elems,j=-1;for(a.heap_len=0,a.heap_max=T,c=0;i>c;c++)0!==f[2*c]?(a.heap[++a.heap_len]=j=c,a.depth[c]=0):f[2*c+1]=0;for(;a.heap_len<2;)e=a.heap[++a.heap_len]=2>j?++j:0,f[2*e]=1,a.depth[e]=0,a.opt_len--,h&&(a.static_len-=g[2*e+1]);for(b.max_code=j,c=a.heap_len>>1;c>=1;c--)r(a,f,c);e=i;do c=a.heap[1],a.heap[1]=a.heap[a.heap_len--],r(a,f,1),d=a.heap[1],a.heap[--a.heap_max]=c,a.heap[--a.heap_max]=d,f[2*e]=f[2*c]+f[2*d],a.depth[e]=(a.depth[c]>=a.depth[d]?a.depth[c]:a.depth[d])+1,f[2*c+1]=f[2*d+1]=e,a.heap[1]=e++,r(a,f,1);while(a.heap_len>=2);a.heap[--a.heap_max]=a.heap[1],k(a,b),l(f,j,a.bl_count)}function u(a,b,c){var d,e,f=-1,g=b[1],h=0,i=7,j=4;for(0===g&&(i=138,j=3),b[2*(c+1)+1]=65535,d=0;c>=d;d++)e=g,g=b[2*(d+1)+1],++h<i&&e===g||(j>h?a.bl_tree[2*e]+=h:0!==e?(e!==f&&a.bl_tree[2*e]++,a.bl_tree[2*Y]++):10>=h?a.bl_tree[2*Z]++:a.bl_tree[2*$]++,h=0,f=e,0===g?(i=138,j=3):e===g?(i=6,j=3):(i=7,j=4))}function v(a,b,c){var d,e,f=-1,i=b[1],j=0,k=7,l=4;for(0===i&&(k=138,l=3),d=0;c>=d;d++)if(e=i,i=b[2*(d+1)+1],!(++j<k&&e===i)){if(l>j){do h(a,e,a.bl_tree);while(0!==--j)}else 0!==e?(e!==f&&(h(a,e,a.bl_tree),j--),h(a,Y,a.bl_tree),g(a,j-3,2)):10>=j?(h(a,Z,a.bl_tree),g(a,j-3,3)):(h(a,$,a.bl_tree),g(a,j-11,7));j=0,f=e,0===i?(k=138,l=3):e===i?(k=6,l=3):(k=7,l=4)}}function w(a){var b;for(u(a,a.dyn_ltree,a.l_desc.max_code),u(a,a.dyn_dtree,a.d_desc.max_code),t(a,a.bl_desc),b=S-1;b>=3&&0===a.bl_tree[2*cb[b]+1];b--);return 
a.opt_len+=3*(b+1)+5+5+4,b}function x(a,b,c,d){var e;for(g(a,b-257,5),g(a,c-1,5),g(a,d-4,4),e=0;d>e;e++)g(a,a.bl_tree[2*cb[e]+1],3);v(a,a.dyn_ltree,b-1),v(a,a.dyn_dtree,c-1)}function y(a){var b,c=4093624447;for(b=0;31>=b;b++,c>>>=1)if(1&c&&0!==a.dyn_ltree[2*b])return G;if(0!==a.dyn_ltree[18]||0!==a.dyn_ltree[20]||0!==a.dyn_ltree[26])return H;for(b=32;P>b;b++)if(0!==a.dyn_ltree[2*b])return H;return G}function z(a){pb||(m(),pb=!0),a.l_desc=new ob(a.dyn_ltree,kb),a.d_desc=new ob(a.dyn_dtree,lb),a.bl_desc=new ob(a.bl_tree,mb),a.bi_buf=0,a.bi_valid=0,n(a)}function A(a,b,c,d){g(a,(J<<1)+(d?1:0),3),p(a,b,c,!0)}function B(a){g(a,K<<1,3),h(a,X,eb),j(a)}function C(a,b,c,d){var e,f,h=0;a.level>0?(a.strm.data_type===I&&(a.strm.data_type=y(a)),t(a,a.l_desc),t(a,a.d_desc),h=w(a),e=a.opt_len+3+7>>>3,f=a.static_len+3+7>>>3,e>=f&&(e=f)):e=f=c+5,e>=c+4&&-1!==b?A(a,b,c,d):a.strategy===F||f===e?(g(a,(K<<1)+(d?1:0),3),s(a,eb,fb)):(g(a,(L<<1)+(d?1:0),3),x(a,a.l_desc.max_code+1,a.d_desc.max_code+1,h+1),s(a,a.dyn_ltree,a.dyn_dtree)),n(a),d&&o(a)}function D(a,b,c){return a.pending_buf[a.d_buf+2*a.last_lit]=b>>>8&255,a.pending_buf[a.d_buf+2*a.last_lit+1]=255&b,a.pending_buf[a.l_buf+a.last_lit]=255&c,a.last_lit++,0===b?a.dyn_ltree[2*c]++:(a.matches++,b--,a.dyn_ltree[2*(hb[c]+P+1)]++,a.dyn_dtree[2*e(b)]++),a.last_lit===a.lit_bufsize-1}var E=a("../utils/common"),F=4,G=0,H=1,I=2,J=0,K=1,L=2,M=3,N=258,O=29,P=256,Q=P+1+O,R=30,S=19,T=2*Q+1,U=15,V=16,W=7,X=256,Y=16,Z=17,$=18,_=[0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,0],ab=[0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13],bb=[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,3,7],cb=[16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15],db=512,eb=new Array(2*(Q+2));d(eb);var fb=new Array(2*R);d(fb);var gb=new Array(db);d(gb);var hb=new Array(N-M+1);d(hb);var ib=new Array(O);d(ib);var jb=new Array(R);d(jb);var 
kb,lb,mb,nb=function(a,b,c,d,e){this.static_tree=a,this.extra_bits=b,this.extra_base=c,this.elems=d,this.max_length=e,this.has_stree=a&&a.length},ob=function(a,b){this.dyn_tree=a,this.max_code=0,this.stat_desc=b},pb=!1;c._tr_init=z,c._tr_stored_block=A,c._tr_flush_block=C,c._tr_tally=D,c._tr_align=B},{"../utils/common":27}],39:[function(a,b){"use strict";function c(){this.input=null,this.next_in=0,this.avail_in=0,this.total_in=0,this.output=null,this.next_out=0,this.avail_out=0,this.total_out=0,this.msg="",this.state=null,this.data_type=2,this.adler=0}b.exports=c},{}]},{},[9])(9)});'use strict';if(tr.isVinn){global.JSZip=global.window.JSZip;global.window=undefined;}else if(tr.isNode){const jsZipAbsPath=HTMLImportsLoader.hrefToAbsolutePath('/jszip.min.js');const jsZipModule=require(jsZipAbsPath);global.JSZip=jsZipModule;}'use strict';tr.exportTo('tr.e.importer',function(){function ZipImporter(model,eventData){if(eventData instanceof ArrayBuffer){eventData=new Uint8Array(eventData);} +!function(a){if("object"==typeof exports&&"undefined"!=typeof module)module.exports=a();else if("function"==typeof define&&define.amd)define([],a);else{var b;"undefined"!=typeof window?b=window:"undefined"!=typeof global?b=global:"undefined"!=typeof self&&(b=self),b.JSZip=a()}}(function(){return function a(b,c,d){function e(g,h){if(!c[g]){if(!b[g]){var i="function"==typeof require&&require;if(!h&&i)return i(g,!0);if(f)return f(g,!0);throw new Error("Cannot find module '"+g+"'")}var j=c[g]={exports:{}};b[g][0].call(j.exports,function(a){var c=b[g][1][a];return e(c?c:a)},j,j.exports,a,b,c,d)}return c[g].exports}for(var f="function"==typeof require&&require,g=0;g<d.length;g++)e(d[g]);return e}({1:[function(a,b,c){"use strict";var d="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/=";c.encode=function(a){for(var 
b,c,e,f,g,h,i,j="",k=0;k<a.length;)b=a.charCodeAt(k++),c=a.charCodeAt(k++),e=a.charCodeAt(k++),f=b>>2,g=(3&b)<<4|c>>4,h=(15&c)<<2|e>>6,i=63&e,isNaN(c)?h=i=64:isNaN(e)&&(i=64),j=j+d.charAt(f)+d.charAt(g)+d.charAt(h)+d.charAt(i);return j},c.decode=function(a){var b,c,e,f,g,h,i,j="",k=0;for(a=a.replace(/[^A-Za-z0-9\+\/\=]/g,"");k<a.length;)f=d.indexOf(a.charAt(k++)),g=d.indexOf(a.charAt(k++)),h=d.indexOf(a.charAt(k++)),i=d.indexOf(a.charAt(k++)),b=f<<2|g>>4,c=(15&g)<<4|h>>2,e=(3&h)<<6|i,j+=String.fromCharCode(b),64!=h&&(j+=String.fromCharCode(c)),64!=i&&(j+=String.fromCharCode(e));return j}},{}],2:[function(a,b){"use strict";function c(){this.compressedSize=0,this.uncompressedSize=0,this.crc32=0,this.compressionMethod=null,this.compressedContent=null}c.prototype={getContent:function(){return null},getCompressedContent:function(){return null}},b.exports=c},{}],3:[function(a,b,c){"use strict";c.STORE={magic:"\x00\x00",compress:function(a){return a},uncompress:function(a){return a},compressInputType:null,uncompressInputType:null},c.DEFLATE=a("./flate")},{"./flate":8}],4:[function(a,b){"use strict";var 
c=a("./utils"),d=[0,1996959894,3993919788,2567524794,124634137,1886057615,3915621685,2657392035,249268274,2044508324,3772115230,2547177864,162941995,2125561021,3887607047,2428444049,498536548,1789927666,4089016648,2227061214,450548861,1843258603,4107580753,2211677639,325883990,1684777152,4251122042,2321926636,335633487,1661365465,4195302755,2366115317,997073096,1281953886,3579855332,2724688242,1006888145,1258607687,3524101629,2768942443,901097722,1119000684,3686517206,2898065728,853044451,1172266101,3705015759,2882616665,651767980,1373503546,3369554304,3218104598,565507253,1454621731,3485111705,3099436303,671266974,1594198024,3322730930,2970347812,795835527,1483230225,3244367275,3060149565,1994146192,31158534,2563907772,4023717930,1907459465,112637215,2680153253,3904427059,2013776290,251722036,2517215374,3775830040,2137656763,141376813,2439277719,3865271297,1802195444,476864866,2238001368,4066508878,1812370925,453092731,2181625025,4111451223,1706088902,314042704,2344532202,4240017532,1658658271,366619977,2362670323,4224994405,1303535960,984961486,2747007092,3569037538,1256170817,1037604311,2765210733,3554079995,1131014506,879679996,2909243462,3663771856,1141124467,855842277,2852801631,3708648649,1342533948,654459306,3188396048,3373015174,1466479909,544179635,3110523913,3462522015,1591671054,702138776,2966460450,3352799412,1504918807,783551873,3082640443,3233442989,3988292384,2596254646,62317068,1957810842,3939845945,2647816111,81470997,1943803523,3814918930,2489596804,225274430,2053790376,3826175755,2466906013,167816743,2097651377,4027552580,2265490386,503444072,1762050814,4150417245,2154129355,426522225,1852507879,4275313526,2312317920,282753626,1742555852,4189708143,2394877945,397917763,1622183637,3604390888,2714866558,953729732,1340076626,3518719985,2797360999,1068828381,1219638859,3624741850,2936675148,906185462,1090812512,3747672003,2825379669,829329135,1181335161,3412177804,3160834842,628085408,1382605366,3423369109,3138078467,570562233,1426400815,3317316542,2
998733608,733239954,1555261956,3268935591,3050360625,752459403,1541320221,2607071920,3965973030,1969922972,40735498,2617837225,3943577151,1913087877,83908371,2512341634,3803740692,2075208622,213261112,2463272603,3855990285,2094854071,198958881,2262029012,4057260610,1759359992,534414190,2176718541,4139329115,1873836001,414664567,2282248934,4279200368,1711684554,285281116,2405801727,4167216745,1634467795,376229701,2685067896,3608007406,1308918612,956543938,2808555105,3495958263,1231636301,1047427035,2932959818,3654703836,1088359270,936918e3,2847714899,3736837829,1202900863,817233897,3183342108,3401237130,1404277552,615818150,3134207493,3453421203,1423857449,601450431,3009837614,3294710456,1567103746,711928724,3020668471,3272380065,1510334235,755167117];b.exports=function(a,b){if("undefined"==typeof a||!a.length)return 0;var e="string"!==c.getTypeOf(a);"undefined"==typeof b&&(b=0);var f=0,g=0,h=0;b=-1^b;for(var i=0,j=a.length;j>i;i++)h=e?a[i]:a.charCodeAt(i),g=255&(b^h),f=d[g],b=b>>>8^f;return-1^b}},{"./utils":21}],5:[function(a,b){"use strict";function c(){this.data=null,this.length=0,this.index=0}var d=a("./utils");c.prototype={checkOffset:function(a){this.checkIndex(this.index+a)},checkIndex:function(a){if(this.length<a||0>a)throw new Error("End of data reached (data length = "+this.length+", asked index = "+a+"). 
Corrupted zip ?")},setIndex:function(a){this.checkIndex(a),this.index=a},skip:function(a){this.setIndex(this.index+a)},byteAt:function(){},readInt:function(a){var b,c=0;for(this.checkOffset(a),b=this.index+a-1;b>=this.index;b--)c=(c<<8)+this.byteAt(b);return this.index+=a,c},readString:function(a){return d.transformTo("string",this.readData(a))},readData:function(){},lastIndexOfSignature:function(){},readDate:function(){var a=this.readInt(4);return new Date((a>>25&127)+1980,(a>>21&15)-1,a>>16&31,a>>11&31,a>>5&63,(31&a)<<1)}},b.exports=c},{"./utils":21}],6:[function(a,b,c){"use strict";c.base64=!1,c.binary=!1,c.dir=!1,c.createFolders=!1,c.date=null,c.compression=null,c.comment=null},{}],7:[function(a,b,c){"use strict";var d=a("./utils");c.string2binary=function(a){return d.string2binary(a)},c.string2Uint8Array=function(a){return d.transformTo("uint8array",a)},c.uint8Array2String=function(a){return d.transformTo("string",a)},c.string2Blob=function(a){var b=d.transformTo("arraybuffer",a);return d.arrayBuffer2Blob(b)},c.arrayBuffer2Blob=function(a){return d.arrayBuffer2Blob(a)},c.transformTo=function(a,b){return d.transformTo(a,b)},c.getTypeOf=function(a){return d.getTypeOf(a)},c.checkSupport=function(a){return d.checkSupport(a)},c.MAX_VALUE_16BITS=d.MAX_VALUE_16BITS,c.MAX_VALUE_32BITS=d.MAX_VALUE_32BITS,c.pretty=function(a){return d.pretty(a)},c.findCompression=function(a){return d.findCompression(a)},c.isRegExp=function(a){return d.isRegExp(a)}},{"./utils":21}],8:[function(a,b,c){"use strict";var d="undefined"!=typeof Uint8Array&&"undefined"!=typeof Uint16Array&&"undefined"!=typeof Uint32Array,e=a("pako");c.uncompressInputType=d?"uint8array":"array",c.compressInputType=d?"uint8array":"array",c.magic="\b\x00",c.compress=function(a){return e.deflateRaw(a)},c.uncompress=function(a){return e.inflateRaw(a)}},{pako:24}],9:[function(a,b){"use strict";function c(a,b){return this instanceof 
c?(this.files={},this.comment=null,this.root="",a&&this.load(a,b),void(this.clone=function(){var a=new c;for(var b in this)"function"!=typeof this[b]&&(a[b]=this[b]);return a})):new c(a,b)}var d=a("./base64");c.prototype=a("./object"),c.prototype.load=a("./load"),c.support=a("./support"),c.defaults=a("./defaults"),c.utils=a("./deprecatedPublicUtils"),c.base64={encode:function(a){return d.encode(a)},decode:function(a){return d.decode(a)}},c.compressions=a("./compressions"),b.exports=c},{"./base64":1,"./compressions":3,"./defaults":6,"./deprecatedPublicUtils":7,"./load":10,"./object":13,"./support":17}],10:[function(a,b){"use strict";var c=a("./base64"),d=a("./zipEntries");b.exports=function(a,b){var e,f,g,h;for(b=b||{},b.base64&&(a=c.decode(a)),f=new d(a,b),e=f.files,g=0;g<e.length;g++)h=e[g],this.file(h.fileName,h.decompressed,{binary:!0,optimizedBinaryString:!0,date:h.date,dir:h.dir,comment:h.fileComment.length?h.fileComment:null,createFolders:b.createFolders});return f.zipComment.length&&(this.comment=f.zipComment),this}},{"./base64":1,"./zipEntries":22}],11:[function(a,b){(function(a){"use strict";b.exports=function(b,c){return new a(b,c)},b.exports.test=function(b){return a.isBuffer(b)}}).call(this,"undefined"!=typeof Buffer?Buffer:void 0)},{}],12:[function(a,b){"use strict";function c(a){this.data=a,this.length=this.data.length,this.index=0}var d=a("./uint8ArrayReader");c.prototype=new d,c.prototype.readData=function(a){this.checkOffset(a);var b=this.data.slice(this.index,this.index+a);return this.index+=a,b},b.exports=c},{"./uint8ArrayReader":18}],13:[function(a,b){"use strict";var c=a("./support"),d=a("./utils"),e=a("./crc32"),f=a("./signature"),g=a("./defaults"),h=a("./base64"),i=a("./compressions"),j=a("./compressedObject"),k=a("./nodeBuffer"),l=a("./utf8"),m=a("./stringWriter"),n=a("./uint8ArrayWriter"),o=function(a){if(a._data instanceof j&&(a._data=a._data.getContent(),a.options.binary=!0,a.options.base64=!1,"uint8array"===d.getTypeOf(a._data))){var 
b=a._data;a._data=new Uint8Array(b.length),0!==b.length&&a._data.set(b,0)}return a._data},p=function(a){var b=o(a),e=d.getTypeOf(b);return"string"===e?!a.options.binary&&c.nodebuffer?k(b,"utf-8"):a.asBinary():b},q=function(a){var b=o(this);return null===b||"undefined"==typeof b?"":(this.options.base64&&(b=h.decode(b)),b=a&&this.options.binary?A.utf8decode(b):d.transformTo("string",b),a||this.options.binary||(b=d.transformTo("string",A.utf8encode(b))),b)},r=function(a,b,c){this.name=a,this.dir=c.dir,this.date=c.date,this.comment=c.comment,this._data=b,this.options=c,this._initialMetadata={dir:c.dir,date:c.date}};r.prototype={asText:function(){return q.call(this,!0)},asBinary:function(){return q.call(this,!1)},asNodeBuffer:function(){var a=p(this);return d.transformTo("nodebuffer",a)},asUint8Array:function(){var a=p(this);return d.transformTo("uint8array",a)},asArrayBuffer:function(){return this.asUint8Array().buffer}};var s=function(a,b){var c,d="";for(c=0;b>c;c++)d+=String.fromCharCode(255&a),a>>>=8;return d},t=function(){var a,b,c={};for(a=0;a<arguments.length;a++)for(b in arguments[a])arguments[a].hasOwnProperty(b)&&"undefined"==typeof c[b]&&(c[b]=arguments[a][b]);return c},u=function(a){return a=a||{},a.base64!==!0||null!==a.binary&&void 0!==a.binary||(a.binary=!0),a=t(a,g),a.date=a.date||new Date,null!==a.compression&&(a.compression=a.compression.toUpperCase()),a},v=function(a,b,c){var e,f=d.getTypeOf(b);if(c=u(c),c.createFolders&&(e=w(a))&&x.call(this,e,!0),c.dir||null===b||"undefined"==typeof b)c.base64=!1,c.binary=!1,b=null;else if("string"===f)c.binary&&!c.base64&&c.optimizedBinaryString!==!0&&(b=d.string2binary(b));else{if(c.base64=!1,c.binary=!0,!(f||b instanceof j))throw new Error("The data of '"+a+"' is in an unsupported format !");"arraybuffer"===f&&(b=d.transformTo("uint8array",b))}var g=new r(a,b,c);return this.files[a]=g,g},w=function(a){"/"==a.slice(-1)&&(a=a.substring(0,a.length-1));var b=a.lastIndexOf("/");return 
b>0?a.substring(0,b):""},x=function(a,b){return"/"!=a.slice(-1)&&(a+="/"),b="undefined"!=typeof b?b:!1,this.files[a]||v.call(this,a,null,{dir:!0,createFolders:b}),this.files[a]},y=function(a,b){var c,f=new j;return a._data instanceof j?(f.uncompressedSize=a._data.uncompressedSize,f.crc32=a._data.crc32,0===f.uncompressedSize||a.dir?(b=i.STORE,f.compressedContent="",f.crc32=0):a._data.compressionMethod===b.magic?f.compressedContent=a._data.getCompressedContent():(c=a._data.getContent(),f.compressedContent=b.compress(d.transformTo(b.compressInputType,c)))):(c=p(a),(!c||0===c.length||a.dir)&&(b=i.STORE,c=""),f.uncompressedSize=c.length,f.crc32=e(c),f.compressedContent=b.compress(d.transformTo(b.compressInputType,c))),f.compressedSize=f.compressedContent.length,f.compressionMethod=b.magic,f},z=function(a,b,c,g){var h,i,j,k,m=(c.compressedContent,d.transformTo("string",l.utf8encode(b.name))),n=b.comment||"",o=d.transformTo("string",l.utf8encode(n)),p=m.length!==b.name.length,q=o.length!==n.length,r=b.options,t="",u="",v="";j=b._initialMetadata.dir!==b.dir?b.dir:r.dir,k=b._initialMetadata.date!==b.date?b.date:r.date,h=k.getHours(),h<<=6,h|=k.getMinutes(),h<<=5,h|=k.getSeconds()/2,i=k.getFullYear()-1980,i<<=4,i|=k.getMonth()+1,i<<=5,i|=k.getDate(),p&&(u=s(1,1)+s(e(m),4)+m,t+="up"+s(u.length,2)+u),q&&(v=s(1,1)+s(this.crc32(o),4)+o,t+="uc"+s(v.length,2)+v);var w="";w+="\n\x00",w+=p||q?"\x00\b":"\x00\x00",w+=c.compressionMethod,w+=s(h,2),w+=s(i,2),w+=s(c.crc32,4),w+=s(c.compressedSize,4),w+=s(c.uncompressedSize,4),w+=s(m.length,2),w+=s(t.length,2);var x=f.LOCAL_FILE_HEADER+w+m+t,y=f.CENTRAL_FILE_HEADER+"\x00"+w+s(o.length,2)+"\x00\x00\x00\x00"+(j===!0?"\x00\x00\x00":"\x00\x00\x00\x00")+s(g,4)+m+t+o;return{fileRecord:x,dirRecord:y,compressedObject:c}},A={load:function(){throw new Error("Load method is not defined. 
Is the file jszip-load.js included ?")},filter:function(a){var b,c,d,e,f=[];for(b in this.files)this.files.hasOwnProperty(b)&&(d=this.files[b],e=new r(d.name,d._data,t(d.options)),c=b.slice(this.root.length,b.length),b.slice(0,this.root.length)===this.root&&a(c,e)&&f.push(e));return f},file:function(a,b,c){if(1===arguments.length){if(d.isRegExp(a)){var e=a;return this.filter(function(a,b){return!b.dir&&e.test(a)})}return this.filter(function(b,c){return!c.dir&&b===a})[0]||null}return a=this.root+a,v.call(this,a,b,c),this},folder:function(a){if(!a)return this;if(d.isRegExp(a))return this.filter(function(b,c){return c.dir&&a.test(b)});var b=this.root+a,c=x.call(this,b),e=this.clone();return e.root=c.name,e},remove:function(a){a=this.root+a;var b=this.files[a];if(b||("/"!=a.slice(-1)&&(a+="/"),b=this.files[a]),b&&!b.dir)delete this.files[a];else for(var c=this.filter(function(b,c){return c.name.slice(0,a.length)===a}),d=0;d<c.length;d++)delete this.files[c[d].name];return this},generate:function(a){a=t(a||{},{base64:!0,compression:"STORE",type:"base64",comment:null}),d.checkSupport(a.type);var b,c,e=[],g=0,j=0,k=d.transformTo("string",this.utf8encode(a.comment||this.comment||""));for(var l in this.files)if(this.files.hasOwnProperty(l)){var o=this.files[l],p=o.options.compression||a.compression.toUpperCase(),q=i[p];if(!q)throw new Error(p+" is not a valid compression method !");var r=y.call(this,o,q),u=z.call(this,l,o,r,g);g+=u.fileRecord.length+r.compressedSize,j+=u.dirRecord.length,e.push(u)}var v="";v=f.CENTRAL_DIRECTORY_END+"\x00\x00\x00\x00"+s(e.length,2)+s(e.length,2)+s(j,4)+s(g,4)+s(k.length,2)+k;var w=a.type.toLowerCase();for(b="uint8array"===w||"arraybuffer"===w||"blob"===w||"nodebuffer"===w?new n(g+j+v.length):new m(g+j+v.length),c=0;c<e.length;c++)b.append(e[c].fileRecord),b.append(e[c].compressedObject.compressedContent);for(c=0;c<e.length;c++)b.append(e[c].dirRecord);b.append(v);var 
x=b.finalize();switch(a.type.toLowerCase()){case"uint8array":case"arraybuffer":case"nodebuffer":return d.transformTo(a.type.toLowerCase(),x);case"blob":return d.arrayBuffer2Blob(d.transformTo("arraybuffer",x));case"base64":return a.base64?h.encode(x):x;default:return x}},crc32:function(a,b){return e(a,b)},utf8encode:function(a){return d.transformTo("string",l.utf8encode(a))},utf8decode:function(a){return l.utf8decode(a)}};b.exports=A},{"./base64":1,"./compressedObject":2,"./compressions":3,"./crc32":4,"./defaults":6,"./nodeBuffer":11,"./signature":14,"./stringWriter":16,"./support":17,"./uint8ArrayWriter":19,"./utf8":20,"./utils":21}],14:[function(a,b,c){"use strict";c.LOCAL_FILE_HEADER="PK",c.CENTRAL_FILE_HEADER="PK",c.CENTRAL_DIRECTORY_END="PK",c.ZIP64_CENTRAL_DIRECTORY_LOCATOR="PK",c.ZIP64_CENTRAL_DIRECTORY_END="PK",c.DATA_DESCRIPTOR="PK\b"},{}],15:[function(a,b){"use strict";function c(a,b){this.data=a,b||(this.data=e.string2binary(this.data)),this.length=this.data.length,this.index=0}var d=a("./dataReader"),e=a("./utils");c.prototype=new d,c.prototype.byteAt=function(a){return this.data.charCodeAt(a)},c.prototype.lastIndexOfSignature=function(a){return this.data.lastIndexOf(a)},c.prototype.readData=function(a){this.checkOffset(a);var b=this.data.slice(this.index,this.index+a);return this.index+=a,b},b.exports=c},{"./dataReader":5,"./utils":21}],16:[function(a,b){"use strict";var c=a("./utils"),d=function(){this.data=[]};d.prototype={append:function(a){a=c.transformTo("string",a),this.data.push(a)},finalize:function(){return this.data.join("")}},b.exports=d},{"./utils":21}],17:[function(a,b,c){(function(a){"use strict";if(c.base64=!0,c.array=!0,c.string=!0,c.arraybuffer="undefined"!=typeof ArrayBuffer&&"undefined"!=typeof Uint8Array,c.nodebuffer="undefined"!=typeof a,c.uint8array="undefined"!=typeof Uint8Array,"undefined"==typeof ArrayBuffer)c.blob=!1;else{var b=new ArrayBuffer(0);try{c.blob=0===new Blob([b],{type:"application/zip"}).size}catch(d){try{var 
e=window.BlobBuilder||window.WebKitBlobBuilder||window.MozBlobBuilder||window.MSBlobBuilder,f=new e;f.append(b),c.blob=0===f.getBlob("application/zip").size}catch(d){c.blob=!1}}}}).call(this,"undefined"!=typeof Buffer?Buffer:void 0)},{}],18:[function(a,b){"use strict";function c(a){a&&(this.data=a,this.length=this.data.length,this.index=0)}var d=a("./dataReader");c.prototype=new d,c.prototype.byteAt=function(a){return this.data[a]},c.prototype.lastIndexOfSignature=function(a){for(var b=a.charCodeAt(0),c=a.charCodeAt(1),d=a.charCodeAt(2),e=a.charCodeAt(3),f=this.length-4;f>=0;--f)if(this.data[f]===b&&this.data[f+1]===c&&this.data[f+2]===d&&this.data[f+3]===e)return f;return-1},c.prototype.readData=function(a){if(this.checkOffset(a),0===a)return new Uint8Array(0);var b=this.data.subarray(this.index,this.index+a);return this.index+=a,b},b.exports=c},{"./dataReader":5}],19:[function(a,b){"use strict";var c=a("./utils"),d=function(a){this.data=new Uint8Array(a),this.index=0};d.prototype={append:function(a){0!==a.length&&(a=c.transformTo("uint8array",a),this.data.set(a,this.index),this.index+=a.length)},finalize:function(){return this.data}},b.exports=d},{"./utils":21}],20:[function(a,b,c){"use strict";for(var d=a("./utils"),e=a("./support"),f=a("./nodeBuffer"),g=new Array(256),h=0;256>h;h++)g[h]=h>=252?6:h>=248?5:h>=240?4:h>=224?3:h>=192?2:1;g[254]=g[254]=1;var i=function(a){var b,c,d,f,g,h=a.length,i=0;for(f=0;h>f;f++)c=a.charCodeAt(f),55296===(64512&c)&&h>f+1&&(d=a.charCodeAt(f+1),56320===(64512&d)&&(c=65536+(c-55296<<10)+(d-56320),f++)),i+=128>c?1:2048>c?2:65536>c?3:4;for(b=e.uint8array?new Uint8Array(i):new Array(i),g=0,f=0;i>g;f++)c=a.charCodeAt(f),55296===(64512&c)&&h>f+1&&(d=a.charCodeAt(f+1),56320===(64512&d)&&(c=65536+(c-55296<<10)+(d-56320),f++)),128>c?b[g++]=c:2048>c?(b[g++]=192|c>>>6,b[g++]=128|63&c):65536>c?(b[g++]=224|c>>>12,b[g++]=128|c>>>6&63,b[g++]=128|63&c):(b[g++]=240|c>>>18,b[g++]=128|c>>>12&63,b[g++]=128|c>>>6&63,b[g++]=128|63&c);return 
b},j=function(a,b){var c;for(b=b||a.length,b>a.length&&(b=a.length),c=b-1;c>=0&&128===(192&a[c]);)c--;return 0>c?b:0===c?b:c+g[a[c]]>b?c:b},k=function(a){var b,c,e,f,h=a.length,i=new Array(2*h);for(c=0,b=0;h>b;)if(e=a[b++],128>e)i[c++]=e;else if(f=g[e],f>4)i[c++]=65533,b+=f-1;else{for(e&=2===f?31:3===f?15:7;f>1&&h>b;)e=e<<6|63&a[b++],f--;f>1?i[c++]=65533:65536>e?i[c++]=e:(e-=65536,i[c++]=55296|e>>10&1023,i[c++]=56320|1023&e)}return i.length!==c&&(i.subarray?i=i.subarray(0,c):i.length=c),d.applyFromCharCode(i)};c.utf8encode=function(a){return e.nodebuffer?f(a,"utf-8"):i(a)},c.utf8decode=function(a){if(e.nodebuffer)return d.transformTo("nodebuffer",a).toString("utf-8");a=d.transformTo(e.uint8array?"uint8array":"array",a);for(var b=[],c=0,f=a.length,g=65536;f>c;){var h=j(a,Math.min(c+g,f));b.push(e.uint8array?k(a.subarray(c,h)):k(a.slice(c,h))),c=h}return b.join("")}},{"./nodeBuffer":11,"./support":17,"./utils":21}],21:[function(a,b,c){"use strict";function d(a){return a}function e(a,b){for(var c=0;c<a.length;++c)b[c]=255&a.charCodeAt(c);return b}function f(a){var b=65536,d=[],e=a.length,f=c.getTypeOf(a),g=0,h=!0;try{switch(f){case"uint8array":String.fromCharCode.apply(null,new Uint8Array(0));break;case"nodebuffer":String.fromCharCode.apply(null,j(0))}}catch(i){h=!1}if(!h){for(var k="",l=0;l<a.length;l++)k+=String.fromCharCode(a[l]);return k}for(;e>g&&b>1;)try{d.push("array"===f||"nodebuffer"===f?String.fromCharCode.apply(null,a.slice(g,Math.min(g+b,e))):String.fromCharCode.apply(null,a.subarray(g,Math.min(g+b,e)))),g+=b}catch(i){b=Math.floor(b/2)}return d.join("")}function g(a,b){for(var c=0;c<a.length;c++)b[c]=a[c];return b}var h=a("./support"),i=a("./compressions"),j=a("./nodeBuffer");c.string2binary=function(a){for(var b="",c=0;c<a.length;c++)b+=String.fromCharCode(255&a.charCodeAt(c));return b},c.arrayBuffer2Blob=function(a){c.checkSupport("blob");try{return new Blob([a],{type:"application/zip"})}catch(b){try{var 
d=window.BlobBuilder||window.WebKitBlobBuilder||window.MozBlobBuilder||window.MSBlobBuilder,e=new d;return e.append(a),e.getBlob("application/zip")}catch(b){throw new Error("Bug : can't construct the Blob.")}}},c.applyFromCharCode=f;var k={};k.string={string:d,array:function(a){return e(a,new Array(a.length))},arraybuffer:function(a){return k.string.uint8array(a).buffer},uint8array:function(a){return e(a,new Uint8Array(a.length))},nodebuffer:function(a){return e(a,j(a.length))}},k.array={string:f,array:d,arraybuffer:function(a){return new Uint8Array(a).buffer},uint8array:function(a){return new Uint8Array(a)},nodebuffer:function(a){return j(a)}},k.arraybuffer={string:function(a){return f(new Uint8Array(a))},array:function(a){return g(new Uint8Array(a),new Array(a.byteLength))},arraybuffer:d,uint8array:function(a){return new Uint8Array(a)},nodebuffer:function(a){return j(new Uint8Array(a))}},k.uint8array={string:f,array:function(a){return g(a,new Array(a.length))},arraybuffer:function(a){return a.buffer},uint8array:d,nodebuffer:function(a){return j(a)}},k.nodebuffer={string:f,array:function(a){return g(a,new Array(a.length))},arraybuffer:function(a){return k.nodebuffer.uint8array(a).buffer},uint8array:function(a){return g(a,new Uint8Array(a.length))},nodebuffer:d},c.transformTo=function(a,b){if(b||(b=""),!a)return b;c.checkSupport(a);var d=c.getTypeOf(b),e=k[d][a](b);return e},c.getTypeOf=function(a){return"string"==typeof a?"string":"[object Array]"===Object.prototype.toString.call(a)?"array":h.nodebuffer&&j.test(a)?"nodebuffer":h.uint8array&&a instanceof Uint8Array?"uint8array":h.arraybuffer&&a instanceof ArrayBuffer?"arraybuffer":void 0},c.checkSupport=function(a){var b=h[a.toLowerCase()];if(!b)throw new Error(a+" is not supported by this browser")},c.MAX_VALUE_16BITS=65535,c.MAX_VALUE_32BITS=-1,c.pretty=function(a){var b,c,d="";for(c=0;c<(a||"").length;c++)b=a.charCodeAt(c),d+="\\x"+(16>b?"0":"")+b.toString(16).toUpperCase();return 
// Tail of module 21 ("./utils"): findCompression matches a 2-byte magic against the
// registered "./compressions" table; isRegExp is a toString-tag check.
// Module 22: ZipEntries — parses a whole ZIP archive. checkSignature throws on a
// signature mismatch (using utils.pretty for hex dumps). readBlockEndOfCentral reads the
// classic End-Of-Central-Directory record (disk numbers, record counts, size/offset,
// comment) and utf8-decodes the archive comment. readBlockZip64EndOfCentral reads the
// Zip64 EOCD (8-byte counts/offsets) plus its extensible data sectors (id/length/value
// triplets, 44 = fixed part already consumed). The locator variant rejects multi-volume
// archives (disksCount > 1). readEndOfCentral scans backwards for the EOCD signature and,
// when any field is saturated (0xFFFF / 0xFFFFFFFF sentinels from utils.MAX_VALUE_*),
// upgrades to the Zip64 path via the Zip64 locator. readCentralDir/readLocalFiles then
// walk the central directory and each entry's local header. prepareReader (continues on
// the next line) picks a string/Uint8Array/Buffer reader for the input type.
// NOTE(review): minified vendor code — annotation only, bytes unchanged.
d},c.findCompression=function(a){for(var b in i)if(i.hasOwnProperty(b)&&i[b].magic===a)return i[b];return null},c.isRegExp=function(a){return"[object RegExp]"===Object.prototype.toString.call(a)}},{"./compressions":3,"./nodeBuffer":11,"./support":17}],22:[function(a,b){"use strict";function c(a,b){this.files=[],this.loadOptions=b,a&&this.load(a)}var d=a("./stringReader"),e=a("./nodeBufferReader"),f=a("./uint8ArrayReader"),g=a("./utils"),h=a("./signature"),i=a("./zipEntry"),j=a("./support"),k=a("./object");c.prototype={checkSignature:function(a){var b=this.reader.readString(4);if(b!==a)throw new Error("Corrupted zip or bug : unexpected signature ("+g.pretty(b)+", expected "+g.pretty(a)+")")},readBlockEndOfCentral:function(){this.diskNumber=this.reader.readInt(2),this.diskWithCentralDirStart=this.reader.readInt(2),this.centralDirRecordsOnThisDisk=this.reader.readInt(2),this.centralDirRecords=this.reader.readInt(2),this.centralDirSize=this.reader.readInt(4),this.centralDirOffset=this.reader.readInt(4),this.zipCommentLength=this.reader.readInt(2),this.zipComment=this.reader.readString(this.zipCommentLength),this.zipComment=k.utf8decode(this.zipComment)},readBlockZip64EndOfCentral:function(){this.zip64EndOfCentralSize=this.reader.readInt(8),this.versionMadeBy=this.reader.readString(2),this.versionNeeded=this.reader.readInt(2),this.diskNumber=this.reader.readInt(4),this.diskWithCentralDirStart=this.reader.readInt(4),this.centralDirRecordsOnThisDisk=this.reader.readInt(8),this.centralDirRecords=this.reader.readInt(8),this.centralDirSize=this.reader.readInt(8),this.centralDirOffset=this.reader.readInt(8),this.zip64ExtensibleData={};for(var 
a,b,c,d=this.zip64EndOfCentralSize-44,e=0;d>e;)a=this.reader.readInt(2),b=this.reader.readInt(4),c=this.reader.readString(b),this.zip64ExtensibleData[a]={id:a,length:b,value:c}},readBlockZip64EndOfCentralLocator:function(){if(this.diskWithZip64CentralDirStart=this.reader.readInt(4),this.relativeOffsetEndOfZip64CentralDir=this.reader.readInt(8),this.disksCount=this.reader.readInt(4),this.disksCount>1)throw new Error("Multi-volumes zip are not supported")},readLocalFiles:function(){var a,b;for(a=0;a<this.files.length;a++)b=this.files[a],this.reader.setIndex(b.localHeaderOffset),this.checkSignature(h.LOCAL_FILE_HEADER),b.readLocalPart(this.reader),b.handleUTF8()},readCentralDir:function(){var a;for(this.reader.setIndex(this.centralDirOffset);this.reader.readString(4)===h.CENTRAL_FILE_HEADER;)a=new i({zip64:this.zip64},this.loadOptions),a.readCentralPart(this.reader),this.files.push(a)},readEndOfCentral:function(){var a=this.reader.lastIndexOfSignature(h.CENTRAL_DIRECTORY_END);if(-1===a)throw new Error("Corrupted zip : can't find end of central directory");if(this.reader.setIndex(a),this.checkSignature(h.CENTRAL_DIRECTORY_END),this.readBlockEndOfCentral(),this.diskNumber===g.MAX_VALUE_16BITS||this.diskWithCentralDirStart===g.MAX_VALUE_16BITS||this.centralDirRecordsOnThisDisk===g.MAX_VALUE_16BITS||this.centralDirRecords===g.MAX_VALUE_16BITS||this.centralDirSize===g.MAX_VALUE_32BITS||this.centralDirOffset===g.MAX_VALUE_32BITS){if(this.zip64=!0,a=this.reader.lastIndexOfSignature(h.ZIP64_CENTRAL_DIRECTORY_LOCATOR),-1===a)throw new Error("Corrupted zip : can't find the ZIP64 end of central directory locator");this.reader.setIndex(a),this.checkSignature(h.ZIP64_CENTRAL_DIRECTORY_LOCATOR),this.readBlockZip64EndOfCentralLocator(),this.reader.setIndex(this.relativeOffsetEndOfZip64CentralDir),this.checkSignature(h.ZIP64_CENTRAL_DIRECTORY_END),this.readBlockZip64EndOfCentral()}},prepareReader:function(a){var 
// End of ZipEntries: prepareReader chooses NodeBufferReader / Uint8ArrayReader /
// StringReader based on utils.getTypeOf and support flags; load() runs the full parse
// (EOCD -> central dir -> local headers).
// Module 23: ZipEntry — one archive member. isEncrypted/useUTF8 test bitFlag bits 0 and
// 11 (0x800 = UTF-8 names). prepareCompressedContent/prepareContent build lazy getters
// that save/restore the reader index; prepareContent verifies the uncompressed size and
// throws on mismatch. readLocalPart skips the 22 fixed bytes already known from the
// central directory, reads name + extra field, rejects entries whose sizes were never
// resolved (-1), looks up the compression by magic, wires a CompressedObject, and when
// loadOptions.checkCRC32 is set eagerly decompresses and verifies the CRC.
// readCentralPart (next line continues it) reads the full central-directory record and
// rejects encrypted entries; dir flag = external attribute bit 4 (0x10).
// NOTE(review): minified vendor code — annotation only, bytes unchanged.
b=g.getTypeOf(a);this.reader="string"!==b||j.uint8array?"nodebuffer"===b?new e(a):new f(g.transformTo("uint8array",a)):new d(a,this.loadOptions.optimizedBinaryString)},load:function(a){this.prepareReader(a),this.readEndOfCentral(),this.readCentralDir(),this.readLocalFiles()}},b.exports=c},{"./nodeBufferReader":12,"./object":13,"./signature":14,"./stringReader":15,"./support":17,"./uint8ArrayReader":18,"./utils":21,"./zipEntry":23}],23:[function(a,b){"use strict";function c(a,b){this.options=a,this.loadOptions=b}var d=a("./stringReader"),e=a("./utils"),f=a("./compressedObject"),g=a("./object");c.prototype={isEncrypted:function(){return 1===(1&this.bitFlag)},useUTF8:function(){return 2048===(2048&this.bitFlag)},prepareCompressedContent:function(a,b,c){return function(){var d=a.index;a.setIndex(b);var e=a.readData(c);return a.setIndex(d),e}},prepareContent:function(a,b,c,d,f){return function(){var a=e.transformTo(d.uncompressInputType,this.getCompressedContent()),b=d.uncompress(a);if(b.length!==f)throw new Error("Bug : uncompressed data size mismatch");return b}},readLocalPart:function(a){var b,c;if(a.skip(22),this.fileNameLength=a.readInt(2),c=a.readInt(2),this.fileName=a.readString(this.fileNameLength),a.skip(c),-1==this.compressedSize||-1==this.uncompressedSize)throw new Error("Bug or corrupted zip : didn't get enough informations from the central directory (compressedSize == -1 || uncompressedSize == -1)");if(b=e.findCompression(this.compressionMethod),null===b)throw new Error("Corrupted zip : compression "+e.pretty(this.compressionMethod)+" unknown (inner file : "+this.fileName+")");if(this.decompressed=new 
f,this.decompressed.compressedSize=this.compressedSize,this.decompressed.uncompressedSize=this.uncompressedSize,this.decompressed.crc32=this.crc32,this.decompressed.compressionMethod=this.compressionMethod,this.decompressed.getCompressedContent=this.prepareCompressedContent(a,a.index,this.compressedSize,b),this.decompressed.getContent=this.prepareContent(a,a.index,this.compressedSize,b,this.uncompressedSize),this.loadOptions.checkCRC32&&(this.decompressed=e.transformTo("string",this.decompressed.getContent()),g.crc32(this.decompressed)!==this.crc32))throw new Error("Corrupted zip : CRC32 mismatch")},readCentralPart:function(a){if(this.versionMadeBy=a.readString(2),this.versionNeeded=a.readInt(2),this.bitFlag=a.readInt(2),this.compressionMethod=a.readString(2),this.date=a.readDate(),this.crc32=a.readInt(4),this.compressedSize=a.readInt(4),this.uncompressedSize=a.readInt(4),this.fileNameLength=a.readInt(2),this.extraFieldsLength=a.readInt(2),this.fileCommentLength=a.readInt(2),this.diskNumberStart=a.readInt(2),this.internalFileAttributes=a.readInt(2),this.externalFileAttributes=a.readInt(4),this.localHeaderOffset=a.readInt(4),this.isEncrypted())throw new Error("Encrypted zip are not supported");this.fileName=a.readString(this.fileNameLength),this.readExtraFields(a),this.parseZIP64ExtraField(a),this.fileComment=a.readString(this.fileCommentLength),this.dir=16&this.externalFileAttributes?!0:!1},parseZIP64ExtraField:function(){if(this.extraFields[1]){var a=new d(this.extraFields[1].value);this.uncompressedSize===e.MAX_VALUE_32BITS&&(this.uncompressedSize=a.readInt(8)),this.compressedSize===e.MAX_VALUE_32BITS&&(this.compressedSize=a.readInt(8)),this.localHeaderOffset===e.MAX_VALUE_32BITS&&(this.localHeaderOffset=a.readInt(8)),this.diskNumberStart===e.MAX_VALUE_32BITS&&(this.diskNumberStart=a.readInt(4))}},readExtraFields:function(a){var 
b,c,d,e=a.index;for(this.extraFields=this.extraFields||{};a.index<e+this.extraFieldsLength;)b=a.readInt(2),c=a.readInt(2),d=a.readString(c),this.extraFields[b]={id:b,length:c,value:d}},handleUTF8:function(){if(this.useUTF8())this.fileName=g.utf8decode(this.fileName),this.fileComment=g.utf8decode(this.fileComment);else{var a=this.findExtraFieldUnicodePath();null!==a&&(this.fileName=a);var b=this.findExtraFieldUnicodeComment();null!==b&&(this.fileComment=b)}},findExtraFieldUnicodePath:function(){var a=this.extraFields[28789];if(a){var b=new d(a.value);return 1!==b.readInt(1)?null:g.crc32(this.fileName)!==b.readInt(4)?null:g.utf8decode(b.readString(a.length-5))}return null},findExtraFieldUnicodeComment:function(){var a=this.extraFields[25461];if(a){var b=new d(a.value);return 1!==b.readInt(1)?null:g.crc32(this.fileComment)!==b.readInt(4)?null:g.utf8decode(b.readString(a.length-5))}return null}},b.exports=c},{"./compressedObject":2,"./object":13,"./stringReader":15,"./utils":21}],24:[function(a,b){"use strict";var c=a("./lib/utils/common").assign,d=a("./lib/deflate"),e=a("./lib/inflate"),f=a("./lib/zlib/constants"),g={};c(g,d,e,f),b.exports=g},{"./lib/deflate":25,"./lib/inflate":26,"./lib/utils/common":27,"./lib/zlib/constants":30}],25:[function(a,b,c){"use strict";function d(a,b){var c=new s(b);if(c.push(a,!0),c.err)throw c.msg;return c.result}function e(a,b){return b=b||{},b.raw=!0,d(a,b)}function f(a,b){return b=b||{},b.gzip=!0,d(a,b)}var g=a("./zlib/deflate.js"),h=a("./utils/common"),i=a("./utils/strings"),j=a("./zlib/messages"),k=a("./zlib/zstream"),l=0,m=4,n=0,o=1,p=-1,q=0,r=8,s=function(a){this.options=h.assign({level:p,method:r,chunkSize:16384,windowBits:15,memLevel:8,strategy:q,to:""},a||{});var b=this.options;b.raw&&b.windowBits>0?b.windowBits=-b.windowBits:b.gzip&&b.windowBits>0&&b.windowBits<16&&(b.windowBits+=16),this.err=0,this.msg="",this.ended=!1,this.chunks=[],this.strm=new k,this.strm.avail_out=0;var 
c=g.deflateInit2(this.strm,b.level,b.method,b.windowBits,b.memLevel,b.strategy);if(c!==n)throw new Error(j[c]);b.header&&g.deflateSetHeader(this.strm,b.header)};s.prototype.push=function(a,b){var c,d,e=this.strm,f=this.options.chunkSize;if(this.ended)return!1;d=b===~~b?b:b===!0?m:l,e.input="string"==typeof a?i.string2buf(a):a,e.next_in=0,e.avail_in=e.input.length;do{if(0===e.avail_out&&(e.output=new h.Buf8(f),e.next_out=0,e.avail_out=f),c=g.deflate(e,d),c!==o&&c!==n)return this.onEnd(c),this.ended=!0,!1;(0===e.avail_out||0===e.avail_in&&d===m)&&this.onData("string"===this.options.to?i.buf2binstring(h.shrinkBuf(e.output,e.next_out)):h.shrinkBuf(e.output,e.next_out))}while((e.avail_in>0||0===e.avail_out)&&c!==o);return d===m?(c=g.deflateEnd(this.strm),this.onEnd(c),this.ended=!0,c===n):!0},s.prototype.onData=function(a){this.chunks.push(a)},s.prototype.onEnd=function(a){a===n&&(this.result="string"===this.options.to?this.chunks.join(""):h.flattenChunks(this.chunks)),this.chunks=[],this.err=a,this.msg=this.strm.msg},c.Deflate=s,c.deflate=d,c.deflateRaw=e,c.gzip=f},{"./utils/common":27,"./utils/strings":28,"./zlib/deflate.js":32,"./zlib/messages":37,"./zlib/zstream":39}],26:[function(a,b,c){"use strict";function d(a,b){var c=new m(b);if(c.push(a,!0),c.err)throw c.msg;return c.result}function e(a,b){return b=b||{},b.raw=!0,d(a,b)}var f=a("./zlib/inflate.js"),g=a("./utils/common"),h=a("./utils/strings"),i=a("./zlib/constants"),j=a("./zlib/messages"),k=a("./zlib/zstream"),l=a("./zlib/gzheader"),m=function(a){this.options=g.assign({chunkSize:16384,windowBits:0,to:""},a||{});var b=this.options;b.raw&&b.windowBits>=0&&b.windowBits<16&&(b.windowBits=-b.windowBits,0===b.windowBits&&(b.windowBits=-15)),!(b.windowBits>=0&&b.windowBits<16)||a&&a.windowBits||(b.windowBits+=32),b.windowBits>15&&b.windowBits<48&&0===(15&b.windowBits)&&(b.windowBits|=15),this.err=0,this.msg="",this.ended=!1,this.chunks=[],this.strm=new k,this.strm.avail_out=0;var 
c=f.inflateInit2(this.strm,b.windowBits);if(c!==i.Z_OK)throw new Error(j[c]);this.header=new l,f.inflateGetHeader(this.strm,this.header)};m.prototype.push=function(a,b){var c,d,e,j,k,l=this.strm,m=this.options.chunkSize;if(this.ended)return!1;d=b===~~b?b:b===!0?i.Z_FINISH:i.Z_NO_FLUSH,l.input="string"==typeof a?h.binstring2buf(a):a,l.next_in=0,l.avail_in=l.input.length;do{if(0===l.avail_out&&(l.output=new g.Buf8(m),l.next_out=0,l.avail_out=m),c=f.inflate(l,i.Z_NO_FLUSH),c!==i.Z_STREAM_END&&c!==i.Z_OK)return this.onEnd(c),this.ended=!0,!1;l.next_out&&(0===l.avail_out||c===i.Z_STREAM_END||0===l.avail_in&&d===i.Z_FINISH)&&("string"===this.options.to?(e=h.utf8border(l.output,l.next_out),j=l.next_out-e,k=h.buf2string(l.output,e),l.next_out=j,l.avail_out=m-j,j&&g.arraySet(l.output,l.output,e,j,0),this.onData(k)):this.onData(g.shrinkBuf(l.output,l.next_out)))}while(l.avail_in>0&&c!==i.Z_STREAM_END);return c===i.Z_STREAM_END&&(d=i.Z_FINISH),d===i.Z_FINISH?(c=f.inflateEnd(this.strm),this.onEnd(c),this.ended=!0,c===i.Z_OK):!0},m.prototype.onData=function(a){this.chunks.push(a)},m.prototype.onEnd=function(a){a===i.Z_OK&&(this.result="string"===this.options.to?this.chunks.join(""):g.flattenChunks(this.chunks)),this.chunks=[],this.err=a,this.msg=this.strm.msg},c.Inflate=m,c.inflate=d,c.inflateRaw=e,c.ungzip=d},{"./utils/common":27,"./utils/strings":28,"./zlib/constants":30,"./zlib/gzheader":33,"./zlib/inflate.js":35,"./zlib/messages":37,"./zlib/zstream":39}],27:[function(a,b,c){"use strict";var d="undefined"!=typeof Uint8Array&&"undefined"!=typeof Uint16Array&&"undefined"!=typeof Int32Array;c.assign=function(a){for(var b=Array.prototype.slice.call(arguments,1);b.length;){var c=b.shift();if(c){if("object"!=typeof c)throw new TypeError(c+"must be non-object");for(var d in c)c.hasOwnProperty(d)&&(a[d]=c[d])}}return a},c.shrinkBuf=function(a,b){return a.length===b?a:a.subarray?a.subarray(0,b):(a.length=b,a)};var e={arraySet:function(a,b,c,d,e){if(b.subarray&&a.subarray)return void 
a.set(b.subarray(c,c+d),e);for(var f=0;d>f;f++)a[e+f]=b[c+f]},flattenChunks:function(a){var b,c,d,e,f,g;for(d=0,b=0,c=a.length;c>b;b++)d+=a[b].length;for(g=new Uint8Array(d),e=0,b=0,c=a.length;c>b;b++)f=a[b],g.set(f,e),e+=f.length;return g}},f={arraySet:function(a,b,c,d,e){for(var f=0;d>f;f++)a[e+f]=b[c+f]},flattenChunks:function(a){return[].concat.apply([],a)}};c.setTyped=function(a){a?(c.Buf8=Uint8Array,c.Buf16=Uint16Array,c.Buf32=Int32Array,c.assign(c,e)):(c.Buf8=Array,c.Buf16=Array,c.Buf32=Array,c.assign(c,f))},c.setTyped(d)},{}],28:[function(a,b,c){"use strict";function d(a,b){if(65537>b&&(a.subarray&&g||!a.subarray&&f))return String.fromCharCode.apply(null,e.shrinkBuf(a,b));for(var c="",d=0;b>d;d++)c+=String.fromCharCode(a[d]);return c}var e=a("./common"),f=!0,g=!0;try{String.fromCharCode.apply(null,[0])}catch(h){f=!1}try{String.fromCharCode.apply(null,new Uint8Array(1))}catch(h){g=!1}for(var i=new e.Buf8(256),j=0;256>j;j++)i[j]=j>=252?6:j>=248?5:j>=240?4:j>=224?3:j>=192?2:1;i[254]=i[254]=1,c.string2buf=function(a){var b,c,d,f,g,h=a.length,i=0;for(f=0;h>f;f++)c=a.charCodeAt(f),55296===(64512&c)&&h>f+1&&(d=a.charCodeAt(f+1),56320===(64512&d)&&(c=65536+(c-55296<<10)+(d-56320),f++)),i+=128>c?1:2048>c?2:65536>c?3:4;for(b=new e.Buf8(i),g=0,f=0;i>g;f++)c=a.charCodeAt(f),55296===(64512&c)&&h>f+1&&(d=a.charCodeAt(f+1),56320===(64512&d)&&(c=65536+(c-55296<<10)+(d-56320),f++)),128>c?b[g++]=c:2048>c?(b[g++]=192|c>>>6,b[g++]=128|63&c):65536>c?(b[g++]=224|c>>>12,b[g++]=128|c>>>6&63,b[g++]=128|63&c):(b[g++]=240|c>>>18,b[g++]=128|c>>>12&63,b[g++]=128|c>>>6&63,b[g++]=128|63&c);return b},c.buf2binstring=function(a){return d(a,a.length)},c.binstring2buf=function(a){for(var b=new e.Buf8(a.length),c=0,d=b.length;d>c;c++)b[c]=a.charCodeAt(c);return b},c.buf2string=function(a,b){var c,e,f,g,h=b||a.length,j=new Array(2*h);for(e=0,c=0;h>c;)if(f=a[c++],128>f)j[e++]=f;else 
if(g=i[f],g>4)j[e++]=65533,c+=g-1;else{for(f&=2===g?31:3===g?15:7;g>1&&h>c;)f=f<<6|63&a[c++],g--;g>1?j[e++]=65533:65536>f?j[e++]=f:(f-=65536,j[e++]=55296|f>>10&1023,j[e++]=56320|1023&f)}return d(j,e)},c.utf8border=function(a,b){var c;for(b=b||a.length,b>a.length&&(b=a.length),c=b-1;c>=0&&128===(192&a[c]);)c--;return 0>c?b:0===c?b:c+i[a[c]]>b?c:b}},{"./common":27}],29:[function(a,b){"use strict";function c(a,b,c,d){for(var e=65535&a|0,f=a>>>16&65535|0,g=0;0!==c;){g=c>2e3?2e3:c,c-=g;do e=e+b[d++]|0,f=f+e|0;while(--g);e%=65521,f%=65521}return e|f<<16|0}b.exports=c},{}],30:[function(a,b){b.exports={Z_NO_FLUSH:0,Z_PARTIAL_FLUSH:1,Z_SYNC_FLUSH:2,Z_FULL_FLUSH:3,Z_FINISH:4,Z_BLOCK:5,Z_TREES:6,Z_OK:0,Z_STREAM_END:1,Z_NEED_DICT:2,Z_ERRNO:-1,Z_STREAM_ERROR:-2,Z_DATA_ERROR:-3,Z_BUF_ERROR:-5,Z_NO_COMPRESSION:0,Z_BEST_SPEED:1,Z_BEST_COMPRESSION:9,Z_DEFAULT_COMPRESSION:-1,Z_FILTERED:1,Z_HUFFMAN_ONLY:2,Z_RLE:3,Z_FIXED:4,Z_DEFAULT_STRATEGY:0,Z_BINARY:0,Z_TEXT:1,Z_UNKNOWN:2,Z_DEFLATED:8}},{}],31:[function(a,b){"use strict";function c(){for(var a,b=[],c=0;256>c;c++){a=c;for(var d=0;8>d;d++)a=1&a?3988292384^a>>>1:a>>>1;b[c]=a}return b}function d(a,b,c,d){var f=e,g=d+c;a=-1^a;for(var h=d;g>h;h++)a=a>>>8^f[255&(a^b[h])];return-1^a}var e=c();b.exports=d},{}],32:[function(a,b,c){"use strict";function d(a,b){return a.msg=G[b],b}function e(a){return(a<<1)-(a>4?9:0)}function f(a){for(var b=a.length;--b>=0;)a[b]=0}function g(a){var b=a.state,c=b.pending;c>a.avail_out&&(c=a.avail_out),0!==c&&(C.arraySet(a.output,b.pending_buf,b.pending_out,c,a.next_out),a.next_out+=c,b.pending_out+=c,a.total_out+=c,a.avail_out-=c,b.pending-=c,0===b.pending&&(b.pending_out=0))}function h(a,b){D._tr_flush_block(a,a.block_start>=0?a.block_start:-1,a.strstart-a.block_start,b),a.block_start=a.strstart,g(a.strm)}function i(a,b){a.pending_buf[a.pending++]=b}function j(a,b){a.pending_buf[a.pending++]=b>>>8&255,a.pending_buf[a.pending++]=255&b}function k(a,b,c,d){var e=a.avail_in;return 
// zlib deflate internals (minified names appear to correspond to zlib's deflate.c —
// mapping inferred from structure; hedged): l = longest_match (chained hash search,
// nice_match/good_match heuristics, 8-byte unrolled compare loop); m = fill_window
// (slides the window by w_size, rebases head/prev hash chains, reads input via k,
// re-primes ins_h over the insert region); n = deflate_stored (copy-only blocks,
// pending_buf_size-5 sized); o = deflate_fast (greedy matching); p = deflate_slow
// (lazy matching with prev_length/prev_match deferral, Z_FILTERED drop of short
// distant matches); q = deflate_rle (run-length against the previous byte, up to
// ib = MAX_MATCH); r = deflate_huff (literal-only); s = lm_init (resets the matcher
// from the per-level config table B). Return codes sb/tb/ub/vb = need_more/block_done/
// finish_started/finish_done. Exact statement order is load-bearing — left untouched.
// NOTE(review): minified vendor code — annotation only, bytes unchanged.
e>d&&(e=d),0===e?0:(a.avail_in-=e,C.arraySet(b,a.input,a.next_in,e,c),1===a.state.wrap?a.adler=E(a.adler,b,e,c):2===a.state.wrap&&(a.adler=F(a.adler,b,e,c)),a.next_in+=e,a.total_in+=e,e)}function l(a,b){var c,d,e=a.max_chain_length,f=a.strstart,g=a.prev_length,h=a.nice_match,i=a.strstart>a.w_size-jb?a.strstart-(a.w_size-jb):0,j=a.window,k=a.w_mask,l=a.prev,m=a.strstart+ib,n=j[f+g-1],o=j[f+g];a.prev_length>=a.good_match&&(e>>=2),h>a.lookahead&&(h=a.lookahead);do if(c=b,j[c+g]===o&&j[c+g-1]===n&&j[c]===j[f]&&j[++c]===j[f+1]){f+=2,c++;do;while(j[++f]===j[++c]&&j[++f]===j[++c]&&j[++f]===j[++c]&&j[++f]===j[++c]&&j[++f]===j[++c]&&j[++f]===j[++c]&&j[++f]===j[++c]&&j[++f]===j[++c]&&m>f);if(d=ib-(m-f),f=m-ib,d>g){if(a.match_start=b,g=d,d>=h)break;n=j[f+g-1],o=j[f+g]}}while((b=l[b&k])>i&&0!==--e);return g<=a.lookahead?g:a.lookahead}function m(a){var b,c,d,e,f,g=a.w_size;do{if(e=a.window_size-a.lookahead-a.strstart,a.strstart>=g+(g-jb)){C.arraySet(a.window,a.window,g,g,0),a.match_start-=g,a.strstart-=g,a.block_start-=g,c=a.hash_size,b=c;do d=a.head[--b],a.head[b]=d>=g?d-g:0;while(--c);c=g,b=c;do d=a.prev[--b],a.prev[b]=d>=g?d-g:0;while(--c);e+=g}if(0===a.strm.avail_in)break;if(c=k(a.strm,a.window,a.strstart+a.lookahead,e),a.lookahead+=c,a.lookahead+a.insert>=hb)for(f=a.strstart-a.insert,a.ins_h=a.window[f],a.ins_h=(a.ins_h<<a.hash_shift^a.window[f+1])&a.hash_mask;a.insert&&(a.ins_h=(a.ins_h<<a.hash_shift^a.window[f+hb-1])&a.hash_mask,a.prev[f&a.w_mask]=a.head[a.ins_h],a.head[a.ins_h]=f,f++,a.insert--,!(a.lookahead+a.insert<hb)););}while(a.lookahead<jb&&0!==a.strm.avail_in)}function n(a,b){var c=65535;for(c>a.pending_buf_size-5&&(c=a.pending_buf_size-5);;){if(a.lookahead<=1){if(m(a),0===a.lookahead&&b===H)return sb;if(0===a.lookahead)break}a.strstart+=a.lookahead,a.lookahead=0;var d=a.block_start+c;if((0===a.strstart||a.strstart>=d)&&(a.lookahead=a.strstart-d,a.strstart=d,h(a,!1),0===a.strm.avail_out))return 
sb;if(a.strstart-a.block_start>=a.w_size-jb&&(h(a,!1),0===a.strm.avail_out))return sb}return a.insert=0,b===K?(h(a,!0),0===a.strm.avail_out?ub:vb):a.strstart>a.block_start&&(h(a,!1),0===a.strm.avail_out)?sb:sb}function o(a,b){for(var c,d;;){if(a.lookahead<jb){if(m(a),a.lookahead<jb&&b===H)return sb;if(0===a.lookahead)break}if(c=0,a.lookahead>=hb&&(a.ins_h=(a.ins_h<<a.hash_shift^a.window[a.strstart+hb-1])&a.hash_mask,c=a.prev[a.strstart&a.w_mask]=a.head[a.ins_h],a.head[a.ins_h]=a.strstart),0!==c&&a.strstart-c<=a.w_size-jb&&(a.match_length=l(a,c)),a.match_length>=hb)if(d=D._tr_tally(a,a.strstart-a.match_start,a.match_length-hb),a.lookahead-=a.match_length,a.match_length<=a.max_lazy_match&&a.lookahead>=hb){a.match_length--;do a.strstart++,a.ins_h=(a.ins_h<<a.hash_shift^a.window[a.strstart+hb-1])&a.hash_mask,c=a.prev[a.strstart&a.w_mask]=a.head[a.ins_h],a.head[a.ins_h]=a.strstart;while(0!==--a.match_length);a.strstart++}else a.strstart+=a.match_length,a.match_length=0,a.ins_h=a.window[a.strstart],a.ins_h=(a.ins_h<<a.hash_shift^a.window[a.strstart+1])&a.hash_mask;else d=D._tr_tally(a,0,a.window[a.strstart]),a.lookahead--,a.strstart++;if(d&&(h(a,!1),0===a.strm.avail_out))return sb}return a.insert=a.strstart<hb-1?a.strstart:hb-1,b===K?(h(a,!0),0===a.strm.avail_out?ub:vb):a.last_lit&&(h(a,!1),0===a.strm.avail_out)?sb:tb}function p(a,b){for(var c,d,e;;){if(a.lookahead<jb){if(m(a),a.lookahead<jb&&b===H)return 
sb;if(0===a.lookahead)break}if(c=0,a.lookahead>=hb&&(a.ins_h=(a.ins_h<<a.hash_shift^a.window[a.strstart+hb-1])&a.hash_mask,c=a.prev[a.strstart&a.w_mask]=a.head[a.ins_h],a.head[a.ins_h]=a.strstart),a.prev_length=a.match_length,a.prev_match=a.match_start,a.match_length=hb-1,0!==c&&a.prev_length<a.max_lazy_match&&a.strstart-c<=a.w_size-jb&&(a.match_length=l(a,c),a.match_length<=5&&(a.strategy===S||a.match_length===hb&&a.strstart-a.match_start>4096)&&(a.match_length=hb-1)),a.prev_length>=hb&&a.match_length<=a.prev_length){e=a.strstart+a.lookahead-hb,d=D._tr_tally(a,a.strstart-1-a.prev_match,a.prev_length-hb),a.lookahead-=a.prev_length-1,a.prev_length-=2;do++a.strstart<=e&&(a.ins_h=(a.ins_h<<a.hash_shift^a.window[a.strstart+hb-1])&a.hash_mask,c=a.prev[a.strstart&a.w_mask]=a.head[a.ins_h],a.head[a.ins_h]=a.strstart);while(0!==--a.prev_length);if(a.match_available=0,a.match_length=hb-1,a.strstart++,d&&(h(a,!1),0===a.strm.avail_out))return sb}else if(a.match_available){if(d=D._tr_tally(a,0,a.window[a.strstart-1]),d&&h(a,!1),a.strstart++,a.lookahead--,0===a.strm.avail_out)return sb}else a.match_available=1,a.strstart++,a.lookahead--}return a.match_available&&(d=D._tr_tally(a,0,a.window[a.strstart-1]),a.match_available=0),a.insert=a.strstart<hb-1?a.strstart:hb-1,b===K?(h(a,!0),0===a.strm.avail_out?ub:vb):a.last_lit&&(h(a,!1),0===a.strm.avail_out)?sb:tb}function q(a,b){for(var c,d,e,f,g=a.window;;){if(a.lookahead<=ib){if(m(a),a.lookahead<=ib&&b===H)return 
sb;if(0===a.lookahead)break}if(a.match_length=0,a.lookahead>=hb&&a.strstart>0&&(e=a.strstart-1,d=g[e],d===g[++e]&&d===g[++e]&&d===g[++e])){f=a.strstart+ib;do;while(d===g[++e]&&d===g[++e]&&d===g[++e]&&d===g[++e]&&d===g[++e]&&d===g[++e]&&d===g[++e]&&d===g[++e]&&f>e);a.match_length=ib-(f-e),a.match_length>a.lookahead&&(a.match_length=a.lookahead)}if(a.match_length>=hb?(c=D._tr_tally(a,1,a.match_length-hb),a.lookahead-=a.match_length,a.strstart+=a.match_length,a.match_length=0):(c=D._tr_tally(a,0,a.window[a.strstart]),a.lookahead--,a.strstart++),c&&(h(a,!1),0===a.strm.avail_out))return sb}return a.insert=0,b===K?(h(a,!0),0===a.strm.avail_out?ub:vb):a.last_lit&&(h(a,!1),0===a.strm.avail_out)?sb:tb}function r(a,b){for(var c;;){if(0===a.lookahead&&(m(a),0===a.lookahead)){if(b===H)return sb;break}if(a.match_length=0,c=D._tr_tally(a,0,a.window[a.strstart]),a.lookahead--,a.strstart++,c&&(h(a,!1),0===a.strm.avail_out))return sb}return a.insert=0,b===K?(h(a,!0),0===a.strm.avail_out?ub:vb):a.last_lit&&(h(a,!1),0===a.strm.avail_out)?sb:tb}function s(a){a.window_size=2*a.w_size,f(a.head),a.max_lazy_match=B[a.level].max_lazy,a.good_match=B[a.level].good_length,a.nice_match=B[a.level].nice_length,a.max_chain_length=B[a.level].max_chain,a.strstart=0,a.block_start=0,a.lookahead=0,a.insert=0,a.match_length=a.prev_length=hb-1,a.match_available=0,a.ins_h=0}function 
// zlib deflate public API (minified names appear to match zlib's deflate.c; hedged):
// t = DeflateState constructor (window/hash/tree buffers, all counters zeroed);
// u = deflateResetKeep; v = deflateReset (+ lm_init); w = deflateSetHeader (gzip wrap
// only); x = deflateInit2 — validates level/method/windowBits/memLevel/strategy,
// derives wrap mode from windowBits sign/range (raw / zlib / +16 gzip), allocates
// buffers (note `8===e&&(e=9)` windowBits bump, a known zlib quirk); y = deflateInit
// with defaults; z = deflate() proper — the big state machine writing the gzip header
// fields (extra/name/comment/hcrc states lb..qb), or the 2-byte zlib header with
// FLEVEL and check bits (m+=31-m%31), then dispatching to huff/rle/level strategy and
// finishing with the adler32 or crc32+length trailer; A = deflateEnd. B is the
// per-level configuration table (good/lazy/nice/chain + strategy function).
// Module 33: GZheader — plain container for gzip header fields. Module 34 begins:
// inflate_fast, the unrolled length/distance decode hot loop (continues next lines).
// NOTE(review): minified vendor code — annotation only, bytes unchanged.
t(){this.strm=null,this.status=0,this.pending_buf=null,this.pending_buf_size=0,this.pending_out=0,this.pending=0,this.wrap=0,this.gzhead=null,this.gzindex=0,this.method=Y,this.last_flush=-1,this.w_size=0,this.w_bits=0,this.w_mask=0,this.window=null,this.window_size=0,this.prev=null,this.head=null,this.ins_h=0,this.hash_size=0,this.hash_bits=0,this.hash_mask=0,this.hash_shift=0,this.block_start=0,this.match_length=0,this.prev_match=0,this.match_available=0,this.strstart=0,this.match_start=0,this.lookahead=0,this.prev_length=0,this.max_chain_length=0,this.max_lazy_match=0,this.level=0,this.strategy=0,this.good_match=0,this.nice_match=0,this.dyn_ltree=new C.Buf16(2*fb),this.dyn_dtree=new C.Buf16(2*(2*db+1)),this.bl_tree=new C.Buf16(2*(2*eb+1)),f(this.dyn_ltree),f(this.dyn_dtree),f(this.bl_tree),this.l_desc=null,this.d_desc=null,this.bl_desc=null,this.bl_count=new C.Buf16(gb+1),this.heap=new C.Buf16(2*cb+1),f(this.heap),this.heap_len=0,this.heap_max=0,this.depth=new C.Buf16(2*cb+1),f(this.depth),this.l_buf=0,this.lit_bufsize=0,this.last_lit=0,this.d_buf=0,this.opt_len=0,this.static_len=0,this.matches=0,this.insert=0,this.bi_buf=0,this.bi_valid=0}function u(a){var b;return a&&a.state?(a.total_in=a.total_out=0,a.data_type=X,b=a.state,b.pending=0,b.pending_out=0,b.wrap<0&&(b.wrap=-b.wrap),b.status=b.wrap?lb:qb,a.adler=2===b.wrap?0:1,b.last_flush=H,D._tr_init(b),M):d(a,O)}function v(a){var b=u(a);return b===M&&s(a.state),b}function w(a,b){return a&&a.state?2!==a.state.wrap?O:(a.state.gzhead=b,M):O}function x(a,b,c,e,f,g){if(!a)return O;var h=1;if(b===R&&(b=6),0>e?(h=0,e=-e):e>15&&(h=2,e-=16),1>f||f>Z||c!==Y||8>e||e>15||0>b||b>9||0>g||g>V)return d(a,O);8===e&&(e=9);var i=new t;return a.state=i,i.strm=a,i.wrap=h,i.gzhead=null,i.w_bits=e,i.w_size=1<<i.w_bits,i.w_mask=i.w_size-1,i.hash_bits=f+7,i.hash_size=1<<i.hash_bits,i.hash_mask=i.hash_size-1,i.hash_shift=~~((i.hash_bits+hb-1)/hb),i.window=new C.Buf8(2*i.w_size),i.head=new C.Buf16(i.hash_size),i.prev=new 
C.Buf16(i.w_size),i.lit_bufsize=1<<f+6,i.pending_buf_size=4*i.lit_bufsize,i.pending_buf=new C.Buf8(i.pending_buf_size),i.d_buf=i.lit_bufsize>>1,i.l_buf=3*i.lit_bufsize,i.level=b,i.strategy=g,i.method=c,v(a)}function y(a,b){return x(a,b,Y,$,_,W)}function z(a,b){var c,h,k,l;if(!a||!a.state||b>L||0>b)return a?d(a,O):O;if(h=a.state,!a.output||!a.input&&0!==a.avail_in||h.status===rb&&b!==K)return d(a,0===a.avail_out?Q:O);if(h.strm=a,c=h.last_flush,h.last_flush=b,h.status===lb)if(2===h.wrap)a.adler=0,i(h,31),i(h,139),i(h,8),h.gzhead?(i(h,(h.gzhead.text?1:0)+(h.gzhead.hcrc?2:0)+(h.gzhead.extra?4:0)+(h.gzhead.name?8:0)+(h.gzhead.comment?16:0)),i(h,255&h.gzhead.time),i(h,h.gzhead.time>>8&255),i(h,h.gzhead.time>>16&255),i(h,h.gzhead.time>>24&255),i(h,9===h.level?2:h.strategy>=T||h.level<2?4:0),i(h,255&h.gzhead.os),h.gzhead.extra&&h.gzhead.extra.length&&(i(h,255&h.gzhead.extra.length),i(h,h.gzhead.extra.length>>8&255)),h.gzhead.hcrc&&(a.adler=F(a.adler,h.pending_buf,h.pending,0)),h.gzindex=0,h.status=mb):(i(h,0),i(h,0),i(h,0),i(h,0),i(h,0),i(h,9===h.level?2:h.strategy>=T||h.level<2?4:0),i(h,wb),h.status=qb);else{var m=Y+(h.w_bits-8<<4)<<8,n=-1;n=h.strategy>=T||h.level<2?0:h.level<6?1:6===h.level?2:3,m|=n<<6,0!==h.strstart&&(m|=kb),m+=31-m%31,h.status=qb,j(h,m),0!==h.strstart&&(j(h,a.adler>>>16),j(h,65535&a.adler)),a.adler=1}if(h.status===mb)if(h.gzhead.extra){for(k=h.pending;h.gzindex<(65535&h.gzhead.extra.length)&&(h.pending!==h.pending_buf_size||(h.gzhead.hcrc&&h.pending>k&&(a.adler=F(a.adler,h.pending_buf,h.pending-k,k)),g(a),k=h.pending,h.pending!==h.pending_buf_size));)i(h,255&h.gzhead.extra[h.gzindex]),h.gzindex++;h.gzhead.hcrc&&h.pending>k&&(a.adler=F(a.adler,h.pending_buf,h.pending-k,k)),h.gzindex===h.gzhead.extra.length&&(h.gzindex=0,h.status=nb)}else 
h.status=nb;if(h.status===nb)if(h.gzhead.name){k=h.pending;do{if(h.pending===h.pending_buf_size&&(h.gzhead.hcrc&&h.pending>k&&(a.adler=F(a.adler,h.pending_buf,h.pending-k,k)),g(a),k=h.pending,h.pending===h.pending_buf_size)){l=1;break}l=h.gzindex<h.gzhead.name.length?255&h.gzhead.name.charCodeAt(h.gzindex++):0,i(h,l)}while(0!==l);h.gzhead.hcrc&&h.pending>k&&(a.adler=F(a.adler,h.pending_buf,h.pending-k,k)),0===l&&(h.gzindex=0,h.status=ob)}else h.status=ob;if(h.status===ob)if(h.gzhead.comment){k=h.pending;do{if(h.pending===h.pending_buf_size&&(h.gzhead.hcrc&&h.pending>k&&(a.adler=F(a.adler,h.pending_buf,h.pending-k,k)),g(a),k=h.pending,h.pending===h.pending_buf_size)){l=1;break}l=h.gzindex<h.gzhead.comment.length?255&h.gzhead.comment.charCodeAt(h.gzindex++):0,i(h,l)}while(0!==l);h.gzhead.hcrc&&h.pending>k&&(a.adler=F(a.adler,h.pending_buf,h.pending-k,k)),0===l&&(h.status=pb)}else h.status=pb;if(h.status===pb&&(h.gzhead.hcrc?(h.pending+2>h.pending_buf_size&&g(a),h.pending+2<=h.pending_buf_size&&(i(h,255&a.adler),i(h,a.adler>>8&255),a.adler=0,h.status=qb)):h.status=qb),0!==h.pending){if(g(a),0===a.avail_out)return h.last_flush=-1,M}else if(0===a.avail_in&&e(b)<=e(c)&&b!==K)return d(a,Q);if(h.status===rb&&0!==a.avail_in)return d(a,Q);if(0!==a.avail_in||0!==h.lookahead||b!==H&&h.status!==rb){var o=h.strategy===T?r(h,b):h.strategy===U?q(h,b):B[h.level].func(h,b);if((o===ub||o===vb)&&(h.status=rb),o===sb||o===ub)return 0===a.avail_out&&(h.last_flush=-1),M;if(o===tb&&(b===I?D._tr_align(h):b!==L&&(D._tr_stored_block(h,0,0,!1),b===J&&(f(h.head),0===h.lookahead&&(h.strstart=0,h.block_start=0,h.insert=0))),g(a),0===a.avail_out))return h.last_flush=-1,M}return b!==K?M:h.wrap<=0?N:(2===h.wrap?(i(h,255&a.adler),i(h,a.adler>>8&255),i(h,a.adler>>16&255),i(h,a.adler>>24&255),i(h,255&a.total_in),i(h,a.total_in>>8&255),i(h,a.total_in>>16&255),i(h,a.total_in>>24&255)):(j(h,a.adler>>>16),j(h,65535&a.adler)),g(a),h.wrap>0&&(h.wrap=-h.wrap),0!==h.pending?M:N)}function A(a){var b;return 
a&&a.state?(b=a.state.status,b!==lb&&b!==mb&&b!==nb&&b!==ob&&b!==pb&&b!==qb&&b!==rb?d(a,O):(a.state=null,b===qb?d(a,P):M)):O}var B,C=a("../utils/common"),D=a("./trees"),E=a("./adler32"),F=a("./crc32"),G=a("./messages"),H=0,I=1,J=3,K=4,L=5,M=0,N=1,O=-2,P=-3,Q=-5,R=-1,S=1,T=2,U=3,V=4,W=0,X=2,Y=8,Z=9,$=15,_=8,ab=29,bb=256,cb=bb+1+ab,db=30,eb=19,fb=2*cb+1,gb=15,hb=3,ib=258,jb=ib+hb+1,kb=32,lb=42,mb=69,nb=73,ob=91,pb=103,qb=113,rb=666,sb=1,tb=2,ub=3,vb=4,wb=3,xb=function(a,b,c,d,e){this.good_length=a,this.max_lazy=b,this.nice_length=c,this.max_chain=d,this.func=e};B=[new xb(0,0,0,0,n),new xb(4,4,8,4,o),new xb(4,5,16,8,o),new xb(4,6,32,32,o),new xb(4,4,16,16,p),new xb(8,16,32,32,p),new xb(8,16,128,128,p),new xb(8,32,128,256,p),new xb(32,128,258,1024,p),new xb(32,258,258,4096,p)],c.deflateInit=y,c.deflateInit2=x,c.deflateReset=v,c.deflateResetKeep=u,c.deflateSetHeader=w,c.deflate=z,c.deflateEnd=A,c.deflateInfo="pako deflate (from Nodeca project)"},{"../utils/common":27,"./adler32":29,"./crc32":31,"./messages":37,"./trees":38}],33:[function(a,b){"use strict";function c(){this.text=0,this.time=0,this.xflags=0,this.os=0,this.extra=null,this.extra_len=0,this.name="",this.comment="",this.hcrc=0,this.done=!1}b.exports=c},{}],34:[function(a,b){"use strict";var c=30,d=12;b.exports=function(a,b){var e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u,v,w,x,y,z,A,B,C;e=a.state,f=a.next_in,B=a.input,g=f+(a.avail_in-5),h=a.next_out,C=a.output,i=h-(b-a.avail_out),j=h+(a.avail_out-257),k=e.dmax,l=e.wsize,m=e.whave,n=e.wnext,o=e.window,p=e.hold,q=e.bits,r=e.lencode,s=e.distcode,t=(1<<e.lenbits)-1,u=(1<<e.distbits)-1;a:do{15>q&&(p+=B[f++]<<q,q+=8,p+=B[f++]<<q,q+=8),v=r[p&t];b:for(;;){if(w=v>>>24,p>>>=w,q-=w,w=v>>>16&255,0===w)C[h++]=65535&v;else{if(!(16&w)){if(0===(64&w)){v=r[(65535&v)+(p&(1<<w)-1)];continue b}if(32&w){e.mode=d;break a}a.msg="invalid literal/length code",e.mode=c;break 
a}x=65535&v,w&=15,w&&(w>q&&(p+=B[f++]<<q,q+=8),x+=p&(1<<w)-1,p>>>=w,q-=w),15>q&&(p+=B[f++]<<q,q+=8,p+=B[f++]<<q,q+=8),v=s[p&u];c:for(;;){if(w=v>>>24,p>>>=w,q-=w,w=v>>>16&255,!(16&w)){if(0===(64&w)){v=s[(65535&v)+(p&(1<<w)-1)];continue c}a.msg="invalid distance code",e.mode=c;break a}if(y=65535&v,w&=15,w>q&&(p+=B[f++]<<q,q+=8,w>q&&(p+=B[f++]<<q,q+=8)),y+=p&(1<<w)-1,y>k){a.msg="invalid distance too far back",e.mode=c;break a}if(p>>>=w,q-=w,w=h-i,y>w){if(w=y-w,w>m&&e.sane){a.msg="invalid distance too far back",e.mode=c;break a}if(z=0,A=o,0===n){if(z+=l-w,x>w){x-=w;do C[h++]=o[z++];while(--w);z=h-y,A=C}}else if(w>n){if(z+=l+n-w,w-=n,x>w){x-=w;do C[h++]=o[z++];while(--w);if(z=0,x>n){w=n,x-=w;do C[h++]=o[z++];while(--w);z=h-y,A=C}}}else if(z+=n-w,x>w){x-=w;do C[h++]=o[z++];while(--w);z=h-y,A=C}for(;x>2;)C[h++]=A[z++],C[h++]=A[z++],C[h++]=A[z++],x-=3;x&&(C[h++]=A[z++],x>1&&(C[h++]=A[z++]))}else{z=h-y;do C[h++]=C[z++],C[h++]=C[z++],C[h++]=C[z++],x-=3;while(x>2);x&&(C[h++]=C[z++],x>1&&(C[h++]=C[z++]))}break}}break}}while(g>f&&j>h);x=q>>3,f-=x,q-=x<<3,p&=(1<<q)-1,a.next_in=f,a.next_out=h,a.avail_in=g>f?5+(g-f):5-(f-g),a.avail_out=j>h?257+(j-h):257-(h-j),e.hold=p,e.bits=q}},{}],35:[function(a,b,c){"use strict";function d(a){return(a>>>24&255)+(a>>>8&65280)+((65280&a)<<8)+((255&a)<<24)}function e(){this.mode=0,this.last=!1,this.wrap=0,this.havedict=!1,this.flags=0,this.dmax=0,this.check=0,this.total=0,this.head=null,this.wbits=0,this.wsize=0,this.whave=0,this.wnext=0,this.window=null,this.hold=0,this.bits=0,this.length=0,this.offset=0,this.extra=0,this.lencode=null,this.distcode=null,this.lenbits=0,this.distbits=0,this.ncode=0,this.nlen=0,this.ndist=0,this.have=0,this.next=null,this.lens=new r.Buf16(320),this.work=new r.Buf16(288),this.lendyn=null,this.distdyn=null,this.sane=0,this.back=0,this.was=0}function f(a){var b;return 
a&&a.state?(b=a.state,a.total_in=a.total_out=b.total=0,a.msg="",b.wrap&&(a.adler=1&b.wrap),b.mode=K,b.last=0,b.havedict=0,b.dmax=32768,b.head=null,b.hold=0,b.bits=0,b.lencode=b.lendyn=new r.Buf32(ob),b.distcode=b.distdyn=new r.Buf32(pb),b.sane=1,b.back=-1,C):F}function g(a){var b;return a&&a.state?(b=a.state,b.wsize=0,b.whave=0,b.wnext=0,f(a)):F}function h(a,b){var c,d;return a&&a.state?(d=a.state,0>b?(c=0,b=-b):(c=(b>>4)+1,48>b&&(b&=15)),b&&(8>b||b>15)?F:(null!==d.window&&d.wbits!==b&&(d.window=null),d.wrap=c,d.wbits=b,g(a))):F}function i(a,b){var c,d;return a?(d=new e,a.state=d,d.window=null,c=h(a,b),c!==C&&(a.state=null),c):F}function j(a){return i(a,rb)}function k(a){if(sb){var b;for(p=new r.Buf32(512),q=new r.Buf32(32),b=0;144>b;)a.lens[b++]=8;for(;256>b;)a.lens[b++]=9;for(;280>b;)a.lens[b++]=7;for(;288>b;)a.lens[b++]=8;for(v(x,a.lens,0,288,p,0,a.work,{bits:9}),b=0;32>b;)a.lens[b++]=5;v(y,a.lens,0,32,q,0,a.work,{bits:5}),sb=!1}a.lencode=p,a.lenbits=9,a.distcode=q,a.distbits=5}function l(a,b,c,d){var e,f=a.state;return null===f.window&&(f.wsize=1<<f.wbits,f.wnext=0,f.whave=0,f.window=new r.Buf8(f.wsize)),d>=f.wsize?(r.arraySet(f.window,b,c-f.wsize,f.wsize,0),f.wnext=0,f.whave=f.wsize):(e=f.wsize-f.wnext,e>d&&(e=d),r.arraySet(f.window,b,c-d,e,f.wnext),d-=e,d?(r.arraySet(f.window,b,c-d,d,0),f.wnext=d,f.whave=f.wsize):(f.wnext+=e,f.wnext===f.wsize&&(f.wnext=0),f.whave<f.wsize&&(f.whave+=e))),0}function m(a,b){var c,e,f,g,h,i,j,m,n,o,p,q,ob,pb,qb,rb,sb,tb,ub,vb,wb,xb,yb,zb,Ab=0,Bb=new r.Buf8(4),Cb=[16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15];if(!a||!a.state||!a.output||!a.input&&0!==a.avail_in)return F;c=a.state,c.mode===V&&(c.mode=W),h=a.next_out,f=a.output,j=a.avail_out,g=a.next_in,e=a.input,i=a.avail_in,m=c.hold,n=c.bits,o=i,p=j,xb=C;a:for(;;)switch(c.mode){case K:if(0===c.wrap){c.mode=W;break}for(;16>n;){if(0===i)break 
a;i--,m+=e[g++]<<n,n+=8}if(2&c.wrap&&35615===m){c.check=0,Bb[0]=255&m,Bb[1]=m>>>8&255,c.check=t(c.check,Bb,2,0),m=0,n=0,c.mode=L;break}if(c.flags=0,c.head&&(c.head.done=!1),!(1&c.wrap)||(((255&m)<<8)+(m>>8))%31){a.msg="incorrect header check",c.mode=lb;break}if((15&m)!==J){a.msg="unknown compression method",c.mode=lb;break}if(m>>>=4,n-=4,wb=(15&m)+8,0===c.wbits)c.wbits=wb;else if(wb>c.wbits){a.msg="invalid window size",c.mode=lb;break}c.dmax=1<<wb,a.adler=c.check=1,c.mode=512&m?T:V,m=0,n=0;break;case L:for(;16>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(c.flags=m,(255&c.flags)!==J){a.msg="unknown compression method",c.mode=lb;break}if(57344&c.flags){a.msg="unknown header flags set",c.mode=lb;break}c.head&&(c.head.text=m>>8&1),512&c.flags&&(Bb[0]=255&m,Bb[1]=m>>>8&255,c.check=t(c.check,Bb,2,0)),m=0,n=0,c.mode=M;case M:for(;32>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}c.head&&(c.head.time=m),512&c.flags&&(Bb[0]=255&m,Bb[1]=m>>>8&255,Bb[2]=m>>>16&255,Bb[3]=m>>>24&255,c.check=t(c.check,Bb,4,0)),m=0,n=0,c.mode=N;case N:for(;16>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}c.head&&(c.head.xflags=255&m,c.head.os=m>>8),512&c.flags&&(Bb[0]=255&m,Bb[1]=m>>>8&255,c.check=t(c.check,Bb,2,0)),m=0,n=0,c.mode=O;case O:if(1024&c.flags){for(;16>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}c.length=m,c.head&&(c.head.extra_len=m),512&c.flags&&(Bb[0]=255&m,Bb[1]=m>>>8&255,c.check=t(c.check,Bb,2,0)),m=0,n=0}else c.head&&(c.head.extra=null);c.mode=P;case P:if(1024&c.flags&&(q=c.length,q>i&&(q=i),q&&(c.head&&(wb=c.head.extra_len-c.length,c.head.extra||(c.head.extra=new Array(c.head.extra_len)),r.arraySet(c.head.extra,e,g,q,wb)),512&c.flags&&(c.check=t(c.check,e,q,g)),i-=q,g+=q,c.length-=q),c.length))break a;c.length=0,c.mode=Q;case Q:if(2048&c.flags){if(0===i)break a;q=0;do wb=e[g+q++],c.head&&wb&&c.length<65536&&(c.head.name+=String.fromCharCode(wb));while(wb&&i>q);if(512&c.flags&&(c.check=t(c.check,e,q,g)),i-=q,g+=q,wb)break a}else c.head&&(c.head.name=null);c.length=0,c.mode=R;case 
R:if(4096&c.flags){if(0===i)break a;q=0;do wb=e[g+q++],c.head&&wb&&c.length<65536&&(c.head.comment+=String.fromCharCode(wb));while(wb&&i>q);if(512&c.flags&&(c.check=t(c.check,e,q,g)),i-=q,g+=q,wb)break a}else c.head&&(c.head.comment=null);c.mode=S;case S:if(512&c.flags){for(;16>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(m!==(65535&c.check)){a.msg="header crc mismatch",c.mode=lb;break}m=0,n=0}c.head&&(c.head.hcrc=c.flags>>9&1,c.head.done=!0),a.adler=c.check=0,c.mode=V;break;case T:for(;32>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}a.adler=c.check=d(m),m=0,n=0,c.mode=U;case U:if(0===c.havedict)return a.next_out=h,a.avail_out=j,a.next_in=g,a.avail_in=i,c.hold=m,c.bits=n,E;a.adler=c.check=1,c.mode=V;case V:if(b===A||b===B)break a;case W:if(c.last){m>>>=7&n,n-=7&n,c.mode=ib;break}for(;3>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}switch(c.last=1&m,m>>>=1,n-=1,3&m){case 0:c.mode=X;break;case 1:if(k(c),c.mode=bb,b===B){m>>>=2,n-=2;break a}break;case 2:c.mode=$;break;case 3:a.msg="invalid block type",c.mode=lb}m>>>=2,n-=2;break;case X:for(m>>>=7&n,n-=7&n;32>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if((65535&m)!==(m>>>16^65535)){a.msg="invalid stored block lengths",c.mode=lb;break}if(c.length=65535&m,m=0,n=0,c.mode=Y,b===B)break a;case Y:c.mode=Z;case Z:if(q=c.length){if(q>i&&(q=i),q>j&&(q=j),0===q)break a;r.arraySet(f,e,g,q,h),i-=q,g+=q,j-=q,h+=q,c.length-=q;break}c.mode=V;break;case $:for(;14>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(c.nlen=(31&m)+257,m>>>=5,n-=5,c.ndist=(31&m)+1,m>>>=5,n-=5,c.ncode=(15&m)+4,m>>>=4,n-=4,c.nlen>286||c.ndist>30){a.msg="too many length or distance symbols",c.mode=lb;break}c.have=0,c.mode=_;case _:for(;c.have<c.ncode;){for(;3>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}c.lens[Cb[c.have++]]=7&m,m>>>=3,n-=3}for(;c.have<19;)c.lens[Cb[c.have++]]=0;if(c.lencode=c.lendyn,c.lenbits=7,yb={bits:c.lenbits},xb=v(w,c.lens,0,19,c.lencode,0,c.work,yb),c.lenbits=yb.bits,xb){a.msg="invalid code lengths set",c.mode=lb;break}c.have=0,c.mode=ab;case 
ab:for(;c.have<c.nlen+c.ndist;){for(;Ab=c.lencode[m&(1<<c.lenbits)-1],qb=Ab>>>24,rb=Ab>>>16&255,sb=65535&Ab,!(n>=qb);){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(16>sb)m>>>=qb,n-=qb,c.lens[c.have++]=sb;else{if(16===sb){for(zb=qb+2;zb>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(m>>>=qb,n-=qb,0===c.have){a.msg="invalid bit length repeat",c.mode=lb;break}wb=c.lens[c.have-1],q=3+(3&m),m>>>=2,n-=2}else if(17===sb){for(zb=qb+3;zb>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}m>>>=qb,n-=qb,wb=0,q=3+(7&m),m>>>=3,n-=3}else{for(zb=qb+7;zb>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}m>>>=qb,n-=qb,wb=0,q=11+(127&m),m>>>=7,n-=7}if(c.have+q>c.nlen+c.ndist){a.msg="invalid bit length repeat",c.mode=lb;break}for(;q--;)c.lens[c.have++]=wb}}if(c.mode===lb)break;if(0===c.lens[256]){a.msg="invalid code -- missing end-of-block",c.mode=lb;break}if(c.lenbits=9,yb={bits:c.lenbits},xb=v(x,c.lens,0,c.nlen,c.lencode,0,c.work,yb),c.lenbits=yb.bits,xb){a.msg="invalid literal/lengths set",c.mode=lb;break}if(c.distbits=6,c.distcode=c.distdyn,yb={bits:c.distbits},xb=v(y,c.lens,c.nlen,c.ndist,c.distcode,0,c.work,yb),c.distbits=yb.bits,xb){a.msg="invalid distances set",c.mode=lb;break}if(c.mode=bb,b===B)break a;case bb:c.mode=cb;case cb:if(i>=6&&j>=258){a.next_out=h,a.avail_out=j,a.next_in=g,a.avail_in=i,c.hold=m,c.bits=n,u(a,p),h=a.next_out,f=a.output,j=a.avail_out,g=a.next_in,e=a.input,i=a.avail_in,m=c.hold,n=c.bits,c.mode===V&&(c.back=-1);break}for(c.back=0;Ab=c.lencode[m&(1<<c.lenbits)-1],qb=Ab>>>24,rb=Ab>>>16&255,sb=65535&Ab,!(n>=qb);){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(rb&&0===(240&rb)){for(tb=qb,ub=rb,vb=sb;Ab=c.lencode[vb+((m&(1<<tb+ub)-1)>>tb)],qb=Ab>>>24,rb=Ab>>>16&255,sb=65535&Ab,!(n>=tb+qb);){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}m>>>=tb,n-=tb,c.back+=tb}if(m>>>=qb,n-=qb,c.back+=qb,c.length=sb,0===rb){c.mode=hb;break}if(32&rb){c.back=-1,c.mode=V;break}if(64&rb){a.msg="invalid literal/length code",c.mode=lb;break}c.extra=15&rb,c.mode=db;case 
db:if(c.extra){for(zb=c.extra;zb>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}c.length+=m&(1<<c.extra)-1,m>>>=c.extra,n-=c.extra,c.back+=c.extra}c.was=c.length,c.mode=eb;case eb:for(;Ab=c.distcode[m&(1<<c.distbits)-1],qb=Ab>>>24,rb=Ab>>>16&255,sb=65535&Ab,!(n>=qb);){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(0===(240&rb)){for(tb=qb,ub=rb,vb=sb;Ab=c.distcode[vb+((m&(1<<tb+ub)-1)>>tb)],qb=Ab>>>24,rb=Ab>>>16&255,sb=65535&Ab,!(n>=tb+qb);){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}m>>>=tb,n-=tb,c.back+=tb}if(m>>>=qb,n-=qb,c.back+=qb,64&rb){a.msg="invalid distance code",c.mode=lb;break}c.offset=sb,c.extra=15&rb,c.mode=fb;case fb:if(c.extra){for(zb=c.extra;zb>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}c.offset+=m&(1<<c.extra)-1,m>>>=c.extra,n-=c.extra,c.back+=c.extra}if(c.offset>c.dmax){a.msg="invalid distance too far back",c.mode=lb;break}c.mode=gb;case gb:if(0===j)break a;if(q=p-j,c.offset>q){if(q=c.offset-q,q>c.whave&&c.sane){a.msg="invalid distance too far back",c.mode=lb;break}q>c.wnext?(q-=c.wnext,ob=c.wsize-q):ob=c.wnext-q,q>c.length&&(q=c.length),pb=c.window}else pb=f,ob=h-c.offset,q=c.length;q>j&&(q=j),j-=q,c.length-=q;do f[h++]=pb[ob++];while(--q);0===c.length&&(c.mode=cb);break;case hb:if(0===j)break a;f[h++]=c.length,j--,c.mode=cb;break;case ib:if(c.wrap){for(;32>n;){if(0===i)break a;i--,m|=e[g++]<<n,n+=8}if(p-=j,a.total_out+=p,c.total+=p,p&&(a.adler=c.check=c.flags?t(c.check,f,p,h-p):s(c.check,f,p,h-p)),p=j,(c.flags?m:d(m))!==c.check){a.msg="incorrect data check",c.mode=lb;break}m=0,n=0}c.mode=jb;case jb:if(c.wrap&&c.flags){for(;32>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(m!==(4294967295&c.total)){a.msg="incorrect length check",c.mode=lb;break}m=0,n=0}c.mode=kb;case kb:xb=D;break a;case lb:xb=G;break a;case mb:return H;case nb:default:return F}return 
a.next_out=h,a.avail_out=j,a.next_in=g,a.avail_in=i,c.hold=m,c.bits=n,(c.wsize||p!==a.avail_out&&c.mode<lb&&(c.mode<ib||b!==z))&&l(a,a.output,a.next_out,p-a.avail_out)?(c.mode=mb,H):(o-=a.avail_in,p-=a.avail_out,a.total_in+=o,a.total_out+=p,c.total+=p,c.wrap&&p&&(a.adler=c.check=c.flags?t(c.check,f,p,a.next_out-p):s(c.check,f,p,a.next_out-p)),a.data_type=c.bits+(c.last?64:0)+(c.mode===V?128:0)+(c.mode===bb||c.mode===Y?256:0),(0===o&&0===p||b===z)&&xb===C&&(xb=I),xb)}function n(a){if(!a||!a.state)return F;var b=a.state;return b.window&&(b.window=null),a.state=null,C}function o(a,b){var c;return a&&a.state?(c=a.state,0===(2&c.wrap)?F:(c.head=b,b.done=!1,C)):F}var p,q,r=a("../utils/common"),s=a("./adler32"),t=a("./crc32"),u=a("./inffast"),v=a("./inftrees"),w=0,x=1,y=2,z=4,A=5,B=6,C=0,D=1,E=2,F=-2,G=-3,H=-4,I=-5,J=8,K=1,L=2,M=3,N=4,O=5,P=6,Q=7,R=8,S=9,T=10,U=11,V=12,W=13,X=14,Y=15,Z=16,$=17,_=18,ab=19,bb=20,cb=21,db=22,eb=23,fb=24,gb=25,hb=26,ib=27,jb=28,kb=29,lb=30,mb=31,nb=32,ob=852,pb=592,qb=15,rb=qb,sb=!0;c.inflateReset=g,c.inflateReset2=h,c.inflateResetKeep=f,c.inflateInit=j,c.inflateInit2=i,c.inflate=m,c.inflateEnd=n,c.inflateGetHeader=o,c.inflateInfo="pako inflate (from Nodeca project)"},{"../utils/common":27,"./adler32":29,"./crc32":31,"./inffast":34,"./inftrees":36}],36:[function(a,b){"use strict";var c=a("../utils/common"),d=15,e=852,f=592,g=0,h=1,i=2,j=[3,4,5,6,7,8,9,10,11,13,15,17,19,23,27,31,35,43,51,59,67,83,99,115,131,163,195,227,258,0,0],k=[16,16,16,16,16,16,16,16,17,17,17,17,18,18,18,18,19,19,19,19,20,20,20,20,21,21,21,21,16,72,78],l=[1,2,3,4,5,7,9,13,17,25,33,49,65,97,129,193,257,385,513,769,1025,1537,2049,3073,4097,6145,8193,12289,16385,24577,0,0],m=[16,16,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,24,24,25,25,26,26,27,27,28,28,29,29,64,64];b.exports=function(a,b,n,o,p,q,r,s){var t,u,v,w,x,y,z,A,B,C=s.bits,D=0,E=0,F=0,G=0,H=0,I=0,J=0,K=0,L=0,M=0,N=null,O=0,P=new c.Buf16(d+1),Q=new 
c.Buf16(d+1),R=null,S=0;for(D=0;d>=D;D++)P[D]=0;for(E=0;o>E;E++)P[b[n+E]]++;for(H=C,G=d;G>=1&&0===P[G];G--);if(H>G&&(H=G),0===G)return p[q++]=20971520,p[q++]=20971520,s.bits=1,0;for(F=1;G>F&&0===P[F];F++);for(F>H&&(H=F),K=1,D=1;d>=D;D++)if(K<<=1,K-=P[D],0>K)return-1;if(K>0&&(a===g||1!==G))return-1;for(Q[1]=0,D=1;d>D;D++)Q[D+1]=Q[D]+P[D];for(E=0;o>E;E++)0!==b[n+E]&&(r[Q[b[n+E]]++]=E);if(a===g?(N=R=r,y=19):a===h?(N=j,O-=257,R=k,S-=257,y=256):(N=l,R=m,y=-1),M=0,E=0,D=F,x=q,I=H,J=0,v=-1,L=1<<H,w=L-1,a===h&&L>e||a===i&&L>f)return 1;for(var T=0;;){T++,z=D-J,r[E]<y?(A=0,B=r[E]):r[E]>y?(A=R[S+r[E]],B=N[O+r[E]]):(A=96,B=0),t=1<<D-J,u=1<<I,F=u;do u-=t,p[x+(M>>J)+u]=z<<24|A<<16|B|0;while(0!==u);for(t=1<<D-1;M&t;)t>>=1;if(0!==t?(M&=t-1,M+=t):M=0,E++,0===--P[D]){if(D===G)break;D=b[n+r[E]]}if(D>H&&(M&w)!==v){for(0===J&&(J=H),x+=F,I=D-J,K=1<<I;G>I+J&&(K-=P[I+J],!(0>=K));)I++,K<<=1;if(L+=1<<I,a===h&&L>e||a===i&&L>f)return 1;v=M&w,p[v]=H<<24|I<<16|x-q|0}}return 0!==M&&(p[x+M]=D-J<<24|64<<16|0),s.bits=H,0}},{"../utils/common":27}],37:[function(a,b){"use strict";b.exports={2:"need dictionary",1:"stream end",0:"","-1":"file error","-2":"stream error","-3":"data error","-4":"insufficient memory","-5":"buffer error","-6":"incompatible version"}},{}],38:[function(a,b,c){"use strict";function d(a){for(var b=a.length;--b>=0;)a[b]=0}function e(a){return 256>a?gb[a]:gb[256+(a>>>7)]}function f(a,b){a.pending_buf[a.pending++]=255&b,a.pending_buf[a.pending++]=b>>>8&255}function g(a,b,c){a.bi_valid>V-c?(a.bi_buf|=b<<a.bi_valid&65535,f(a,a.bi_buf),a.bi_buf=b>>V-a.bi_valid,a.bi_valid+=c-V):(a.bi_buf|=b<<a.bi_valid&65535,a.bi_valid+=c)}function h(a,b,c){g(a,c[2*b],c[2*b+1])}function i(a,b){var c=0;do c|=1&a,a>>>=1,c<<=1;while(--b>0);return c>>>1}function j(a){16===a.bi_valid?(f(a,a.bi_buf),a.bi_buf=0,a.bi_valid=0):a.bi_valid>=8&&(a.pending_buf[a.pending++]=255&a.bi_buf,a.bi_buf>>=8,a.bi_valid-=8)}function k(a,b){var 
c,d,e,f,g,h,i=b.dyn_tree,j=b.max_code,k=b.stat_desc.static_tree,l=b.stat_desc.has_stree,m=b.stat_desc.extra_bits,n=b.stat_desc.extra_base,o=b.stat_desc.max_length,p=0;for(f=0;U>=f;f++)a.bl_count[f]=0;for(i[2*a.heap[a.heap_max]+1]=0,c=a.heap_max+1;T>c;c++)d=a.heap[c],f=i[2*i[2*d+1]+1]+1,f>o&&(f=o,p++),i[2*d+1]=f,d>j||(a.bl_count[f]++,g=0,d>=n&&(g=m[d-n]),h=i[2*d],a.opt_len+=h*(f+g),l&&(a.static_len+=h*(k[2*d+1]+g)));if(0!==p){do{for(f=o-1;0===a.bl_count[f];)f--;a.bl_count[f]--,a.bl_count[f+1]+=2,a.bl_count[o]--,p-=2}while(p>0);for(f=o;0!==f;f--)for(d=a.bl_count[f];0!==d;)e=a.heap[--c],e>j||(i[2*e+1]!==f&&(a.opt_len+=(f-i[2*e+1])*i[2*e],i[2*e+1]=f),d--)}}function l(a,b,c){var d,e,f=new Array(U+1),g=0;for(d=1;U>=d;d++)f[d]=g=g+c[d-1]<<1;for(e=0;b>=e;e++){var h=a[2*e+1];0!==h&&(a[2*e]=i(f[h]++,h))}}function m(){var a,b,c,d,e,f=new Array(U+1);for(c=0,d=0;O-1>d;d++)for(ib[d]=c,a=0;a<1<<_[d];a++)hb[c++]=d;for(hb[c-1]=d,e=0,d=0;16>d;d++)for(jb[d]=e,a=0;a<1<<ab[d];a++)gb[e++]=d;for(e>>=7;R>d;d++)for(jb[d]=e<<7,a=0;a<1<<ab[d]-7;a++)gb[256+e++]=d;for(b=0;U>=b;b++)f[b]=0;for(a=0;143>=a;)eb[2*a+1]=8,a++,f[8]++;for(;255>=a;)eb[2*a+1]=9,a++,f[9]++;for(;279>=a;)eb[2*a+1]=7,a++,f[7]++;for(;287>=a;)eb[2*a+1]=8,a++,f[8]++;for(l(eb,Q+1,f),a=0;R>a;a++)fb[2*a+1]=5,fb[2*a]=i(a,5);kb=new nb(eb,_,P+1,Q,U),lb=new nb(fb,ab,0,R,U),mb=new nb(new Array(0),bb,0,S,W)}function n(a){var b;for(b=0;Q>b;b++)a.dyn_ltree[2*b]=0;for(b=0;R>b;b++)a.dyn_dtree[2*b]=0;for(b=0;S>b;b++)a.bl_tree[2*b]=0;a.dyn_ltree[2*X]=1,a.opt_len=a.static_len=0,a.last_lit=a.matches=0}function o(a){a.bi_valid>8?f(a,a.bi_buf):a.bi_valid>0&&(a.pending_buf[a.pending++]=a.bi_buf),a.bi_buf=0,a.bi_valid=0}function p(a,b,c,d){o(a),d&&(f(a,c),f(a,~c)),E.arraySet(a.pending_buf,a.window,b,c,a.pending),a.pending+=c}function q(a,b,c,d){var e=2*b,f=2*c;return a[e]<a[f]||a[e]===a[f]&&d[b]<=d[c]}function r(a,b,c){for(var 
d=a.heap[c],e=c<<1;e<=a.heap_len&&(e<a.heap_len&&q(b,a.heap[e+1],a.heap[e],a.depth)&&e++,!q(b,d,a.heap[e],a.depth));)a.heap[c]=a.heap[e],c=e,e<<=1;a.heap[c]=d}function s(a,b,c){var d,f,i,j,k=0;if(0!==a.last_lit)do d=a.pending_buf[a.d_buf+2*k]<<8|a.pending_buf[a.d_buf+2*k+1],f=a.pending_buf[a.l_buf+k],k++,0===d?h(a,f,b):(i=hb[f],h(a,i+P+1,b),j=_[i],0!==j&&(f-=ib[i],g(a,f,j)),d--,i=e(d),h(a,i,c),j=ab[i],0!==j&&(d-=jb[i],g(a,d,j)));while(k<a.last_lit);h(a,X,b)}function t(a,b){var c,d,e,f=b.dyn_tree,g=b.stat_desc.static_tree,h=b.stat_desc.has_stree,i=b.stat_desc.elems,j=-1;for(a.heap_len=0,a.heap_max=T,c=0;i>c;c++)0!==f[2*c]?(a.heap[++a.heap_len]=j=c,a.depth[c]=0):f[2*c+1]=0;for(;a.heap_len<2;)e=a.heap[++a.heap_len]=2>j?++j:0,f[2*e]=1,a.depth[e]=0,a.opt_len--,h&&(a.static_len-=g[2*e+1]);for(b.max_code=j,c=a.heap_len>>1;c>=1;c--)r(a,f,c);e=i;do c=a.heap[1],a.heap[1]=a.heap[a.heap_len--],r(a,f,1),d=a.heap[1],a.heap[--a.heap_max]=c,a.heap[--a.heap_max]=d,f[2*e]=f[2*c]+f[2*d],a.depth[e]=(a.depth[c]>=a.depth[d]?a.depth[c]:a.depth[d])+1,f[2*c+1]=f[2*d+1]=e,a.heap[1]=e++,r(a,f,1);while(a.heap_len>=2);a.heap[--a.heap_max]=a.heap[1],k(a,b),l(f,j,a.bl_count)}function u(a,b,c){var d,e,f=-1,g=b[1],h=0,i=7,j=4;for(0===g&&(i=138,j=3),b[2*(c+1)+1]=65535,d=0;c>=d;d++)e=g,g=b[2*(d+1)+1],++h<i&&e===g||(j>h?a.bl_tree[2*e]+=h:0!==e?(e!==f&&a.bl_tree[2*e]++,a.bl_tree[2*Y]++):10>=h?a.bl_tree[2*Z]++:a.bl_tree[2*$]++,h=0,f=e,0===g?(i=138,j=3):e===g?(i=6,j=3):(i=7,j=4))}function v(a,b,c){var d,e,f=-1,i=b[1],j=0,k=7,l=4;for(0===i&&(k=138,l=3),d=0;c>=d;d++)if(e=i,i=b[2*(d+1)+1],!(++j<k&&e===i)){if(l>j){do h(a,e,a.bl_tree);while(0!==--j)}else 0!==e?(e!==f&&(h(a,e,a.bl_tree),j--),h(a,Y,a.bl_tree),g(a,j-3,2)):10>=j?(h(a,Z,a.bl_tree),g(a,j-3,3)):(h(a,$,a.bl_tree),g(a,j-11,7));j=0,f=e,0===i?(k=138,l=3):e===i?(k=6,l=3):(k=7,l=4)}}function w(a){var b;for(u(a,a.dyn_ltree,a.l_desc.max_code),u(a,a.dyn_dtree,a.d_desc.max_code),t(a,a.bl_desc),b=S-1;b>=3&&0===a.bl_tree[2*cb[b]+1];b--);return 
a.opt_len+=3*(b+1)+5+5+4,b}function x(a,b,c,d){var e;for(g(a,b-257,5),g(a,c-1,5),g(a,d-4,4),e=0;d>e;e++)g(a,a.bl_tree[2*cb[e]+1],3);v(a,a.dyn_ltree,b-1),v(a,a.dyn_dtree,c-1)}function y(a){var b,c=4093624447;for(b=0;31>=b;b++,c>>>=1)if(1&c&&0!==a.dyn_ltree[2*b])return G;if(0!==a.dyn_ltree[18]||0!==a.dyn_ltree[20]||0!==a.dyn_ltree[26])return H;for(b=32;P>b;b++)if(0!==a.dyn_ltree[2*b])return H;return G}function z(a){pb||(m(),pb=!0),a.l_desc=new ob(a.dyn_ltree,kb),a.d_desc=new ob(a.dyn_dtree,lb),a.bl_desc=new ob(a.bl_tree,mb),a.bi_buf=0,a.bi_valid=0,n(a)}function A(a,b,c,d){g(a,(J<<1)+(d?1:0),3),p(a,b,c,!0)}function B(a){g(a,K<<1,3),h(a,X,eb),j(a)}function C(a,b,c,d){var e,f,h=0;a.level>0?(a.strm.data_type===I&&(a.strm.data_type=y(a)),t(a,a.l_desc),t(a,a.d_desc),h=w(a),e=a.opt_len+3+7>>>3,f=a.static_len+3+7>>>3,e>=f&&(e=f)):e=f=c+5,e>=c+4&&-1!==b?A(a,b,c,d):a.strategy===F||f===e?(g(a,(K<<1)+(d?1:0),3),s(a,eb,fb)):(g(a,(L<<1)+(d?1:0),3),x(a,a.l_desc.max_code+1,a.d_desc.max_code+1,h+1),s(a,a.dyn_ltree,a.dyn_dtree)),n(a),d&&o(a)}function D(a,b,c){return a.pending_buf[a.d_buf+2*a.last_lit]=b>>>8&255,a.pending_buf[a.d_buf+2*a.last_lit+1]=255&b,a.pending_buf[a.l_buf+a.last_lit]=255&c,a.last_lit++,0===b?a.dyn_ltree[2*c]++:(a.matches++,b--,a.dyn_ltree[2*(hb[c]+P+1)]++,a.dyn_dtree[2*e(b)]++),a.last_lit===a.lit_bufsize-1}var E=a("../utils/common"),F=4,G=0,H=1,I=2,J=0,K=1,L=2,M=3,N=258,O=29,P=256,Q=P+1+O,R=30,S=19,T=2*Q+1,U=15,V=16,W=7,X=256,Y=16,Z=17,$=18,_=[0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,0],ab=[0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13],bb=[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,3,7],cb=[16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15],db=512,eb=new Array(2*(Q+2));d(eb);var fb=new Array(2*R);d(fb);var gb=new Array(db);d(gb);var hb=new Array(N-M+1);d(hb);var ib=new Array(O);d(ib);var jb=new Array(R);d(jb);var 
kb,lb,mb,nb=function(a,b,c,d,e){this.static_tree=a,this.extra_bits=b,this.extra_base=c,this.elems=d,this.max_length=e,this.has_stree=a&&a.length},ob=function(a,b){this.dyn_tree=a,this.max_code=0,this.stat_desc=b},pb=!1;c._tr_init=z,c._tr_stored_block=A,c._tr_flush_block=C,c._tr_tally=D,c._tr_align=B},{"../utils/common":27}],39:[function(a,b){"use strict";function c(){this.input=null,this.next_in=0,this.avail_in=0,this.total_in=0,this.output=null,this.next_out=0,this.avail_out=0,this.total_out=0,this.msg="",this.state=null,this.data_type=2,this.adler=0}b.exports=c},{}]},{},[9])(9)});'use strict';if(tr.isVinn){global.JSZip=global.window.JSZip;global.window=undefined;}else if(tr.isNode){const jsZipAbsPath=HTMLImportsLoader.hrefToAbsolutePath('/jszip.min.js');const jsZipModule=require(jsZipAbsPath);global.JSZip=jsZipModule;}'use strict';tr.exportTo('tr.e.importer',function(){function ZipImporter(model,eventData){if(eventData instanceof ArrayBuffer){eventData=new Uint8Array(eventData);} this.model_=model;this.eventData_=eventData;} ZipImporter.canImport=function(eventData){let header;if(eventData instanceof ArrayBuffer){header=new Uint8Array(eventData.slice(0,2));}else if(typeof(eventData)==='string'||eventData instanceof String){header=[eventData.charCodeAt(0),eventData.charCodeAt(1)];}else{return false;} return header[0]==='P'.charCodeAt(0)&&header[1]==='K'.charCodeAt(0);};ZipImporter.prototype={__proto__:tr.importer.Importer.prototype,get importerName(){return'ZipImporter';},isTraceDataContainer(){return true;},extractSubtraces(){const zip=new JSZip(this.eventData_);const subtraces=[];for(const idx in zip.files){subtraces.push(zip.files[idx].asBinary());} @@ -5549,7 +5549,7 @@ XMarkerAnnotationView.prototype={__proto__:tr.ui.annotations.AnnotationView.prototype,draw(ctx){const dt=this.viewport_.currentDisplayTransform;const 
viewX=dt.xWorldToView(this.annotation_.timestamp);ctx.beginPath();tr.ui.b.drawLine(ctx,viewX,0,viewX,ctx.canvas.height);ctx.strokeStyle=this.annotation_.strokeStyle;ctx.stroke();}};return{XMarkerAnnotationView,};});'use strict';tr.exportTo('tr.model',function(){function XMarkerAnnotation(timestamp){tr.model.Annotation.apply(this,arguments);this.timestamp=timestamp;this.strokeStyle='rgba(0, 0, 255, 0.5)';} XMarkerAnnotation.fromDict=function(dict){return new XMarkerAnnotation(dict.args.timestamp);};XMarkerAnnotation.prototype={__proto__:tr.model.Annotation.prototype,toDict(){return{typeName:'xmarker',args:{timestamp:this.timestamp}};},createView_(viewport){return new tr.ui.annotations.XMarkerAnnotationView(viewport,this);}};tr.model.Annotation.register(XMarkerAnnotation,{typeName:'xmarker'});return{XMarkerAnnotation,};});'use strict';tr.exportTo('tr.e.importer',function(){const Base64=tr.b.Base64;const deepCopy=tr.b.deepCopy;const ColorScheme=tr.b.ColorScheme;const HeapDumpTraceEventImporter=tr.e.importer.HeapDumpTraceEventImporter;const LegacyHeapDumpTraceEventImporter=tr.e.importer.LegacyHeapDumpTraceEventImporter;const StreamingEventExpander=tr.e.importer.StreamingEventExpander;const ProfilingDictionaryReader=tr.e.importer.ProfilingDictionaryReader;function getEventColor(event,opt_customName){if(event.cname){return ColorScheme.getColorIdForReservedName(event.cname);}else if(opt_customName||event.name){return ColorScheme.getColorIdForGeneralPurposeString(opt_customName||event.name);}} function isLegacyChromeClockSyncEvent(event){return event.name!==undefined&&event.name.startsWith(LEGACY_CHROME_CLOCK_SYNC_EVENT_NAME_PREFIX)&&((event.ph==='S')||(event.ph==='F'));} -const PRODUCER='producer';const CONSUMER='consumer';const STEP='step';const BACKGROUND=tr.model.ContainerMemoryDump.LevelOfDetail.BACKGROUND;const LIGHT=tr.model.ContainerMemoryDump.LevelOfDetail.LIGHT;const DETAILED=tr.model.ContainerMemoryDump.LevelOfDetail.DETAILED;const 
MEMORY_DUMP_LEVEL_OF_DETAIL_ORDER=[undefined,BACKGROUND,LIGHT,DETAILED];const GLOBAL_MEMORY_ALLOCATOR_DUMP_PREFIX='global/';const LEGACY_CHROME_CLOCK_SYNC_EVENT_NAME_PREFIX='ClockSyncEvent.';const BYTE_STAT_NAME_MAP={'pc':'privateCleanResident','pd':'privateDirtyResident','sc':'sharedCleanResident','sd':'sharedDirtyResident','pss':'proportionalResident','sw':'swapped'};const WEAK_MEMORY_ALLOCATOR_DUMP_FLAG=1<<0;const OBJECT_TYPE_NAME_PATTERNS=[{prefix:'const char *WOW::getStringWithTypeName() [T = ',suffix:']'},{prefix:'const char* WOW::getStringWithTypeName() [with T = ',suffix:']'},{prefix:'const char *__cdecl WOW::getStringWithTypeName<',suffix:'>(void)'}];const SUBTRACE_FIELDS=new Set(['powerTraceAsString','systemTraceEvents',]);const NON_METADATA_FIELDS=new Set(['displayTimeUnit','samples','stackFrames','traceAnnotations','traceEvents',...SUBTRACE_FIELDS]);function TraceEventImporter(model,eventData){this.hasEvents_=undefined;this.importPriority=1;this.model_=model;this.events_=undefined;this.sampleEvents_=undefined;this.stackFrameEvents_=undefined;this.stackFrameTree_=new tr.model.ProfileTree();this.subtraces_=[];this.eventsWereFromString_=false;this.softwareMeasuredCpuCount_=undefined;this.allAsyncEvents_=[];this.allFlowEvents_=[];this.allObjectEvents_=[];this.contextProcessorPerThread={};this.traceEventSampleStackFramesByName_={};this.v8ProcessCodeMaps_={};this.v8ProcessRootStackFrame_={};this.v8SamplingData_=[];this.profileTrees_=new Map();this.profileInfo_=new Map();this.legacyChromeClockSyncStartEvent_=undefined;this.legacyChromeClockSyncFinishEvent_=undefined;this.allMemoryDumpEvents_={};this.heapProfileExpander=new ProfilingDictionaryReader();this.objectTypeNameMap_={};this.clockDomainId_=tr.model.ClockDomainId.UNKNOWN_CHROME_LEGACY;this.toModelTime_=undefined;if(typeof(eventData)==='string'||eventData instanceof 
String){eventData=eventData.trim();if(eventData[0]==='['){eventData=eventData.replace(/\s*,\s*$/,'');if(eventData[eventData.length-1]!==']'){eventData=eventData+']';}} +const PRODUCER='producer';const CONSUMER='consumer';const STEP='step';const BACKGROUND=tr.model.ContainerMemoryDump.LevelOfDetail.BACKGROUND;const LIGHT=tr.model.ContainerMemoryDump.LevelOfDetail.LIGHT;const DETAILED=tr.model.ContainerMemoryDump.LevelOfDetail.DETAILED;const MEMORY_DUMP_LEVEL_OF_DETAIL_ORDER=[undefined,BACKGROUND,LIGHT,DETAILED];const GLOBAL_MEMORY_ALLOCATOR_DUMP_PREFIX='global/';const LEGACY_CHROME_CLOCK_SYNC_EVENT_NAME_PREFIX='ClockSyncEvent.';const BYTE_STAT_NAME_MAP={'pc':'privateCleanResident','pd':'privateDirtyResident','sc':'sharedCleanResident','sd':'sharedDirtyResident','pss':'proportionalResident','sw':'swapped'};const WEAK_MEMORY_ALLOCATOR_DUMP_FLAG=1<<0;const OBJECT_TYPE_NAME_PATTERNS=[{prefix:'const char *WTF::getStringWithTypeName() [T = ',suffix:']'},{prefix:'const char* WTF::getStringWithTypeName() [with T = ',suffix:']'},{prefix:'const char *__cdecl WTF::getStringWithTypeName<',suffix:'>(void)'}];const SUBTRACE_FIELDS=new Set(['powerTraceAsString','systemTraceEvents',]);const NON_METADATA_FIELDS=new Set(['displayTimeUnit','samples','stackFrames','traceAnnotations','traceEvents',...SUBTRACE_FIELDS]);function TraceEventImporter(model,eventData){this.hasEvents_=undefined;this.importPriority=1;this.model_=model;this.events_=undefined;this.sampleEvents_=undefined;this.stackFrameEvents_=undefined;this.stackFrameTree_=new tr.model.ProfileTree();this.subtraces_=[];this.eventsWereFromString_=false;this.softwareMeasuredCpuCount_=undefined;this.allAsyncEvents_=[];this.allFlowEvents_=[];this.allObjectEvents_=[];this.contextProcessorPerThread={};this.traceEventSampleStackFramesByName_={};this.v8ProcessCodeMaps_={};this.v8ProcessRootStackFrame_={};this.v8SamplingData_=[];this.profileTrees_=new Map();this.profileInfo_=new 
Map();this.legacyChromeClockSyncStartEvent_=undefined;this.legacyChromeClockSyncFinishEvent_=undefined;this.allMemoryDumpEvents_={};this.heapProfileExpander=new ProfilingDictionaryReader();this.objectTypeNameMap_={};this.clockDomainId_=tr.model.ClockDomainId.UNKNOWN_CHROME_LEGACY;this.toModelTime_=undefined;if(typeof(eventData)==='string'||eventData instanceof String){eventData=eventData.trim();if(eventData[0]==='['){eventData=eventData.replace(/\s*,\s*$/,'');if(eventData[eventData.length-1]!==']'){eventData=eventData+']';}} this.events_=JSON.parse(eventData);this.eventsWereFromString_=true;}else{this.events_=eventData;} if(this.events_.traceEvents){const container=this.events_;this.events_=this.events_.traceEvents;for(const subtraceField of SUBTRACE_FIELDS){if(container[subtraceField]){this.storeSubtrace_(container[subtraceField]);}} this.storeSamples_(container.samples);this.storeStackFrames_(container.stackFrames);this.storeDisplayTimeUnit_(container.displayTimeUnit);this.storeTraceAnnotations_(container.traceAnnotations);this.storeMetadata_(container);}else if(this.events_ instanceof tr.b.TraceStream){const parser=oboe().node('{cat ph}',function(e){return oboe.drop;}).node('!.powerTraceAsString',this.storeSubtrace_.bind(this)).node('!.systemTraceEvents',this.storeSubtrace_.bind(this)).node('!.samples',this.storeSamples_.bind(this)).node('!.stackFrames',this.storeStackFrames_.bind(this)).node('!.displayTimeUnit',this.storeDisplayTimeUnit_.bind(this)).node('!.traceAnnotations',this.storeTraceAnnotations_.bind(this)).done(this.storeMetadata_.bind(this));this.events_.rewind();while(this.events_.hasData){parser.write(this.events_.readNumBytes());} @@ -6970,7 +6970,7 @@ get length(){return this._diagnostics.length;}*[Symbol.iterator](){for(const diagnostic of this._diagnostics)yield diagnostic;} asDictInto_(d){d.diagnostics=this._diagnostics.map(d=>d.asDictOrReference());} static fromDict(d){return new UnmergeableDiagnosticSet(d.diagnostics.map(d=>((typeof 
d==='string')?new tr.v.d.DiagnosticRef(d):tr.v.d.Diagnostic.fromDict(d))));}} -tr.v.d.Diagnostic.register(UnmergeableDiagnosticSet,{elementName:'tr-v-ui-unmergeable-diagnostic-set-span'});return{UnmergeableDiagnosticSet,};});'use strict';tr.exportTo('tr.v.d',function(){const RESERVED_INFOS={ANGLE_REVISIONS:{name:'angleRevisions',type:tr.v.d.GenericSet},ARCHITECTURES:{name:'architectures',type:tr.v.d.GenericSet},BENCHMARKS:{name:'benchmarks',type:tr.v.d.GenericSet},BENCHMARK_START:{name:'benchmarkStart',type:tr.v.d.DateRange},BENCHMARK_DESCRIPTIONS:{name:'benchmarkDescriptions',type:tr.v.d.GenericSet},BOTS:{name:'bots',type:tr.v.d.GenericSet},BUG_COMPONENTS:{name:'bugComponents',type:tr.v.d.GenericSet},BUILDS:{name:'builds',type:tr.v.d.GenericSet},CATAPULT_REVISIONS:{name:'catapultRevisions',type:tr.v.d.GenericSet},CHROMIUM_COMMIT_POSITIONS:{name:'chromiumCommitPositions',type:tr.v.d.GenericSet},CHROMIUM_REVISIONS:{name:'chromiumRevisions',type:tr.v.d.GenericSet},DEVICE_IDS:{name:'deviceIds',type:tr.v.d.GenericSet},GPUS:{name:'gpus',type:tr.v.d.GenericSet},GROUPING_PATH:{name:'groupingPath',type:tr.v.d.GroupingPath},IS_REFERENCE_BUILD:{name:'isReferenceBuild',type:tr.v.d.GenericSet},LABELS:{name:'labels',type:tr.v.d.GenericSet},LOG_URLS:{name:'logUrls',type:tr.v.d.GenericSet},PRIMARYS:{name:'primarys',type:tr.v.d.GenericSet},MEMORY_AMOUNTS:{name:'memoryAmounts',type:tr.v.d.GenericSet},MERGED_FROM:{name:'mergedFrom',type:tr.v.d.RelatedHistogramMap},MERGED_TO:{name:'mergedTo',type:tr.v.d.RelatedHistogramMap},OS_NAMES:{name:'osNames',type:tr.v.d.GenericSet},OS_VERSIONS:{name:'osVersions',type:tr.v.d.GenericSet},OWNERS:{name:'owners',type:tr.v.d.GenericSet},PRODUCT_VERSIONS:{name:'productVersions',type:tr.v.d.GenericSet},RELATED_NAMES:{name:'relatedNames',type:tr.v.d.GenericSet},SKIA_REVISIONS:{name:'skiaRevisions',type:tr.v.d.GenericSet},STORIES:{name:'stories',type:tr.v.d.GenericSet},STORYSET_REPEATS:{name:'storysetRepeats',type:tr.v.d.GenericSet},STORY_TAGS:{name:'sto
ryTags',type:tr.v.d.GenericSet},TAG_MAP:{name:'tagmap',type:tr.v.d.TagMap},TRACE_START:{name:'traceStart',type:tr.v.d.DateRange},TRACE_URLS:{name:'traceUrls',type:tr.v.d.GenericSet},V8_COMMIT_POSITIONS:{name:'v8CommitPositions',type:tr.v.d.DateRange},V8_REVISIONS:{name:'v8Revisions',type:tr.v.d.GenericSet},WEBRTC_REVISIONS:{name:'webrtcRevisions',type:tr.v.d.GenericSet},};const RESERVED_NAMES={};const RESERVED_NAMES_TO_TYPES=new Map();for(const[codename,info]of Object.entries(RESERVED_INFOS)){RESERVED_NAMES[codename]=info.name;if(RESERVED_NAMES_TO_TYPES.has(info.name)){throw new Error(`Duplicate reserved name "${info.name}"`);} +tr.v.d.Diagnostic.register(UnmergeableDiagnosticSet,{elementName:'tr-v-ui-unmergeable-diagnostic-set-span'});return{UnmergeableDiagnosticSet,};});'use strict';tr.exportTo('tr.v.d',function(){const RESERVED_INFOS={ANGLE_REVISIONS:{name:'angleRevisions',type:tr.v.d.GenericSet},ARCHITECTURES:{name:'architectures',type:tr.v.d.GenericSet},BENCHMARKS:{name:'benchmarks',type:tr.v.d.GenericSet},BENCHMARK_START:{name:'benchmarkStart',type:tr.v.d.DateRange},BENCHMARK_DESCRIPTIONS:{name:'benchmarkDescriptions',type:tr.v.d.GenericSet},BOTS:{name:'bots',type:tr.v.d.GenericSet},BUG_COMPONENTS:{name:'bugComponents',type:tr.v.d.GenericSet},BUILDS:{name:'builds',type:tr.v.d.GenericSet},CATAPULT_REVISIONS:{name:'catapultRevisions',type:tr.v.d.GenericSet},CHROMIUM_COMMIT_POSITIONS:{name:'chromiumCommitPositions',type:tr.v.d.GenericSet},CHROMIUM_REVISIONS:{name:'chromiumRevisions',type:tr.v.d.GenericSet},DEVICE_IDS:{name:'deviceIds',type:tr.v.d.GenericSet},GPUS:{name:'gpus',type:tr.v.d.GenericSet},GROUPING_PATH:{name:'groupingPath',type:tr.v.d.GroupingPath},IS_REFERENCE_BUILD:{name:'isReferenceBuild',type:tr.v.d.GenericSet},LABELS:{name:'labels',type:tr.v.d.GenericSet},LOG_URLS:{name:'logUrls',type:tr.v.d.GenericSet},MASTERS:{name:'masters',type:tr.v.d.GenericSet},MEMORY_AMOUNTS:{name:'memoryAmounts',type:tr.v.d.GenericSet},MERGED_FROM:{name:'mergedFrom',type:t
r.v.d.RelatedHistogramMap},MERGED_TO:{name:'mergedTo',type:tr.v.d.RelatedHistogramMap},OS_NAMES:{name:'osNames',type:tr.v.d.GenericSet},OS_VERSIONS:{name:'osVersions',type:tr.v.d.GenericSet},OWNERS:{name:'owners',type:tr.v.d.GenericSet},PRODUCT_VERSIONS:{name:'productVersions',type:tr.v.d.GenericSet},RELATED_NAMES:{name:'relatedNames',type:tr.v.d.GenericSet},SKIA_REVISIONS:{name:'skiaRevisions',type:tr.v.d.GenericSet},STORIES:{name:'stories',type:tr.v.d.GenericSet},STORYSET_REPEATS:{name:'storysetRepeats',type:tr.v.d.GenericSet},STORY_TAGS:{name:'storyTags',type:tr.v.d.GenericSet},TAG_MAP:{name:'tagmap',type:tr.v.d.TagMap},TRACE_START:{name:'traceStart',type:tr.v.d.DateRange},TRACE_URLS:{name:'traceUrls',type:tr.v.d.GenericSet},V8_COMMIT_POSITIONS:{name:'v8CommitPositions',type:tr.v.d.DateRange},V8_REVISIONS:{name:'v8Revisions',type:tr.v.d.GenericSet},WEBRTC_REVISIONS:{name:'webrtcRevisions',type:tr.v.d.GenericSet},};const RESERVED_NAMES={};const RESERVED_NAMES_TO_TYPES=new Map();for(const[codename,info]of Object.entries(RESERVED_INFOS)){RESERVED_NAMES[codename]=info.name;if(RESERVED_NAMES_TO_TYPES.has(info.name)){throw new Error(`Duplicate reserved name "${info.name}"`);} RESERVED_NAMES_TO_TYPES.set(info.name,info.type);} const RESERVED_NAMES_SET=new Set(Object.values(RESERVED_NAMES));return{RESERVED_INFOS,RESERVED_NAMES,RESERVED_NAMES_SET,RESERVED_NAMES_TO_TYPES,};});'use strict';tr.exportTo('tr.v.d',function(){class DiagnosticMap extends Map{constructor(opt_allowReservedNames){super();if(opt_allowReservedNames===undefined){opt_allowReservedNames=true;} this.allowReservedNames_=opt_allowReservedNames;} @@ -7244,10 +7244,10 @@ if(dict.callback===undefined){throw new Error('callback must be given');} this.eventType_=dict.eventType;this.keyCodes_=[];if(dict.keyCode){this.pushKeyCode_(dict.keyCode);}else if(dict.keyCodes){dict.keyCodes.forEach(this.pushKeyCode_,this);} 
this.useCapture_=!!dict.useCapture;this.callback_=dict.callback;this.thisArg_=dict.thisArg!==undefined?dict.thisArg:undefined;this.helpText_=dict.helpText!==undefined?dict.helpText:undefined;} -HotKey.prototype={get eventType(){return this.eventType_;},get keyCodes(){return this.keyCodes_;},get helpText(){return this.helpText_;},call(e){this.callback_.call(this.thisArg_,e);},pushKeyCode_(keyCode){this.keyCodes_.push(keyCode);}};return{HotKey,};});'use strict';Polymer({is:'tv-ui-b-hotkey-controller',created(){this.isAttached_=false;this.globalMode_=false;this.coupledToParentController_=undefined;this.curHost_=undefined;this.childControllers_=[];this.bubblingKeyDownHotKeys_={};this.capturingKeyDownHotKeys_={};this.bubblingKeyPressHotKeys_={};this.capturingKeyPressHotKeys_={};this.onBubblingKeyDown_=this.onKey_.bind(this,false);this.onCapturingKeyDown_=this.onKey_.bind(this,true);this.onBubblingKeyPress_=this.onKey_.bind(this,false);this.onCapturingKeyPress_=this.onKey_.bind(this,true);},attached(){this.isAttached_=true;const host=this.findHost_();if(host.__hotkeyController){throw new Error('Multiple hotkey controllers attached to this host');} +HotKey.prototype={get eventType(){return this.eventType_;},get keyCodes(){return this.keyCodes_;},get helpText(){return this.helpText_;},call(e){this.callback_.call(this.thisArg_,e);},pushKeyCode_(keyCode){this.keyCodes_.push(keyCode);}};return{HotKey,};});'use 
strict';Polymer({is:'tv-ui-b-hotkey-controller',created(){this.isAttached_=false;this.globalMode_=false;this.slavedToParentController_=undefined;this.curHost_=undefined;this.childControllers_=[];this.bubblingKeyDownHotKeys_={};this.capturingKeyDownHotKeys_={};this.bubblingKeyPressHotKeys_={};this.capturingKeyPressHotKeys_={};this.onBubblingKeyDown_=this.onKey_.bind(this,false);this.onCapturingKeyDown_=this.onKey_.bind(this,true);this.onBubblingKeyPress_=this.onKey_.bind(this,false);this.onCapturingKeyPress_=this.onKey_.bind(this,true);},attached(){this.isAttached_=true;const host=this.findHost_();if(host.__hotkeyController){throw new Error('Multiple hotkey controllers attached to this host');} host.__hotkeyController=this;this.curHost_=host;let parentElement;if(host.parentElement){parentElement=host.parentElement;}else{parentElement=Polymer.dom(host).parentNode.host;} -const parentController=tr.b.getHotkeyControllerForElement(parentElement);if(parentController){this.coupledToParentController_=parentController;parentController.addChildController_(this);return;} -host.addEventListener('keydown',this.onBubblingKeyDown_,false);host.addEventListener('keydown',this.onCapturingKeyDown_,true);host.addEventListener('keypress',this.onBubblingKeyPress_,false);host.addEventListener('keypress',this.onCapturingKeyPress_,true);},detached(){this.isAttached_=false;const host=this.curHost_;if(!host)return;delete host.__hotkeyController;this.curHost_=undefined;if(this.coupledToParentController_){this.coupledToParentController_.removeChildController_(this);this.coupledToParentController_=undefined;return;} +const parentController=tr.b.getHotkeyControllerForElement(parentElement);if(parentController){this.slavedToParentController_=parentController;parentController.addChildController_(this);return;} 
+host.addEventListener('keydown',this.onBubblingKeyDown_,false);host.addEventListener('keydown',this.onCapturingKeyDown_,true);host.addEventListener('keypress',this.onBubblingKeyPress_,false);host.addEventListener('keypress',this.onCapturingKeyPress_,true);},detached(){this.isAttached_=false;const host=this.curHost_;if(!host)return;delete host.__hotkeyController;this.curHost_=undefined;if(this.slavedToParentController_){this.slavedToParentController_.removeChildController_(this);this.slavedToParentController_=undefined;return;} host.removeEventListener('keydown',this.onBubblingKeyDown_,false);host.removeEventListener('keydown',this.onCapturingKeyDown_,true);host.removeEventListener('keypress',this.onBubblingKeyPress_,false);host.removeEventListener('keypress',this.onCapturingKeyPress_,true);},addChildController_(controller){const i=this.childControllers_.indexOf(controller);if(i!==-1){throw new Error('Controller already registered');} this.childControllers_.push(controller);},removeChildController_(controller){const i=this.childControllers_.indexOf(controller);if(i===-1){throw new Error('Controller not registered');} this.childControllers_.splice(i,1);return controller;},getKeyMapForEventType_(eventType,useCapture){if(eventType==='keydown'){if(!useCapture){return this.bubblingKeyDownHotKeys_;} @@ -7262,7 +7262,7 @@ keyMap[keyCode]=hotKey;} for(let i=0;i<hotKey.keyCodes.length;i++){const keyCode=hotKey.keyCodes[i];delete keyMap[keyCode];} return hotKey;},get globalMode(){return this.globalMode_;},set globalMode(globalMode){const wasAttached=this.isAttached_;if(wasAttached){this.detached();} -this.globalMode_=!!globalMode;if(wasAttached){this.attached();}},get topmostConroller_(){if(this.coupledToParentController_){return this.coupledToParentController_.topmostConroller_;} +this.globalMode_=!!globalMode;if(wasAttached){this.attached();}},get topmostConroller_(){if(this.slavedToParentController_){return this.slavedToParentController_.topmostConroller_;} return 
this;},childRequestsGeneralFocus(child){const topmost=this.topmostConroller_;if(topmost.curHost_){if(topmost.curHost_.hasAttribute('tabIndex')){topmost.curHost_.focus();}else{if(document.activeElement){document.activeElement.blur();}}}else{if(document.activeElement){document.activeElement.blur();}}},childRequestsBlur(child){child.blur();const topmost=this.topmostConroller_;if(topmost.curHost_){topmost.curHost_.focus();}},findHost_(){if(this.globalMode_)return document.body;if(this.parentElement)return this.parentElement;if(!Polymer.dom(this).parentNode)return this.host;let node=this.parentNode;while(Polymer.dom(node).parentNode)node=Polymer.dom(node).parentNode;return node.host;},appendMatchingHotKeysTo_(matchedHotKeys,useCapture,e){const localKeyMap=this.getKeyMapForEventType_(e.type,useCapture);const localHotKey=localKeyMap[e.keyCode];if(localHotKey){matchedHotKeys.push(localHotKey);} for(let i=0;i<this.childControllers_.length;i++){const controller=this.childControllers_[i];controller.appendMatchingHotKeysTo_(matchedHotKeys,useCapture,e);}},onKey_(useCapture,e){if(!useCapture&&e.path[0].tagName==='INPUT')return;let sortedControllers;const matchedHotKeys=[];this.appendMatchingHotKeysTo_(matchedHotKeys,useCapture,e);if(matchedHotKeys.length===0)return false;if(matchedHotKeys.length>1){throw new Error('More than one hotKey is currently unsupported');} const hotKey=matchedHotKeys[0];let prevented=0;prevented|=hotKey.call(e);return!prevented&&e.defaultPrevented;}});'use strict';tr.exportTo('tr.b',function(){function getHotkeyControllerForElement(refElement){let curElement=refElement;while(curElement){if(curElement.tagName==='tv-ui-b-hotkey-controller'){return curElement;} @@ -7614,7 +7614,7 @@ const ans={supported:false};for(const proc of Object.values(m.processes)){proc.objects.iterObjectInstances(function(instance){if(instance instanceof BlameContextInstance){ans.supported=true;}});} if(!ans.supported){ans.reason='No frame data available';} return ans;},get 
currentRangeOfInterest(){if(this.rangeOfInterest_.isEmpty){return this.model_.bounds;} -return this.rangeOfInterest_;},get rangeOfInterest(){return this.rangeOfInterest_;},set rangeOfInterest(rangeOfInterest){this.rangeOfInterest_=rangeOfInterest;this.updateContents_();},get selection(){},set selection(_){},get textLabel(){return'Frame Data';},get model(){return this.model_;},set model(model){this.model_=model;this.updateContents_();}});tr.ui.side_panel.SidePanelRegistry.register(function(){return document.createElement('tr-ui-e-s-frame-data-side-panel');});});'use strict';Polymer({is:'tr-ui-b-chart-legend-key',ready(){this.$.checkbox.addEventListener('change',this.onCheckboxChange_.bind(this));},onCheckboxChange_(){tr.b.dispatchSimpleEvent(this,tr.ui.b.DataSeriesEnableChangeEventType,true,false,{key:Polymer.dom(this).textContent,enabled:this.enabled});},set textContent(t){Polymer.dom(this.$.label).textContent=t;Polymer.dom(this.$.link).textContent=t;this.updateContents_();},set width(w){w-=20;this.$.link.style.width=w+'px';this.$.label.style.width=w+'px';},get textContent(){return Polymer.dom(this.$.label).textContent;},set optional(optional){this.$.checkbox.style.visibility=optional?'visible':'hidden';},get optional(){return this.$.checkbox.style.visibility==='visible';},set enabled(enabled){this.$.checkbox.checked=enabled?'checked':'';},get enabled(){return this.$.checkbox.checked;},set color(c){this.$.label.style.color=c;this.$.link.color=c;},set target(target){this.$.link.setSelectionAndContent(target,Polymer.dom(this.$.label).textContent);this.updateContents_();},get target(){return this.$.link.selection;},set title(title){this.$.link.title=title;},updateContents_(){this.$.link.style.display=this.target?'':'none';this.$.label.style.display=this.target?'none':'';this.$.label.htmlFor=this.optional?'checkbox':'';}});'use strict';(function(window){window.define=function(x){window.d3=x;};window.define.amd=true;})(this);!function(){function n(n){return 
null!=n&&!isNaN(n)}function t(n){return n.length}function e(n){for(var t=1;n*t%1;)t*=10;return t}function r(n,t){try{for(var e in t)Object.defineProperty(n.prototype,e,{value:t[e],enumerable:!1})}catch(r){n.prototype=t}}function u(){}function i(n){return aa+n in this}function o(n){return n=aa+n,n in this&&delete this[n]}function a(){var n=[];return this.forEach(function(t){n.push(t)}),n}function c(){var n=0;for(var t in this)t.charCodeAt(0)===ca&&++n;return n}function s(){for(var n in this)if(n.charCodeAt(0)===ca)return!1;return!0}function l(){}function f(n,t,e){return function(){var r=e.apply(t,arguments);return r===t?n:r}}function h(n,t){if(t in n)return t;t=t.charAt(0).toUpperCase()+t.substring(1);for(var e=0,r=sa.length;r>e;++e){var u=sa[e]+t;if(u in n)return u}}function g(){}function p(){}function v(n){function t(){for(var t,r=e,u=-1,i=r.length;++u<i;)(t=r[u].on)&&t.apply(this,arguments);return n}var e=[],r=new u;return t.on=function(t,u){var i,o=r.get(t);return arguments.length<2?o&&o.on:(o&&(o.on=null,e=e.slice(0,i=e.indexOf(o)).concat(e.slice(i+1)),r.remove(t)),u&&e.push(r.set(t,{on:u})),n)},t}function d(){Xo.event.preventDefault()}function m(){for(var n,t=Xo.event;n=t.sourceEvent;)t=n;return t}function y(n){for(var t=new p,e=0,r=arguments.length;++e<r;)t[arguments[e]]=v(t);return t.of=function(e,r){return function(u){try{var i=u.sourceEvent=Xo.event;u.target=n,Xo.event=u,t[u.type].apply(e,r)}finally{Xo.event=i}}},t}function x(n){return fa(n,da),n}function M(n){return"function"==typeof n?n:function(){return ha(n,this)}}function _(n){return"function"==typeof n?n:function(){return ga(n,this)}}function b(n,t){function e(){this.removeAttribute(n)}function r(){this.removeAttributeNS(n.space,n.local)}function u(){this.setAttribute(n,t)}function i(){this.setAttributeNS(n.space,n.local,t)}function o(){var e=t.apply(this,arguments);null==e?this.removeAttribute(n):this.setAttribute(n,e)}function a(){var 
e=t.apply(this,arguments);null==e?this.removeAttributeNS(n.space,n.local):this.setAttributeNS(n.space,n.local,e)}return n=Xo.ns.qualify(n),null==t?n.local?r:e:"function"==typeof t?n.local?a:o:n.local?i:u}function w(n){return n.trim().replace(/\s+/g," ")}function S(n){return new RegExp("(?:^|\\s+)"+Xo.requote(n)+"(?:\\s+|$)","g")}function k(n){return n.trim().split(/^|\s+/)}function E(n,t){function e(){for(var e=-1;++e<u;)n[e](this,t)}function r(){for(var e=-1,r=t.apply(this,arguments);++e<u;)n[e](this,r)}n=k(n).map(A);var u=n.length;return"function"==typeof t?r:e}function A(n){var t=S(n);return function(e,r){if(u=e.classList)return r?u.add(n):u.remove(n);var u=e.getAttribute("class")||"";r?(t.lastIndex=0,t.test(u)||e.setAttribute("class",w(u+" "+n))):e.setAttribute("class",w(u.replace(t," ")))}}function C(n,t,e){function r(){this.style.removeProperty(n)}function u(){this.style.setProperty(n,t,e)}function i(){var r=t.apply(this,arguments);null==r?this.style.removeProperty(n):this.style.setProperty(n,r,e)}return null==t?r:"function"==typeof t?i:u}function N(n,t){function e(){delete this[n]}function r(){this[n]=t}function u(){var e=t.apply(this,arguments);null==e?delete this[n]:this[n]=e}return null==t?e:"function"==typeof t?u:r}function L(n){return"function"==typeof n?n:(n=Xo.ns.qualify(n)).local?function(){return this.ownerDocument.createElementNS(n.space,n.local)}:function(){return this.ownerDocument.createElementNS(this.namespaceURI,n)}}function T(n){return{__data__:n}}function q(n){return function(){return va(this,n)}}function z(n){return arguments.length||(n=Xo.ascending),function(t,e){return t&&e?n(t.__data__,e.__data__):!t-!e}}function R(n,t){for(var e=0,r=n.length;r>e;e++)for(var u,i=n[e],o=0,a=i.length;a>o;o++)(u=i[o])&&t(u,o,e);return n}function D(n){return fa(n,ya),n}function P(n){var t,e;return function(r,u,i){var o,a=n[i].update,c=a.length;for(i!=e&&(e=i,t=0),u>=t&&(t=u+1);!(o=a[t])&&++t<c;);return o}}function U(){var 
n=this.__transition__;n&&++n.active}function j(n,t,e){function r(){var t=this[o];t&&(this.removeEventListener(n,t,t.$),delete this[o])}function u(){var u=c(t,Bo(arguments));r.call(this),this.addEventListener(n,this[o]=u,u.$=e),u._=t}function i(){var t,e=new RegExp("^__on([^.]+)"+Xo.requote(n)+"$");for(var r in this)if(t=r.match(e)){var u=this[r];this.removeEventListener(t[1],u,u.$),delete this[r]}}var o="__on"+n,a=n.indexOf("."),c=H;a>0&&(n=n.substring(0,a));var s=Ma.get(n);return s&&(n=s,c=F),a?t?u:r:t?g:i}function H(n,t){return function(e){var r=Xo.event;Xo.event=e,t[0]=this.__data__;try{n.apply(this,t)}finally{Xo.event=r}}}function F(n,t){var e=H(n,t);return function(n){var t=this,r=n.relatedTarget;r&&(r===t||8&r.compareDocumentPosition(t))||e.call(t,n)}}function O(){var n=".dragsuppress-"+ ++ba,t="click"+n,e=Xo.select(Go).on("touchmove"+n,d).on("dragstart"+n,d).on("selectstart"+n,d);if(_a){var r=Jo.style,u=r[_a];r[_a]="none"}return function(i){function o(){e.on(t,null)}e.on(n,null),_a&&(r[_a]=u),i&&(e.on(t,function(){d(),o()},!0),setTimeout(o,0))}}function Y(n,t){t.changedTouches&&(t=t.changedTouches[0]);var e=n.ownerSVGElement||n;if(e.createSVGPoint){var r=e.createSVGPoint();if(0>wa&&(Go.scrollX||Go.scrollY)){e=Xo.select("body").append("svg").style({position:"absolute",top:0,left:0,margin:0,padding:0,border:"none"},"important");var u=e[0][0].getScreenCTM();wa=!(u.f||u.e),e.remove()}return wa?(r.x=t.pageX,r.y=t.pageY):(r.x=t.clientX,r.y=t.clientY),r=r.matrixTransform(n.getScreenCTM().inverse()),[r.x,r.y]}var i=n.getBoundingClientRect();return[t.clientX-i.left-n.clientLeft,t.clientY-i.top-n.clientTop]}function I(n){return n>0?1:0>n?-1:0}function Z(n,t,e){return(t[0]-n[0])*(e[1]-n[1])-(t[1]-n[1])*(e[0]-n[0])}function V(n){return n>1?0:-1>n?Sa:Math.acos(n)}function X(n){return n>1?Ea:-1>n?-Ea:Math.asin(n)}function $(n){return((n=Math.exp(n))-1/n)/2}function B(n){return((n=Math.exp(n))+1/n)/2}function W(n){return((n=Math.exp(2*n))-1)/(n+1)}function 
J(n){return(n=Math.sin(n/2))*n}function G(){}function K(n,t,e){return new Q(n,t,e)}function Q(n,t,e){this.h=n,this.s=t,this.l=e}function nt(n,t,e){function r(n){return n>360?n-=360:0>n&&(n+=360),60>n?i+(o-i)*n/60:180>n?o:240>n?i+(o-i)*(240-n)/60:i}function u(n){return Math.round(255*r(n))}var i,o;return n=isNaN(n)?0:(n%=360)<0?n+360:n,t=isNaN(t)?0:0>t?0:t>1?1:t,e=0>e?0:e>1?1:e,o=.5>=e?e*(1+t):e+t-e*t,i=2*e-o,gt(u(n+120),u(n),u(n-120))}function tt(n,t,e){return new et(n,t,e)}function et(n,t,e){this.h=n,this.c=t,this.l=e}function rt(n,t,e){return isNaN(n)&&(n=0),isNaN(t)&&(t=0),ut(e,Math.cos(n*=Na)*t,Math.sin(n)*t)}function ut(n,t,e){return new it(n,t,e)}function it(n,t,e){this.l=n,this.a=t,this.b=e}function ot(n,t,e){var r=(n+16)/116,u=r+t/500,i=r-e/200;return u=ct(u)*Fa,r=ct(r)*Oa,i=ct(i)*Ya,gt(lt(3.2404542*u-1.5371385*r-.4985314*i),lt(-.969266*u+1.8760108*r+.041556*i),lt(.0556434*u-.2040259*r+1.0572252*i))}function at(n,t,e){return n>0?tt(Math.atan2(e,t)*La,Math.sqrt(t*t+e*e),n):tt(0/0,0/0,n)}function ct(n){return n>.206893034?n*n*n:(n-4/29)/7.787037}function st(n){return n>.008856?Math.pow(n,1/3):7.787037*n+4/29}function lt(n){return Math.round(255*(.00304>=n?12.92*n:1.055*Math.pow(n,1/2.4)-.055))}function ft(n){return gt(n>>16,255&n>>8,255&n)}function ht(n){return ft(n)+""}function gt(n,t,e){return new pt(n,t,e)}function pt(n,t,e){this.r=n,this.g=t,this.b=e}function vt(n){return 16>n?"0"+Math.max(0,n).toString(16):Math.min(255,n).toString(16)}function dt(n,t,e){var r,u,i,o,a=0,c=0,s=0;if(u=/([a-z]+)\((.*)\)/i.exec(n))switch(i=u[2].split(","),u[1]){case"hsl":return e(parseFloat(i[0]),parseFloat(i[1])/100,parseFloat(i[2])/100);case"rgb":return t(Mt(i[0]),Mt(i[1]),Mt(i[2]))}return(o=Va.get(n))?t(o.r,o.g,o.b):(null!=n&&"#"===n.charAt(0)&&(r=parseInt(n.substring(1),16),isNaN(r)||(4===n.length?(a=(3840&r)>>4,a=a>>4|a,c=240&r,c=c>>4|c,s=15&r,s=s<<4|s):7===n.length&&(a=(16711680&r)>>16,c=(65280&r)>>8,s=255&r))),t(a,c,s))}function mt(n,t,e){var 
r,u,i=Math.min(n/=255,t/=255,e/=255),o=Math.max(n,t,e),a=o-i,c=(o+i)/2;return a?(u=.5>c?a/(o+i):a/(2-o-i),r=n==o?(t-e)/a+(e>t?6:0):t==o?(e-n)/a+2:(n-t)/a+4,r*=60):(r=0/0,u=c>0&&1>c?0:r),K(r,u,c)}function yt(n,t,e){n=xt(n),t=xt(t),e=xt(e);var r=st((.4124564*n+.3575761*t+.1804375*e)/Fa),u=st((.2126729*n+.7151522*t+.072175*e)/Oa),i=st((.0193339*n+.119192*t+.9503041*e)/Ya);return ut(116*u-16,500*(r-u),200*(u-i))}function xt(n){return(n/=255)<=.04045?n/12.92:Math.pow((n+.055)/1.055,2.4)}function Mt(n){var t=parseFloat(n);return"%"===n.charAt(n.length-1)?Math.round(2.55*t):t}function _t(n){return"function"==typeof n?n:function(){return n}}function bt(n){return n}function wt(n){return function(t,e,r){return 2===arguments.length&&"function"==typeof e&&(r=e,e=null),St(t,e,n,r)}}function St(n,t,e,r){function u(){var n,t=c.status;if(!t&&c.responseText||t>=200&&300>t||304===t){try{n=e.call(i,c)}catch(r){return o.error.call(i,r),void 0}o.load.call(i,n)}else o.error.call(i,c)}var i={},o=Xo.dispatch("beforesend","progress","load","error"),a={},c=new XMLHttpRequest,s=null;return!Go.XDomainRequest||"withCredentials"in c||!/^(http(s)?:)?\/\//.test(n)||(c=new XDomainRequest),"onload"in c?c.onload=c.onerror=u:c.onreadystatechange=function(){c.readyState>3&&u()},c.onprogress=function(n){var t=Xo.event;Xo.event=n;try{o.progress.call(i,c)}finally{Xo.event=t}},i.header=function(n,t){return n=(n+"").toLowerCase(),arguments.length<2?a[n]:(null==t?delete a[n]:a[n]=t+"",i)},i.mimeType=function(n){return arguments.length?(t=null==n?null:n+"",i):t},i.responseType=function(n){return arguments.length?(s=n,i):s},i.response=function(n){return e=n,i},["get","post"].forEach(function(n){i[n]=function(){return i.send.apply(i,[n].concat(Bo(arguments)))}}),i.send=function(e,r,u){if(2===arguments.length&&"function"==typeof r&&(u=r,r=null),c.open(e,n,!0),null==t||"accept"in a||(a.accept=t+",*/*"),c.setRequestHeader)for(var l in a)c.setRequestHeader(l,a[l]);return 
null!=t&&c.overrideMimeType&&c.overrideMimeType(t),null!=s&&(c.responseType=s),null!=u&&i.on("error",u).on("load",function(n){u(null,n)}),o.beforesend.call(i,c),c.send(null==r?null:r),i},i.abort=function(){return c.abort(),i},Xo.rebind(i,o,"on"),null==r?i:i.get(kt(r))}function kt(n){return 1===n.length?function(t,e){n(null==t?e:null)}:n}function Et(){var n=At(),t=Ct()-n;t>24?(isFinite(t)&&(clearTimeout(Wa),Wa=setTimeout(Et,t)),Ba=0):(Ba=1,Ga(Et))}function At(){var n=Date.now();for(Ja=Xa;Ja;)n>=Ja.t&&(Ja.f=Ja.c(n-Ja.t)),Ja=Ja.n;return n}function Ct(){for(var n,t=Xa,e=1/0;t;)t.f?t=n?n.n=t.n:Xa=t.n:(t.t<e&&(e=t.t),t=(n=t).n);return $a=n,e}function Nt(n,t){return t-(n?Math.ceil(Math.log(n)/Math.LN10):1)}function Lt(n,t){var e=Math.pow(10,3*oa(8-t));return{scale:t>8?function(n){return n/e}:function(n){return n*e},symbol:n}}function Tt(n){var t=n.decimal,e=n.thousands,r=n.grouping,u=n.currency,i=r?function(n){for(var t=n.length,u=[],i=0,o=r[0];t>0&&o>0;)u.push(n.substring(t-=o,t+o)),o=r[i=(i+1)%r.length];return u.reverse().join(e)}:bt;return function(n){var e=Qa.exec(n),r=e[1]||" ",o=e[2]||">",a=e[3]||"",c=e[4]||"",s=e[5],l=+e[6],f=e[7],h=e[8],g=e[9],p=1,v="",d="",m=!1;switch(h&&(h=+h.substring(1)),(s||"0"===r&&"="===o)&&(s=r="0",o="=",f&&(l-=Math.floor((l-1)/4))),g){case"n":f=!0,g="g";break;case"%":p=100,d="%",g="f";break;case"p":p=100,d="%",g="r";break;case"b":case"o":case"x":case"X":"#"===c&&(v="0"+g.toLowerCase());case"c":case"d":m=!0,h=0;break;case"s":p=-1,g="r"}"$"===c&&(v=u[0],d=u[1]),"r"!=g||h||(g="g"),null!=h&&("g"==g?h=Math.max(1,Math.min(21,h)):("e"==g||"f"==g)&&(h=Math.max(0,Math.min(20,h)))),g=nc.get(g)||qt;var y=s&&f;return function(n){var e=d;if(m&&n%1)return"";var u=0>n||0===n&&0>1/n?(n=-n,"-"):a;if(0>p){var c=Xo.formatPrefix(n,h);n=c.scale(n),e=c.symbol+d}else n*=p;n=g(n,h);var x=n.lastIndexOf("."),M=0>x?n:n.substring(0,x),_=0>x?"":t+n.substring(x+1);!s&&f&&(M=i(M));var b=v.length+M.length+_.length+(y?0:u.length),w=l>b?new 
Array(b=l-b+1).join(r):"";return y&&(M=i(w+M)),u+=v,n=M+_,("<"===o?u+n+w:">"===o?w+u+n:"^"===o?w.substring(0,b>>=1)+u+n+w.substring(b):u+(y?n:w+n))+e}}}function qt(n){return n+""}function zt(){this._=new Date(arguments.length>1?Date.UTC.apply(this,arguments):arguments[0])}function Rt(n,t,e){function r(t){var e=n(t),r=i(e,1);return r-t>t-e?e:r}function u(e){return t(e=n(new ec(e-1)),1),e}function i(n,e){return t(n=new ec(+n),e),n}function o(n,r,i){var o=u(n),a=[];if(i>1)for(;r>o;)e(o)%i||a.push(new Date(+o)),t(o,1);else for(;r>o;)a.push(new Date(+o)),t(o,1);return a}function a(n,t,e){try{ec=zt;var r=new zt;return r._=n,o(r,t,e)}finally{ec=Date}}n.floor=n,n.round=r,n.ceil=u,n.offset=i,n.range=o;var c=n.utc=Dt(n);return c.floor=c,c.round=Dt(r),c.ceil=Dt(u),c.offset=Dt(i),c.range=a,n}function Dt(n){return function(t,e){try{ec=zt;var r=new zt;return r._=t,n(r,e)._}finally{ec=Date}}}function Pt(n){function t(n){function t(t){for(var e,u,i,o=[],a=-1,c=0;++a<r;)37===n.charCodeAt(a)&&(o.push(n.substring(c,a)),null!=(u=uc[e=n.charAt(++a)])&&(e=n.charAt(++a)),(i=C[e])&&(e=i(t,null==u?"e"===e?" 
":"0":u)),o.push(e),c=a+1);return o.push(n.substring(c,a)),o.join("")}var r=n.length;return t.parse=function(t){var r={y:1900,m:0,d:1,H:0,M:0,S:0,L:0,Z:null},u=e(r,n,t,0);if(u!=t.length)return null;"p"in r&&(r.H=r.H%12+12*r.p);var i=null!=r.Z&&ec!==zt,o=new(i?zt:ec);return"j"in r?o.setFullYear(r.y,0,r.j):"w"in r&&("W"in r||"U"in r)?(o.setFullYear(r.y,0,1),o.setFullYear(r.y,0,"W"in r?(r.w+6)%7+7*r.W-(o.getDay()+5)%7:r.w+7*r.U-(o.getDay()+6)%7)):o.setFullYear(r.y,r.m,r.d),o.setHours(r.H+Math.floor(r.Z/100),r.M+r.Z%100,r.S,r.L),i?o._:o},t.toString=function(){return n},t}function e(n,t,e,r){for(var u,i,o,a=0,c=t.length,s=e.length;c>a;){if(r>=s)return-1;if(u=t.charCodeAt(a++),37===u){if(o=t.charAt(a++),i=N[o in uc?t.charAt(a++):o],!i||(r=i(n,e,r))<0)return-1}else if(u!=e.charCodeAt(r++))return-1}return r}function r(n,t,e){b.lastIndex=0;var r=b.exec(t.substring(e));return r?(n.w=w.get(r[0].toLowerCase()),e+r[0].length):-1}function u(n,t,e){M.lastIndex=0;var r=M.exec(t.substring(e));return r?(n.w=_.get(r[0].toLowerCase()),e+r[0].length):-1}function i(n,t,e){E.lastIndex=0;var r=E.exec(t.substring(e));return r?(n.m=A.get(r[0].toLowerCase()),e+r[0].length):-1}function o(n,t,e){S.lastIndex=0;var r=S.exec(t.substring(e));return r?(n.m=k.get(r[0].toLowerCase()),e+r[0].length):-1}function a(n,t,r){return e(n,C.c.toString(),t,r)}function c(n,t,r){return e(n,C.x.toString(),t,r)}function s(n,t,r){return e(n,C.X.toString(),t,r)}function l(n,t,e){var r=x.get(t.substring(e,e+=2).toLowerCase());return null==r?-1:(n.p=r,e)}var f=n.dateTime,h=n.date,g=n.time,p=n.periods,v=n.days,d=n.shortDays,m=n.months,y=n.shortMonths;t.utc=function(n){function e(n){try{ec=zt;var t=new ec;return t._=n,r(t)}finally{ec=Date}}var r=t(n);return e.parse=function(n){try{ec=zt;var t=r.parse(n);return t&&t._}finally{ec=Date}},e.toString=r.toString,e},t.multi=t.utc.multi=ee;var x=Xo.map(),M=jt(v),_=Ht(v),b=jt(d),w=Ht(d),S=jt(m),k=Ht(m),E=jt(y),A=Ht(y);p.forEach(function(n,t){x.set(n.toLowerCase(),t)});var 
C={a:function(n){return d[n.getDay()]},A:function(n){return v[n.getDay()]},b:function(n){return y[n.getMonth()]},B:function(n){return m[n.getMonth()]},c:t(f),d:function(n,t){return Ut(n.getDate(),t,2)},e:function(n,t){return Ut(n.getDate(),t,2)},H:function(n,t){return Ut(n.getHours(),t,2)},I:function(n,t){return Ut(n.getHours()%12||12,t,2)},j:function(n,t){return Ut(1+tc.dayOfYear(n),t,3)},L:function(n,t){return Ut(n.getMilliseconds(),t,3)},m:function(n,t){return Ut(n.getMonth()+1,t,2)},M:function(n,t){return Ut(n.getMinutes(),t,2)},p:function(n){return p[+(n.getHours()>=12)]},S:function(n,t){return Ut(n.getSeconds(),t,2)},U:function(n,t){return Ut(tc.sundayOfYear(n),t,2)},w:function(n){return n.getDay()},W:function(n,t){return Ut(tc.mondayOfYear(n),t,2)},x:t(h),X:t(g),y:function(n,t){return Ut(n.getFullYear()%100,t,2)},Y:function(n,t){return Ut(n.getFullYear()%1e4,t,4)},Z:ne,"%":function(){return"%"}},N={a:r,A:u,b:i,B:o,c:a,d:Bt,e:Bt,H:Jt,I:Jt,j:Wt,L:Qt,m:$t,M:Gt,p:l,S:Kt,U:Ot,w:Ft,W:Yt,x:c,X:s,y:Zt,Y:It,Z:Vt,"%":te};return t}function Ut(n,t,e){var r=0>n?"-":"",u=(r?-n:n)+"",i=u.length;return r+(e>i?new Array(e-i+1).join(t)+u:u)}function jt(n){return new RegExp("^(?:"+n.map(Xo.requote).join("|")+")","i")}function Ht(n){for(var t=new u,e=-1,r=n.length;++e<r;)t.set(n[e].toLowerCase(),e);return t}function Ft(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+1));return r?(n.w=+r[0],e+r[0].length):-1}function Ot(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e));return r?(n.U=+r[0],e+r[0].length):-1}function Yt(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e));return r?(n.W=+r[0],e+r[0].length):-1}function It(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+4));return r?(n.y=+r[0],e+r[0].length):-1}function Zt(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+2));return r?(n.y=Xt(+r[0]),e+r[0].length):-1}function Vt(n,t,e){return/^[+-]\d{4}$/.test(t=t.substring(e,e+5))?(n.Z=+t,e+5):-1}function Xt(n){return n+(n>68?1900:2e3)}function 
$t(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+2));return r?(n.m=r[0]-1,e+r[0].length):-1}function Bt(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+2));return r?(n.d=+r[0],e+r[0].length):-1}function Wt(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+3));return r?(n.j=+r[0],e+r[0].length):-1}function Jt(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+2));return r?(n.H=+r[0],e+r[0].length):-1}function Gt(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+2));return r?(n.M=+r[0],e+r[0].length):-1}function Kt(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+2));return r?(n.S=+r[0],e+r[0].length):-1}function Qt(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+3));return r?(n.L=+r[0],e+r[0].length):-1}function ne(n){var t=n.getTimezoneOffset(),e=t>0?"-":"+",r=~~(oa(t)/60),u=oa(t)%60;return e+Ut(r,"0",2)+Ut(u,"0",2)}function te(n,t,e){oc.lastIndex=0;var r=oc.exec(t.substring(e,e+1));return r?e+r[0].length:-1}function ee(n){for(var t=n.length,e=-1;++e<t;)n[e][0]=this(n[e][0]);return function(t){for(var e=0,r=n[e];!r[1](t);)r=n[++e];return r[0](t)}}function re(){}function ue(n,t,e){var r=e.s=n+t,u=r-n,i=r-u;e.t=n-i+(t-u)}function ie(n,t){n&&lc.hasOwnProperty(n.type)&&lc[n.type](n,t)}function oe(n,t,e){var r,u=-1,i=n.length-e;for(t.lineStart();++u<i;)r=n[u],t.point(r[0],r[1],r[2]);t.lineEnd()}function ae(n,t){var e=-1,r=n.length;for(t.polygonStart();++e<r;)oe(n[e],t,1);t.polygonEnd()}function ce(){function n(n,t){n*=Na,t=t*Na/2+Sa/4;var e=n-r,o=e>=0?1:-1,a=o*e,c=Math.cos(t),s=Math.sin(t),l=i*s,f=u*c+l*Math.cos(a),h=l*o*Math.sin(a);hc.add(Math.atan2(h,f)),r=n,u=c,i=s}var t,e,r,u,i;gc.point=function(o,a){gc.point=n,r=(t=o)*Na,u=Math.cos(a=(e=a)*Na/2+Sa/4),i=Math.sin(a)},gc.lineEnd=function(){n(t,e)}}function se(n){var t=n[0],e=n[1],r=Math.cos(e);return[r*Math.cos(t),r*Math.sin(t),Math.sin(e)]}function le(n,t){return n[0]*t[0]+n[1]*t[1]+n[2]*t[2]}function fe(n,t){return[n[1]*t[2]-n[2]*t[1],n[2]*t[0]-n[0]*t[2],n[0]*t[1]-n[1]*t[0]]}function 
// NOTE(review): minified D3-v3 geo internals; comments added only — code bytes are untouched.
// they(n,t): in-place 3-vector add (t into n); ge: scalar multiply; pe: normalize in place.
// ve: cartesian -> spherical [atan2(y,x), X(z)]; de: 2d point equality within epsilon Aa.
// me/ye: fold a lon/lat point into running means dc/mc/yc over count pc (centroid-style accumulation).
// xe/_e: stateful stream handlers installed on kc — accumulate arc-length-weighted sums (vc/xc/Mc/_c and bc/wc/Sc)
//   from cross/dot products of successive unit vectors; Me resets kc.point to the plain accumulator me.
// be: constant-true predicate (default "defined" accessor).
// we(n,t,e,r,u): rejoins clipped segments into rings — builds paired entry/exit nodes (ke) per open segment,
//   sorts exits by comparator t, alternates inside/outside flags from e, then walks the linked lists emitting
//   points to sink u, calling r(...) to interpolate along the clip edge between segment ends.
// Se(n): links a node array into a circular doubly-linked list (n/p pointers).
// ke: clip-node record {x: point, z: segment, o: opposite node, e: entry flag, v: visited, n/p links}.
// Ee(n,t,e,r): generic clip-transform factory — returns a stream wrapper that buffers polygon rings (via Ce),
//   clips lines through t(i), tests ring containment with Le, and re-emits merged rings through we.
// Ae: keep segments longer than one point; Ce: buffering stream (collects segments, supports rejoin of a split ring).
// Ne: comparator ordering clip nodes around the sphere seam.
// Le(n,t): containment test — winding/angle sum (hc) over rings t relative to point n, with antimeridian-crossing
//   sign handling; returns truthy when n is inside.
// Te(n): antimeridian clip stream — splits lines crossing +/-Sa, inserting the crossing latitude from qe.
// qe: latitude of a great-circle crossing of the antimeridian (spherical interpolation).
// ze: emits the connector points along the antimeridian / sphere outline for the cut.
// Re(n): small-circle (clipAngle) clipping; t tests visibility against cos threshold i; inner r(n,t,e) intersects
//   a segment with the clip circle by cross/dot algebra (the they(...) calls are in-place vector adds).
they(n,t){n[0]+=t[0],n[1]+=t[1],n[2]+=t[2]}function ge(n,t){return[n[0]*t,n[1]*t,n[2]*t]}function pe(n){var t=Math.sqrt(n[0]*n[0]+n[1]*n[1]+n[2]*n[2]);n[0]/=t,n[1]/=t,n[2]/=t}function ve(n){return[Math.atan2(n[1],n[0]),X(n[2])]}function de(n,t){return oa(n[0]-t[0])<Aa&&oa(n[1]-t[1])<Aa}function me(n,t){n*=Na;var e=Math.cos(t*=Na);ye(e*Math.cos(n),e*Math.sin(n),Math.sin(t))}function ye(n,t,e){++pc,dc+=(n-dc)/pc,mc+=(t-mc)/pc,yc+=(e-yc)/pc}function xe(){function n(n,u){n*=Na;var i=Math.cos(u*=Na),o=i*Math.cos(n),a=i*Math.sin(n),c=Math.sin(u),s=Math.atan2(Math.sqrt((s=e*c-r*a)*s+(s=r*o-t*c)*s+(s=t*a-e*o)*s),t*o+e*a+r*c);vc+=s,xc+=s*(t+(t=o)),Mc+=s*(e+(e=a)),_c+=s*(r+(r=c)),ye(t,e,r)}var t,e,r;kc.point=function(u,i){u*=Na;var o=Math.cos(i*=Na);t=o*Math.cos(u),e=o*Math.sin(u),r=Math.sin(i),kc.point=n,ye(t,e,r)}}function Me(){kc.point=me}function _e(){function n(n,t){n*=Na;var e=Math.cos(t*=Na),o=e*Math.cos(n),a=e*Math.sin(n),c=Math.sin(t),s=u*c-i*a,l=i*o-r*c,f=r*a-u*o,h=Math.sqrt(s*s+l*l+f*f),g=r*o+u*a+i*c,p=h&&-V(g)/h,v=Math.atan2(h,g);bc+=p*s,wc+=p*l,Sc+=p*f,vc+=v,xc+=v*(r+(r=o)),Mc+=v*(u+(u=a)),_c+=v*(i+(i=c)),ye(r,u,i)}var t,e,r,u,i;kc.point=function(o,a){t=o,e=a,kc.point=n,o*=Na;var c=Math.cos(a*=Na);r=c*Math.cos(o),u=c*Math.sin(o),i=Math.sin(a),ye(r,u,i)},kc.lineEnd=function(){n(t,e),kc.lineEnd=Me,kc.point=me}}function be(){return!0}function we(n,t,e,r,u){var i=[],o=[];if(n.forEach(function(n){if(!((t=n.length-1)<=0)){var t,e=n[0],r=n[t];if(de(e,r)){u.lineStart();for(var a=0;t>a;++a)u.point((e=n[a])[0],e[1]);return u.lineEnd(),void 0}var c=new ke(e,n,null,!0),s=new ke(e,null,c,!1);c.o=s,i.push(c),o.push(s),c=new ke(r,n,null,!1),s=new ke(r,null,c,!0),c.o=s,i.push(c),o.push(s)}}),o.sort(t),Se(i),Se(o),i.length){for(var a=0,c=e,s=o.length;s>a;++a)o[a].e=c=!c;for(var l,f,h=i[0];;){for(var g=h,p=!0;g.v;)if((g=g.n)===h)return;l=g.z,u.lineStart();do{if(g.v=g.o.v=!0,g.e){if(p)for(var a=0,s=l.length;s>a;++a)u.point((f=l[a])[0],f[1]);else 
r(g.x,g.n.x,1,u);g=g.n}else{if(p){l=g.p.z;for(var a=l.length-1;a>=0;--a)u.point((f=l[a])[0],f[1])}else r(g.x,g.p.x,-1,u);g=g.p}g=g.o,l=g.z,p=!p}while(!g.v);u.lineEnd()}}}function Se(n){if(t=n.length){for(var t,e,r=0,u=n[0];++r<t;)u.n=e=n[r],e.p=u,u=e;u.n=e=n[0],e.p=u}}function ke(n,t,e,r){this.x=n,this.z=t,this.o=e,this.e=r,this.v=!1,this.n=this.p=null}function Ee(n,t,e,r){return function(u,i){function o(t,e){var r=u(t,e);n(t=r[0],e=r[1])&&i.point(t,e)}function a(n,t){var e=u(n,t);d.point(e[0],e[1])}function c(){y.point=a,d.lineStart()}function s(){y.point=o,d.lineEnd()}function l(n,t){v.push([n,t]);var e=u(n,t);M.point(e[0],e[1])}function f(){M.lineStart(),v=[]}function h(){l(v[0][0],v[0][1]),M.lineEnd();var n,t=M.clean(),e=x.buffer(),r=e.length;if(v.pop(),p.push(v),v=null,r){if(1&t){n=e[0];var u,r=n.length-1,o=-1;for(i.lineStart();++o<r;)i.point((u=n[o])[0],u[1]);return i.lineEnd(),void 0}r>1&&2&t&&e.push(e.pop().concat(e.shift())),g.push(e.filter(Ae))}}var g,p,v,d=t(i),m=u.invert(r[0],r[1]),y={point:o,lineStart:c,lineEnd:s,polygonStart:function(){y.point=l,y.lineStart=f,y.lineEnd=h,g=[],p=[],i.polygonStart()},polygonEnd:function(){y.point=o,y.lineStart=c,y.lineEnd=s,g=Xo.merge(g);var n=Le(m,p);g.length?we(g,Ne,n,e,i):n&&(i.lineStart(),e(null,null,1,i),i.lineEnd()),i.polygonEnd(),g=p=null},sphere:function(){i.polygonStart(),i.lineStart(),e(null,null,1,i),i.lineEnd(),i.polygonEnd()}},x=Ce(),M=t(x);return y}}function Ae(n){return n.length>1}function Ce(){var n,t=[];return{lineStart:function(){t.push(n=[])},point:function(t,e){n.push([t,e])},lineEnd:g,buffer:function(){var e=t;return t=[],n=null,e},rejoin:function(){t.length>1&&t.push(t.pop().concat(t.shift()))}}}function Ne(n,t){return((n=n.x)[0]<0?n[1]-Ea-Aa:Ea-n[1])-((t=t.x)[0]<0?t[1]-Ea-Aa:Ea-t[1])}function Le(n,t){var e=n[0],r=n[1],u=[Math.sin(e),-Math.cos(e),0],i=0,o=0;hc.reset();for(var a=0,c=t.length;c>a;++a){var s=t[a],l=s.length;if(l)for(var 
f=s[0],h=f[0],g=f[1]/2+Sa/4,p=Math.sin(g),v=Math.cos(g),d=1;;){d===l&&(d=0),n=s[d];var m=n[0],y=n[1]/2+Sa/4,x=Math.sin(y),M=Math.cos(y),_=m-h,b=_>=0?1:-1,w=b*_,S=w>Sa,k=p*x;if(hc.add(Math.atan2(k*b*Math.sin(w),v*M+k*Math.cos(w))),i+=S?_+b*ka:_,S^h>=e^m>=e){var E=fe(se(f),se(n));pe(E);var A=fe(u,E);pe(A);var C=(S^_>=0?-1:1)*X(A[2]);(r>C||r===C&&(E[0]||E[1]))&&(o+=S^_>=0?1:-1)}if(!d++)break;h=m,p=x,v=M,f=n}}return(-Aa>i||Aa>i&&0>hc)^1&o}function Te(n){var t,e=0/0,r=0/0,u=0/0;return{lineStart:function(){n.lineStart(),t=1},point:function(i,o){var a=i>0?Sa:-Sa,c=oa(i-e);oa(c-Sa)<Aa?(n.point(e,r=(r+o)/2>0?Ea:-Ea),n.point(u,r),n.lineEnd(),n.lineStart(),n.point(a,r),n.point(i,r),t=0):u!==a&&c>=Sa&&(oa(e-u)<Aa&&(e-=u*Aa),oa(i-a)<Aa&&(i-=a*Aa),r=qe(e,r,i,o),n.point(u,r),n.lineEnd(),n.lineStart(),n.point(a,r),t=0),n.point(e=i,r=o),u=a},lineEnd:function(){n.lineEnd(),e=r=0/0},clean:function(){return 2-t}}}function qe(n,t,e,r){var u,i,o=Math.sin(n-e);return oa(o)>Aa?Math.atan((Math.sin(t)*(i=Math.cos(r))*Math.sin(e)-Math.sin(r)*(u=Math.cos(t))*Math.sin(n))/(u*i*o)):(t+r)/2}function ze(n,t,e,r){var u;if(null==n)u=e*Ea,r.point(-Sa,u),r.point(0,u),r.point(Sa,u),r.point(Sa,0),r.point(Sa,-u),r.point(0,-u),r.point(-Sa,-u),r.point(-Sa,0),r.point(-Sa,u);else if(oa(n[0]-t[0])>Aa){var i=n[0]<t[0]?Sa:-Sa;u=e*i/2,r.point(-i,u),r.point(0,u),r.point(i,u)}else r.point(t[0],t[1])}function Re(n){function t(n,t){return Math.cos(n)*Math.cos(t)>i}function e(n){var e,i,c,s,l;return{lineStart:function(){s=c=!1,l=1},point:function(f,h){var g,p=[f,h],v=t(f,h),d=o?v?0:u(f,h):v?u(f+(0>f?Sa:-Sa),h):0;if(!e&&(s=c=v)&&n.lineStart(),v!==c&&(g=r(e,p),(de(e,g)||de(p,g))&&(p[0]+=Aa,p[1]+=Aa,v=t(p[0],p[1]))),v!==c)l=0,v?(n.lineStart(),g=r(p,e),n.point(g[0],g[1])):(g=r(e,p),n.point(g[0],g[1]),n.lineEnd()),e=g;else if(a&&e&&o^v){var 
// (continuation of Re's clip stream) emits the two-intersection case when a segment enters and leaves the
// clip circle; r(n,t,e) solves the segment/circle intersection (with the second root when e is truthy);
// u(t,e) returns a 4-bit region code used to suppress spurious corner connections; the final Ee(...) wires
// the visibility test, line clipper, interpolator cr(n, 6deg) and a starting point into the generic clip factory.
// De(n,t,e,r): parametric segment clipping against the box [n,e]x[t,r] — trims u.a/u.b by entry/exit
//   parameters h/g (resembles Liang-Barsky; returns the clipped segment or undefined when fully outside).
// Pe(n,t,e,r): axis-aligned extent clip stream factory. u/i/o order boundary points by edge code for ring
//   stitching; inner c(n) is a winding containment test over the buffered rings d; s(...) walks box corners
//   between edge codes; p(n,t) clamps coordinates to +/-Ac and emits clipped pieces through C (the De clipper),
//   tracking line state in _/b/w/S/k; polygonEnd merges buffered rings and re-emits via we.
// Ue(n,t): composes two point transforms, with a composed invert when both provide one.
// je(n): wraps raw-projection factory n (via nr) and adds a `parallels` accessor (degrees <-> radians).
// They(n,t): two-standard-parallel raw projection with exact invert; the sqrt(i - 2*u*sin(t))/u form
//   resembles a conic equal-area — NOTE(review): confirm against upstream naming before documenting further.
// Fe: area-style accumulator folding u*n - r*t cross terms into Nc via Rc's point/lineEnd handlers.
// Oe(n,t): running bounds tracker (Lc/qc = x min/max, Tc/zc = y min/max).
// Ye(): SVG path-string sink — buffers M/L/Z commands in o, renders points as circles of radius 4.5 (Ie),
//   result() joins and resets the buffer. Ie(n): circle subpath string of radius n.
// Ze/Ve/Xe/$e: planar centroid accumulators on Pc — point count (yc), length-weighted (xc/Mc/_c) and
//   area-weighted (bc/wc/Sc) running sums.
// Be(n): canvas rendering sink mirroring Ye — moveTo/arc/lineTo/closePath against context n, pointRadius accessor.
// We(n): start of the adaptive-resampling stream factory (continued on the next block's lines).
m;d&i||!(m=r(p,e,!0))||(l=0,o?(n.lineStart(),n.point(m[0][0],m[0][1]),n.point(m[1][0],m[1][1]),n.lineEnd()):(n.point(m[1][0],m[1][1]),n.lineEnd(),n.lineStart(),n.point(m[0][0],m[0][1])))}!v||e&&de(e,p)||n.point(p[0],p[1]),e=p,c=v,i=d},lineEnd:function(){c&&n.lineEnd(),e=null},clean:function(){return l|(s&&c)<<1}}}function r(n,t,e){var r=se(n),u=se(t),o=[1,0,0],a=fe(r,u),c=le(a,a),s=a[0],l=c-s*s;if(!l)return!e&&n;var f=i*c/l,h=-i*s/l,g=fe(o,a),p=ge(o,f),v=ge(a,h);they(p,v);var d=g,m=le(p,d),y=le(d,d),x=m*m-y*(le(p,p)-1);if(!(0>x)){var M=Math.sqrt(x),_=ge(d,(-m-M)/y);if(they(_,p),_=ve(_),!e)return _;var b,w=n[0],S=t[0],k=n[1],E=t[1];w>S&&(b=w,w=S,S=b);var A=S-w,C=oa(A-Sa)<Aa,N=C||Aa>A;if(!C&&k>E&&(b=k,k=E,E=b),N?C?k+E>0^_[1]<(oa(_[0]-w)<Aa?k:E):k<=_[1]&&_[1]<=E:A>Sa^(w<=_[0]&&_[0]<=S)){var L=ge(d,(-m+M)/y);return they(L,p),[_,ve(L)]}}}function u(t,e){var r=o?n:Sa-n,u=0;return-r>t?u|=1:t>r&&(u|=2),-r>e?u|=4:e>r&&(u|=8),u}var i=Math.cos(n),o=i>0,a=oa(i)>Aa,c=cr(n,6*Na);return Ee(t,e,c,o?[0,-n]:[-Sa,n-Sa])}function De(n,t,e,r){return function(u){var i,o=u.a,a=u.b,c=o.x,s=o.y,l=a.x,f=a.y,h=0,g=1,p=l-c,v=f-s;if(i=n-c,p||!(i>0)){if(i/=p,0>p){if(h>i)return;g>i&&(g=i)}else if(p>0){if(i>g)return;i>h&&(h=i)}if(i=e-c,p||!(0>i)){if(i/=p,0>p){if(i>g)return;i>h&&(h=i)}else if(p>0){if(h>i)return;g>i&&(g=i)}if(i=t-s,v||!(i>0)){if(i/=v,0>v){if(h>i)return;g>i&&(g=i)}else if(v>0){if(i>g)return;i>h&&(h=i)}if(i=r-s,v||!(0>i)){if(i/=v,0>v){if(i>g)return;i>h&&(h=i)}else if(v>0){if(h>i)return;g>i&&(g=i)}return h>0&&(u.a={x:c+h*p,y:s+h*v}),1>g&&(u.b={x:c+g*p,y:s+g*v}),u}}}}}}function Pe(n,t,e,r){function u(r,u){return oa(r[0]-n)<Aa?u>0?0:3:oa(r[0]-e)<Aa?u>0?2:1:oa(r[1]-t)<Aa?u>0?1:0:u>0?3:2}function i(n,t){return o(n.x,t.x)}function o(n,t){var e=u(n,1),r=u(t,1);return e!==r?e-r:0===e?t[1]-n[1]:1===e?n[0]-t[0]:2===e?n[1]-t[1]:t[0]-n[0]}return function(a){function c(n){for(var t=0,e=d.length,r=n[1],u=0;e>u;++u)for(var 
i,o=1,a=d[u],c=a.length,s=a[0];c>o;++o)i=a[o],s[1]<=r?i[1]>r&&Z(s,i,n)>0&&++t:i[1]<=r&&Z(s,i,n)<0&&--t,s=i;return 0!==t}function s(i,a,c,s){var l=0,f=0;if(null==i||(l=u(i,c))!==(f=u(a,c))||o(i,a)<0^c>0){do s.point(0===l||3===l?n:e,l>1?r:t);while((l=(l+c+4)%4)!==f)}else s.point(a[0],a[1])}function l(u,i){return u>=n&&e>=u&&i>=t&&r>=i}function f(n,t){l(n,t)&&a.point(n,t)}function h(){N.point=p,d&&d.push(m=[]),S=!0,w=!1,_=b=0/0}function g(){v&&(p(y,x),M&&w&&A.rejoin(),v.push(A.buffer())),N.point=f,w&&a.lineEnd()}function p(n,t){n=Math.max(-Ac,Math.min(Ac,n)),t=Math.max(-Ac,Math.min(Ac,t));var e=l(n,t);if(d&&m.push([n,t]),S)y=n,x=t,M=e,S=!1,e&&(a.lineStart(),a.point(n,t));else if(e&&w)a.point(n,t);else{var r={a:{x:_,y:b},b:{x:n,y:t}};C(r)?(w||(a.lineStart(),a.point(r.a.x,r.a.y)),a.point(r.b.x,r.b.y),e||a.lineEnd(),k=!1):e&&(a.lineStart(),a.point(n,t),k=!1)}_=n,b=t,w=e}var v,d,m,y,x,M,_,b,w,S,k,E=a,A=Ce(),C=De(n,t,e,r),N={point:f,lineStart:h,lineEnd:g,polygonStart:function(){a=A,v=[],d=[],k=!0},polygonEnd:function(){a=E,v=Xo.merge(v);var t=c([n,r]),e=k&&t,u=v.length;(e||u)&&(a.polygonStart(),e&&(a.lineStart(),s(null,null,1,a),a.lineEnd()),u&&we(v,i,t,s,a),a.polygonEnd()),v=d=m=null}};return N}}function Ue(n,t){function e(e,r){return e=n(e,r),t(e[0],e[1])}return n.invert&&t.invert&&(e.invert=function(e,r){return e=t.invert(e,r),e&&n.invert(e[0],e[1])}),e}function je(n){var t=0,e=Sa/3,r=nr(n),u=r(t,e);return u.parallels=function(n){return arguments.length?r(t=n[0]*Sa/180,e=n[1]*Sa/180):[180*(t/Sa),180*(e/Sa)]},u}function They(n,t){function e(n,t){var e=Math.sqrt(i-2*u*Math.sin(t))/u;return[e*Math.sin(n*=u),o-e*Math.cos(n)]}var r=Math.sin(n),u=(r+Math.sin(t))/2,i=1+r*(2*u-r),o=Math.sqrt(i)/u;return e.invert=function(n,t){var e=o-t;return[Math.atan2(n,e)/u,X((i-(n*n+e*e)*u*u)/(2*u))]},e}function Fe(){function n(n,t){Nc+=u*n-r*t,r=n,u=t}var t,e,r,u;Rc.point=function(i,o){Rc.point=n,t=r=i,e=u=o},Rc.lineEnd=function(){n(t,e)}}function 
Oe(n,t){Lc>n&&(Lc=n),n>qc&&(qc=n),Tc>t&&(Tc=t),t>zc&&(zc=t)}function Ye(){function n(n,t){o.push("M",n,",",t,i)}function t(n,t){o.push("M",n,",",t),a.point=e}function e(n,t){o.push("L",n,",",t)}function r(){a.point=n}function u(){o.push("Z")}var i=Ie(4.5),o=[],a={point:n,lineStart:function(){a.point=t},lineEnd:r,polygonStart:function(){a.lineEnd=u},polygonEnd:function(){a.lineEnd=r,a.point=n},pointRadius:function(n){return i=Ie(n),a},result:function(){if(o.length){var n=o.join("");return o=[],n}}};return a}function Ie(n){return"m0,"+n+"a"+n+","+n+" 0 1,1 0,"+-2*n+"a"+n+","+n+" 0 1,1 0,"+2*n+"z"}function Ze(n,t){dc+=n,mc+=t,++yc}function Ve(){function n(n,r){var u=n-t,i=r-e,o=Math.sqrt(u*u+i*i);xc+=o*(t+n)/2,Mc+=o*(e+r)/2,_c+=o,Ze(t=n,e=r)}var t,e;Pc.point=function(r,u){Pc.point=n,Ze(t=r,e=u)}}function Xe(){Pc.point=Ze}function $e(){function n(n,t){var e=n-r,i=t-u,o=Math.sqrt(e*e+i*i);xc+=o*(r+n)/2,Mc+=o*(u+t)/2,_c+=o,o=u*n-r*t,bc+=o*(r+n),wc+=o*(u+t),Sc+=3*o,Ze(r=n,u=t)}var t,e,r,u;Pc.point=function(i,o){Pc.point=n,Ze(t=r=i,e=u=o)},Pc.lineEnd=function(){n(t,e)}}function Be(n){function t(t,e){n.moveTo(t,e),n.arc(t,e,o,0,ka)}function e(t,e){n.moveTo(t,e),a.point=r}function r(t,e){n.lineTo(t,e)}function u(){a.point=t}function i(){n.closePath()}var o=4.5,a={point:t,lineStart:function(){a.point=e},lineEnd:u,polygonStart:function(){a.lineEnd=i},polygonEnd:function(){a.lineEnd=u,a.point=t},pointRadius:function(n){return o=n,a},result:g};return a}function We(n){function t(n){return(a?r:e)(n)}function e(t){return Ke(t,function(e,r){e=n(e,r),t.point(e[0],e[1])})}function r(t){function e(e,r){e=n(e,r),t.point(e[0],e[1])}function r(){x=0/0,S.point=i,t.lineStart()}function i(e,r){var i=se([e,r]),o=n(e,r);u(x,M,y,_,b,w,x=o[0],M=o[1],y=e,_=i[0],b=i[1],w=i[2],a,t),t.point(x,M)}function o(){S.point=e,t.lineEnd()}function c(){r(),S.point=s,S.lineEnd=l}function s(n,t){i(f=n,h=t),g=x,p=M,v=_,d=b,m=w,S.point=i}function l(){u(x,M,y,_,b,w,g,p,f,v,d,m,a,t),S.lineEnd=o,o()}var 
// (continuation of We) adaptive resampling: u(...) recursively bisects a segment while the recursion budget d
// holds and the midpoint deviates beyond tolerance i (=.5 default) or the turn exceeds cos(30deg) (o),
// emitting intermediate projected points; the `precision` accessor sets i = n*n and re-enables depth 16.
// Je(n): wraps a degree-based transform into a resampled radian stream; Ge: stream holder constructor.
// Ke(n,t): forwards every stream event of n unchanged except `point`, which is replaced by t.
// Qe(n): convenience — build a projection from a fixed raw projection n via nr.
// nr(n): projection factory core. t/e are forward/inverse through rotate (ur(m,y,x)) composed with the raw
//   projection i, then scaled (h) and translated (c,s about g,p); r() recomputes the composition and the
//   center offset; u() invalidates any cached stream. Accessors: stream, clipAngle (Re or no-op Ec),
//   clipExtent (Pe or identity bt), scale, translate, center, rotate; precision is rebound from the resampler f.
// tr(n): adapter converting incoming degrees to radians before n.point.
// er: identity raw projection; rr: wraps longitude into (-Sa, Sa].
// ur(n,t,e): composes delta-lambda rotation (or) with delta-phi/gamma rotation (ar), skipping identity parts.
// ir/or: longitude-offset transform and its invertible wrapper; ar(n,t): two-angle spherical rotation with invert.
// cr(n,t): emits points along a small circle of angular radius n in steps of t radians between angles u..i.
// sr(n,t): angle of point t around the circle center offset n (normalized to [0, 2*PI)).
// lr/fr: meridian/parallel sample generators over a range with step e; hr/gr: source/target accessors.
// pr(n,t,e,r): great-arc interpolator between (n,t) and (e,r); returned p(n) walks the arc by spherical
//   linear interpolation, p.distance carries the central angle h; degenerate zero-length arcs return the start.
f,h,g,p,v,d,m,y,x,M,_,b,w,S={point:e,lineStart:r,lineEnd:o,polygonStart:function(){t.polygonStart(),S.lineStart=c},polygonEnd:function(){t.polygonEnd(),S.lineStart=r}};return S}function u(t,e,r,a,c,s,l,f,h,g,p,v,d,m){var y=l-t,x=f-e,M=y*y+x*x;if(M>4*i&&d--){var _=a+g,b=c+p,w=s+v,S=Math.sqrt(_*_+b*b+w*w),k=Math.asin(w/=S),E=oa(oa(w)-1)<Aa||oa(r-h)<Aa?(r+h)/2:Math.atan2(b,_),A=n(E,k),C=A[0],N=A[1],L=C-t,T=N-e,q=x*L-y*T;(q*q/M>i||oa((y*L+x*T)/M-.5)>.3||o>a*g+c*p+s*v)&&(u(t,e,r,a,c,s,C,N,E,_/=S,b/=S,w,d,m),m.point(C,N),u(C,N,E,_,b,w,l,f,h,g,p,v,d,m))}}var i=.5,o=Math.cos(30*Na),a=16;return t.precision=function(n){return arguments.length?(a=(i=n*n)>0&&16,t):Math.sqrt(i)},t}function Je(n){var t=We(function(t,e){return n([t*La,e*La])});return function(n){return tr(t(n))}}function Ge(n){this.stream=n}function Ke(n,t){return{point:t,sphere:function(){n.sphere()},lineStart:function(){n.lineStart()},lineEnd:function(){n.lineEnd()},polygonStart:function(){n.polygonStart()},polygonEnd:function(){n.polygonEnd()}}}function Qe(n){return nr(function(){return n})()}function nr(n){function t(n){return n=a(n[0]*Na,n[1]*Na),[n[0]*h+c,s-n[1]*h]}function e(n){return n=a.invert((n[0]-c)/h,(s-n[1])/h),n&&[n[0]*La,n[1]*La]}function r(){a=Ue(o=ur(m,y,x),i);var n=i(v,d);return c=g-n[0]*h,s=p+n[1]*h,u()}function u(){return l&&(l.valid=!1,l=null),t}var i,o,a,c,s,l,f=We(function(n,t){return n=i(n,t),[n[0]*h+c,s-n[1]*h]}),h=150,g=480,p=250,v=0,d=0,m=0,y=0,x=0,M=Ec,_=bt,b=null,w=null;return t.stream=function(n){return l&&(l.valid=!1),l=tr(M(o,f(_(n)))),l.valid=!0,l},t.clipAngle=function(n){return arguments.length?(M=null==n?(b=n,Ec):Re((b=+n)*Na),u()):b},t.clipExtent=function(n){return arguments.length?(w=n,_=n?Pe(n[0][0],n[0][1],n[1][0],n[1][1]):bt,u()):w},t.scale=function(n){return arguments.length?(h=+n,r()):h},t.translate=function(n){return arguments.length?(g=+n[0],p=+n[1],r()):[g,p]},t.center=function(n){return 
arguments.length?(v=n[0]%360*Na,d=n[1]%360*Na,r()):[v*La,d*La]},t.rotate=function(n){return arguments.length?(m=n[0]%360*Na,y=n[1]%360*Na,x=n.length>2?n[2]%360*Na:0,r()):[m*La,y*La,x*La]},Xo.rebind(t,f,"precision"),function(){return i=n.apply(this,arguments),t.invert=i.invert&&e,r()}}function tr(n){return Ke(n,function(t,e){n.point(t*Na,e*Na)})}function er(n,t){return[n,t]}function rr(n,t){return[n>Sa?n-ka:-Sa>n?n+ka:n,t]}function ur(n,t,e){return n?t||e?Ue(or(n),ar(t,e)):or(n):t||e?ar(t,e):rr}function ir(n){return function(t,e){return t+=n,[t>Sa?t-ka:-Sa>t?t+ka:t,e]}}function or(n){var t=ir(n);return t.invert=ir(-n),t}function ar(n,t){function e(n,t){var e=Math.cos(t),a=Math.cos(n)*e,c=Math.sin(n)*e,s=Math.sin(t),l=s*r+a*u;return[Math.atan2(c*i-l*o,a*r-s*u),X(l*i+c*o)]}var r=Math.cos(n),u=Math.sin(n),i=Math.cos(t),o=Math.sin(t);return e.invert=function(n,t){var e=Math.cos(t),a=Math.cos(n)*e,c=Math.sin(n)*e,s=Math.sin(t),l=s*i-c*o;return[Math.atan2(c*i+s*o,a*r+l*u),X(l*r-a*u)]},e}function cr(n,t){var e=Math.cos(n),r=Math.sin(n);return function(u,i,o,a){var c=o*t;null!=u?(u=sr(e,u),i=sr(e,i),(o>0?i>u:u>i)&&(u+=o*ka)):(u=n+o*ka,i=n-.5*c);for(var s,l=u;o>0?l>i:i>l;l-=c)a.point((s=ve([e,-r*Math.cos(l),-r*Math.sin(l)]))[0],s[1])}}function sr(n,t){var e=se(t);e[0]-=n,pe(e);var r=V(-e[1]);return((-e[2]<0?-r:r)+2*Math.PI-Aa)%(2*Math.PI)}function lr(n,t,e){var r=Xo.range(n,t-Aa,e).concat(t);return function(n){return r.map(function(t){return[n,t]})}}function fr(n,t,e){var r=Xo.range(n,t-Aa,e).concat(t);return function(n){return r.map(function(t){return[t,n]})}}function hr(n){return n.source}function gr(n){return n.target}function pr(n,t,e,r){var u=Math.cos(t),i=Math.sin(t),o=Math.cos(r),a=Math.sin(r),c=u*Math.cos(n),s=u*Math.sin(n),l=o*Math.cos(e),f=o*Math.sin(e),h=2*Math.asin(Math.sqrt(J(r-t)+u*o*J(e-n))),g=1/Math.sin(h),p=h?function(n){var 
t=Math.sin(n*=h)*g,e=Math.sin(h-n)*g,r=e*c+t*l,u=e*s+t*f,o=e*i+t*a;return[Math.atan2(u,r)*La,Math.atan2(o,Math.sqrt(r*r+u*u))*La]}:function(){return[n*La,t*La]};return p.distance=h,p}function vr(){function n(n,u){var i=Math.sin(u*=Na),o=Math.cos(u),a=oa((n*=Na)-t),c=Math.cos(a);Uc+=Math.atan2(Math.sqrt((a=o*Math.sin(a))*a+(a=r*i-e*o*c)*a),e*i+r*o*c),t=n,e=i,r=o}var t,e,r;jc.point=function(u,i){t=u*Na,e=Math.sin(i*=Na),r=Math.cos(i),jc.point=n,jc.lineEnd=function(){jc.point=jc.lineEnd=g}}function dr(n,t){function e(t,e){var r=Math.cos(t),u=Math.cos(e),i=n(r*u);return[i*u*Math.sin(t),i*Math.sin(e)]}return e.invert=function(n,e){var r=Math.sqrt(n*n+e*e),u=t(r),i=Math.sin(u),o=Math.cos(u);return[Math.atan2(n*i,r*o),Math.asin(r&&e*i/r)]},e}function mr(n,t){function e(n,t){var e=oa(oa(t)-Ea)<Aa?0:o/Math.pow(u(t),i);return[e*Math.sin(i*n),o-e*Math.cos(i*n)]}var r=Math.cos(n),u=function(n){return Math.tan(Sa/4+n/2)},i=n===t?Math.sin(n):Math.log(r/Math.cos(t))/Math.log(u(t)/u(n)),o=r*Math.pow(u(n),i)/i;return i?(e.invert=function(n,t){var e=o-t,r=I(i)*Math.sqrt(n*n+e*e);return[Math.atan2(n,e)/i,2*Math.atan(Math.pow(o/r,1/i))-Ea]},e):xr}function yr(n,t){function e(n,t){var e=i-t;return[e*Math.sin(u*n),i-e*Math.cos(u*n)]}var r=Math.cos(n),u=n===t?Math.sin(n):(r-Math.cos(t))/(t-n),i=r/u+n;return oa(u)<Aa?er:(e.invert=function(n,t){var e=i-t;return[Math.atan2(n,e)/u,i-I(u)*Math.sqrt(n*n+e*e)]},e)}function xr(n,t){return[n,Math.log(Math.tan(Sa/4+t/2))]}function Mr(n){var t,e=Qe(n),r=e.scale,u=e.translate,i=e.clipExtent;return e.scale=function(){var n=r.apply(e,arguments);return n===e?t?e.clipExtent(null):e:n},e.translate=function(){var n=u.apply(e,arguments);return n===e?t?e.clipExtent(null):e:n},e.clipExtent=function(n){var o=i.apply(e,arguments);if(o===e){if(t=null==n){var a=Sa*r(),c=u();i([[c[0]-a,c[1]-a],[c[0]+a,c[1]+a]])}}else t&&(o=null);return o},e.clipExtent(null)}function _r(n,t){return[Math.log(Math.tan(Sa/4+t/2)),-n]}function br(n){return n[0]}function wr(n){return 
// (close of wr: returns n[1].) Sr(n): builds a hull index chain over presorted points using orientation test Z,
// popping non-left turns (half-hull construction). kr: lexicographic point compare; Er: orientation predicate;
// Ar(n,t,e,r): intersection point of segments (n,t) and (e,r) by parametric solve; Cr: first==last ring test.
// --- Voronoi sweep machinery (structure resembles Fortune's algorithm — NOTE(review): confirm naming) ---
// Nr/Lr/Tr: beach-line arc records pooled in Jc (Lr allocates/recycles, Tr detaches from tree $c and recycles).
// qr(n): handles a circle event — collapses the run of adjacent arcs sharing the event point, closes their
//   edges ($r) and creates the new boundary edge (Vr), re-checking neighbors (Fr).
// zr(n): inserts site n into the beach line — binary-search descent over $c using breakpoints Rr/Dr, splits
//   the covering arc, creates the dividing edge, and schedules circle events on both neighbors.
// Rr(n,t)/Dr(n,t): x-coordinates of an arc's left/right breakpoints at sweep line y=t (parabola intersection).
// Pr(n): Voronoi cell (site + half-edge list); Ur(n): patches cell borders by inserting corner-following
//   border half-edges (Br over Xr pseudo-edges) wherever consecutive edge endpoints leave gaps on the box n.
// jr: clockwise angle comparator; Hr: circle-event record pooled in Gc.
// Fr(n): computes the circumcircle event of arc n with its neighbors and inserts it into event tree Wc
//   (Bc tracks the first event); skips near-collinear triples (h >= -Ca guard).
// Or(n): cancels n's pending circle event. Yr(n): drops edges fully outside/degenerate after clipping (Ir + De).
// Ir(n,t): derives missing edge endpoints from the perpendicular bisector of sites l,r clipped to box t.
n[1]}function Sr(n){for(var t=n.length,e=[0,1],r=2,u=2;t>u;u++){for(;r>1&&Z(n[e[r-2]],n[e[r-1]],n[u])<=0;)--r;e[r++]=u}return e.slice(0,r)}function kr(n,t){return n[0]-t[0]||n[1]-t[1]}function Er(n,t,e){return(e[0]-t[0])*(n[1]-t[1])<(e[1]-t[1])*(n[0]-t[0])}function Ar(n,t,e,r){var u=n[0],i=e[0],o=t[0]-u,a=r[0]-i,c=n[1],s=e[1],l=t[1]-c,f=r[1]-s,h=(a*(c-s)-f*(u-i))/(f*o-a*l);return[u+h*o,c+h*l]}function Cr(n){var t=n[0],e=n[n.length-1];return!(t[0]-e[0]||t[1]-e[1])}function Nr(){Jr(this),this.edge=this.site=this.circle=null}function Lr(n){var t=Jc.pop()||new Nr;return t.site=n,t}function Tr(n){Or(n),$c.remove(n),Jc.push(n),Jr(n)}function qr(n){var t=n.circle,e=t.x,r=t.cy,u={x:e,y:r},i=n.P,o=n.N,a=[n];Tr(n);for(var c=i;c.circle&&oa(e-c.circle.x)<Aa&&oa(r-c.circle.cy)<Aa;)i=c.P,a.unshift(c),Tr(c),c=i;a.unshift(c),Or(c);for(var s=o;s.circle&&oa(e-s.circle.x)<Aa&&oa(r-s.circle.cy)<Aa;)o=s.N,a.push(s),Tr(s),s=o;a.push(s),Or(s);var l,f=a.length;for(l=1;f>l;++l)s=a[l],c=a[l-1],$r(s.edge,c.site,s.site,u);c=a[0],s=a[f-1],s.edge=Vr(c.site,s.site,null,u),Fr(c),Fr(s)}function zr(n){for(var t,e,r,u,i=n.x,o=n.y,a=$c._;a;)if(r=Rr(a,o)-i,r>Aa)a=a.L;else{if(u=i-Dr(a,o),!(u>Aa)){r>-Aa?(t=a.P,e=a):u>-Aa?(t=a,e=a.N):t=e=a;break}if(!a.R){t=a;break}a=a.R}var c=Lr(n);if($c.insert(t,c),t||e){if(t===e)return Or(t),e=Lr(t.site),$c.insert(c,e),c.edge=e.edge=Vr(t.site,c.site),Fr(t),Fr(e),void 0;if(!e)return c.edge=Vr(t.site,c.site),void 0;Or(t),Or(e);var s=t.site,l=s.x,f=s.y,h=n.x-l,g=n.y-f,p=e.site,v=p.x-l,d=p.y-f,m=2*(h*d-g*v),y=h*h+g*g,x=v*v+d*d,M={x:(d*y-g*x)/m+l,y:(h*x-v*y)/m+f};$r(e.edge,s,p,M),c.edge=Vr(s,n,null,M),e.edge=Vr(n,p,null,M),Fr(t),Fr(e)}}function Rr(n,t){var e=n.site,r=e.x,u=e.y,i=u-t;if(!i)return r;var o=n.P;if(!o)return-1/0;e=o.site;var a=e.x,c=e.y,s=c-t;if(!s)return a;var l=a-r,f=1/i-1/s,h=l/s;return f?(-h+Math.sqrt(h*h-2*f*(l*l/(-2*s)-c+s/2+u-i/2)))/f+r:(r+a)/2}function Dr(n,t){var e=n.N;if(e)return Rr(e,t);var r=n.site;return r.y===t?r.x:1/0}function 
Pr(n){this.site=n,this.edges=[]}function Ur(n){for(var t,e,r,u,i,o,a,c,s,l,f=n[0][0],h=n[1][0],g=n[0][1],p=n[1][1],v=Xc,d=v.length;d--;)if(i=v[d],i&&i.prepare())for(a=i.edges,c=a.length,o=0;c>o;)l=a[o].end(),r=l.x,u=l.y,s=a[++o%c].start(),t=s.x,e=s.y,(oa(r-t)>Aa||oa(u-e)>Aa)&&(a.splice(o,0,new Br(Xr(i.site,l,oa(r-f)<Aa&&p-u>Aa?{x:f,y:oa(t-f)<Aa?e:p}:oa(u-p)<Aa&&h-r>Aa?{x:oa(e-p)<Aa?t:h,y:p}:oa(r-h)<Aa&&u-g>Aa?{x:h,y:oa(t-h)<Aa?e:g}:oa(u-g)<Aa&&r-f>Aa?{x:oa(e-g)<Aa?t:f,y:g}:null),i.site,null)),++c)}function jr(n,t){return t.angle-n.angle}function Hr(){Jr(this),this.x=this.y=this.arc=this.site=this.cy=null}function Fr(n){var t=n.P,e=n.N;if(t&&e){var r=t.site,u=n.site,i=e.site;if(r!==i){var o=u.x,a=u.y,c=r.x-o,s=r.y-a,l=i.x-o,f=i.y-a,h=2*(c*f-s*l);if(!(h>=-Ca)){var g=c*c+s*s,p=l*l+f*f,v=(f*g-s*p)/h,d=(c*p-l*g)/h,f=d+a,m=Gc.pop()||new Hr;m.arc=n,m.site=u,m.x=v+o,m.y=f+Math.sqrt(v*v+d*d),m.cy=f,n.circle=m;for(var y=null,x=Wc._;x;)if(m.y<x.y||m.y===x.y&&m.x<=x.x){if(!x.L){y=x.P;break}x=x.L}else{if(!x.R){y=x;break}x=x.R}Wc.insert(y,m),y||(Bc=m)}}}}function Or(n){var t=n.circle;t&&(t.P||(Bc=t.N),Wc.remove(t),Gc.push(t),Jr(t),n.circle=null)}function Yr(n){for(var t,e=Vc,r=De(n[0][0],n[0][1],n[1][0],n[1][1]),u=e.length;u--;)t=e[u],(!Ir(t,n)||!r(t)||oa(t.a.x-t.b.x)<Aa&&oa(t.a.y-t.b.y)<Aa)&&(t.a=t.b=null,e.splice(u,1))}function Ir(n,t){var e=n.b;if(e)return!0;var r,u,i=n.a,o=t[0][0],a=t[1][0],c=t[0][1],s=t[1][1],l=n.l,f=n.r,h=l.x,g=l.y,p=f.x,v=f.y,d=(h+p)/2,m=(g+v)/2;if(v===g){if(o>d||d>=a)return;if(h>p){if(i){if(i.y>=s)return}else i={x:d,y:c};e={x:d,y:s}}else{if(i){if(i.y<c)return}else i={x:d,y:s};e={x:d,y:c}}}else if(r=(h-p)/(v-g),u=m-r*d,-1>r||r>1)if(h>p){if(i){if(i.y>=s)return}else i={x:(c-u)/r,y:c};e={x:(s-u)/r,y:s}}else{if(i){if(i.y<c)return}else i={x:(s-u)/r,y:s};e={x:(c-u)/r,y:c}}else if(v>g){if(i){if(i.x>=a)return}else i={x:o,y:r*o+u};e={x:a,y:r*a+u}}else{if(i){if(i.x<o)return}else i={x:a,y:r*a+u};e={x:o,y:r*o+u}}return n.a=i,n.b=e,!0}function 
Zr(n,t){this.l=n,this.r=t,this.a=this.b=null}function Vr(n,t,e,r){var u=new Zr(n,t);return Vc.push(u),e&&$r(u,n,t,e),r&&$r(u,t,n,r),Xc[n.i].edges.push(new Br(u,n,t)),Xc[t.i].edges.push(new Br(u,t,n)),u}function Xr(n,t,e){var r=new Zr(n,null);return r.a=t,r.b=e,Vc.push(r),r}function $r(n,t,e,r){n.a||n.b?n.l===e?n.b=r:n.a=r:(n.a=r,n.l=t,n.r=e)}function Br(n,t,e){var r=n.a,u=n.b;this.edge=n,this.site=t,this.angle=e?Math.atan2(e.y-t.y,e.x-t.x):n.l===t?Math.atan2(u.x-r.x,r.y-u.y):Math.atan2(r.x-u.x,u.y-r.y)}function Wr(){this._=null}function Jr(n){n.U=n.C=n.L=n.R=n.P=n.N=null}function Gr(n,t){var e=t,r=t.R,u=e.U;u?u.L===e?u.L=r:u.R=r:n._=r,r.U=u,e.U=r,e.R=r.L,e.R&&(e.R.U=e),r.L=e}function Kr(n,t){var e=t,r=t.L,u=e.U;u?u.L===e?u.L=r:u.R=r:n._=r,r.U=u,e.U=r,e.L=r.R,e.L&&(e.L.U=e),r.R=e}function Qr(n){for(;n.L;)n=n.L;return n}function nu(n,t){var e,r,u,i=n.sort(tu).pop();for(Vc=[],Xc=new Array(n.length),$c=new Wr,Wc=new Wr;;)if(u=Bc,i&&(!u||i.y<u.y||i.y===u.y&&i.x<u.x))(i.x!==e||i.y!==r)&&(Xc[i.i]=new Pr(i),zr(i),e=i.x,r=i.y),i=n.pop();else{if(!u)break;qr(u.arc)}t&&(Yr(t),Ur(t));var o={cells:Xc,edges:Vc};return $c=Wc=Vc=Xc=null,o}function tu(n,t){return t.y-n.y||t.x-n.x}function eu(n,t,e){return(n.x-e.x)*(t.y-n.y)-(n.x-t.x)*(e.y-n.y)}function ru(n){return n.x}function uu(n){return n.y}function iu(){return{leaf:!0,nodes:[],point:null,x:null,y:null}}function ou(n,t,e,r,u,i){if(!n(t,e,r,u,i)){var o=.5*(e+u),a=.5*(r+i),c=t.nodes;c[0]&&ou(n,c[0],e,r,o,a),c[1]&&ou(n,c[1],o,r,u,a),c[2]&&ou(n,c[2],e,a,o,i),c[3]&&ou(n,c[3],o,a,u,i)}}function au(n,t){n=Xo.rgb(n),t=Xo.rgb(t);var e=n.r,r=n.g,u=n.b,i=t.r-e,o=t.g-r,a=t.b-u;return function(n){return"#"+vt(Math.round(e+i*n))+vt(Math.round(r+o*n))+vt(Math.round(u+a*n))}}function cu(n,t){var e,r={},u={};for(e in n)e in t?r[e]=fu(n[e],t[e]):u[e]=n[e];for(e in t)e in n||(u[e]=t[e]);return function(n){for(e in r)u[e]=r[e](n);return u}}function su(n,t){return t-=n=+n,function(e){return n+t*e}}function lu(n,t){var 
// (continuation of lu) number-aware string interpolation: regex Qc scans numeric runs in both strings,
// pairs them up (literal text kept in s, numeric slots in l interpolated via su), merges adjacent literals,
// and returns a closure assembling the result; degenerate cases collapse to a constant or single interpolator.
// fu(n,t): dispatch — tries Xo.interpolators from last registered to first, returns the first match.
// hu(n,t): array interpolator — pairwise fu over the common prefix, trailing elements copied fixed.
// gu/pu/vu: easing combinators — clamp to [0,1], reverse (1 - f(1-t)), and reflect (mirror around .5).
// du/mu/yu/xu/Mu/_u/bu/wu/Su/ku: easing curves — quad, cubic, poly-approx cubic (yu), pow, sin, exp,
//   circle, elastic (wu, with amplitude n / period t), back (Su, overshoot n), bounce (ku piecewise parabolas).
// Eu/Au/Cu: color-space interpolators for hcl/hsl/lab; hue deltas wrap across +/-180deg, NaN channels
//   fall back to the other endpoint's value; each returns a CSS color string per t.
// Nu(n,t): numeric interpolator rounded to integers.
// Lu(n): decomposes a 2d affine matrix {a..f} into translate/rotate/scale/skewX fields (column normalization
//   via qu, Gram-Schmidt-style step via zu, sign fix when the determinant is negative).
// Tu: 2d dot product; qu: normalize in place returning original length; zu: scaled vector add.
// Ru(n,t): start of the transform-string interpolator (continues on the next lines).
e,r,u,i,o,a=0,c=0,s=[],l=[];for(n+="",t+="",Qc.lastIndex=0,r=0;e=Qc.exec(t);++r)e.index&&s.push(t.substring(a,c=e.index)),l.push({i:s.length,x:e[0]}),s.push(null),a=Qc.lastIndex;for(a<t.length&&s.push(t.substring(a)),r=0,i=l.length;(e=Qc.exec(n))&&i>r;++r)if(o=l[r],o.x==e[0]){if(o.i)if(null==s[o.i+1])for(s[o.i-1]+=o.x,s.splice(o.i,1),u=r+1;i>u;++u)l[u].i--;else for(s[o.i-1]+=o.x+s[o.i+1],s.splice(o.i,2),u=r+1;i>u;++u)l[u].i-=2;else if(null==s[o.i+1])s[o.i]=o.x;else for(s[o.i]=o.x+s[o.i+1],s.splice(o.i+1,1),u=r+1;i>u;++u)l[u].i--;l.splice(r,1),i--,r--}else o.x=su(parseFloat(e[0]),parseFloat(o.x));for(;i>r;)o=l.pop(),null==s[o.i+1]?s[o.i]=o.x:(s[o.i]=o.x+s[o.i+1],s.splice(o.i+1,1)),i--;return 1===s.length?null==s[0]?(o=l[0].x,function(n){return o(n)+""}):function(){return t}:function(n){for(r=0;i>r;++r)s[(o=l[r]).i]=o.x(n);return s.join("")}}function fu(n,t){for(var e,r=Xo.interpolators.length;--r>=0&&!(e=Xo.interpolators[r](n,t)););return e}function hu(n,t){var e,r=[],u=[],i=n.length,o=t.length,a=Math.min(n.length,t.length);for(e=0;a>e;++e)r.push(fu(n[e],t[e]));for(;i>e;++e)u[e]=n[e];for(;o>e;++e)u[e]=t[e];return function(n){for(e=0;a>e;++e)u[e]=r[e](n);return u}}function gu(n){return function(t){return 0>=t?0:t>=1?1:n(t)}}function pu(n){return function(t){return 1-n(1-t)}}function vu(n){return function(t){return.5*(.5>t?n(2*t):2-n(2-2*t))}}function du(n){return n*n}function mu(n){return n*n*n}function yu(n){if(0>=n)return 0;if(n>=1)return 1;var t=n*n,e=t*n;return 4*(.5>n?e:3*(n-t)+e-.75)}function xu(n){return function(t){return Math.pow(t,n)}}function Mu(n){return 1-Math.cos(n*Ea)}function _u(n){return Math.pow(2,10*(n-1))}function bu(n){return 1-Math.sqrt(1-n*n)}function wu(n,t){var e;return arguments.length<2&&(t=.45),arguments.length?e=t/ka*Math.asin(1/n):(n=1,e=t/4),function(r){return 1+n*Math.pow(2,-10*r)*Math.sin((r-e)*ka/t)}}function Su(n){return n||(n=1.70158),function(t){return t*t*((n+1)*t-n)}}function ku(n){return 
1/2.75>n?7.5625*n*n:2/2.75>n?7.5625*(n-=1.5/2.75)*n+.75:2.5/2.75>n?7.5625*(n-=2.25/2.75)*n+.9375:7.5625*(n-=2.625/2.75)*n+.984375}function Eu(n,t){n=Xo.hcl(n),t=Xo.hcl(t);var e=n.h,r=n.c,u=n.l,i=t.h-e,o=t.c-r,a=t.l-u;return isNaN(o)&&(o=0,r=isNaN(r)?t.c:r),isNaN(i)?(i=0,e=isNaN(e)?t.h:e):i>180?i-=360:-180>i&&(i+=360),function(n){return rt(e+i*n,r+o*n,u+a*n)+""}}function Au(n,t){n=Xo.hsl(n),t=Xo.hsl(t);var e=n.h,r=n.s,u=n.l,i=t.h-e,o=t.s-r,a=t.l-u;return isNaN(o)&&(o=0,r=isNaN(r)?t.s:r),isNaN(i)?(i=0,e=isNaN(e)?t.h:e):i>180?i-=360:-180>i&&(i+=360),function(n){return nt(e+i*n,r+o*n,u+a*n)+""}}function Cu(n,t){n=Xo.lab(n),t=Xo.lab(t);var e=n.l,r=n.a,u=n.b,i=t.l-e,o=t.a-r,a=t.b-u;return function(n){return ot(e+i*n,r+o*n,u+a*n)+""}}function Nu(n,t){return t-=n,function(e){return Math.round(n+t*e)}}function Lu(n){var t=[n.a,n.b],e=[n.c,n.d],r=qu(t),u=Tu(t,e),i=qu(zu(e,t,-u))||0;t[0]*e[1]<e[0]*t[1]&&(t[0]*=-1,t[1]*=-1,r*=-1,u*=-1),this.rotate=(r?Math.atan2(t[1],t[0]):Math.atan2(-e[0],e[1]))*La,this.translate=[n.e,n.f],this.scale=[r,i],this.skew=i?Math.atan2(u,i)*La:0}function Tu(n,t){return n[0]*t[0]+n[1]*t[1]}function qu(n){var t=Math.sqrt(Tu(n,n));return t&&(n[0]/=t,n[1]/=t),t}function zu(n,t,e){return n[0]+=e*t[0],n[1]+=e*t[1],n}function Ru(n,t){var e,r=[],u=[],i=Xo.transform(n),o=Xo.transform(t),a=i.translate,c=o.translate,s=i.rotate,l=o.rotate,f=i.skew,h=o.skew,g=i.scale,p=o.scale;return 
a[0]!=c[0]||a[1]!=c[1]?(r.push("translate(",null,",",null,")"),u.push({i:1,x:su(a[0],c[0])},{i:3,x:su(a[1],c[1])})):c[0]||c[1]?r.push("translate("+c+")"):r.push(""),s!=l?(s-l>180?l+=360:l-s>180&&(s+=360),u.push({i:r.push(r.pop()+"rotate(",null,")")-2,x:su(s,l)})):l&&r.push(r.pop()+"rotate("+l+")"),f!=h?u.push({i:r.push(r.pop()+"skewX(",null,")")-2,x:su(f,h)}):h&&r.push(r.pop()+"skewX("+h+")"),g[0]!=p[0]||g[1]!=p[1]?(e=r.push(r.pop()+"scale(",null,",",null,")"),u.push({i:e-4,x:su(g[0],p[0])},{i:e-2,x:su(g[1],p[1])})):(1!=p[0]||1!=p[1])&&r.push(r.pop()+"scale("+p+")"),e=u.length,function(n){for(var t,i=-1;++i<e;)r[(t=u[i]).i]=t.x(n);return r.join("")}}function Du(n,t){return t=t-(n=+n)?1/(t-n):0,function(e){return(e-n)*t}}function Pu(n,t){return t=t-(n=+n)?1/(t-n):0,function(e){return Math.max(0,Math.min(1,(e-n)*t))}}function Uu(n){for(var t=n.source,e=n.target,r=Hu(t,e),u=[t];t!==r;)t=t.parent,u.push(t);for(var i=u.length;e!==r;)u.splice(i,0,e),e=e.parent;return u}function ju(n){for(var t=[],e=n.parent;null!=e;)t.push(n),n=e,e=e.parent;return t.push(n),t}function Hu(n,t){if(n===t)return n;for(var e=ju(n),r=ju(t),u=e.pop(),i=r.pop(),o=null;u===i;)o=u,u=e.pop(),i=r.pop();return o}function Fu(n){n.fixed|=2}function Ou(n){n.fixed&=-7}function Yu(n){n.fixed|=4,n.px=n.x,n.py=n.y}function Iu(n){n.fixed&=-5}function Zu(n,t,e){var r=0,u=0;if(n.charge=0,!n.leaf)for(var i,o=n.nodes,a=o.length,c=-1;++c<a;)i=o[c],null!=i&&(Zu(i,t,e),n.charge+=i.charge,r+=i.charge*i.cx,u+=i.charge*i.cy);if(n.point){n.leaf||(n.point.x+=Math.random()-.5,n.point.y+=Math.random()-.5);var s=t*e[n.point.index];n.charge+=n.pointCharge=s,r+=s*n.point.x,u+=s*n.point.y}n.cx=r/n.charge,n.cy=u/n.charge}function Vu(n,t){return Xo.rebind(n,t,"sort","children","value"),n.nodes=n,n.links=Wu,n}function Xu(n){return n.children}function $u(n){return n.value}function Bu(n,t){return t.value-n.value}function Wu(n){return 
// (close of Wu) flattens every parent into {source, target} links over its children.
// Ju/Gu: x/y accessors; Ku: writes stack baseline y0 and height y onto a point.
// Qu/ni/ti/ei/ri: stack-layout helpers — default order, zero baseline array, index of max y, sum reducer.
// ui/ii: histogram bin generators — Sturges-style count (log2(n)+1), then evenly spaced thresholds; oi: extent.
// ai: separation default (1 between siblings, 2 otherwise); ci/si: leftmost/rightmost child or `thread` link;
// li: deepest node by comparator t; fi/hi/gi: x/depth comparators; pi: post-order walk passing previous sibling.
// vi/di/mi: tree-layout bookkeeping on node._tree (shift/change distribution, subtree moves, ancestor choice)
//   — the prelim/mod/thread fields follow the Reingold-Tilford/Walker family (NOTE(review): confirm naming).
// yi: value comparator; xi/Mi: insert/remove in the packing front chain (_pack_next/_pack_prev);
// _i(n,t): circle overlap test with a .999 slack factor.
// bi(n): front-chain circle packing of n.children — seeds up to three mutually tangent circles, then places
//   each next circle tangent to the current chain pair (Ei), scanning forward/backward for intersections (_i)
//   and trimming the chain on conflict; finally recenters children and sets the enclosing radius n.r.
// wi/Si: init/strip chain links; ki: recursively scale+translate a packed subtree.
// Ei(n,t,e): position circle e tangent to circles n and t (law-of-cosines placement).
// Ai/Ci/Ni/Li: max depth, mean x, leftmost leaf, rightmost leaf; Ti/qi: treemap node copy and padded rect.
// zi(n): two-element sorted extent; Ri(n): a scale's range extent.
// Di/Pi/Ui: linear-scale plumbing — compose deinterpolate(domain) with interpolate(range); round a domain
//   outward by a floor/ceil policy; Ui(n) builds that policy from a step (identity policy ls otherwise).
// ji(...): start of the polylinear (multi-segment) scale (continues on the next block's lines).
Xo.merge(n.map(function(n){return(n.children||[]).map(function(t){return{source:n,target:t}})}))}function Ju(n){return n.x}function Gu(n){return n.y}function Ku(n,t,e){n.y0=t,n.y=e}function Qu(n){return Xo.range(n.length)}function ni(n){for(var t=-1,e=n[0].length,r=[];++t<e;)r[t]=0;return r}function ti(n){for(var t,e=1,r=0,u=n[0][1],i=n.length;i>e;++e)(t=n[e][1])>u&&(r=e,u=t);return r}function ei(n){return n.reduce(ri,0)}function ri(n,t){return n+t[1]}function ui(n,t){return ii(n,Math.ceil(Math.log(t.length)/Math.LN2+1))}function ii(n,t){for(var e=-1,r=+n[0],u=(n[1]-r)/t,i=[];++e<=t;)i[e]=u*e+r;return i}function oi(n){return[Xo.min(n),Xo.max(n)]}function ai(n,t){return n.parent==t.parent?1:2}function ci(n){var t=n.children;return t&&t.length?t[0]:n._tree.thread}function si(n){var t,e=n.children;return e&&(t=e.length)?e[t-1]:n._tree.thread}function li(n,t){var e=n.children;if(e&&(u=e.length))for(var r,u,i=-1;++i<u;)t(r=li(e[i],t),n)>0&&(n=r);return n}function fi(n,t){return n.x-t.x}function hi(n,t){return t.x-n.x}function gi(n,t){return n.depth-t.depth}function pi(n,t){function e(n,r){var u=n.children;if(u&&(o=u.length))for(var i,o,a=null,c=-1;++c<o;)i=u[c],e(i,a),a=i;t(n,r)}e(n,null)}function vi(n){for(var t,e=0,r=0,u=n.children,i=u.length;--i>=0;)t=u[i]._tree,t.prelim+=e,t.mod+=e,e+=t.shift+(r+=t.change)}function di(n,t,e){n=n._tree,t=t._tree;var r=e/(t.number-n.number);n.change+=r,t.change-=r,t.shift+=e,t.prelim+=e,t.mod+=e}function mi(n,t,e){return n._tree.ancestor.parent==t.parent?n._tree.ancestor:e}function yi(n,t){return n.value-t.value}function xi(n,t){var e=n._pack_next;n._pack_next=t,t._pack_prev=n,t._pack_next=e,e._pack_prev=t}function Mi(n,t){n._pack_next=t,t._pack_prev=n}function _i(n,t){var e=t.x-n.x,r=t.y-n.y,u=n.r+t.r;return.999*u*u>e*e+r*r}function bi(n){function t(n){l=Math.min(n.x-n.r,l),f=Math.max(n.x+n.r,f),h=Math.min(n.y-n.r,h),g=Math.max(n.y+n.r,g)}if((e=n.children)&&(s=e.length)){var 
e,r,u,i,o,a,c,s,l=1/0,f=-1/0,h=1/0,g=-1/0;if(e.forEach(wi),r=e[0],r.x=-r.r,r.y=0,t(r),s>1&&(u=e[1],u.x=u.r,u.y=0,t(u),s>2))for(i=e[2],Ei(r,u,i),t(i),xi(r,i),r._pack_prev=i,xi(i,u),u=r._pack_next,o=3;s>o;o++){Ei(r,u,i=e[o]);var p=0,v=1,d=1;for(a=u._pack_next;a!==u;a=a._pack_next,v++)if(_i(a,i)){p=1;break}if(1==p)for(c=r._pack_prev;c!==a._pack_prev&&!_i(c,i);c=c._pack_prev,d++);p?(d>v||v==d&&u.r<r.r?Mi(r,u=a):Mi(r=c,u),o--):(xi(r,i),u=i,t(i))}var m=(l+f)/2,y=(h+g)/2,x=0;for(o=0;s>o;o++)i=e[o],i.x-=m,i.y-=y,x=Math.max(x,i.r+Math.sqrt(i.x*i.x+i.y*i.y));n.r=x,e.forEach(Si)}}function wi(n){n._pack_next=n._pack_prev=n}function Si(n){delete n._pack_next,delete n._pack_prev}function ki(n,t,e,r){var u=n.children;if(n.x=t+=r*n.x,n.y=e+=r*n.y,n.r*=r,u)for(var i=-1,o=u.length;++i<o;)ki(u[i],t,e,r)}function Ei(n,t,e){var r=n.r+e.r,u=t.x-n.x,i=t.y-n.y;if(r&&(u||i)){var o=t.r+e.r,a=u*u+i*i;o*=o,r*=r;var c=.5+(r-o)/(2*a),s=Math.sqrt(Math.max(0,2*o*(r+a)-(r-=a)*r-o*o))/(2*a);e.x=n.x+c*u+s*i,e.y=n.y+c*i-s*u}else e.x=n.x+r,e.y=n.y}function Ai(n){return 1+Xo.max(n,function(n){return n.y})}function Ci(n){return n.reduce(function(n,t){return n+t.x},0)/n.length}function Ni(n){var t=n.children;return t&&t.length?Ni(t[0]):n}function Li(n){var t,e=n.children;return e&&(t=e.length)?Li(e[t-1]):n}function Ti(n){return{x:n.x,y:n.y,dx:n.dx,dy:n.dy}}function qi(n,t){var e=n.x+t[3],r=n.y+t[0],u=n.dx-t[1]-t[3],i=n.dy-t[0]-t[2];return 0>u&&(e+=u/2,u=0),0>i&&(r+=i/2,i=0),{x:e,y:r,dx:u,dy:i}}function zi(n){var t=n[0],e=n[n.length-1];return e>t?[t,e]:[e,t]}function Ri(n){return n.rangeExtent?n.rangeExtent():zi(n.range())}function Di(n,t,e,r){var u=e(n[0],n[1]),i=r(t[0],t[1]);return function(n){return i(u(n))}}function Pi(n,t){var e,r=0,u=n.length-1,i=n[r],o=n[u];return i>o&&(e=r,r=u,u=e,e=i,i=o,o=e),n[r]=t.floor(i),n[u]=t.ceil(o),n}function Ui(n){return n?{floor:function(t){return Math.floor(t/n)*n},ceil:function(t){return Math.ceil(t/n)*n}}:ls}function ji(n,t,e,r){var 
// (continuation of $i, the log scale) domain setter records sign e; `nice` rounds through log space;
// `ticks` emits per-decade subticks (1..base-1 multiples, direction depends on sign); `tickFormat` thins
// labels by density a and formats via t (default hs). copy() clones; Fi rebinds range plumbing.
// Bi(n,t,e): power scale — maps through Wi(t) / Wi(1/t) with exponent accessor; Wi(n): sign-preserving pow.
// Ji(n,t): ordinal scale — u maps domain values to 1-based indices (unknown values appended when mode is
//   "range"); range/rangePoints/rangeBands/rangeRoundBands compute explicit, evenly spaced, or banded
//   outputs (with padding i and outer padding c; rounded variant distributes leftover g); rangeBand /
//   rangeExtent / copy accessors; t records the last range mode + args so domain changes re-derive the range.
// Gi(n,t): quantile scale — thresholds u from Xo.quantile at i-1 cut points; invertExtent recovers the
//   source interval for a range value; non-numeric domain values are filtered out.
// Ki(n,t,e): quantize scale over [n,t] into e.length uniform slots, with invertExtent.
// Qi(n,t): threshold scale — bisect domain n for the output in t (NaN-ish inputs fall through to undefined).
// no(n): identity scale — invert/ticks/tickFormat passthrough with coercion to number.
// to/eo/ro/uo: default accessors for arc generators (innerRadius/outerRadius/startAngle/endAngle).
// io(): SVG line generator — buffers consecutive "defined" points (predicate u), joins each run with the
//   current interpolator i (default oo) at tension a=.7; x/y/defined/interpolate/tension accessors; the
//   interpolate setter accepts a function or a named mode from Ms.
// oo/ao: linear and linear-closed joins; co/so/lo: step (midpoint H/V), step-before, step-after path builders.
// fo/ho/go: cardinal, cardinal-closed and cardinal-open splines — tangents from vo, bezier emission via po
//   (po itself is cut off at the end of this chunk; its body continues past the visible lines).
arguments.length?(e=t[0]>=0,n.domain((r=t.map(Number)).map(u)),o):r},o.base=function(e){return arguments.length?(t=+e,n.domain(r.map(u)),o):t},o.nice=function(){var t=Pi(r.map(u),e?Math:gs);return n.domain(t),r=t.map(i),o},o.ticks=function(){var n=zi(r),o=[],a=n[0],c=n[1],s=Math.floor(u(a)),l=Math.ceil(u(c)),f=t%1?2:t;if(isFinite(l-s)){if(e){for(;l>s;s++)for(var h=1;f>h;h++)o.push(i(s)*h);o.push(i(s))}else for(o.push(i(s));s++<l;)for(var h=f-1;h>0;h--)o.push(i(s)*h);for(s=0;o[s]<a;s++);for(l=o.length;o[l-1]>c;l--);o=o.slice(s,l)}return o},o.tickFormat=function(n,t){if(!arguments.length)return hs;arguments.length<2?t=hs:"function"!=typeof t&&(t=Xo.format(t));var r,a=Math.max(.1,n/o.ticks().length),c=e?(r=1e-12,Math.ceil):(r=-1e-12,Math.floor);return function(n){return n/i(c(u(n)+r))<=a?t(n):""}},o.copy=function(){return $i(n.copy(),t,e,r)},Fi(o,n)}function Bi(n,t,e){function r(t){return n(u(t))}var u=Wi(t),i=Wi(1/t);return r.invert=function(t){return i(n.invert(t))},r.domain=function(t){return arguments.length?(n.domain((e=t.map(Number)).map(u)),r):e},r.ticks=function(n){return Ii(e,n)},r.tickFormat=function(n,t){return Zi(e,n,t)},r.nice=function(n){return r.domain(Oi(e,n))},r.exponent=function(o){return arguments.length?(u=Wi(t=o),i=Wi(1/t),n.domain(e.map(u)),r):t},r.copy=function(){return Bi(n.copy(),t,e)},Fi(r,n)}function Wi(n){return function(t){return 0>t?-Math.pow(-t,n):Math.pow(t,n)}}function Ji(n,t){function e(e){return o[((i.get(e)||"range"===t.t&&i.set(e,n.push(e)))-1)%o.length]}function r(t,e){return Xo.range(n.length).map(function(n){return t+e*n})}var i,o,a;return e.domain=function(r){if(!arguments.length)return n;n=[],i=new u;for(var o,a=-1,c=r.length;++a<c;)i.has(o=r[a])||i.set(o,n.push(o));return e[t.t].apply(e,t.a)},e.range=function(n){return arguments.length?(o=n,a=0,t={t:"range",a:arguments},e):o},e.rangePoints=function(u,i){arguments.length<2&&(i=0);var c=u[0],s=u[1],l=(s-c)/(Math.max(1,n.length-1)+i);return 
o=r(n.length<2?(c+s)/2:c+l*i/2,l),a=0,t={t:"rangePoints",a:arguments},e},e.rangeBands=function(u,i,c){arguments.length<2&&(i=0),arguments.length<3&&(c=i);var s=u[1]<u[0],l=u[s-0],f=u[1-s],h=(f-l)/(n.length-i+2*c);return o=r(l+h*c,h),s&&o.reverse(),a=h*(1-i),t={t:"rangeBands",a:arguments},e},e.rangeRoundBands=function(u,i,c){arguments.length<2&&(i=0),arguments.length<3&&(c=i);var s=u[1]<u[0],l=u[s-0],f=u[1-s],h=Math.floor((f-l)/(n.length-i+2*c)),g=f-l-(n.length-i)*h;return o=r(l+Math.round(g/2),h),s&&o.reverse(),a=Math.round(h*(1-i)),t={t:"rangeRoundBands",a:arguments},e},e.rangeBand=function(){return a},e.rangeExtent=function(){return zi(t.a[0])},e.copy=function(){return Ji(n,t)},e.domain(n)}function Gi(n,t){function e(){var e=0,i=t.length;for(u=[];++e<i;)u[e-1]=Xo.quantile(n,e/i);return r}function r(n){return isNaN(n=+n)?void 0:t[Xo.bisect(u,n)]}var u;return r.domain=function(t){return arguments.length?(n=t.filter(function(n){return!isNaN(n)}).sort(Xo.ascending),e()):n},r.range=function(n){return arguments.length?(t=n,e()):t},r.quantiles=function(){return u},r.invertExtent=function(e){return e=t.indexOf(e),0>e?[0/0,0/0]:[e>0?u[e-1]:n[0],e<u.length?u[e]:n[n.length-1]]},r.copy=function(){return Gi(n,t)},e()}function Ki(n,t,e){function r(t){return e[Math.max(0,Math.min(o,Math.floor(i*(t-n))))]}function u(){return i=e.length/(t-n),o=e.length-1,r}var i,o;return r.domain=function(e){return arguments.length?(n=+e[0],t=+e[e.length-1],u()):[n,t]},r.range=function(n){return arguments.length?(e=n,u()):e},r.invertExtent=function(t){return t=e.indexOf(t),t=0>t?0/0:t/i+n,[t,t+1/i]},r.copy=function(){return Ki(n,t,e)},u()}function Qi(n,t){function e(e){return e>=e?t[Xo.bisect(n,e)]:void 0}return e.domain=function(t){return arguments.length?(n=t,e):n},e.range=function(n){return arguments.length?(t=n,e):t},e.invertExtent=function(e){return e=t.indexOf(e),[n[e-1],n[e]]},e.copy=function(){return Qi(n,t)},e}function no(n){function t(n){return+n}return 
t.invert=t,t.domain=t.range=function(e){return arguments.length?(n=e.map(t),t):n},t.ticks=function(t){return Ii(n,t)},t.tickFormat=function(t,e){return Zi(n,t,e)},t.copy=function(){return no(n)},t}function to(n){return n.innerRadius}function eo(n){return n.outerRadius}function ro(n){return n.startAngle}function uo(n){return n.endAngle}function io(n){function t(t){function o(){s.push("M",i(n(l),a))}for(var c,s=[],l=[],f=-1,h=t.length,g=_t(e),p=_t(r);++f<h;)u.call(this,c=t[f],f)?l.push([+g.call(this,c,f),+p.call(this,c,f)]):l.length&&(o(),l=[]);return l.length&&o(),s.length?s.join(""):null}var e=br,r=wr,u=be,i=oo,o=i.key,a=.7;return t.x=function(n){return arguments.length?(e=n,t):e},t.y=function(n){return arguments.length?(r=n,t):r},t.defined=function(n){return arguments.length?(u=n,t):u},t.interpolate=function(n){return arguments.length?(o="function"==typeof n?i=n:(i=Ms.get(n)||oo).key,t):o},t.tension=function(n){return arguments.length?(a=n,t):a},t}function oo(n){return n.join("L")}function ao(n){return oo(n)+"Z"}function co(n){for(var t=0,e=n.length,r=n[0],u=[r[0],",",r[1]];++t<e;)u.push("H",(r[0]+(r=n[t])[0])/2,"V",r[1]);return e>1&&u.push("H",r[0]),u.join("")}function so(n){for(var t=0,e=n.length,r=n[0],u=[r[0],",",r[1]];++t<e;)u.push("V",(r=n[t])[1],"H",r[0]);return u.join("")}function lo(n){for(var t=0,e=n.length,r=n[0],u=[r[0],",",r[1]];++t<e;)u.push("H",(r=n[t])[0],"V",r[1]);return u.join("")}function fo(n,t){return n.length<4?oo(n):n[1]+po(n.slice(1,n.length-1),vo(n,t))}function ho(n,t){return n.length<3?oo(n):n[0]+po((n.push(n[0]),n),vo([n[n.length-2]].concat(n,[n[1]]),t))}function go(n,t){return n.length<3?oo(n):n[0]+po(n,vo(n,t))}function po(n,t){if(t.length<1||n.length!=t.length&&n.length!=t.length+2)return oo(n);var 
e=n.length!=t.length,r="",u=n[0],i=n[1],o=t[0],a=o,c=1;if(e&&(r+="Q"+(i[0]-2*o[0]/3)+","+(i[1]-2*o[1]/3)+","+i[0]+","+i[1],u=n[1],c=2),t.length>1){a=t[1],i=n[c],c++,r+="C"+(u[0]+o[0])+","+(u[1]+o[1])+","+(i[0]-a[0])+","+(i[1]-a[1])+","+i[0]+","+i[1];for(var s=2;s<t.length;s++,c++)i=n[c],a=t[s],r+="S"+(i[0]-a[0])+","+(i[1]-a[1])+","+i[0]+","+i[1]}if(e){var l=n[c];r+="Q"+(i[0]+2*a[0]/3)+","+(i[1]+2*a[1]/3)+","+l[0]+","+l[1]}return r}function vo(n,t){for(var e,r=[],u=(1-t)/2,i=n[0],o=n[1],a=1,c=n.length;++a<c;)e=i,i=o,o=n[a],r.push([u*(o[0]-e[0]),u*(o[1]-e[1])]);return r}function mo(n){if(n.length<3)return oo(n);var t=1,e=n.length,r=n[0],u=r[0],i=r[1],o=[u,u,u,(r=n[1])[0]],a=[i,i,i,r[1]],c=[u,",",i,"L",_o(ws,o),",",_o(ws,a)];for(n.push(n[e-1]);++t<=e;)r=n[t],o.shift(),o.push(r[0]),a.shift(),a.push(r[1]),bo(c,o,a);return n.pop(),c.push("L",r),c.join("")}function yo(n){if(n.length<4)return oo(n);for(var t,e=[],r=-1,u=n.length,i=[0],o=[0];++r<3;)t=n[r],i.push(t[0]),o.push(t[1]);for(e.push(_o(ws,i)+","+_o(ws,o)),--r;++r<u;)t=n[r],i.shift(),i.push(t[0]),o.shift(),o.push(t[1]),bo(e,i,o);return e.join("")}function xo(n){for(var t,e,r=-1,u=n.length,i=u+4,o=[],a=[];++r<4;)e=n[r%u],o.push(e[0]),a.push(e[1]);for(t=[_o(ws,o),",",_o(ws,a)],--r;++r<i;)e=n[r%u],o.shift(),o.push(e[0]),a.shift(),a.push(e[1]),bo(t,o,a);return t.join("")}function Mo(n,t){var e=n.length-1;if(e)for(var r,u,i=n[0][0],o=n[0][1],a=n[e][0]-i,c=n[e][1]-o,s=-1;++s<=e;)r=n[s],u=s/e,r[0]=t*r[0]+(1-t)*(i+u*a),r[1]=t*r[1]+(1-t)*(o+u*c);return mo(n)}function _o(n,t){return n[0]*t[0]+n[1]*t[1]+n[2]*t[2]+n[3]*t[3]}function bo(n,t,e){n.push("C",_o(_s,t),",",_o(_s,e),",",_o(bs,t),",",_o(bs,e),",",_o(ws,t),",",_o(ws,e))}function wo(n,t){return(t[1]-n[1])/(t[0]-n[0])}function So(n){for(var t=0,e=n.length-1,r=[],u=n[0],i=n[1],o=r[0]=wo(u,i);++t<e;)r[t]=(o+(o=wo(u=i,i=n[t+1])))/2;return r[t]=o,r}function ko(n){for(var 
t,e,r,u,i=[],o=So(n),a=-1,c=n.length-1;++a<c;)t=wo(n[a],n[a+1]),oa(t)<Aa?o[a]=o[a+1]=0:(e=o[a]/t,r=o[a+1]/t,u=e*e+r*r,u>9&&(u=3*t/Math.sqrt(u),o[a]=u*e,o[a+1]=u*r));for(a=-1;++a<=c;)u=(n[Math.min(c,a+1)][0]-n[Math.max(0,a-1)][0])/(6*(1+o[a]*o[a])),i.push([u||0,o[a]*u||0]);return i}function Eo(n){return n.length<3?oo(n):n[0]+po(n,ko(n))}function Ao(n){for(var t,e,r,u=-1,i=n.length;++u<i;)t=n[u],e=t[0],r=t[1]+ys,t[0]=e*Math.cos(r),t[1]=e*Math.sin(r);return n}function Co(n){function t(t){function c(){v.push("M",a(n(m),f),l,s(n(d.reverse()),f),"Z")}for(var h,g,p,v=[],d=[],m=[],y=-1,x=t.length,M=_t(e),_=_t(u),b=e===r?function(){return g}:_t(r),w=u===i?function(){return p}:_t(i);++y<x;)o.call(this,h=t[y],y)?(d.push([g=+M.call(this,h,y),p=+_.call(this,h,y)]),m.push([+b.call(this,h,y),+w.call(this,h,y)])):d.length&&(c(),d=[],m=[]);return d.length&&c(),v.length?v.join(""):null}var e=br,r=br,u=0,i=wr,o=be,a=oo,c=a.key,s=a,l="L",f=.7;return t.x=function(n){return arguments.length?(e=r=n,t):r},t.x0=function(n){return arguments.length?(e=n,t):e},t.x1=function(n){return arguments.length?(r=n,t):r},t.y=function(n){return arguments.length?(u=i=n,t):i},t.y0=function(n){return arguments.length?(u=n,t):u},t.y1=function(n){return arguments.length?(i=n,t):i},t.defined=function(n){return arguments.length?(o=n,t):o},t.interpolate=function(n){return arguments.length?(c="function"==typeof n?a=n:(a=Ms.get(n)||oo).key,s=a.reverse||a,l=a.closed?"M":"L",t):c},t.tension=function(n){return arguments.length?(f=n,t):f},t}function No(n){return n.radius}function Lo(n){return[n.x,n.y]}function To(n){return function(){var t=n.apply(this,arguments),e=t[0],r=t[1]+ys;return[e*Math.cos(r),e*Math.sin(r)]}}function qo(){return 64}function zo(){return"circle"}function Ro(n){var t=Math.sqrt(n/Sa);return"M0,"+t+"A"+t+","+t+" 0 1,1 0,"+-t+"A"+t+","+t+" 0 1,1 0,"+t+"Z"}function Do(n,t){return fa(n,Ns),n.id=t,n}function Po(n,t,e,r){var u=n.id;return R(n,"function"==typeof 
e?function(n,i,o){n.__transition__[u].tween.set(t,r(e.call(n,n.__data__,i,o)))}:(e=r(e),function(n){n.__transition__[u].tween.set(t,e)}))}function Uo(n){return null==n&&(n=""),function(){this.textContent=n}}function jo(n,t,e,r){var i=n.__transition__||(n.__transition__={active:0,count:0}),o=i[e];if(!o){var a=r.time;o=i[e]={tween:new u,time:a,ease:r.ease,delay:r.delay,duration:r.duration},++i.count,Xo.timer(function(r){function u(r){return i.active>e?s():(i.active=e,o.event&&o.event.start.call(n,l,t),o.tween.forEach(function(e,r){(r=r.call(n,l,t))&&v.push(r)}),Xo.timer(function(){return p.c=c(r||1)?be:c,1},0,a),void 0)}function c(r){if(i.active!==e)return s();for(var u=r/g,a=f(u),c=v.length;c>0;)v[--c].call(n,a);return u>=1?(o.event&&o.event.end.call(n,l,t),s()):void 0}function s(){return--i.count?delete i[e]:delete n.__transition__,1}var l=n.__data__,f=o.ease,h=o.delay,g=o.duration,p=Ja,v=[];return p.t=h+a,r>=h?u(r-h):(p.c=u,void 0)},0,a)}}function Ho(n,t){n.attr("transform",function(n){return"translate("+t(n)+",0)"})}function Fo(n,t){n.attr("transform",function(n){return"translate(0,"+t(n)+")"})}function Oo(n){return n.toISOString()}function Yo(n,t,e){function r(t){return n(t)}function u(n,e){var r=n[1]-n[0],u=r/e,i=Xo.bisect(js,u);return i==js.length?[t.year,Yi(n.map(function(n){return n/31536e6}),e)[2]]:i?t[u/js[i-1]<js[i]/u?i-1:i]:[Os,Yi(n,e)[2]]}return r.invert=function(t){return Io(n.invert(t))},r.domain=function(t){return arguments.length?(n.domain(t),r):n.domain().map(Io)},r.nice=function(n,t){function e(e){return!isNaN(e)&&!n.range(e,Io(+e+1),t).length}var i=r.domain(),o=zi(i),a=null==n?u(o,10):"number"==typeof n&&u(o,n);return a&&(n=a[0],t=a[1]),r.domain(Pi(i,t>1?{floor:function(t){for(;e(t=n.floor(t));)t=Io(t-1);return t},ceil:function(t){for(;e(t=n.ceil(t));)t=Io(+t+1);return t}}:n))},r.ticks=function(n,t){var e=zi(r.domain()),i=null==n?u(e,10):"number"==typeof n?u(e,n):!n.range&&[{range:n},t];return 
i&&(n=i[0],t=i[1]),n.range(e[0],Io(+e[1]+1),1>t?1:t)},r.tickFormat=function(){return e},r.copy=function(){return Yo(n.copy(),t,e)},Fi(r,n)}function Io(n){return new Date(n)}function Zo(n){return JSON.parse(n.responseText)}function Vo(n){var t=Wo.createRange();return t.selectNode(Wo.body),t.createContextualFragment(n.responseText)}var Xo={version:"3.4.3"};Date.now||(Date.now=function(){return+new Date});var $o=[].slice,Bo=function(n){return $o.call(n)},Wo=document,Jo=Wo.documentElement,Go=window;try{Bo(Jo.childNodes)[0].nodeType}catch(Ko){Bo=function(n){for(var t=n.length,e=new Array(t);t--;)e[t]=n[t];return e}}try{Wo.createElement("div").style.setProperty("opacity",0,"")}catch(Qo){var na=Go.Element.prototype,ta=na.setAttribute,ea=na.setAttributeNS,ra=Go.CSSStyleDeclaration.prototype,ua=ra.setProperty;na.setAttribute=function(n,t){ta.call(this,n,t+"")},na.setAttributeNS=function(n,t,e){ea.call(this,n,t,e+"")},ra.setProperty=function(n,t,e){ua.call(this,n,t+"",e)}}Xo.ascending=function(n,t){return t>n?-1:n>t?1:n>=t?0:0/0},Xo.descending=function(n,t){return n>t?-1:t>n?1:t>=n?0:0/0},Xo.min=function(n,t){var e,r,u=-1,i=n.length;if(1===arguments.length){for(;++u<i&&!(null!=(e=n[u])&&e>=e);)e=void 0;for(;++u<i;)null!=(r=n[u])&&e>r&&(e=r)}else{for(;++u<i&&!(null!=(e=t.call(n,n[u],u))&&e>=e);)e=void 0;for(;++u<i;)null!=(r=t.call(n,n[u],u))&&e>r&&(e=r)}return e},Xo.max=function(n,t){var e,r,u=-1,i=n.length;if(1===arguments.length){for(;++u<i&&!(null!=(e=n[u])&&e>=e);)e=void 0;for(;++u<i;)null!=(r=n[u])&&r>e&&(e=r)}else{for(;++u<i&&!(null!=(e=t.call(n,n[u],u))&&e>=e);)e=void 0;for(;++u<i;)null!=(r=t.call(n,n[u],u))&&r>e&&(e=r)}return e},Xo.extent=function(n,t){var e,r,u,i=-1,o=n.length;if(1===arguments.length){for(;++i<o&&!(null!=(e=u=n[i])&&e>=e);)e=u=void 0;for(;++i<o;)null!=(r=n[i])&&(e>r&&(e=r),r>u&&(u=r))}else{for(;++i<o&&!(null!=(e=u=t.call(n,n[i],i))&&e>=e);)e=void 0;for(;++i<o;)null!=(r=t.call(n,n[i],i))&&(e>r&&(e=r),r>u&&(u=r))}return[e,u]},Xo.sum=function(n,t){var 
e,r=0,u=n.length,i=-1;if(1===arguments.length)for(;++i<u;)isNaN(e=+n[i])||(r+=e);else for(;++i<u;)isNaN(e=+t.call(n,n[i],i))||(r+=e);return r},Xo.mean=function(t,e){var r,u=t.length,i=0,o=-1,a=0;if(1===arguments.length)for(;++o<u;)n(r=t[o])&&(i+=(r-i)/++a);else for(;++o<u;)n(r=e.call(t,t[o],o))&&(i+=(r-i)/++a);return a?i:void 0},Xo.quantile=function(n,t){var e=(n.length-1)*t+1,r=Math.floor(e),u=+n[r-1],i=e-r;return i?u+i*(n[r]-u):u},Xo.median=function(t,e){return arguments.length>1&&(t=t.map(e)),t=t.filter(n),t.length?Xo.quantile(t.sort(Xo.ascending),.5):void 0},Xo.bisector=function(n){return{left:function(t,e,r,u){for(arguments.length<3&&(r=0),arguments.length<4&&(u=t.length);u>r;){var i=r+u>>>1;n.call(t,t[i],i)<e?r=i+1:u=i}return r},right:function(t,e,r,u){for(arguments.length<3&&(r=0),arguments.length<4&&(u=t.length);u>r;){var i=r+u>>>1;e<n.call(t,t[i],i)?u=i:r=i+1}return r}}};var ia=Xo.bisector(function(n){return n});Xo.bisectLeft=ia.left,Xo.bisect=Xo.bisectRight=ia.right,Xo.shuffle=function(n){for(var t,e,r=n.length;r;)e=0|Math.random()*r--,t=n[r],n[r]=n[e],n[e]=t;return n},Xo.permute=function(n,t){for(var e=t.length,r=new Array(e);e--;)r[e]=n[t[e]];return r},Xo.pairs=function(n){for(var t,e=0,r=n.length-1,u=n[0],i=new Array(0>r?0:r);r>e;)i[e]=[t=u,u=n[++e]];return i},Xo.zip=function(){if(!(u=arguments.length))return[];for(var n=-1,e=Xo.min(arguments,t),r=new Array(e);++n<e;)for(var u,i=-1,o=r[n]=new Array(u);++i<u;)o[i]=arguments[i][n];return r},Xo.transpose=function(n){return Xo.zip.apply(Xo,n)},Xo.keys=function(n){var t=[];for(var e in n)t.push(e);return t},Xo.values=function(n){var t=[];for(var e in n)t.push(n[e]);return t},Xo.entries=function(n){var t=[];for(var e in n)t.push({key:e,value:n[e]});return t},Xo.merge=function(n){for(var t,e,r,u=n.length,i=-1,o=0;++i<u;)o+=n[i].length;for(e=new Array(o);--u>=0;)for(r=n[u],t=r.length;--t>=0;)e[--o]=r[t];return e};var 
oa=Math.abs;Xo.range=function(n,t,r){if(arguments.length<3&&(r=1,arguments.length<2&&(t=n,n=0)),1/0===(t-n)/r)throw new Error("infinite range");var u,i=[],o=e(oa(r)),a=-1;if(n*=o,t*=o,r*=o,0>r)for(;(u=n+r*++a)>t;)i.push(u/o);else for(;(u=n+r*++a)<t;)i.push(u/o);return i},Xo.map=function(n){var t=new u;if(n instanceof u)n.forEach(function(n,e){t.set(n,e)});else for(var e in n)t.set(e,n[e]);return t},r(u,{has:i,get:function(n){return this[aa+n]},set:function(n,t){return this[aa+n]=t},remove:o,keys:a,values:function(){var n=[];return this.forEach(function(t,e){n.push(e)}),n},entries:function(){var n=[];return this.forEach(function(t,e){n.push({key:t,value:e})}),n},size:c,empty:s,forEach:function(n){for(var t in this)t.charCodeAt(0)===ca&&n.call(this,t.substring(1),this[t])}});var aa="\x00",ca=aa.charCodeAt(0);Xo.nest=function(){function n(t,a,c){if(c>=o.length)return r?r.call(i,a):e?a.sort(e):a;for(var s,l,f,h,g=-1,p=a.length,v=o[c++],d=new u;++g<p;)(h=d.get(s=v(l=a[g])))?h.push(l):d.set(s,[l]);return t?(l=t(),f=function(e,r){l.set(e,n(t,r,c))}):(l={},f=function(e,r){l[e]=n(t,r,c)}),d.forEach(f),l}function t(n,e){if(e>=o.length)return n;var r=[],u=a[e++];return n.forEach(function(n,u){r.push({key:n,values:t(u,e)})}),u?r.sort(function(n,t){return u(n.key,t.key)}):r}var e,r,i={},o=[],a=[];return i.map=function(t,e){return n(e,t,0)},i.entries=function(e){return t(n(Xo.map,e,0),0)},i.key=function(n){return o.push(n),i},i.sortKeys=function(n){return a[o.length-1]=n,i},i.sortValues=function(n){return e=n,i},i.rollup=function(n){return r=n,i},i},Xo.set=function(n){var t=new l;if(n)for(var e=0,r=n.length;r>e;++e)t.add(n[e]);return t},r(l,{has:i,add:function(n){return this[aa+n]=!0,n},remove:function(n){return n=aa+n,n in this&&delete this[n]},values:a,size:c,empty:s,forEach:function(n){for(var t in this)t.charCodeAt(0)===ca&&n.call(this,t.substring(1))}}),Xo.behavior={},Xo.rebind=function(n,t){for(var e,r=1,u=arguments.length;++r<u;)n[e=arguments[r]]=f(n,t,t[e]);return n};var 
sa=["webkit","ms","moz","Moz","o","O"];Xo.dispatch=function(){for(var n=new p,t=-1,e=arguments.length;++t<e;)n[arguments[t]]=v(n);return n},p.prototype.on=function(n,t){var e=n.indexOf("."),r="";if(e>=0&&(r=n.substring(e+1),n=n.substring(0,e)),n)return arguments.length<2?this[n].on(r):this[n].on(r,t);if(2===arguments.length){if(null==t)for(n in this)this.hasOwnProperty(n)&&this[n].on(r,null);return this}},Xo.event=null,Xo.requote=function(n){return n.replace(la,"\\$&")};var la=/[\\\^\$\*\+\?\|\[\]\(\)\.\{\}]/g,fa={}.__proto__?function(n,t){n.__proto__=t}:function(n,t){for(var e in t)n[e]=t[e]},ha=function(n,t){return t.querySelector(n)},ga=function(n,t){return t.querySelectorAll(n)},pa=Jo[h(Jo,"matchesSelector")],va=function(n,t){return pa.call(n,t)};"function"==typeof Sizzle&&(ha=function(n,t){return Sizzle(n,t)[0]||null},ga=Sizzle,va=Sizzle.matchesSelector),Xo.selection=function(){return xa};var da=Xo.selection.prototype=[];da.select=function(n){var t,e,r,u,i=[];n=M(n);for(var o=-1,a=this.length;++o<a;){i.push(t=[]),t.parentNode=(r=this[o]).parentNode;for(var c=-1,s=r.length;++c<s;)(u=r[c])?(t.push(e=n.call(u,u.__data__,c,o)),e&&"__data__"in u&&(e.__data__=u.__data__)):t.push(null)}return x(i)},da.selectAll=function(n){var t,e,r=[];n=_(n);for(var u=-1,i=this.length;++u<i;)for(var o=this[u],a=-1,c=o.length;++a<c;)(e=o[a])&&(r.push(t=Bo(n.call(e,e.__data__,a,u))),t.parentNode=e);return x(r)};var ma={svg:"http://www.w3.org/2000/svg",xhtml:"http://www.w3.org/1999/xhtml",xlink:"http://www.w3.org/1999/xlink",xml:"http://www.w3.org/XML/1998/namespace",xmlns:"http://www.w3.org/2000/xmlns/"};Xo.ns={prefix:ma,qualify:function(n){var t=n.indexOf(":"),e=n;return t>=0&&(e=n.substring(0,t),n=n.substring(t+1)),ma.hasOwnProperty(e)?{space:ma[e],local:n}:n}},da.attr=function(n,t){if(arguments.length<2){if("string"==typeof n){var e=this.node();return n=Xo.ns.qualify(n),n.local?e.getAttributeNS(n.space,n.local):e.getAttribute(n)}for(t in n)this.each(b(t,n[t]));return this}return 
this.each(b(n,t))},da.classed=function(n,t){if(arguments.length<2){if("string"==typeof n){var e=this.node(),r=(n=k(n)).length,u=-1;if(t=e.classList){for(;++u<r;)if(!t.contains(n[u]))return!1}else for(t=e.getAttribute("class");++u<r;)if(!S(n[u]).test(t))return!1;return!0}for(t in n)this.each(E(t,n[t]));return this}return this.each(E(n,t))},da.style=function(n,t,e){var r=arguments.length;if(3>r){if("string"!=typeof n){2>r&&(t="");for(e in n)this.each(C(e,n[e],t));return this}if(2>r)return Go.getComputedStyle(this.node(),null).getPropertyValue(n);e=""}return this.each(C(n,t,e))},da.property=function(n,t){if(arguments.length<2){if("string"==typeof n)return this.node()[n];for(t in n)this.each(N(t,n[t]));return this}return this.each(N(n,t))},da.text=function(n){return arguments.length?this.each("function"==typeof n?function(){var t=n.apply(this,arguments);this.textContent=null==t?"":t}:null==n?function(){this.textContent=""}:function(){this.textContent=n}):this.node().textContent},da.html=function(n){return arguments.length?this.each("function"==typeof n?function(){var t=n.apply(this,arguments);this.innerHTML=null==t?"":t}:null==n?function(){this.innerHTML=""}:function(){this.innerHTML=n}):this.node().innerHTML},da.append=function(n){return n=L(n),this.select(function(){return this.appendChild(n.apply(this,arguments))})},da.insert=function(n,t){return n=L(n),t=M(t),this.select(function(){return this.insertBefore(n.apply(this,arguments),t.apply(this,arguments)||null)})},da.remove=function(){return this.each(function(){var n=this.parentNode;n&&n.removeChild(this)})},da.data=function(n,t){function e(n,e){var r,i,o,a=n.length,f=e.length,h=Math.min(a,f),g=new Array(f),p=new Array(f),v=new Array(a);if(t){var d,m=new u,y=new 
u,x=[];for(r=-1;++r<a;)d=t.call(i=n[r],i.__data__,r),m.has(d)?v[r]=i:m.set(d,i),x.push(d);for(r=-1;++r<f;)d=t.call(e,o=e[r],r),(i=m.get(d))?(g[r]=i,i.__data__=o):y.has(d)||(p[r]=T(o)),y.set(d,o),m.remove(d);for(r=-1;++r<a;)m.has(x[r])&&(v[r]=n[r])}else{for(r=-1;++r<h;)i=n[r],o=e[r],i?(i.__data__=o,g[r]=i):p[r]=T(o);for(;f>r;++r)p[r]=T(e[r]);for(;a>r;++r)v[r]=n[r]}p.update=g,p.parentNode=g.parentNode=v.parentNode=n.parentNode,c.push(p),s.push(g),l.push(v)}var r,i,o=-1,a=this.length;if(!arguments.length){for(n=new Array(a=(r=this[0]).length);++o<a;)(i=r[o])&&(n[o]=i.__data__);return n}var c=D([]),s=x([]),l=x([]);if("function"==typeof n)for(;++o<a;)e(r=this[o],n.call(r,r.parentNode.__data__,o));else for(;++o<a;)e(r=this[o],n);return s.enter=function(){return c},s.exit=function(){return l},s},da.datum=function(n){return arguments.length?this.property("__data__",n):this.property("__data__")},da.filter=function(n){var t,e,r,u=[];"function"!=typeof n&&(n=q(n));for(var i=0,o=this.length;o>i;i++){u.push(t=[]),t.parentNode=(e=this[i]).parentNode;for(var a=0,c=e.length;c>a;a++)(r=e[a])&&n.call(r,r.__data__,a,i)&&t.push(r)}return x(u)},da.order=function(){for(var n=-1,t=this.length;++n<t;)for(var e,r=this[n],u=r.length-1,i=r[u];--u>=0;)(e=r[u])&&(i&&i!==e.nextSibling&&i.parentNode.insertBefore(e,i),i=e);return this},da.sort=function(n){n=z.apply(this,arguments);for(var t=-1,e=this.length;++t<e;)this[t].sort(n);return this.order()},da.each=function(n){return R(this,function(t,e,r){n.call(t,t.__data__,e,r)})},da.call=function(n){var t=Bo(arguments);return n.apply(t[0]=this,t),this},da.empty=function(){return!this.node()},da.node=function(){for(var n=0,t=this.length;t>n;n++)for(var e=this[n],r=0,u=e.length;u>r;r++){var i=e[r];if(i)return i}return null},da.size=function(){var n=0;return this.each(function(){++n}),n};var 
ya=[];Xo.selection.enter=D,Xo.selection.enter.prototype=ya,ya.append=da.append,ya.empty=da.empty,ya.node=da.node,ya.call=da.call,ya.size=da.size,ya.select=function(n){for(var t,e,r,u,i,o=[],a=-1,c=this.length;++a<c;){r=(u=this[a]).update,o.push(t=[]),t.parentNode=u.parentNode;for(var s=-1,l=u.length;++s<l;)(i=u[s])?(t.push(r[s]=e=n.call(u.parentNode,i.__data__,s,a)),e.__data__=i.__data__):t.push(null)}return x(o)},ya.insert=function(n,t){return arguments.length<2&&(t=P(this)),da.insert.call(this,n,t)},da.transition=function(){for(var n,t,e=ks||++Ls,r=[],u=Es||{time:Date.now(),ease:yu,delay:0,duration:250},i=-1,o=this.length;++i<o;){r.push(n=[]);for(var a=this[i],c=-1,s=a.length;++c<s;)(t=a[c])&&jo(t,c,e,u),n.push(t)}return Do(r,e)},da.interrupt=function(){return this.each(U)},Xo.select=function(n){var t=["string"==typeof n?ha(n,Wo):n];return t.parentNode=Jo,x([t])},Xo.selectAll=function(n){var t=Bo("string"==typeof n?ga(n,Wo):n);return t.parentNode=Jo,x([t])};var xa=Xo.select(Jo);da.on=function(n,t,e){var r=arguments.length;if(3>r){if("string"!=typeof n){2>r&&(t=!1);for(e in n)this.each(j(e,n[e],t));return this}if(2>r)return(r=this.node()["__on"+n])&&r._;e=!1}return this.each(j(n,t,e))};var Ma=Xo.map({mouseenter:"mouseover",mouseleave:"mouseout"});Ma.forEach(function(n){"on"+n in Wo&&Ma.remove(n)});var _a="onselectstart"in Wo?null:h(Jo.style,"userSelect"),ba=0;Xo.mouse=function(n){return Y(n,m())};var wa=/WebKit/.test(Go.navigator.userAgent)?-1:0;Xo.touches=function(n,t){return arguments.length<2&&(t=m().touches),t?Bo(t).map(function(t){var e=Y(n,t);return e.identifier=t.identifier,e}):[]},Xo.behavior.drag=function(){function n(){this.on("mousedown.drag",o).on("touchstart.drag",a)}function t(){return Xo.event.changedTouches[0].identifier}function e(n,t){return Xo.touches(n).filter(function(n){return n.identifier===t})[0]}function r(n,t,e,r){return function(){function o(){var 
n=t(l,g),e=n[0]-v[0],r=n[1]-v[1];d|=e|r,v=n,f({type:"drag",x:n[0]+c[0],y:n[1]+c[1],dx:e,dy:r})}function a(){m.on(e+"."+p,null).on(r+"."+p,null),y(d&&Xo.event.target===h),f({type:"dragend"})}var c,s=this,l=s.parentNode,f=u.of(s,arguments),h=Xo.event.target,g=n(),p=null==g?"drag":"drag-"+g,v=t(l,g),d=0,m=Xo.select(Go).on(e+"."+p,o).on(r+"."+p,a),y=O();i?(c=i.apply(s,arguments),c=[c.x-v[0],c.y-v[1]]):c=[0,0],f({type:"dragstart"})}}var u=y(n,"drag","dragstart","dragend"),i=null,o=r(g,Xo.mouse,"mousemove","mouseup"),a=r(t,e,"touchmove","touchend");return n.origin=function(t){return arguments.length?(i=t,n):i},Xo.rebind(n,u,"on")};var Sa=Math.PI,ka=2*Sa,Ea=Sa/2,Aa=1e-6,Ca=Aa*Aa,Na=Sa/180,La=180/Sa,Ta=Math.SQRT2,qa=2,za=4;Xo.interpolateZoom=function(n,t){function e(n){var t=n*y;if(m){var e=B(v),o=i/(qa*h)*(e*W(Ta*t+v)-$(v));return[r+o*s,u+o*l,i*e/B(Ta*t+v)]}return[r+n*s,u+n*l,i*Math.exp(Ta*t)]}var r=n[0],u=n[1],i=n[2],o=t[0],a=t[1],c=t[2],s=o-r,l=a-u,f=s*s+l*l,h=Math.sqrt(f),g=(c*c-i*i+za*f)/(2*i*qa*h),p=(c*c-i*i-za*f)/(2*c*qa*h),v=Math.log(Math.sqrt(g*g+1)-g),d=Math.log(Math.sqrt(p*p+1)-p),m=d-v,y=(m||Math.log(c/i))/Ta;return e.duration=1e3*y,e},Xo.behavior.zoom=function(){function n(n){n.on(A,s).on(Pa+".zoom",f).on(C,h).on("dblclick.zoom",g).on(L,l)}function t(n){return[(n[0]-S.x)/S.k,(n[1]-S.y)/S.k]}function e(n){return[n[0]*S.k+S.x,n[1]*S.k+S.y]}function r(n){S.k=Math.max(E[0],Math.min(E[1],n))}function u(n,t){t=e(t),S.x+=n[0]-t[0],S.y+=n[1]-t[1]}function i(){_&&_.domain(M.range().map(function(n){return(n-S.x)/S.k}).map(M.invert)),w&&w.domain(b.range().map(function(n){return(n-S.y)/S.k}).map(b.invert))}function o(n){n({type:"zoomstart"})}function a(n){i(),n({type:"zoom",scale:S.k,translate:[S.x,S.y]})}function c(n){n({type:"zoomend"})}function s(){function n(){l=1,u(Xo.mouse(r),g),a(i)}function e(){f.on(C,Go===r?h:null).on(N,null),p(l&&Xo.event.target===s),c(i)}var 
r=this,i=T.of(r,arguments),s=Xo.event.target,l=0,f=Xo.select(Go).on(C,n).on(N,e),g=t(Xo.mouse(r)),p=O();U.call(r),o(i)}function l(){function n(){var n=Xo.touches(g);return h=S.k,n.forEach(function(n){n.identifier in v&&(v[n.identifier]=t(n))}),n}function e(){for(var t=Xo.event.changedTouches,e=0,i=t.length;i>e;++e)v[t[e].identifier]=null;var o=n(),c=Date.now();if(1===o.length){if(500>c-x){var s=o[0],l=v[s.identifier];r(2*S.k),u(s,l),d(),a(p)}x=c}else if(o.length>1){var s=o[0],f=o[1],h=s[0]-f[0],g=s[1]-f[1];m=h*h+g*g}}function i(){for(var n,t,e,i,o=Xo.touches(g),c=0,s=o.length;s>c;++c,i=null)if(e=o[c],i=v[e.identifier]){if(t)break;n=e,t=i}if(i){var l=(l=e[0]-n[0])*l+(l=e[1]-n[1])*l,f=m&&Math.sqrt(l/m);n=[(n[0]+e[0])/2,(n[1]+e[1])/2],t=[(t[0]+i[0])/2,(t[1]+i[1])/2],r(f*h)}x=null,u(n,t),a(p)}function f(){if(Xo.event.touches.length){for(var t=Xo.event.changedTouches,e=0,r=t.length;r>e;++e)delete v[t[e].identifier];for(var u in v)return void n()}b.on(M,null).on(_,null),w.on(A,s).on(L,l),k(),c(p)}var h,g=this,p=T.of(g,arguments),v={},m=0,y=Xo.event.changedTouches[0].identifier,M="touchmove.zoom-"+y,_="touchend.zoom-"+y,b=Xo.select(Go).on(M,i).on(_,f),w=Xo.select(g).on(A,null).on(L,e),k=O();U.call(g),e(),o(p)}function f(){var n=T.of(this,arguments);m?clearTimeout(m):(U.call(this),o(n)),m=setTimeout(function(){m=null,c(n)},50),d();var e=v||Xo.mouse(this);p||(p=t(e)),r(Math.pow(2,.002*Ra())*S.k),u(e,p),a(n)}function h(){p=null}function g(){var n=T.of(this,arguments),e=Xo.mouse(this),i=t(e),s=Math.log(S.k)/Math.LN2;o(n),r(Math.pow(2,Xo.event.shiftKey?Math.ceil(s)-1:Math.floor(s)+1)),u(e,i),a(n),c(n)}var p,v,m,x,M,_,b,w,S={x:0,y:0,k:1},k=[960,500],E=Da,A="mousedown.zoom",C="mousemove.zoom",N="mouseup.zoom",L="touchstart.zoom",T=y(n,"zoomstart","zoom","zoomend");return n.event=function(n){n.each(function(){var n=T.of(this,arguments),t=S;ks?Xo.select(this).transition().each("start.zoom",function(){S=this.__chart__||{x:0,y:0,k:1},o(n)}).tween("zoom:zoom",function(){var 
e=k[0],r=k[1],u=e/2,i=r/2,o=Xo.interpolateZoom([(u-S.x)/S.k,(i-S.y)/S.k,e/S.k],[(u-t.x)/t.k,(i-t.y)/t.k,e/t.k]);return function(t){var r=o(t),c=e/r[2];this.__chart__=S={x:u-r[0]*c,y:i-r[1]*c,k:c},a(n)}}).each("end.zoom",function(){c(n)}):(this.__chart__=S,o(n),a(n),c(n))})},n.translate=function(t){return arguments.length?(S={x:+t[0],y:+t[1],k:S.k},i(),n):[S.x,S.y]},n.scale=function(t){return arguments.length?(S={x:S.x,y:S.y,k:+t},i(),n):S.k},n.scaleExtent=function(t){return arguments.length?(E=null==t?Da:[+t[0],+t[1]],n):E},n.center=function(t){return arguments.length?(v=t&&[+t[0],+t[1]],n):v},n.size=function(t){return arguments.length?(k=t&&[+t[0],+t[1]],n):k},n.x=function(t){return arguments.length?(_=t,M=t.copy(),S={x:0,y:0,k:1},n):_},n.y=function(t){return arguments.length?(w=t,b=t.copy(),S={x:0,y:0,k:1},n):w},Xo.rebind(n,T,"on")};var Ra,Da=[0,1/0],Pa="onwheel"in Wo?(Ra=function(){return-Xo.event.deltaY*(Xo.event.deltaMode?120:1)},"wheel"):"onmousewheel"in Wo?(Ra=function(){return Xo.event.wheelDelta},"mousewheel"):(Ra=function(){return-Xo.event.detail},"MozMousePixelScroll");G.prototype.toString=function(){return this.rgb()+""},Xo.hsl=function(n,t,e){return 1===arguments.length?n instanceof Q?K(n.h,n.s,n.l):dt(""+n,mt,K):K(+n,+t,+e)};var Ua=Q.prototype=new G;Ua.brighter=function(n){return n=Math.pow(.7,arguments.length?n:1),K(this.h,this.s,this.l/n)},Ua.darker=function(n){return n=Math.pow(.7,arguments.length?n:1),K(this.h,this.s,n*this.l)},Ua.rgb=function(){return nt(this.h,this.s,this.l)},Xo.hcl=function(n,t,e){return 1===arguments.length?n instanceof et?tt(n.h,n.c,n.l):n instanceof it?at(n.l,n.a,n.b):at((n=yt((n=Xo.rgb(n)).r,n.g,n.b)).l,n.a,n.b):tt(+n,+t,+e)};var ja=et.prototype=new G;ja.brighter=function(n){return tt(this.h,this.c,Math.min(100,this.l+Ha*(arguments.length?n:1)))},ja.darker=function(n){return tt(this.h,this.c,Math.max(0,this.l-Ha*(arguments.length?n:1)))},ja.rgb=function(){return rt(this.h,this.c,this.l).rgb()},Xo.lab=function(n,t,e){return 
1===arguments.length?n instanceof it?ut(n.l,n.a,n.b):n instanceof et?rt(n.l,n.c,n.h):yt((n=Xo.rgb(n)).r,n.g,n.b):ut(+n,+t,+e)};var Ha=18,Fa=.95047,Oa=1,Ya=1.08883,Ia=it.prototype=new G;Ia.brighter=function(n){return ut(Math.min(100,this.l+Ha*(arguments.length?n:1)),this.a,this.b)},Ia.darker=function(n){return ut(Math.max(0,this.l-Ha*(arguments.length?n:1)),this.a,this.b)},Ia.rgb=function(){return ot(this.l,this.a,this.b)},Xo.rgb=function(n,t,e){return 1===arguments.length?n instanceof pt?gt(n.r,n.g,n.b):dt(""+n,gt,nt):gt(~~n,~~t,~~e)};var Za=pt.prototype=new G;Za.brighter=function(n){n=Math.pow(.7,arguments.length?n:1);var t=this.r,e=this.g,r=this.b,u=30;return t||e||r?(t&&u>t&&(t=u),e&&u>e&&(e=u),r&&u>r&&(r=u),gt(Math.min(255,~~(t/n)),Math.min(255,~~(e/n)),Math.min(255,~~(r/n)))):gt(u,u,u)},Za.darker=function(n){return n=Math.pow(.7,arguments.length?n:1),gt(~~(n*this.r),~~(n*this.g),~~(n*this.b))},Za.hsl=function(){return mt(this.r,this.g,this.b)},Za.toString=function(){return"#"+vt(this.r)+vt(this.g)+vt(this.b)};var 
Va=Xo.map({aliceblue:15792383,antiquewhite:16444375,aqua:65535,aquamarine:8388564,azure:15794175,beige:16119260,bisque:16770244,black:0,blanchedalmond:16772045,blue:255,blueviolet:9055202,brown:10824234,burlywood:14596231,cadetblue:6266528,chartreuse:8388352,chocolate:13789470,coral:16744272,cornflowerblue:6591981,cornsilk:16775388,crimson:14423100,cyan:65535,darkblue:139,darkcyan:35723,darkgoldenrod:12092939,darkgray:11119017,darkgreen:25600,darkgrey:11119017,darkkhaki:12433259,darkmagenta:9109643,darkolivegreen:5597999,darkorange:16747520,darkorchid:10040012,darkred:9109504,darksalmon:15308410,darkseagreen:9419919,darkslateblue:4734347,darkslategray:3100495,darkslategrey:3100495,darkturquoise:52945,darkviolet:9699539,deeppink:16716947,deepskyblue:49151,dimgray:6908265,dimgrey:6908265,dodgerblue:2003199,firebrick:11674146,floralwhite:16775920,forestgreen:2263842,fuchsia:16711935,gainsboro:14474460,ghostwhite:16316671,gold:16766720,goldenrod:14329120,gray:8421504,green:32768,greenyellow:11403055,grey:8421504,honeydew:15794160,hotpink:16738740,indianred:13458524,indigo:4915330,ivory:16777200,khaki:15787660,lavender:15132410,lavenderblush:16773365,lawngreen:8190976,lemonchiffon:16775885,lightblue:11393254,lightcoral:15761536,lightcyan:14745599,lightgoldenrodyellow:16448210,lightgray:13882323,lightgreen:9498256,lightgrey:13882323,lightpink:16758465,lightsalmon:16752762,lightseagreen:2142890,lightskyblue:8900346,lightslategray:7833753,lightslategrey:7833753,lightsteelblue:11584734,lightyellow:16777184,lime:65280,limegreen:3329330,linen:16445670,magenta:16711935,maroon:8388608,mediumaquamarine:6737322,mediumblue:205,mediumorchid:12211667,mediumpurple:9662683,mediumseagreen:3978097,mediumslateblue:8087790,mediumspringgreen:64154,mediumturquoise:4772300,mediumvioletred:13047173,midnightblue:1644912,mintcream:16121850,mistyrose:16770273,moccasin:16770229,navajowhite:16768685,navy:128,oldlace:16643558,olive:8421376,olivedrab:7048739,orange:16753920,orangered:16729344,orchid:
14315734,palegoldenrod:15657130,palegreen:10025880,paleturquoise:11529966,palevioletred:14381203,papayawhip:16773077,peachpuff:16767673,peru:13468991,pink:16761035,plum:14524637,powderblue:11591910,purple:8388736,red:16711680,rosybrown:12357519,royalblue:4286945,saddlebrown:9127187,salmon:16416882,sandybrown:16032864,seagreen:3050327,seashell:16774638,sienna:10506797,silver:12632256,skyblue:8900331,slateblue:6970061,slategray:7372944,slategrey:7372944,snow:16775930,springgreen:65407,steelblue:4620980,tan:13808780,teal:32896,thistle:14204888,tomato:16737095,turquoise:4251856,violet:15631086,wheat:16113331,white:16777215,whitesmoke:16119285,yellow:16776960,yellowgreen:10145074});Va.forEach(function(n,t){Va.set(n,ft(t))}),Xo.functor=_t,Xo.xhr=wt(bt),Xo.dsv=function(n,t){function e(n,e,i){arguments.length<3&&(i=e,e=null);var o=St(n,t,null==e?r:u(e),i);return o.row=function(n){return arguments.length?o.response(null==(e=n)?r:u(n)):e},o}function r(n){return e.parse(n.responseText)}function u(n){return function(t){return e.parse(t.responseText,n)}}function i(t){return t.map(o).join(n)}function o(n){return a.test(n)?'"'+n.replace(/\"/g,'""')+'"':n}var a=new RegExp('["'+n+"\n]"),c=n.charCodeAt(0);return e.parse=function(n,t){var r;return e.parseRows(n,function(n,e){if(r)return r(n,e-1);var u=new Function("d","return {"+n.map(function(n,t){return JSON.stringify(n)+": d["+t+"]"}).join(",")+"}");r=t?function(n,e){return t(u(n),e)}:u})},e.parseRows=function(n,t){function e(){if(l>=s)return o;if(u)return u=!1,i;var t=l;if(34===n.charCodeAt(t)){for(var e=t;e++<s;)if(34===n.charCodeAt(e)){if(34!==n.charCodeAt(e+1))break;++e}l=e+2;var r=n.charCodeAt(e+1);return 13===r?(u=!0,10===n.charCodeAt(e+2)&&++l):10===r&&(u=!0),n.substring(t+1,e).replace(/""/g,'"')}for(;s>l;){var r=n.charCodeAt(l++),a=1;if(10===r)u=!0;else if(13===r)u=!0,10===n.charCodeAt(l)&&(++l,++a);else if(r!==c)continue;return n.substring(t,l-a)}return n.substring(t)}for(var 
// Continuation of the dsv row scanner; then d3.dsv format/formatRows, the
// d3.csv (",") and d3.tsv specializations, the shared timer queue
// (d3.timer / d3.timer.flush driven by requestAnimationFrame or a 17ms
// setTimeout fallback), d3.round, SI-prefix formatting (d3.formatPrefix over
// the y..Y table Ka), the number-format spec regex (Qa) and conversion table
// (nc), and `zt` — a wrapper that makes a local-time Date API out of the UTC
// accessors (getTimezoneOffset() pinned to 0). Then the d3.time interval
// factory results (year/day/week per weekday) and the default en-US locale.
// NOTE(review): d3.tsv's delimiter must be a literal TAB character — the
// first argument to Xo.dsv for tsv below renders as a space; confirm the
// byte survived encoding/copy against a pristine d3 build.
r,u,i={},o={},a=[],s=n.length,l=0,f=0;(r=e())!==o;){for(var h=[];r!==i&&r!==o;)h.push(r),r=e();(!t||(h=t(h,f++)))&&a.push(h)}return a},e.format=function(t){if(Array.isArray(t[0]))return e.formatRows(t);var r=new l,u=[];return t.forEach(function(n){for(var t in n)r.has(t)||u.push(r.add(t))}),[u.map(o).join(n)].concat(t.map(function(t){return u.map(function(n){return o(t[n])}).join(n)})).join("\n")},e.formatRows=function(n){return n.map(i).join("\n")},e},Xo.csv=Xo.dsv(",","text/csv"),Xo.tsv=Xo.dsv("	","text/tab-separated-values");var Xa,$a,Ba,Wa,Ja,Ga=Go[h(Go,"requestAnimationFrame")]||function(n){setTimeout(n,17)};Xo.timer=function(n,t,e){var r=arguments.length;2>r&&(t=0),3>r&&(e=Date.now());var u=e+t,i={c:n,t:u,f:!1,n:null};$a?$a.n=i:Xa=i,$a=i,Ba||(Wa=clearTimeout(Wa),Ba=1,Ga(Et))},Xo.timer.flush=function(){At(),Ct()},Xo.round=function(n,t){return t?Math.round(n*(t=Math.pow(10,t)))/t:Math.round(n)};var Ka=["y","z","a","f","p","n","\xb5","m","","k","M","G","T","P","E","Z","Y"].map(Lt);Xo.formatPrefix=function(n,t){var e=0;return n&&(0>n&&(n*=-1),t&&(n=Xo.round(n,Nt(n,t))),e=1+Math.floor(1e-12+Math.log(n)/Math.LN10),e=Math.max(-24,Math.min(24,3*Math.floor((0>=e?e+1:e-1)/3)))),Ka[8+e/3]};var Qa=/(?:([^{])?([<>=^]))?([+\- ])?([$#])?(0)?(\d+)?(,)?(\.-?\d+)?([a-z%])?/i,nc=Xo.map({b:function(n){return n.toString(2)},c:function(n){return String.fromCharCode(n)},o:function(n){return n.toString(8)},x:function(n){return n.toString(16)},X:function(n){return n.toString(16).toUpperCase()},g:function(n,t){return n.toPrecision(t)},e:function(n,t){return n.toExponential(t)},f:function(n,t){return n.toFixed(t)},r:function(n,t){return(n=Xo.round(n,Nt(n,t))).toFixed(Math.max(0,Math.min(20,Nt(n*(1+1e-15),t))))}}),tc=Xo.time={},ec=Date;zt.prototype={getDate:function(){return this._.getUTCDate()},getDay:function(){return this._.getUTCDay()},getFullYear:function(){return this._.getUTCFullYear()},getHours:function(){return this._.getUTCHours()},getMilliseconds:function(){return 
this._.getUTCMilliseconds()},getMinutes:function(){return this._.getUTCMinutes()},getMonth:function(){return this._.getUTCMonth()},getSeconds:function(){return this._.getUTCSeconds()},getTime:function(){return this._.getTime()},getTimezoneOffset:function(){return 0},valueOf:function(){return this._.valueOf()},setDate:function(){rc.setUTCDate.apply(this._,arguments)},setDay:function(){rc.setUTCDay.apply(this._,arguments)},setFullYear:function(){rc.setUTCFullYear.apply(this._,arguments)},setHours:function(){rc.setUTCHours.apply(this._,arguments)},setMilliseconds:function(){rc.setUTCMilliseconds.apply(this._,arguments)},setMinutes:function(){rc.setUTCMinutes.apply(this._,arguments)},setMonth:function(){rc.setUTCMonth.apply(this._,arguments)},setSeconds:function(){rc.setUTCSeconds.apply(this._,arguments)},setTime:function(){rc.setTime.apply(this._,arguments)}};var rc=Date.prototype;tc.year=Rt(function(n){return n=tc.day(n),n.setMonth(0,1),n},function(n,t){n.setFullYear(n.getFullYear()+t)},function(n){return n.getFullYear()}),tc.years=tc.year.range,tc.years.utc=tc.year.utc.range,tc.day=Rt(function(n){var t=new ec(2e3,0);return t.setFullYear(n.getFullYear(),n.getMonth(),n.getDate()),t},function(n,t){n.setDate(n.getDate()+t)},function(n){return n.getDate()-1}),tc.days=tc.day.range,tc.days.utc=tc.day.utc.range,tc.dayOfYear=function(n){var t=tc.year(n);return Math.floor((n-t-6e4*(n.getTimezoneOffset()-t.getTimezoneOffset()))/864e5)},["sunday","monday","tuesday","wednesday","thursday","friday","saturday"].forEach(function(n,t){t=7-t;var e=tc[n]=Rt(function(n){return(n=tc.day(n)).setDate(n.getDate()-(n.getDay()+t)%7),n},function(n,t){n.setDate(n.getDate()+7*Math.floor(t))},function(n){var e=tc.year(n).getDay();return Math.floor((tc.dayOfYear(n)+(e+t)%7)/7)-(e!==t)});tc[n+"s"]=e.range,tc[n+"s"].utc=e.utc.range,tc[n+"OfYear"]=function(n){var e=tc.year(n).getDay();return 
Math.floor((tc.dayOfYear(n)+(e+t)%7)/7)}}),tc.week=tc.sunday,tc.weeks=tc.sunday.range,tc.weeks.utc=tc.sunday.utc.range,tc.weekOfYear=tc.sundayOfYear;var uc={"-":"",_:" ",0:"0"},ic=/^\s*\d+/,oc=/^%/;Xo.locale=function(n){return{numberFormat:Tt(n),timeFormat:Pt(n)}};var ac=Xo.locale({decimal:".",thousands:",",grouping:[3],currency:["$",""],dateTime:"%a %b %e %X %Y",date:"%m/%d/%Y",time:"%H:%M:%S",periods:["AM","PM"],days:["Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"],shortDays:["Sun","Mon","Tue","Wed","Thu","Fri","Sat"],months:["January","February","March","April","May","June","July","August","September","October","November","December"],shortMonths:["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"]});Xo.format=ac.numberFormat,Xo.geo={},re.prototype={s:0,t:0,add:function(n){ue(n,this.t,cc),ue(cc.s,this.s,this),this.s?this.t+=cc.t:this.s=cc.t},reset:function(){this.s=this.t=0},valueOf:function(){return this.s}};var cc=new re;Xo.geo.stream=function(n,t){n&&sc.hasOwnProperty(n.type)?sc[n.type](n,t):ie(n,t)};var sc={Feature:function(n,t){ie(n.geometry,t)},FeatureCollection:function(n,t){for(var e=n.features,r=-1,u=e.length;++r<u;)ie(e[r].geometry,t)}},lc={Sphere:function(n,t){t.sphere()},Point:function(n,t){n=n.coordinates,t.point(n[0],n[1],n[2])},MultiPoint:function(n,t){for(var e=n.coordinates,r=-1,u=e.length;++r<u;)n=e[r],t.point(n[0],n[1],n[2])},LineString:function(n,t){oe(n.coordinates,t,0)},MultiLineString:function(n,t){for(var e=n.coordinates,r=-1,u=e.length;++r<u;)oe(e[r],t,0)},Polygon:function(n,t){ae(n.coordinates,t)},MultiPolygon:function(n,t){for(var e=n.coordinates,r=-1,u=e.length;++r<u;)ae(e[r],t)},GeometryCollection:function(n,t){for(var e=n.geometries,r=-1,u=e.length;++r<u;)ie(e[r],t)}};Xo.geo.area=function(n){return fc=0,Xo.geo.stream(n,gc),fc};var fc,hc=new re,gc={sphere:function(){fc+=4*Sa},point:g,lineStart:g,lineEnd:g,polygonStart:function(){hc.reset(),gc.lineStart=ce},polygonEnd:function(){var 
// d3.geo.area polygon accumulator tail, then d3.geo.bounds — spherical
// bounding-box computation with antimeridian/pole handling (merges candidate
// longitude ranges after streaming) — d3.geo.centroid (weighted 3D vector
// accumulation, falling back from area to length to point moments), the
// clip-extent stream factory (d3.geo.clipExtent), and the composite
// d3.geo.albersUsa projection: lower-48 albers plus Alaska and Hawaii
// conic-equal-area insets; invert() picks the sub-projection by normalized
// viewport position, stream() fans events to all three.
// NOTE(review): `They` in (Xo.geo.conicEqualArea=function(){return je(They)})
// is unusually long for this minifier's 1–2 character naming scheme and looks
// like a mangled rename of the raw conic-equal-area projection symbol;
// confirm a matching `They` definition exists earlier in the bundle.
n=2*hc;fc+=0>n?4*Sa+n:n,gc.lineStart=gc.lineEnd=gc.point=g}};Xo.geo.bounds=function(){function n(n,t){x.push(M=[l=n,h=n]),f>t&&(f=t),t>g&&(g=t)}function t(t,e){var r=se([t*Na,e*Na]);if(m){var u=fe(m,r),i=[u[1],-u[0],0],o=fe(i,u);pe(o),o=ve(o);var c=t-p,s=c>0?1:-1,v=o[0]*La*s,d=oa(c)>180;if(d^(v>s*p&&s*t>v)){var y=o[1]*La;y>g&&(g=y)}else if(v=(v+360)%360-180,d^(v>s*p&&s*t>v)){var y=-o[1]*La;f>y&&(f=y)}else f>e&&(f=e),e>g&&(g=e);d?p>t?a(l,t)>a(l,h)&&(h=t):a(t,h)>a(l,h)&&(l=t):h>=l?(l>t&&(l=t),t>h&&(h=t)):t>p?a(l,t)>a(l,h)&&(h=t):a(t,h)>a(l,h)&&(l=t)}else n(t,e);m=r,p=t}function e(){_.point=t}function r(){M[0]=l,M[1]=h,_.point=n,m=null}function u(n,e){if(m){var r=n-p;y+=oa(r)>180?r+(r>0?360:-360):r}else v=n,d=e;gc.point(n,e),t(n,e)}function i(){gc.lineStart()}function o(){u(v,d),gc.lineEnd(),oa(y)>Aa&&(l=-(h=180)),M[0]=l,M[1]=h,m=null}function a(n,t){return(t-=n)<0?t+360:t}function c(n,t){return n[0]-t[0]}function s(n,t){return t[0]<=t[1]?t[0]<=n&&n<=t[1]:n<t[0]||t[1]<n}var l,f,h,g,p,v,d,m,y,x,M,_={point:n,lineStart:e,lineEnd:r,polygonStart:function(){_.point=u,_.lineStart=i,_.lineEnd=o,y=0,gc.polygonStart()},polygonEnd:function(){gc.polygonEnd(),_.point=n,_.lineStart=e,_.lineEnd=r,0>hc?(l=-(h=180),f=-(g=90)):y>Aa?g=90:-Aa>y&&(f=-90),M[0]=l,M[1]=h}};return function(n){g=h=-(l=f=1/0),x=[],Xo.geo.stream(n,_);var t=x.length;if(t){x.sort(c);for(var e,r=1,u=x[0],i=[u];t>r;++r)e=x[r],s(e[0],u)||s(e[1],u)?(a(u[0],e[1])>a(u[0],u[1])&&(u[1]=e[1]),a(e[0],u[1])>a(u[0],u[1])&&(u[0]=e[0])):i.push(u=e);for(var o,e,p=-1/0,t=i.length-1,r=0,u=i[t];t>=r;u=e,++r)e=i[r],(o=a(u[1],e[0]))>p&&(p=o,l=e[0],h=u[1])}return x=M=null,1/0===l||1/0===f?[[0/0,0/0],[0/0,0/0]]:[[l,f],[h,g]]}}(),Xo.geo.centroid=function(n){pc=vc=dc=mc=yc=xc=Mc=_c=bc=wc=Sc=0,Xo.geo.stream(n,kc);var t=bc,e=wc,r=Sc,u=t*t+e*e+r*r;return Ca>u&&(t=xc,e=Mc,r=_c,Aa>vc&&(t=dc,e=mc,r=yc),u=t*t+e*e+r*r,Ca>u)?[0/0,0/0]:[Math.atan2(e,t)*La,X(r/Math.sqrt(u))*La]};var 
pc,vc,dc,mc,yc,xc,Mc,_c,bc,wc,Sc,kc={sphere:g,point:me,lineStart:xe,lineEnd:Me,polygonStart:function(){kc.lineStart=_e},polygonEnd:function(){kc.lineStart=xe}},Ec=Ee(be,Te,ze,[-Sa,-Sa/2]),Ac=1e9;Xo.geo.clipExtent=function(){var n,t,e,r,u,i,o={stream:function(n){return u&&(u.valid=!1),u=i(n),u.valid=!0,u},extent:function(a){return arguments.length?(i=Pe(n=+a[0][0],t=+a[0][1],e=+a[1][0],r=+a[1][1]),u&&(u.valid=!1,u=null),o):[[n,t],[e,r]]}};return o.extent([[0,0],[960,500]])},(Xo.geo.conicEqualArea=function(){return je(They)}).raw=They,Xo.geo.albers=function(){return Xo.geo.conicEqualArea().rotate([96,0]).center([-.6,38.7]).parallels([29.5,45.5]).scale(1070)},Xo.geo.albersUsa=function(){function n(n){var i=n[0],o=n[1];return t=null,e(i,o),t||(r(i,o),t)||u(i,o),t}var t,e,r,u,i=Xo.geo.albers(),o=Xo.geo.conicEqualArea().rotate([154,0]).center([-2,58.5]).parallels([55,65]),a=Xo.geo.conicEqualArea().rotate([157,0]).center([-3,19.9]).parallels([8,18]),c={point:function(n,e){t=[n,e]}};return n.invert=function(n){var t=i.scale(),e=i.translate(),r=(n[0]-e[0])/t,u=(n[1]-e[1])/t;return(u>=.12&&.234>u&&r>=-.425&&-.214>r?o:u>=.166&&.234>u&&r>=-.214&&-.115>r?a:i).invert(n)},n.stream=function(n){var t=i.stream(n),e=o.stream(n),r=a.stream(n);return{point:function(n,u){t.point(n,u),e.point(n,u),r.point(n,u)},sphere:function(){t.sphere(),e.sphere(),r.sphere()},lineStart:function(){t.lineStart(),e.lineStart(),r.lineStart()},lineEnd:function(){t.lineEnd(),e.lineEnd(),r.lineEnd()},polygonStart:function(){t.polygonStart(),e.polygonStart(),r.polygonStart()},polygonEnd:function(){t.polygonEnd(),e.polygonEnd(),r.polygonEnd()}}},n.precision=function(t){return arguments.length?(i.precision(t),o.precision(t),a.precision(t),n):i.precision()},n.scale=function(t){return arguments.length?(i.scale(t),o.scale(.35*t),a.scale(t),n.translate(i.translate())):i.scale()},n.translate=function(t){if(!arguments.length)return i.translate();var s=i.scale(),l=+t[0],f=+t[1];return 
e=i.translate(t).clipExtent([[l-.455*s,f-.238*s],[l+.455*s,f+.238*s]]).stream(c).point,r=o.translate([l-.307*s,f+.201*s]).clipExtent([[l-.425*s+Aa,f+.12*s+Aa],[l-.214*s-Aa,f+.234*s-Aa]]).stream(c).point,u=a.translate([l-.205*s,f+.212*s]).clipExtent([[l-.214*s+Aa,f+.166*s+Aa],[l-.115*s-Aa,f+.234*s-Aa]]).stream(c).point,n},n.scale(1070)};var Cc,Nc,Lc,Tc,qc,zc,Rc={point:g,lineStart:g,lineEnd:g,polygonStart:function(){Nc=0,Rc.lineStart=Fe},polygonEnd:function(){Rc.lineStart=Rc.lineEnd=Rc.point=g,Cc+=oa(Nc/2)}},Dc={point:Oe,lineStart:g,lineEnd:g,polygonStart:g,polygonEnd:g},Pc={point:Ze,lineStart:Ve,lineEnd:Xe,polygonStart:function(){Pc.lineStart=$e},polygonEnd:function(){Pc.point=Ze,Pc.lineStart=Ve,Pc.lineEnd=Xe}};Xo.geo.path=function(){function n(n){return n&&("function"==typeof a&&i.pointRadius(+a.apply(this,arguments)),o&&o.valid||(o=u(i)),Xo.geo.stream(n,o)),i.result()}function t(){return o=null,n}var e,r,u,i,o,a=4.5;return n.area=function(n){return Cc=0,Xo.geo.stream(n,u(Rc)),Cc},n.centroid=function(n){return dc=mc=yc=xc=Mc=_c=bc=wc=Sc=0,Xo.geo.stream(n,u(Pc)),Sc?[bc/Sc,wc/Sc]:_c?[xc/_c,Mc/_c]:yc?[dc/yc,mc/yc]:[0/0,0/0]},n.bounds=function(n){return qc=zc=-(Lc=Tc=1/0),Xo.geo.stream(n,u(Dc)),[[Lc,Tc],[qc,zc]]},n.projection=function(n){return arguments.length?(u=(e=n)?n.stream||Je(n):bt,t()):e},n.context=function(n){return arguments.length?(i=null==(r=n)?new Ye:new Be(n),"function"!=typeof a&&i.pointRadius(a),t()):r},n.pointRadius=function(t){return arguments.length?(a="function"==typeof t?t:(i.pointRadius(+t),+t),n):a},n.projection(Xo.geo.albersUsa()).context(null)},Xo.geo.transform=function(n){return{stream:function(t){var e=new Ge(t);for(var r in n)e[r]=n[r];return 
// Generic pass-through stream (Ge.prototype), d3.geo.projection/
// projectionMutator, equirectangular, d3.geo.rotation (Euler-angle rotation
// in radians via ur), d3.geo.circle (small-circle polygon generator),
// d3.geo.distance (great-circle distance via the atan2 form), and
// d3.geo.graticule — the meridian/parallel mesh with major/minor extent and
// step accessors — plus d3.geo.greatArc, d3.geo.interpolate, d3.geo.length,
// and the azimuthal/conic/mercator/orthographic/stereographic/
// transverseMercator projection family built from the dr() helper.
// Ends with d3.geom.hull (monotone-chain style via Sr over sorted points)
// and d3.geom.polygon (shoelace area/centroid over a closed ring).
e}}},Ge.prototype={point:function(n,t){this.stream.point(n,t)},sphere:function(){this.stream.sphere()},lineStart:function(){this.stream.lineStart()},lineEnd:function(){this.stream.lineEnd()},polygonStart:function(){this.stream.polygonStart()},polygonEnd:function(){this.stream.polygonEnd()}},Xo.geo.projection=Qe,Xo.geo.projectionMutator=nr,(Xo.geo.equirectangular=function(){return Qe(er)}).raw=er.invert=er,Xo.geo.rotation=function(n){function t(t){return t=n(t[0]*Na,t[1]*Na),t[0]*=La,t[1]*=La,t}return n=ur(n[0]%360*Na,n[1]*Na,n.length>2?n[2]*Na:0),t.invert=function(t){return t=n.invert(t[0]*Na,t[1]*Na),t[0]*=La,t[1]*=La,t},t},rr.invert=er,Xo.geo.circle=function(){function n(){var n="function"==typeof r?r.apply(this,arguments):r,t=ur(-n[0]*Na,-n[1]*Na,0).invert,u=[];return e(null,null,1,{point:function(n,e){u.push(n=t(n,e)),n[0]*=La,n[1]*=La}}),{type:"Polygon",coordinates:[u]}}var t,e,r=[0,0],u=6;return n.origin=function(t){return arguments.length?(r=t,n):r},n.angle=function(r){return arguments.length?(e=cr((t=+r)*Na,u*Na),n):t},n.precision=function(r){return arguments.length?(e=cr(t*Na,(u=+r)*Na),n):u},n.angle(90)},Xo.geo.distance=function(n,t){var e,r=(t[0]-n[0])*Na,u=n[1]*Na,i=t[1]*Na,o=Math.sin(r),a=Math.cos(r),c=Math.sin(u),s=Math.cos(u),l=Math.sin(i),f=Math.cos(i);return Math.atan2(Math.sqrt((e=f*o)*e+(e=s*l-c*f*a)*e),c*l+s*f*a)},Xo.geo.graticule=function(){function n(){return{type:"MultiLineString",coordinates:t()}}function t(){return Xo.range(Math.ceil(i/d)*d,u,d).map(h).concat(Xo.range(Math.ceil(s/m)*m,c,m).map(g)).concat(Xo.range(Math.ceil(r/p)*p,e,p).filter(function(n){return oa(n%d)>Aa}).map(l)).concat(Xo.range(Math.ceil(a/v)*v,o,v).filter(function(n){return oa(n%m)>Aa}).map(f))}var e,r,u,i,o,a,c,s,l,f,h,g,p=10,v=p,d=90,m=360,y=2.5;return n.lines=function(){return 
t().map(function(n){return{type:"LineString",coordinates:n}})},n.outline=function(){return{type:"Polygon",coordinates:[h(i).concat(g(c).slice(1),h(u).reverse().slice(1),g(s).reverse().slice(1))]}},n.extent=function(t){return arguments.length?n.majorExtent(t).minorExtent(t):n.minorExtent()},n.majorExtent=function(t){return arguments.length?(i=+t[0][0],u=+t[1][0],s=+t[0][1],c=+t[1][1],i>u&&(t=i,i=u,u=t),s>c&&(t=s,s=c,c=t),n.precision(y)):[[i,s],[u,c]]},n.minorExtent=function(t){return arguments.length?(r=+t[0][0],e=+t[1][0],a=+t[0][1],o=+t[1][1],r>e&&(t=r,r=e,e=t),a>o&&(t=a,a=o,o=t),n.precision(y)):[[r,a],[e,o]]},n.step=function(t){return arguments.length?n.majorStep(t).minorStep(t):n.minorStep()},n.majorStep=function(t){return arguments.length?(d=+t[0],m=+t[1],n):[d,m]},n.minorStep=function(t){return arguments.length?(p=+t[0],v=+t[1],n):[p,v]},n.precision=function(t){return arguments.length?(y=+t,l=lr(a,o,90),f=fr(r,e,y),h=lr(s,c,90),g=fr(i,u,y),n):y},n.majorExtent([[-180,-90+Aa],[180,90-Aa]]).minorExtent([[-180,-80-Aa],[180,80+Aa]])},Xo.geo.greatArc=function(){function n(){return{type:"LineString",coordinates:[t||r.apply(this,arguments),e||u.apply(this,arguments)]}}var t,e,r=hr,u=gr;return n.distance=function(){return Xo.geo.distance(t||r.apply(this,arguments),e||u.apply(this,arguments))},n.source=function(e){return arguments.length?(r=e,t="function"==typeof e?null:e,n):r},n.target=function(t){return arguments.length?(u=t,e="function"==typeof t?null:t,n):u},n.precision=function(){return arguments.length?n:0},n},Xo.geo.interpolate=function(n,t){return pr(n[0]*Na,n[1]*Na,t[0]*Na,t[1]*Na)},Xo.geo.length=function(n){return Uc=0,Xo.geo.stream(n,jc),Uc};var Uc,jc={sphere:g,point:g,lineStart:vr,lineEnd:g,polygonStart:g,polygonEnd:g},Hc=dr(function(n){return Math.sqrt(2/(1+n))},function(n){return 2*Math.asin(n/2)});(Xo.geo.azimuthalEqualArea=function(){return Qe(Hc)}).raw=Hc;var Fc=dr(function(n){var t=Math.acos(n);return 
t&&t/Math.sin(t)},bt);(Xo.geo.azimuthalEquidistant=function(){return Qe(Fc)}).raw=Fc,(Xo.geo.conicConformal=function(){return je(mr)}).raw=mr,(Xo.geo.conicEquidistant=function(){return je(yr)}).raw=yr;var Oc=dr(function(n){return 1/n},Math.atan);(Xo.geo.gnomonic=function(){return Qe(Oc)}).raw=Oc,xr.invert=function(n,t){return[n,2*Math.atan(Math.exp(t))-Ea]},(Xo.geo.mercator=function(){return Mr(xr)}).raw=xr;var Yc=dr(function(){return 1},Math.asin);(Xo.geo.orthographic=function(){return Qe(Yc)}).raw=Yc;var Ic=dr(function(n){return 1/(1+n)},function(n){return 2*Math.atan(n)});(Xo.geo.stereographic=function(){return Qe(Ic)}).raw=Ic,_r.invert=function(n,t){return[-t,2*Math.atan(Math.exp(n))-Ea]},(Xo.geo.transverseMercator=function(){var n=Mr(_r),t=n.center,e=n.rotate;return n.center=function(n){return n?t([-n[1],n[0]]):(n=t(),[-n[1],n[0]])},n.rotate=function(n){return n?e([n[0],n[1],n.length>2?n[2]+90:90]):(n=e(),[n[0],n[1],n[2]-90])},n.rotate([0,0])}).raw=_r,Xo.geom={},Xo.geom.hull=function(n){function t(n){if(n.length<3)return[];var t,u=_t(e),i=_t(r),o=n.length,a=[],c=[];for(t=0;o>t;t++)a.push([+u.call(this,n[t],t),+i.call(this,n[t],t),t]);for(a.sort(kr),t=0;o>t;t++)c.push([a[t][0],-a[t][1]]);var s=Sr(a),l=Sr(c),f=l[0]===s[0],h=l[l.length-1]===s[s.length-1],g=[];for(t=s.length-1;t>=0;--t)g.push(n[a[s[t]][2]]);for(t=+f;t<l.length-h;++t)g.push(n[a[l[t]][2]]);return g}var e=br,r=wr;return arguments.length?t(n):(t.x=function(n){return arguments.length?(e=n,t):e},t.y=function(n){return arguments.length?(r=n,t):r},t)},Xo.geom.polygon=function(n){return fa(n,Zc),n};var Zc=Xo.geom.polygon.prototype=[];Zc.area=function(){for(var n,t=-1,e=this.length,r=this[e-1],u=0;++t<e;)n=r,r=this[t],u+=n[1]*r[0]-n[0]*r[1];return.5*u},Zc.centroid=function(n){var t,e,r=-1,u=this.length,i=0,o=0,a=this[u-1];for(arguments.length||(n=-1/(6*this.area()));++r<u;)t=a,a=this[r],e=t[0]*a[1]-a[0]*t[1],i+=(t[0]+a[0])*e,o+=(t[1]+a[1])*e;return[i*n,o*n]},Zc.clip=function(n){for(var 
// d3.geom.polygon.clip tail (Sutherland–Hodgman clipping of `this` against
// polygon n), then the red-black tree used by the Voronoi beach line
// (Wr.prototype.insert/remove implement the standard rotation/recoloring
// cases — highly order-sensitive; do not restyle), Br (cell half-edge
// start/end), d3.geom.voronoi with its cells/links/triangles outputs and
// clipExtent/size accessors over the default +/-1e6 clip box (Kc),
// d3.geom.delaunay (voronoi duality), d3.geom.quadtree (point insertion with
// near-duplicate splitting, add/visit API), and the interpolator layer:
// interpolateRgb/Object/Number/String, the d3.interpolate dispatcher that
// picks an interpolator by operand type (color name / #hex / rgb()/hsl()
// string -> rgb, object/array -> structural, otherwise numeric), the easing
// registries (ts/es) behind d3.ease, and d3.transform's lazy <g>-based
// matrix decomposition.
t,e,r,u,i,o,a=Cr(n),c=-1,s=this.length-Cr(this),l=this[s-1];++c<s;){for(t=n.slice(),n.length=0,u=this[c],i=t[(r=t.length-a)-1],e=-1;++e<r;)o=t[e],Er(o,l,u)?(Er(i,l,u)||n.push(Ar(i,o,l,u)),n.push(o)):Er(i,l,u)&&n.push(Ar(i,o,l,u)),i=o;a&&n.push(n[0]),l=u}return n};var Vc,Xc,$c,Bc,Wc,Jc=[],Gc=[];Pr.prototype.prepare=function(){for(var n,t=this.edges,e=t.length;e--;)n=t[e].edge,n.b&&n.a||t.splice(e,1);return t.sort(jr),t.length},Br.prototype={start:function(){return this.edge.l===this.site?this.edge.a:this.edge.b},end:function(){return this.edge.l===this.site?this.edge.b:this.edge.a}},Wr.prototype={insert:function(n,t){var e,r,u;if(n){if(t.P=n,t.N=n.N,n.N&&(n.N.P=t),n.N=t,n.R){for(n=n.R;n.L;)n=n.L;n.L=t}else n.R=t;e=n}else this._?(n=Qr(this._),t.P=null,t.N=n,n.P=n.L=t,e=n):(t.P=t.N=null,this._=t,e=null);for(t.L=t.R=null,t.U=e,t.C=!0,n=t;e&&e.C;)r=e.U,e===r.L?(u=r.R,u&&u.C?(e.C=u.C=!1,r.C=!0,n=r):(n===e.R&&(Gr(this,e),n=e,e=n.U),e.C=!1,r.C=!0,Kr(this,r))):(u=r.L,u&&u.C?(e.C=u.C=!1,r.C=!0,n=r):(n===e.L&&(Kr(this,e),n=e,e=n.U),e.C=!1,r.C=!0,Gr(this,r))),e=n.U;this._.C=!1},remove:function(n){n.N&&(n.N.P=n.P),n.P&&(n.P.N=n.N),n.N=n.P=null;var t,e,r,u=n.U,i=n.L,o=n.R;if(e=i?o?Qr(o):i:o,u?u.L===n?u.L=e:u.R=e:this._=e,i&&o?(r=e.C,e.C=n.C,e.L=i,i.U=e,e!==o?(u=e.U,e.U=n.U,n=e.R,u.L=n,e.R=o,o.U=e):(e.U=u,u=e,n=e.R)):(r=n.C,n=e),n&&(n.U=u),!r){if(n&&n.C)return n.C=!1,void 0;do{if(n===this._)break;if(n===u.L){if(t=u.R,t.C&&(t.C=!1,u.C=!0,Gr(this,u),t=u.R),t.L&&t.L.C||t.R&&t.R.C){t.R&&t.R.C||(t.L.C=!1,t.C=!0,Kr(this,t),t=u.R),t.C=u.C,u.C=t.R.C=!1,Gr(this,u),n=this._;break}}else if(t=u.L,t.C&&(t.C=!1,u.C=!0,Kr(this,u),t=u.L),t.L&&t.L.C||t.R&&t.R.C){t.L&&t.L.C||(t.R.C=!1,t.C=!0,Gr(this,t),t=u.L),t.C=u.C,u.C=t.L.C=!1,Kr(this,u),n=this._;break}t.C=!0,n=u,u=u.U}while(!n.C);n&&(n.C=!1)}}},Xo.geom.voronoi=function(n){function t(n){var t=new Array(n.length),r=a[0][0],u=a[0][1],i=a[1][0],o=a[1][1];return nu(e(n),a).cells.forEach(function(e,a){var 
c=e.edges,s=e.site,l=t[a]=c.length?c.map(function(n){var t=n.start();return[t.x,t.y]}):s.x>=r&&s.x<=i&&s.y>=u&&s.y<=o?[[r,o],[i,o],[i,u],[r,u]]:[];l.point=n[a]}),t}function e(n){return n.map(function(n,t){return{x:Math.round(i(n,t)/Aa)*Aa,y:Math.round(o(n,t)/Aa)*Aa,i:t}})}var r=br,u=wr,i=r,o=u,a=Kc;return n?t(n):(t.links=function(n){return nu(e(n)).edges.filter(function(n){return n.l&&n.r}).map(function(t){return{source:n[t.l.i],target:n[t.r.i]}})},t.triangles=function(n){var t=[];return nu(e(n)).cells.forEach(function(e,r){for(var u,i,o=e.site,a=e.edges.sort(jr),c=-1,s=a.length,l=a[s-1].edge,f=l.l===o?l.r:l.l;++c<s;)u=l,i=f,l=a[c].edge,f=l.l===o?l.r:l.l,r<i.i&&r<f.i&&eu(o,i,f)<0&&t.push([n[r],n[i.i],n[f.i]])}),t},t.x=function(n){return arguments.length?(i=_t(r=n),t):r},t.y=function(n){return arguments.length?(o=_t(u=n),t):u},t.clipExtent=function(n){return arguments.length?(a=null==n?Kc:n,t):a===Kc?null:a},t.size=function(n){return arguments.length?t.clipExtent(n&&[[0,0],n]):a===Kc?null:a&&a[1]},t)};var Kc=[[-1e6,-1e6],[1e6,1e6]];Xo.geom.delaunay=function(n){return Xo.geom.voronoi().triangles(n)},Xo.geom.quadtree=function(n,t,e,r,u){function i(n){function i(n,t,e,r,u,i,o,a){if(!isNaN(e)&&!isNaN(r))if(n.leaf){var c=n.x,l=n.y;if(null!=c)if(oa(c-e)+oa(l-r)<.01)s(n,t,e,r,u,i,o,a);else{var f=n.point;n.x=n.y=n.point=null,s(n,f,c,l,u,i,o,a),s(n,t,e,r,u,i,o,a)}else n.x=e,n.y=r,n.point=t}else s(n,t,e,r,u,i,o,a)}function s(n,t,e,r,u,o,a,c){var s=.5*(u+a),l=.5*(o+c),f=e>=s,h=r>=l,g=(h<<1)+f;n.leaf=!1,n=n.nodes[g]||(n.nodes[g]=iu()),f?u=s:a=s,h?o=l:c=l,i(n,t,e,r,u,o,a,c)}var l,f,h,g,p,v,d,m,y,x=_t(a),M=_t(c);if(null!=t)v=t,d=e,m=r,y=u;else if(m=y=-(v=d=1/0),f=[],h=[],p=n.length,o)for(g=0;p>g;++g)l=n[g],l.x<v&&(v=l.x),l.y<d&&(d=l.y),l.x>m&&(m=l.x),l.y>y&&(y=l.y),f.push(l.x),h.push(l.y);else for(g=0;p>g;++g){var _=+x(l=n[g],g),b=+M(l,g);v>_&&(v=_),d>b&&(d=b),_>m&&(m=_),b>y&&(y=b),f.push(_),h.push(b)}var w=m-v,S=y-d;w>S?y=d+w:m=v+S;var 
k=iu();if(k.add=function(n){i(k,n,+x(n,++g),+M(n,g),v,d,m,y)},k.visit=function(n){ou(n,k,v,d,m,y)},g=-1,null==t){for(;++g<p;)i(k,n[g],f[g],h[g],v,d,m,y);--g}else n.forEach(k.add);return f=h=n=l=null,k}var o,a=br,c=wr;return(o=arguments.length)?(a=ru,c=uu,3===o&&(u=e,r=t,e=t=0),i(n)):(i.x=function(n){return arguments.length?(a=n,i):a},i.y=function(n){return arguments.length?(c=n,i):c},i.extent=function(n){return arguments.length?(null==n?t=e=r=u=null:(t=+n[0][0],e=+n[0][1],r=+n[1][0],u=+n[1][1]),i):null==t?null:[[t,e],[r,u]]},i.size=function(n){return arguments.length?(null==n?t=e=r=u=null:(t=e=0,r=+n[0],u=+n[1]),i):null==t?null:[r-t,u-e]},i)},Xo.interpolateRgb=au,Xo.interpolateObject=cu,Xo.interpolateNumber=su,Xo.interpolateString=lu;var Qc=/[-+]?(?:\d+\.?\d*|\.?\d+)(?:[eE][-+]?\d+)?/g;Xo.interpolate=fu,Xo.interpolators=[function(n,t){var e=typeof t;return("string"===e?Va.has(t)||/^(#|rgb\(|hsl\()/.test(t)?au:lu:t instanceof G?au:"object"===e?Array.isArray(t)?hu:cu:su)(n,t)}],Xo.interpolateArray=hu;var ns=function(){return bt},ts=Xo.map({linear:ns,poly:xu,quad:function(){return du},cubic:function(){return mu},sin:function(){return Mu},exp:function(){return _u},circle:function(){return bu},elastic:wu,back:Su,bounce:function(){return ku}}),es=Xo.map({"in":bt,out:pu,"in-out":vu,"out-in":function(n){return vu(pu(n))}});Xo.ease=function(n){var t=n.indexOf("-"),e=t>=0?n.substring(0,t):n,r=t>=0?n.substring(t+1):"in";return e=ts.get(e)||ns,r=es.get(r)||bt,gu(r(e.apply(null,$o.call(arguments,1))))},Xo.interpolateHcl=Eu,Xo.interpolateHsl=Au,Xo.interpolateLab=Cu,Xo.interpolateRound=Nu,Xo.transform=function(n){var t=Wo.createElementNS(Xo.ns.prefix.svg,"g");return(Xo.transform=function(n){if(null!=n){t.setAttribute("transform",n);var e=t.transform.baseVal.consolidate()}return new Lu(e?e.matrix:rs)})(n)},Lu.prototype.toString=function(){return"translate("+this.translate+")rotate("+this.rotate+")skewX("+this.skew+")scale("+this.scale+")"};var 
// Identity transform matrix (rs), d3.interpolateTransform, d3.layout.bundle
// (edge bundling control points via Uu), d3.layout.chord — matrix ->
// chords/groups with optional sortGroups/sortSubgroups/sortChords and
// per-chord padding `l` — and d3.layout.force: per-tick link constraints,
// gravity pull toward the center, Barnes–Hut charge approximation over a
// quadtree (theta^2 cached in `d`, chargeDistance^2 in `p`), and
// position-Verlet-style integration with `friction` (l=.9) as damping; the
// alpha() setter drives the cooling schedule and (re)starts the d3.timer.
// start() resolves numeric link endpoints into node refs, seeds missing
// x/y from linked neighbors or at random, and precomputes per-link
// distance/strength and per-node charge arrays. Also d3.layout.hierarchy
// (recursive children/value walk with optional sort and revalue) and the
// head of d3.layout.partition.
rs={a:1,b:0,c:0,d:1,e:0,f:0};Xo.interpolateTransform=Ru,Xo.layout={},Xo.layout.bundle=function(){return function(n){for(var t=[],e=-1,r=n.length;++e<r;)t.push(Uu(n[e]));return t}},Xo.layout.chord=function(){function n(){var n,s,f,h,g,p={},v=[],d=Xo.range(i),m=[];for(e=[],r=[],n=0,h=-1;++h<i;){for(s=0,g=-1;++g<i;)s+=u[h][g];v.push(s),m.push(Xo.range(i)),n+=s}for(o&&d.sort(function(n,t){return o(v[n],v[t])}),a&&m.forEach(function(n,t){n.sort(function(n,e){return a(u[t][n],u[t][e])})}),n=(ka-l*i)/n,s=0,h=-1;++h<i;){for(f=s,g=-1;++g<i;){var y=d[h],x=m[y][g],M=u[y][x],_=s,b=s+=M*n;p[y+"-"+x]={index:y,subindex:x,startAngle:_,endAngle:b,value:M}}r[y]={index:y,startAngle:f,endAngle:s,value:(s-f)/n},s+=l}for(h=-1;++h<i;)for(g=h-1;++g<i;){var w=p[h+"-"+g],S=p[g+"-"+h];(w.value||S.value)&&e.push(w.value<S.value?{source:S,target:w}:{source:w,target:S})}c&&t()}function t(){e.sort(function(n,t){return c((n.source.value+n.target.value)/2,(t.source.value+t.target.value)/2)})}var e,r,u,i,o,a,c,s={},l=0;return s.matrix=function(n){return arguments.length?(i=(u=n)&&u.length,e=r=null,s):u},s.padding=function(n){return arguments.length?(l=n,e=r=null,s):l},s.sortGroups=function(n){return arguments.length?(o=n,e=r=null,s):o},s.sortSubgroups=function(n){return arguments.length?(a=n,e=null,s):a},s.sortChords=function(n){return arguments.length?(c=n,e&&t(),s):c},s.chords=function(){return e||n(),e},s.groups=function(){return r||n(),r},s},Xo.layout.force=function(){function n(n){return function(t,e,r,u){if(t.point!==n){var i=t.cx-n.x,o=t.cy-n.y,a=u-e,c=i*i+o*o;if(c>a*a/d){if(p>c){var s=t.charge/c;n.px-=i*s,n.py-=o*s}return!0}if(t.point&&c&&p>c){var s=t.pointCharge/c;n.px-=i*s,n.py-=o*s}}return!t.charge}}function t(n){n.px=Xo.event.x,n.py=Xo.event.y,a.resume()}var e,r,u,i,o,a={},c=Xo.dispatch("start","tick","end"),s=[1,1],l=.9,f=us,h=is,g=-30,p=os,v=.1,d=.64,m=[],y=[];return a.tick=function(){if((r*=.99)<.005)return c.end({type:"end",alpha:r=0}),!0;var 
t,e,a,f,h,p,d,x,M,_=m.length,b=y.length;for(e=0;b>e;++e)a=y[e],f=a.source,h=a.target,x=h.x-f.x,M=h.y-f.y,(p=x*x+M*M)&&(p=r*i[e]*((p=Math.sqrt(p))-u[e])/p,x*=p,M*=p,h.x-=x*(d=f.weight/(h.weight+f.weight)),h.y-=M*d,f.x+=x*(d=1-d),f.y+=M*d);if((d=r*v)&&(x=s[0]/2,M=s[1]/2,e=-1,d))for(;++e<_;)a=m[e],a.x+=(x-a.x)*d,a.y+=(M-a.y)*d;if(g)for(Zu(t=Xo.geom.quadtree(m),r,o),e=-1;++e<_;)(a=m[e]).fixed||t.visit(n(a));for(e=-1;++e<_;)a=m[e],a.fixed?(a.x=a.px,a.y=a.py):(a.x-=(a.px-(a.px=a.x))*l,a.y-=(a.py-(a.py=a.y))*l);c.tick({type:"tick",alpha:r})},a.nodes=function(n){return arguments.length?(m=n,a):m},a.links=function(n){return arguments.length?(y=n,a):y},a.size=function(n){return arguments.length?(s=n,a):s},a.linkDistance=function(n){return arguments.length?(f="function"==typeof n?n:+n,a):f},a.distance=a.linkDistance,a.linkStrength=function(n){return arguments.length?(h="function"==typeof n?n:+n,a):h},a.friction=function(n){return arguments.length?(l=+n,a):l},a.charge=function(n){return arguments.length?(g="function"==typeof n?n:+n,a):g},a.chargeDistance=function(n){return arguments.length?(p=n*n,a):Math.sqrt(p)},a.gravity=function(n){return arguments.length?(v=+n,a):v},a.theta=function(n){return arguments.length?(d=n*n,a):Math.sqrt(d)},a.alpha=function(n){return arguments.length?(n=+n,r?r=n>0?n:0:n>0&&(c.start({type:"start",alpha:r=n}),Xo.timer(a.tick)),a):r},a.start=function(){function n(n,r){if(!e){for(e=new Array(c),a=0;c>a;++a)e[a]=[];for(a=0;s>a;++a){var u=y[a];e[u.source.index].push(u.target),e[u.target.index].push(u.source)}}for(var i,o=e[t],a=-1,s=o.length;++a<s;)if(!isNaN(i=o[a][n]))return i;return Math.random()*r}var t,e,r,c=m.length,l=y.length,p=s[0],v=s[1];for(t=0;c>t;++t)(r=m[t]).index=t,r.weight=0;for(t=0;l>t;++t)r=y[t],"number"==typeof r.source&&(r.source=m[r.source]),"number"==typeof 
r.target&&(r.target=m[r.target]),++r.source.weight,++r.target.weight;for(t=0;c>t;++t)r=m[t],isNaN(r.x)&&(r.x=n("x",p)),isNaN(r.y)&&(r.y=n("y",v)),isNaN(r.px)&&(r.px=r.x),isNaN(r.py)&&(r.py=r.y);if(u=[],"function"==typeof f)for(t=0;l>t;++t)u[t]=+f.call(this,y[t],t);else for(t=0;l>t;++t)u[t]=f;if(i=[],"function"==typeof h)for(t=0;l>t;++t)i[t]=+h.call(this,y[t],t);else for(t=0;l>t;++t)i[t]=h;if(o=[],"function"==typeof g)for(t=0;c>t;++t)o[t]=+g.call(this,m[t],t);else for(t=0;c>t;++t)o[t]=g;return a.resume()},a.resume=function(){return a.alpha(.1)},a.stop=function(){return a.alpha(0)},a.drag=function(){return e||(e=Xo.behavior.drag().origin(bt).on("dragstart.force",Fu).on("drag.force",t).on("dragend.force",Ou)),arguments.length?(this.on("mouseover.force",Yu).on("mouseout.force",Iu).call(e),void 0):e},Xo.rebind(a,c,"on")};var us=20,is=1,os=1/0;Xo.layout.hierarchy=function(){function n(t,o,a){var c=u.call(e,t,o);if(t.depth=o,a.push(t),c&&(s=c.length)){for(var s,l,f=-1,h=t.children=new Array(s),g=0,p=o+1;++f<s;)l=h[f]=n(c[f],p,a),l.parent=t,g+=l.value;r&&h.sort(r),i&&(t.value=g)}else delete t.children,i&&(t.value=+i.call(e,t,o)||0);return t}function t(n,r){var u=n.children,o=0;if(u&&(a=u.length))for(var a,c=-1,s=r+1;++c<a;)o+=t(u[c],s);else i&&(o=+i.call(e,n,r)||0);return i&&(n.value=o),o}function e(t){var e=[];return n(t,0,e),e}var r=Bu,u=Xu,i=$u;return e.sort=function(n){return arguments.length?(r=n,e):r},e.children=function(n){return arguments.length?(u=n,e):u},e.value=function(n){return arguments.length?(i=n,e):i},e.revalue=function(n){return t(n,0),n},e},Xo.layout.partition=function(){function n(t,e,r,u){var i=t.children;if(t.x=e,t.y=t.depth*u,t.dx=r,t.dy=u,i&&(o=i.length)){var o,a,c,s=-1;for(r=t.value?r/t.value:0;++s<o;)n(a=i[s],e,c=a.value*r,u),e+=c}function t(n){var e=n.children,r=0;if(e&&(u=e.length))for(var u,i=-1;++i<u;)r=Math.max(r,t(e[i]));return 1+r}function e(e,i){var o=r.call(this,e,i);return n(o[0],0,u[0],u[1]/t(o[0])),o}var 
// d3.layout.partition accessors tail, then d3.layout.pie (value accessor,
// start/end angle, optional sort; default sort `as` means descending by
// value), d3.layout.stack with its order registry `cs` ("inside-out",
// "reverse", "default") and offset registry `ss` ("silhouette", "wiggle",
// "expand", "zero" — wiggle minimizes weighted slope changes), and
// d3.layout.histogram (value/range/bins accessors; `frequency` toggles raw
// counts vs probability mass 1/h per datum). Ends at the head of
// d3.layout.tree (Buchheim tidy-tree first/second walks).
r=Xo.layout.hierarchy(),u=[1,1];return e.size=function(n){return arguments.length?(u=n,e):u},Vu(e,r)},Xo.layout.pie=function(){function n(i){var o=i.map(function(e,r){return+t.call(n,e,r)}),a=+("function"==typeof r?r.apply(this,arguments):r),c=(("function"==typeof u?u.apply(this,arguments):u)-a)/Xo.sum(o),s=Xo.range(i.length);null!=e&&s.sort(e===as?function(n,t){return o[t]-o[n]}:function(n,t){return e(i[n],i[t])});var l=[];return s.forEach(function(n){var t;l[n]={data:i[n],value:t=o[n],startAngle:a,endAngle:a+=t*c}}),l}var t=Number,e=as,r=0,u=ka;return n.value=function(e){return arguments.length?(t=e,n):t},n.sort=function(t){return arguments.length?(e=t,n):e},n.startAngle=function(t){return arguments.length?(r=t,n):r},n.endAngle=function(t){return arguments.length?(u=t,n):u},n};var as={};Xo.layout.stack=function(){function n(a,c){var s=a.map(function(e,r){return t.call(n,e,r)}),l=s.map(function(t){return t.map(function(t,e){return[i.call(n,t,e),o.call(n,t,e)]})}),f=e.call(n,l,c);s=Xo.permute(s,f),l=Xo.permute(l,f);var h,g,p,v=r.call(n,l,c),d=s.length,m=s[0].length;for(g=0;m>g;++g)for(u.call(n,s[0][g],p=v[g],l[0][g][1]),h=1;d>h;++h)u.call(n,s[h][g],p+=l[h-1][g][1],l[h][g][1]);return a}var t=bt,e=Qu,r=ni,u=Ku,i=Ju,o=Gu;return n.values=function(e){return arguments.length?(t=e,n):t},n.order=function(t){return arguments.length?(e="function"==typeof t?t:cs.get(t)||Qu,n):e},n.offset=function(t){return arguments.length?(r="function"==typeof t?t:ss.get(t)||ni,n):r},n.x=function(t){return arguments.length?(i=t,n):i},n.y=function(t){return arguments.length?(o=t,n):o},n.out=function(t){return arguments.length?(u=t,n):u},n};var cs=Xo.map({"inside-out":function(n){var t,e,r=n.length,u=n.map(ti),i=n.map(ei),o=Xo.range(r).sort(function(n,t){return u[n]-u[t]}),a=0,c=0,s=[],l=[];for(t=0;r>t;++t)e=o[t],c>a?(a+=i[e],s.push(e)):(c+=i[e],l.push(e));return l.reverse().concat(s)},reverse:function(n){return Xo.range(n.length).reverse()},"default":Qu}),ss=Xo.map({silhouette:function(n){var 
t,e,r,u=n.length,i=n[0].length,o=[],a=0,c=[];for(e=0;i>e;++e){for(t=0,r=0;u>t;t++)r+=n[t][e][1];r>a&&(a=r),o.push(r)}for(e=0;i>e;++e)c[e]=(a-o[e])/2;return c},wiggle:function(n){var t,e,r,u,i,o,a,c,s,l=n.length,f=n[0],h=f.length,g=[];for(g[0]=c=s=0,e=1;h>e;++e){for(t=0,u=0;l>t;++t)u+=n[t][e][1];for(t=0,i=0,a=f[e][0]-f[e-1][0];l>t;++t){for(r=0,o=(n[t][e][1]-n[t][e-1][1])/(2*a);t>r;++r)o+=(n[r][e][1]-n[r][e-1][1])/a;i+=o*n[t][e][1]}g[e]=c-=u?i/u*a:0,s>c&&(s=c)}for(e=0;h>e;++e)g[e]-=s;return g},expand:function(n){var t,e,r,u=n.length,i=n[0].length,o=1/u,a=[];for(e=0;i>e;++e){for(t=0,r=0;u>t;t++)r+=n[t][e][1];if(r)for(t=0;u>t;t++)n[t][e][1]/=r;else for(t=0;u>t;t++)n[t][e][1]=o}for(e=0;i>e;++e)a[e]=0;return a},zero:ni});Xo.layout.histogram=function(){function n(n,i){for(var o,a,c=[],s=n.map(e,this),l=r.call(this,s,i),f=u.call(this,l,s,i),i=-1,h=s.length,g=f.length-1,p=t?1:1/h;++i<g;)o=c[i]=[],o.dx=f[i+1]-(o.x=f[i]),o.y=0;if(g>0)for(i=-1;++i<h;)a=s[i],a>=l[0]&&a<=l[1]&&(o=c[Xo.bisect(f,a,1,g)-1],o.y+=p,o.push(n[i]));return c}var t=!0,e=Number,r=oi,u=ui;return n.value=function(t){return arguments.length?(e=t,n):e},n.range=function(t){return arguments.length?(r=_t(t),n):r},n.bins=function(t){return arguments.length?(u="number"==typeof t?function(n){return ii(n,t)}:_t(t),n):u},n.frequency=function(e){return arguments.length?(t=!!e,n):t},n},Xo.layout.tree=function(){function n(n,i){function o(n,t){var r=n.children,u=n._tree;if(r&&(i=r.length)){for(var i,a,s,l=r[0],f=l,h=-1;++h<i;)s=r[h],o(s,a),f=c(s,a,f),a=s;vi(n);var g=.5*(l._tree.prelim+s._tree.prelim);t?(u.prelim=t._tree.prelim+e(n,t),u.mod=u.prelim-g):u.prelim=g}else t&&(u.prelim=t._tree.prelim+e(n,t))}function a(n,t){n.x=n._tree.prelim+t;var e=n.children;if(e&&(r=e.length)){var r,u=-1;for(t+=n._tree.mod;++u<r;)a(e[u],t)}}function c(n,t,r){if(t){for(var 
u,i=n,o=n,a=t,c=n.parent.children[0],s=i._tree.mod,l=o._tree.mod,f=a._tree.mod,h=c._tree.mod;a=si(a),i=ci(i),a&&i;)c=ci(c),o=si(o),o._tree.ancestor=n,u=a._tree.prelim+f-i._tree.prelim-s+e(a,i),u>0&&(di(mi(a,n,r),n,u),s+=u,l+=u),f+=a._tree.mod,s+=i._tree.mod,h+=c._tree.mod,l+=o._tree.mod;a&&!si(o)&&(o._tree.thread=a,o._tree.mod+=f-l),i&&!ci(c)&&(c._tree.thread=i,c._tree.mod+=s-h,r=n)}return r}var s=t.call(this,n,i),l=s[0];pi(l,function(n,t){n._tree={ancestor:n,prelim:0,mod:0,change:0,shift:0,number:t?t._tree.number+1:0}}),o(l),a(l,-l._tree.prelim);var f=li(l,hi),h=li(l,fi),g=li(l,gi),p=f.x-e(f,h)/2,v=h.x+e(h,f)/2,d=g.depth||1;return pi(l,u?function(n){n.x*=r[0],n.y=n.depth*r[1],delete n._tree}:function(n){n.x=(n.x-p)/(v-p)*r[0],n.y=n.depth/d*r[1],delete n._tree}),s}var t=Xo.layout.hierarchy().sort(null).value(null),e=ai,r=[1,1],u=!1;return n.separation=function(t){return arguments.length?(e=t,n):e},n.size=function(t){return arguments.length?(u=null==(r=t),n):u?null:r},n.nodeSize=function(t){return arguments.length?(u=null!=(r=t),n):u?r:null},Vu(n,t)},Xo.layout.pack=function(){function n(n,i){var o=e.call(this,n,i),a=o[0],c=u[0],s=u[1],l=null==t?Math.sqrt:"function"==typeof t?t:function(){return t};if(a.x=a.y=0,pi(a,function(n){n.r=+l(n.value)}),pi(a,bi),r){var f=r*(t?1:Math.max(2*a.r/c,2*a.r/s))/2;pi(a,function(n){n.r+=f}),pi(a,bi),pi(a,function(n){n.r-=f})}return ki(a,c/2,s/2,t?1:1/Math.max(2*a.r/c,2*a.r/s)),o}var t,e=Xo.layout.hierarchy().sort(yi),r=0,u=[1,1];return n.size=function(t){return arguments.length?(u=t,n):u},n.radius=function(e){return arguments.length?(t=null==e||"function"==typeof e?e:+e,n):t},n.padding=function(t){return arguments.length?(r=+t,n):r},Vu(n,e)},Xo.layout.cluster=function(){function n(n,i){var o,a=t.call(this,n,i),c=a[0],s=0;pi(c,function(n){var t=n.children;t&&t.length?(n.x=Ci(t),n.y=Ai(t)):(n.x=o?s+=e(n,o):0,n.y=0,o=n)});var l=Ni(c),f=Li(c),h=l.x-e(l,f)/2,g=f.x+e(f,l)/2;return 
pi(c,u?function(n){n.x=(n.x-c.x)*r[0],n.y=(c.y-n.y)*r[1]}:function(n){n.x=(n.x-h)/(g-h)*r[0],n.y=(1-(c.y?n.y/c.y:1))*r[1]}),a}var t=Xo.layout.hierarchy().sort(null).value(null),e=ai,r=[1,1],u=!1;return n.separation=function(t){return arguments.length?(e=t,n):e},n.size=function(t){return arguments.length?(u=null==(r=t),n):u?null:r},n.nodeSize=function(t){return arguments.length?(u=null!=(r=t),n):u?r:null},Vu(n,t)},Xo.layout.treemap=function(){function n(n,t){for(var e,r,u=-1,i=n.length;++u<i;)r=(e=n[u]).value*(0>t?0:t),e.area=isNaN(r)||0>=r?0:r}function t(e){var i=e.children;if(i&&i.length){var o,a,c,s=f(e),l=[],h=i.slice(),p=1/0,v="slice"===g?s.dx:"dice"===g?s.dy:"slice-dice"===g?1&e.depth?s.dy:s.dx:Math.min(s.dx,s.dy);for(n(h,s.dx*s.dy/e.value),l.area=0;(c=h.length)>0;)l.push(o=h[c-1]),l.area+=o.area,"squarify"!==g||(a=r(l,v))<=p?(h.pop(),p=a):(l.area-=l.pop().area,u(l,v,s,!1),v=Math.min(s.dx,s.dy),l.length=l.area=0,p=1/0);l.length&&(u(l,v,s,!0),l.length=l.area=0),i.forEach(t)}}function e(t){var r=t.children;if(r&&r.length){var i,o=f(t),a=r.slice(),c=[];for(n(a,o.dx*o.dy/t.value),c.area=0;i=a.pop();)c.push(i),c.area+=i.area,null!=i.z&&(u(c,i.z?o.dx:o.dy,o,!a.length),c.length=c.area=0);r.forEach(e)}}function r(n,t){for(var e,r=n.area,u=0,i=1/0,o=-1,a=n.length;++o<a;)(e=n[o].area)&&(i>e&&(i=e),e>u&&(u=e));return r*=r,t*=t,r?Math.max(t*u*p/r,r/(t*i*p)):1/0}function u(n,t,e,r){var u,i=-1,o=n.length,a=e.x,s=e.y,l=t?c(n.area/t):0;if(t==e.dx){for((r||l>e.dy)&&(l=e.dy);++i<o;)u=n[i],u.x=a,u.y=s,u.dy=l,a+=u.dx=Math.min(e.x+e.dx-a,l?c(u.area/l):0);u.z=!0,u.dx+=e.x+e.dx-a,e.y+=l,e.dy-=l}else{for((r||l>e.dx)&&(l=e.dx);++i<o;)u=n[i],u.x=a,u.y=s,u.dx=l,s+=u.dy=Math.min(e.y+e.dy-s,l?c(u.area/l):0);u.z=!1,u.dy+=e.y+e.dy-s,e.x+=l,e.dx-=l}}function i(r){var u=o||a(r),i=u[0];return i.x=0,i.y=0,i.dx=s[0],i.dy=s[1],o&&a.revalue(i),n([i],i.dx*i.dy/i.value),(o?e:t)(i),h&&(o=u),u}var o,a=Xo.layout.hierarchy(),c=Math.round,s=[1,1],l=null,f=Ti,h=!1,g="squarify",p=.5*(1+Math.sqrt(5));return 
i.size=function(n){return arguments.length?(s=n,i):s},i.padding=function(n){function t(t){var e=n.call(i,t,t.depth);return null==e?Ti(t):qi(t,"number"==typeof e?[e,e,e,e]:e)}function e(t){return qi(t,n)}if(!arguments.length)return l;var r;return f=null==(l=n)?Ti:"function"==(r=typeof n)?t:"number"===r?(n=[n,n,n,n],e):e,i},i.round=function(n){return arguments.length?(c=n?Math.round:Number,i):c!=Number},i.sticky=function(n){return arguments.length?(h=n,o=null,i):h},i.ratio=function(n){return arguments.length?(p=n,i):p},i.mode=function(n){return arguments.length?(g=n+"",i):g},Vu(i,a)},Xo.random={normal:function(n,t){var e=arguments.length;return 2>e&&(t=1),1>e&&(n=0),function(){var e,r,u;do e=2*Math.random()-1,r=2*Math.random()-1,u=e*e+r*r;while(!u||u>1);return n+t*e*Math.sqrt(-2*Math.log(u)/u)}},logNormal:function(){var n=Xo.random.normal.apply(Xo,arguments);return function(){return Math.exp(n())}},bates:function(n){var t=Xo.random.irwinHall(n);return function(){return t()/n}},irwinHall:function(n){return function(){for(var t=0,e=0;n>e;e++)t+=Math.random();return t}}},Xo.scale={};var ls={floor:bt,ceil:bt};Xo.scale.linear=function(){return Hi([0,1],[0,1],fu,!1)};var fs={s:1,g:1,p:1,r:1,e:1};Xo.scale.log=function(){return $i(Xo.scale.linear().domain([0,1]),10,!0,[1,10])};var hs=Xo.format(".0e"),gs={floor:function(n){return-Math.ceil(-n)},ceil:function(n){return-Math.floor(-n)}};Xo.scale.pow=function(){return Bi(Xo.scale.linear(),1,[0,1])},Xo.scale.sqrt=function(){return Xo.scale.pow().exponent(.5)},Xo.scale.ordinal=function(){return Ji([],{t:"range",a:[[]]})},Xo.scale.category10=function(){return Xo.scale.ordinal().range(ps)},Xo.scale.category20=function(){return Xo.scale.ordinal().range(vs)},Xo.scale.category20b=function(){return Xo.scale.ordinal().range(ds)},Xo.scale.category20c=function(){return Xo.scale.ordinal().range(ms)};var 
ps=[2062260,16744206,2924588,14034728,9725885,9197131,14907330,8355711,12369186,1556175].map(ht),vs=[2062260,11454440,16744206,16759672,2924588,10018698,14034728,16750742,9725885,12955861,9197131,12885140,14907330,16234194,8355711,13092807,12369186,14408589,1556175,10410725].map(ht),ds=[3750777,5395619,7040719,10264286,6519097,9216594,11915115,13556636,9202993,12426809,15186514,15190932,8666169,11356490,14049643,15177372,8077683,10834324,13528509,14589654].map(ht),ms=[3244733,7057110,10406625,13032431,15095053,16616764,16625259,16634018,3253076,7652470,10607003,13101504,7695281,10394312,12369372,14342891,6513507,9868950,12434877,14277081].map(ht);Xo.scale.quantile=function(){return Gi([],[])},Xo.scale.quantize=function(){return Ki(0,1,[0,1])},Xo.scale.threshold=function(){return Qi([.5],[0,1])},Xo.scale.identity=function(){return no([0,1])},Xo.svg={},Xo.svg.arc=function(){function n(){var n=t.apply(this,arguments),i=e.apply(this,arguments),o=r.apply(this,arguments)+ys,a=u.apply(this,arguments)+ys,c=(o>a&&(c=o,o=a,a=c),a-o),s=Sa>c?"0":"1",l=Math.cos(o),f=Math.sin(o),h=Math.cos(a),g=Math.sin(a);return c>=xs?n?"M0,"+i+"A"+i+","+i+" 0 1,1 0,"+-i+"A"+i+","+i+" 0 1,1 0,"+i+"M0,"+n+"A"+n+","+n+" 0 1,0 0,"+-n+"A"+n+","+n+" 0 1,0 0,"+n+"Z":"M0,"+i+"A"+i+","+i+" 0 1,1 0,"+-i+"A"+i+","+i+" 0 1,1 0,"+i+"Z":n?"M"+i*l+","+i*f+"A"+i+","+i+" 0 "+s+",1 "+i*h+","+i*g+"L"+n*h+","+n*g+"A"+n+","+n+" 0 "+s+",0 "+n*l+","+n*f+"Z":"M"+i*l+","+i*f+"A"+i+","+i+" 0 "+s+",1 "+i*h+","+i*g+"L0,0"+"Z"}var t=to,e=eo,r=ro,u=uo;return n.innerRadius=function(e){return arguments.length?(t=_t(e),n):t},n.outerRadius=function(t){return arguments.length?(e=_t(t),n):e},n.startAngle=function(t){return arguments.length?(r=_t(t),n):r},n.endAngle=function(t){return arguments.length?(u=_t(t),n):u},n.centroid=function(){var n=(t.apply(this,arguments)+e.apply(this,arguments))/2,i=(r.apply(this,arguments)+u.apply(this,arguments))/2+ys;return[Math.cos(i)*n,Math.sin(i)*n]},n};var 
ys=-Ea,xs=ka-Aa;Xo.svg.line=function(){return io(bt)};var Ms=Xo.map({linear:oo,"linear-closed":ao,step:co,"step-before":so,"step-after":lo,basis:mo,"basis-open":yo,"basis-closed":xo,bundle:Mo,cardinal:go,"cardinal-open":fo,"cardinal-closed":ho,monotone:Eo});Ms.forEach(function(n,t){t.key=n,t.closed=/-closed$/.test(n)});var _s=[0,2/3,1/3,0],bs=[0,1/3,2/3,0],ws=[0,1/6,2/3,1/6];Xo.svg.line.radial=function(){var n=io(Ao);return n.radius=n.x,delete n.x,n.angle=n.y,delete n.y,n},so.reverse=lo,lo.reverse=so,Xo.svg.area=function(){return Co(bt)},Xo.svg.area.radial=function(){var n=Co(Ao);return n.radius=n.x,delete n.x,n.innerRadius=n.x0,delete n.x0,n.outerRadius=n.x1,delete n.x1,n.angle=n.y,delete n.y,n.startAngle=n.y0,delete n.y0,n.endAngle=n.y1,delete n.y1,n},Xo.svg.chord=function(){function n(n,a){var c=t(this,i,n,a),s=t(this,o,n,a);return"M"+c.p0+r(c.r,c.p1,c.a1-c.a0)+(e(c,s)?u(c.r,c.p1,c.r,c.p0):u(c.r,c.p1,s.r,s.p0)+r(s.r,s.p1,s.a1-s.a0)+u(s.r,s.p1,c.r,c.p0))+"Z"}function t(n,t,e,r){var u=t.call(n,e,r),i=a.call(n,u,r),o=c.call(n,u,r)+ys,l=s.call(n,u,r)+ys;return{r:i,a0:o,a1:l,p0:[i*Math.cos(o),i*Math.sin(o)],p1:[i*Math.cos(l),i*Math.sin(l)]}}function e(n,t){return n.a0==t.a0&&n.a1==t.a1}function r(n,t,e){return"A"+n+","+n+" 0 "+ +(e>Sa)+",1 "+t}function u(n,t,e,r){return"Q 0,0 "+r}var i=hr,o=gr,a=No,c=ro,s=uo;return n.radius=function(t){return arguments.length?(a=_t(t),n):a},n.source=function(t){return arguments.length?(i=_t(t),n):i},n.target=function(t){return arguments.length?(o=_t(t),n):o},n.startAngle=function(t){return arguments.length?(c=_t(t),n):c},n.endAngle=function(t){return arguments.length?(s=_t(t),n):s},n},Xo.svg.diagonal=function(){function n(n,u){var i=t.call(this,n,u),o=e.call(this,n,u),a=(i.y+o.y)/2,c=[i,{x:i.x,y:a},{x:o.x,y:a},o];return c=c.map(r),"M"+c[0]+"C"+c[1]+" "+c[2]+" "+c[3]}var t=hr,e=gr,r=Lo;return n.source=function(e){return arguments.length?(t=_t(e),n):t},n.target=function(t){return 
arguments.length?(e=_t(t),n):e},n.projection=function(t){return arguments.length?(r=t,n):r},n},Xo.svg.diagonal.radial=function(){var n=Xo.svg.diagonal(),t=Lo,e=n.projection;return n.projection=function(n){return arguments.length?e(To(t=n)):t},n},Xo.svg.symbol=function(){function n(n,r){return(Ss.get(t.call(this,n,r))||Ro)(e.call(this,n,r))}var t=zo,e=qo;return n.type=function(e){return arguments.length?(t=_t(e),n):t},n.size=function(t){return arguments.length?(e=_t(t),n):e},n};var Ss=Xo.map({circle:Ro,cross:function(n){var t=Math.sqrt(n/5)/2;return"M"+-3*t+","+-t+"H"+-t+"V"+-3*t+"H"+t+"V"+-t+"H"+3*t+"V"+t+"H"+t+"V"+3*t+"H"+-t+"V"+t+"H"+-3*t+"Z"},diamond:function(n){var t=Math.sqrt(n/(2*Cs)),e=t*Cs;return"M0,"+-t+"L"+e+",0"+" 0,"+t+" "+-e+",0"+"Z"},square:function(n){var t=Math.sqrt(n)/2;return"M"+-t+","+-t+"L"+t+","+-t+" "+t+","+t+" "+-t+","+t+"Z"},"triangle-down":function(n){var t=Math.sqrt(n/As),e=t*As/2;return"M0,"+e+"L"+t+","+-e+" "+-t+","+-e+"Z"},"triangle-up":function(n){var t=Math.sqrt(n/As),e=t*As/2;return"M0,"+-e+"L"+t+","+e+" "+-t+","+e+"Z"}});Xo.svg.symbolTypes=Ss.keys();var ks,Es,As=Math.sqrt(3),Cs=Math.tan(30*Na),Ns=[],Ls=0;Ns.call=da.call,Ns.empty=da.empty,Ns.node=da.node,Ns.size=da.size,Xo.transition=function(n){return arguments.length?ks?n.transition():n:xa.transition()},Xo.transition.prototype=Ns,Ns.select=function(n){var t,e,r,u=this.id,i=[];n=M(n);for(var o=-1,a=this.length;++o<a;){i.push(t=[]);for(var c=this[o],s=-1,l=c.length;++s<l;)(r=c[s])&&(e=n.call(r,r.__data__,s,o))?("__data__"in r&&(e.__data__=r.__data__),jo(e,s,u,r.__transition__[u]),t.push(e)):t.push(null)}return Do(i,u)},Ns.selectAll=function(n){var t,e,r,u,i,o=this.id,a=[];n=_(n);for(var c=-1,s=this.length;++c<s;)for(var l=this[c],f=-1,h=l.length;++f<h;)if(r=l[f]){i=r.__transition__[o],e=n.call(r,r.__data__,f,c),a.push(t=[]);for(var g=-1,p=e.length;++g<p;)(u=e[g])&&jo(u,g,o,i),t.push(u)}return Do(a,o)},Ns.filter=function(n){var t,e,r,u=[];"function"!=typeof n&&(n=q(n));for(var 
i=0,o=this.length;o>i;i++){u.push(t=[]);for(var e=this[i],a=0,c=e.length;c>a;a++)(r=e[a])&&n.call(r,r.__data__,a,i)&&t.push(r)}return Do(u,this.id)},Ns.tween=function(n,t){var e=this.id;return arguments.length<2?this.node().__transition__[e].tween.get(n):R(this,null==t?function(t){t.__transition__[e].tween.remove(n)}:function(r){r.__transition__[e].tween.set(n,t)})},Ns.attr=function(n,t){function e(){this.removeAttribute(a)}function r(){this.removeAttributeNS(a.space,a.local)}function u(n){return null==n?e:(n+="",function(){var t,e=this.getAttribute(a);return e!==n&&(t=o(e,n),function(n){this.setAttribute(a,t(n))})})}function i(n){return null==n?r:(n+="",function(){var t,e=this.getAttributeNS(a.space,a.local);return e!==n&&(t=o(e,n),function(n){this.setAttributeNS(a.space,a.local,t(n))})})}if(arguments.length<2){for(t in n)this.attr(t,n[t]);return this}var o="transform"==n?Ru:fu,a=Xo.ns.qualify(n);return Po(this,"attr."+n,t,a.local?i:u)},Ns.attrTween=function(n,t){function e(n,e){var r=t.call(this,n,e,this.getAttribute(u));return r&&function(n){this.setAttribute(u,r(n))}}function r(n,e){var r=t.call(this,n,e,this.getAttributeNS(u.space,u.local));return r&&function(n){this.setAttributeNS(u.space,u.local,r(n))}}var u=Xo.ns.qualify(n);return this.tween("attr."+n,u.local?r:e)},Ns.style=function(n,t,e){function r(){this.style.removeProperty(n)}function u(t){return null==t?r:(t+="",function(){var r,u=Go.getComputedStyle(this,null).getPropertyValue(n);return u!==t&&(r=fu(u,t),function(t){this.style.setProperty(n,r(t),e)})})}var i=arguments.length;if(3>i){if("string"!=typeof n){2>i&&(t="");for(e in n)this.style(e,n[e],t);return this}e=""}return Po(this,"style."+n,t,u)},Ns.styleTween=function(n,t,e){function r(r,u){var i=t.call(this,r,u,Go.getComputedStyle(this,null).getPropertyValue(n));return i&&function(t){this.style.setProperty(n,i(t),e)}}return arguments.length<3&&(e=""),this.tween("style."+n,r)},Ns.text=function(n){return 
Po(this,"text",n,Uo)},Ns.remove=function(){return this.each("end.transition",function(){var n;this.__transition__.count<2&&(n=this.parentNode)&&n.removeChild(this)})},Ns.ease=function(n){var t=this.id;return arguments.length<1?this.node().__transition__[t].ease:("function"!=typeof n&&(n=Xo.ease.apply(Xo,arguments)),R(this,function(e){e.__transition__[t].ease=n}))},Ns.delay=function(n){var t=this.id;return R(this,"function"==typeof n?function(e,r,u){e.__transition__[t].delay=+n.call(e,e.__data__,r,u)}:(n=+n,function(e){e.__transition__[t].delay=n}))},Ns.duration=function(n){var t=this.id;return R(this,"function"==typeof n?function(e,r,u){e.__transition__[t].duration=Math.max(1,n.call(e,e.__data__,r,u))}:(n=Math.max(1,n),function(e){e.__transition__[t].duration=n}))},Ns.each=function(n,t){var e=this.id;if(arguments.length<2){var r=Es,u=ks;ks=e,R(this,function(t,r,u){Es=t.__transition__[e],n.call(t,t.__data__,r,u)}),Es=r,ks=u}else R(this,function(r){var u=r.__transition__[e];(u.event||(u.event=Xo.dispatch("start","end"))).on(n,t)});return this},Ns.transition=function(){for(var n,t,e,r,u=this.id,i=++Ls,o=[],a=0,c=this.length;c>a;a++){o.push(n=[]);for(var t=this[a],s=0,l=t.length;l>s;s++)(e=t[s])&&(r=Object.create(e.__transition__[u]),r.delay+=r.duration,jo(e,s,i,r)),n.push(e)}return Do(o,i)},Xo.svg.axis=function(){function n(n){n.each(function(){var n,s=Xo.select(this),l=this.__chart__||e,f=this.__chart__=e.copy(),h=null==c?f.ticks?f.ticks.apply(f,a):f.domain():c,g=null==t?f.tickFormat?f.tickFormat.apply(f,a):bt:t,p=s.selectAll(".tick").data(h,f),v=p.enter().insert("g",".domain").attr("class","tick").style("opacity",Aa),d=Xo.transition(p.exit()).style("opacity",Aa).remove(),m=Xo.transition(p).style("opacity",1),y=Ri(f),x=s.selectAll(".domain").data([0]),M=(x.enter().append("path").attr("class","domain"),Xo.transition(x));v.append("line"),v.append("text");var 
_=v.select("line"),b=m.select("line"),w=p.select("text").text(g),S=v.select("text"),k=m.select("text");switch(r){case"bottom":n=Ho,_.attr("y2",u),S.attr("y",Math.max(u,0)+o),b.attr("x2",0).attr("y2",u),k.attr("x",0).attr("y",Math.max(u,0)+o),w.attr("dy",".71em").style("text-anchor","middle"),M.attr("d","M"+y[0]+","+i+"V0H"+y[1]+"V"+i);break;case"top":n=Ho,_.attr("y2",-u),S.attr("y",-(Math.max(u,0)+o)),b.attr("x2",0).attr("y2",-u),k.attr("x",0).attr("y",-(Math.max(u,0)+o)),w.attr("dy","0em").style("text-anchor","middle"),M.attr("d","M"+y[0]+","+-i+"V0H"+y[1]+"V"+-i);break;case"left":n=Fo,_.attr("x2",-u),S.attr("x",-(Math.max(u,0)+o)),b.attr("x2",-u).attr("y2",0),k.attr("x",-(Math.max(u,0)+o)).attr("y",0),w.attr("dy",".32em").style("text-anchor","end"),M.attr("d","M"+-i+","+y[0]+"H0V"+y[1]+"H"+-i);break;case"right":n=Fo,_.attr("x2",u),S.attr("x",Math.max(u,0)+o),b.attr("x2",u).attr("y2",0),k.attr("x",Math.max(u,0)+o).attr("y",0),w.attr("dy",".32em").style("text-anchor","start"),M.attr("d","M"+i+","+y[0]+"H0V"+y[1]+"H"+i)}if(f.rangeBand){var E=f,A=E.rangeBand()/2;l=f=function(n){return E(n)+A}}else l.rangeBand?l=f:d.call(n,f);v.call(n,l),m.call(n,f)})}var t,e=Xo.scale.linear(),r=Ts,u=6,i=6,o=3,a=[10],c=null;return n.scale=function(t){return arguments.length?(e=t,n):e},n.orient=function(t){return arguments.length?(r=t in qs?t+"":Ts,n):r},n.ticks=function(){return arguments.length?(a=arguments,n):a},n.tickValues=function(t){return arguments.length?(c=t,n):c},n.tickFormat=function(e){return arguments.length?(t=e,n):t},n.tickSize=function(t){var e=arguments.length;return e?(u=+t,i=+arguments[e-1],n):u},n.innerTickSize=function(t){return arguments.length?(u=+t,n):u},n.outerTickSize=function(t){return arguments.length?(i=+t,n):i},n.tickPadding=function(t){return arguments.length?(o=+t,n):o},n.tickSubdivide=function(){return arguments.length&&n},n};var Ts="bottom",qs={top:1,right:1,bottom:1,left:1};Xo.svg.brush=function(){function n(i){i.each(function(){var 
i=Xo.select(this).style("pointer-events","all").style("-webkit-tap-highlight-color","rgba(0,0,0,0)").on("mousedown.brush",u).on("touchstart.brush",u),o=i.selectAll(".background").data([0]);o.enter().append("rect").attr("class","background").style("visibility","hidden").style("cursor","crosshair"),i.selectAll(".extent").data([0]).enter().append("rect").attr("class","extent").style("cursor","move");var a=i.selectAll(".resize").data(p,bt);a.exit().remove(),a.enter().append("g").attr("class",function(n){return"resize "+n}).style("cursor",function(n){return zs[n]}).append("rect").attr("x",function(n){return/[ew]$/.test(n)?-3:null}).attr("y",function(n){return/^[ns]/.test(n)?-3:null}).attr("width",6).attr("height",6).style("visibility","hidden"),a.style("display",n.empty()?"none":null);var l,f=Xo.transition(i),h=Xo.transition(o);c&&(l=Ri(c),h.attr("x",l[0]).attr("width",l[1]-l[0]),e(f)),s&&(l=Ri(s),h.attr("y",l[0]).attr("height",l[1]-l[0]),r(f)),t(f)})}function t(n){n.selectAll(".resize").attr("transform",function(n){return"translate("+l[+/e$/.test(n)]+","+f[+/^s/.test(n)]+")"})}function e(n){n.select(".extent").attr("x",l[0]),n.selectAll(".extent,.n>rect,.s>rect").attr("width",l[1]-l[0])}function r(n){n.select(".extent").attr("y",f[0]),n.selectAll(".extent,.e>rect,.w>rect").attr("height",f[1]-f[0])}function u(){function u(){32==Xo.event.keyCode&&(C||(x=null,L[0]-=l[1],L[1]-=f[1],C=2),d())}function p(){32==Xo.event.keyCode&&2==C&&(L[0]+=l[1],L[1]+=f[1],C=0,d())}function v(){var n=Xo.mouse(_),u=!1;M&&(n[0]+=M[0],n[1]+=M[1]),C||(Xo.event.altKey?(x||(x=[(l[0]+l[1])/2,(f[0]+f[1])/2]),L[0]=l[+(n[0]<x[0])],L[1]=f[+(n[1]<x[1])]):x=null),E&&m(n,c,0)&&(e(S),u=!0),A&&m(n,s,1)&&(r(S),u=!0),u&&(t(S),w({type:"brush",mode:C?"move":"resize"}))}function m(n,t,e){var r,u,a=Ri(t),c=a[0],s=a[1],p=L[e],v=e?f:l,d=v[1]-v[0];return 
C&&(c-=p,s-=d+p),r=(e?g:h)?Math.max(c,Math.min(s,n[e])):n[e],C?u=(r+=p)+d:(x&&(p=Math.max(c,Math.min(s,2*x[e]-r))),r>p?(u=r,r=p):u=p),v[0]!=r||v[1]!=u?(e?o=null:i=null,v[0]=r,v[1]=u,!0):void 0}function y(){v(),S.style("pointer-events","all").selectAll(".resize").style("display",n.empty()?"none":null),Xo.select("body").style("cursor",null),T.on("mousemove.brush",null).on("mouseup.brush",null).on("touchmove.brush",null).on("touchend.brush",null).on("keydown.brush",null).on("keyup.brush",null),N(),w({type:"brushend"})}var x,M,_=this,b=Xo.select(Xo.event.target),w=a.of(_,arguments),S=Xo.select(_),k=b.datum(),E=!/^(n|s)$/.test(k)&&c,A=!/^(e|w)$/.test(k)&&s,C=b.classed("extent"),N=O(),L=Xo.mouse(_),T=Xo.select(Go).on("keydown.brush",u).on("keyup.brush",p);if(Xo.event.changedTouches?T.on("touchmove.brush",v).on("touchend.brush",y):T.on("mousemove.brush",v).on("mouseup.brush",y),S.interrupt().selectAll("*").interrupt(),C)L[0]=l[0]-L[0],L[1]=f[0]-L[1];else if(k){var q=+/w$/.test(k),z=+/^n/.test(k);M=[l[1-q]-L[0],f[1-z]-L[1]],L[0]=l[q],L[1]=f[z]}else Xo.event.altKey&&(x=L.slice());S.style("pointer-events","none").selectAll(".resize").style("display",null),Xo.select("body").style("cursor",b.style("cursor")),w({type:"brushstart"}),v()}var i,o,a=y(n,"brushstart","brush","brushend"),c=null,s=null,l=[0,0],f=[0,0],h=!0,g=!0,p=Rs[0];return n.event=function(n){n.each(function(){var n=a.of(this,arguments),t={x:l,y:f,i:i,j:o},e=this.__chart__||t;this.__chart__=t,ks?Xo.select(this).transition().each("start.brush",function(){i=e.i,o=e.j,l=e.x,f=e.y,n({type:"brushstart"})}).tween("brush:brush",function(){var e=hu(l,t.x),r=hu(f,t.y);return i=o=null,function(u){l=t.x=e(u),f=t.y=r(u),n({type:"brush",mode:"resize"})}}).each("end.brush",function(){i=t.i,o=t.j,n({type:"brush",mode:"resize"}),n({type:"brushend"})}):(n({type:"brushstart"}),n({type:"brush",mode:"resize"}),n({type:"brushend"}))})},n.x=function(t){return arguments.length?(c=t,p=Rs[!c<<1|!s],n):c},n.y=function(t){return 
arguments.length?(s=t,p=Rs[!c<<1|!s],n):s},n.clamp=function(t){return arguments.length?(c&&s?(h=!!t[0],g=!!t[1]):c?h=!!t:s&&(g=!!t),n):c&&s?[h,g]:c?h:s?g:null},n.extent=function(t){var e,r,u,a,h;return arguments.length?(c&&(e=t[0],r=t[1],s&&(e=e[0],r=r[0]),i=[e,r],c.invert&&(e=c(e),r=c(r)),e>r&&(h=e,e=r,r=h),(e!=l[0]||r!=l[1])&&(l=[e,r])),s&&(u=t[0],a=t[1],c&&(u=u[1],a=a[1]),o=[u,a],s.invert&&(u=s(u),a=s(a)),u>a&&(h=u,u=a,a=h),(u!=f[0]||a!=f[1])&&(f=[u,a])),n):(c&&(i?(e=i[0],r=i[1]):(e=l[0],r=l[1],c.invert&&(e=c.invert(e),r=c.invert(r)),e>r&&(h=e,e=r,r=h))),s&&(o?(u=o[0],a=o[1]):(u=f[0],a=f[1],s.invert&&(u=s.invert(u),a=s.invert(a)),u>a&&(h=u,u=a,a=h))),c&&s?[[e,u],[r,a]]:c?[e,r]:s&&[u,a])},n.clear=function(){return n.empty()||(l=[0,0],f=[0,0],i=o=null),n},n.empty=function(){return!!c&&l[0]==l[1]||!!s&&f[0]==f[1]},Xo.rebind(n,a,"on")};var zs={n:"ns-resize",e:"ew-resize",s:"ns-resize",w:"ew-resize",nw:"nwse-resize",ne:"nesw-resize",se:"nwse-resize",sw:"nesw-resize"},Rs=[["n","e","s","w","nw","ne","se","sw"],["e","w"],["n","s"],[]],Ds=tc.format=ac.timeFormat,Ps=Ds.utc,Us=Ps("%Y-%m-%dT%H:%M:%S.%LZ");Ds.iso=Date.prototype.toISOString&&+new Date("2000-01-01T00:00:00.000Z")?Oo:Us,Oo.parse=function(n){var t=new Date(n);return isNaN(t)?null:t},Oo.toString=Us.toString,tc.second=Rt(function(n){return new ec(1e3*Math.floor(n/1e3))},function(n,t){n.setTime(n.getTime()+1e3*Math.floor(t))},function(n){return n.getSeconds()}),tc.seconds=tc.second.range,tc.seconds.utc=tc.second.utc.range,tc.minute=Rt(function(n){return new ec(6e4*Math.floor(n/6e4))},function(n,t){n.setTime(n.getTime()+6e4*Math.floor(t))},function(n){return n.getMinutes()}),tc.minutes=tc.minute.range,tc.minutes.utc=tc.minute.utc.range,tc.hour=Rt(function(n){var t=n.getTimezoneOffset()/60;return new ec(36e5*(Math.floor(n/36e5-t)+t))},function(n,t){n.setTime(n.getTime()+36e5*Math.floor(t))},function(n){return n.getHours()}),tc.hours=tc.hour.range,tc.hours.utc=tc.hour.utc.range,tc.month=Rt(function(n){return 
n=tc.day(n),n.setDate(1),n},function(n,t){n.setMonth(n.getMonth()+t)},function(n){return n.getMonth()}),tc.months=tc.month.range,tc.months.utc=tc.month.utc.range;var js=[1e3,5e3,15e3,3e4,6e4,3e5,9e5,18e5,36e5,108e5,216e5,432e5,864e5,1728e5,6048e5,2592e6,7776e6,31536e6],Hs=[[tc.second,1],[tc.second,5],[tc.second,15],[tc.second,30],[tc.minute,1],[tc.minute,5],[tc.minute,15],[tc.minute,30],[tc.hour,1],[tc.hour,3],[tc.hour,6],[tc.hour,12],[tc.day,1],[tc.day,2],[tc.week,1],[tc.month,1],[tc.month,3],[tc.year,1]],Fs=Ds.multi([[".%L",function(n){return n.getMilliseconds()}],[":%S",function(n){return n.getSeconds()}],["%I:%M",function(n){return n.getMinutes()}],["%I %p",function(n){return n.getHours()}],["%a %d",function(n){return n.getDay()&&1!=n.getDate()}],["%b %d",function(n){return 1!=n.getDate()}],["%B",function(n){return n.getMonth()}],["%Y",be]]),Os={range:function(n,t,e){return Xo.range(Math.ceil(n/e)*e,+t,e).map(Io)},floor:bt,ceil:bt};Hs.year=tc.year,tc.scale=function(){return Yo(Xo.scale.linear(),Hs,Fs)};var Ys=Hs.map(function(n){return[n[0].utc,n[1]]}),Is=Ps.multi([[".%L",function(n){return n.getUTCMilliseconds()}],[":%S",function(n){return n.getUTCSeconds()}],["%I:%M",function(n){return n.getUTCMinutes()}],["%I %p",function(n){return n.getUTCHours()}],["%a %d",function(n){return n.getUTCDay()&&1!=n.getUTCDate()}],["%b %d",function(n){return 1!=n.getUTCDate()}],["%B",function(n){return n.getUTCMonth()}],["%Y",be]]);Ys.year=tc.year.utc,tc.scale.utc=function(){return Yo(Xo.scale.linear(),Ys,Is)},Xo.text=wt(function(n){return n.responseText}),Xo.json=function(n,t){return St(n,"application/json",Zo,t)},Xo.html=function(n,t){return St(n,"text/html",Vo,t)},Xo.xml=wt(function(n){return n.responseXML}),"function"==typeof define&&define.amd?define(Xo):"object"==typeof module&&module.exports?module.exports=Xo:this.d3=Xo}();'use strict';(function(window){window.define=undefined;}).call(this,this);'use strict';tr.exportTo('tr.ui.b',function(){const 
DataSeriesEnableChangeEventType='data-series-enabled-change';const THIS_DOC=document.currentScript.ownerDocument;const svgNS='http://www.w3.org/2000/svg';const ColorScheme=tr.b.ColorScheme;function getColorOfKey(key,selected){let id=ColorScheme.getColorIdForGeneralPurposeString(key);if(selected){id+=ColorScheme.properties.brightenedOffsets[0];} +return this.rangeOfInterest_;},get rangeOfInterest(){return this.rangeOfInterest_;},set rangeOfInterest(rangeOfInterest){this.rangeOfInterest_=rangeOfInterest;this.updateContents_();},get selection(){},set selection(_){},get textLabel(){return'Frame Data';},get model(){return this.model_;},set model(model){this.model_=model;this.updateContents_();}});tr.ui.side_panel.SidePanelRegistry.register(function(){return document.createElement('tr-ui-e-s-frame-data-side-panel');});});'use strict';Polymer({is:'tr-ui-b-chart-legend-key',ready(){this.$.checkbox.addEventListener('change',this.onCheckboxChange_.bind(this));},onCheckboxChange_(){tr.b.dispatchSimpleEvent(this,tr.ui.b.DataSeriesEnableChangeEventType,true,false,{key:Polymer.dom(this).textContent,enabled:this.enabled});},set textContent(t){Polymer.dom(this.$.label).textContent=t;Polymer.dom(this.$.link).textContent=t;this.updateContents_();},set width(w){w-=20;this.$.link.style.width=w+'px';this.$.label.style.width=w+'px';},get textContent(){return Polymer.dom(this.$.label).textContent;},set optional(optional){this.$.checkbox.style.visibility=optional?'visible':'hidden';},get optional(){return this.$.checkbox.style.visibility==='visible';},set enabled(enabled){this.$.checkbox.checked=enabled?'checked':'';},get enabled(){return this.$.checkbox.checked;},set color(c){this.$.label.style.color=c;this.$.link.color=c;},set target(target){this.$.link.setSelectionAndContent(target,Polymer.dom(this.$.label).textContent);this.updateContents_();},get target(){return this.$.link.selection;},set 
title(title){this.$.link.title=title;},updateContents_(){this.$.link.style.display=this.target?'':'none';this.$.label.style.display=this.target?'none':'';this.$.label.htmlFor=this.optional?'checkbox':'';}});'use strict';(function(window){window.define=function(x){window.d3=x;};window.define.amd=true;})(this);!function(){function n(n){return null!=n&&!isNaN(n)}function t(n){return n.length}function e(n){for(var t=1;n*t%1;)t*=10;return t}function r(n,t){try{for(var e in t)Object.defineProperty(n.prototype,e,{value:t[e],enumerable:!1})}catch(r){n.prototype=t}}function u(){}function i(n){return aa+n in this}function o(n){return n=aa+n,n in this&&delete this[n]}function a(){var n=[];return this.forEach(function(t){n.push(t)}),n}function c(){var n=0;for(var t in this)t.charCodeAt(0)===ca&&++n;return n}function s(){for(var n in this)if(n.charCodeAt(0)===ca)return!1;return!0}function l(){}function f(n,t,e){return function(){var r=e.apply(t,arguments);return r===t?n:r}}function h(n,t){if(t in n)return t;t=t.charAt(0).toUpperCase()+t.substring(1);for(var e=0,r=sa.length;r>e;++e){var u=sa[e]+t;if(u in n)return u}}function g(){}function p(){}function v(n){function t(){for(var t,r=e,u=-1,i=r.length;++u<i;)(t=r[u].on)&&t.apply(this,arguments);return n}var e=[],r=new u;return t.on=function(t,u){var i,o=r.get(t);return arguments.length<2?o&&o.on:(o&&(o.on=null,e=e.slice(0,i=e.indexOf(o)).concat(e.slice(i+1)),r.remove(t)),u&&e.push(r.set(t,{on:u})),n)},t}function d(){Xo.event.preventDefault()}function m(){for(var n,t=Xo.event;n=t.sourceEvent;)t=n;return t}function y(n){for(var t=new p,e=0,r=arguments.length;++e<r;)t[arguments[e]]=v(t);return t.of=function(e,r){return function(u){try{var i=u.sourceEvent=Xo.event;u.target=n,Xo.event=u,t[u.type].apply(e,r)}finally{Xo.event=i}}},t}function x(n){return fa(n,da),n}function M(n){return"function"==typeof n?n:function(){return ha(n,this)}}function _(n){return"function"==typeof n?n:function(){return ga(n,this)}}function b(n,t){function 
e(){this.removeAttribute(n)}function r(){this.removeAttributeNS(n.space,n.local)}function u(){this.setAttribute(n,t)}function i(){this.setAttributeNS(n.space,n.local,t)}function o(){var e=t.apply(this,arguments);null==e?this.removeAttribute(n):this.setAttribute(n,e)}function a(){var e=t.apply(this,arguments);null==e?this.removeAttributeNS(n.space,n.local):this.setAttributeNS(n.space,n.local,e)}return n=Xo.ns.qualify(n),null==t?n.local?r:e:"function"==typeof t?n.local?a:o:n.local?i:u}function w(n){return n.trim().replace(/\s+/g," ")}function S(n){return new RegExp("(?:^|\\s+)"+Xo.requote(n)+"(?:\\s+|$)","g")}function k(n){return n.trim().split(/^|\s+/)}function E(n,t){function e(){for(var e=-1;++e<u;)n[e](this,t)}function r(){for(var e=-1,r=t.apply(this,arguments);++e<u;)n[e](this,r)}n=k(n).map(A);var u=n.length;return"function"==typeof t?r:e}function A(n){var t=S(n);return function(e,r){if(u=e.classList)return r?u.add(n):u.remove(n);var u=e.getAttribute("class")||"";r?(t.lastIndex=0,t.test(u)||e.setAttribute("class",w(u+" "+n))):e.setAttribute("class",w(u.replace(t," ")))}}function C(n,t,e){function r(){this.style.removeProperty(n)}function u(){this.style.setProperty(n,t,e)}function i(){var r=t.apply(this,arguments);null==r?this.style.removeProperty(n):this.style.setProperty(n,r,e)}return null==t?r:"function"==typeof t?i:u}function N(n,t){function e(){delete this[n]}function r(){this[n]=t}function u(){var e=t.apply(this,arguments);null==e?delete this[n]:this[n]=e}return null==t?e:"function"==typeof t?u:r}function L(n){return"function"==typeof n?n:(n=Xo.ns.qualify(n)).local?function(){return this.ownerDocument.createElementNS(n.space,n.local)}:function(){return this.ownerDocument.createElementNS(this.namespaceURI,n)}}function T(n){return{__data__:n}}function q(n){return function(){return va(this,n)}}function z(n){return arguments.length||(n=Xo.ascending),function(t,e){return t&&e?n(t.__data__,e.__data__):!t-!e}}function R(n,t){for(var e=0,r=n.length;r>e;e++)for(var 
u,i=n[e],o=0,a=i.length;a>o;o++)(u=i[o])&&t(u,o,e);return n}function D(n){return fa(n,ya),n}function P(n){var t,e;return function(r,u,i){var o,a=n[i].update,c=a.length;for(i!=e&&(e=i,t=0),u>=t&&(t=u+1);!(o=a[t])&&++t<c;);return o}}function U(){var n=this.__transition__;n&&++n.active}function j(n,t,e){function r(){var t=this[o];t&&(this.removeEventListener(n,t,t.$),delete this[o])}function u(){var u=c(t,Bo(arguments));r.call(this),this.addEventListener(n,this[o]=u,u.$=e),u._=t}function i(){var t,e=new RegExp("^__on([^.]+)"+Xo.requote(n)+"$");for(var r in this)if(t=r.match(e)){var u=this[r];this.removeEventListener(t[1],u,u.$),delete this[r]}}var o="__on"+n,a=n.indexOf("."),c=H;a>0&&(n=n.substring(0,a));var s=Ma.get(n);return s&&(n=s,c=F),a?t?u:r:t?g:i}function H(n,t){return function(e){var r=Xo.event;Xo.event=e,t[0]=this.__data__;try{n.apply(this,t)}finally{Xo.event=r}}}function F(n,t){var e=H(n,t);return function(n){var t=this,r=n.relatedTarget;r&&(r===t||8&r.compareDocumentPosition(t))||e.call(t,n)}}function O(){var n=".dragsuppress-"+ ++ba,t="click"+n,e=Xo.select(Go).on("touchmove"+n,d).on("dragstart"+n,d).on("selectstart"+n,d);if(_a){var r=Jo.style,u=r[_a];r[_a]="none"}return function(i){function o(){e.on(t,null)}e.on(n,null),_a&&(r[_a]=u),i&&(e.on(t,function(){d(),o()},!0),setTimeout(o,0))}}function Y(n,t){t.changedTouches&&(t=t.changedTouches[0]);var e=n.ownerSVGElement||n;if(e.createSVGPoint){var r=e.createSVGPoint();if(0>wa&&(Go.scrollX||Go.scrollY)){e=Xo.select("body").append("svg").style({position:"absolute",top:0,left:0,margin:0,padding:0,border:"none"},"important");var u=e[0][0].getScreenCTM();wa=!(u.f||u.e),e.remove()}return wa?(r.x=t.pageX,r.y=t.pageY):(r.x=t.clientX,r.y=t.clientY),r=r.matrixTransform(n.getScreenCTM().inverse()),[r.x,r.y]}var i=n.getBoundingClientRect();return[t.clientX-i.left-n.clientLeft,t.clientY-i.top-n.clientTop]}function I(n){return n>0?1:0>n?-1:0}function Z(n,t,e){return(t[0]-n[0])*(e[1]-n[1])-(t[1]-n[1])*(e[0]-n[0])}function 
V(n){return n>1?0:-1>n?Sa:Math.acos(n)}function X(n){return n>1?Ea:-1>n?-Ea:Math.asin(n)}function $(n){return((n=Math.exp(n))-1/n)/2}function B(n){return((n=Math.exp(n))+1/n)/2}function W(n){return((n=Math.exp(2*n))-1)/(n+1)}function J(n){return(n=Math.sin(n/2))*n}function G(){}function K(n,t,e){return new Q(n,t,e)}function Q(n,t,e){this.h=n,this.s=t,this.l=e}function nt(n,t,e){function r(n){return n>360?n-=360:0>n&&(n+=360),60>n?i+(o-i)*n/60:180>n?o:240>n?i+(o-i)*(240-n)/60:i}function u(n){return Math.round(255*r(n))}var i,o;return n=isNaN(n)?0:(n%=360)<0?n+360:n,t=isNaN(t)?0:0>t?0:t>1?1:t,e=0>e?0:e>1?1:e,o=.5>=e?e*(1+t):e+t-e*t,i=2*e-o,gt(u(n+120),u(n),u(n-120))}function tt(n,t,e){return new et(n,t,e)}function et(n,t,e){this.h=n,this.c=t,this.l=e}function rt(n,t,e){return isNaN(n)&&(n=0),isNaN(t)&&(t=0),ut(e,Math.cos(n*=Na)*t,Math.sin(n)*t)}function ut(n,t,e){return new it(n,t,e)}function it(n,t,e){this.l=n,this.a=t,this.b=e}function ot(n,t,e){var r=(n+16)/116,u=r+t/500,i=r-e/200;return u=ct(u)*Fa,r=ct(r)*Oa,i=ct(i)*Ya,gt(lt(3.2404542*u-1.5371385*r-.4985314*i),lt(-.969266*u+1.8760108*r+.041556*i),lt(.0556434*u-.2040259*r+1.0572252*i))}function at(n,t,e){return n>0?tt(Math.atan2(e,t)*La,Math.sqrt(t*t+e*e),n):tt(0/0,0/0,n)}function ct(n){return n>.206893034?n*n*n:(n-4/29)/7.787037}function st(n){return n>.008856?Math.pow(n,1/3):7.787037*n+4/29}function lt(n){return Math.round(255*(.00304>=n?12.92*n:1.055*Math.pow(n,1/2.4)-.055))}function ft(n){return gt(n>>16,255&n>>8,255&n)}function ht(n){return ft(n)+""}function gt(n,t,e){return new pt(n,t,e)}function pt(n,t,e){this.r=n,this.g=t,this.b=e}function vt(n){return 16>n?"0"+Math.max(0,n).toString(16):Math.min(255,n).toString(16)}function dt(n,t,e){var r,u,i,o,a=0,c=0,s=0;if(u=/([a-z]+)\((.*)\)/i.exec(n))switch(i=u[2].split(","),u[1]){case"hsl":return e(parseFloat(i[0]),parseFloat(i[1])/100,parseFloat(i[2])/100);case"rgb":return 
t(Mt(i[0]),Mt(i[1]),Mt(i[2]))}return(o=Va.get(n))?t(o.r,o.g,o.b):(null!=n&&"#"===n.charAt(0)&&(r=parseInt(n.substring(1),16),isNaN(r)||(4===n.length?(a=(3840&r)>>4,a=a>>4|a,c=240&r,c=c>>4|c,s=15&r,s=s<<4|s):7===n.length&&(a=(16711680&r)>>16,c=(65280&r)>>8,s=255&r))),t(a,c,s))}function mt(n,t,e){var r,u,i=Math.min(n/=255,t/=255,e/=255),o=Math.max(n,t,e),a=o-i,c=(o+i)/2;return a?(u=.5>c?a/(o+i):a/(2-o-i),r=n==o?(t-e)/a+(e>t?6:0):t==o?(e-n)/a+2:(n-t)/a+4,r*=60):(r=0/0,u=c>0&&1>c?0:r),K(r,u,c)}function yt(n,t,e){n=xt(n),t=xt(t),e=xt(e);var r=st((.4124564*n+.3575761*t+.1804375*e)/Fa),u=st((.2126729*n+.7151522*t+.072175*e)/Oa),i=st((.0193339*n+.119192*t+.9503041*e)/Ya);return ut(116*u-16,500*(r-u),200*(u-i))}function xt(n){return(n/=255)<=.04045?n/12.92:Math.pow((n+.055)/1.055,2.4)}function Mt(n){var t=parseFloat(n);return"%"===n.charAt(n.length-1)?Math.round(2.55*t):t}function _t(n){return"function"==typeof n?n:function(){return n}}function bt(n){return n}function wt(n){return function(t,e,r){return 2===arguments.length&&"function"==typeof e&&(r=e,e=null),St(t,e,n,r)}}function St(n,t,e,r){function u(){var n,t=c.status;if(!t&&c.responseText||t>=200&&300>t||304===t){try{n=e.call(i,c)}catch(r){return o.error.call(i,r),void 0}o.load.call(i,n)}else o.error.call(i,c)}var i={},o=Xo.dispatch("beforesend","progress","load","error"),a={},c=new XMLHttpRequest,s=null;return!Go.XDomainRequest||"withCredentials"in c||!/^(http(s)?:)?\/\//.test(n)||(c=new XDomainRequest),"onload"in c?c.onload=c.onerror=u:c.onreadystatechange=function(){c.readyState>3&&u()},c.onprogress=function(n){var t=Xo.event;Xo.event=n;try{o.progress.call(i,c)}finally{Xo.event=t}},i.header=function(n,t){return n=(n+"").toLowerCase(),arguments.length<2?a[n]:(null==t?delete a[n]:a[n]=t+"",i)},i.mimeType=function(n){return arguments.length?(t=null==n?null:n+"",i):t},i.responseType=function(n){return arguments.length?(s=n,i):s},i.response=function(n){return 
e=n,i},["get","post"].forEach(function(n){i[n]=function(){return i.send.apply(i,[n].concat(Bo(arguments)))}}),i.send=function(e,r,u){if(2===arguments.length&&"function"==typeof r&&(u=r,r=null),c.open(e,n,!0),null==t||"accept"in a||(a.accept=t+",*/*"),c.setRequestHeader)for(var l in a)c.setRequestHeader(l,a[l]);return null!=t&&c.overrideMimeType&&c.overrideMimeType(t),null!=s&&(c.responseType=s),null!=u&&i.on("error",u).on("load",function(n){u(null,n)}),o.beforesend.call(i,c),c.send(null==r?null:r),i},i.abort=function(){return c.abort(),i},Xo.rebind(i,o,"on"),null==r?i:i.get(kt(r))}function kt(n){return 1===n.length?function(t,e){n(null==t?e:null)}:n}function Et(){var n=At(),t=Ct()-n;t>24?(isFinite(t)&&(clearTimeout(Wa),Wa=setTimeout(Et,t)),Ba=0):(Ba=1,Ga(Et))}function At(){var n=Date.now();for(Ja=Xa;Ja;)n>=Ja.t&&(Ja.f=Ja.c(n-Ja.t)),Ja=Ja.n;return n}function Ct(){for(var n,t=Xa,e=1/0;t;)t.f?t=n?n.n=t.n:Xa=t.n:(t.t<e&&(e=t.t),t=(n=t).n);return $a=n,e}function Nt(n,t){return t-(n?Math.ceil(Math.log(n)/Math.LN10):1)}function Lt(n,t){var e=Math.pow(10,3*oa(8-t));return{scale:t>8?function(n){return n/e}:function(n){return n*e},symbol:n}}function Tt(n){var t=n.decimal,e=n.thousands,r=n.grouping,u=n.currency,i=r?function(n){for(var t=n.length,u=[],i=0,o=r[0];t>0&&o>0;)u.push(n.substring(t-=o,t+o)),o=r[i=(i+1)%r.length];return u.reverse().join(e)}:bt;return function(n){var e=Qa.exec(n),r=e[1]||" ",o=e[2]||">",a=e[3]||"",c=e[4]||"",s=e[5],l=+e[6],f=e[7],h=e[8],g=e[9],p=1,v="",d="",m=!1;switch(h&&(h=+h.substring(1)),(s||"0"===r&&"="===o)&&(s=r="0",o="=",f&&(l-=Math.floor((l-1)/4))),g){case"n":f=!0,g="g";break;case"%":p=100,d="%",g="f";break;case"p":p=100,d="%",g="r";break;case"b":case"o":case"x":case"X":"#"===c&&(v="0"+g.toLowerCase());case"c":case"d":m=!0,h=0;break;case"s":p=-1,g="r"}"$"===c&&(v=u[0],d=u[1]),"r"!=g||h||(g="g"),null!=h&&("g"==g?h=Math.max(1,Math.min(21,h)):("e"==g||"f"==g)&&(h=Math.max(0,Math.min(20,h)))),g=nc.get(g)||qt;var y=s&&f;return function(n){var 
e=d;if(m&&n%1)return"";var u=0>n||0===n&&0>1/n?(n=-n,"-"):a;if(0>p){var c=Xo.formatPrefix(n,h);n=c.scale(n),e=c.symbol+d}else n*=p;n=g(n,h);var x=n.lastIndexOf("."),M=0>x?n:n.substring(0,x),_=0>x?"":t+n.substring(x+1);!s&&f&&(M=i(M));var b=v.length+M.length+_.length+(y?0:u.length),w=l>b?new Array(b=l-b+1).join(r):"";return y&&(M=i(w+M)),u+=v,n=M+_,("<"===o?u+n+w:">"===o?w+u+n:"^"===o?w.substring(0,b>>=1)+u+n+w.substring(b):u+(y?n:w+n))+e}}}function qt(n){return n+""}function zt(){this._=new Date(arguments.length>1?Date.UTC.apply(this,arguments):arguments[0])}function Rt(n,t,e){function r(t){var e=n(t),r=i(e,1);return r-t>t-e?e:r}function u(e){return t(e=n(new ec(e-1)),1),e}function i(n,e){return t(n=new ec(+n),e),n}function o(n,r,i){var o=u(n),a=[];if(i>1)for(;r>o;)e(o)%i||a.push(new Date(+o)),t(o,1);else for(;r>o;)a.push(new Date(+o)),t(o,1);return a}function a(n,t,e){try{ec=zt;var r=new zt;return r._=n,o(r,t,e)}finally{ec=Date}}n.floor=n,n.round=r,n.ceil=u,n.offset=i,n.range=o;var c=n.utc=Dt(n);return c.floor=c,c.round=Dt(r),c.ceil=Dt(u),c.offset=Dt(i),c.range=a,n}function Dt(n){return function(t,e){try{ec=zt;var r=new zt;return r._=t,n(r,e)._}finally{ec=Date}}}function Pt(n){function t(n){function t(t){for(var e,u,i,o=[],a=-1,c=0;++a<r;)37===n.charCodeAt(a)&&(o.push(n.substring(c,a)),null!=(u=uc[e=n.charAt(++a)])&&(e=n.charAt(++a)),(i=C[e])&&(e=i(t,null==u?"e"===e?" 
":"0":u)),o.push(e),c=a+1);return o.push(n.substring(c,a)),o.join("")}var r=n.length;return t.parse=function(t){var r={y:1900,m:0,d:1,H:0,M:0,S:0,L:0,Z:null},u=e(r,n,t,0);if(u!=t.length)return null;"p"in r&&(r.H=r.H%12+12*r.p);var i=null!=r.Z&&ec!==zt,o=new(i?zt:ec);return"j"in r?o.setFullYear(r.y,0,r.j):"w"in r&&("W"in r||"U"in r)?(o.setFullYear(r.y,0,1),o.setFullYear(r.y,0,"W"in r?(r.w+6)%7+7*r.W-(o.getDay()+5)%7:r.w+7*r.U-(o.getDay()+6)%7)):o.setFullYear(r.y,r.m,r.d),o.setHours(r.H+Math.floor(r.Z/100),r.M+r.Z%100,r.S,r.L),i?o._:o},t.toString=function(){return n},t}function e(n,t,e,r){for(var u,i,o,a=0,c=t.length,s=e.length;c>a;){if(r>=s)return-1;if(u=t.charCodeAt(a++),37===u){if(o=t.charAt(a++),i=N[o in uc?t.charAt(a++):o],!i||(r=i(n,e,r))<0)return-1}else if(u!=e.charCodeAt(r++))return-1}return r}function r(n,t,e){b.lastIndex=0;var r=b.exec(t.substring(e));return r?(n.w=w.get(r[0].toLowerCase()),e+r[0].length):-1}function u(n,t,e){M.lastIndex=0;var r=M.exec(t.substring(e));return r?(n.w=_.get(r[0].toLowerCase()),e+r[0].length):-1}function i(n,t,e){E.lastIndex=0;var r=E.exec(t.substring(e));return r?(n.m=A.get(r[0].toLowerCase()),e+r[0].length):-1}function o(n,t,e){S.lastIndex=0;var r=S.exec(t.substring(e));return r?(n.m=k.get(r[0].toLowerCase()),e+r[0].length):-1}function a(n,t,r){return e(n,C.c.toString(),t,r)}function c(n,t,r){return e(n,C.x.toString(),t,r)}function s(n,t,r){return e(n,C.X.toString(),t,r)}function l(n,t,e){var r=x.get(t.substring(e,e+=2).toLowerCase());return null==r?-1:(n.p=r,e)}var f=n.dateTime,h=n.date,g=n.time,p=n.periods,v=n.days,d=n.shortDays,m=n.months,y=n.shortMonths;t.utc=function(n){function e(n){try{ec=zt;var t=new ec;return t._=n,r(t)}finally{ec=Date}}var r=t(n);return e.parse=function(n){try{ec=zt;var t=r.parse(n);return t&&t._}finally{ec=Date}},e.toString=r.toString,e},t.multi=t.utc.multi=ee;var x=Xo.map(),M=jt(v),_=Ht(v),b=jt(d),w=Ht(d),S=jt(m),k=Ht(m),E=jt(y),A=Ht(y);p.forEach(function(n,t){x.set(n.toLowerCase(),t)});var 
C={a:function(n){return d[n.getDay()]},A:function(n){return v[n.getDay()]},b:function(n){return y[n.getMonth()]},B:function(n){return m[n.getMonth()]},c:t(f),d:function(n,t){return Ut(n.getDate(),t,2)},e:function(n,t){return Ut(n.getDate(),t,2)},H:function(n,t){return Ut(n.getHours(),t,2)},I:function(n,t){return Ut(n.getHours()%12||12,t,2)},j:function(n,t){return Ut(1+tc.dayOfYear(n),t,3)},L:function(n,t){return Ut(n.getMilliseconds(),t,3)},m:function(n,t){return Ut(n.getMonth()+1,t,2)},M:function(n,t){return Ut(n.getMinutes(),t,2)},p:function(n){return p[+(n.getHours()>=12)]},S:function(n,t){return Ut(n.getSeconds(),t,2)},U:function(n,t){return Ut(tc.sundayOfYear(n),t,2)},w:function(n){return n.getDay()},W:function(n,t){return Ut(tc.mondayOfYear(n),t,2)},x:t(h),X:t(g),y:function(n,t){return Ut(n.getFullYear()%100,t,2)},Y:function(n,t){return Ut(n.getFullYear()%1e4,t,4)},Z:ne,"%":function(){return"%"}},N={a:r,A:u,b:i,B:o,c:a,d:Bt,e:Bt,H:Jt,I:Jt,j:Wt,L:Qt,m:$t,M:Gt,p:l,S:Kt,U:Ot,w:Ft,W:Yt,x:c,X:s,y:Zt,Y:It,Z:Vt,"%":te};return t}function Ut(n,t,e){var r=0>n?"-":"",u=(r?-n:n)+"",i=u.length;return r+(e>i?new Array(e-i+1).join(t)+u:u)}function jt(n){return new RegExp("^(?:"+n.map(Xo.requote).join("|")+")","i")}function Ht(n){for(var t=new u,e=-1,r=n.length;++e<r;)t.set(n[e].toLowerCase(),e);return t}function Ft(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+1));return r?(n.w=+r[0],e+r[0].length):-1}function Ot(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e));return r?(n.U=+r[0],e+r[0].length):-1}function Yt(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e));return r?(n.W=+r[0],e+r[0].length):-1}function It(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+4));return r?(n.y=+r[0],e+r[0].length):-1}function Zt(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+2));return r?(n.y=Xt(+r[0]),e+r[0].length):-1}function Vt(n,t,e){return/^[+-]\d{4}$/.test(t=t.substring(e,e+5))?(n.Z=+t,e+5):-1}function Xt(n){return n+(n>68?1900:2e3)}function 
$t(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+2));return r?(n.m=r[0]-1,e+r[0].length):-1}function Bt(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+2));return r?(n.d=+r[0],e+r[0].length):-1}function Wt(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+3));return r?(n.j=+r[0],e+r[0].length):-1}function Jt(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+2));return r?(n.H=+r[0],e+r[0].length):-1}function Gt(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+2));return r?(n.M=+r[0],e+r[0].length):-1}function Kt(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+2));return r?(n.S=+r[0],e+r[0].length):-1}function Qt(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+3));return r?(n.L=+r[0],e+r[0].length):-1}function ne(n){var t=n.getTimezoneOffset(),e=t>0?"-":"+",r=~~(oa(t)/60),u=oa(t)%60;return e+Ut(r,"0",2)+Ut(u,"0",2)}function te(n,t,e){oc.lastIndex=0;var r=oc.exec(t.substring(e,e+1));return r?e+r[0].length:-1}function ee(n){for(var t=n.length,e=-1;++e<t;)n[e][0]=this(n[e][0]);return function(t){for(var e=0,r=n[e];!r[1](t);)r=n[++e];return r[0](t)}}function re(){}function ue(n,t,e){var r=e.s=n+t,u=r-n,i=r-u;e.t=n-i+(t-u)}function ie(n,t){n&&lc.hasOwnProperty(n.type)&&lc[n.type](n,t)}function oe(n,t,e){var r,u=-1,i=n.length-e;for(t.lineStart();++u<i;)r=n[u],t.point(r[0],r[1],r[2]);t.lineEnd()}function ae(n,t){var e=-1,r=n.length;for(t.polygonStart();++e<r;)oe(n[e],t,1);t.polygonEnd()}function ce(){function n(n,t){n*=Na,t=t*Na/2+Sa/4;var e=n-r,o=e>=0?1:-1,a=o*e,c=Math.cos(t),s=Math.sin(t),l=i*s,f=u*c+l*Math.cos(a),h=l*o*Math.sin(a);hc.add(Math.atan2(h,f)),r=n,u=c,i=s}var t,e,r,u,i;gc.point=function(o,a){gc.point=n,r=(t=o)*Na,u=Math.cos(a=(e=a)*Na/2+Sa/4),i=Math.sin(a)},gc.lineEnd=function(){n(t,e)}}function se(n){var t=n[0],e=n[1],r=Math.cos(e);return[r*Math.cos(t),r*Math.sin(t),Math.sin(e)]}function le(n,t){return n[0]*t[0]+n[1]*t[1]+n[2]*t[2]}function fe(n,t){return[n[1]*t[2]-n[2]*t[1],n[2]*t[0]-n[0]*t[2],n[0]*t[1]-n[1]*t[0]]}function 
he(n,t){n[0]+=t[0],n[1]+=t[1],n[2]+=t[2]}function ge(n,t){return[n[0]*t,n[1]*t,n[2]*t]}function pe(n){var t=Math.sqrt(n[0]*n[0]+n[1]*n[1]+n[2]*n[2]);n[0]/=t,n[1]/=t,n[2]/=t}function ve(n){return[Math.atan2(n[1],n[0]),X(n[2])]}function de(n,t){return oa(n[0]-t[0])<Aa&&oa(n[1]-t[1])<Aa}function me(n,t){n*=Na;var e=Math.cos(t*=Na);ye(e*Math.cos(n),e*Math.sin(n),Math.sin(t))}function ye(n,t,e){++pc,dc+=(n-dc)/pc,mc+=(t-mc)/pc,yc+=(e-yc)/pc}function xe(){function n(n,u){n*=Na;var i=Math.cos(u*=Na),o=i*Math.cos(n),a=i*Math.sin(n),c=Math.sin(u),s=Math.atan2(Math.sqrt((s=e*c-r*a)*s+(s=r*o-t*c)*s+(s=t*a-e*o)*s),t*o+e*a+r*c);vc+=s,xc+=s*(t+(t=o)),Mc+=s*(e+(e=a)),_c+=s*(r+(r=c)),ye(t,e,r)}var t,e,r;kc.point=function(u,i){u*=Na;var o=Math.cos(i*=Na);t=o*Math.cos(u),e=o*Math.sin(u),r=Math.sin(i),kc.point=n,ye(t,e,r)}}function Me(){kc.point=me}function _e(){function n(n,t){n*=Na;var e=Math.cos(t*=Na),o=e*Math.cos(n),a=e*Math.sin(n),c=Math.sin(t),s=u*c-i*a,l=i*o-r*c,f=r*a-u*o,h=Math.sqrt(s*s+l*l+f*f),g=r*o+u*a+i*c,p=h&&-V(g)/h,v=Math.atan2(h,g);bc+=p*s,wc+=p*l,Sc+=p*f,vc+=v,xc+=v*(r+(r=o)),Mc+=v*(u+(u=a)),_c+=v*(i+(i=c)),ye(r,u,i)}var t,e,r,u,i;kc.point=function(o,a){t=o,e=a,kc.point=n,o*=Na;var c=Math.cos(a*=Na);r=c*Math.cos(o),u=c*Math.sin(o),i=Math.sin(a),ye(r,u,i)},kc.lineEnd=function(){n(t,e),kc.lineEnd=Me,kc.point=me}}function be(){return!0}function we(n,t,e,r,u){var i=[],o=[];if(n.forEach(function(n){if(!((t=n.length-1)<=0)){var t,e=n[0],r=n[t];if(de(e,r)){u.lineStart();for(var a=0;t>a;++a)u.point((e=n[a])[0],e[1]);return u.lineEnd(),void 0}var c=new ke(e,n,null,!0),s=new ke(e,null,c,!1);c.o=s,i.push(c),o.push(s),c=new ke(r,n,null,!1),s=new ke(r,null,c,!0),c.o=s,i.push(c),o.push(s)}}),o.sort(t),Se(i),Se(o),i.length){for(var a=0,c=e,s=o.length;s>a;++a)o[a].e=c=!c;for(var l,f,h=i[0];;){for(var g=h,p=!0;g.v;)if((g=g.n)===h)return;l=g.z,u.lineStart();do{if(g.v=g.o.v=!0,g.e){if(p)for(var a=0,s=l.length;s>a;++a)u.point((f=l[a])[0],f[1]);else 
r(g.x,g.n.x,1,u);g=g.n}else{if(p){l=g.p.z;for(var a=l.length-1;a>=0;--a)u.point((f=l[a])[0],f[1])}else r(g.x,g.p.x,-1,u);g=g.p}g=g.o,l=g.z,p=!p}while(!g.v);u.lineEnd()}}}function Se(n){if(t=n.length){for(var t,e,r=0,u=n[0];++r<t;)u.n=e=n[r],e.p=u,u=e;u.n=e=n[0],e.p=u}}function ke(n,t,e,r){this.x=n,this.z=t,this.o=e,this.e=r,this.v=!1,this.n=this.p=null}function Ee(n,t,e,r){return function(u,i){function o(t,e){var r=u(t,e);n(t=r[0],e=r[1])&&i.point(t,e)}function a(n,t){var e=u(n,t);d.point(e[0],e[1])}function c(){y.point=a,d.lineStart()}function s(){y.point=o,d.lineEnd()}function l(n,t){v.push([n,t]);var e=u(n,t);M.point(e[0],e[1])}function f(){M.lineStart(),v=[]}function h(){l(v[0][0],v[0][1]),M.lineEnd();var n,t=M.clean(),e=x.buffer(),r=e.length;if(v.pop(),p.push(v),v=null,r){if(1&t){n=e[0];var u,r=n.length-1,o=-1;for(i.lineStart();++o<r;)i.point((u=n[o])[0],u[1]);return i.lineEnd(),void 0}r>1&&2&t&&e.push(e.pop().concat(e.shift())),g.push(e.filter(Ae))}}var g,p,v,d=t(i),m=u.invert(r[0],r[1]),y={point:o,lineStart:c,lineEnd:s,polygonStart:function(){y.point=l,y.lineStart=f,y.lineEnd=h,g=[],p=[],i.polygonStart()},polygonEnd:function(){y.point=o,y.lineStart=c,y.lineEnd=s,g=Xo.merge(g);var n=Le(m,p);g.length?we(g,Ne,n,e,i):n&&(i.lineStart(),e(null,null,1,i),i.lineEnd()),i.polygonEnd(),g=p=null},sphere:function(){i.polygonStart(),i.lineStart(),e(null,null,1,i),i.lineEnd(),i.polygonEnd()}},x=Ce(),M=t(x);return y}}function Ae(n){return n.length>1}function Ce(){var n,t=[];return{lineStart:function(){t.push(n=[])},point:function(t,e){n.push([t,e])},lineEnd:g,buffer:function(){var e=t;return t=[],n=null,e},rejoin:function(){t.length>1&&t.push(t.pop().concat(t.shift()))}}}function Ne(n,t){return((n=n.x)[0]<0?n[1]-Ea-Aa:Ea-n[1])-((t=t.x)[0]<0?t[1]-Ea-Aa:Ea-t[1])}function Le(n,t){var e=n[0],r=n[1],u=[Math.sin(e),-Math.cos(e),0],i=0,o=0;hc.reset();for(var a=0,c=t.length;c>a;++a){var s=t[a],l=s.length;if(l)for(var 
f=s[0],h=f[0],g=f[1]/2+Sa/4,p=Math.sin(g),v=Math.cos(g),d=1;;){d===l&&(d=0),n=s[d];var m=n[0],y=n[1]/2+Sa/4,x=Math.sin(y),M=Math.cos(y),_=m-h,b=_>=0?1:-1,w=b*_,S=w>Sa,k=p*x;if(hc.add(Math.atan2(k*b*Math.sin(w),v*M+k*Math.cos(w))),i+=S?_+b*ka:_,S^h>=e^m>=e){var E=fe(se(f),se(n));pe(E);var A=fe(u,E);pe(A);var C=(S^_>=0?-1:1)*X(A[2]);(r>C||r===C&&(E[0]||E[1]))&&(o+=S^_>=0?1:-1)}if(!d++)break;h=m,p=x,v=M,f=n}}return(-Aa>i||Aa>i&&0>hc)^1&o}function Te(n){var t,e=0/0,r=0/0,u=0/0;return{lineStart:function(){n.lineStart(),t=1},point:function(i,o){var a=i>0?Sa:-Sa,c=oa(i-e);oa(c-Sa)<Aa?(n.point(e,r=(r+o)/2>0?Ea:-Ea),n.point(u,r),n.lineEnd(),n.lineStart(),n.point(a,r),n.point(i,r),t=0):u!==a&&c>=Sa&&(oa(e-u)<Aa&&(e-=u*Aa),oa(i-a)<Aa&&(i-=a*Aa),r=qe(e,r,i,o),n.point(u,r),n.lineEnd(),n.lineStart(),n.point(a,r),t=0),n.point(e=i,r=o),u=a},lineEnd:function(){n.lineEnd(),e=r=0/0},clean:function(){return 2-t}}}function qe(n,t,e,r){var u,i,o=Math.sin(n-e);return oa(o)>Aa?Math.atan((Math.sin(t)*(i=Math.cos(r))*Math.sin(e)-Math.sin(r)*(u=Math.cos(t))*Math.sin(n))/(u*i*o)):(t+r)/2}function ze(n,t,e,r){var u;if(null==n)u=e*Ea,r.point(-Sa,u),r.point(0,u),r.point(Sa,u),r.point(Sa,0),r.point(Sa,-u),r.point(0,-u),r.point(-Sa,-u),r.point(-Sa,0),r.point(-Sa,u);else if(oa(n[0]-t[0])>Aa){var i=n[0]<t[0]?Sa:-Sa;u=e*i/2,r.point(-i,u),r.point(0,u),r.point(i,u)}else r.point(t[0],t[1])}function Re(n){function t(n,t){return Math.cos(n)*Math.cos(t)>i}function e(n){var e,i,c,s,l;return{lineStart:function(){s=c=!1,l=1},point:function(f,h){var g,p=[f,h],v=t(f,h),d=o?v?0:u(f,h):v?u(f+(0>f?Sa:-Sa),h):0;if(!e&&(s=c=v)&&n.lineStart(),v!==c&&(g=r(e,p),(de(e,g)||de(p,g))&&(p[0]+=Aa,p[1]+=Aa,v=t(p[0],p[1]))),v!==c)l=0,v?(n.lineStart(),g=r(p,e),n.point(g[0],g[1])):(g=r(e,p),n.point(g[0],g[1]),n.lineEnd()),e=g;else if(a&&e&&o^v){var 
m;d&i||!(m=r(p,e,!0))||(l=0,o?(n.lineStart(),n.point(m[0][0],m[0][1]),n.point(m[1][0],m[1][1]),n.lineEnd()):(n.point(m[1][0],m[1][1]),n.lineEnd(),n.lineStart(),n.point(m[0][0],m[0][1])))}!v||e&&de(e,p)||n.point(p[0],p[1]),e=p,c=v,i=d},lineEnd:function(){c&&n.lineEnd(),e=null},clean:function(){return l|(s&&c)<<1}}}function r(n,t,e){var r=se(n),u=se(t),o=[1,0,0],a=fe(r,u),c=le(a,a),s=a[0],l=c-s*s;if(!l)return!e&&n;var f=i*c/l,h=-i*s/l,g=fe(o,a),p=ge(o,f),v=ge(a,h);he(p,v);var d=g,m=le(p,d),y=le(d,d),x=m*m-y*(le(p,p)-1);if(!(0>x)){var M=Math.sqrt(x),_=ge(d,(-m-M)/y);if(he(_,p),_=ve(_),!e)return _;var b,w=n[0],S=t[0],k=n[1],E=t[1];w>S&&(b=w,w=S,S=b);var A=S-w,C=oa(A-Sa)<Aa,N=C||Aa>A;if(!C&&k>E&&(b=k,k=E,E=b),N?C?k+E>0^_[1]<(oa(_[0]-w)<Aa?k:E):k<=_[1]&&_[1]<=E:A>Sa^(w<=_[0]&&_[0]<=S)){var L=ge(d,(-m+M)/y);return he(L,p),[_,ve(L)]}}}function u(t,e){var r=o?n:Sa-n,u=0;return-r>t?u|=1:t>r&&(u|=2),-r>e?u|=4:e>r&&(u|=8),u}var i=Math.cos(n),o=i>0,a=oa(i)>Aa,c=cr(n,6*Na);return Ee(t,e,c,o?[0,-n]:[-Sa,n-Sa])}function De(n,t,e,r){return function(u){var i,o=u.a,a=u.b,c=o.x,s=o.y,l=a.x,f=a.y,h=0,g=1,p=l-c,v=f-s;if(i=n-c,p||!(i>0)){if(i/=p,0>p){if(h>i)return;g>i&&(g=i)}else if(p>0){if(i>g)return;i>h&&(h=i)}if(i=e-c,p||!(0>i)){if(i/=p,0>p){if(i>g)return;i>h&&(h=i)}else if(p>0){if(h>i)return;g>i&&(g=i)}if(i=t-s,v||!(i>0)){if(i/=v,0>v){if(h>i)return;g>i&&(g=i)}else if(v>0){if(i>g)return;i>h&&(h=i)}if(i=r-s,v||!(0>i)){if(i/=v,0>v){if(i>g)return;i>h&&(h=i)}else if(v>0){if(h>i)return;g>i&&(g=i)}return h>0&&(u.a={x:c+h*p,y:s+h*v}),1>g&&(u.b={x:c+g*p,y:s+g*v}),u}}}}}}function Pe(n,t,e,r){function u(r,u){return oa(r[0]-n)<Aa?u>0?0:3:oa(r[0]-e)<Aa?u>0?2:1:oa(r[1]-t)<Aa?u>0?1:0:u>0?3:2}function i(n,t){return o(n.x,t.x)}function o(n,t){var e=u(n,1),r=u(t,1);return e!==r?e-r:0===e?t[1]-n[1]:1===e?n[0]-t[0]:2===e?n[1]-t[1]:t[0]-n[0]}return function(a){function c(n){for(var t=0,e=d.length,r=n[1],u=0;e>u;++u)for(var 
i,o=1,a=d[u],c=a.length,s=a[0];c>o;++o)i=a[o],s[1]<=r?i[1]>r&&Z(s,i,n)>0&&++t:i[1]<=r&&Z(s,i,n)<0&&--t,s=i;return 0!==t}function s(i,a,c,s){var l=0,f=0;if(null==i||(l=u(i,c))!==(f=u(a,c))||o(i,a)<0^c>0){do s.point(0===l||3===l?n:e,l>1?r:t);while((l=(l+c+4)%4)!==f)}else s.point(a[0],a[1])}function l(u,i){return u>=n&&e>=u&&i>=t&&r>=i}function f(n,t){l(n,t)&&a.point(n,t)}function h(){N.point=p,d&&d.push(m=[]),S=!0,w=!1,_=b=0/0}function g(){v&&(p(y,x),M&&w&&A.rejoin(),v.push(A.buffer())),N.point=f,w&&a.lineEnd()}function p(n,t){n=Math.max(-Ac,Math.min(Ac,n)),t=Math.max(-Ac,Math.min(Ac,t));var e=l(n,t);if(d&&m.push([n,t]),S)y=n,x=t,M=e,S=!1,e&&(a.lineStart(),a.point(n,t));else if(e&&w)a.point(n,t);else{var r={a:{x:_,y:b},b:{x:n,y:t}};C(r)?(w||(a.lineStart(),a.point(r.a.x,r.a.y)),a.point(r.b.x,r.b.y),e||a.lineEnd(),k=!1):e&&(a.lineStart(),a.point(n,t),k=!1)}_=n,b=t,w=e}var v,d,m,y,x,M,_,b,w,S,k,E=a,A=Ce(),C=De(n,t,e,r),N={point:f,lineStart:h,lineEnd:g,polygonStart:function(){a=A,v=[],d=[],k=!0},polygonEnd:function(){a=E,v=Xo.merge(v);var t=c([n,r]),e=k&&t,u=v.length;(e||u)&&(a.polygonStart(),e&&(a.lineStart(),s(null,null,1,a),a.lineEnd()),u&&we(v,i,t,s,a),a.polygonEnd()),v=d=m=null}};return N}}function Ue(n,t){function e(e,r){return e=n(e,r),t(e[0],e[1])}return n.invert&&t.invert&&(e.invert=function(e,r){return e=t.invert(e,r),e&&n.invert(e[0],e[1])}),e}function je(n){var t=0,e=Sa/3,r=nr(n),u=r(t,e);return u.parallels=function(n){return arguments.length?r(t=n[0]*Sa/180,e=n[1]*Sa/180):[180*(t/Sa),180*(e/Sa)]},u}function He(n,t){function e(n,t){var e=Math.sqrt(i-2*u*Math.sin(t))/u;return[e*Math.sin(n*=u),o-e*Math.cos(n)]}var r=Math.sin(n),u=(r+Math.sin(t))/2,i=1+r*(2*u-r),o=Math.sqrt(i)/u;return e.invert=function(n,t){var e=o-t;return[Math.atan2(n,e)/u,X((i-(n*n+e*e)*u*u)/(2*u))]},e}function Fe(){function n(n,t){Nc+=u*n-r*t,r=n,u=t}var t,e,r,u;Rc.point=function(i,o){Rc.point=n,t=r=i,e=u=o},Rc.lineEnd=function(){n(t,e)}}function 
Oe(n,t){Lc>n&&(Lc=n),n>qc&&(qc=n),Tc>t&&(Tc=t),t>zc&&(zc=t)}function Ye(){function n(n,t){o.push("M",n,",",t,i)}function t(n,t){o.push("M",n,",",t),a.point=e}function e(n,t){o.push("L",n,",",t)}function r(){a.point=n}function u(){o.push("Z")}var i=Ie(4.5),o=[],a={point:n,lineStart:function(){a.point=t},lineEnd:r,polygonStart:function(){a.lineEnd=u},polygonEnd:function(){a.lineEnd=r,a.point=n},pointRadius:function(n){return i=Ie(n),a},result:function(){if(o.length){var n=o.join("");return o=[],n}}};return a}function Ie(n){return"m0,"+n+"a"+n+","+n+" 0 1,1 0,"+-2*n+"a"+n+","+n+" 0 1,1 0,"+2*n+"z"}function Ze(n,t){dc+=n,mc+=t,++yc}function Ve(){function n(n,r){var u=n-t,i=r-e,o=Math.sqrt(u*u+i*i);xc+=o*(t+n)/2,Mc+=o*(e+r)/2,_c+=o,Ze(t=n,e=r)}var t,e;Pc.point=function(r,u){Pc.point=n,Ze(t=r,e=u)}}function Xe(){Pc.point=Ze}function $e(){function n(n,t){var e=n-r,i=t-u,o=Math.sqrt(e*e+i*i);xc+=o*(r+n)/2,Mc+=o*(u+t)/2,_c+=o,o=u*n-r*t,bc+=o*(r+n),wc+=o*(u+t),Sc+=3*o,Ze(r=n,u=t)}var t,e,r,u;Pc.point=function(i,o){Pc.point=n,Ze(t=r=i,e=u=o)},Pc.lineEnd=function(){n(t,e)}}function Be(n){function t(t,e){n.moveTo(t,e),n.arc(t,e,o,0,ka)}function e(t,e){n.moveTo(t,e),a.point=r}function r(t,e){n.lineTo(t,e)}function u(){a.point=t}function i(){n.closePath()}var o=4.5,a={point:t,lineStart:function(){a.point=e},lineEnd:u,polygonStart:function(){a.lineEnd=i},polygonEnd:function(){a.lineEnd=u,a.point=t},pointRadius:function(n){return o=n,a},result:g};return a}function We(n){function t(n){return(a?r:e)(n)}function e(t){return Ke(t,function(e,r){e=n(e,r),t.point(e[0],e[1])})}function r(t){function e(e,r){e=n(e,r),t.point(e[0],e[1])}function r(){x=0/0,S.point=i,t.lineStart()}function i(e,r){var i=se([e,r]),o=n(e,r);u(x,M,y,_,b,w,x=o[0],M=o[1],y=e,_=i[0],b=i[1],w=i[2],a,t),t.point(x,M)}function o(){S.point=e,t.lineEnd()}function c(){r(),S.point=s,S.lineEnd=l}function s(n,t){i(f=n,h=t),g=x,p=M,v=_,d=b,m=w,S.point=i}function l(){u(x,M,y,_,b,w,g,p,f,v,d,m,a,t),S.lineEnd=o,o()}var 
f,h,g,p,v,d,m,y,x,M,_,b,w,S={point:e,lineStart:r,lineEnd:o,polygonStart:function(){t.polygonStart(),S.lineStart=c},polygonEnd:function(){t.polygonEnd(),S.lineStart=r}};return S}function u(t,e,r,a,c,s,l,f,h,g,p,v,d,m){var y=l-t,x=f-e,M=y*y+x*x;if(M>4*i&&d--){var _=a+g,b=c+p,w=s+v,S=Math.sqrt(_*_+b*b+w*w),k=Math.asin(w/=S),E=oa(oa(w)-1)<Aa||oa(r-h)<Aa?(r+h)/2:Math.atan2(b,_),A=n(E,k),C=A[0],N=A[1],L=C-t,T=N-e,q=x*L-y*T;(q*q/M>i||oa((y*L+x*T)/M-.5)>.3||o>a*g+c*p+s*v)&&(u(t,e,r,a,c,s,C,N,E,_/=S,b/=S,w,d,m),m.point(C,N),u(C,N,E,_,b,w,l,f,h,g,p,v,d,m))}}var i=.5,o=Math.cos(30*Na),a=16;return t.precision=function(n){return arguments.length?(a=(i=n*n)>0&&16,t):Math.sqrt(i)},t}function Je(n){var t=We(function(t,e){return n([t*La,e*La])});return function(n){return tr(t(n))}}function Ge(n){this.stream=n}function Ke(n,t){return{point:t,sphere:function(){n.sphere()},lineStart:function(){n.lineStart()},lineEnd:function(){n.lineEnd()},polygonStart:function(){n.polygonStart()},polygonEnd:function(){n.polygonEnd()}}}function Qe(n){return nr(function(){return n})()}function nr(n){function t(n){return n=a(n[0]*Na,n[1]*Na),[n[0]*h+c,s-n[1]*h]}function e(n){return n=a.invert((n[0]-c)/h,(s-n[1])/h),n&&[n[0]*La,n[1]*La]}function r(){a=Ue(o=ur(m,y,x),i);var n=i(v,d);return c=g-n[0]*h,s=p+n[1]*h,u()}function u(){return l&&(l.valid=!1,l=null),t}var i,o,a,c,s,l,f=We(function(n,t){return n=i(n,t),[n[0]*h+c,s-n[1]*h]}),h=150,g=480,p=250,v=0,d=0,m=0,y=0,x=0,M=Ec,_=bt,b=null,w=null;return t.stream=function(n){return l&&(l.valid=!1),l=tr(M(o,f(_(n)))),l.valid=!0,l},t.clipAngle=function(n){return arguments.length?(M=null==n?(b=n,Ec):Re((b=+n)*Na),u()):b},t.clipExtent=function(n){return arguments.length?(w=n,_=n?Pe(n[0][0],n[0][1],n[1][0],n[1][1]):bt,u()):w},t.scale=function(n){return arguments.length?(h=+n,r()):h},t.translate=function(n){return arguments.length?(g=+n[0],p=+n[1],r()):[g,p]},t.center=function(n){return 
arguments.length?(v=n[0]%360*Na,d=n[1]%360*Na,r()):[v*La,d*La]},t.rotate=function(n){return arguments.length?(m=n[0]%360*Na,y=n[1]%360*Na,x=n.length>2?n[2]%360*Na:0,r()):[m*La,y*La,x*La]},Xo.rebind(t,f,"precision"),function(){return i=n.apply(this,arguments),t.invert=i.invert&&e,r()}}function tr(n){return Ke(n,function(t,e){n.point(t*Na,e*Na)})}function er(n,t){return[n,t]}function rr(n,t){return[n>Sa?n-ka:-Sa>n?n+ka:n,t]}function ur(n,t,e){return n?t||e?Ue(or(n),ar(t,e)):or(n):t||e?ar(t,e):rr}function ir(n){return function(t,e){return t+=n,[t>Sa?t-ka:-Sa>t?t+ka:t,e]}}function or(n){var t=ir(n);return t.invert=ir(-n),t}function ar(n,t){function e(n,t){var e=Math.cos(t),a=Math.cos(n)*e,c=Math.sin(n)*e,s=Math.sin(t),l=s*r+a*u;return[Math.atan2(c*i-l*o,a*r-s*u),X(l*i+c*o)]}var r=Math.cos(n),u=Math.sin(n),i=Math.cos(t),o=Math.sin(t);return e.invert=function(n,t){var e=Math.cos(t),a=Math.cos(n)*e,c=Math.sin(n)*e,s=Math.sin(t),l=s*i-c*o;return[Math.atan2(c*i+s*o,a*r+l*u),X(l*r-a*u)]},e}function cr(n,t){var e=Math.cos(n),r=Math.sin(n);return function(u,i,o,a){var c=o*t;null!=u?(u=sr(e,u),i=sr(e,i),(o>0?i>u:u>i)&&(u+=o*ka)):(u=n+o*ka,i=n-.5*c);for(var s,l=u;o>0?l>i:i>l;l-=c)a.point((s=ve([e,-r*Math.cos(l),-r*Math.sin(l)]))[0],s[1])}}function sr(n,t){var e=se(t);e[0]-=n,pe(e);var r=V(-e[1]);return((-e[2]<0?-r:r)+2*Math.PI-Aa)%(2*Math.PI)}function lr(n,t,e){var r=Xo.range(n,t-Aa,e).concat(t);return function(n){return r.map(function(t){return[n,t]})}}function fr(n,t,e){var r=Xo.range(n,t-Aa,e).concat(t);return function(n){return r.map(function(t){return[t,n]})}}function hr(n){return n.source}function gr(n){return n.target}function pr(n,t,e,r){var u=Math.cos(t),i=Math.sin(t),o=Math.cos(r),a=Math.sin(r),c=u*Math.cos(n),s=u*Math.sin(n),l=o*Math.cos(e),f=o*Math.sin(e),h=2*Math.asin(Math.sqrt(J(r-t)+u*o*J(e-n))),g=1/Math.sin(h),p=h?function(n){var 
t=Math.sin(n*=h)*g,e=Math.sin(h-n)*g,r=e*c+t*l,u=e*s+t*f,o=e*i+t*a;return[Math.atan2(u,r)*La,Math.atan2(o,Math.sqrt(r*r+u*u))*La]}:function(){return[n*La,t*La]};return p.distance=h,p}function vr(){function n(n,u){var i=Math.sin(u*=Na),o=Math.cos(u),a=oa((n*=Na)-t),c=Math.cos(a);Uc+=Math.atan2(Math.sqrt((a=o*Math.sin(a))*a+(a=r*i-e*o*c)*a),e*i+r*o*c),t=n,e=i,r=o}var t,e,r;jc.point=function(u,i){t=u*Na,e=Math.sin(i*=Na),r=Math.cos(i),jc.point=n},jc.lineEnd=function(){jc.point=jc.lineEnd=g}}function dr(n,t){function e(t,e){var r=Math.cos(t),u=Math.cos(e),i=n(r*u);return[i*u*Math.sin(t),i*Math.sin(e)]}return e.invert=function(n,e){var r=Math.sqrt(n*n+e*e),u=t(r),i=Math.sin(u),o=Math.cos(u);return[Math.atan2(n*i,r*o),Math.asin(r&&e*i/r)]},e}function mr(n,t){function e(n,t){var e=oa(oa(t)-Ea)<Aa?0:o/Math.pow(u(t),i);return[e*Math.sin(i*n),o-e*Math.cos(i*n)]}var r=Math.cos(n),u=function(n){return Math.tan(Sa/4+n/2)},i=n===t?Math.sin(n):Math.log(r/Math.cos(t))/Math.log(u(t)/u(n)),o=r*Math.pow(u(n),i)/i;return i?(e.invert=function(n,t){var e=o-t,r=I(i)*Math.sqrt(n*n+e*e);return[Math.atan2(n,e)/i,2*Math.atan(Math.pow(o/r,1/i))-Ea]},e):xr}function yr(n,t){function e(n,t){var e=i-t;return[e*Math.sin(u*n),i-e*Math.cos(u*n)]}var r=Math.cos(n),u=n===t?Math.sin(n):(r-Math.cos(t))/(t-n),i=r/u+n;return oa(u)<Aa?er:(e.invert=function(n,t){var e=i-t;return[Math.atan2(n,e)/u,i-I(u)*Math.sqrt(n*n+e*e)]},e)}function xr(n,t){return[n,Math.log(Math.tan(Sa/4+t/2))]}function Mr(n){var t,e=Qe(n),r=e.scale,u=e.translate,i=e.clipExtent;return e.scale=function(){var n=r.apply(e,arguments);return n===e?t?e.clipExtent(null):e:n},e.translate=function(){var n=u.apply(e,arguments);return n===e?t?e.clipExtent(null):e:n},e.clipExtent=function(n){var o=i.apply(e,arguments);if(o===e){if(t=null==n){var a=Sa*r(),c=u();i([[c[0]-a,c[1]-a],[c[0]+a,c[1]+a]])}}else t&&(o=null);return o},e.clipExtent(null)}function _r(n,t){return[Math.log(Math.tan(Sa/4+t/2)),-n]}function br(n){return n[0]}function wr(n){return 
/* Minified build artifact: d3.js v3.4.3 — do not hand-edit; regenerate from upstream.
   This segment: Sr performs a hull scan over sorted points using the cross-product
   test Z (convex-hull support); kr lexicographic point compare; Er an orientation
   (left-of) test; Ar line-segment intersection point; Cr tests whether a ring is
   closed. Then the Voronoi machinery (appears to be a Fortune-style sweepline —
   TODO confirm against upstream d3.geom.voronoi): Nr/Lr/Tr manage pooled beach-line
   arcs (pool Jc, tree $c); qr removes an arc and collapses coincident circle events;
   zr inserts an arc for a new site, splitting/joining edges; Rr/Dr compute left/right
   parabola-intersection x-coordinates for tree descent; Pr is a cell record;
   Ur clips/closes cell edge lists against the bounding box [[x0,y0],[x1,y1]];
   jr angle compare; Hr a circle-event record; Fr/Or create/remove circle events in
   tree Wc (first event cached in Bc); Yr drops edges fully clipped away; Ir clips a
   single edge to the extent by slope/intercept cases.  Globals Xc/Vc hold cells/edges;
   Aa/Ca are epsilon constants defined elsewhere in the file. */
n[1]}function Sr(n){for(var t=n.length,e=[0,1],r=2,u=2;t>u;u++){for(;r>1&&Z(n[e[r-2]],n[e[r-1]],n[u])<=0;)--r;e[r++]=u}return e.slice(0,r)}function kr(n,t){return n[0]-t[0]||n[1]-t[1]}function Er(n,t,e){return(e[0]-t[0])*(n[1]-t[1])<(e[1]-t[1])*(n[0]-t[0])}function Ar(n,t,e,r){var u=n[0],i=e[0],o=t[0]-u,a=r[0]-i,c=n[1],s=e[1],l=t[1]-c,f=r[1]-s,h=(a*(c-s)-f*(u-i))/(f*o-a*l);return[u+h*o,c+h*l]}function Cr(n){var t=n[0],e=n[n.length-1];return!(t[0]-e[0]||t[1]-e[1])}function Nr(){Jr(this),this.edge=this.site=this.circle=null}function Lr(n){var t=Jc.pop()||new Nr;return t.site=n,t}function Tr(n){Or(n),$c.remove(n),Jc.push(n),Jr(n)}function qr(n){var t=n.circle,e=t.x,r=t.cy,u={x:e,y:r},i=n.P,o=n.N,a=[n];Tr(n);for(var c=i;c.circle&&oa(e-c.circle.x)<Aa&&oa(r-c.circle.cy)<Aa;)i=c.P,a.unshift(c),Tr(c),c=i;a.unshift(c),Or(c);for(var s=o;s.circle&&oa(e-s.circle.x)<Aa&&oa(r-s.circle.cy)<Aa;)o=s.N,a.push(s),Tr(s),s=o;a.push(s),Or(s);var l,f=a.length;for(l=1;f>l;++l)s=a[l],c=a[l-1],$r(s.edge,c.site,s.site,u);c=a[0],s=a[f-1],s.edge=Vr(c.site,s.site,null,u),Fr(c),Fr(s)}function zr(n){for(var t,e,r,u,i=n.x,o=n.y,a=$c._;a;)if(r=Rr(a,o)-i,r>Aa)a=a.L;else{if(u=i-Dr(a,o),!(u>Aa)){r>-Aa?(t=a.P,e=a):u>-Aa?(t=a,e=a.N):t=e=a;break}if(!a.R){t=a;break}a=a.R}var c=Lr(n);if($c.insert(t,c),t||e){if(t===e)return Or(t),e=Lr(t.site),$c.insert(c,e),c.edge=e.edge=Vr(t.site,c.site),Fr(t),Fr(e),void 0;if(!e)return c.edge=Vr(t.site,c.site),void 0;Or(t),Or(e);var s=t.site,l=s.x,f=s.y,h=n.x-l,g=n.y-f,p=e.site,v=p.x-l,d=p.y-f,m=2*(h*d-g*v),y=h*h+g*g,x=v*v+d*d,M={x:(d*y-g*x)/m+l,y:(h*x-v*y)/m+f};$r(e.edge,s,p,M),c.edge=Vr(s,n,null,M),e.edge=Vr(n,p,null,M),Fr(t),Fr(e)}}function Rr(n,t){var e=n.site,r=e.x,u=e.y,i=u-t;if(!i)return r;var o=n.P;if(!o)return-1/0;e=o.site;var a=e.x,c=e.y,s=c-t;if(!s)return a;var l=a-r,f=1/i-1/s,h=l/s;return f?(-h+Math.sqrt(h*h-2*f*(l*l/(-2*s)-c+s/2+u-i/2)))/f+r:(r+a)/2}function Dr(n,t){var e=n.N;if(e)return Rr(e,t);var r=n.site;return r.y===t?r.x:1/0}function 
Pr(n){this.site=n,this.edges=[]}function Ur(n){for(var t,e,r,u,i,o,a,c,s,l,f=n[0][0],h=n[1][0],g=n[0][1],p=n[1][1],v=Xc,d=v.length;d--;)if(i=v[d],i&&i.prepare())for(a=i.edges,c=a.length,o=0;c>o;)l=a[o].end(),r=l.x,u=l.y,s=a[++o%c].start(),t=s.x,e=s.y,(oa(r-t)>Aa||oa(u-e)>Aa)&&(a.splice(o,0,new Br(Xr(i.site,l,oa(r-f)<Aa&&p-u>Aa?{x:f,y:oa(t-f)<Aa?e:p}:oa(u-p)<Aa&&h-r>Aa?{x:oa(e-p)<Aa?t:h,y:p}:oa(r-h)<Aa&&u-g>Aa?{x:h,y:oa(t-h)<Aa?e:g}:oa(u-g)<Aa&&r-f>Aa?{x:oa(e-g)<Aa?t:f,y:g}:null),i.site,null)),++c)}function jr(n,t){return t.angle-n.angle}function Hr(){Jr(this),this.x=this.y=this.arc=this.site=this.cy=null}function Fr(n){var t=n.P,e=n.N;if(t&&e){var r=t.site,u=n.site,i=e.site;if(r!==i){var o=u.x,a=u.y,c=r.x-o,s=r.y-a,l=i.x-o,f=i.y-a,h=2*(c*f-s*l);if(!(h>=-Ca)){var g=c*c+s*s,p=l*l+f*f,v=(f*g-s*p)/h,d=(c*p-l*g)/h,f=d+a,m=Gc.pop()||new Hr;m.arc=n,m.site=u,m.x=v+o,m.y=f+Math.sqrt(v*v+d*d),m.cy=f,n.circle=m;for(var y=null,x=Wc._;x;)if(m.y<x.y||m.y===x.y&&m.x<=x.x){if(!x.L){y=x.P;break}x=x.L}else{if(!x.R){y=x;break}x=x.R}Wc.insert(y,m),y||(Bc=m)}}}}function Or(n){var t=n.circle;t&&(t.P||(Bc=t.N),Wc.remove(t),Gc.push(t),Jr(t),n.circle=null)}function Yr(n){for(var t,e=Vc,r=De(n[0][0],n[0][1],n[1][0],n[1][1]),u=e.length;u--;)t=e[u],(!Ir(t,n)||!r(t)||oa(t.a.x-t.b.x)<Aa&&oa(t.a.y-t.b.y)<Aa)&&(t.a=t.b=null,e.splice(u,1))}function Ir(n,t){var e=n.b;if(e)return!0;var r,u,i=n.a,o=t[0][0],a=t[1][0],c=t[0][1],s=t[1][1],l=n.l,f=n.r,h=l.x,g=l.y,p=f.x,v=f.y,d=(h+p)/2,m=(g+v)/2;if(v===g){if(o>d||d>=a)return;if(h>p){if(i){if(i.y>=s)return}else i={x:d,y:c};e={x:d,y:s}}else{if(i){if(i.y<c)return}else i={x:d,y:s};e={x:d,y:c}}}else if(r=(h-p)/(v-g),u=m-r*d,-1>r||r>1)if(h>p){if(i){if(i.y>=s)return}else i={x:(c-u)/r,y:c};e={x:(s-u)/r,y:s}}else{if(i){if(i.y<c)return}else i={x:(s-u)/r,y:s};e={x:(c-u)/r,y:c}}else if(v>g){if(i){if(i.x>=a)return}else i={x:o,y:r*o+u};e={x:a,y:r*a+u}}else{if(i){if(i.x<o)return}else i={x:a,y:r*a+u};e={x:o,y:r*o+u}}return n.a=i,n.b=e,!0}function 
Zr(n,t){this.l=n,this.r=t,this.a=this.b=null}function Vr(n,t,e,r){var u=new Zr(n,t);return Vc.push(u),e&&$r(u,n,t,e),r&&$r(u,t,n,r),Xc[n.i].edges.push(new Br(u,n,t)),Xc[t.i].edges.push(new Br(u,t,n)),u}function Xr(n,t,e){var r=new Zr(n,null);return r.a=t,r.b=e,Vc.push(r),r}function $r(n,t,e,r){n.a||n.b?n.l===e?n.b=r:n.a=r:(n.a=r,n.l=t,n.r=e)}function Br(n,t,e){var r=n.a,u=n.b;this.edge=n,this.site=t,this.angle=e?Math.atan2(e.y-t.y,e.x-t.x):n.l===t?Math.atan2(u.x-r.x,r.y-u.y):Math.atan2(r.x-u.x,u.y-r.y)}function Wr(){this._=null}function Jr(n){n.U=n.C=n.L=n.R=n.P=n.N=null}function Gr(n,t){var e=t,r=t.R,u=e.U;u?u.L===e?u.L=r:u.R=r:n._=r,r.U=u,e.U=r,e.R=r.L,e.R&&(e.R.U=e),r.L=e}function Kr(n,t){var e=t,r=t.L,u=e.U;u?u.L===e?u.L=r:u.R=r:n._=r,r.U=u,e.U=r,e.L=r.R,e.L&&(e.L.U=e),r.R=e}function Qr(n){for(;n.L;)n=n.L;return n}function nu(n,t){var e,r,u,i=n.sort(tu).pop();for(Vc=[],Xc=new Array(n.length),$c=new Wr,Wc=new Wr;;)if(u=Bc,i&&(!u||i.y<u.y||i.y===u.y&&i.x<u.x))(i.x!==e||i.y!==r)&&(Xc[i.i]=new Pr(i),zr(i),e=i.x,r=i.y),i=n.pop();else{if(!u)break;qr(u.arc)}t&&(Yr(t),Ur(t));var o={cells:Xc,edges:Vc};return $c=Wc=Vc=Xc=null,o}function tu(n,t){return t.y-n.y||t.x-n.x}function eu(n,t,e){return(n.x-e.x)*(t.y-n.y)-(n.x-t.x)*(e.y-n.y)}function ru(n){return n.x}function uu(n){return n.y}function iu(){return{leaf:!0,nodes:[],point:null,x:null,y:null}}function ou(n,t,e,r,u,i){if(!n(t,e,r,u,i)){var o=.5*(e+u),a=.5*(r+i),c=t.nodes;c[0]&&ou(n,c[0],e,r,o,a),c[1]&&ou(n,c[1],o,r,u,a),c[2]&&ou(n,c[2],e,a,o,i),c[3]&&ou(n,c[3],o,a,u,i)}}function au(n,t){n=Xo.rgb(n),t=Xo.rgb(t);var e=n.r,r=n.g,u=n.b,i=t.r-e,o=t.g-r,a=t.b-u;return function(n){return"#"+vt(Math.round(e+i*n))+vt(Math.round(r+o*n))+vt(Math.round(u+a*n))}}function cu(n,t){var e,r={},u={};for(e in n)e in t?r[e]=fu(n[e],t[e]):u[e]=n[e];for(e in t)e in n||(u[e]=t[e]);return function(n){for(e in r)u[e]=r[e](n);return u}}function su(n,t){return t-=n=+n,function(e){return n+t*e}}function lu(n,t){var 
/* Minified build artifact: d3.js v3.4.3 — do not hand-edit; regenerate from upstream.
   This segment: lu(n,t) continues — the string interpolator that matches numeric runs
   in both strings via regex Qc, interpolating matched numbers with su and keeping the
   literal text; fu dispatches over the Xo.interpolators registry (last match wins);
   hu interpolates arrays element-wise; gu/pu/vu are easing-mode wrappers (clamp to
   [0,1], reverse "-out", reflect "-in-out"); du/mu/yu/xu/Mu/_u/bu/wu/Su/ku are easing
   curves (quad, cubic, poly, sin, exp, circle, elastic, back, bounce — names inferred
   from the formulas; confirm against d3.ease docs); Eu/Au/Cu interpolate in HCL/HSL/LAB
   color spaces via rt/nt/ot with hue wrap-around at +/-180; Nu is a rounding number
   interpolator; Lu decomposes a 2D transform matrix {a..f} into rotate/translate/
   scale/skew (Tu = dot product, qu = normalize, zu = scaled add); Ru (cut at the
   segment end) begins the transform-string interpolator. */
e,r,u,i,o,a=0,c=0,s=[],l=[];for(n+="",t+="",Qc.lastIndex=0,r=0;e=Qc.exec(t);++r)e.index&&s.push(t.substring(a,c=e.index)),l.push({i:s.length,x:e[0]}),s.push(null),a=Qc.lastIndex;for(a<t.length&&s.push(t.substring(a)),r=0,i=l.length;(e=Qc.exec(n))&&i>r;++r)if(o=l[r],o.x==e[0]){if(o.i)if(null==s[o.i+1])for(s[o.i-1]+=o.x,s.splice(o.i,1),u=r+1;i>u;++u)l[u].i--;else for(s[o.i-1]+=o.x+s[o.i+1],s.splice(o.i,2),u=r+1;i>u;++u)l[u].i-=2;else if(null==s[o.i+1])s[o.i]=o.x;else for(s[o.i]=o.x+s[o.i+1],s.splice(o.i+1,1),u=r+1;i>u;++u)l[u].i--;l.splice(r,1),i--,r--}else o.x=su(parseFloat(e[0]),parseFloat(o.x));for(;i>r;)o=l.pop(),null==s[o.i+1]?s[o.i]=o.x:(s[o.i]=o.x+s[o.i+1],s.splice(o.i+1,1)),i--;return 1===s.length?null==s[0]?(o=l[0].x,function(n){return o(n)+""}):function(){return t}:function(n){for(r=0;i>r;++r)s[(o=l[r]).i]=o.x(n);return s.join("")}}function fu(n,t){for(var e,r=Xo.interpolators.length;--r>=0&&!(e=Xo.interpolators[r](n,t)););return e}function hu(n,t){var e,r=[],u=[],i=n.length,o=t.length,a=Math.min(n.length,t.length);for(e=0;a>e;++e)r.push(fu(n[e],t[e]));for(;i>e;++e)u[e]=n[e];for(;o>e;++e)u[e]=t[e];return function(n){for(e=0;a>e;++e)u[e]=r[e](n);return u}}function gu(n){return function(t){return 0>=t?0:t>=1?1:n(t)}}function pu(n){return function(t){return 1-n(1-t)}}function vu(n){return function(t){return.5*(.5>t?n(2*t):2-n(2-2*t))}}function du(n){return n*n}function mu(n){return n*n*n}function yu(n){if(0>=n)return 0;if(n>=1)return 1;var t=n*n,e=t*n;return 4*(.5>n?e:3*(n-t)+e-.75)}function xu(n){return function(t){return Math.pow(t,n)}}function Mu(n){return 1-Math.cos(n*Ea)}function _u(n){return Math.pow(2,10*(n-1))}function bu(n){return 1-Math.sqrt(1-n*n)}function wu(n,t){var e;return arguments.length<2&&(t=.45),arguments.length?e=t/ka*Math.asin(1/n):(n=1,e=t/4),function(r){return 1+n*Math.pow(2,-10*r)*Math.sin((r-e)*ka/t)}}function Su(n){return n||(n=1.70158),function(t){return t*t*((n+1)*t-n)}}function ku(n){return 
1/2.75>n?7.5625*n*n:2/2.75>n?7.5625*(n-=1.5/2.75)*n+.75:2.5/2.75>n?7.5625*(n-=2.25/2.75)*n+.9375:7.5625*(n-=2.625/2.75)*n+.984375}function Eu(n,t){n=Xo.hcl(n),t=Xo.hcl(t);var e=n.h,r=n.c,u=n.l,i=t.h-e,o=t.c-r,a=t.l-u;return isNaN(o)&&(o=0,r=isNaN(r)?t.c:r),isNaN(i)?(i=0,e=isNaN(e)?t.h:e):i>180?i-=360:-180>i&&(i+=360),function(n){return rt(e+i*n,r+o*n,u+a*n)+""}}function Au(n,t){n=Xo.hsl(n),t=Xo.hsl(t);var e=n.h,r=n.s,u=n.l,i=t.h-e,o=t.s-r,a=t.l-u;return isNaN(o)&&(o=0,r=isNaN(r)?t.s:r),isNaN(i)?(i=0,e=isNaN(e)?t.h:e):i>180?i-=360:-180>i&&(i+=360),function(n){return nt(e+i*n,r+o*n,u+a*n)+""}}function Cu(n,t){n=Xo.lab(n),t=Xo.lab(t);var e=n.l,r=n.a,u=n.b,i=t.l-e,o=t.a-r,a=t.b-u;return function(n){return ot(e+i*n,r+o*n,u+a*n)+""}}function Nu(n,t){return t-=n,function(e){return Math.round(n+t*e)}}function Lu(n){var t=[n.a,n.b],e=[n.c,n.d],r=qu(t),u=Tu(t,e),i=qu(zu(e,t,-u))||0;t[0]*e[1]<e[0]*t[1]&&(t[0]*=-1,t[1]*=-1,r*=-1,u*=-1),this.rotate=(r?Math.atan2(t[1],t[0]):Math.atan2(-e[0],e[1]))*La,this.translate=[n.e,n.f],this.scale=[r,i],this.skew=i?Math.atan2(u,i)*La:0}function Tu(n,t){return n[0]*t[0]+n[1]*t[1]}function qu(n){var t=Math.sqrt(Tu(n,n));return t&&(n[0]/=t,n[1]/=t),t}function zu(n,t,e){return n[0]+=e*t[0],n[1]+=e*t[1],n}function Ru(n,t){var e,r=[],u=[],i=Xo.transform(n),o=Xo.transform(t),a=i.translate,c=o.translate,s=i.rotate,l=o.rotate,f=i.skew,h=o.skew,g=i.scale,p=o.scale;return 
/* Minified build artifact: d3.js v3.4.3 — do not hand-edit; regenerate from upstream.
   This segment: Ru continues — it builds a piecewise "translate(...)rotate(...)
   skewX(...)scale(...)" string interpolator, taking the short way around for rotation
   (+/-360 adjustment); Du/Pu are unclamped/clamped deinterpolators; Uu computes the
   node path between hierarchy source and target via the common ancestor Hu (ju lists
   ancestors); Fu/Ou/Yu/Iu toggle force-layout "fixed" bit flags; Zu recursively
   accumulates charge and center-of-charge per quadtree node (Barnes-Hut-style
   approximation support — confirm against d3.layout.force docs), jittering coincident
   points; Vu/Xu/$u/Bu/Wu are hierarchy plumbing (rebind, children/value accessors,
   links); Ju/Gu/Ku/Qu/ni/ti/ei/ri serve the stack layout (x/y accessors, y0 setter,
   order/offset helpers); ui/ii/oi compute histogram bin thresholds (Sturges-style
   count in ui — confirm) and range; ai..mi implement the tree layout walks
   (separation, left/right threads, shift/change bookkeeping per vi/di, ancestor pick
   mi); yi sorts by value; xi/Mi/_i/bi insert/remove/test-overlap/place circles on the
   packing front chain (bi is cut at the segment end). */
a[0]!=c[0]||a[1]!=c[1]?(r.push("translate(",null,",",null,")"),u.push({i:1,x:su(a[0],c[0])},{i:3,x:su(a[1],c[1])})):c[0]||c[1]?r.push("translate("+c+")"):r.push(""),s!=l?(s-l>180?l+=360:l-s>180&&(s+=360),u.push({i:r.push(r.pop()+"rotate(",null,")")-2,x:su(s,l)})):l&&r.push(r.pop()+"rotate("+l+")"),f!=h?u.push({i:r.push(r.pop()+"skewX(",null,")")-2,x:su(f,h)}):h&&r.push(r.pop()+"skewX("+h+")"),g[0]!=p[0]||g[1]!=p[1]?(e=r.push(r.pop()+"scale(",null,",",null,")"),u.push({i:e-4,x:su(g[0],p[0])},{i:e-2,x:su(g[1],p[1])})):(1!=p[0]||1!=p[1])&&r.push(r.pop()+"scale("+p+")"),e=u.length,function(n){for(var t,i=-1;++i<e;)r[(t=u[i]).i]=t.x(n);return r.join("")}}function Du(n,t){return t=t-(n=+n)?1/(t-n):0,function(e){return(e-n)*t}}function Pu(n,t){return t=t-(n=+n)?1/(t-n):0,function(e){return Math.max(0,Math.min(1,(e-n)*t))}}function Uu(n){for(var t=n.source,e=n.target,r=Hu(t,e),u=[t];t!==r;)t=t.parent,u.push(t);for(var i=u.length;e!==r;)u.splice(i,0,e),e=e.parent;return u}function ju(n){for(var t=[],e=n.parent;null!=e;)t.push(n),n=e,e=e.parent;return t.push(n),t}function Hu(n,t){if(n===t)return n;for(var e=ju(n),r=ju(t),u=e.pop(),i=r.pop(),o=null;u===i;)o=u,u=e.pop(),i=r.pop();return o}function Fu(n){n.fixed|=2}function Ou(n){n.fixed&=-7}function Yu(n){n.fixed|=4,n.px=n.x,n.py=n.y}function Iu(n){n.fixed&=-5}function Zu(n,t,e){var r=0,u=0;if(n.charge=0,!n.leaf)for(var i,o=n.nodes,a=o.length,c=-1;++c<a;)i=o[c],null!=i&&(Zu(i,t,e),n.charge+=i.charge,r+=i.charge*i.cx,u+=i.charge*i.cy);if(n.point){n.leaf||(n.point.x+=Math.random()-.5,n.point.y+=Math.random()-.5);var s=t*e[n.point.index];n.charge+=n.pointCharge=s,r+=s*n.point.x,u+=s*n.point.y}n.cx=r/n.charge,n.cy=u/n.charge}function Vu(n,t){return Xo.rebind(n,t,"sort","children","value"),n.nodes=n,n.links=Wu,n}function Xu(n){return n.children}function $u(n){return n.value}function Bu(n,t){return t.value-n.value}function Wu(n){return 
Xo.merge(n.map(function(n){return(n.children||[]).map(function(t){return{source:n,target:t}})}))}function Ju(n){return n.x}function Gu(n){return n.y}function Ku(n,t,e){n.y0=t,n.y=e}function Qu(n){return Xo.range(n.length)}function ni(n){for(var t=-1,e=n[0].length,r=[];++t<e;)r[t]=0;return r}function ti(n){for(var t,e=1,r=0,u=n[0][1],i=n.length;i>e;++e)(t=n[e][1])>u&&(r=e,u=t);return r}function ei(n){return n.reduce(ri,0)}function ri(n,t){return n+t[1]}function ui(n,t){return ii(n,Math.ceil(Math.log(t.length)/Math.LN2+1))}function ii(n,t){for(var e=-1,r=+n[0],u=(n[1]-r)/t,i=[];++e<=t;)i[e]=u*e+r;return i}function oi(n){return[Xo.min(n),Xo.max(n)]}function ai(n,t){return n.parent==t.parent?1:2}function ci(n){var t=n.children;return t&&t.length?t[0]:n._tree.thread}function si(n){var t,e=n.children;return e&&(t=e.length)?e[t-1]:n._tree.thread}function li(n,t){var e=n.children;if(e&&(u=e.length))for(var r,u,i=-1;++i<u;)t(r=li(e[i],t),n)>0&&(n=r);return n}function fi(n,t){return n.x-t.x}function hi(n,t){return t.x-n.x}function gi(n,t){return n.depth-t.depth}function pi(n,t){function e(n,r){var u=n.children;if(u&&(o=u.length))for(var i,o,a=null,c=-1;++c<o;)i=u[c],e(i,a),a=i;t(n,r)}e(n,null)}function vi(n){for(var t,e=0,r=0,u=n.children,i=u.length;--i>=0;)t=u[i]._tree,t.prelim+=e,t.mod+=e,e+=t.shift+(r+=t.change)}function di(n,t,e){n=n._tree,t=t._tree;var r=e/(t.number-n.number);n.change+=r,t.change-=r,t.shift+=e,t.prelim+=e,t.mod+=e}function mi(n,t,e){return n._tree.ancestor.parent==t.parent?n._tree.ancestor:e}function yi(n,t){return n.value-t.value}function xi(n,t){var e=n._pack_next;n._pack_next=t,t._pack_prev=n,t._pack_next=e,e._pack_prev=t}function Mi(n,t){n._pack_next=t,t._pack_prev=n}function _i(n,t){var e=t.x-n.x,r=t.y-n.y,u=n.r+t.r;return.999*u*u>e*e+r*r}function bi(n){function t(n){l=Math.min(n.x-n.r,l),f=Math.max(n.x+n.r,f),h=Math.min(n.y-n.r,h),g=Math.max(n.y+n.r,g)}if((e=n.children)&&(s=e.length)){var 
e,r,u,i,o,a,c,s,l=1/0,f=-1/0,h=1/0,g=-1/0;if(e.forEach(wi),r=e[0],r.x=-r.r,r.y=0,t(r),s>1&&(u=e[1],u.x=u.r,u.y=0,t(u),s>2))for(i=e[2],Ei(r,u,i),t(i),xi(r,i),r._pack_prev=i,xi(i,u),u=r._pack_next,o=3;s>o;o++){Ei(r,u,i=e[o]);var p=0,v=1,d=1;for(a=u._pack_next;a!==u;a=a._pack_next,v++)if(_i(a,i)){p=1;break}if(1==p)for(c=r._pack_prev;c!==a._pack_prev&&!_i(c,i);c=c._pack_prev,d++);p?(d>v||v==d&&u.r<r.r?Mi(r,u=a):Mi(r=c,u),o--):(xi(r,i),u=i,t(i))}var m=(l+f)/2,y=(h+g)/2,x=0;for(o=0;s>o;o++)i=e[o],i.x-=m,i.y-=y,x=Math.max(x,i.r+Math.sqrt(i.x*i.x+i.y*i.y));n.r=x,e.forEach(Si)}}function wi(n){n._pack_next=n._pack_prev=n}function Si(n){delete n._pack_next,delete n._pack_prev}function ki(n,t,e,r){var u=n.children;if(n.x=t+=r*n.x,n.y=e+=r*n.y,n.r*=r,u)for(var i=-1,o=u.length;++i<o;)ki(u[i],t,e,r)}function Ei(n,t,e){var r=n.r+e.r,u=t.x-n.x,i=t.y-n.y;if(r&&(u||i)){var o=t.r+e.r,a=u*u+i*i;o*=o,r*=r;var c=.5+(r-o)/(2*a),s=Math.sqrt(Math.max(0,2*o*(r+a)-(r-=a)*r-o*o))/(2*a);e.x=n.x+c*u+s*i,e.y=n.y+c*i-s*u}else e.x=n.x+r,e.y=n.y}function Ai(n){return 1+Xo.max(n,function(n){return n.y})}function Ci(n){return n.reduce(function(n,t){return n+t.x},0)/n.length}function Ni(n){var t=n.children;return t&&t.length?Ni(t[0]):n}function Li(n){var t,e=n.children;return e&&(t=e.length)?Li(e[t-1]):n}function Ti(n){return{x:n.x,y:n.y,dx:n.dx,dy:n.dy}}function qi(n,t){var e=n.x+t[3],r=n.y+t[0],u=n.dx-t[1]-t[3],i=n.dy-t[0]-t[2];return 0>u&&(e+=u/2,u=0),0>i&&(r+=i/2,i=0),{x:e,y:r,dx:u,dy:i}}function zi(n){var t=n[0],e=n[n.length-1];return e>t?[t,e]:[e,t]}function Ri(n){return n.rangeExtent?n.rangeExtent():zi(n.range())}function Di(n,t,e,r){var u=e(n[0],n[1]),i=r(t[0],t[1]);return function(n){return i(u(n))}}function Pi(n,t){var e,r=0,u=n.length-1,i=n[r],o=n[u];return i>o&&(e=r,r=u,u=e,e=i,i=o,o=e),n[r]=t.floor(i),n[u]=t.ceil(o),n}function Ui(n){return n?{floor:function(t){return Math.floor(t/n)*n},ceil:function(t){return Math.ceil(t/n)*n}}:ls}function ji(n,t,e,r){var 
/* Minified build artifact: d3.js v3.4.3 — do not hand-edit; regenerate from upstream.
   This segment implements d3's scales: ji continues — the polylinear scale mapping a
   multi-segment domain to range via bisect; Hi is the linear scale factory (invert,
   domain, range, rangeRound, clamp, interpolate, ticks, tickFormat, nice, copy);
   Fi rebinds range methods; Oi/Yi compute "nice" domain bounds and tick step
   (powers of 10 scaled by .15/.35/.75 thresholds); Ii generates ticks via Xo.range;
   Zi builds tick number formats; Vi/Xi derive decimal precision; $i is the log scale
   (handles positive and negative domains via the e flag, per-decade sub-ticks, tick
   filtering by density in tickFormat); Bi is the pow scale built on exponent
   functions Wi; Ji is the ordinal scale with rangePoints / rangeBands /
   rangeRoundBands / rangeBand / rangeExtent; Gi the quantile scale (thresholds via
   Xo.quantile); Ki the quantize scale; Qi the threshold scale; each exposes
   invertExtent and copy as appropriate. */
u=[],i=[],o=0,a=Math.min(n.length,t.length)-1;for(n[a]<n[0]&&(n=n.slice().reverse(),t=t.slice().reverse());++o<=a;)u.push(e(n[o-1],n[o])),i.push(r(t[o-1],t[o]));return function(t){var e=Xo.bisect(n,t,1,a)-1;return i[e](u[e](t))}}function Hi(n,t,e,r){function u(){var u=Math.min(n.length,t.length)>2?ji:Di,c=r?Pu:Du;return o=u(n,t,c,e),a=u(t,n,c,fu),i}function i(n){return o(n)}var o,a;return i.invert=function(n){return a(n)},i.domain=function(t){return arguments.length?(n=t.map(Number),u()):n},i.range=function(n){return arguments.length?(t=n,u()):t},i.rangeRound=function(n){return i.range(n).interpolate(Nu)},i.clamp=function(n){return arguments.length?(r=n,u()):r},i.interpolate=function(n){return arguments.length?(e=n,u()):e},i.ticks=function(t){return Ii(n,t)},i.tickFormat=function(t,e){return Zi(n,t,e)},i.nice=function(t){return Oi(n,t),u()},i.copy=function(){return Hi(n,t,e,r)},u()}function Fi(n,t){return Xo.rebind(n,t,"range","rangeRound","interpolate","clamp")}function Oi(n,t){return Pi(n,Ui(Yi(n,t)[2]))}function Yi(n,t){null==t&&(t=10);var e=zi(n),r=e[1]-e[0],u=Math.pow(10,Math.floor(Math.log(r/t)/Math.LN10)),i=t/r*u;return.15>=i?u*=10:.35>=i?u*=5:.75>=i&&(u*=2),e[0]=Math.ceil(e[0]/u)*u,e[1]=Math.floor(e[1]/u)*u+.5*u,e[2]=u,e}function Ii(n,t){return Xo.range.apply(Xo,Yi(n,t))}function Zi(n,t,e){var r=Yi(n,t);return Xo.format(e?e.replace(Qa,function(n,t,e,u,i,o,a,c,s,l){return[t,e,u,i,o,a,c,s||"."+Xi(l,r),l].join("")}):",."+Vi(r[2])+"f")}function Vi(n){return-Math.floor(Math.log(n)/Math.LN10+.01)}function Xi(n,t){var e=Vi(t[2]);return n in fs?Math.abs(e-Vi(Math.max(Math.abs(t[0]),Math.abs(t[1]))))+ +("e"!==n):e-2*("%"===n)}function $i(n,t,e,r){function u(n){return(e?Math.log(0>n?0:n):-Math.log(n>0?0:-n))/Math.log(t)}function i(n){return e?Math.pow(t,n):-Math.pow(t,-n)}function o(t){return n(u(t))}return o.invert=function(t){return i(n.invert(t))},o.domain=function(t){return 
arguments.length?(e=t[0]>=0,n.domain((r=t.map(Number)).map(u)),o):r},o.base=function(e){return arguments.length?(t=+e,n.domain(r.map(u)),o):t},o.nice=function(){var t=Pi(r.map(u),e?Math:gs);return n.domain(t),r=t.map(i),o},o.ticks=function(){var n=zi(r),o=[],a=n[0],c=n[1],s=Math.floor(u(a)),l=Math.ceil(u(c)),f=t%1?2:t;if(isFinite(l-s)){if(e){for(;l>s;s++)for(var h=1;f>h;h++)o.push(i(s)*h);o.push(i(s))}else for(o.push(i(s));s++<l;)for(var h=f-1;h>0;h--)o.push(i(s)*h);for(s=0;o[s]<a;s++);for(l=o.length;o[l-1]>c;l--);o=o.slice(s,l)}return o},o.tickFormat=function(n,t){if(!arguments.length)return hs;arguments.length<2?t=hs:"function"!=typeof t&&(t=Xo.format(t));var r,a=Math.max(.1,n/o.ticks().length),c=e?(r=1e-12,Math.ceil):(r=-1e-12,Math.floor);return function(n){return n/i(c(u(n)+r))<=a?t(n):""}},o.copy=function(){return $i(n.copy(),t,e,r)},Fi(o,n)}function Bi(n,t,e){function r(t){return n(u(t))}var u=Wi(t),i=Wi(1/t);return r.invert=function(t){return i(n.invert(t))},r.domain=function(t){return arguments.length?(n.domain((e=t.map(Number)).map(u)),r):e},r.ticks=function(n){return Ii(e,n)},r.tickFormat=function(n,t){return Zi(e,n,t)},r.nice=function(n){return r.domain(Oi(e,n))},r.exponent=function(o){return arguments.length?(u=Wi(t=o),i=Wi(1/t),n.domain(e.map(u)),r):t},r.copy=function(){return Bi(n.copy(),t,e)},Fi(r,n)}function Wi(n){return function(t){return 0>t?-Math.pow(-t,n):Math.pow(t,n)}}function Ji(n,t){function e(e){return o[((i.get(e)||"range"===t.t&&i.set(e,n.push(e)))-1)%o.length]}function r(t,e){return Xo.range(n.length).map(function(n){return t+e*n})}var i,o,a;return e.domain=function(r){if(!arguments.length)return n;n=[],i=new u;for(var o,a=-1,c=r.length;++a<c;)i.has(o=r[a])||i.set(o,n.push(o));return e[t.t].apply(e,t.a)},e.range=function(n){return arguments.length?(o=n,a=0,t={t:"range",a:arguments},e):o},e.rangePoints=function(u,i){arguments.length<2&&(i=0);var c=u[0],s=u[1],l=(s-c)/(Math.max(1,n.length-1)+i);return 
o=r(n.length<2?(c+s)/2:c+l*i/2,l),a=0,t={t:"rangePoints",a:arguments},e},e.rangeBands=function(u,i,c){arguments.length<2&&(i=0),arguments.length<3&&(c=i);var s=u[1]<u[0],l=u[s-0],f=u[1-s],h=(f-l)/(n.length-i+2*c);return o=r(l+h*c,h),s&&o.reverse(),a=h*(1-i),t={t:"rangeBands",a:arguments},e},e.rangeRoundBands=function(u,i,c){arguments.length<2&&(i=0),arguments.length<3&&(c=i);var s=u[1]<u[0],l=u[s-0],f=u[1-s],h=Math.floor((f-l)/(n.length-i+2*c)),g=f-l-(n.length-i)*h;return o=r(l+Math.round(g/2),h),s&&o.reverse(),a=Math.round(h*(1-i)),t={t:"rangeRoundBands",a:arguments},e},e.rangeBand=function(){return a},e.rangeExtent=function(){return zi(t.a[0])},e.copy=function(){return Ji(n,t)},e.domain(n)}function Gi(n,t){function e(){var e=0,i=t.length;for(u=[];++e<i;)u[e-1]=Xo.quantile(n,e/i);return r}function r(n){return isNaN(n=+n)?void 0:t[Xo.bisect(u,n)]}var u;return r.domain=function(t){return arguments.length?(n=t.filter(function(n){return!isNaN(n)}).sort(Xo.ascending),e()):n},r.range=function(n){return arguments.length?(t=n,e()):t},r.quantiles=function(){return u},r.invertExtent=function(e){return e=t.indexOf(e),0>e?[0/0,0/0]:[e>0?u[e-1]:n[0],e<u.length?u[e]:n[n.length-1]]},r.copy=function(){return Gi(n,t)},e()}function Ki(n,t,e){function r(t){return e[Math.max(0,Math.min(o,Math.floor(i*(t-n))))]}function u(){return i=e.length/(t-n),o=e.length-1,r}var i,o;return r.domain=function(e){return arguments.length?(n=+e[0],t=+e[e.length-1],u()):[n,t]},r.range=function(n){return arguments.length?(e=n,u()):e},r.invertExtent=function(t){return t=e.indexOf(t),t=0>t?0/0:t/i+n,[t,t+1/i]},r.copy=function(){return Ki(n,t,e)},u()}function Qi(n,t){function e(e){return e>=e?t[Xo.bisect(n,e)]:void 0}return e.domain=function(t){return arguments.length?(n=t,e):n},e.range=function(n){return arguments.length?(t=n,e):t},e.invertExtent=function(e){return e=t.indexOf(e),[n[e-1],n[e]]},e.copy=function(){return Qi(n,t)},e}function no(n){function t(n){return+n}return 
/* Minified build artifact: d3.js v3.4.3 — do not hand-edit; regenerate from upstream.
   This segment: no(n) continues — the identity scale (invert === forward);
   to/eo/ro/uo are arc accessors (innerRadius/outerRadius/startAngle/endAngle);
   io is the line generator with x/y/defined/interpolate/tension accessors emitting
   SVG path strings; oo/ao are "linear"/"linear-closed" interpolation; co/so/lo are
   step variants (H/V moves); fo/ho/go are cardinal open/closed/plain splines; po
   converts points+tangents into C/S/Q Bezier path commands; vo computes cardinal
   tangents from tension; mo/yo/xo are basis closed/open/plain B-spline emitters using
   the basis dot-products _o/bo against weight vectors _s/bs/ws; Mo is the bundle
   interpolation (straightens toward the chord by beta t); wo/So/ko compute slopes and
   monotone-constrained tangents (Eo = "monotone" interpolation); Ao converts radial
   points to Cartesian using angle offset ys; Co is the area generator (x0/x1/y0/y1,
   paired top path + reversed bottom path, interpolate/tension); No/Lo/To are radial
   accessors/wrappers; qo/zo are symbol defaults (64 px, "circle"); Ro emits a circle
   path of the given area; Do wraps a selection as a transition with id; Po (cut at
   segment end) begins the transition tween setter. */
t.invert=t,t.domain=t.range=function(e){return arguments.length?(n=e.map(t),t):n},t.ticks=function(t){return Ii(n,t)},t.tickFormat=function(t,e){return Zi(n,t,e)},t.copy=function(){return no(n)},t}function to(n){return n.innerRadius}function eo(n){return n.outerRadius}function ro(n){return n.startAngle}function uo(n){return n.endAngle}function io(n){function t(t){function o(){s.push("M",i(n(l),a))}for(var c,s=[],l=[],f=-1,h=t.length,g=_t(e),p=_t(r);++f<h;)u.call(this,c=t[f],f)?l.push([+g.call(this,c,f),+p.call(this,c,f)]):l.length&&(o(),l=[]);return l.length&&o(),s.length?s.join(""):null}var e=br,r=wr,u=be,i=oo,o=i.key,a=.7;return t.x=function(n){return arguments.length?(e=n,t):e},t.y=function(n){return arguments.length?(r=n,t):r},t.defined=function(n){return arguments.length?(u=n,t):u},t.interpolate=function(n){return arguments.length?(o="function"==typeof n?i=n:(i=Ms.get(n)||oo).key,t):o},t.tension=function(n){return arguments.length?(a=n,t):a},t}function oo(n){return n.join("L")}function ao(n){return oo(n)+"Z"}function co(n){for(var t=0,e=n.length,r=n[0],u=[r[0],",",r[1]];++t<e;)u.push("H",(r[0]+(r=n[t])[0])/2,"V",r[1]);return e>1&&u.push("H",r[0]),u.join("")}function so(n){for(var t=0,e=n.length,r=n[0],u=[r[0],",",r[1]];++t<e;)u.push("V",(r=n[t])[1],"H",r[0]);return u.join("")}function lo(n){for(var t=0,e=n.length,r=n[0],u=[r[0],",",r[1]];++t<e;)u.push("H",(r=n[t])[0],"V",r[1]);return u.join("")}function fo(n,t){return n.length<4?oo(n):n[1]+po(n.slice(1,n.length-1),vo(n,t))}function ho(n,t){return n.length<3?oo(n):n[0]+po((n.push(n[0]),n),vo([n[n.length-2]].concat(n,[n[1]]),t))}function go(n,t){return n.length<3?oo(n):n[0]+po(n,vo(n,t))}function po(n,t){if(t.length<1||n.length!=t.length&&n.length!=t.length+2)return oo(n);var 
e=n.length!=t.length,r="",u=n[0],i=n[1],o=t[0],a=o,c=1;if(e&&(r+="Q"+(i[0]-2*o[0]/3)+","+(i[1]-2*o[1]/3)+","+i[0]+","+i[1],u=n[1],c=2),t.length>1){a=t[1],i=n[c],c++,r+="C"+(u[0]+o[0])+","+(u[1]+o[1])+","+(i[0]-a[0])+","+(i[1]-a[1])+","+i[0]+","+i[1];for(var s=2;s<t.length;s++,c++)i=n[c],a=t[s],r+="S"+(i[0]-a[0])+","+(i[1]-a[1])+","+i[0]+","+i[1]}if(e){var l=n[c];r+="Q"+(i[0]+2*a[0]/3)+","+(i[1]+2*a[1]/3)+","+l[0]+","+l[1]}return r}function vo(n,t){for(var e,r=[],u=(1-t)/2,i=n[0],o=n[1],a=1,c=n.length;++a<c;)e=i,i=o,o=n[a],r.push([u*(o[0]-e[0]),u*(o[1]-e[1])]);return r}function mo(n){if(n.length<3)return oo(n);var t=1,e=n.length,r=n[0],u=r[0],i=r[1],o=[u,u,u,(r=n[1])[0]],a=[i,i,i,r[1]],c=[u,",",i,"L",_o(ws,o),",",_o(ws,a)];for(n.push(n[e-1]);++t<=e;)r=n[t],o.shift(),o.push(r[0]),a.shift(),a.push(r[1]),bo(c,o,a);return n.pop(),c.push("L",r),c.join("")}function yo(n){if(n.length<4)return oo(n);for(var t,e=[],r=-1,u=n.length,i=[0],o=[0];++r<3;)t=n[r],i.push(t[0]),o.push(t[1]);for(e.push(_o(ws,i)+","+_o(ws,o)),--r;++r<u;)t=n[r],i.shift(),i.push(t[0]),o.shift(),o.push(t[1]),bo(e,i,o);return e.join("")}function xo(n){for(var t,e,r=-1,u=n.length,i=u+4,o=[],a=[];++r<4;)e=n[r%u],o.push(e[0]),a.push(e[1]);for(t=[_o(ws,o),",",_o(ws,a)],--r;++r<i;)e=n[r%u],o.shift(),o.push(e[0]),a.shift(),a.push(e[1]),bo(t,o,a);return t.join("")}function Mo(n,t){var e=n.length-1;if(e)for(var r,u,i=n[0][0],o=n[0][1],a=n[e][0]-i,c=n[e][1]-o,s=-1;++s<=e;)r=n[s],u=s/e,r[0]=t*r[0]+(1-t)*(i+u*a),r[1]=t*r[1]+(1-t)*(o+u*c);return mo(n)}function _o(n,t){return n[0]*t[0]+n[1]*t[1]+n[2]*t[2]+n[3]*t[3]}function bo(n,t,e){n.push("C",_o(_s,t),",",_o(_s,e),",",_o(bs,t),",",_o(bs,e),",",_o(ws,t),",",_o(ws,e))}function wo(n,t){return(t[1]-n[1])/(t[0]-n[0])}function So(n){for(var t=0,e=n.length-1,r=[],u=n[0],i=n[1],o=r[0]=wo(u,i);++t<e;)r[t]=(o+(o=wo(u=i,i=n[t+1])))/2;return r[t]=o,r}function ko(n){for(var 
t,e,r,u,i=[],o=So(n),a=-1,c=n.length-1;++a<c;)t=wo(n[a],n[a+1]),oa(t)<Aa?o[a]=o[a+1]=0:(e=o[a]/t,r=o[a+1]/t,u=e*e+r*r,u>9&&(u=3*t/Math.sqrt(u),o[a]=u*e,o[a+1]=u*r));for(a=-1;++a<=c;)u=(n[Math.min(c,a+1)][0]-n[Math.max(0,a-1)][0])/(6*(1+o[a]*o[a])),i.push([u||0,o[a]*u||0]);return i}function Eo(n){return n.length<3?oo(n):n[0]+po(n,ko(n))}function Ao(n){for(var t,e,r,u=-1,i=n.length;++u<i;)t=n[u],e=t[0],r=t[1]+ys,t[0]=e*Math.cos(r),t[1]=e*Math.sin(r);return n}function Co(n){function t(t){function c(){v.push("M",a(n(m),f),l,s(n(d.reverse()),f),"Z")}for(var h,g,p,v=[],d=[],m=[],y=-1,x=t.length,M=_t(e),_=_t(u),b=e===r?function(){return g}:_t(r),w=u===i?function(){return p}:_t(i);++y<x;)o.call(this,h=t[y],y)?(d.push([g=+M.call(this,h,y),p=+_.call(this,h,y)]),m.push([+b.call(this,h,y),+w.call(this,h,y)])):d.length&&(c(),d=[],m=[]);return d.length&&c(),v.length?v.join(""):null}var e=br,r=br,u=0,i=wr,o=be,a=oo,c=a.key,s=a,l="L",f=.7;return t.x=function(n){return arguments.length?(e=r=n,t):r},t.x0=function(n){return arguments.length?(e=n,t):e},t.x1=function(n){return arguments.length?(r=n,t):r},t.y=function(n){return arguments.length?(u=i=n,t):i},t.y0=function(n){return arguments.length?(u=n,t):u},t.y1=function(n){return arguments.length?(i=n,t):i},t.defined=function(n){return arguments.length?(o=n,t):o},t.interpolate=function(n){return arguments.length?(c="function"==typeof n?a=n:(a=Ms.get(n)||oo).key,s=a.reverse||a,l=a.closed?"M":"L",t):c},t.tension=function(n){return arguments.length?(f=n,t):f},t}function No(n){return n.radius}function Lo(n){return[n.x,n.y]}function To(n){return function(){var t=n.apply(this,arguments),e=t[0],r=t[1]+ys;return[e*Math.cos(r),e*Math.sin(r)]}}function qo(){return 64}function zo(){return"circle"}function Ro(n){var t=Math.sqrt(n/Sa);return"M0,"+t+"A"+t+","+t+" 0 1,1 0,"+-t+"A"+t+","+t+" 0 1,1 0,"+t+"Z"}function Do(n,t){return fa(n,Ns),n.id=t,n}function Po(n,t,e,r){var u=n.id;return R(n,"function"==typeof 
/* Minified build artifact: d3.js v3.4.3 — do not hand-edit; regenerate from upstream.
   This segment: Po continues — registers a tween (constant or per-node function) on
   each node's __transition__[id]; Uo is the textContent tween; jo schedules a
   transition on a node: creates per-id state {tween, time, ease, delay, duration},
   drives it with Xo.timer through start (u) / step (c) / cleanup (s) phases, firing
   start/end events and removing __transition__ when the count drops to zero;
   Ho/Fo translate axis tick groups along x/y; Oo formats a Date as ISO; Yo wraps a
   time-based scale: picks a tick interval by bisecting step thresholds js (falling
   back to year ticks scaled by 31536e6 ms/year), with nice/ticks/tickFormat/copy;
   Io coerces to Date; Zo/Vo parse XHR responses as JSON / an HTML fragment.
   Then the library bootstrap begins: Xo is the d3 namespace object (version 3.4.3);
   a Date.now shim; Bo converts array-likes via [].slice with a manual-copy fallback
   (feature-tested in the try/catch); a setAttribute/setProperty patch coercing values
   to strings (works around an engine quirk — see upstream comments); ascending/
   descending comparators (NaN-aware via the n>=n self-compare trick); min/max/extent
   each with optional accessor, skipping null/NaN leading values. */
e?function(n,i,o){n.__transition__[u].tween.set(t,r(e.call(n,n.__data__,i,o)))}:(e=r(e),function(n){n.__transition__[u].tween.set(t,e)}))}function Uo(n){return null==n&&(n=""),function(){this.textContent=n}}function jo(n,t,e,r){var i=n.__transition__||(n.__transition__={active:0,count:0}),o=i[e];if(!o){var a=r.time;o=i[e]={tween:new u,time:a,ease:r.ease,delay:r.delay,duration:r.duration},++i.count,Xo.timer(function(r){function u(r){return i.active>e?s():(i.active=e,o.event&&o.event.start.call(n,l,t),o.tween.forEach(function(e,r){(r=r.call(n,l,t))&&v.push(r)}),Xo.timer(function(){return p.c=c(r||1)?be:c,1},0,a),void 0)}function c(r){if(i.active!==e)return s();for(var u=r/g,a=f(u),c=v.length;c>0;)v[--c].call(n,a);return u>=1?(o.event&&o.event.end.call(n,l,t),s()):void 0}function s(){return--i.count?delete i[e]:delete n.__transition__,1}var l=n.__data__,f=o.ease,h=o.delay,g=o.duration,p=Ja,v=[];return p.t=h+a,r>=h?u(r-h):(p.c=u,void 0)},0,a)}}function Ho(n,t){n.attr("transform",function(n){return"translate("+t(n)+",0)"})}function Fo(n,t){n.attr("transform",function(n){return"translate(0,"+t(n)+")"})}function Oo(n){return n.toISOString()}function Yo(n,t,e){function r(t){return n(t)}function u(n,e){var r=n[1]-n[0],u=r/e,i=Xo.bisect(js,u);return i==js.length?[t.year,Yi(n.map(function(n){return n/31536e6}),e)[2]]:i?t[u/js[i-1]<js[i]/u?i-1:i]:[Os,Yi(n,e)[2]]}return r.invert=function(t){return Io(n.invert(t))},r.domain=function(t){return arguments.length?(n.domain(t),r):n.domain().map(Io)},r.nice=function(n,t){function e(e){return!isNaN(e)&&!n.range(e,Io(+e+1),t).length}var i=r.domain(),o=zi(i),a=null==n?u(o,10):"number"==typeof n&&u(o,n);return a&&(n=a[0],t=a[1]),r.domain(Pi(i,t>1?{floor:function(t){for(;e(t=n.floor(t));)t=Io(t-1);return t},ceil:function(t){for(;e(t=n.ceil(t));)t=Io(+t+1);return t}}:n))},r.ticks=function(n,t){var e=zi(r.domain()),i=null==n?u(e,10):"number"==typeof n?u(e,n):!n.range&&[{range:n},t];return 
i&&(n=i[0],t=i[1]),n.range(e[0],Io(+e[1]+1),1>t?1:t)},r.tickFormat=function(){return e},r.copy=function(){return Yo(n.copy(),t,e)},Fi(r,n)}function Io(n){return new Date(n)}function Zo(n){return JSON.parse(n.responseText)}function Vo(n){var t=Wo.createRange();return t.selectNode(Wo.body),t.createContextualFragment(n.responseText)}var Xo={version:"3.4.3"};Date.now||(Date.now=function(){return+new Date});var $o=[].slice,Bo=function(n){return $o.call(n)},Wo=document,Jo=Wo.documentElement,Go=window;try{Bo(Jo.childNodes)[0].nodeType}catch(Ko){Bo=function(n){for(var t=n.length,e=new Array(t);t--;)e[t]=n[t];return e}}try{Wo.createElement("div").style.setProperty("opacity",0,"")}catch(Qo){var na=Go.Element.prototype,ta=na.setAttribute,ea=na.setAttributeNS,ra=Go.CSSStyleDeclaration.prototype,ua=ra.setProperty;na.setAttribute=function(n,t){ta.call(this,n,t+"")},na.setAttributeNS=function(n,t,e){ea.call(this,n,t,e+"")},ra.setProperty=function(n,t,e){ua.call(this,n,t+"",e)}}Xo.ascending=function(n,t){return t>n?-1:n>t?1:n>=t?0:0/0},Xo.descending=function(n,t){return n>t?-1:t>n?1:t>=n?0:0/0},Xo.min=function(n,t){var e,r,u=-1,i=n.length;if(1===arguments.length){for(;++u<i&&!(null!=(e=n[u])&&e>=e);)e=void 0;for(;++u<i;)null!=(r=n[u])&&e>r&&(e=r)}else{for(;++u<i&&!(null!=(e=t.call(n,n[u],u))&&e>=e);)e=void 0;for(;++u<i;)null!=(r=t.call(n,n[u],u))&&e>r&&(e=r)}return e},Xo.max=function(n,t){var e,r,u=-1,i=n.length;if(1===arguments.length){for(;++u<i&&!(null!=(e=n[u])&&e>=e);)e=void 0;for(;++u<i;)null!=(r=n[u])&&r>e&&(e=r)}else{for(;++u<i&&!(null!=(e=t.call(n,n[u],u))&&e>=e);)e=void 0;for(;++u<i;)null!=(r=t.call(n,n[u],u))&&r>e&&(e=r)}return e},Xo.extent=function(n,t){var e,r,u,i=-1,o=n.length;if(1===arguments.length){for(;++i<o&&!(null!=(e=u=n[i])&&e>=e);)e=u=void 0;for(;++i<o;)null!=(r=n[i])&&(e>r&&(e=r),r>u&&(u=r))}else{for(;++i<o&&!(null!=(e=u=t.call(n,n[i],i))&&e>=e);)e=void 0;for(;++i<o;)null!=(r=t.call(n,n[i],i))&&(e>r&&(e=r),r>u&&(u=r))}return[e,u]},Xo.sum=function(n,t){var 
/* Minified build artifact: d3.js v3.4.3 — do not hand-edit; regenerate from upstream.
   This segment: Xo.sum continues — sums values (optional accessor), skipping NaN;
   mean (streaming mean skipping non-numbers via predicate n); quantile (linear
   interpolation between order statistics — matches d3.quantile on a sorted array);
   median; bisector (left/right binary search with accessor), bisectLeft/bisect/
   bisectRight; shuffle (in-place random swap from the tail — appears to be
   Fisher-Yates; confirm against upstream); permute; pairs; zip/transpose; keys/
   values/entries enumerate own+inherited enumerable properties via for-in; merge
   flattens one level (fills output backwards). Then Xo.range with overflow guard and
   float-safe scaling by e(oa(r)); Xo.map and the map class u: keys are stored with a
   "\x00" prefix (aa) so arbitrary strings cannot collide with object internals —
   has/get/set/remove/keys/values/entries/size/empty/forEach all strip or test that
   prefix; Xo.nest (keyed grouping with sortKeys/sortValues/rollup, map() vs
   entries() output); Xo.set and the set class l using the same prefix trick;
   Xo.behavior namespace; Xo.rebind copies methods from t to n preserving the
   chaining-return convention via f. */
e,r=0,u=n.length,i=-1;if(1===arguments.length)for(;++i<u;)isNaN(e=+n[i])||(r+=e);else for(;++i<u;)isNaN(e=+t.call(n,n[i],i))||(r+=e);return r},Xo.mean=function(t,e){var r,u=t.length,i=0,o=-1,a=0;if(1===arguments.length)for(;++o<u;)n(r=t[o])&&(i+=(r-i)/++a);else for(;++o<u;)n(r=e.call(t,t[o],o))&&(i+=(r-i)/++a);return a?i:void 0},Xo.quantile=function(n,t){var e=(n.length-1)*t+1,r=Math.floor(e),u=+n[r-1],i=e-r;return i?u+i*(n[r]-u):u},Xo.median=function(t,e){return arguments.length>1&&(t=t.map(e)),t=t.filter(n),t.length?Xo.quantile(t.sort(Xo.ascending),.5):void 0},Xo.bisector=function(n){return{left:function(t,e,r,u){for(arguments.length<3&&(r=0),arguments.length<4&&(u=t.length);u>r;){var i=r+u>>>1;n.call(t,t[i],i)<e?r=i+1:u=i}return r},right:function(t,e,r,u){for(arguments.length<3&&(r=0),arguments.length<4&&(u=t.length);u>r;){var i=r+u>>>1;e<n.call(t,t[i],i)?u=i:r=i+1}return r}}};var ia=Xo.bisector(function(n){return n});Xo.bisectLeft=ia.left,Xo.bisect=Xo.bisectRight=ia.right,Xo.shuffle=function(n){for(var t,e,r=n.length;r;)e=0|Math.random()*r--,t=n[r],n[r]=n[e],n[e]=t;return n},Xo.permute=function(n,t){for(var e=t.length,r=new Array(e);e--;)r[e]=n[t[e]];return r},Xo.pairs=function(n){for(var t,e=0,r=n.length-1,u=n[0],i=new Array(0>r?0:r);r>e;)i[e]=[t=u,u=n[++e]];return i},Xo.zip=function(){if(!(u=arguments.length))return[];for(var n=-1,e=Xo.min(arguments,t),r=new Array(e);++n<e;)for(var u,i=-1,o=r[n]=new Array(u);++i<u;)o[i]=arguments[i][n];return r},Xo.transpose=function(n){return Xo.zip.apply(Xo,n)},Xo.keys=function(n){var t=[];for(var e in n)t.push(e);return t},Xo.values=function(n){var t=[];for(var e in n)t.push(n[e]);return t},Xo.entries=function(n){var t=[];for(var e in n)t.push({key:e,value:n[e]});return t},Xo.merge=function(n){for(var t,e,r,u=n.length,i=-1,o=0;++i<u;)o+=n[i].length;for(e=new Array(o);--u>=0;)for(r=n[u],t=r.length;--t>=0;)e[--o]=r[t];return e};var 
oa=Math.abs;Xo.range=function(n,t,r){if(arguments.length<3&&(r=1,arguments.length<2&&(t=n,n=0)),1/0===(t-n)/r)throw new Error("infinite range");var u,i=[],o=e(oa(r)),a=-1;if(n*=o,t*=o,r*=o,0>r)for(;(u=n+r*++a)>t;)i.push(u/o);else for(;(u=n+r*++a)<t;)i.push(u/o);return i},Xo.map=function(n){var t=new u;if(n instanceof u)n.forEach(function(n,e){t.set(n,e)});else for(var e in n)t.set(e,n[e]);return t},r(u,{has:i,get:function(n){return this[aa+n]},set:function(n,t){return this[aa+n]=t},remove:o,keys:a,values:function(){var n=[];return this.forEach(function(t,e){n.push(e)}),n},entries:function(){var n=[];return this.forEach(function(t,e){n.push({key:t,value:e})}),n},size:c,empty:s,forEach:function(n){for(var t in this)t.charCodeAt(0)===ca&&n.call(this,t.substring(1),this[t])}});var aa="\x00",ca=aa.charCodeAt(0);Xo.nest=function(){function n(t,a,c){if(c>=o.length)return r?r.call(i,a):e?a.sort(e):a;for(var s,l,f,h,g=-1,p=a.length,v=o[c++],d=new u;++g<p;)(h=d.get(s=v(l=a[g])))?h.push(l):d.set(s,[l]);return t?(l=t(),f=function(e,r){l.set(e,n(t,r,c))}):(l={},f=function(e,r){l[e]=n(t,r,c)}),d.forEach(f),l}function t(n,e){if(e>=o.length)return n;var r=[],u=a[e++];return n.forEach(function(n,u){r.push({key:n,values:t(u,e)})}),u?r.sort(function(n,t){return u(n.key,t.key)}):r}var e,r,i={},o=[],a=[];return i.map=function(t,e){return n(e,t,0)},i.entries=function(e){return t(n(Xo.map,e,0),0)},i.key=function(n){return o.push(n),i},i.sortKeys=function(n){return a[o.length-1]=n,i},i.sortValues=function(n){return e=n,i},i.rollup=function(n){return r=n,i},i},Xo.set=function(n){var t=new l;if(n)for(var e=0,r=n.length;r>e;++e)t.add(n[e]);return t},r(l,{has:i,add:function(n){return this[aa+n]=!0,n},remove:function(n){return n=aa+n,n in this&&delete this[n]},values:a,size:c,empty:s,forEach:function(n){for(var t in this)t.charCodeAt(0)===ca&&n.call(this,t.substring(1))}}),Xo.behavior={},Xo.rebind=function(n,t){for(var e,r=1,u=arguments.length;++r<u;)n[e=arguments[r]]=f(n,t,t[e]);return n};var 
sa=["webkit","ms","moz","Moz","o","O"];Xo.dispatch=function(){for(var n=new p,t=-1,e=arguments.length;++t<e;)n[arguments[t]]=v(n);return n},p.prototype.on=function(n,t){var e=n.indexOf("."),r="";if(e>=0&&(r=n.substring(e+1),n=n.substring(0,e)),n)return arguments.length<2?this[n].on(r):this[n].on(r,t);if(2===arguments.length){if(null==t)for(n in this)this.hasOwnProperty(n)&&this[n].on(r,null);return this}},Xo.event=null,Xo.requote=function(n){return n.replace(la,"\\$&")};var la=/[\\\^\$\*\+\?\|\[\]\(\)\.\{\}]/g,fa={}.__proto__?function(n,t){n.__proto__=t}:function(n,t){for(var e in t)n[e]=t[e]},ha=function(n,t){return t.querySelector(n)},ga=function(n,t){return t.querySelectorAll(n)},pa=Jo[h(Jo,"matchesSelector")],va=function(n,t){return pa.call(n,t)};"function"==typeof Sizzle&&(ha=function(n,t){return Sizzle(n,t)[0]||null},ga=Sizzle,va=Sizzle.matchesSelector),Xo.selection=function(){return xa};var da=Xo.selection.prototype=[];da.select=function(n){var t,e,r,u,i=[];n=M(n);for(var o=-1,a=this.length;++o<a;){i.push(t=[]),t.parentNode=(r=this[o]).parentNode;for(var c=-1,s=r.length;++c<s;)(u=r[c])?(t.push(e=n.call(u,u.__data__,c,o)),e&&"__data__"in u&&(e.__data__=u.__data__)):t.push(null)}return x(i)},da.selectAll=function(n){var t,e,r=[];n=_(n);for(var u=-1,i=this.length;++u<i;)for(var o=this[u],a=-1,c=o.length;++a<c;)(e=o[a])&&(r.push(t=Bo(n.call(e,e.__data__,a,u))),t.parentNode=e);return x(r)};var ma={svg:"http://www.w3.org/2000/svg",xhtml:"http://www.w3.org/1999/xhtml",xlink:"http://www.w3.org/1999/xlink",xml:"http://www.w3.org/XML/1998/namespace",xmlns:"http://www.w3.org/2000/xmlns/"};Xo.ns={prefix:ma,qualify:function(n){var t=n.indexOf(":"),e=n;return t>=0&&(e=n.substring(0,t),n=n.substring(t+1)),ma.hasOwnProperty(e)?{space:ma[e],local:n}:n}},da.attr=function(n,t){if(arguments.length<2){if("string"==typeof n){var e=this.node();return n=Xo.ns.qualify(n),n.local?e.getAttributeNS(n.space,n.local):e.getAttribute(n)}for(t in n)this.each(b(t,n[t]));return this}return 
this.each(b(n,t))},da.classed=function(n,t){if(arguments.length<2){if("string"==typeof n){var e=this.node(),r=(n=k(n)).length,u=-1;if(t=e.classList){for(;++u<r;)if(!t.contains(n[u]))return!1}else for(t=e.getAttribute("class");++u<r;)if(!S(n[u]).test(t))return!1;return!0}for(t in n)this.each(E(t,n[t]));return this}return this.each(E(n,t))},da.style=function(n,t,e){var r=arguments.length;if(3>r){if("string"!=typeof n){2>r&&(t="");for(e in n)this.each(C(e,n[e],t));return this}if(2>r)return Go.getComputedStyle(this.node(),null).getPropertyValue(n);e=""}return this.each(C(n,t,e))},da.property=function(n,t){if(arguments.length<2){if("string"==typeof n)return this.node()[n];for(t in n)this.each(N(t,n[t]));return this}return this.each(N(n,t))},da.text=function(n){return arguments.length?this.each("function"==typeof n?function(){var t=n.apply(this,arguments);this.textContent=null==t?"":t}:null==n?function(){this.textContent=""}:function(){this.textContent=n}):this.node().textContent},da.html=function(n){return arguments.length?this.each("function"==typeof n?function(){var t=n.apply(this,arguments);this.innerHTML=null==t?"":t}:null==n?function(){this.innerHTML=""}:function(){this.innerHTML=n}):this.node().innerHTML},da.append=function(n){return n=L(n),this.select(function(){return this.appendChild(n.apply(this,arguments))})},da.insert=function(n,t){return n=L(n),t=M(t),this.select(function(){return this.insertBefore(n.apply(this,arguments),t.apply(this,arguments)||null)})},da.remove=function(){return this.each(function(){var n=this.parentNode;n&&n.removeChild(this)})},da.data=function(n,t){function e(n,e){var r,i,o,a=n.length,f=e.length,h=Math.min(a,f),g=new Array(f),p=new Array(f),v=new Array(a);if(t){var d,m=new u,y=new 
u,x=[];for(r=-1;++r<a;)d=t.call(i=n[r],i.__data__,r),m.has(d)?v[r]=i:m.set(d,i),x.push(d);for(r=-1;++r<f;)d=t.call(e,o=e[r],r),(i=m.get(d))?(g[r]=i,i.__data__=o):y.has(d)||(p[r]=T(o)),y.set(d,o),m.remove(d);for(r=-1;++r<a;)m.has(x[r])&&(v[r]=n[r])}else{for(r=-1;++r<h;)i=n[r],o=e[r],i?(i.__data__=o,g[r]=i):p[r]=T(o);for(;f>r;++r)p[r]=T(e[r]);for(;a>r;++r)v[r]=n[r]}p.update=g,p.parentNode=g.parentNode=v.parentNode=n.parentNode,c.push(p),s.push(g),l.push(v)}var r,i,o=-1,a=this.length;if(!arguments.length){for(n=new Array(a=(r=this[0]).length);++o<a;)(i=r[o])&&(n[o]=i.__data__);return n}var c=D([]),s=x([]),l=x([]);if("function"==typeof n)for(;++o<a;)e(r=this[o],n.call(r,r.parentNode.__data__,o));else for(;++o<a;)e(r=this[o],n);return s.enter=function(){return c},s.exit=function(){return l},s},da.datum=function(n){return arguments.length?this.property("__data__",n):this.property("__data__")},da.filter=function(n){var t,e,r,u=[];"function"!=typeof n&&(n=q(n));for(var i=0,o=this.length;o>i;i++){u.push(t=[]),t.parentNode=(e=this[i]).parentNode;for(var a=0,c=e.length;c>a;a++)(r=e[a])&&n.call(r,r.__data__,a,i)&&t.push(r)}return x(u)},da.order=function(){for(var n=-1,t=this.length;++n<t;)for(var e,r=this[n],u=r.length-1,i=r[u];--u>=0;)(e=r[u])&&(i&&i!==e.nextSibling&&i.parentNode.insertBefore(e,i),i=e);return this},da.sort=function(n){n=z.apply(this,arguments);for(var t=-1,e=this.length;++t<e;)this[t].sort(n);return this.order()},da.each=function(n){return R(this,function(t,e,r){n.call(t,t.__data__,e,r)})},da.call=function(n){var t=Bo(arguments);return n.apply(t[0]=this,t),this},da.empty=function(){return!this.node()},da.node=function(){for(var n=0,t=this.length;t>n;n++)for(var e=this[n],r=0,u=e.length;u>r;r++){var i=e[r];if(i)return i}return null},da.size=function(){var n=0;return this.each(function(){++n}),n};var 
ya=[];Xo.selection.enter=D,Xo.selection.enter.prototype=ya,ya.append=da.append,ya.empty=da.empty,ya.node=da.node,ya.call=da.call,ya.size=da.size,ya.select=function(n){for(var t,e,r,u,i,o=[],a=-1,c=this.length;++a<c;){r=(u=this[a]).update,o.push(t=[]),t.parentNode=u.parentNode;for(var s=-1,l=u.length;++s<l;)(i=u[s])?(t.push(r[s]=e=n.call(u.parentNode,i.__data__,s,a)),e.__data__=i.__data__):t.push(null)}return x(o)},ya.insert=function(n,t){return arguments.length<2&&(t=P(this)),da.insert.call(this,n,t)},da.transition=function(){for(var n,t,e=ks||++Ls,r=[],u=Es||{time:Date.now(),ease:yu,delay:0,duration:250},i=-1,o=this.length;++i<o;){r.push(n=[]);for(var a=this[i],c=-1,s=a.length;++c<s;)(t=a[c])&&jo(t,c,e,u),n.push(t)}return Do(r,e)},da.interrupt=function(){return this.each(U)},Xo.select=function(n){var t=["string"==typeof n?ha(n,Wo):n];return t.parentNode=Jo,x([t])},Xo.selectAll=function(n){var t=Bo("string"==typeof n?ga(n,Wo):n);return t.parentNode=Jo,x([t])};var xa=Xo.select(Jo);da.on=function(n,t,e){var r=arguments.length;if(3>r){if("string"!=typeof n){2>r&&(t=!1);for(e in n)this.each(j(e,n[e],t));return this}if(2>r)return(r=this.node()["__on"+n])&&r._;e=!1}return this.each(j(n,t,e))};var Ma=Xo.map({mouseenter:"mouseover",mouseleave:"mouseout"});Ma.forEach(function(n){"on"+n in Wo&&Ma.remove(n)});var _a="onselectstart"in Wo?null:h(Jo.style,"userSelect"),ba=0;Xo.mouse=function(n){return Y(n,m())};var wa=/WebKit/.test(Go.navigator.userAgent)?-1:0;Xo.touches=function(n,t){return arguments.length<2&&(t=m().touches),t?Bo(t).map(function(t){var e=Y(n,t);return e.identifier=t.identifier,e}):[]},Xo.behavior.drag=function(){function n(){this.on("mousedown.drag",o).on("touchstart.drag",a)}function t(){return Xo.event.changedTouches[0].identifier}function e(n,t){return Xo.touches(n).filter(function(n){return n.identifier===t})[0]}function r(n,t,e,r){return function(){function o(){var 
n=t(l,g),e=n[0]-v[0],r=n[1]-v[1];d|=e|r,v=n,f({type:"drag",x:n[0]+c[0],y:n[1]+c[1],dx:e,dy:r})}function a(){m.on(e+"."+p,null).on(r+"."+p,null),y(d&&Xo.event.target===h),f({type:"dragend"})}var c,s=this,l=s.parentNode,f=u.of(s,arguments),h=Xo.event.target,g=n(),p=null==g?"drag":"drag-"+g,v=t(l,g),d=0,m=Xo.select(Go).on(e+"."+p,o).on(r+"."+p,a),y=O();i?(c=i.apply(s,arguments),c=[c.x-v[0],c.y-v[1]]):c=[0,0],f({type:"dragstart"})}}var u=y(n,"drag","dragstart","dragend"),i=null,o=r(g,Xo.mouse,"mousemove","mouseup"),a=r(t,e,"touchmove","touchend");return n.origin=function(t){return arguments.length?(i=t,n):i},Xo.rebind(n,u,"on")};var Sa=Math.PI,ka=2*Sa,Ea=Sa/2,Aa=1e-6,Ca=Aa*Aa,Na=Sa/180,La=180/Sa,Ta=Math.SQRT2,qa=2,za=4;Xo.interpolateZoom=function(n,t){function e(n){var t=n*y;if(m){var e=B(v),o=i/(qa*h)*(e*W(Ta*t+v)-$(v));return[r+o*s,u+o*l,i*e/B(Ta*t+v)]}return[r+n*s,u+n*l,i*Math.exp(Ta*t)]}var r=n[0],u=n[1],i=n[2],o=t[0],a=t[1],c=t[2],s=o-r,l=a-u,f=s*s+l*l,h=Math.sqrt(f),g=(c*c-i*i+za*f)/(2*i*qa*h),p=(c*c-i*i-za*f)/(2*c*qa*h),v=Math.log(Math.sqrt(g*g+1)-g),d=Math.log(Math.sqrt(p*p+1)-p),m=d-v,y=(m||Math.log(c/i))/Ta;return e.duration=1e3*y,e},Xo.behavior.zoom=function(){function n(n){n.on(A,s).on(Pa+".zoom",f).on(C,h).on("dblclick.zoom",g).on(L,l)}function t(n){return[(n[0]-S.x)/S.k,(n[1]-S.y)/S.k]}function e(n){return[n[0]*S.k+S.x,n[1]*S.k+S.y]}function r(n){S.k=Math.max(E[0],Math.min(E[1],n))}function u(n,t){t=e(t),S.x+=n[0]-t[0],S.y+=n[1]-t[1]}function i(){_&&_.domain(M.range().map(function(n){return(n-S.x)/S.k}).map(M.invert)),w&&w.domain(b.range().map(function(n){return(n-S.y)/S.k}).map(b.invert))}function o(n){n({type:"zoomstart"})}function a(n){i(),n({type:"zoom",scale:S.k,translate:[S.x,S.y]})}function c(n){n({type:"zoomend"})}function s(){function n(){l=1,u(Xo.mouse(r),g),a(i)}function e(){f.on(C,Go===r?h:null).on(N,null),p(l&&Xo.event.target===s),c(i)}var 
r=this,i=T.of(r,arguments),s=Xo.event.target,l=0,f=Xo.select(Go).on(C,n).on(N,e),g=t(Xo.mouse(r)),p=O();U.call(r),o(i)}function l(){function n(){var n=Xo.touches(g);return h=S.k,n.forEach(function(n){n.identifier in v&&(v[n.identifier]=t(n))}),n}function e(){for(var t=Xo.event.changedTouches,e=0,i=t.length;i>e;++e)v[t[e].identifier]=null;var o=n(),c=Date.now();if(1===o.length){if(500>c-x){var s=o[0],l=v[s.identifier];r(2*S.k),u(s,l),d(),a(p)}x=c}else if(o.length>1){var s=o[0],f=o[1],h=s[0]-f[0],g=s[1]-f[1];m=h*h+g*g}}function i(){for(var n,t,e,i,o=Xo.touches(g),c=0,s=o.length;s>c;++c,i=null)if(e=o[c],i=v[e.identifier]){if(t)break;n=e,t=i}if(i){var l=(l=e[0]-n[0])*l+(l=e[1]-n[1])*l,f=m&&Math.sqrt(l/m);n=[(n[0]+e[0])/2,(n[1]+e[1])/2],t=[(t[0]+i[0])/2,(t[1]+i[1])/2],r(f*h)}x=null,u(n,t),a(p)}function f(){if(Xo.event.touches.length){for(var t=Xo.event.changedTouches,e=0,r=t.length;r>e;++e)delete v[t[e].identifier];for(var u in v)return void n()}b.on(M,null).on(_,null),w.on(A,s).on(L,l),k(),c(p)}var h,g=this,p=T.of(g,arguments),v={},m=0,y=Xo.event.changedTouches[0].identifier,M="touchmove.zoom-"+y,_="touchend.zoom-"+y,b=Xo.select(Go).on(M,i).on(_,f),w=Xo.select(g).on(A,null).on(L,e),k=O();U.call(g),e(),o(p)}function f(){var n=T.of(this,arguments);m?clearTimeout(m):(U.call(this),o(n)),m=setTimeout(function(){m=null,c(n)},50),d();var e=v||Xo.mouse(this);p||(p=t(e)),r(Math.pow(2,.002*Ra())*S.k),u(e,p),a(n)}function h(){p=null}function g(){var n=T.of(this,arguments),e=Xo.mouse(this),i=t(e),s=Math.log(S.k)/Math.LN2;o(n),r(Math.pow(2,Xo.event.shiftKey?Math.ceil(s)-1:Math.floor(s)+1)),u(e,i),a(n),c(n)}var p,v,m,x,M,_,b,w,S={x:0,y:0,k:1},k=[960,500],E=Da,A="mousedown.zoom",C="mousemove.zoom",N="mouseup.zoom",L="touchstart.zoom",T=y(n,"zoomstart","zoom","zoomend");return n.event=function(n){n.each(function(){var n=T.of(this,arguments),t=S;ks?Xo.select(this).transition().each("start.zoom",function(){S=this.__chart__||{x:0,y:0,k:1},o(n)}).tween("zoom:zoom",function(){var 
e=k[0],r=k[1],u=e/2,i=r/2,o=Xo.interpolateZoom([(u-S.x)/S.k,(i-S.y)/S.k,e/S.k],[(u-t.x)/t.k,(i-t.y)/t.k,e/t.k]);return function(t){var r=o(t),c=e/r[2];this.__chart__=S={x:u-r[0]*c,y:i-r[1]*c,k:c},a(n)}}).each("end.zoom",function(){c(n)}):(this.__chart__=S,o(n),a(n),c(n))})},n.translate=function(t){return arguments.length?(S={x:+t[0],y:+t[1],k:S.k},i(),n):[S.x,S.y]},n.scale=function(t){return arguments.length?(S={x:S.x,y:S.y,k:+t},i(),n):S.k},n.scaleExtent=function(t){return arguments.length?(E=null==t?Da:[+t[0],+t[1]],n):E},n.center=function(t){return arguments.length?(v=t&&[+t[0],+t[1]],n):v},n.size=function(t){return arguments.length?(k=t&&[+t[0],+t[1]],n):k},n.x=function(t){return arguments.length?(_=t,M=t.copy(),S={x:0,y:0,k:1},n):_},n.y=function(t){return arguments.length?(w=t,b=t.copy(),S={x:0,y:0,k:1},n):w},Xo.rebind(n,T,"on")};var Ra,Da=[0,1/0],Pa="onwheel"in Wo?(Ra=function(){return-Xo.event.deltaY*(Xo.event.deltaMode?120:1)},"wheel"):"onmousewheel"in Wo?(Ra=function(){return Xo.event.wheelDelta},"mousewheel"):(Ra=function(){return-Xo.event.detail},"MozMousePixelScroll");G.prototype.toString=function(){return this.rgb()+""},Xo.hsl=function(n,t,e){return 1===arguments.length?n instanceof Q?K(n.h,n.s,n.l):dt(""+n,mt,K):K(+n,+t,+e)};var Ua=Q.prototype=new G;Ua.brighter=function(n){return n=Math.pow(.7,arguments.length?n:1),K(this.h,this.s,this.l/n)},Ua.darker=function(n){return n=Math.pow(.7,arguments.length?n:1),K(this.h,this.s,n*this.l)},Ua.rgb=function(){return nt(this.h,this.s,this.l)},Xo.hcl=function(n,t,e){return 1===arguments.length?n instanceof et?tt(n.h,n.c,n.l):n instanceof it?at(n.l,n.a,n.b):at((n=yt((n=Xo.rgb(n)).r,n.g,n.b)).l,n.a,n.b):tt(+n,+t,+e)};var ja=et.prototype=new G;ja.brighter=function(n){return tt(this.h,this.c,Math.min(100,this.l+Ha*(arguments.length?n:1)))},ja.darker=function(n){return tt(this.h,this.c,Math.max(0,this.l-Ha*(arguments.length?n:1)))},ja.rgb=function(){return rt(this.h,this.c,this.l).rgb()},Xo.lab=function(n,t,e){return 
1===arguments.length?n instanceof it?ut(n.l,n.a,n.b):n instanceof et?rt(n.l,n.c,n.h):yt((n=Xo.rgb(n)).r,n.g,n.b):ut(+n,+t,+e)};var Ha=18,Fa=.95047,Oa=1,Ya=1.08883,Ia=it.prototype=new G;Ia.brighter=function(n){return ut(Math.min(100,this.l+Ha*(arguments.length?n:1)),this.a,this.b)},Ia.darker=function(n){return ut(Math.max(0,this.l-Ha*(arguments.length?n:1)),this.a,this.b)},Ia.rgb=function(){return ot(this.l,this.a,this.b)},Xo.rgb=function(n,t,e){return 1===arguments.length?n instanceof pt?gt(n.r,n.g,n.b):dt(""+n,gt,nt):gt(~~n,~~t,~~e)};var Za=pt.prototype=new G;Za.brighter=function(n){n=Math.pow(.7,arguments.length?n:1);var t=this.r,e=this.g,r=this.b,u=30;return t||e||r?(t&&u>t&&(t=u),e&&u>e&&(e=u),r&&u>r&&(r=u),gt(Math.min(255,~~(t/n)),Math.min(255,~~(e/n)),Math.min(255,~~(r/n)))):gt(u,u,u)},Za.darker=function(n){return n=Math.pow(.7,arguments.length?n:1),gt(~~(n*this.r),~~(n*this.g),~~(n*this.b))},Za.hsl=function(){return mt(this.r,this.g,this.b)},Za.toString=function(){return"#"+vt(this.r)+vt(this.g)+vt(this.b)};var 
Va=Xo.map({aliceblue:15792383,antiquewhite:16444375,aqua:65535,aquamarine:8388564,azure:15794175,beige:16119260,bisque:16770244,black:0,blanchedalmond:16772045,blue:255,blueviolet:9055202,brown:10824234,burlywood:14596231,cadetblue:6266528,chartreuse:8388352,chocolate:13789470,coral:16744272,cornflowerblue:6591981,cornsilk:16775388,crimson:14423100,cyan:65535,darkblue:139,darkcyan:35723,darkgoldenrod:12092939,darkgray:11119017,darkgreen:25600,darkgrey:11119017,darkkhaki:12433259,darkmagenta:9109643,darkolivegreen:5597999,darkorange:16747520,darkorchid:10040012,darkred:9109504,darksalmon:15308410,darkseagreen:9419919,darkslateblue:4734347,darkslategray:3100495,darkslategrey:3100495,darkturquoise:52945,darkviolet:9699539,deeppink:16716947,deepskyblue:49151,dimgray:6908265,dimgrey:6908265,dodgerblue:2003199,firebrick:11674146,floralwhite:16775920,forestgreen:2263842,fuchsia:16711935,gainsboro:14474460,ghostwhite:16316671,gold:16766720,goldenrod:14329120,gray:8421504,green:32768,greenyellow:11403055,grey:8421504,honeydew:15794160,hotpink:16738740,indianred:13458524,indigo:4915330,ivory:16777200,khaki:15787660,lavender:15132410,lavenderblush:16773365,lawngreen:8190976,lemonchiffon:16775885,lightblue:11393254,lightcoral:15761536,lightcyan:14745599,lightgoldenrodyellow:16448210,lightgray:13882323,lightgreen:9498256,lightgrey:13882323,lightpink:16758465,lightsalmon:16752762,lightseagreen:2142890,lightskyblue:8900346,lightslategray:7833753,lightslategrey:7833753,lightsteelblue:11584734,lightyellow:16777184,lime:65280,limegreen:3329330,linen:16445670,magenta:16711935,maroon:8388608,mediumaquamarine:6737322,mediumblue:205,mediumorchid:12211667,mediumpurple:9662683,mediumseagreen:3978097,mediumslateblue:8087790,mediumspringgreen:64154,mediumturquoise:4772300,mediumvioletred:13047173,midnightblue:1644912,mintcream:16121850,mistyrose:16770273,moccasin:16770229,navajowhite:16768685,navy:128,oldlace:16643558,olive:8421376,olivedrab:7048739,orange:16753920,orangered:16729344,orchid:
14315734,palegoldenrod:15657130,palegreen:10025880,paleturquoise:11529966,palevioletred:14381203,papayawhip:16773077,peachpuff:16767673,peru:13468991,pink:16761035,plum:14524637,powderblue:11591910,purple:8388736,red:16711680,rosybrown:12357519,royalblue:4286945,saddlebrown:9127187,salmon:16416882,sandybrown:16032864,seagreen:3050327,seashell:16774638,sienna:10506797,silver:12632256,skyblue:8900331,slateblue:6970061,slategray:7372944,slategrey:7372944,snow:16775930,springgreen:65407,steelblue:4620980,tan:13808780,teal:32896,thistle:14204888,tomato:16737095,turquoise:4251856,violet:15631086,wheat:16113331,white:16777215,whitesmoke:16119285,yellow:16776960,yellowgreen:10145074});Va.forEach(function(n,t){Va.set(n,ft(t))}),Xo.functor=_t,Xo.xhr=wt(bt),Xo.dsv=function(n,t){function e(n,e,i){arguments.length<3&&(i=e,e=null);var o=St(n,t,null==e?r:u(e),i);return o.row=function(n){return arguments.length?o.response(null==(e=n)?r:u(n)):e},o}function r(n){return e.parse(n.responseText)}function u(n){return function(t){return e.parse(t.responseText,n)}}function i(t){return t.map(o).join(n)}function o(n){return a.test(n)?'"'+n.replace(/\"/g,'""')+'"':n}var a=new RegExp('["'+n+"\n]"),c=n.charCodeAt(0);return e.parse=function(n,t){var r;return e.parseRows(n,function(n,e){if(r)return r(n,e-1);var u=new Function("d","return {"+n.map(function(n,t){return JSON.stringify(n)+": d["+t+"]"}).join(",")+"}");r=t?function(n,e){return t(u(n),e)}:u})},e.parseRows=function(n,t){function e(){if(l>=s)return o;if(u)return u=!1,i;var t=l;if(34===n.charCodeAt(t)){for(var e=t;e++<s;)if(34===n.charCodeAt(e)){if(34!==n.charCodeAt(e+1))break;++e}l=e+2;var r=n.charCodeAt(e+1);return 13===r?(u=!0,10===n.charCodeAt(e+2)&&++l):10===r&&(u=!0),n.substring(t+1,e).replace(/""/g,'"')}for(;s>l;){var r=n.charCodeAt(l++),a=1;if(10===r)u=!0;else if(13===r)u=!0,10===n.charCodeAt(l)&&(++l,++a);else if(r!==c)continue;return n.substring(t,l-a)}return n.substring(t)}for(var 
r,u,i={},o={},a=[],s=n.length,l=0,f=0;(r=e())!==o;){for(var h=[];r!==i&&r!==o;)h.push(r),r=e();(!t||(h=t(h,f++)))&&a.push(h)}return a},e.format=function(t){if(Array.isArray(t[0]))return e.formatRows(t);var r=new l,u=[];return t.forEach(function(n){for(var t in n)r.has(t)||u.push(r.add(t))}),[u.map(o).join(n)].concat(t.map(function(t){return u.map(function(n){return o(t[n])}).join(n)})).join("\n")},e.formatRows=function(n){return n.map(i).join("\n")},e},Xo.csv=Xo.dsv(",","text/csv"),Xo.tsv=Xo.dsv(" ","text/tab-separated-values");var Xa,$a,Ba,Wa,Ja,Ga=Go[h(Go,"requestAnimationFrame")]||function(n){setTimeout(n,17)};Xo.timer=function(n,t,e){var r=arguments.length;2>r&&(t=0),3>r&&(e=Date.now());var u=e+t,i={c:n,t:u,f:!1,n:null};$a?$a.n=i:Xa=i,$a=i,Ba||(Wa=clearTimeout(Wa),Ba=1,Ga(Et))},Xo.timer.flush=function(){At(),Ct()},Xo.round=function(n,t){return t?Math.round(n*(t=Math.pow(10,t)))/t:Math.round(n)};var Ka=["y","z","a","f","p","n","\xb5","m","","k","M","G","T","P","E","Z","Y"].map(Lt);Xo.formatPrefix=function(n,t){var e=0;return n&&(0>n&&(n*=-1),t&&(n=Xo.round(n,Nt(n,t))),e=1+Math.floor(1e-12+Math.log(n)/Math.LN10),e=Math.max(-24,Math.min(24,3*Math.floor((0>=e?e+1:e-1)/3)))),Ka[8+e/3]};var Qa=/(?:([^{])?([<>=^]))?([+\- ])?([$#])?(0)?(\d+)?(,)?(\.-?\d+)?([a-z%])?/i,nc=Xo.map({b:function(n){return n.toString(2)},c:function(n){return String.fromCharCode(n)},o:function(n){return n.toString(8)},x:function(n){return n.toString(16)},X:function(n){return n.toString(16).toUpperCase()},g:function(n,t){return n.toPrecision(t)},e:function(n,t){return n.toExponential(t)},f:function(n,t){return n.toFixed(t)},r:function(n,t){return(n=Xo.round(n,Nt(n,t))).toFixed(Math.max(0,Math.min(20,Nt(n*(1+1e-15),t))))}}),tc=Xo.time={},ec=Date;zt.prototype={getDate:function(){return this._.getUTCDate()},getDay:function(){return this._.getUTCDay()},getFullYear:function(){return this._.getUTCFullYear()},getHours:function(){return this._.getUTCHours()},getMilliseconds:function(){return 
this._.getUTCMilliseconds()},getMinutes:function(){return this._.getUTCMinutes()},getMonth:function(){return this._.getUTCMonth()},getSeconds:function(){return this._.getUTCSeconds()},getTime:function(){return this._.getTime()},getTimezoneOffset:function(){return 0},valueOf:function(){return this._.valueOf()},setDate:function(){rc.setUTCDate.apply(this._,arguments)},setDay:function(){rc.setUTCDay.apply(this._,arguments)},setFullYear:function(){rc.setUTCFullYear.apply(this._,arguments)},setHours:function(){rc.setUTCHours.apply(this._,arguments)},setMilliseconds:function(){rc.setUTCMilliseconds.apply(this._,arguments)},setMinutes:function(){rc.setUTCMinutes.apply(this._,arguments)},setMonth:function(){rc.setUTCMonth.apply(this._,arguments)},setSeconds:function(){rc.setUTCSeconds.apply(this._,arguments)},setTime:function(){rc.setTime.apply(this._,arguments)}};var rc=Date.prototype;tc.year=Rt(function(n){return n=tc.day(n),n.setMonth(0,1),n},function(n,t){n.setFullYear(n.getFullYear()+t)},function(n){return n.getFullYear()}),tc.years=tc.year.range,tc.years.utc=tc.year.utc.range,tc.day=Rt(function(n){var t=new ec(2e3,0);return t.setFullYear(n.getFullYear(),n.getMonth(),n.getDate()),t},function(n,t){n.setDate(n.getDate()+t)},function(n){return n.getDate()-1}),tc.days=tc.day.range,tc.days.utc=tc.day.utc.range,tc.dayOfYear=function(n){var t=tc.year(n);return Math.floor((n-t-6e4*(n.getTimezoneOffset()-t.getTimezoneOffset()))/864e5)},["sunday","monday","tuesday","wednesday","thursday","friday","saturday"].forEach(function(n,t){t=7-t;var e=tc[n]=Rt(function(n){return(n=tc.day(n)).setDate(n.getDate()-(n.getDay()+t)%7),n},function(n,t){n.setDate(n.getDate()+7*Math.floor(t))},function(n){var e=tc.year(n).getDay();return Math.floor((tc.dayOfYear(n)+(e+t)%7)/7)-(e!==t)});tc[n+"s"]=e.range,tc[n+"s"].utc=e.utc.range,tc[n+"OfYear"]=function(n){var e=tc.year(n).getDay();return 
Math.floor((tc.dayOfYear(n)+(e+t)%7)/7)}}),tc.week=tc.sunday,tc.weeks=tc.sunday.range,tc.weeks.utc=tc.sunday.utc.range,tc.weekOfYear=tc.sundayOfYear;var uc={"-":"",_:" ",0:"0"},ic=/^\s*\d+/,oc=/^%/;Xo.locale=function(n){return{numberFormat:Tt(n),timeFormat:Pt(n)}};var ac=Xo.locale({decimal:".",thousands:",",grouping:[3],currency:["$",""],dateTime:"%a %b %e %X %Y",date:"%m/%d/%Y",time:"%H:%M:%S",periods:["AM","PM"],days:["Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"],shortDays:["Sun","Mon","Tue","Wed","Thu","Fri","Sat"],months:["January","February","March","April","May","June","July","August","September","October","November","December"],shortMonths:["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"]});Xo.format=ac.numberFormat,Xo.geo={},re.prototype={s:0,t:0,add:function(n){ue(n,this.t,cc),ue(cc.s,this.s,this),this.s?this.t+=cc.t:this.s=cc.t},reset:function(){this.s=this.t=0},valueOf:function(){return this.s}};var cc=new re;Xo.geo.stream=function(n,t){n&&sc.hasOwnProperty(n.type)?sc[n.type](n,t):ie(n,t)};var sc={Feature:function(n,t){ie(n.geometry,t)},FeatureCollection:function(n,t){for(var e=n.features,r=-1,u=e.length;++r<u;)ie(e[r].geometry,t)}},lc={Sphere:function(n,t){t.sphere()},Point:function(n,t){n=n.coordinates,t.point(n[0],n[1],n[2])},MultiPoint:function(n,t){for(var e=n.coordinates,r=-1,u=e.length;++r<u;)n=e[r],t.point(n[0],n[1],n[2])},LineString:function(n,t){oe(n.coordinates,t,0)},MultiLineString:function(n,t){for(var e=n.coordinates,r=-1,u=e.length;++r<u;)oe(e[r],t,0)},Polygon:function(n,t){ae(n.coordinates,t)},MultiPolygon:function(n,t){for(var e=n.coordinates,r=-1,u=e.length;++r<u;)ae(e[r],t)},GeometryCollection:function(n,t){for(var e=n.geometries,r=-1,u=e.length;++r<u;)ie(e[r],t)}};Xo.geo.area=function(n){return fc=0,Xo.geo.stream(n,gc),fc};var fc,hc=new re,gc={sphere:function(){fc+=4*Sa},point:g,lineStart:g,lineEnd:g,polygonStart:function(){hc.reset(),gc.lineStart=ce},polygonEnd:function(){var 
n=2*hc;fc+=0>n?4*Sa+n:n,gc.lineStart=gc.lineEnd=gc.point=g}};Xo.geo.bounds=function(){function n(n,t){x.push(M=[l=n,h=n]),f>t&&(f=t),t>g&&(g=t)}function t(t,e){var r=se([t*Na,e*Na]);if(m){var u=fe(m,r),i=[u[1],-u[0],0],o=fe(i,u);pe(o),o=ve(o);var c=t-p,s=c>0?1:-1,v=o[0]*La*s,d=oa(c)>180;if(d^(v>s*p&&s*t>v)){var y=o[1]*La;y>g&&(g=y)}else if(v=(v+360)%360-180,d^(v>s*p&&s*t>v)){var y=-o[1]*La;f>y&&(f=y)}else f>e&&(f=e),e>g&&(g=e);d?p>t?a(l,t)>a(l,h)&&(h=t):a(t,h)>a(l,h)&&(l=t):h>=l?(l>t&&(l=t),t>h&&(h=t)):t>p?a(l,t)>a(l,h)&&(h=t):a(t,h)>a(l,h)&&(l=t)}else n(t,e);m=r,p=t}function e(){_.point=t}function r(){M[0]=l,M[1]=h,_.point=n,m=null}function u(n,e){if(m){var r=n-p;y+=oa(r)>180?r+(r>0?360:-360):r}else v=n,d=e;gc.point(n,e),t(n,e)}function i(){gc.lineStart()}function o(){u(v,d),gc.lineEnd(),oa(y)>Aa&&(l=-(h=180)),M[0]=l,M[1]=h,m=null}function a(n,t){return(t-=n)<0?t+360:t}function c(n,t){return n[0]-t[0]}function s(n,t){return t[0]<=t[1]?t[0]<=n&&n<=t[1]:n<t[0]||t[1]<n}var l,f,h,g,p,v,d,m,y,x,M,_={point:n,lineStart:e,lineEnd:r,polygonStart:function(){_.point=u,_.lineStart=i,_.lineEnd=o,y=0,gc.polygonStart()},polygonEnd:function(){gc.polygonEnd(),_.point=n,_.lineStart=e,_.lineEnd=r,0>hc?(l=-(h=180),f=-(g=90)):y>Aa?g=90:-Aa>y&&(f=-90),M[0]=l,M[1]=h}};return function(n){g=h=-(l=f=1/0),x=[],Xo.geo.stream(n,_);var t=x.length;if(t){x.sort(c);for(var e,r=1,u=x[0],i=[u];t>r;++r)e=x[r],s(e[0],u)||s(e[1],u)?(a(u[0],e[1])>a(u[0],u[1])&&(u[1]=e[1]),a(e[0],u[1])>a(u[0],u[1])&&(u[0]=e[0])):i.push(u=e);for(var o,e,p=-1/0,t=i.length-1,r=0,u=i[t];t>=r;u=e,++r)e=i[r],(o=a(u[1],e[0]))>p&&(p=o,l=e[0],h=u[1])}return x=M=null,1/0===l||1/0===f?[[0/0,0/0],[0/0,0/0]]:[[l,f],[h,g]]}}(),Xo.geo.centroid=function(n){pc=vc=dc=mc=yc=xc=Mc=_c=bc=wc=Sc=0,Xo.geo.stream(n,kc);var t=bc,e=wc,r=Sc,u=t*t+e*e+r*r;return Ca>u&&(t=xc,e=Mc,r=_c,Aa>vc&&(t=dc,e=mc,r=yc),u=t*t+e*e+r*r,Ca>u)?[0/0,0/0]:[Math.atan2(e,t)*La,X(r/Math.sqrt(u))*La]};var 
pc,vc,dc,mc,yc,xc,Mc,_c,bc,wc,Sc,kc={sphere:g,point:me,lineStart:xe,lineEnd:Me,polygonStart:function(){kc.lineStart=_e},polygonEnd:function(){kc.lineStart=xe}},Ec=Ee(be,Te,ze,[-Sa,-Sa/2]),Ac=1e9;Xo.geo.clipExtent=function(){var n,t,e,r,u,i,o={stream:function(n){return u&&(u.valid=!1),u=i(n),u.valid=!0,u},extent:function(a){return arguments.length?(i=Pe(n=+a[0][0],t=+a[0][1],e=+a[1][0],r=+a[1][1]),u&&(u.valid=!1,u=null),o):[[n,t],[e,r]]}};return o.extent([[0,0],[960,500]])},(Xo.geo.conicEqualArea=function(){return je(He)}).raw=He,Xo.geo.albers=function(){return Xo.geo.conicEqualArea().rotate([96,0]).center([-.6,38.7]).parallels([29.5,45.5]).scale(1070)},Xo.geo.albersUsa=function(){function n(n){var i=n[0],o=n[1];return t=null,e(i,o),t||(r(i,o),t)||u(i,o),t}var t,e,r,u,i=Xo.geo.albers(),o=Xo.geo.conicEqualArea().rotate([154,0]).center([-2,58.5]).parallels([55,65]),a=Xo.geo.conicEqualArea().rotate([157,0]).center([-3,19.9]).parallels([8,18]),c={point:function(n,e){t=[n,e]}};return n.invert=function(n){var t=i.scale(),e=i.translate(),r=(n[0]-e[0])/t,u=(n[1]-e[1])/t;return(u>=.12&&.234>u&&r>=-.425&&-.214>r?o:u>=.166&&.234>u&&r>=-.214&&-.115>r?a:i).invert(n)},n.stream=function(n){var t=i.stream(n),e=o.stream(n),r=a.stream(n);return{point:function(n,u){t.point(n,u),e.point(n,u),r.point(n,u)},sphere:function(){t.sphere(),e.sphere(),r.sphere()},lineStart:function(){t.lineStart(),e.lineStart(),r.lineStart()},lineEnd:function(){t.lineEnd(),e.lineEnd(),r.lineEnd()},polygonStart:function(){t.polygonStart(),e.polygonStart(),r.polygonStart()},polygonEnd:function(){t.polygonEnd(),e.polygonEnd(),r.polygonEnd()}}},n.precision=function(t){return arguments.length?(i.precision(t),o.precision(t),a.precision(t),n):i.precision()},n.scale=function(t){return arguments.length?(i.scale(t),o.scale(.35*t),a.scale(t),n.translate(i.translate())):i.scale()},n.translate=function(t){if(!arguments.length)return i.translate();var s=i.scale(),l=+t[0],f=+t[1];return 
e=i.translate(t).clipExtent([[l-.455*s,f-.238*s],[l+.455*s,f+.238*s]]).stream(c).point,r=o.translate([l-.307*s,f+.201*s]).clipExtent([[l-.425*s+Aa,f+.12*s+Aa],[l-.214*s-Aa,f+.234*s-Aa]]).stream(c).point,u=a.translate([l-.205*s,f+.212*s]).clipExtent([[l-.214*s+Aa,f+.166*s+Aa],[l-.115*s-Aa,f+.234*s-Aa]]).stream(c).point,n},n.scale(1070)};var Cc,Nc,Lc,Tc,qc,zc,Rc={point:g,lineStart:g,lineEnd:g,polygonStart:function(){Nc=0,Rc.lineStart=Fe},polygonEnd:function(){Rc.lineStart=Rc.lineEnd=Rc.point=g,Cc+=oa(Nc/2)}},Dc={point:Oe,lineStart:g,lineEnd:g,polygonStart:g,polygonEnd:g},Pc={point:Ze,lineStart:Ve,lineEnd:Xe,polygonStart:function(){Pc.lineStart=$e},polygonEnd:function(){Pc.point=Ze,Pc.lineStart=Ve,Pc.lineEnd=Xe}};Xo.geo.path=function(){function n(n){return n&&("function"==typeof a&&i.pointRadius(+a.apply(this,arguments)),o&&o.valid||(o=u(i)),Xo.geo.stream(n,o)),i.result()}function t(){return o=null,n}var e,r,u,i,o,a=4.5;return n.area=function(n){return Cc=0,Xo.geo.stream(n,u(Rc)),Cc},n.centroid=function(n){return dc=mc=yc=xc=Mc=_c=bc=wc=Sc=0,Xo.geo.stream(n,u(Pc)),Sc?[bc/Sc,wc/Sc]:_c?[xc/_c,Mc/_c]:yc?[dc/yc,mc/yc]:[0/0,0/0]},n.bounds=function(n){return qc=zc=-(Lc=Tc=1/0),Xo.geo.stream(n,u(Dc)),[[Lc,Tc],[qc,zc]]},n.projection=function(n){return arguments.length?(u=(e=n)?n.stream||Je(n):bt,t()):e},n.context=function(n){return arguments.length?(i=null==(r=n)?new Ye:new Be(n),"function"!=typeof a&&i.pointRadius(a),t()):r},n.pointRadius=function(t){return arguments.length?(a="function"==typeof t?t:(i.pointRadius(+t),+t),n):a},n.projection(Xo.geo.albersUsa()).context(null)},Xo.geo.transform=function(n){return{stream:function(t){var e=new Ge(t);for(var r in n)e[r]=n[r];return 
e}}},Ge.prototype={point:function(n,t){this.stream.point(n,t)},sphere:function(){this.stream.sphere()},lineStart:function(){this.stream.lineStart()},lineEnd:function(){this.stream.lineEnd()},polygonStart:function(){this.stream.polygonStart()},polygonEnd:function(){this.stream.polygonEnd()}},Xo.geo.projection=Qe,Xo.geo.projectionMutator=nr,(Xo.geo.equirectangular=function(){return Qe(er)}).raw=er.invert=er,Xo.geo.rotation=function(n){function t(t){return t=n(t[0]*Na,t[1]*Na),t[0]*=La,t[1]*=La,t}return n=ur(n[0]%360*Na,n[1]*Na,n.length>2?n[2]*Na:0),t.invert=function(t){return t=n.invert(t[0]*Na,t[1]*Na),t[0]*=La,t[1]*=La,t},t},rr.invert=er,Xo.geo.circle=function(){function n(){var n="function"==typeof r?r.apply(this,arguments):r,t=ur(-n[0]*Na,-n[1]*Na,0).invert,u=[];return e(null,null,1,{point:function(n,e){u.push(n=t(n,e)),n[0]*=La,n[1]*=La}}),{type:"Polygon",coordinates:[u]}}var t,e,r=[0,0],u=6;return n.origin=function(t){return arguments.length?(r=t,n):r},n.angle=function(r){return arguments.length?(e=cr((t=+r)*Na,u*Na),n):t},n.precision=function(r){return arguments.length?(e=cr(t*Na,(u=+r)*Na),n):u},n.angle(90)},Xo.geo.distance=function(n,t){var e,r=(t[0]-n[0])*Na,u=n[1]*Na,i=t[1]*Na,o=Math.sin(r),a=Math.cos(r),c=Math.sin(u),s=Math.cos(u),l=Math.sin(i),f=Math.cos(i);return Math.atan2(Math.sqrt((e=f*o)*e+(e=s*l-c*f*a)*e),c*l+s*f*a)},Xo.geo.graticule=function(){function n(){return{type:"MultiLineString",coordinates:t()}}function t(){return Xo.range(Math.ceil(i/d)*d,u,d).map(h).concat(Xo.range(Math.ceil(s/m)*m,c,m).map(g)).concat(Xo.range(Math.ceil(r/p)*p,e,p).filter(function(n){return oa(n%d)>Aa}).map(l)).concat(Xo.range(Math.ceil(a/v)*v,o,v).filter(function(n){return oa(n%m)>Aa}).map(f))}var e,r,u,i,o,a,c,s,l,f,h,g,p=10,v=p,d=90,m=360,y=2.5;return n.lines=function(){return 
t().map(function(n){return{type:"LineString",coordinates:n}})},n.outline=function(){return{type:"Polygon",coordinates:[h(i).concat(g(c).slice(1),h(u).reverse().slice(1),g(s).reverse().slice(1))]}},n.extent=function(t){return arguments.length?n.majorExtent(t).minorExtent(t):n.minorExtent()},n.majorExtent=function(t){return arguments.length?(i=+t[0][0],u=+t[1][0],s=+t[0][1],c=+t[1][1],i>u&&(t=i,i=u,u=t),s>c&&(t=s,s=c,c=t),n.precision(y)):[[i,s],[u,c]]},n.minorExtent=function(t){return arguments.length?(r=+t[0][0],e=+t[1][0],a=+t[0][1],o=+t[1][1],r>e&&(t=r,r=e,e=t),a>o&&(t=a,a=o,o=t),n.precision(y)):[[r,a],[e,o]]},n.step=function(t){return arguments.length?n.majorStep(t).minorStep(t):n.minorStep()},n.majorStep=function(t){return arguments.length?(d=+t[0],m=+t[1],n):[d,m]},n.minorStep=function(t){return arguments.length?(p=+t[0],v=+t[1],n):[p,v]},n.precision=function(t){return arguments.length?(y=+t,l=lr(a,o,90),f=fr(r,e,y),h=lr(s,c,90),g=fr(i,u,y),n):y},n.majorExtent([[-180,-90+Aa],[180,90-Aa]]).minorExtent([[-180,-80-Aa],[180,80+Aa]])},Xo.geo.greatArc=function(){function n(){return{type:"LineString",coordinates:[t||r.apply(this,arguments),e||u.apply(this,arguments)]}}var t,e,r=hr,u=gr;return n.distance=function(){return Xo.geo.distance(t||r.apply(this,arguments),e||u.apply(this,arguments))},n.source=function(e){return arguments.length?(r=e,t="function"==typeof e?null:e,n):r},n.target=function(t){return arguments.length?(u=t,e="function"==typeof t?null:t,n):u},n.precision=function(){return arguments.length?n:0},n},Xo.geo.interpolate=function(n,t){return pr(n[0]*Na,n[1]*Na,t[0]*Na,t[1]*Na)},Xo.geo.length=function(n){return Uc=0,Xo.geo.stream(n,jc),Uc};var Uc,jc={sphere:g,point:g,lineStart:vr,lineEnd:g,polygonStart:g,polygonEnd:g},Hc=dr(function(n){return Math.sqrt(2/(1+n))},function(n){return 2*Math.asin(n/2)});(Xo.geo.azimuthalEqualArea=function(){return Qe(Hc)}).raw=Hc;var Fc=dr(function(n){var t=Math.acos(n);return 
t&&t/Math.sin(t)},bt);(Xo.geo.azimuthalEquidistant=function(){return Qe(Fc)}).raw=Fc,(Xo.geo.conicConformal=function(){return je(mr)}).raw=mr,(Xo.geo.conicEquidistant=function(){return je(yr)}).raw=yr;var Oc=dr(function(n){return 1/n},Math.atan);(Xo.geo.gnomonic=function(){return Qe(Oc)}).raw=Oc,xr.invert=function(n,t){return[n,2*Math.atan(Math.exp(t))-Ea]},(Xo.geo.mercator=function(){return Mr(xr)}).raw=xr;var Yc=dr(function(){return 1},Math.asin);(Xo.geo.orthographic=function(){return Qe(Yc)}).raw=Yc;var Ic=dr(function(n){return 1/(1+n)},function(n){return 2*Math.atan(n)});(Xo.geo.stereographic=function(){return Qe(Ic)}).raw=Ic,_r.invert=function(n,t){return[-t,2*Math.atan(Math.exp(n))-Ea]},(Xo.geo.transverseMercator=function(){var n=Mr(_r),t=n.center,e=n.rotate;return n.center=function(n){return n?t([-n[1],n[0]]):(n=t(),[-n[1],n[0]])},n.rotate=function(n){return n?e([n[0],n[1],n.length>2?n[2]+90:90]):(n=e(),[n[0],n[1],n[2]-90])},n.rotate([0,0])}).raw=_r,Xo.geom={},Xo.geom.hull=function(n){function t(n){if(n.length<3)return[];var t,u=_t(e),i=_t(r),o=n.length,a=[],c=[];for(t=0;o>t;t++)a.push([+u.call(this,n[t],t),+i.call(this,n[t],t),t]);for(a.sort(kr),t=0;o>t;t++)c.push([a[t][0],-a[t][1]]);var s=Sr(a),l=Sr(c),f=l[0]===s[0],h=l[l.length-1]===s[s.length-1],g=[];for(t=s.length-1;t>=0;--t)g.push(n[a[s[t]][2]]);for(t=+f;t<l.length-h;++t)g.push(n[a[l[t]][2]]);return g}var e=br,r=wr;return arguments.length?t(n):(t.x=function(n){return arguments.length?(e=n,t):e},t.y=function(n){return arguments.length?(r=n,t):r},t)},Xo.geom.polygon=function(n){return fa(n,Zc),n};var Zc=Xo.geom.polygon.prototype=[];Zc.area=function(){for(var n,t=-1,e=this.length,r=this[e-1],u=0;++t<e;)n=r,r=this[t],u+=n[1]*r[0]-n[0]*r[1];return.5*u},Zc.centroid=function(n){var t,e,r=-1,u=this.length,i=0,o=0,a=this[u-1];for(arguments.length||(n=-1/(6*this.area()));++r<u;)t=a,a=this[r],e=t[0]*a[1]-a[0]*t[1],i+=(t[0]+a[0])*e,o+=(t[1]+a[1])*e;return[i*n,o*n]},Zc.clip=function(n){for(var 
t,e,r,u,i,o,a=Cr(n),c=-1,s=this.length-Cr(this),l=this[s-1];++c<s;){for(t=n.slice(),n.length=0,u=this[c],i=t[(r=t.length-a)-1],e=-1;++e<r;)o=t[e],Er(o,l,u)?(Er(i,l,u)||n.push(Ar(i,o,l,u)),n.push(o)):Er(i,l,u)&&n.push(Ar(i,o,l,u)),i=o;a&&n.push(n[0]),l=u}return n};var Vc,Xc,$c,Bc,Wc,Jc=[],Gc=[];Pr.prototype.prepare=function(){for(var n,t=this.edges,e=t.length;e--;)n=t[e].edge,n.b&&n.a||t.splice(e,1);return t.sort(jr),t.length},Br.prototype={start:function(){return this.edge.l===this.site?this.edge.a:this.edge.b},end:function(){return this.edge.l===this.site?this.edge.b:this.edge.a}},Wr.prototype={insert:function(n,t){var e,r,u;if(n){if(t.P=n,t.N=n.N,n.N&&(n.N.P=t),n.N=t,n.R){for(n=n.R;n.L;)n=n.L;n.L=t}else n.R=t;e=n}else this._?(n=Qr(this._),t.P=null,t.N=n,n.P=n.L=t,e=n):(t.P=t.N=null,this._=t,e=null);for(t.L=t.R=null,t.U=e,t.C=!0,n=t;e&&e.C;)r=e.U,e===r.L?(u=r.R,u&&u.C?(e.C=u.C=!1,r.C=!0,n=r):(n===e.R&&(Gr(this,e),n=e,e=n.U),e.C=!1,r.C=!0,Kr(this,r))):(u=r.L,u&&u.C?(e.C=u.C=!1,r.C=!0,n=r):(n===e.L&&(Kr(this,e),n=e,e=n.U),e.C=!1,r.C=!0,Gr(this,r))),e=n.U;this._.C=!1},remove:function(n){n.N&&(n.N.P=n.P),n.P&&(n.P.N=n.N),n.N=n.P=null;var t,e,r,u=n.U,i=n.L,o=n.R;if(e=i?o?Qr(o):i:o,u?u.L===n?u.L=e:u.R=e:this._=e,i&&o?(r=e.C,e.C=n.C,e.L=i,i.U=e,e!==o?(u=e.U,e.U=n.U,n=e.R,u.L=n,e.R=o,o.U=e):(e.U=u,u=e,n=e.R)):(r=n.C,n=e),n&&(n.U=u),!r){if(n&&n.C)return n.C=!1,void 0;do{if(n===this._)break;if(n===u.L){if(t=u.R,t.C&&(t.C=!1,u.C=!0,Gr(this,u),t=u.R),t.L&&t.L.C||t.R&&t.R.C){t.R&&t.R.C||(t.L.C=!1,t.C=!0,Kr(this,t),t=u.R),t.C=u.C,u.C=t.R.C=!1,Gr(this,u),n=this._;break}}else if(t=u.L,t.C&&(t.C=!1,u.C=!0,Kr(this,u),t=u.L),t.L&&t.L.C||t.R&&t.R.C){t.L&&t.L.C||(t.R.C=!1,t.C=!0,Gr(this,t),t=u.L),t.C=u.C,u.C=t.L.C=!1,Kr(this,u),n=this._;break}t.C=!0,n=u,u=u.U}while(!n.C);n&&(n.C=!1)}}},Xo.geom.voronoi=function(n){function t(n){var t=new Array(n.length),r=a[0][0],u=a[0][1],i=a[1][0],o=a[1][1];return nu(e(n),a).cells.forEach(function(e,a){var 
c=e.edges,s=e.site,l=t[a]=c.length?c.map(function(n){var t=n.start();return[t.x,t.y]}):s.x>=r&&s.x<=i&&s.y>=u&&s.y<=o?[[r,o],[i,o],[i,u],[r,u]]:[];l.point=n[a]}),t}function e(n){return n.map(function(n,t){return{x:Math.round(i(n,t)/Aa)*Aa,y:Math.round(o(n,t)/Aa)*Aa,i:t}})}var r=br,u=wr,i=r,o=u,a=Kc;return n?t(n):(t.links=function(n){return nu(e(n)).edges.filter(function(n){return n.l&&n.r}).map(function(t){return{source:n[t.l.i],target:n[t.r.i]}})},t.triangles=function(n){var t=[];return nu(e(n)).cells.forEach(function(e,r){for(var u,i,o=e.site,a=e.edges.sort(jr),c=-1,s=a.length,l=a[s-1].edge,f=l.l===o?l.r:l.l;++c<s;)u=l,i=f,l=a[c].edge,f=l.l===o?l.r:l.l,r<i.i&&r<f.i&&eu(o,i,f)<0&&t.push([n[r],n[i.i],n[f.i]])}),t},t.x=function(n){return arguments.length?(i=_t(r=n),t):r},t.y=function(n){return arguments.length?(o=_t(u=n),t):u},t.clipExtent=function(n){return arguments.length?(a=null==n?Kc:n,t):a===Kc?null:a},t.size=function(n){return arguments.length?t.clipExtent(n&&[[0,0],n]):a===Kc?null:a&&a[1]},t)};var Kc=[[-1e6,-1e6],[1e6,1e6]];Xo.geom.delaunay=function(n){return Xo.geom.voronoi().triangles(n)},Xo.geom.quadtree=function(n,t,e,r,u){function i(n){function i(n,t,e,r,u,i,o,a){if(!isNaN(e)&&!isNaN(r))if(n.leaf){var c=n.x,l=n.y;if(null!=c)if(oa(c-e)+oa(l-r)<.01)s(n,t,e,r,u,i,o,a);else{var f=n.point;n.x=n.y=n.point=null,s(n,f,c,l,u,i,o,a),s(n,t,e,r,u,i,o,a)}else n.x=e,n.y=r,n.point=t}else s(n,t,e,r,u,i,o,a)}function s(n,t,e,r,u,o,a,c){var s=.5*(u+a),l=.5*(o+c),f=e>=s,h=r>=l,g=(h<<1)+f;n.leaf=!1,n=n.nodes[g]||(n.nodes[g]=iu()),f?u=s:a=s,h?o=l:c=l,i(n,t,e,r,u,o,a,c)}var l,f,h,g,p,v,d,m,y,x=_t(a),M=_t(c);if(null!=t)v=t,d=e,m=r,y=u;else if(m=y=-(v=d=1/0),f=[],h=[],p=n.length,o)for(g=0;p>g;++g)l=n[g],l.x<v&&(v=l.x),l.y<d&&(d=l.y),l.x>m&&(m=l.x),l.y>y&&(y=l.y),f.push(l.x),h.push(l.y);else for(g=0;p>g;++g){var _=+x(l=n[g],g),b=+M(l,g);v>_&&(v=_),d>b&&(d=b),_>m&&(m=_),b>y&&(y=b),f.push(_),h.push(b)}var w=m-v,S=y-d;w>S?y=d+w:m=v+S;var 
k=iu();if(k.add=function(n){i(k,n,+x(n,++g),+M(n,g),v,d,m,y)},k.visit=function(n){ou(n,k,v,d,m,y)},g=-1,null==t){for(;++g<p;)i(k,n[g],f[g],h[g],v,d,m,y);--g}else n.forEach(k.add);return f=h=n=l=null,k}var o,a=br,c=wr;return(o=arguments.length)?(a=ru,c=uu,3===o&&(u=e,r=t,e=t=0),i(n)):(i.x=function(n){return arguments.length?(a=n,i):a},i.y=function(n){return arguments.length?(c=n,i):c},i.extent=function(n){return arguments.length?(null==n?t=e=r=u=null:(t=+n[0][0],e=+n[0][1],r=+n[1][0],u=+n[1][1]),i):null==t?null:[[t,e],[r,u]]},i.size=function(n){return arguments.length?(null==n?t=e=r=u=null:(t=e=0,r=+n[0],u=+n[1]),i):null==t?null:[r-t,u-e]},i)},Xo.interpolateRgb=au,Xo.interpolateObject=cu,Xo.interpolateNumber=su,Xo.interpolateString=lu;var Qc=/[-+]?(?:\d+\.?\d*|\.?\d+)(?:[eE][-+]?\d+)?/g;Xo.interpolate=fu,Xo.interpolators=[function(n,t){var e=typeof t;return("string"===e?Va.has(t)||/^(#|rgb\(|hsl\()/.test(t)?au:lu:t instanceof G?au:"object"===e?Array.isArray(t)?hu:cu:su)(n,t)}],Xo.interpolateArray=hu;var ns=function(){return bt},ts=Xo.map({linear:ns,poly:xu,quad:function(){return du},cubic:function(){return mu},sin:function(){return Mu},exp:function(){return _u},circle:function(){return bu},elastic:wu,back:Su,bounce:function(){return ku}}),es=Xo.map({"in":bt,out:pu,"in-out":vu,"out-in":function(n){return vu(pu(n))}});Xo.ease=function(n){var t=n.indexOf("-"),e=t>=0?n.substring(0,t):n,r=t>=0?n.substring(t+1):"in";return e=ts.get(e)||ns,r=es.get(r)||bt,gu(r(e.apply(null,$o.call(arguments,1))))},Xo.interpolateHcl=Eu,Xo.interpolateHsl=Au,Xo.interpolateLab=Cu,Xo.interpolateRound=Nu,Xo.transform=function(n){var t=Wo.createElementNS(Xo.ns.prefix.svg,"g");return(Xo.transform=function(n){if(null!=n){t.setAttribute("transform",n);var e=t.transform.baseVal.consolidate()}return new Lu(e?e.matrix:rs)})(n)},Lu.prototype.toString=function(){return"translate("+this.translate+")rotate("+this.rotate+")skewX("+this.skew+")scale("+this.scale+")"};var 
rs={a:1,b:0,c:0,d:1,e:0,f:0};Xo.interpolateTransform=Ru,Xo.layout={},Xo.layout.bundle=function(){return function(n){for(var t=[],e=-1,r=n.length;++e<r;)t.push(Uu(n[e]));return t}},Xo.layout.chord=function(){function n(){var n,s,f,h,g,p={},v=[],d=Xo.range(i),m=[];for(e=[],r=[],n=0,h=-1;++h<i;){for(s=0,g=-1;++g<i;)s+=u[h][g];v.push(s),m.push(Xo.range(i)),n+=s}for(o&&d.sort(function(n,t){return o(v[n],v[t])}),a&&m.forEach(function(n,t){n.sort(function(n,e){return a(u[t][n],u[t][e])})}),n=(ka-l*i)/n,s=0,h=-1;++h<i;){for(f=s,g=-1;++g<i;){var y=d[h],x=m[y][g],M=u[y][x],_=s,b=s+=M*n;p[y+"-"+x]={index:y,subindex:x,startAngle:_,endAngle:b,value:M}}r[y]={index:y,startAngle:f,endAngle:s,value:(s-f)/n},s+=l}for(h=-1;++h<i;)for(g=h-1;++g<i;){var w=p[h+"-"+g],S=p[g+"-"+h];(w.value||S.value)&&e.push(w.value<S.value?{source:S,target:w}:{source:w,target:S})}c&&t()}function t(){e.sort(function(n,t){return c((n.source.value+n.target.value)/2,(t.source.value+t.target.value)/2)})}var e,r,u,i,o,a,c,s={},l=0;return s.matrix=function(n){return arguments.length?(i=(u=n)&&u.length,e=r=null,s):u},s.padding=function(n){return arguments.length?(l=n,e=r=null,s):l},s.sortGroups=function(n){return arguments.length?(o=n,e=r=null,s):o},s.sortSubgroups=function(n){return arguments.length?(a=n,e=null,s):a},s.sortChords=function(n){return arguments.length?(c=n,e&&t(),s):c},s.chords=function(){return e||n(),e},s.groups=function(){return r||n(),r},s},Xo.layout.force=function(){function n(n){return function(t,e,r,u){if(t.point!==n){var i=t.cx-n.x,o=t.cy-n.y,a=u-e,c=i*i+o*o;if(c>a*a/d){if(p>c){var s=t.charge/c;n.px-=i*s,n.py-=o*s}return!0}if(t.point&&c&&p>c){var s=t.pointCharge/c;n.px-=i*s,n.py-=o*s}}return!t.charge}}function t(n){n.px=Xo.event.x,n.py=Xo.event.y,a.resume()}var e,r,u,i,o,a={},c=Xo.dispatch("start","tick","end"),s=[1,1],l=.9,f=us,h=is,g=-30,p=os,v=.1,d=.64,m=[],y=[];return a.tick=function(){if((r*=.99)<.005)return c.end({type:"end",alpha:r=0}),!0;var 
t,e,a,f,h,p,d,x,M,_=m.length,b=y.length;for(e=0;b>e;++e)a=y[e],f=a.source,h=a.target,x=h.x-f.x,M=h.y-f.y,(p=x*x+M*M)&&(p=r*i[e]*((p=Math.sqrt(p))-u[e])/p,x*=p,M*=p,h.x-=x*(d=f.weight/(h.weight+f.weight)),h.y-=M*d,f.x+=x*(d=1-d),f.y+=M*d);if((d=r*v)&&(x=s[0]/2,M=s[1]/2,e=-1,d))for(;++e<_;)a=m[e],a.x+=(x-a.x)*d,a.y+=(M-a.y)*d;if(g)for(Zu(t=Xo.geom.quadtree(m),r,o),e=-1;++e<_;)(a=m[e]).fixed||t.visit(n(a));for(e=-1;++e<_;)a=m[e],a.fixed?(a.x=a.px,a.y=a.py):(a.x-=(a.px-(a.px=a.x))*l,a.y-=(a.py-(a.py=a.y))*l);c.tick({type:"tick",alpha:r})},a.nodes=function(n){return arguments.length?(m=n,a):m},a.links=function(n){return arguments.length?(y=n,a):y},a.size=function(n){return arguments.length?(s=n,a):s},a.linkDistance=function(n){return arguments.length?(f="function"==typeof n?n:+n,a):f},a.distance=a.linkDistance,a.linkStrength=function(n){return arguments.length?(h="function"==typeof n?n:+n,a):h},a.friction=function(n){return arguments.length?(l=+n,a):l},a.charge=function(n){return arguments.length?(g="function"==typeof n?n:+n,a):g},a.chargeDistance=function(n){return arguments.length?(p=n*n,a):Math.sqrt(p)},a.gravity=function(n){return arguments.length?(v=+n,a):v},a.theta=function(n){return arguments.length?(d=n*n,a):Math.sqrt(d)},a.alpha=function(n){return arguments.length?(n=+n,r?r=n>0?n:0:n>0&&(c.start({type:"start",alpha:r=n}),Xo.timer(a.tick)),a):r},a.start=function(){function n(n,r){if(!e){for(e=new Array(c),a=0;c>a;++a)e[a]=[];for(a=0;s>a;++a){var u=y[a];e[u.source.index].push(u.target),e[u.target.index].push(u.source)}}for(var i,o=e[t],a=-1,s=o.length;++a<s;)if(!isNaN(i=o[a][n]))return i;return Math.random()*r}var t,e,r,c=m.length,l=y.length,p=s[0],v=s[1];for(t=0;c>t;++t)(r=m[t]).index=t,r.weight=0;for(t=0;l>t;++t)r=y[t],"number"==typeof r.source&&(r.source=m[r.source]),"number"==typeof 
r.target&&(r.target=m[r.target]),++r.source.weight,++r.target.weight;for(t=0;c>t;++t)r=m[t],isNaN(r.x)&&(r.x=n("x",p)),isNaN(r.y)&&(r.y=n("y",v)),isNaN(r.px)&&(r.px=r.x),isNaN(r.py)&&(r.py=r.y);if(u=[],"function"==typeof f)for(t=0;l>t;++t)u[t]=+f.call(this,y[t],t);else for(t=0;l>t;++t)u[t]=f;if(i=[],"function"==typeof h)for(t=0;l>t;++t)i[t]=+h.call(this,y[t],t);else for(t=0;l>t;++t)i[t]=h;if(o=[],"function"==typeof g)for(t=0;c>t;++t)o[t]=+g.call(this,m[t],t);else for(t=0;c>t;++t)o[t]=g;return a.resume()},a.resume=function(){return a.alpha(.1)},a.stop=function(){return a.alpha(0)},a.drag=function(){return e||(e=Xo.behavior.drag().origin(bt).on("dragstart.force",Fu).on("drag.force",t).on("dragend.force",Ou)),arguments.length?(this.on("mouseover.force",Yu).on("mouseout.force",Iu).call(e),void 0):e},Xo.rebind(a,c,"on")};var us=20,is=1,os=1/0;Xo.layout.hierarchy=function(){function n(t,o,a){var c=u.call(e,t,o);if(t.depth=o,a.push(t),c&&(s=c.length)){for(var s,l,f=-1,h=t.children=new Array(s),g=0,p=o+1;++f<s;)l=h[f]=n(c[f],p,a),l.parent=t,g+=l.value;r&&h.sort(r),i&&(t.value=g)}else delete t.children,i&&(t.value=+i.call(e,t,o)||0);return t}function t(n,r){var u=n.children,o=0;if(u&&(a=u.length))for(var a,c=-1,s=r+1;++c<a;)o+=t(u[c],s);else i&&(o=+i.call(e,n,r)||0);return i&&(n.value=o),o}function e(t){var e=[];return n(t,0,e),e}var r=Bu,u=Xu,i=$u;return e.sort=function(n){return arguments.length?(r=n,e):r},e.children=function(n){return arguments.length?(u=n,e):u},e.value=function(n){return arguments.length?(i=n,e):i},e.revalue=function(n){return t(n,0),n},e},Xo.layout.partition=function(){function n(t,e,r,u){var i=t.children;if(t.x=e,t.y=t.depth*u,t.dx=r,t.dy=u,i&&(o=i.length)){var o,a,c,s=-1;for(r=t.value?r/t.value:0;++s<o;)n(a=i[s],e,c=a.value*r,u),e+=c}}function t(n){var e=n.children,r=0;if(e&&(u=e.length))for(var u,i=-1;++i<u;)r=Math.max(r,t(e[i]));return 1+r}function e(e,i){var o=r.call(this,e,i);return n(o[0],0,u[0],u[1]/t(o[0])),o}var 
r=Xo.layout.hierarchy(),u=[1,1];return e.size=function(n){return arguments.length?(u=n,e):u},Vu(e,r)},Xo.layout.pie=function(){function n(i){var o=i.map(function(e,r){return+t.call(n,e,r)}),a=+("function"==typeof r?r.apply(this,arguments):r),c=(("function"==typeof u?u.apply(this,arguments):u)-a)/Xo.sum(o),s=Xo.range(i.length);null!=e&&s.sort(e===as?function(n,t){return o[t]-o[n]}:function(n,t){return e(i[n],i[t])});var l=[];return s.forEach(function(n){var t;l[n]={data:i[n],value:t=o[n],startAngle:a,endAngle:a+=t*c}}),l}var t=Number,e=as,r=0,u=ka;return n.value=function(e){return arguments.length?(t=e,n):t},n.sort=function(t){return arguments.length?(e=t,n):e},n.startAngle=function(t){return arguments.length?(r=t,n):r},n.endAngle=function(t){return arguments.length?(u=t,n):u},n};var as={};Xo.layout.stack=function(){function n(a,c){var s=a.map(function(e,r){return t.call(n,e,r)}),l=s.map(function(t){return t.map(function(t,e){return[i.call(n,t,e),o.call(n,t,e)]})}),f=e.call(n,l,c);s=Xo.permute(s,f),l=Xo.permute(l,f);var h,g,p,v=r.call(n,l,c),d=s.length,m=s[0].length;for(g=0;m>g;++g)for(u.call(n,s[0][g],p=v[g],l[0][g][1]),h=1;d>h;++h)u.call(n,s[h][g],p+=l[h-1][g][1],l[h][g][1]);return a}var t=bt,e=Qu,r=ni,u=Ku,i=Ju,o=Gu;return n.values=function(e){return arguments.length?(t=e,n):t},n.order=function(t){return arguments.length?(e="function"==typeof t?t:cs.get(t)||Qu,n):e},n.offset=function(t){return arguments.length?(r="function"==typeof t?t:ss.get(t)||ni,n):r},n.x=function(t){return arguments.length?(i=t,n):i},n.y=function(t){return arguments.length?(o=t,n):o},n.out=function(t){return arguments.length?(u=t,n):u},n};var cs=Xo.map({"inside-out":function(n){var t,e,r=n.length,u=n.map(ti),i=n.map(ei),o=Xo.range(r).sort(function(n,t){return u[n]-u[t]}),a=0,c=0,s=[],l=[];for(t=0;r>t;++t)e=o[t],c>a?(a+=i[e],s.push(e)):(c+=i[e],l.push(e));return l.reverse().concat(s)},reverse:function(n){return Xo.range(n.length).reverse()},"default":Qu}),ss=Xo.map({silhouette:function(n){var 
t,e,r,u=n.length,i=n[0].length,o=[],a=0,c=[];for(e=0;i>e;++e){for(t=0,r=0;u>t;t++)r+=n[t][e][1];r>a&&(a=r),o.push(r)}for(e=0;i>e;++e)c[e]=(a-o[e])/2;return c},wiggle:function(n){var t,e,r,u,i,o,a,c,s,l=n.length,f=n[0],h=f.length,g=[];for(g[0]=c=s=0,e=1;h>e;++e){for(t=0,u=0;l>t;++t)u+=n[t][e][1];for(t=0,i=0,a=f[e][0]-f[e-1][0];l>t;++t){for(r=0,o=(n[t][e][1]-n[t][e-1][1])/(2*a);t>r;++r)o+=(n[r][e][1]-n[r][e-1][1])/a;i+=o*n[t][e][1]}g[e]=c-=u?i/u*a:0,s>c&&(s=c)}for(e=0;h>e;++e)g[e]-=s;return g},expand:function(n){var t,e,r,u=n.length,i=n[0].length,o=1/u,a=[];for(e=0;i>e;++e){for(t=0,r=0;u>t;t++)r+=n[t][e][1];if(r)for(t=0;u>t;t++)n[t][e][1]/=r;else for(t=0;u>t;t++)n[t][e][1]=o}for(e=0;i>e;++e)a[e]=0;return a},zero:ni});Xo.layout.histogram=function(){function n(n,i){for(var o,a,c=[],s=n.map(e,this),l=r.call(this,s,i),f=u.call(this,l,s,i),i=-1,h=s.length,g=f.length-1,p=t?1:1/h;++i<g;)o=c[i]=[],o.dx=f[i+1]-(o.x=f[i]),o.y=0;if(g>0)for(i=-1;++i<h;)a=s[i],a>=l[0]&&a<=l[1]&&(o=c[Xo.bisect(f,a,1,g)-1],o.y+=p,o.push(n[i]));return c}var t=!0,e=Number,r=oi,u=ui;return n.value=function(t){return arguments.length?(e=t,n):e},n.range=function(t){return arguments.length?(r=_t(t),n):r},n.bins=function(t){return arguments.length?(u="number"==typeof t?function(n){return ii(n,t)}:_t(t),n):u},n.frequency=function(e){return arguments.length?(t=!!e,n):t},n},Xo.layout.tree=function(){function n(n,i){function o(n,t){var r=n.children,u=n._tree;if(r&&(i=r.length)){for(var i,a,s,l=r[0],f=l,h=-1;++h<i;)s=r[h],o(s,a),f=c(s,a,f),a=s;vi(n);var g=.5*(l._tree.prelim+s._tree.prelim);t?(u.prelim=t._tree.prelim+e(n,t),u.mod=u.prelim-g):u.prelim=g}else t&&(u.prelim=t._tree.prelim+e(n,t))}function a(n,t){n.x=n._tree.prelim+t;var e=n.children;if(e&&(r=e.length)){var r,u=-1;for(t+=n._tree.mod;++u<r;)a(e[u],t)}}function c(n,t,r){if(t){for(var 
u,i=n,o=n,a=t,c=n.parent.children[0],s=i._tree.mod,l=o._tree.mod,f=a._tree.mod,h=c._tree.mod;a=si(a),i=ci(i),a&&i;)c=ci(c),o=si(o),o._tree.ancestor=n,u=a._tree.prelim+f-i._tree.prelim-s+e(a,i),u>0&&(di(mi(a,n,r),n,u),s+=u,l+=u),f+=a._tree.mod,s+=i._tree.mod,h+=c._tree.mod,l+=o._tree.mod;a&&!si(o)&&(o._tree.thread=a,o._tree.mod+=f-l),i&&!ci(c)&&(c._tree.thread=i,c._tree.mod+=s-h,r=n)}return r}var s=t.call(this,n,i),l=s[0];pi(l,function(n,t){n._tree={ancestor:n,prelim:0,mod:0,change:0,shift:0,number:t?t._tree.number+1:0}}),o(l),a(l,-l._tree.prelim);var f=li(l,hi),h=li(l,fi),g=li(l,gi),p=f.x-e(f,h)/2,v=h.x+e(h,f)/2,d=g.depth||1;return pi(l,u?function(n){n.x*=r[0],n.y=n.depth*r[1],delete n._tree}:function(n){n.x=(n.x-p)/(v-p)*r[0],n.y=n.depth/d*r[1],delete n._tree}),s}var t=Xo.layout.hierarchy().sort(null).value(null),e=ai,r=[1,1],u=!1;return n.separation=function(t){return arguments.length?(e=t,n):e},n.size=function(t){return arguments.length?(u=null==(r=t),n):u?null:r},n.nodeSize=function(t){return arguments.length?(u=null!=(r=t),n):u?r:null},Vu(n,t)},Xo.layout.pack=function(){function n(n,i){var o=e.call(this,n,i),a=o[0],c=u[0],s=u[1],l=null==t?Math.sqrt:"function"==typeof t?t:function(){return t};if(a.x=a.y=0,pi(a,function(n){n.r=+l(n.value)}),pi(a,bi),r){var f=r*(t?1:Math.max(2*a.r/c,2*a.r/s))/2;pi(a,function(n){n.r+=f}),pi(a,bi),pi(a,function(n){n.r-=f})}return ki(a,c/2,s/2,t?1:1/Math.max(2*a.r/c,2*a.r/s)),o}var t,e=Xo.layout.hierarchy().sort(yi),r=0,u=[1,1];return n.size=function(t){return arguments.length?(u=t,n):u},n.radius=function(e){return arguments.length?(t=null==e||"function"==typeof e?e:+e,n):t},n.padding=function(t){return arguments.length?(r=+t,n):r},Vu(n,e)},Xo.layout.cluster=function(){function n(n,i){var o,a=t.call(this,n,i),c=a[0],s=0;pi(c,function(n){var t=n.children;t&&t.length?(n.x=Ci(t),n.y=Ai(t)):(n.x=o?s+=e(n,o):0,n.y=0,o=n)});var l=Ni(c),f=Li(c),h=l.x-e(l,f)/2,g=f.x+e(f,l)/2;return 
pi(c,u?function(n){n.x=(n.x-c.x)*r[0],n.y=(c.y-n.y)*r[1]}:function(n){n.x=(n.x-h)/(g-h)*r[0],n.y=(1-(c.y?n.y/c.y:1))*r[1]}),a}var t=Xo.layout.hierarchy().sort(null).value(null),e=ai,r=[1,1],u=!1;return n.separation=function(t){return arguments.length?(e=t,n):e},n.size=function(t){return arguments.length?(u=null==(r=t),n):u?null:r},n.nodeSize=function(t){return arguments.length?(u=null!=(r=t),n):u?r:null},Vu(n,t)},Xo.layout.treemap=function(){function n(n,t){for(var e,r,u=-1,i=n.length;++u<i;)r=(e=n[u]).value*(0>t?0:t),e.area=isNaN(r)||0>=r?0:r}function t(e){var i=e.children;if(i&&i.length){var o,a,c,s=f(e),l=[],h=i.slice(),p=1/0,v="slice"===g?s.dx:"dice"===g?s.dy:"slice-dice"===g?1&e.depth?s.dy:s.dx:Math.min(s.dx,s.dy);for(n(h,s.dx*s.dy/e.value),l.area=0;(c=h.length)>0;)l.push(o=h[c-1]),l.area+=o.area,"squarify"!==g||(a=r(l,v))<=p?(h.pop(),p=a):(l.area-=l.pop().area,u(l,v,s,!1),v=Math.min(s.dx,s.dy),l.length=l.area=0,p=1/0);l.length&&(u(l,v,s,!0),l.length=l.area=0),i.forEach(t)}}function e(t){var r=t.children;if(r&&r.length){var i,o=f(t),a=r.slice(),c=[];for(n(a,o.dx*o.dy/t.value),c.area=0;i=a.pop();)c.push(i),c.area+=i.area,null!=i.z&&(u(c,i.z?o.dx:o.dy,o,!a.length),c.length=c.area=0);r.forEach(e)}}function r(n,t){for(var e,r=n.area,u=0,i=1/0,o=-1,a=n.length;++o<a;)(e=n[o].area)&&(i>e&&(i=e),e>u&&(u=e));return r*=r,t*=t,r?Math.max(t*u*p/r,r/(t*i*p)):1/0}function u(n,t,e,r){var u,i=-1,o=n.length,a=e.x,s=e.y,l=t?c(n.area/t):0;if(t==e.dx){for((r||l>e.dy)&&(l=e.dy);++i<o;)u=n[i],u.x=a,u.y=s,u.dy=l,a+=u.dx=Math.min(e.x+e.dx-a,l?c(u.area/l):0);u.z=!0,u.dx+=e.x+e.dx-a,e.y+=l,e.dy-=l}else{for((r||l>e.dx)&&(l=e.dx);++i<o;)u=n[i],u.x=a,u.y=s,u.dx=l,s+=u.dy=Math.min(e.y+e.dy-s,l?c(u.area/l):0);u.z=!1,u.dy+=e.y+e.dy-s,e.x+=l,e.dx-=l}}function i(r){var u=o||a(r),i=u[0];return i.x=0,i.y=0,i.dx=s[0],i.dy=s[1],o&&a.revalue(i),n([i],i.dx*i.dy/i.value),(o?e:t)(i),h&&(o=u),u}var o,a=Xo.layout.hierarchy(),c=Math.round,s=[1,1],l=null,f=Ti,h=!1,g="squarify",p=.5*(1+Math.sqrt(5));return 
i.size=function(n){return arguments.length?(s=n,i):s},i.padding=function(n){function t(t){var e=n.call(i,t,t.depth);return null==e?Ti(t):qi(t,"number"==typeof e?[e,e,e,e]:e)}function e(t){return qi(t,n)}if(!arguments.length)return l;var r;return f=null==(l=n)?Ti:"function"==(r=typeof n)?t:"number"===r?(n=[n,n,n,n],e):e,i},i.round=function(n){return arguments.length?(c=n?Math.round:Number,i):c!=Number},i.sticky=function(n){return arguments.length?(h=n,o=null,i):h},i.ratio=function(n){return arguments.length?(p=n,i):p},i.mode=function(n){return arguments.length?(g=n+"",i):g},Vu(i,a)},Xo.random={normal:function(n,t){var e=arguments.length;return 2>e&&(t=1),1>e&&(n=0),function(){var e,r,u;do e=2*Math.random()-1,r=2*Math.random()-1,u=e*e+r*r;while(!u||u>1);return n+t*e*Math.sqrt(-2*Math.log(u)/u)}},logNormal:function(){var n=Xo.random.normal.apply(Xo,arguments);return function(){return Math.exp(n())}},bates:function(n){var t=Xo.random.irwinHall(n);return function(){return t()/n}},irwinHall:function(n){return function(){for(var t=0,e=0;n>e;e++)t+=Math.random();return t}}},Xo.scale={};var ls={floor:bt,ceil:bt};Xo.scale.linear=function(){return Hi([0,1],[0,1],fu,!1)};var fs={s:1,g:1,p:1,r:1,e:1};Xo.scale.log=function(){return $i(Xo.scale.linear().domain([0,1]),10,!0,[1,10])};var hs=Xo.format(".0e"),gs={floor:function(n){return-Math.ceil(-n)},ceil:function(n){return-Math.floor(-n)}};Xo.scale.pow=function(){return Bi(Xo.scale.linear(),1,[0,1])},Xo.scale.sqrt=function(){return Xo.scale.pow().exponent(.5)},Xo.scale.ordinal=function(){return Ji([],{t:"range",a:[[]]})},Xo.scale.category10=function(){return Xo.scale.ordinal().range(ps)},Xo.scale.category20=function(){return Xo.scale.ordinal().range(vs)},Xo.scale.category20b=function(){return Xo.scale.ordinal().range(ds)},Xo.scale.category20c=function(){return Xo.scale.ordinal().range(ms)};var 
ps=[2062260,16744206,2924588,14034728,9725885,9197131,14907330,8355711,12369186,1556175].map(ht),vs=[2062260,11454440,16744206,16759672,2924588,10018698,14034728,16750742,9725885,12955861,9197131,12885140,14907330,16234194,8355711,13092807,12369186,14408589,1556175,10410725].map(ht),ds=[3750777,5395619,7040719,10264286,6519097,9216594,11915115,13556636,9202993,12426809,15186514,15190932,8666169,11356490,14049643,15177372,8077683,10834324,13528509,14589654].map(ht),ms=[3244733,7057110,10406625,13032431,15095053,16616764,16625259,16634018,3253076,7652470,10607003,13101504,7695281,10394312,12369372,14342891,6513507,9868950,12434877,14277081].map(ht);Xo.scale.quantile=function(){return Gi([],[])},Xo.scale.quantize=function(){return Ki(0,1,[0,1])},Xo.scale.threshold=function(){return Qi([.5],[0,1])},Xo.scale.identity=function(){return no([0,1])},Xo.svg={},Xo.svg.arc=function(){function n(){var n=t.apply(this,arguments),i=e.apply(this,arguments),o=r.apply(this,arguments)+ys,a=u.apply(this,arguments)+ys,c=(o>a&&(c=o,o=a,a=c),a-o),s=Sa>c?"0":"1",l=Math.cos(o),f=Math.sin(o),h=Math.cos(a),g=Math.sin(a);return c>=xs?n?"M0,"+i+"A"+i+","+i+" 0 1,1 0,"+-i+"A"+i+","+i+" 0 1,1 0,"+i+"M0,"+n+"A"+n+","+n+" 0 1,0 0,"+-n+"A"+n+","+n+" 0 1,0 0,"+n+"Z":"M0,"+i+"A"+i+","+i+" 0 1,1 0,"+-i+"A"+i+","+i+" 0 1,1 0,"+i+"Z":n?"M"+i*l+","+i*f+"A"+i+","+i+" 0 "+s+",1 "+i*h+","+i*g+"L"+n*h+","+n*g+"A"+n+","+n+" 0 "+s+",0 "+n*l+","+n*f+"Z":"M"+i*l+","+i*f+"A"+i+","+i+" 0 "+s+",1 "+i*h+","+i*g+"L0,0"+"Z"}var t=to,e=eo,r=ro,u=uo;return n.innerRadius=function(e){return arguments.length?(t=_t(e),n):t},n.outerRadius=function(t){return arguments.length?(e=_t(t),n):e},n.startAngle=function(t){return arguments.length?(r=_t(t),n):r},n.endAngle=function(t){return arguments.length?(u=_t(t),n):u},n.centroid=function(){var n=(t.apply(this,arguments)+e.apply(this,arguments))/2,i=(r.apply(this,arguments)+u.apply(this,arguments))/2+ys;return[Math.cos(i)*n,Math.sin(i)*n]},n};var 
ys=-Ea,xs=ka-Aa;Xo.svg.line=function(){return io(bt)};var Ms=Xo.map({linear:oo,"linear-closed":ao,step:co,"step-before":so,"step-after":lo,basis:mo,"basis-open":yo,"basis-closed":xo,bundle:Mo,cardinal:go,"cardinal-open":fo,"cardinal-closed":ho,monotone:Eo});Ms.forEach(function(n,t){t.key=n,t.closed=/-closed$/.test(n)});var _s=[0,2/3,1/3,0],bs=[0,1/3,2/3,0],ws=[0,1/6,2/3,1/6];Xo.svg.line.radial=function(){var n=io(Ao);return n.radius=n.x,delete n.x,n.angle=n.y,delete n.y,n},so.reverse=lo,lo.reverse=so,Xo.svg.area=function(){return Co(bt)},Xo.svg.area.radial=function(){var n=Co(Ao);return n.radius=n.x,delete n.x,n.innerRadius=n.x0,delete n.x0,n.outerRadius=n.x1,delete n.x1,n.angle=n.y,delete n.y,n.startAngle=n.y0,delete n.y0,n.endAngle=n.y1,delete n.y1,n},Xo.svg.chord=function(){function n(n,a){var c=t(this,i,n,a),s=t(this,o,n,a);return"M"+c.p0+r(c.r,c.p1,c.a1-c.a0)+(e(c,s)?u(c.r,c.p1,c.r,c.p0):u(c.r,c.p1,s.r,s.p0)+r(s.r,s.p1,s.a1-s.a0)+u(s.r,s.p1,c.r,c.p0))+"Z"}function t(n,t,e,r){var u=t.call(n,e,r),i=a.call(n,u,r),o=c.call(n,u,r)+ys,l=s.call(n,u,r)+ys;return{r:i,a0:o,a1:l,p0:[i*Math.cos(o),i*Math.sin(o)],p1:[i*Math.cos(l),i*Math.sin(l)]}}function e(n,t){return n.a0==t.a0&&n.a1==t.a1}function r(n,t,e){return"A"+n+","+n+" 0 "+ +(e>Sa)+",1 "+t}function u(n,t,e,r){return"Q 0,0 "+r}var i=hr,o=gr,a=No,c=ro,s=uo;return n.radius=function(t){return arguments.length?(a=_t(t),n):a},n.source=function(t){return arguments.length?(i=_t(t),n):i},n.target=function(t){return arguments.length?(o=_t(t),n):o},n.startAngle=function(t){return arguments.length?(c=_t(t),n):c},n.endAngle=function(t){return arguments.length?(s=_t(t),n):s},n},Xo.svg.diagonal=function(){function n(n,u){var i=t.call(this,n,u),o=e.call(this,n,u),a=(i.y+o.y)/2,c=[i,{x:i.x,y:a},{x:o.x,y:a},o];return c=c.map(r),"M"+c[0]+"C"+c[1]+" "+c[2]+" "+c[3]}var t=hr,e=gr,r=Lo;return n.source=function(e){return arguments.length?(t=_t(e),n):t},n.target=function(t){return 
arguments.length?(e=_t(t),n):e},n.projection=function(t){return arguments.length?(r=t,n):r},n},Xo.svg.diagonal.radial=function(){var n=Xo.svg.diagonal(),t=Lo,e=n.projection;return n.projection=function(n){return arguments.length?e(To(t=n)):t},n},Xo.svg.symbol=function(){function n(n,r){return(Ss.get(t.call(this,n,r))||Ro)(e.call(this,n,r))}var t=zo,e=qo;return n.type=function(e){return arguments.length?(t=_t(e),n):t},n.size=function(t){return arguments.length?(e=_t(t),n):e},n};var Ss=Xo.map({circle:Ro,cross:function(n){var t=Math.sqrt(n/5)/2;return"M"+-3*t+","+-t+"H"+-t+"V"+-3*t+"H"+t+"V"+-t+"H"+3*t+"V"+t+"H"+t+"V"+3*t+"H"+-t+"V"+t+"H"+-3*t+"Z"},diamond:function(n){var t=Math.sqrt(n/(2*Cs)),e=t*Cs;return"M0,"+-t+"L"+e+",0"+" 0,"+t+" "+-e+",0"+"Z"},square:function(n){var t=Math.sqrt(n)/2;return"M"+-t+","+-t+"L"+t+","+-t+" "+t+","+t+" "+-t+","+t+"Z"},"triangle-down":function(n){var t=Math.sqrt(n/As),e=t*As/2;return"M0,"+e+"L"+t+","+-e+" "+-t+","+-e+"Z"},"triangle-up":function(n){var t=Math.sqrt(n/As),e=t*As/2;return"M0,"+-e+"L"+t+","+e+" "+-t+","+e+"Z"}});Xo.svg.symbolTypes=Ss.keys();var ks,Es,As=Math.sqrt(3),Cs=Math.tan(30*Na),Ns=[],Ls=0;Ns.call=da.call,Ns.empty=da.empty,Ns.node=da.node,Ns.size=da.size,Xo.transition=function(n){return arguments.length?ks?n.transition():n:xa.transition()},Xo.transition.prototype=Ns,Ns.select=function(n){var t,e,r,u=this.id,i=[];n=M(n);for(var o=-1,a=this.length;++o<a;){i.push(t=[]);for(var c=this[o],s=-1,l=c.length;++s<l;)(r=c[s])&&(e=n.call(r,r.__data__,s,o))?("__data__"in r&&(e.__data__=r.__data__),jo(e,s,u,r.__transition__[u]),t.push(e)):t.push(null)}return Do(i,u)},Ns.selectAll=function(n){var t,e,r,u,i,o=this.id,a=[];n=_(n);for(var c=-1,s=this.length;++c<s;)for(var l=this[c],f=-1,h=l.length;++f<h;)if(r=l[f]){i=r.__transition__[o],e=n.call(r,r.__data__,f,c),a.push(t=[]);for(var g=-1,p=e.length;++g<p;)(u=e[g])&&jo(u,g,o,i),t.push(u)}return Do(a,o)},Ns.filter=function(n){var t,e,r,u=[];"function"!=typeof n&&(n=q(n));for(var 
i=0,o=this.length;o>i;i++){u.push(t=[]);for(var e=this[i],a=0,c=e.length;c>a;a++)(r=e[a])&&n.call(r,r.__data__,a,i)&&t.push(r)}return Do(u,this.id)},Ns.tween=function(n,t){var e=this.id;return arguments.length<2?this.node().__transition__[e].tween.get(n):R(this,null==t?function(t){t.__transition__[e].tween.remove(n)}:function(r){r.__transition__[e].tween.set(n,t)})},Ns.attr=function(n,t){function e(){this.removeAttribute(a)}function r(){this.removeAttributeNS(a.space,a.local)}function u(n){return null==n?e:(n+="",function(){var t,e=this.getAttribute(a);return e!==n&&(t=o(e,n),function(n){this.setAttribute(a,t(n))})})}function i(n){return null==n?r:(n+="",function(){var t,e=this.getAttributeNS(a.space,a.local);return e!==n&&(t=o(e,n),function(n){this.setAttributeNS(a.space,a.local,t(n))})})}if(arguments.length<2){for(t in n)this.attr(t,n[t]);return this}var o="transform"==n?Ru:fu,a=Xo.ns.qualify(n);return Po(this,"attr."+n,t,a.local?i:u)},Ns.attrTween=function(n,t){function e(n,e){var r=t.call(this,n,e,this.getAttribute(u));return r&&function(n){this.setAttribute(u,r(n))}}function r(n,e){var r=t.call(this,n,e,this.getAttributeNS(u.space,u.local));return r&&function(n){this.setAttributeNS(u.space,u.local,r(n))}}var u=Xo.ns.qualify(n);return this.tween("attr."+n,u.local?r:e)},Ns.style=function(n,t,e){function r(){this.style.removeProperty(n)}function u(t){return null==t?r:(t+="",function(){var r,u=Go.getComputedStyle(this,null).getPropertyValue(n);return u!==t&&(r=fu(u,t),function(t){this.style.setProperty(n,r(t),e)})})}var i=arguments.length;if(3>i){if("string"!=typeof n){2>i&&(t="");for(e in n)this.style(e,n[e],t);return this}e=""}return Po(this,"style."+n,t,u)},Ns.styleTween=function(n,t,e){function r(r,u){var i=t.call(this,r,u,Go.getComputedStyle(this,null).getPropertyValue(n));return i&&function(t){this.style.setProperty(n,i(t),e)}}return arguments.length<3&&(e=""),this.tween("style."+n,r)},Ns.text=function(n){return 
Po(this,"text",n,Uo)},Ns.remove=function(){return this.each("end.transition",function(){var n;this.__transition__.count<2&&(n=this.parentNode)&&n.removeChild(this)})},Ns.ease=function(n){var t=this.id;return arguments.length<1?this.node().__transition__[t].ease:("function"!=typeof n&&(n=Xo.ease.apply(Xo,arguments)),R(this,function(e){e.__transition__[t].ease=n}))},Ns.delay=function(n){var t=this.id;return R(this,"function"==typeof n?function(e,r,u){e.__transition__[t].delay=+n.call(e,e.__data__,r,u)}:(n=+n,function(e){e.__transition__[t].delay=n}))},Ns.duration=function(n){var t=this.id;return R(this,"function"==typeof n?function(e,r,u){e.__transition__[t].duration=Math.max(1,n.call(e,e.__data__,r,u))}:(n=Math.max(1,n),function(e){e.__transition__[t].duration=n}))},Ns.each=function(n,t){var e=this.id;if(arguments.length<2){var r=Es,u=ks;ks=e,R(this,function(t,r,u){Es=t.__transition__[e],n.call(t,t.__data__,r,u)}),Es=r,ks=u}else R(this,function(r){var u=r.__transition__[e];(u.event||(u.event=Xo.dispatch("start","end"))).on(n,t)});return this},Ns.transition=function(){for(var n,t,e,r,u=this.id,i=++Ls,o=[],a=0,c=this.length;c>a;a++){o.push(n=[]);for(var t=this[a],s=0,l=t.length;l>s;s++)(e=t[s])&&(r=Object.create(e.__transition__[u]),r.delay+=r.duration,jo(e,s,i,r)),n.push(e)}return Do(o,i)},Xo.svg.axis=function(){function n(n){n.each(function(){var n,s=Xo.select(this),l=this.__chart__||e,f=this.__chart__=e.copy(),h=null==c?f.ticks?f.ticks.apply(f,a):f.domain():c,g=null==t?f.tickFormat?f.tickFormat.apply(f,a):bt:t,p=s.selectAll(".tick").data(h,f),v=p.enter().insert("g",".domain").attr("class","tick").style("opacity",Aa),d=Xo.transition(p.exit()).style("opacity",Aa).remove(),m=Xo.transition(p).style("opacity",1),y=Ri(f),x=s.selectAll(".domain").data([0]),M=(x.enter().append("path").attr("class","domain"),Xo.transition(x));v.append("line"),v.append("text");var 
_=v.select("line"),b=m.select("line"),w=p.select("text").text(g),S=v.select("text"),k=m.select("text");switch(r){case"bottom":n=Ho,_.attr("y2",u),S.attr("y",Math.max(u,0)+o),b.attr("x2",0).attr("y2",u),k.attr("x",0).attr("y",Math.max(u,0)+o),w.attr("dy",".71em").style("text-anchor","middle"),M.attr("d","M"+y[0]+","+i+"V0H"+y[1]+"V"+i);break;case"top":n=Ho,_.attr("y2",-u),S.attr("y",-(Math.max(u,0)+o)),b.attr("x2",0).attr("y2",-u),k.attr("x",0).attr("y",-(Math.max(u,0)+o)),w.attr("dy","0em").style("text-anchor","middle"),M.attr("d","M"+y[0]+","+-i+"V0H"+y[1]+"V"+-i);break;case"left":n=Fo,_.attr("x2",-u),S.attr("x",-(Math.max(u,0)+o)),b.attr("x2",-u).attr("y2",0),k.attr("x",-(Math.max(u,0)+o)).attr("y",0),w.attr("dy",".32em").style("text-anchor","end"),M.attr("d","M"+-i+","+y[0]+"H0V"+y[1]+"H"+-i);break;case"right":n=Fo,_.attr("x2",u),S.attr("x",Math.max(u,0)+o),b.attr("x2",u).attr("y2",0),k.attr("x",Math.max(u,0)+o).attr("y",0),w.attr("dy",".32em").style("text-anchor","start"),M.attr("d","M"+i+","+y[0]+"H0V"+y[1]+"H"+i)}if(f.rangeBand){var E=f,A=E.rangeBand()/2;l=f=function(n){return E(n)+A}}else l.rangeBand?l=f:d.call(n,f);v.call(n,l),m.call(n,f)})}var t,e=Xo.scale.linear(),r=Ts,u=6,i=6,o=3,a=[10],c=null;return n.scale=function(t){return arguments.length?(e=t,n):e},n.orient=function(t){return arguments.length?(r=t in qs?t+"":Ts,n):r},n.ticks=function(){return arguments.length?(a=arguments,n):a},n.tickValues=function(t){return arguments.length?(c=t,n):c},n.tickFormat=function(e){return arguments.length?(t=e,n):t},n.tickSize=function(t){var e=arguments.length;return e?(u=+t,i=+arguments[e-1],n):u},n.innerTickSize=function(t){return arguments.length?(u=+t,n):u},n.outerTickSize=function(t){return arguments.length?(i=+t,n):i},n.tickPadding=function(t){return arguments.length?(o=+t,n):o},n.tickSubdivide=function(){return arguments.length&&n},n};var Ts="bottom",qs={top:1,right:1,bottom:1,left:1};Xo.svg.brush=function(){function n(i){i.each(function(){var 
i=Xo.select(this).style("pointer-events","all").style("-webkit-tap-highlight-color","rgba(0,0,0,0)").on("mousedown.brush",u).on("touchstart.brush",u),o=i.selectAll(".background").data([0]);o.enter().append("rect").attr("class","background").style("visibility","hidden").style("cursor","crosshair"),i.selectAll(".extent").data([0]).enter().append("rect").attr("class","extent").style("cursor","move");var a=i.selectAll(".resize").data(p,bt);a.exit().remove(),a.enter().append("g").attr("class",function(n){return"resize "+n}).style("cursor",function(n){return zs[n]}).append("rect").attr("x",function(n){return/[ew]$/.test(n)?-3:null}).attr("y",function(n){return/^[ns]/.test(n)?-3:null}).attr("width",6).attr("height",6).style("visibility","hidden"),a.style("display",n.empty()?"none":null);var l,f=Xo.transition(i),h=Xo.transition(o);c&&(l=Ri(c),h.attr("x",l[0]).attr("width",l[1]-l[0]),e(f)),s&&(l=Ri(s),h.attr("y",l[0]).attr("height",l[1]-l[0]),r(f)),t(f)})}function t(n){n.selectAll(".resize").attr("transform",function(n){return"translate("+l[+/e$/.test(n)]+","+f[+/^s/.test(n)]+")"})}function e(n){n.select(".extent").attr("x",l[0]),n.selectAll(".extent,.n>rect,.s>rect").attr("width",l[1]-l[0])}function r(n){n.select(".extent").attr("y",f[0]),n.selectAll(".extent,.e>rect,.w>rect").attr("height",f[1]-f[0])}function u(){function u(){32==Xo.event.keyCode&&(C||(x=null,L[0]-=l[1],L[1]-=f[1],C=2),d())}function p(){32==Xo.event.keyCode&&2==C&&(L[0]+=l[1],L[1]+=f[1],C=0,d())}function v(){var n=Xo.mouse(_),u=!1;M&&(n[0]+=M[0],n[1]+=M[1]),C||(Xo.event.altKey?(x||(x=[(l[0]+l[1])/2,(f[0]+f[1])/2]),L[0]=l[+(n[0]<x[0])],L[1]=f[+(n[1]<x[1])]):x=null),E&&m(n,c,0)&&(e(S),u=!0),A&&m(n,s,1)&&(r(S),u=!0),u&&(t(S),w({type:"brush",mode:C?"move":"resize"}))}function m(n,t,e){var r,u,a=Ri(t),c=a[0],s=a[1],p=L[e],v=e?f:l,d=v[1]-v[0];return 
C&&(c-=p,s-=d+p),r=(e?g:h)?Math.max(c,Math.min(s,n[e])):n[e],C?u=(r+=p)+d:(x&&(p=Math.max(c,Math.min(s,2*x[e]-r))),r>p?(u=r,r=p):u=p),v[0]!=r||v[1]!=u?(e?o=null:i=null,v[0]=r,v[1]=u,!0):void 0}function y(){v(),S.style("pointer-events","all").selectAll(".resize").style("display",n.empty()?"none":null),Xo.select("body").style("cursor",null),T.on("mousemove.brush",null).on("mouseup.brush",null).on("touchmove.brush",null).on("touchend.brush",null).on("keydown.brush",null).on("keyup.brush",null),N(),w({type:"brushend"})}var x,M,_=this,b=Xo.select(Xo.event.target),w=a.of(_,arguments),S=Xo.select(_),k=b.datum(),E=!/^(n|s)$/.test(k)&&c,A=!/^(e|w)$/.test(k)&&s,C=b.classed("extent"),N=O(),L=Xo.mouse(_),T=Xo.select(Go).on("keydown.brush",u).on("keyup.brush",p);if(Xo.event.changedTouches?T.on("touchmove.brush",v).on("touchend.brush",y):T.on("mousemove.brush",v).on("mouseup.brush",y),S.interrupt().selectAll("*").interrupt(),C)L[0]=l[0]-L[0],L[1]=f[0]-L[1];else if(k){var q=+/w$/.test(k),z=+/^n/.test(k);M=[l[1-q]-L[0],f[1-z]-L[1]],L[0]=l[q],L[1]=f[z]}else Xo.event.altKey&&(x=L.slice());S.style("pointer-events","none").selectAll(".resize").style("display",null),Xo.select("body").style("cursor",b.style("cursor")),w({type:"brushstart"}),v()}var i,o,a=y(n,"brushstart","brush","brushend"),c=null,s=null,l=[0,0],f=[0,0],h=!0,g=!0,p=Rs[0];return n.event=function(n){n.each(function(){var n=a.of(this,arguments),t={x:l,y:f,i:i,j:o},e=this.__chart__||t;this.__chart__=t,ks?Xo.select(this).transition().each("start.brush",function(){i=e.i,o=e.j,l=e.x,f=e.y,n({type:"brushstart"})}).tween("brush:brush",function(){var e=hu(l,t.x),r=hu(f,t.y);return i=o=null,function(u){l=t.x=e(u),f=t.y=r(u),n({type:"brush",mode:"resize"})}}).each("end.brush",function(){i=t.i,o=t.j,n({type:"brush",mode:"resize"}),n({type:"brushend"})}):(n({type:"brushstart"}),n({type:"brush",mode:"resize"}),n({type:"brushend"}))})},n.x=function(t){return arguments.length?(c=t,p=Rs[!c<<1|!s],n):c},n.y=function(t){return 
arguments.length?(s=t,p=Rs[!c<<1|!s],n):s},n.clamp=function(t){return arguments.length?(c&&s?(h=!!t[0],g=!!t[1]):c?h=!!t:s&&(g=!!t),n):c&&s?[h,g]:c?h:s?g:null},n.extent=function(t){var e,r,u,a,h;return arguments.length?(c&&(e=t[0],r=t[1],s&&(e=e[0],r=r[0]),i=[e,r],c.invert&&(e=c(e),r=c(r)),e>r&&(h=e,e=r,r=h),(e!=l[0]||r!=l[1])&&(l=[e,r])),s&&(u=t[0],a=t[1],c&&(u=u[1],a=a[1]),o=[u,a],s.invert&&(u=s(u),a=s(a)),u>a&&(h=u,u=a,a=h),(u!=f[0]||a!=f[1])&&(f=[u,a])),n):(c&&(i?(e=i[0],r=i[1]):(e=l[0],r=l[1],c.invert&&(e=c.invert(e),r=c.invert(r)),e>r&&(h=e,e=r,r=h))),s&&(o?(u=o[0],a=o[1]):(u=f[0],a=f[1],s.invert&&(u=s.invert(u),a=s.invert(a)),u>a&&(h=u,u=a,a=h))),c&&s?[[e,u],[r,a]]:c?[e,r]:s&&[u,a])},n.clear=function(){return n.empty()||(l=[0,0],f=[0,0],i=o=null),n},n.empty=function(){return!!c&&l[0]==l[1]||!!s&&f[0]==f[1]},Xo.rebind(n,a,"on")};var zs={n:"ns-resize",e:"ew-resize",s:"ns-resize",w:"ew-resize",nw:"nwse-resize",ne:"nesw-resize",se:"nwse-resize",sw:"nesw-resize"},Rs=[["n","e","s","w","nw","ne","se","sw"],["e","w"],["n","s"],[]],Ds=tc.format=ac.timeFormat,Ps=Ds.utc,Us=Ps("%Y-%m-%dT%H:%M:%S.%LZ");Ds.iso=Date.prototype.toISOString&&+new Date("2000-01-01T00:00:00.000Z")?Oo:Us,Oo.parse=function(n){var t=new Date(n);return isNaN(t)?null:t},Oo.toString=Us.toString,tc.second=Rt(function(n){return new ec(1e3*Math.floor(n/1e3))},function(n,t){n.setTime(n.getTime()+1e3*Math.floor(t))},function(n){return n.getSeconds()}),tc.seconds=tc.second.range,tc.seconds.utc=tc.second.utc.range,tc.minute=Rt(function(n){return new ec(6e4*Math.floor(n/6e4))},function(n,t){n.setTime(n.getTime()+6e4*Math.floor(t))},function(n){return n.getMinutes()}),tc.minutes=tc.minute.range,tc.minutes.utc=tc.minute.utc.range,tc.hour=Rt(function(n){var t=n.getTimezoneOffset()/60;return new ec(36e5*(Math.floor(n/36e5-t)+t))},function(n,t){n.setTime(n.getTime()+36e5*Math.floor(t))},function(n){return n.getHours()}),tc.hours=tc.hour.range,tc.hours.utc=tc.hour.utc.range,tc.month=Rt(function(n){return 
n=tc.day(n),n.setDate(1),n},function(n,t){n.setMonth(n.getMonth()+t)},function(n){return n.getMonth()}),tc.months=tc.month.range,tc.months.utc=tc.month.utc.range;var js=[1e3,5e3,15e3,3e4,6e4,3e5,9e5,18e5,36e5,108e5,216e5,432e5,864e5,1728e5,6048e5,2592e6,7776e6,31536e6],Hs=[[tc.second,1],[tc.second,5],[tc.second,15],[tc.second,30],[tc.minute,1],[tc.minute,5],[tc.minute,15],[tc.minute,30],[tc.hour,1],[tc.hour,3],[tc.hour,6],[tc.hour,12],[tc.day,1],[tc.day,2],[tc.week,1],[tc.month,1],[tc.month,3],[tc.year,1]],Fs=Ds.multi([[".%L",function(n){return n.getMilliseconds()}],[":%S",function(n){return n.getSeconds()}],["%I:%M",function(n){return n.getMinutes()}],["%I %p",function(n){return n.getHours()}],["%a %d",function(n){return n.getDay()&&1!=n.getDate()}],["%b %d",function(n){return 1!=n.getDate()}],["%B",function(n){return n.getMonth()}],["%Y",be]]),Os={range:function(n,t,e){return Xo.range(Math.ceil(n/e)*e,+t,e).map(Io)},floor:bt,ceil:bt};Hs.year=tc.year,tc.scale=function(){return Yo(Xo.scale.linear(),Hs,Fs)};var Ys=Hs.map(function(n){return[n[0].utc,n[1]]}),Is=Ps.multi([[".%L",function(n){return n.getUTCMilliseconds()}],[":%S",function(n){return n.getUTCSeconds()}],["%I:%M",function(n){return n.getUTCMinutes()}],["%I %p",function(n){return n.getUTCHours()}],["%a %d",function(n){return n.getUTCDay()&&1!=n.getUTCDate()}],["%b %d",function(n){return 1!=n.getUTCDate()}],["%B",function(n){return n.getUTCMonth()}],["%Y",be]]);Ys.year=tc.year.utc,tc.scale.utc=function(){return Yo(Xo.scale.linear(),Ys,Is)},Xo.text=wt(function(n){return n.responseText}),Xo.json=function(n,t){return St(n,"application/json",Zo,t)},Xo.html=function(n,t){return St(n,"text/html",Vo,t)},Xo.xml=wt(function(n){return n.responseXML}),"function"==typeof define&&define.amd?define(Xo):"object"==typeof module&&module.exports?module.exports=Xo:this.d3=Xo}();'use strict';(function(window){window.define=undefined;}).call(this,this);'use strict';tr.exportTo('tr.ui.b',function(){const 
DataSeriesEnableChangeEventType='data-series-enabled-change';const THIS_DOC=document.currentScript.ownerDocument;const svgNS='http://www.w3.org/2000/svg';const ColorScheme=tr.b.ColorScheme;function getColorOfKey(key,selected){let id=ColorScheme.getColorIdForGeneralPurposeString(key);if(selected){id+=ColorScheme.properties.brightenedOffsets[0];} return ColorScheme.colorsAsStrings[id];} function getSVGTextSize(parentNode,text,opt_callback,opt_this){const textNode=document.createElementNS('http://www.w3.org/2000/svg','text');textNode.setAttributeNS(null,'x',0);textNode.setAttributeNS(null,'y',0);textNode.setAttributeNS(null,'fill','black');textNode.appendChild(document.createTextNode(text));parentNode.appendChild(textNode);if(opt_callback){opt_callback.call(opt_this||parentNode,textNode);} const width=textNode.getComputedTextLength();const height=textNode.getBBox().height;parentNode.removeChild(textNode);return{width,height};} @@ -8068,7 +8068,7 @@ return snapshot;} function findAllEvents(rendererHelper,category,title){const targetEvents=[];for(const ev of rendererHelper.process.getDescendantEvents()){if(!hasCategoryAndName(ev,category,title))continue;targetEvents.push(ev);} return targetEvents;} -const URL_EXCLUSION=['','about:blank','data:text/html,pluginplaceholderdata','chrome-error://chromewebdata/'];function shouldIgnoreURL(url){return URL_EXCLUSION.includes(url);} +const URL_BLACKLIST=['','about:blank','data:text/html,pluginplaceholderdata','chrome-error://chromewebdata/'];function shouldIgnoreURL(url){return URL_BLACKLIST.includes(url);} function collectTimeToEvent(category,eventName,rendererHelper,frameToNavStartEvents){const targetEvents=findAllEvents(rendererHelper,category,eventName);const samples=[];for(const ev of targetEvents){if(rendererHelper.isTelemetryInternalEvent(ev))continue;const frameIdRef=ev.args.frame;const 
snapshot=findFrameLoaderSnapshotAt(rendererHelper,frameIdRef,ev.start);if(snapshot===undefined||!snapshot.args.isLoadingMainFrame)continue;const url=snapshot.args.documentLoaderURL;if(shouldIgnoreURL(url))continue;const navigationStartEvent=EventFinderUtils.findLastEventStartingOnOrBeforeTimestamp(frameToNavStartEvents.get(frameIdRef)||[],ev.start);if(navigationStartEvent===undefined)continue;const navStartToEventRange=tr.b.math.Range.fromExplicitRange(navigationStartEvent.start,ev.start);const networkEvents=getNetworkEventsInRange(rendererHelper.process,navStartToEventRange);const breakdownTree=tr.metrics.sh.generateWallClockTimeBreakdownTree(rendererHelper.mainThread,networkEvents,navStartToEventRange);samples.push({value:navStartToEventRange.duration,breakdownTree,diagnostics:{breakdown:createBreakdownDiagnostic(breakdownTree),url:new tr.v.d.GenericSet([url]),Start:new RelatedEventSet(navigationStartEvent),End:new RelatedEventSet(ev)}});} return samples;} function addFirstMeaningfulPaintSample(samples,rendererHelper,navigationStart,fmpMarkerEvent,url){const navStartToFMPRange=tr.b.math.Range.fromExplicitRange(navigationStart.start,fmpMarkerEvent.start);const networkEvents=getNetworkEventsInRange(rendererHelper.process,navStartToFMPRange);const timeToFirstMeaningfulPaint=navStartToFMPRange.duration;const breakdownTree=tr.metrics.sh.generateWallClockTimeBreakdownTree(rendererHelper.mainThread,networkEvents,navStartToFMPRange);samples.push({value:timeToFirstMeaningfulPaint,breakdownTree,diagnostics:{breakdown:createBreakdownDiagnostic(breakdownTree),start:new RelatedEventSet(navigationStart),end:new RelatedEventSet(fmpMarkerEvent),infos:new tr.v.d.GenericSet([{url,pid:rendererHelper.pid,start:navigationStart.start,fmp:fmpMarkerEvent.start,}]),}});} @@ -8083,7 +8083,7 @@ function addSamplesToHistogram(samples,histogram,histograms){for(const sample of 
samples){histogram.addSample(sample.value,sample.diagnostics);if(histogram.name!=='timeToFirstContentfulPaint')continue;if(!sample.breakdownTree)continue;for(const[category,breakdown]of Object.entries(sample.breakdownTree)){const relatedName=`${histogram.name}:${category}`;let relatedHist=histograms.getHistogramsNamed(relatedName)[0];if(!relatedHist){relatedHist=histograms.createHistogram(relatedName,histogram.unit,[],{binBoundaries:LOADING_METRIC_BOUNDARIES,summaryOptions:{count:false,max:false,min:false,sum:false,},});let relatedNames=histogram.diagnostics.get('breakdown');if(!relatedNames){relatedNames=new tr.v.d.RelatedNameMap();histogram.diagnostics.set('breakdown',relatedNames);} relatedNames.set(category,relatedName);} relatedHist.addSample(breakdown.total,{breakdown:tr.v.d.Breakdown.fromEntries(Object.entries(breakdown.events)),});}}} -function loadingMetric(histograms,model){const firstPaintHistogram=histograms.createHistogram('timeToFirstPaint',timeDurationInMs_smallerIsBetter,[],{binBoundaries:LOADING_METRIC_BOUNDARIES,description:'time to first paint',summaryOptions:SUMMARY_OPTIONS,});const firstContentfulPaintHistogram=histograms.createHistogram('timeToFirstContentfulPaint',timeDurationInMs_smallerIsBetter,[],{binBoundaries:LOADING_METRIC_BOUNDARIES,description:'time to first contentful paint',summaryOptions:SUMMARY_OPTIONS,});const onLoadHistogram=histograms.createHistogram('timeToOnload',timeDurationInMs_smallerIsBetter,[],{binBoundaries:LOADING_METRIC_BOUNDARIES,description:'time to onload. 
'+'This is temporary metric used for PCv1/v2 correctness checking',summaryOptions:SUMMARY_OPTIONS,});const firstMeaningfulPaintHistogram=histograms.createHistogram('timeToFirstMeaningfulPaint',timeDurationInMs_smallerIsBetter,[],{binBoundaries:LOADING_METRIC_BOUNDARIES,description:'time to first meaningful paint',summaryOptions:SUMMARY_OPTIONS,});const firstMeaningfulPaintCpuTimeHistogram=histograms.createHistogram('cpuTimeToFirstMeaningfulPaint',timeDurationInMs_smallerIsBetter,[],{binBoundaries:LOADING_METRIC_BOUNDARIES,description:'CPU time to first meaningful paint',summaryOptions:SUMMARY_OPTIONS,});const timeToInteractiveHistogram=histograms.createHistogram('timeToInteractive',timeDurationInMs_smallerIsBetter,[],{binBoundaries:TIME_TO_INTERACTIVE_BOUNDARIES,description:'Time to Interactive',summaryOptions:SUMMARY_OPTIONS,});const timeToFirstCpuIdleHistogram=histograms.createHistogram('timeToFirstCpuIdle',timeDurationInMs_smallerIsBetter,[],{binBoundaries:TIME_TO_INTERACTIVE_BOUNDARIES,description:'Time to First CPU Idle',summaryOptions:SUMMARY_OPTIONS,});const chromeHelper=model.getOrCreateHelper(tr.model.helpers.ChromeModelHelper);for(const pid in chromeHelper.rendererHelpers){const rendererHelper=chromeHelper.rendererHelpers[pid];if(rendererHelper.isChromeTracingUI)continue;const 
samplesSet=collectLoadingMetricsForRenderer(rendererHelper);addSamplesToHistogram(samplesSet.firstPaintSamples,firstPaintHistogram,histograms);addSamplesToHistogram(samplesSet.firstContentfulPaintSamples,firstContentfulPaintHistogram,histograms);addSamplesToHistogram(samplesSet.onLoadSamples,onLoadHistogram,histograms);addSamplesToHistogram(samplesSet.firstMeaningfulPaintSamples,firstMeaningfulPaintHistogram,histograms);addSamplesToHistogram(samplesSet.firstMeaningfulPaintCpuTimeSamples,firstMeaningfulPaintCpuTimeHistogram,histograms);addSamplesToHistogram(samplesSet.interactiveSamples,timeToInteractiveHistogram,histograms);addSamplesToHistogram(samplesSet.firstCpuIdleSamples,timeToFirstCpuIdleHistogram,histograms);}} +function loadingMetric(histograms,model){const firstPaintHistogram=histograms.createHistogram('timeToFirstPaint',timeDurationInMs_smallerIsBetter,[],{binBoundaries:LOADING_METRIC_BOUNDARIES,description:'time to first paint',summaryOptions:SUMMARY_OPTIONS,});const firstContentfulPaintHistogram=histograms.createHistogram('timeToFirstContentfulPaint',timeDurationInMs_smallerIsBetter,[],{binBoundaries:LOADING_METRIC_BOUNDARIES,description:'time to first contentful paint',summaryOptions:SUMMARY_OPTIONS,});const onLoadHistogram=histograms.createHistogram('timeToOnload',timeDurationInMs_smallerIsBetter,[],{binBoundaries:LOADING_METRIC_BOUNDARIES,description:'time to onload. 
'+'This is temporary metric used for PCv1/v2 sanity checking',summaryOptions:SUMMARY_OPTIONS,});const firstMeaningfulPaintHistogram=histograms.createHistogram('timeToFirstMeaningfulPaint',timeDurationInMs_smallerIsBetter,[],{binBoundaries:LOADING_METRIC_BOUNDARIES,description:'time to first meaningful paint',summaryOptions:SUMMARY_OPTIONS,});const firstMeaningfulPaintCpuTimeHistogram=histograms.createHistogram('cpuTimeToFirstMeaningfulPaint',timeDurationInMs_smallerIsBetter,[],{binBoundaries:LOADING_METRIC_BOUNDARIES,description:'CPU time to first meaningful paint',summaryOptions:SUMMARY_OPTIONS,});const timeToInteractiveHistogram=histograms.createHistogram('timeToInteractive',timeDurationInMs_smallerIsBetter,[],{binBoundaries:TIME_TO_INTERACTIVE_BOUNDARIES,description:'Time to Interactive',summaryOptions:SUMMARY_OPTIONS,});const timeToFirstCpuIdleHistogram=histograms.createHistogram('timeToFirstCpuIdle',timeDurationInMs_smallerIsBetter,[],{binBoundaries:TIME_TO_INTERACTIVE_BOUNDARIES,description:'Time to First CPU Idle',summaryOptions:SUMMARY_OPTIONS,});const chromeHelper=model.getOrCreateHelper(tr.model.helpers.ChromeModelHelper);for(const pid in chromeHelper.rendererHelpers){const rendererHelper=chromeHelper.rendererHelpers[pid];if(rendererHelper.isChromeTracingUI)continue;const 
samplesSet=collectLoadingMetricsForRenderer(rendererHelper);addSamplesToHistogram(samplesSet.firstPaintSamples,firstPaintHistogram,histograms);addSamplesToHistogram(samplesSet.firstContentfulPaintSamples,firstContentfulPaintHistogram,histograms);addSamplesToHistogram(samplesSet.onLoadSamples,onLoadHistogram,histograms);addSamplesToHistogram(samplesSet.firstMeaningfulPaintSamples,firstMeaningfulPaintHistogram,histograms);addSamplesToHistogram(samplesSet.firstMeaningfulPaintCpuTimeSamples,firstMeaningfulPaintCpuTimeHistogram,histograms);addSamplesToHistogram(samplesSet.interactiveSamples,timeToInteractiveHistogram,histograms);addSamplesToHistogram(samplesSet.firstCpuIdleSamples,timeToFirstCpuIdleHistogram,histograms);}} tr.metrics.MetricRegistry.register(loadingMetric);return{loadingMetric,getNetworkEventsInRange,collectLoadingMetricsForRenderer,};});'use strict';tr.exportTo('tr.metrics',function(){const SPA_NAVIGATION_START_TO_FIRST_PAINT_DURATION_BIN_BOUNDARY=tr.v.HistogramBinBoundaries.createExponential(1,1000,50);function spaNavigationMetric(histograms,model){const histogram=new tr.v.Histogram('spaNavigationStartToFpDuration',tr.b.Unit.byName.timeDurationInMs_smallerIsBetter,SPA_NAVIGATION_START_TO_FIRST_PAINT_DURATION_BIN_BOUNDARY);histogram.description='Latency between the input event causing'+' a SPA navigation and the first paint event after it';histogram.customizeSummaryOptions({count:false,sum:false,});const modelHelper=model.getOrCreateHelper(tr.model.helpers.ChromeModelHelper);if(!modelHelper){return;} const rendererHelpers=modelHelper.rendererHelpers;if(!rendererHelpers){return;} const browserHelper=modelHelper.browserHelper;for(const rendererHelper of Object.values(rendererHelpers)){const spaNavigations=tr.metrics.findSpaNavigationsOnRenderer(rendererHelper,browserHelper);for(const spaNav of spaNavigations){let beginTs=0;if(spaNav.navStartCandidates.inputLatencyAsyncSlice){const 
beginData=spaNav.navStartCandidates.inputLatencyAsyncSlice.args.data;beginTs=model.convertTimestampToModelTime('traceEventClock',beginData.INPUT_EVENT_LATENCY_BEGIN_RWH_COMPONENT.time);}else{beginTs=spaNav.navStartCandidates.goToIndexSlice.start;} @@ -8117,7 +8117,7 @@ const benchmarks=hist.diagnostics.get(tr.v.d.RESERVED_NAMES.BENCHMARKS);const start=hist.diagnostics.get(tr.v.d.RESERVED_NAMES.BENCHMARK_START);if(benchmarks===undefined){if(start===undefined)return'Value';return start.toString();} const benchmarksStr=Array.from(benchmarks).join('\n');if(start===undefined)return benchmarksStr;return benchmarksStr+'\n'+start.toString();});class GenericSetGrouping extends HistogramGrouping{constructor(name){super(name,undefined);this.callback_=this.compute_.bind(this);} compute_(hist){const diag=hist.diagnostics.get(this.key);if(diag===undefined)return'';const parts=Array.from(diag);parts.sort();return parts.join(',');}} -GenericSetGrouping.NAMES=[tr.v.d.RESERVED_NAMES.ARCHITECTURES,tr.v.d.RESERVED_NAMES.BENCHMARKS,tr.v.d.RESERVED_NAMES.BOTS,tr.v.d.RESERVED_NAMES.BUILDS,tr.v.d.RESERVED_NAMES.DEVICE_IDS,tr.v.d.RESERVED_NAMES.PRIMARYS,tr.v.d.RESERVED_NAMES.MEMORY_AMOUNTS,tr.v.d.RESERVED_NAMES.OS_NAMES,tr.v.d.RESERVED_NAMES.OS_VERSIONS,tr.v.d.RESERVED_NAMES.PRODUCT_VERSIONS,tr.v.d.RESERVED_NAMES.STORIES,tr.v.d.RESERVED_NAMES.STORYSET_REPEATS,tr.v.d.RESERVED_NAMES.STORY_TAGS,];for(const name of GenericSetGrouping.NAMES){new GenericSetGrouping(name);} +GenericSetGrouping.NAMES=[tr.v.d.RESERVED_NAMES.ARCHITECTURES,tr.v.d.RESERVED_NAMES.BENCHMARKS,tr.v.d.RESERVED_NAMES.BOTS,tr.v.d.RESERVED_NAMES.BUILDS,tr.v.d.RESERVED_NAMES.DEVICE_IDS,tr.v.d.RESERVED_NAMES.MASTERS,tr.v.d.RESERVED_NAMES.MEMORY_AMOUNTS,tr.v.d.RESERVED_NAMES.OS_NAMES,tr.v.d.RESERVED_NAMES.OS_VERSIONS,tr.v.d.RESERVED_NAMES.PRODUCT_VERSIONS,tr.v.d.RESERVED_NAMES.STORIES,tr.v.d.RESERVED_NAMES.STORYSET_REPEATS,tr.v.d.RESERVED_NAMES.STORY_TAGS,];for(const name of GenericSetGrouping.NAMES){new 
GenericSetGrouping(name);} class DateRangeGrouping extends HistogramGrouping{constructor(name){super(name,undefined);this.callback_=this.compute_.bind(this);} compute_(hist){const diag=hist.diagnostics.get(this.key);if(diag===undefined)return'';return diag.toString();}} DateRangeGrouping.NAMES=[tr.v.d.RESERVED_NAMES.BENCHMARK_START,tr.v.d.RESERVED_NAMES.TRACE_START,];for(const name of DateRangeGrouping.NAMES){new DateRangeGrouping(name);} @@ -10018,10 +10018,10 @@ root 504 2 0 0 rescuer_thread 0 S [sb-1] 5 root 505 2 0 0 ngd_slim_rx_msgq_thread 0 S [ngd_rx_thread1] 5 root 506 2 0 0 irq_thread 0 S [irq/308-mnh-rea] 5 -root 507 2 0 0 ngd_notify_partners 0 S [ngd_notify_sl1] 5 +root 507 2 0 0 ngd_notify_slaves 0 S [ngd_notify_sl1] 5 root 508 2 0 0 rescuer_thread 0 S [sb-3] 5 root 509 2 0 0 ngd_slim_rx_msgq_thread 0 S [ngd_rx_thread3] 5 -root 510 2 0 0 ngd_notify_partners 0 S [ngd_notify_sl3] 5 +root 510 2 0 0 ngd_notify_slaves 0 S [ngd_notify_sl3] 5 root 511 2 0 0 rescuer_thread 0 S [tbn_event_proce] 5 root 512 2 0 0 irq_thread 0 S [irq/78-qpnp_fla] 5 root 513 2 0 0 irq_thread 0 S [irq/77-qpnp_fla] 5 @@ -10052,7 +10052,7 @@ root 546 2 0 0 irq_thread 0 S [irq/381-fts] 5 logd 555 1 30408 13584 SyS_rt_sigsuspend 0 S logd logd system 556 1 15876 3680 do_wait 0 S qseecomd qseecomd -system 557 1 17460 3664 binder_ioctl 0 S [email protected] [email protected] +system 557 1 17460 3664 binder_ioctl 0 S [email protected] [email protected] root 559 2 0 0 kthread_worker_fn 0 S [sugov:0] 5 root 560 2 0 0 kthread_worker_fn 0 S [sugov:4] 5 root 565 2 0 0 kauditd_thread 0 S [kauditd] 5 @@ -10065,7 +10065,7 @@ system 588 1 11188 2556 binder_ioctl 0 S vndservicemanager vndservicemanager root 590 1 12516 3092 binder_ioctl 0 S [email protected] [email protected] system 591 1 13968 3296 binder_ioctl 0 S [email protected] [email protected] -hsm 592 1 2127108 6120 binder_ioctl 0 S [email protected] [email protected] +hsm 592 1 2127108 6120 binder_ioctl 0 S [email protected] [email 
protected] hsm 593 1 16388 2880 binder_ioctl 0 S citadeld citadeld system 595 1 20052 4528 do_sys_poll 0 S sscrpcd sscrpcd system 598 1 2142956 14468 binder_ioctl 0 S [email protected] [email protected] @@ -10728,7 +10728,7 @@ logd 555 573 logd.klogd logd 555 574 logd.auditd system 556 556 qseecomd -system 557 557 [email protected] +system 557 557 [email protected] system 557 602 HwBinder:557_1 root 559 559 sugov:0 root 560 560 sugov:4 @@ -10757,7 +10757,7 @@ system 588 588 vndservicemanag root 590 590 [email protected] system 591 591 [email protected] -hsm 592 592 [email protected] +hsm 592 592 [email protected] hsm 593 593 citadeld hsm 593 603 citadeld hsm 593 604 Binder:593_1 @@ -12770,10 +12770,10 @@ root 504 2 0 0 rescuer_thread 0 S [sb-1] 5 root 505 2 0 0 ngd_slim_rx_msgq_thread 0 S [ngd_rx_thread1] 5 root 506 2 0 0 irq_thread 0 S [irq/308-mnh-rea] 5 -root 507 2 0 0 ngd_notify_partners 0 S [ngd_notify_sl1] 5 +root 507 2 0 0 ngd_notify_slaves 0 S [ngd_notify_sl1] 5 root 508 2 0 0 rescuer_thread 0 S [sb-3] 5 root 509 2 0 0 ngd_slim_rx_msgq_thread 0 S [ngd_rx_thread3] 5 -root 510 2 0 0 ngd_notify_partners 0 S [ngd_notify_sl3] 5 +root 510 2 0 0 ngd_notify_slaves 0 S [ngd_notify_sl3] 5 root 511 2 0 0 rescuer_thread 0 S [tbn_event_proce] 5 root 512 2 0 0 irq_thread 0 S [irq/78-qpnp_fla] 5 root 513 2 0 0 irq_thread 0 S [irq/77-qpnp_fla] 5 @@ -12804,7 +12804,7 @@ root 546 2 0 0 irq_thread 0 S [irq/381-fts] 5 logd 555 1 30408 13652 SyS_rt_sigsuspend 0 S logd logd system 556 1 15876 3680 do_wait 0 S qseecomd qseecomd -system 557 1 17460 3664 binder_ioctl 0 S [email protected] [email protected] +system 557 1 17460 3664 binder_ioctl 0 S [email protected] [email protected] root 559 2 0 0 kthread_worker_fn 0 S [sugov:0] 5 root 560 2 0 0 kthread_worker_fn 0 S [sugov:4] 5 root 565 2 0 0 kauditd_thread 0 S [kauditd] 5 @@ -12817,7 +12817,7 @@ system 588 1 11188 2556 binder_ioctl 0 S vndservicemanager vndservicemanager root 590 1 12516 3092 binder_ioctl 0 S [email protected] 
[email protected] system 591 1 13968 3296 binder_ioctl 0 S [email protected] [email protected] -hsm 592 1 2127108 6120 binder_ioctl 0 S [email protected] [email protected] +hsm 592 1 2127108 6120 binder_ioctl 0 S [email protected] [email protected] hsm 593 1 16388 2880 binder_ioctl 0 S citadeld citadeld system 595 1 20052 4528 do_sys_poll 0 S sscrpcd sscrpcd system 598 1 2142956 14468 binder_ioctl 0 S [email protected] [email protected] @@ -13480,7 +13480,7 @@ logd 555 573 logd.klogd logd 555 574 logd.auditd system 556 556 qseecomd -system 557 557 [email protected] +system 557 557 [email protected] system 557 602 HwBinder:557_1 root 559 559 sugov:0 root 560 560 sugov:4 @@ -13509,7 +13509,7 @@ system 588 588 vndservicemanag root 590 590 [email protected] system 591 591 [email protected] -hsm 592 592 [email protected] +hsm 592 592 [email protected] hsm 593 593 citadeld hsm 593 603 citadeld hsm 593 604 Binder:593_1
diff --git a/tools/systrace_parser/parser/test/hdrnet.html b/tools/systrace_parser/parser/test/hdrnet.html index b09406b..5f430bb 100644 --- a/tools/systrace_parser/parser/test/hdrnet.html +++ b/tools/systrace_parser/parser/test/hdrnet.html
@@ -4734,7 +4734,7 @@ visitedDomainIds.add(current.domainId);const outgoingTransformers=this.transformerMapByDomainId_[current.domainId];if(!outgoingTransformers)continue;for(const outgoingDomainId in outgoingTransformers){const toNextDomainTransformer=outgoingTransformers[outgoingDomainId];const toCurrentDomainTransformer=current.transformer;queue.push({domainId:outgoingDomainId,transformer:Transformer.compose(toNextDomainTransformer,toCurrentDomainTransformer)});}} return undefined;},selectModelDomainId_(){this.ensureAllDomainsAreConnected_();for(const chromeDomainId of POSSIBLE_CHROME_CLOCK_DOMAINS){if(this.domainsSeen_.has(chromeDomainId)){this.modelDomainId_=chromeDomainId;return;}} const domainsSeenArray=Array.from(this.domainsSeen_);domainsSeenArray.sort();this.modelDomainId_=domainsSeenArray[0];},ensureAllDomainsAreConnected_(){let firstDomainId=undefined;for(const domainId of this.domainsSeen_){if(!firstDomainId){firstDomainId=domainId;continue;} -if(!this.getTransformerBetween_(firstDomainId,domainId)){throw new Error('Unable to select a primary clock domain because no '+'path can be found from "'+firstDomainId+'" to "'+domainId+'".');}} +if(!this.getTransformerBetween_(firstDomainId,domainId)){throw new Error('Unable to select a master clock domain because no '+'path can be found from "'+firstDomainId+'" to "'+domainId+'".');}} return true;},onDomainSeen_(domainId){if(domainId===ClockDomainId.UNKNOWN_CHROME_LEGACY&&!this.domainsSeen_.has(ClockDomainId.UNKNOWN_CHROME_LEGACY)){for(const chromeDomainId of POSSIBLE_CHROME_CLOCK_DOMAINS){if(chromeDomainId===ClockDomainId.UNKNOWN_CHROME_LEGACY){continue;} this.collapseDomains_(ClockDomainId.UNKNOWN_CHROME_LEGACY,chromeDomainId);}} this.domainsSeen_.add(domainId);},onSyncCompleted_(marker1,marker2){const forwardTransformer=Transformer.fromMarkers(marker1,marker2);const backwardTransformer=Transformer.fromMarkers(marker2,marker1);const 
existingTransformer=this.getOrCreateTransformerMap_(marker1.domainId)[marker2.domainId];if(!existingTransformer||forwardTransformer.error<existingTransformer.error){this.getOrCreateTransformerMap_(marker1.domainId)[marker2.domainId]=forwardTransformer;this.getOrCreateTransformerMap_(marker2.domainId)[marker1.domainId]=backwardTransformer;}},collapseDomains_(domain1Id,domain2Id){this.getOrCreateTransformerMap_(domain1Id)[domain2Id]=this.getOrCreateTransformerMap_(domain2Id)[domain1Id]=Transformer.IDENTITY;},getOrCreateTransformerMap_(domainId){if(!this.transformerMapByDomainId_[domainId]){this.transformerMapByDomainId_[domainId]={};} @@ -5152,7 +5152,7 @@ static uint8ArrayToString_(arr){if(typeof TextDecoder!=='undefined'){const decoder=new TextDecoder('utf-8');return decoder.decode(arr);} const c=[];for(let i=0;i<arr.length;i+=MAX_FUNCTION_ARGS_COUNT){c.push(String.fromCharCode(...arr.subarray(i,i+MAX_FUNCTION_ARGS_COUNT)));} return c.join('');}} -return{InMemoryTraceStream,};});!function(t){if("object"==typeof exports&&"undefined"!=typeof module)module.exports=t();else if("function"==typeof define&&define.amd)define([],t);else{("undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:this).pako=t()}}(function(){return function t(e,a,i){function n(s,o){if(!a[s]){if(!e[s]){var l="function"==typeof require&&require;if(!o&&l)return l(s,!0);if(r)return r(s,!0);var h=new Error("Cannot find module '"+s+"'");throw h.code="MODULE_NOT_FOUND",h}var d=a[s]={exports:{}};e[s][0].call(d.exports,function(t){var a=e[s][1][t];return n(a||t)},d,d.exports,t,e,a,i)}return a[s].exports}for(var r="function"==typeof require&&require,s=0;s<i.length;s++)n(i[s]);return n}({1:[function(t,e,a){"use strict";function i(t){if(!(this instanceof i))return new i(t);this.options=s.assign({level:_,method:c,chunkSize:16384,windowBits:15,memLevel:8,strategy:u,to:""},t||{});var 
e=this.options;e.raw&&e.windowBits>0?e.windowBits=-e.windowBits:e.gzip&&e.windowBits>0&&e.windowBits<16&&(e.windowBits+=16),this.err=0,this.msg="",this.ended=!1,this.chunks=[],this.strm=new h,this.strm.avail_out=0;var a=r.deflateInit2(this.strm,e.level,e.method,e.windowBits,e.memLevel,e.strategy);if(a!==f)throw new Error(l[a]);if(e.header&&r.deflateSetHeader(this.strm,e.header),e.dictionary){var n;if(n="string"==typeof e.dictionary?o.string2buf(e.dictionary):"[object ArrayBuffer]"===d.call(e.dictionary)?new Uint8Array(e.dictionary):e.dictionary,(a=r.deflateSetDictionary(this.strm,n))!==f)throw new Error(l[a]);this._dict_set=!0}}function n(t,e){var a=new i(e);if(a.push(t,!0),a.err)throw a.msg||l[a.err];return a.result}var r=t("./zlib/deflate"),s=t("./utils/common"),o=t("./utils/strings"),l=t("./zlib/messages"),h=t("./zlib/zstream"),d=Object.prototype.toString,f=0,_=-1,u=0,c=8;i.prototype.push=function(t,e){var a,i,n=this.strm,l=this.options.chunkSize;if(this.ended)return!1;i=e===~~e?e:!0===e?4:0,"string"==typeof t?n.input=o.string2buf(t):"[object ArrayBuffer]"===d.call(t)?n.input=new Uint8Array(t):n.input=t,n.next_in=0,n.avail_in=n.input.length;do{if(0===n.avail_out&&(n.output=new s.Buf8(l),n.next_out=0,n.avail_out=l),1!==(a=r.deflate(n,i))&&a!==f)return this.onEnd(a),this.ended=!0,!1;0!==n.avail_out&&(0!==n.avail_in||4!==i&&2!==i)||("string"===this.options.to?this.onData(o.buf2binstring(s.shrinkBuf(n.output,n.next_out))):this.onData(s.shrinkBuf(n.output,n.next_out)))}while((n.avail_in>0||0===n.avail_out)&&1!==a);return 4===i?(a=r.deflateEnd(this.strm),this.onEnd(a),this.ended=!0,a===f):2!==i||(this.onEnd(f),n.avail_out=0,!0)},i.prototype.onData=function(t){this.chunks.push(t)},i.prototype.onEnd=function(t){t===f&&("string"===this.options.to?this.result=this.chunks.join(""):this.result=s.flattenChunks(this.chunks)),this.chunks=[],this.err=t,this.msg=this.strm.msg},a.Deflate=i,a.deflate=n,a.deflateRaw=function(t,e){return 
e=e||{},e.raw=!0,n(t,e)},a.gzip=function(t,e){return e=e||{},e.gzip=!0,n(t,e)}},{"./utils/common":3,"./utils/strings":4,"./zlib/deflate":8,"./zlib/messages":13,"./zlib/zstream":15}],2:[function(t,e,a){"use strict";function i(t){if(!(this instanceof i))return new i(t);this.options=s.assign({chunkSize:16384,windowBits:0,to:""},t||{});var e=this.options;e.raw&&e.windowBits>=0&&e.windowBits<16&&(e.windowBits=-e.windowBits,0===e.windowBits&&(e.windowBits=-15)),!(e.windowBits>=0&&e.windowBits<16)||t&&t.windowBits||(e.windowBits+=32),e.windowBits>15&&e.windowBits<48&&0==(15&e.windowBits)&&(e.windowBits|=15),this.err=0,this.msg="",this.ended=!1,this.chunks=[],this.strm=new d,this.strm.avail_out=0;var a=r.inflateInit2(this.strm,e.windowBits);if(a!==l.Z_OK)throw new Error(h[a]);this.header=new f,r.inflateGetHeader(this.strm,this.header)}function n(t,e){var a=new i(e);if(a.push(t,!0),a.err)throw a.msg||h[a.err];return a.result}var r=t("./zlib/inflate"),s=t("./utils/common"),o=t("./utils/strings"),l=t("./zlib/constants"),h=t("./zlib/messages"),d=t("./zlib/zstream"),f=t("./zlib/gzheader"),_=Object.prototype.toString;i.prototype.push=function(t,e){var a,i,n,h,d,f,u=this.strm,c=this.options.chunkSize,b=this.options.dictionary,g=!1;if(this.ended)return!1;i=e===~~e?e:!0===e?l.Z_FINISH:l.Z_NO_FLUSH,"string"==typeof t?u.input=o.binstring2buf(t):"[object ArrayBuffer]"===_.call(t)?u.input=new Uint8Array(t):u.input=t,u.next_in=0,u.avail_in=u.input.length;do{if(0===u.avail_out&&(u.output=new s.Buf8(c),u.next_out=0,u.avail_out=c),(a=r.inflate(u,l.Z_NO_FLUSH))===l.Z_NEED_DICT&&b&&(f="string"==typeof b?o.string2buf(b):"[object ArrayBuffer]"===_.call(b)?new Uint8Array(b):b,a=r.inflateSetDictionary(this.strm,f)),a===l.Z_BUF_ERROR&&!0===g&&(a=l.Z_OK,g=!1),a!==l.Z_STREAM_END&&a!==l.Z_OK)return 
this.onEnd(a),this.ended=!0,!1;u.next_out&&(0!==u.avail_out&&a!==l.Z_STREAM_END&&(0!==u.avail_in||i!==l.Z_FINISH&&i!==l.Z_SYNC_FLUSH)||("string"===this.options.to?(n=o.utf8border(u.output,u.next_out),h=u.next_out-n,d=o.buf2string(u.output,n),u.next_out=h,u.avail_out=c-h,h&&s.arraySet(u.output,u.output,n,h,0),this.onData(d)):this.onData(s.shrinkBuf(u.output,u.next_out)))),0===u.avail_in&&0===u.avail_out&&(g=!0)}while((u.avail_in>0||0===u.avail_out)&&a!==l.Z_STREAM_END);return a===l.Z_STREAM_END&&(i=l.Z_FINISH),i===l.Z_FINISH?(a=r.inflateEnd(this.strm),this.onEnd(a),this.ended=!0,a===l.Z_OK):i!==l.Z_SYNC_FLUSH||(this.onEnd(l.Z_OK),u.avail_out=0,!0)},i.prototype.onData=function(t){this.chunks.push(t)},i.prototype.onEnd=function(t){t===l.Z_OK&&("string"===this.options.to?this.result=this.chunks.join(""):this.result=s.flattenChunks(this.chunks)),this.chunks=[],this.err=t,this.msg=this.strm.msg},a.Inflate=i,a.inflate=n,a.inflateRaw=function(t,e){return e=e||{},e.raw=!0,n(t,e)},a.ungzip=n},{"./utils/common":3,"./utils/strings":4,"./zlib/constants":6,"./zlib/gzheader":9,"./zlib/inflate":11,"./zlib/messages":13,"./zlib/zstream":15}],3:[function(t,e,a){"use strict";function i(t,e){return Object.prototype.hasOwnProperty.call(t,e)}var n="undefined"!=typeof Uint8Array&&"undefined"!=typeof Uint16Array&&"undefined"!=typeof Int32Array;a.assign=function(t){for(var e=Array.prototype.slice.call(arguments,1);e.length;){var a=e.shift();if(a){if("object"!=typeof a)throw new TypeError(a+"must be non-object");for(var n in a)i(a,n)&&(t[n]=a[n])}}return t},a.shrinkBuf=function(t,e){return t.length===e?t:t.subarray?t.subarray(0,e):(t.length=e,t)};var r={arraySet:function(t,e,a,i,n){if(e.subarray&&t.subarray)t.set(e.subarray(a,a+i),n);else for(var r=0;r<i;r++)t[n+r]=e[a+r]},flattenChunks:function(t){var e,a,i,n,r,s;for(i=0,e=0,a=t.length;e<a;e++)i+=t[e].length;for(s=new Uint8Array(i),n=0,e=0,a=t.length;e<a;e++)r=t[e],s.set(r,n),n+=r.length;return s}},s={arraySet:function(t,e,a,i,n){for(var 
r=0;r<i;r++)t[n+r]=e[a+r]},flattenChunks:function(t){return[].concat.apply([],t)}};a.setTyped=function(t){t?(a.Buf8=Uint8Array,a.Buf16=Uint16Array,a.Buf32=Int32Array,a.assign(a,r)):(a.Buf8=Array,a.Buf16=Array,a.Buf32=Array,a.assign(a,s))},a.setTyped(n)},{}],4:[function(t,e,a){"use strict";function i(t,e){if(e<65537&&(t.subarray&&s||!t.subarray&&r))return String.fromCharCode.apply(null,n.shrinkBuf(t,e));for(var a="",i=0;i<e;i++)a+=String.fromCharCode(t[i]);return a}var n=t("./common"),r=!0,s=!0;try{String.fromCharCode.apply(null,[0])}catch(t){r=!1}try{String.fromCharCode.apply(null,new Uint8Array(1))}catch(t){s=!1}for(var o=new n.Buf8(256),l=0;l<256;l++)o[l]=l>=252?6:l>=248?5:l>=240?4:l>=224?3:l>=192?2:1;o[254]=o[254]=1,a.string2buf=function(t){var e,a,i,r,s,o=t.length,l=0;for(r=0;r<o;r++)55296==(64512&(a=t.charCodeAt(r)))&&r+1<o&&56320==(64512&(i=t.charCodeAt(r+1)))&&(a=65536+(a-55296<<10)+(i-56320),r++),l+=a<128?1:a<2048?2:a<65536?3:4;for(e=new n.Buf8(l),s=0,r=0;s<l;r++)55296==(64512&(a=t.charCodeAt(r)))&&r+1<o&&56320==(64512&(i=t.charCodeAt(r+1)))&&(a=65536+(a-55296<<10)+(i-56320),r++),a<128?e[s++]=a:a<2048?(e[s++]=192|a>>>6,e[s++]=128|63&a):a<65536?(e[s++]=224|a>>>12,e[s++]=128|a>>>6&63,e[s++]=128|63&a):(e[s++]=240|a>>>18,e[s++]=128|a>>>12&63,e[s++]=128|a>>>6&63,e[s++]=128|63&a);return e},a.buf2binstring=function(t){return i(t,t.length)},a.binstring2buf=function(t){for(var e=new n.Buf8(t.length),a=0,i=e.length;a<i;a++)e[a]=t.charCodeAt(a);return e},a.buf2string=function(t,e){var a,n,r,s,l=e||t.length,h=new Array(2*l);for(n=0,a=0;a<l;)if((r=t[a++])<128)h[n++]=r;else if((s=o[r])>4)h[n++]=65533,a+=s-1;else{for(r&=2===s?31:3===s?15:7;s>1&&a<l;)r=r<<6|63&t[a++],s--;s>1?h[n++]=65533:r<65536?h[n++]=r:(r-=65536,h[n++]=55296|r>>10&1023,h[n++]=56320|1023&r)}return i(h,n)},a.utf8border=function(t,e){var a;for((e=e||t.length)>t.length&&(e=t.length),a=e-1;a>=0&&128==(192&t[a]);)a--;return a<0?e:0===a?e:a+o[t[a]]>e?a:e}},{"./common":3}],5:[function(t,e,a){"use 
strict";e.exports=function(t,e,a,i){for(var n=65535&t|0,r=t>>>16&65535|0,s=0;0!==a;){a-=s=a>2e3?2e3:a;do{r=r+(n=n+e[i++]|0)|0}while(--s);n%=65521,r%=65521}return n|r<<16|0}},{}],6:[function(t,e,a){"use strict";e.exports={Z_NO_FLUSH:0,Z_PARTIAL_FLUSH:1,Z_SYNC_FLUSH:2,Z_FULL_FLUSH:3,Z_FINISH:4,Z_BLOCK:5,Z_TREES:6,Z_OK:0,Z_STREAM_END:1,Z_NEED_DICT:2,Z_ERRNO:-1,Z_STREAM_ERROR:-2,Z_DATA_ERROR:-3,Z_BUF_ERROR:-5,Z_NO_COMPRESSION:0,Z_BEST_SPEED:1,Z_BEST_COMPRESSION:9,Z_DEFAULT_COMPRESSION:-1,Z_FILTERED:1,Z_HUFFMAN_ONLY:2,Z_RLE:3,Z_FIXED:4,Z_DEFAULT_STRATEGY:0,Z_BINARY:0,Z_TEXT:1,Z_UNKNOWN:2,Z_DEFLATED:8}},{}],7:[function(t,e,a){"use strict";var i=function(){for(var t,e=[],a=0;a<256;a++){t=a;for(var i=0;i<8;i++)t=1&t?3988292384^t>>>1:t>>>1;e[a]=t}return e}();e.exports=function(t,e,a,n){var r=i,s=n+a;t^=-1;for(var o=n;o<s;o++)t=t>>>8^r[255&(t^e[o])];return-1^t}},{}],8:[function(t,e,a){"use strict";function i(t,e){return t.msg=A[e],e}function n(t){return(t<<1)-(t>4?9:0)}function r(t){for(var e=t.length;--e>=0;)t[e]=0}function s(t){var e=t.state,a=e.pending;a>t.avail_out&&(a=t.avail_out),0!==a&&(z.arraySet(t.output,e.pending_buf,e.pending_out,a,t.next_out),t.next_out+=a,e.pending_out+=a,t.total_out+=a,t.avail_out-=a,e.pending-=a,0===e.pending&&(e.pending_out=0))}function o(t,e){B._tr_flush_block(t,t.block_start>=0?t.block_start:-1,t.strstart-t.block_start,e),t.block_start=t.strstart,s(t.strm)}function l(t,e){t.pending_buf[t.pending++]=e}function h(t,e){t.pending_buf[t.pending++]=e>>>8&255,t.pending_buf[t.pending++]=255&e}function d(t,e,a,i){var n=t.avail_in;return n>i&&(n=i),0===n?0:(t.avail_in-=n,z.arraySet(e,t.input,t.next_in,n,a),1===t.state.wrap?t.adler=S(t.adler,e,n,a):2===t.state.wrap&&(t.adler=E(t.adler,e,n,a)),t.next_in+=n,t.total_in+=n,n)}function f(t,e){var 
a,i,n=t.max_chain_length,r=t.strstart,s=t.prev_length,o=t.nice_match,l=t.strstart>t.w_size-it?t.strstart-(t.w_size-it):0,h=t.window,d=t.w_mask,f=t.prev,_=t.strstart+at,u=h[r+s-1],c=h[r+s];t.prev_length>=t.good_match&&(n>>=2),o>t.lookahead&&(o=t.lookahead);do{if(a=e,h[a+s]===c&&h[a+s-1]===u&&h[a]===h[r]&&h[++a]===h[r+1]){r+=2,a++;do{}while(h[++r]===h[++a]&&h[++r]===h[++a]&&h[++r]===h[++a]&&h[++r]===h[++a]&&h[++r]===h[++a]&&h[++r]===h[++a]&&h[++r]===h[++a]&&h[++r]===h[++a]&&r<_);if(i=at-(_-r),r=_-at,i>s){if(t.match_start=e,s=i,i>=o)break;u=h[r+s-1],c=h[r+s]}}}while((e=f[e&d])>l&&0!=--n);return s<=t.lookahead?s:t.lookahead}function _(t){var e,a,i,n,r,s=t.w_size;do{if(n=t.window_size-t.lookahead-t.strstart,t.strstart>=s+(s-it)){z.arraySet(t.window,t.window,s,s,0),t.match_start-=s,t.strstart-=s,t.block_start-=s,e=a=t.hash_size;do{i=t.head[--e],t.head[e]=i>=s?i-s:0}while(--a);e=a=s;do{i=t.prev[--e],t.prev[e]=i>=s?i-s:0}while(--a);n+=s}if(0===t.strm.avail_in)break;if(a=d(t.strm,t.window,t.strstart+t.lookahead,n),t.lookahead+=a,t.lookahead+t.insert>=et)for(r=t.strstart-t.insert,t.ins_h=t.window[r],t.ins_h=(t.ins_h<<t.hash_shift^t.window[r+1])&t.hash_mask;t.insert&&(t.ins_h=(t.ins_h<<t.hash_shift^t.window[r+et-1])&t.hash_mask,t.prev[r&t.w_mask]=t.head[t.ins_h],t.head[t.ins_h]=r,r++,t.insert--,!(t.lookahead+t.insert<et)););}while(t.lookahead<it&&0!==t.strm.avail_in)}function u(t,e){for(var a,i;;){if(t.lookahead<it){if(_(t),t.lookahead<it&&e===Z)return 
_t;if(0===t.lookahead)break}if(a=0,t.lookahead>=et&&(t.ins_h=(t.ins_h<<t.hash_shift^t.window[t.strstart+et-1])&t.hash_mask,a=t.prev[t.strstart&t.w_mask]=t.head[t.ins_h],t.head[t.ins_h]=t.strstart),0!==a&&t.strstart-a<=t.w_size-it&&(t.match_length=f(t,a)),t.match_length>=et)if(i=B._tr_tally(t,t.strstart-t.match_start,t.match_length-et),t.lookahead-=t.match_length,t.match_length<=t.max_lazy_match&&t.lookahead>=et){t.match_length--;do{t.strstart++,t.ins_h=(t.ins_h<<t.hash_shift^t.window[t.strstart+et-1])&t.hash_mask,a=t.prev[t.strstart&t.w_mask]=t.head[t.ins_h],t.head[t.ins_h]=t.strstart}while(0!=--t.match_length);t.strstart++}else t.strstart+=t.match_length,t.match_length=0,t.ins_h=t.window[t.strstart],t.ins_h=(t.ins_h<<t.hash_shift^t.window[t.strstart+1])&t.hash_mask;else i=B._tr_tally(t,0,t.window[t.strstart]),t.lookahead--,t.strstart++;if(i&&(o(t,!1),0===t.strm.avail_out))return _t}return t.insert=t.strstart<et-1?t.strstart:et-1,e===N?(o(t,!0),0===t.strm.avail_out?ct:bt):t.last_lit&&(o(t,!1),0===t.strm.avail_out)?_t:ut}function c(t,e){for(var a,i,n;;){if(t.lookahead<it){if(_(t),t.lookahead<it&&e===Z)return 
_t;if(0===t.lookahead)break}if(a=0,t.lookahead>=et&&(t.ins_h=(t.ins_h<<t.hash_shift^t.window[t.strstart+et-1])&t.hash_mask,a=t.prev[t.strstart&t.w_mask]=t.head[t.ins_h],t.head[t.ins_h]=t.strstart),t.prev_length=t.match_length,t.prev_match=t.match_start,t.match_length=et-1,0!==a&&t.prev_length<t.max_lazy_match&&t.strstart-a<=t.w_size-it&&(t.match_length=f(t,a),t.match_length<=5&&(t.strategy===H||t.match_length===et&&t.strstart-t.match_start>4096)&&(t.match_length=et-1)),t.prev_length>=et&&t.match_length<=t.prev_length){n=t.strstart+t.lookahead-et,i=B._tr_tally(t,t.strstart-1-t.prev_match,t.prev_length-et),t.lookahead-=t.prev_length-1,t.prev_length-=2;do{++t.strstart<=n&&(t.ins_h=(t.ins_h<<t.hash_shift^t.window[t.strstart+et-1])&t.hash_mask,a=t.prev[t.strstart&t.w_mask]=t.head[t.ins_h],t.head[t.ins_h]=t.strstart)}while(0!=--t.prev_length);if(t.match_available=0,t.match_length=et-1,t.strstart++,i&&(o(t,!1),0===t.strm.avail_out))return _t}else if(t.match_available){if((i=B._tr_tally(t,0,t.window[t.strstart-1]))&&o(t,!1),t.strstart++,t.lookahead--,0===t.strm.avail_out)return _t}else t.match_available=1,t.strstart++,t.lookahead--}return t.match_available&&(i=B._tr_tally(t,0,t.window[t.strstart-1]),t.match_available=0),t.insert=t.strstart<et-1?t.strstart:et-1,e===N?(o(t,!0),0===t.strm.avail_out?ct:bt):t.last_lit&&(o(t,!1),0===t.strm.avail_out)?_t:ut}function b(t,e){for(var a,i,n,r,s=t.window;;){if(t.lookahead<=at){if(_(t),t.lookahead<=at&&e===Z)return 
_t;if(0===t.lookahead)break}if(t.match_length=0,t.lookahead>=et&&t.strstart>0&&(n=t.strstart-1,(i=s[n])===s[++n]&&i===s[++n]&&i===s[++n])){r=t.strstart+at;do{}while(i===s[++n]&&i===s[++n]&&i===s[++n]&&i===s[++n]&&i===s[++n]&&i===s[++n]&&i===s[++n]&&i===s[++n]&&n<r);t.match_length=at-(r-n),t.match_length>t.lookahead&&(t.match_length=t.lookahead)}if(t.match_length>=et?(a=B._tr_tally(t,1,t.match_length-et),t.lookahead-=t.match_length,t.strstart+=t.match_length,t.match_length=0):(a=B._tr_tally(t,0,t.window[t.strstart]),t.lookahead--,t.strstart++),a&&(o(t,!1),0===t.strm.avail_out))return _t}return t.insert=0,e===N?(o(t,!0),0===t.strm.avail_out?ct:bt):t.last_lit&&(o(t,!1),0===t.strm.avail_out)?_t:ut}function g(t,e){for(var a;;){if(0===t.lookahead&&(_(t),0===t.lookahead)){if(e===Z)return _t;break}if(t.match_length=0,a=B._tr_tally(t,0,t.window[t.strstart]),t.lookahead--,t.strstart++,a&&(o(t,!1),0===t.strm.avail_out))return _t}return t.insert=0,e===N?(o(t,!0),0===t.strm.avail_out?ct:bt):t.last_lit&&(o(t,!1),0===t.strm.avail_out)?_t:ut}function m(t,e,a,i,n){this.good_length=t,this.max_lazy=e,this.nice_length=a,this.max_chain=i,this.func=n}function w(t){t.window_size=2*t.w_size,r(t.head),t.max_lazy_match=x[t.level].max_lazy,t.good_match=x[t.level].good_length,t.nice_match=x[t.level].nice_length,t.max_chain_length=x[t.level].max_chain,t.strstart=0,t.block_start=0,t.lookahead=0,t.insert=0,t.match_length=t.prev_length=et-1,t.match_available=0,t.ins_h=0}function 
p(){this.strm=null,this.status=0,this.pending_buf=null,this.pending_buf_size=0,this.pending_out=0,this.pending=0,this.wrap=0,this.gzhead=null,this.gzindex=0,this.method=q,this.last_flush=-1,this.w_size=0,this.w_bits=0,this.w_mask=0,this.window=null,this.window_size=0,this.prev=null,this.head=null,this.ins_h=0,this.hash_size=0,this.hash_bits=0,this.hash_mask=0,this.hash_shift=0,this.block_start=0,this.match_length=0,this.prev_match=0,this.match_available=0,this.strstart=0,this.match_start=0,this.lookahead=0,this.prev_length=0,this.max_chain_length=0,this.max_lazy_match=0,this.level=0,this.strategy=0,this.good_match=0,this.nice_match=0,this.dyn_ltree=new z.Buf16(2*$),this.dyn_dtree=new z.Buf16(2*(2*Q+1)),this.bl_tree=new z.Buf16(2*(2*V+1)),r(this.dyn_ltree),r(this.dyn_dtree),r(this.bl_tree),this.l_desc=null,this.d_desc=null,this.bl_desc=null,this.bl_count=new z.Buf16(tt+1),this.heap=new z.Buf16(2*J+1),r(this.heap),this.heap_len=0,this.heap_max=0,this.depth=new z.Buf16(2*J+1),r(this.depth),this.l_buf=0,this.lit_bufsize=0,this.last_lit=0,this.d_buf=0,this.opt_len=0,this.static_len=0,this.matches=0,this.insert=0,this.bi_buf=0,this.bi_valid=0}function v(t){var e;return t&&t.state?(t.total_in=t.total_out=0,t.data_type=Y,e=t.state,e.pending=0,e.pending_out=0,e.wrap<0&&(e.wrap=-e.wrap),e.status=e.wrap?rt:dt,t.adler=2===e.wrap?0:1,e.last_flush=Z,B._tr_init(e),D):i(t,U)}function k(t){var e=v(t);return e===D&&w(t.state),e}function y(t,e,a,n,r,s){if(!t)return U;var o=1;if(e===L&&(e=6),n<0?(o=0,n=-n):n>15&&(o=2,n-=16),r<1||r>G||a!==q||n<8||n>15||e<0||e>9||s<0||s>M)return i(t,U);8===n&&(n=9);var l=new p;return t.state=l,l.strm=t,l.wrap=o,l.gzhead=null,l.w_bits=n,l.w_size=1<<l.w_bits,l.w_mask=l.w_size-1,l.hash_bits=r+7,l.hash_size=1<<l.hash_bits,l.hash_mask=l.hash_size-1,l.hash_shift=~~((l.hash_bits+et-1)/et),l.window=new z.Buf8(2*l.w_size),l.head=new z.Buf16(l.hash_size),l.prev=new z.Buf16(l.w_size),l.lit_bufsize=1<<r+6,l.pending_buf_size=4*l.lit_bufsize,l.pending_buf=new 
z.Buf8(l.pending_buf_size),l.d_buf=1*l.lit_bufsize,l.l_buf=3*l.lit_bufsize,l.level=e,l.strategy=s,l.method=a,k(t)}var x,z=t("../utils/common"),B=t("./trees"),S=t("./adler32"),E=t("./crc32"),A=t("./messages"),Z=0,R=1,C=3,N=4,O=5,D=0,I=1,U=-2,T=-3,F=-5,L=-1,H=1,j=2,K=3,M=4,P=0,Y=2,q=8,G=9,X=15,W=8,J=286,Q=30,V=19,$=2*J+1,tt=15,et=3,at=258,it=at+et+1,nt=32,rt=42,st=69,ot=73,lt=91,ht=103,dt=113,ft=666,_t=1,ut=2,ct=3,bt=4,gt=3;x=[new m(0,0,0,0,function(t,e){var a=65535;for(a>t.pending_buf_size-5&&(a=t.pending_buf_size-5);;){if(t.lookahead<=1){if(_(t),0===t.lookahead&&e===Z)return _t;if(0===t.lookahead)break}t.strstart+=t.lookahead,t.lookahead=0;var i=t.block_start+a;if((0===t.strstart||t.strstart>=i)&&(t.lookahead=t.strstart-i,t.strstart=i,o(t,!1),0===t.strm.avail_out))return _t;if(t.strstart-t.block_start>=t.w_size-it&&(o(t,!1),0===t.strm.avail_out))return _t}return t.insert=0,e===N?(o(t,!0),0===t.strm.avail_out?ct:bt):(t.strstart>t.block_start&&(o(t,!1),t.strm.avail_out),_t)}),new m(4,4,8,4,u),new m(4,5,16,8,u),new m(4,6,32,32,u),new m(4,4,16,16,c),new m(8,16,32,32,c),new m(8,16,128,128,c),new m(8,32,128,256,c),new m(32,128,258,1024,c),new m(32,258,258,4096,c)],a.deflateInit=function(t,e){return y(t,e,q,X,W,P)},a.deflateInit2=y,a.deflateReset=k,a.deflateResetKeep=v,a.deflateSetHeader=function(t,e){return t&&t.state?2!==t.state.wrap?U:(t.state.gzhead=e,D):U},a.deflate=function(t,e){var a,o,d,f;if(!t||!t.state||e>O||e<0)return t?i(t,U):U;if(o=t.state,!t.output||!t.input&&0!==t.avail_in||o.status===ft&&e!==N)return 
i(t,0===t.avail_out?F:U);if(o.strm=t,a=o.last_flush,o.last_flush=e,o.status===rt)if(2===o.wrap)t.adler=0,l(o,31),l(o,139),l(o,8),o.gzhead?(l(o,(o.gzhead.text?1:0)+(o.gzhead.hcrc?2:0)+(o.gzhead.extra?4:0)+(o.gzhead.name?8:0)+(o.gzhead.comment?16:0)),l(o,255&o.gzhead.time),l(o,o.gzhead.time>>8&255),l(o,o.gzhead.time>>16&255),l(o,o.gzhead.time>>24&255),l(o,9===o.level?2:o.strategy>=j||o.level<2?4:0),l(o,255&o.gzhead.os),o.gzhead.extra&&o.gzhead.extra.length&&(l(o,255&o.gzhead.extra.length),l(o,o.gzhead.extra.length>>8&255)),o.gzhead.hcrc&&(t.adler=E(t.adler,o.pending_buf,o.pending,0)),o.gzindex=0,o.status=st):(l(o,0),l(o,0),l(o,0),l(o,0),l(o,0),l(o,9===o.level?2:o.strategy>=j||o.level<2?4:0),l(o,gt),o.status=dt);else{var _=q+(o.w_bits-8<<4)<<8;_|=(o.strategy>=j||o.level<2?0:o.level<6?1:6===o.level?2:3)<<6,0!==o.strstart&&(_|=nt),_+=31-_%31,o.status=dt,h(o,_),0!==o.strstart&&(h(o,t.adler>>>16),h(o,65535&t.adler)),t.adler=1}if(o.status===st)if(o.gzhead.extra){for(d=o.pending;o.gzindex<(65535&o.gzhead.extra.length)&&(o.pending!==o.pending_buf_size||(o.gzhead.hcrc&&o.pending>d&&(t.adler=E(t.adler,o.pending_buf,o.pending-d,d)),s(t),d=o.pending,o.pending!==o.pending_buf_size));)l(o,255&o.gzhead.extra[o.gzindex]),o.gzindex++;o.gzhead.hcrc&&o.pending>d&&(t.adler=E(t.adler,o.pending_buf,o.pending-d,d)),o.gzindex===o.gzhead.extra.length&&(o.gzindex=0,o.status=ot)}else o.status=ot;if(o.status===ot)if(o.gzhead.name){d=o.pending;do{if(o.pending===o.pending_buf_size&&(o.gzhead.hcrc&&o.pending>d&&(t.adler=E(t.adler,o.pending_buf,o.pending-d,d)),s(t),d=o.pending,o.pending===o.pending_buf_size)){f=1;break}f=o.gzindex<o.gzhead.name.length?255&o.gzhead.name.charCodeAt(o.gzindex++):0,l(o,f)}while(0!==f);o.gzhead.hcrc&&o.pending>d&&(t.adler=E(t.adler,o.pending_buf,o.pending-d,d)),0===f&&(o.gzindex=0,o.status=lt)}else 
o.status=lt;if(o.status===lt)if(o.gzhead.comment){d=o.pending;do{if(o.pending===o.pending_buf_size&&(o.gzhead.hcrc&&o.pending>d&&(t.adler=E(t.adler,o.pending_buf,o.pending-d,d)),s(t),d=o.pending,o.pending===o.pending_buf_size)){f=1;break}f=o.gzindex<o.gzhead.comment.length?255&o.gzhead.comment.charCodeAt(o.gzindex++):0,l(o,f)}while(0!==f);o.gzhead.hcrc&&o.pending>d&&(t.adler=E(t.adler,o.pending_buf,o.pending-d,d)),0===f&&(o.status=ht)}else o.status=ht;if(o.status===ht&&(o.gzhead.hcrc?(o.pending+2>o.pending_buf_size&&s(t),o.pending+2<=o.pending_buf_size&&(l(o,255&t.adler),l(o,t.adler>>8&255),t.adler=0,o.status=dt)):o.status=dt),0!==o.pending){if(s(t),0===t.avail_out)return o.last_flush=-1,D}else if(0===t.avail_in&&n(e)<=n(a)&&e!==N)return i(t,F);if(o.status===ft&&0!==t.avail_in)return i(t,F);if(0!==t.avail_in||0!==o.lookahead||e!==Z&&o.status!==ft){var u=o.strategy===j?g(o,e):o.strategy===K?b(o,e):x[o.level].func(o,e);if(u!==ct&&u!==bt||(o.status=ft),u===_t||u===ct)return 0===t.avail_out&&(o.last_flush=-1),D;if(u===ut&&(e===R?B._tr_align(o):e!==O&&(B._tr_stored_block(o,0,0,!1),e===C&&(r(o.head),0===o.lookahead&&(o.strstart=0,o.block_start=0,o.insert=0))),s(t),0===t.avail_out))return o.last_flush=-1,D}return e!==N?D:o.wrap<=0?I:(2===o.wrap?(l(o,255&t.adler),l(o,t.adler>>8&255),l(o,t.adler>>16&255),l(o,t.adler>>24&255),l(o,255&t.total_in),l(o,t.total_in>>8&255),l(o,t.total_in>>16&255),l(o,t.total_in>>24&255)):(h(o,t.adler>>>16),h(o,65535&t.adler)),s(t),o.wrap>0&&(o.wrap=-o.wrap),0!==o.pending?D:I)},a.deflateEnd=function(t){var e;return t&&t.state?(e=t.state.status)!==rt&&e!==st&&e!==ot&&e!==lt&&e!==ht&&e!==dt&&e!==ft?i(t,U):(t.state=null,e===dt?i(t,T):D):U},a.deflateSetDictionary=function(t,e){var a,i,n,s,o,l,h,d,f=e.length;if(!t||!t.state)return U;if(a=t.state,2===(s=a.wrap)||1===s&&a.status!==rt||a.lookahead)return U;for(1===s&&(t.adler=S(t.adler,e,f,0)),a.wrap=0,f>=a.w_size&&(0===s&&(r(a.head),a.strstart=0,a.block_start=0,a.insert=0),d=new 
z.Buf8(a.w_size),z.arraySet(d,e,f-a.w_size,a.w_size,0),e=d,f=a.w_size),o=t.avail_in,l=t.next_in,h=t.input,t.avail_in=f,t.next_in=0,t.input=e,_(a);a.lookahead>=et;){i=a.strstart,n=a.lookahead-(et-1);do{a.ins_h=(a.ins_h<<a.hash_shift^a.window[i+et-1])&a.hash_mask,a.prev[i&a.w_mask]=a.head[a.ins_h],a.head[a.ins_h]=i,i++}while(--n);a.strstart=i,a.lookahead=et-1,_(a)}return a.strstart+=a.lookahead,a.block_start=a.strstart,a.insert=a.lookahead,a.lookahead=0,a.match_length=a.prev_length=et-1,a.match_available=0,t.next_in=l,t.input=h,t.avail_in=o,a.wrap=s,D},a.deflateInfo="pako deflate (from Nodeca project)"},{"../utils/common":3,"./adler32":5,"./crc32":7,"./messages":13,"./trees":14}],9:[function(t,e,a){"use strict";e.exports=function(){this.text=0,this.time=0,this.xflags=0,this.os=0,this.extra=null,this.extra_len=0,this.name="",this.comment="",this.hcrc=0,this.done=!1}},{}],10:[function(t,e,a){"use strict";e.exports=function(t,e){var a,i,n,r,s,o,l,h,d,f,_,u,c,b,g,m,w,p,v,k,y,x,z,B,S;a=t.state,i=t.next_in,B=t.input,n=i+(t.avail_in-5),r=t.next_out,S=t.output,s=r-(e-t.avail_out),o=r+(t.avail_out-257),l=a.dmax,h=a.wsize,d=a.whave,f=a.wnext,_=a.window,u=a.hold,c=a.bits,b=a.lencode,g=a.distcode,m=(1<<a.lenbits)-1,w=(1<<a.distbits)-1;t:do{c<15&&(u+=B[i++]<<c,c+=8,u+=B[i++]<<c,c+=8),p=b[u&m];e:for(;;){if(v=p>>>24,u>>>=v,c-=v,0===(v=p>>>16&255))S[r++]=65535&p;else{if(!(16&v)){if(0==(64&v)){p=b[(65535&p)+(u&(1<<v)-1)];continue e}if(32&v){a.mode=12;break t}t.msg="invalid literal/length code",a.mode=30;break t}k=65535&p,(v&=15)&&(c<v&&(u+=B[i++]<<c,c+=8),k+=u&(1<<v)-1,u>>>=v,c-=v),c<15&&(u+=B[i++]<<c,c+=8,u+=B[i++]<<c,c+=8),p=g[u&w];a:for(;;){if(v=p>>>24,u>>>=v,c-=v,!(16&(v=p>>>16&255))){if(0==(64&v)){p=g[(65535&p)+(u&(1<<v)-1)];continue a}t.msg="invalid distance code",a.mode=30;break t}if(y=65535&p,v&=15,c<v&&(u+=B[i++]<<c,(c+=8)<v&&(u+=B[i++]<<c,c+=8)),(y+=u&(1<<v)-1)>l){t.msg="invalid distance too far back",a.mode=30;break 
t}if(u>>>=v,c-=v,v=r-s,y>v){if((v=y-v)>d&&a.correct){t.msg="invalid distance too far back",a.mode=30;break t}if(x=0,z=_,0===f){if(x+=h-v,v<k){k-=v;do{S[r++]=_[x++]}while(--v);x=r-y,z=S}}else if(f<v){if(x+=h+f-v,(v-=f)<k){k-=v;do{S[r++]=_[x++]}while(--v);if(x=0,f<k){k-=v=f;do{S[r++]=_[x++]}while(--v);x=r-y,z=S}}}else if(x+=f-v,v<k){k-=v;do{S[r++]=_[x++]}while(--v);x=r-y,z=S}for(;k>2;)S[r++]=z[x++],S[r++]=z[x++],S[r++]=z[x++],k-=3;k&&(S[r++]=z[x++],k>1&&(S[r++]=z[x++]))}else{x=r-y;do{S[r++]=S[x++],S[r++]=S[x++],S[r++]=S[x++],k-=3}while(k>2);k&&(S[r++]=S[x++],k>1&&(S[r++]=S[x++]))}break}}break}}while(i<n&&r<o);i-=k=c>>3,u&=(1<<(c-=k<<3))-1,t.next_in=i,t.next_out=r,t.avail_in=i<n?n-i+5:5-(i-n),t.avail_out=r<o?o-r+257:257-(r-o),a.hold=u,a.bits=c}},{}],11:[function(t,e,a){"use strict";function i(t){return(t>>>24&255)+(t>>>8&65280)+((65280&t)<<8)+((255&t)<<24)}function n(){this.mode=0,this.last=!1,this.wrap=0,this.havedict=!1,this.flags=0,this.dmax=0,this.check=0,this.total=0,this.head=null,this.wbits=0,this.wsize=0,this.whave=0,this.wnext=0,this.window=null,this.hold=0,this.bits=0,this.length=0,this.offset=0,this.extra=0,this.lencode=null,this.distcode=null,this.lenbits=0,this.distbits=0,this.ncode=0,this.nlen=0,this.ndist=0,this.have=0,this.next=null,this.lens=new u.Buf16(320),this.work=new u.Buf16(288),this.lendyn=null,this.distdyn=null,this.correct=0,this.back=0,this.was=0}function r(t){var e;return t&&t.state?(e=t.state,t.total_in=t.total_out=e.total=0,t.msg="",e.wrap&&(t.adler=1&e.wrap),e.mode=N,e.last=0,e.havedict=0,e.dmax=32768,e.head=null,e.hold=0,e.bits=0,e.lencode=e.lendyn=new u.Buf32(dt),e.distcode=e.distdyn=new u.Buf32(ft),e.correct=1,e.back=-1,z):E}function s(t){var e;return t&&t.state?(e=t.state,e.wsize=0,e.whave=0,e.wnext=0,r(t)):E}function o(t,e){var a,i;return t&&t.state?(i=t.state,e<0?(a=0,e=-e):(a=1+(e>>4),e<48&&(e&=15)),e&&(e<8||e>15)?E:(null!==i.window&&i.wbits!==e&&(i.window=null),i.wrap=a,i.wbits=e,s(t))):E}function l(t,e){var a,i;return t?(i=new 
n,t.state=i,i.window=null,(a=o(t,e))!==z&&(t.state=null),a):E}function h(t){if(ut){var e;for(f=new u.Buf32(512),_=new u.Buf32(32),e=0;e<144;)t.lens[e++]=8;for(;e<256;)t.lens[e++]=9;for(;e<280;)t.lens[e++]=7;for(;e<288;)t.lens[e++]=8;for(m(p,t.lens,0,288,f,0,t.work,{bits:9}),e=0;e<32;)t.lens[e++]=5;m(v,t.lens,0,32,_,0,t.work,{bits:5}),ut=!1}t.lencode=f,t.lenbits=9,t.distcode=_,t.distbits=5}function d(t,e,a,i){var n,r=t.state;return null===r.window&&(r.wsize=1<<r.wbits,r.wnext=0,r.whave=0,r.window=new u.Buf8(r.wsize)),i>=r.wsize?(u.arraySet(r.window,e,a-r.wsize,r.wsize,0),r.wnext=0,r.whave=r.wsize):((n=r.wsize-r.wnext)>i&&(n=i),u.arraySet(r.window,e,a-i,n,r.wnext),(i-=n)?(u.arraySet(r.window,e,a-i,i,0),r.wnext=i,r.whave=r.wsize):(r.wnext+=n,r.wnext===r.wsize&&(r.wnext=0),r.whave<r.wsize&&(r.whave+=n))),0}var f,_,u=t("../utils/common"),c=t("./adler32"),b=t("./crc32"),g=t("./inffast"),m=t("./inftrees"),w=0,p=1,v=2,k=4,y=5,x=6,z=0,B=1,S=2,E=-2,A=-3,Z=-4,R=-5,C=8,N=1,O=2,D=3,I=4,U=5,T=6,F=7,L=8,H=9,j=10,K=11,M=12,P=13,Y=14,q=15,G=16,X=17,W=18,J=19,Q=20,V=21,$=22,tt=23,et=24,at=25,it=26,nt=27,rt=28,st=29,ot=30,lt=31,ht=32,dt=852,ft=592,_t=15,ut=!0;a.inflateReset=s,a.inflateReset2=o,a.inflateResetKeep=r,a.inflateInit=function(t){return l(t,_t)},a.inflateInit2=l,a.inflate=function(t,e){var a,n,r,s,o,l,f,_,dt,ft,_t,ut,ct,bt,gt,mt,wt,pt,vt,kt,yt,xt,zt,Bt,St=0,Et=new u.Buf8(4),At=[16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15];if(!t||!t.state||!t.output||!t.input&&0!==t.avail_in)return E;(a=t.state).mode===M&&(a.mode=P),o=t.next_out,r=t.output,f=t.avail_out,s=t.next_in,n=t.input,l=t.avail_in,_=a.hold,dt=a.bits,ft=l,_t=f,xt=z;t:for(;;)switch(a.mode){case N:if(0===a.wrap){a.mode=P;break}for(;dt<16;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}if(2&a.wrap&&35615===_){a.check=0,Et[0]=255&_,Et[1]=_>>>8&255,a.check=b(a.check,Et,2,0),_=0,dt=0,a.mode=O;break}if(a.flags=0,a.head&&(a.head.done=!1),!(1&a.wrap)||(((255&_)<<8)+(_>>8))%31){t.msg="incorrect header 
check",a.mode=ot;break}if((15&_)!==C){t.msg="unknown compression method",a.mode=ot;break}if(_>>>=4,dt-=4,yt=8+(15&_),0===a.wbits)a.wbits=yt;else if(yt>a.wbits){t.msg="invalid window size",a.mode=ot;break}a.dmax=1<<yt,t.adler=a.check=1,a.mode=512&_?j:M,_=0,dt=0;break;case O:for(;dt<16;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}if(a.flags=_,(255&a.flags)!==C){t.msg="unknown compression method",a.mode=ot;break}if(57344&a.flags){t.msg="unknown header flags set",a.mode=ot;break}a.head&&(a.head.text=_>>8&1),512&a.flags&&(Et[0]=255&_,Et[1]=_>>>8&255,a.check=b(a.check,Et,2,0)),_=0,dt=0,a.mode=D;case D:for(;dt<32;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}a.head&&(a.head.time=_),512&a.flags&&(Et[0]=255&_,Et[1]=_>>>8&255,Et[2]=_>>>16&255,Et[3]=_>>>24&255,a.check=b(a.check,Et,4,0)),_=0,dt=0,a.mode=I;case I:for(;dt<16;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}a.head&&(a.head.xflags=255&_,a.head.os=_>>8),512&a.flags&&(Et[0]=255&_,Et[1]=_>>>8&255,a.check=b(a.check,Et,2,0)),_=0,dt=0,a.mode=U;case U:if(1024&a.flags){for(;dt<16;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}a.length=_,a.head&&(a.head.extra_len=_),512&a.flags&&(Et[0]=255&_,Et[1]=_>>>8&255,a.check=b(a.check,Et,2,0)),_=0,dt=0}else a.head&&(a.head.extra=null);a.mode=T;case T:if(1024&a.flags&&((ut=a.length)>l&&(ut=l),ut&&(a.head&&(yt=a.head.extra_len-a.length,a.head.extra||(a.head.extra=new Array(a.head.extra_len)),u.arraySet(a.head.extra,n,s,ut,yt)),512&a.flags&&(a.check=b(a.check,n,ut,s)),l-=ut,s+=ut,a.length-=ut),a.length))break t;a.length=0,a.mode=F;case F:if(2048&a.flags){if(0===l)break t;ut=0;do{yt=n[s+ut++],a.head&&yt&&a.length<65536&&(a.head.name+=String.fromCharCode(yt))}while(yt&&ut<l);if(512&a.flags&&(a.check=b(a.check,n,ut,s)),l-=ut,s+=ut,yt)break t}else a.head&&(a.head.name=null);a.length=0,a.mode=L;case L:if(4096&a.flags){if(0===l)break t;ut=0;do{yt=n[s+ut++],a.head&&yt&&a.length<65536&&(a.head.comment+=String.fromCharCode(yt))}while(yt&&ut<l);if(512&a.flags&&(a.check=b(a.check,n,ut,s)),l-=ut,s+=ut,yt)break 
t}else a.head&&(a.head.comment=null);a.mode=H;case H:if(512&a.flags){for(;dt<16;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}if(_!==(65535&a.check)){t.msg="header crc mismatch",a.mode=ot;break}_=0,dt=0}a.head&&(a.head.hcrc=a.flags>>9&1,a.head.done=!0),t.adler=a.check=0,a.mode=M;break;case j:for(;dt<32;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}t.adler=a.check=i(_),_=0,dt=0,a.mode=K;case K:if(0===a.havedict)return t.next_out=o,t.avail_out=f,t.next_in=s,t.avail_in=l,a.hold=_,a.bits=dt,S;t.adler=a.check=1,a.mode=M;case M:if(e===y||e===x)break t;case P:if(a.last){_>>>=7&dt,dt-=7&dt,a.mode=nt;break}for(;dt<3;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}switch(a.last=1&_,_>>>=1,dt-=1,3&_){case 0:a.mode=Y;break;case 1:if(h(a),a.mode=Q,e===x){_>>>=2,dt-=2;break t}break;case 2:a.mode=X;break;case 3:t.msg="invalid block type",a.mode=ot}_>>>=2,dt-=2;break;case Y:for(_>>>=7&dt,dt-=7&dt;dt<32;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}if((65535&_)!=(_>>>16^65535)){t.msg="invalid stored block lengths",a.mode=ot;break}if(a.length=65535&_,_=0,dt=0,a.mode=q,e===x)break t;case q:a.mode=G;case G:if(ut=a.length){if(ut>l&&(ut=l),ut>f&&(ut=f),0===ut)break t;u.arraySet(r,n,s,ut,o),l-=ut,s+=ut,f-=ut,o+=ut,a.length-=ut;break}a.mode=M;break;case X:for(;dt<14;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}if(a.nlen=257+(31&_),_>>>=5,dt-=5,a.ndist=1+(31&_),_>>>=5,dt-=5,a.ncode=4+(15&_),_>>>=4,dt-=4,a.nlen>286||a.ndist>30){t.msg="too many length or distance symbols",a.mode=ot;break}a.have=0,a.mode=W;case W:for(;a.have<a.ncode;){for(;dt<3;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}a.lens[At[a.have++]]=7&_,_>>>=3,dt-=3}for(;a.have<19;)a.lens[At[a.have++]]=0;if(a.lencode=a.lendyn,a.lenbits=7,zt={bits:a.lenbits},xt=m(w,a.lens,0,19,a.lencode,0,a.work,zt),a.lenbits=zt.bits,xt){t.msg="invalid code lengths set",a.mode=ot;break}a.have=0,a.mode=J;case J:for(;a.have<a.nlen+a.ndist;){for(;St=a.lencode[_&(1<<a.lenbits)-1],gt=St>>>24,mt=St>>>16&255,wt=65535&St,!(gt<=dt);){if(0===l)break 
t;l--,_+=n[s++]<<dt,dt+=8}if(wt<16)_>>>=gt,dt-=gt,a.lens[a.have++]=wt;else{if(16===wt){for(Bt=gt+2;dt<Bt;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}if(_>>>=gt,dt-=gt,0===a.have){t.msg="invalid bit length repeat",a.mode=ot;break}yt=a.lens[a.have-1],ut=3+(3&_),_>>>=2,dt-=2}else if(17===wt){for(Bt=gt+3;dt<Bt;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}dt-=gt,yt=0,ut=3+(7&(_>>>=gt)),_>>>=3,dt-=3}else{for(Bt=gt+7;dt<Bt;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}dt-=gt,yt=0,ut=11+(127&(_>>>=gt)),_>>>=7,dt-=7}if(a.have+ut>a.nlen+a.ndist){t.msg="invalid bit length repeat",a.mode=ot;break}for(;ut--;)a.lens[a.have++]=yt}}if(a.mode===ot)break;if(0===a.lens[256]){t.msg="invalid code -- missing end-of-block",a.mode=ot;break}if(a.lenbits=9,zt={bits:a.lenbits},xt=m(p,a.lens,0,a.nlen,a.lencode,0,a.work,zt),a.lenbits=zt.bits,xt){t.msg="invalid literal/lengths set",a.mode=ot;break}if(a.distbits=6,a.distcode=a.distdyn,zt={bits:a.distbits},xt=m(v,a.lens,a.nlen,a.ndist,a.distcode,0,a.work,zt),a.distbits=zt.bits,xt){t.msg="invalid distances set",a.mode=ot;break}if(a.mode=Q,e===x)break t;case Q:a.mode=V;case V:if(l>=6&&f>=258){t.next_out=o,t.avail_out=f,t.next_in=s,t.avail_in=l,a.hold=_,a.bits=dt,g(t,_t),o=t.next_out,r=t.output,f=t.avail_out,s=t.next_in,n=t.input,l=t.avail_in,_=a.hold,dt=a.bits,a.mode===M&&(a.back=-1);break}for(a.back=0;St=a.lencode[_&(1<<a.lenbits)-1],gt=St>>>24,mt=St>>>16&255,wt=65535&St,!(gt<=dt);){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}if(mt&&0==(240&mt)){for(pt=gt,vt=mt,kt=wt;St=a.lencode[kt+((_&(1<<pt+vt)-1)>>pt)],gt=St>>>24,mt=St>>>16&255,wt=65535&St,!(pt+gt<=dt);){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}_>>>=pt,dt-=pt,a.back+=pt}if(_>>>=gt,dt-=gt,a.back+=gt,a.length=wt,0===mt){a.mode=it;break}if(32&mt){a.back=-1,a.mode=M;break}if(64&mt){t.msg="invalid literal/length code",a.mode=ot;break}a.extra=15&mt,a.mode=$;case $:if(a.extra){for(Bt=a.extra;dt<Bt;){if(0===l)break 
t;l--,_+=n[s++]<<dt,dt+=8}a.length+=_&(1<<a.extra)-1,_>>>=a.extra,dt-=a.extra,a.back+=a.extra}a.was=a.length,a.mode=tt;case tt:for(;St=a.distcode[_&(1<<a.distbits)-1],gt=St>>>24,mt=St>>>16&255,wt=65535&St,!(gt<=dt);){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}if(0==(240&mt)){for(pt=gt,vt=mt,kt=wt;St=a.distcode[kt+((_&(1<<pt+vt)-1)>>pt)],gt=St>>>24,mt=St>>>16&255,wt=65535&St,!(pt+gt<=dt);){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}_>>>=pt,dt-=pt,a.back+=pt}if(_>>>=gt,dt-=gt,a.back+=gt,64&mt){t.msg="invalid distance code",a.mode=ot;break}a.offset=wt,a.extra=15&mt,a.mode=et;case et:if(a.extra){for(Bt=a.extra;dt<Bt;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}a.offset+=_&(1<<a.extra)-1,_>>>=a.extra,dt-=a.extra,a.back+=a.extra}if(a.offset>a.dmax){t.msg="invalid distance too far back",a.mode=ot;break}a.mode=at;case at:if(0===f)break t;if(ut=_t-f,a.offset>ut){if((ut=a.offset-ut)>a.whave&&a.correct){t.msg="invalid distance too far back",a.mode=ot;break}ut>a.wnext?(ut-=a.wnext,ct=a.wsize-ut):ct=a.wnext-ut,ut>a.length&&(ut=a.length),bt=a.window}else bt=r,ct=o-a.offset,ut=a.length;ut>f&&(ut=f),f-=ut,a.length-=ut;do{r[o++]=bt[ct++]}while(--ut);0===a.length&&(a.mode=V);break;case it:if(0===f)break t;r[o++]=a.length,f--,a.mode=V;break;case nt:if(a.wrap){for(;dt<32;){if(0===l)break t;l--,_|=n[s++]<<dt,dt+=8}if(_t-=f,t.total_out+=_t,a.total+=_t,_t&&(t.adler=a.check=a.flags?b(a.check,r,_t,o-_t):c(a.check,r,_t,o-_t)),_t=f,(a.flags?_:i(_))!==a.check){t.msg="incorrect data check",a.mode=ot;break}_=0,dt=0}a.mode=rt;case rt:if(a.wrap&&a.flags){for(;dt<32;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}if(_!==(4294967295&a.total)){t.msg="incorrect length check",a.mode=ot;break}_=0,dt=0}a.mode=st;case st:xt=B;break t;case ot:xt=A;break t;case lt:return Z;case ht:default:return E}return 
t.next_out=o,t.avail_out=f,t.next_in=s,t.avail_in=l,a.hold=_,a.bits=dt,(a.wsize||_t!==t.avail_out&&a.mode<ot&&(a.mode<nt||e!==k))&&d(t,t.output,t.next_out,_t-t.avail_out)?(a.mode=lt,Z):(ft-=t.avail_in,_t-=t.avail_out,t.total_in+=ft,t.total_out+=_t,a.total+=_t,a.wrap&&_t&&(t.adler=a.check=a.flags?b(a.check,r,_t,t.next_out-_t):c(a.check,r,_t,t.next_out-_t)),t.data_type=a.bits+(a.last?64:0)+(a.mode===M?128:0)+(a.mode===Q||a.mode===q?256:0),(0===ft&&0===_t||e===k)&&xt===z&&(xt=R),xt)},a.inflateEnd=function(t){if(!t||!t.state)return E;var e=t.state;return e.window&&(e.window=null),t.state=null,z},a.inflateGetHeader=function(t,e){var a;return t&&t.state?0==(2&(a=t.state).wrap)?E:(a.head=e,e.done=!1,z):E},a.inflateSetDictionary=function(t,e){var a,i,n=e.length;return t&&t.state?0!==(a=t.state).wrap&&a.mode!==K?E:a.mode===K&&(i=1,(i=c(i,e,n,0))!==a.check)?A:d(t,e,n,n)?(a.mode=lt,Z):(a.havedict=1,z):E},a.inflateInfo="pako inflate (from Nodeca project)"},{"../utils/common":3,"./adler32":5,"./crc32":7,"./inffast":10,"./inftrees":12}],12:[function(t,e,a){"use strict";var i=t("../utils/common"),n=[3,4,5,6,7,8,9,10,11,13,15,17,19,23,27,31,35,43,51,59,67,83,99,115,131,163,195,227,258,0,0],r=[16,16,16,16,16,16,16,16,17,17,17,17,18,18,18,18,19,19,19,19,20,20,20,20,21,21,21,21,16,72,78],s=[1,2,3,4,5,7,9,13,17,25,33,49,65,97,129,193,257,385,513,769,1025,1537,2049,3073,4097,6145,8193,12289,16385,24577,0,0],o=[16,16,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,24,24,25,25,26,26,27,27,28,28,29,29,64,64];e.exports=function(t,e,a,l,h,d,f,_){var u,c,b,g,m,w,p,v,k,y=_.bits,x=0,z=0,B=0,S=0,E=0,A=0,Z=0,R=0,C=0,N=0,O=null,D=0,I=new i.Buf16(16),U=new i.Buf16(16),T=null,F=0;for(x=0;x<=15;x++)I[x]=0;for(z=0;z<l;z++)I[e[a+z]]++;for(E=y,S=15;S>=1&&0===I[S];S--);if(E>S&&(E=S),0===S)return 
h[d++]=20971520,h[d++]=20971520,_.bits=1,0;for(B=1;B<S&&0===I[B];B++);for(E<B&&(E=B),R=1,x=1;x<=15;x++)if(R<<=1,(R-=I[x])<0)return-1;if(R>0&&(0===t||1!==S))return-1;for(U[1]=0,x=1;x<15;x++)U[x+1]=U[x]+I[x];for(z=0;z<l;z++)0!==e[a+z]&&(f[U[e[a+z]]++]=z);if(0===t?(O=T=f,w=19):1===t?(O=n,D-=257,T=r,F-=257,w=256):(O=s,T=o,w=-1),N=0,z=0,x=B,m=d,A=E,Z=0,b=-1,C=1<<E,g=C-1,1===t&&C>852||2===t&&C>592)return 1;for(;;){p=x-Z,f[z]<w?(v=0,k=f[z]):f[z]>w?(v=T[F+f[z]],k=O[D+f[z]]):(v=96,k=0),u=1<<x-Z,B=c=1<<A;do{h[m+(N>>Z)+(c-=u)]=p<<24|v<<16|k|0}while(0!==c);for(u=1<<x-1;N&u;)u>>=1;if(0!==u?(N&=u-1,N+=u):N=0,z++,0==--I[x]){if(x===S)break;x=e[a+f[z]]}if(x>E&&(N&g)!==b){for(0===Z&&(Z=E),m+=B,R=1<<(A=x-Z);A+Z<S&&!((R-=I[A+Z])<=0);)A++,R<<=1;if(C+=1<<A,1===t&&C>852||2===t&&C>592)return 1;h[b=N&g]=E<<24|A<<16|m-d|0}}return 0!==N&&(h[m+N]=x-Z<<24|64<<16|0),_.bits=E,0}},{"../utils/common":3}],13:[function(t,e,a){"use strict";e.exports={2:"need dictionary",1:"stream end",0:"","-1":"file error","-2":"stream error","-3":"data error","-4":"insufficient memory","-5":"buffer error","-6":"incompatible version"}},{}],14:[function(t,e,a){"use strict";function i(t){for(var e=t.length;--e>=0;)t[e]=0}function n(t,e,a,i,n){this.static_tree=t,this.extra_bits=e,this.extra_base=a,this.elems=i,this.max_length=n,this.has_stree=t&&t.length}function r(t,e){this.dyn_tree=t,this.max_code=0,this.stat_desc=e}function s(t){return t<256?et[t]:et[256+(t>>>7)]}function o(t,e){t.pending_buf[t.pending++]=255&e,t.pending_buf[t.pending++]=e>>>8&255}function l(t,e,a){t.bi_valid>M-a?(t.bi_buf|=e<<t.bi_valid&65535,o(t,t.bi_buf),t.bi_buf=e>>M-t.bi_valid,t.bi_valid+=a-M):(t.bi_buf|=e<<t.bi_valid&65535,t.bi_valid+=a)}function h(t,e,a){l(t,a[2*e],a[2*e+1])}function d(t,e){var a=0;do{a|=1&t,t>>>=1,a<<=1}while(--e>0);return a>>>1}function f(t){16===t.bi_valid?(o(t,t.bi_buf),t.bi_buf=0,t.bi_valid=0):t.bi_valid>=8&&(t.pending_buf[t.pending++]=255&t.bi_buf,t.bi_buf>>=8,t.bi_valid-=8)}function _(t,e){var 
a,i,n,r,s,o,l=e.dyn_tree,h=e.max_code,d=e.stat_desc.static_tree,f=e.stat_desc.has_stree,_=e.stat_desc.extra_bits,u=e.stat_desc.extra_base,c=e.stat_desc.max_length,b=0;for(r=0;r<=K;r++)t.bl_count[r]=0;for(l[2*t.heap[t.heap_max]+1]=0,a=t.heap_max+1;a<j;a++)(r=l[2*l[2*(i=t.heap[a])+1]+1]+1)>c&&(r=c,b++),l[2*i+1]=r,i>h||(t.bl_count[r]++,s=0,i>=u&&(s=_[i-u]),o=l[2*i],t.opt_len+=o*(r+s),f&&(t.static_len+=o*(d[2*i+1]+s)));if(0!==b){do{for(r=c-1;0===t.bl_count[r];)r--;t.bl_count[r]--,t.bl_count[r+1]+=2,t.bl_count[c]--,b-=2}while(b>0);for(r=c;0!==r;r--)for(i=t.bl_count[r];0!==i;)(n=t.heap[--a])>h||(l[2*n+1]!==r&&(t.opt_len+=(r-l[2*n+1])*l[2*n],l[2*n+1]=r),i--)}}function u(t,e,a){var i,n,r=new Array(K+1),s=0;for(i=1;i<=K;i++)r[i]=s=s+a[i-1]<<1;for(n=0;n<=e;n++){var o=t[2*n+1];0!==o&&(t[2*n]=d(r[o]++,o))}}function c(){var t,e,a,i,r,s=new Array(K+1);for(a=0,i=0;i<U-1;i++)for(it[i]=a,t=0;t<1<<W[i];t++)at[a++]=i;for(at[a-1]=i,r=0,i=0;i<16;i++)for(nt[i]=r,t=0;t<1<<J[i];t++)et[r++]=i;for(r>>=7;i<L;i++)for(nt[i]=r<<7,t=0;t<1<<J[i]-7;t++)et[256+r++]=i;for(e=0;e<=K;e++)s[e]=0;for(t=0;t<=143;)$[2*t+1]=8,t++,s[8]++;for(;t<=255;)$[2*t+1]=9,t++,s[9]++;for(;t<=279;)$[2*t+1]=7,t++,s[7]++;for(;t<=287;)$[2*t+1]=8,t++,s[8]++;for(u($,F+1,s),t=0;t<L;t++)tt[2*t+1]=5,tt[2*t]=d(t,5);rt=new n($,W,T+1,F,K),st=new n(tt,J,0,L,K),ot=new n(new Array(0),Q,0,H,P)}function b(t){var e;for(e=0;e<F;e++)t.dyn_ltree[2*e]=0;for(e=0;e<L;e++)t.dyn_dtree[2*e]=0;for(e=0;e<H;e++)t.bl_tree[2*e]=0;t.dyn_ltree[2*Y]=1,t.opt_len=t.static_len=0,t.last_lit=t.matches=0}function g(t){t.bi_valid>8?o(t,t.bi_buf):t.bi_valid>0&&(t.pending_buf[t.pending++]=t.bi_buf),t.bi_buf=0,t.bi_valid=0}function m(t,e,a,i){g(t),i&&(o(t,a),o(t,~a)),A.arraySet(t.pending_buf,t.window,e,a,t.pending),t.pending+=a}function w(t,e,a,i){var n=2*e,r=2*a;return t[n]<t[r]||t[n]===t[r]&&i[e]<=i[a]}function p(t,e,a){for(var 
i=t.heap[a],n=a<<1;n<=t.heap_len&&(n<t.heap_len&&w(e,t.heap[n+1],t.heap[n],t.depth)&&n++,!w(e,i,t.heap[n],t.depth));)t.heap[a]=t.heap[n],a=n,n<<=1;t.heap[a]=i}function v(t,e,a){var i,n,r,o,d=0;if(0!==t.last_lit)do{i=t.pending_buf[t.d_buf+2*d]<<8|t.pending_buf[t.d_buf+2*d+1],n=t.pending_buf[t.l_buf+d],d++,0===i?h(t,n,e):(h(t,(r=at[n])+T+1,e),0!==(o=W[r])&&l(t,n-=it[r],o),h(t,r=s(--i),a),0!==(o=J[r])&&l(t,i-=nt[r],o))}while(d<t.last_lit);h(t,Y,e)}function k(t,e){var a,i,n,r=e.dyn_tree,s=e.stat_desc.static_tree,o=e.stat_desc.has_stree,l=e.stat_desc.elems,h=-1;for(t.heap_len=0,t.heap_max=j,a=0;a<l;a++)0!==r[2*a]?(t.heap[++t.heap_len]=h=a,t.depth[a]=0):r[2*a+1]=0;for(;t.heap_len<2;)r[2*(n=t.heap[++t.heap_len]=h<2?++h:0)]=1,t.depth[n]=0,t.opt_len--,o&&(t.static_len-=s[2*n+1]);for(e.max_code=h,a=t.heap_len>>1;a>=1;a--)p(t,r,a);n=l;do{a=t.heap[1],t.heap[1]=t.heap[t.heap_len--],p(t,r,1),i=t.heap[1],t.heap[--t.heap_max]=a,t.heap[--t.heap_max]=i,r[2*n]=r[2*a]+r[2*i],t.depth[n]=(t.depth[a]>=t.depth[i]?t.depth[a]:t.depth[i])+1,r[2*a+1]=r[2*i+1]=n,t.heap[1]=n++,p(t,r,1)}while(t.heap_len>=2);t.heap[--t.heap_max]=t.heap[1],_(t,e),u(r,h,t.bl_count)}function y(t,e,a){var i,n,r=-1,s=e[1],o=0,l=7,h=4;for(0===s&&(l=138,h=3),e[2*(a+1)+1]=65535,i=0;i<=a;i++)n=s,s=e[2*(i+1)+1],++o<l&&n===s||(o<h?t.bl_tree[2*n]+=o:0!==n?(n!==r&&t.bl_tree[2*n]++,t.bl_tree[2*q]++):o<=10?t.bl_tree[2*G]++:t.bl_tree[2*X]++,o=0,r=n,0===s?(l=138,h=3):n===s?(l=6,h=3):(l=7,h=4))}function x(t,e,a){var i,n,r=-1,s=e[1],o=0,d=7,f=4;for(0===s&&(d=138,f=3),i=0;i<=a;i++)if(n=s,s=e[2*(i+1)+1],!(++o<d&&n===s)){if(o<f)do{h(t,n,t.bl_tree)}while(0!=--o);else 0!==n?(n!==r&&(h(t,n,t.bl_tree),o--),h(t,q,t.bl_tree),l(t,o-3,2)):o<=10?(h(t,G,t.bl_tree),l(t,o-3,3)):(h(t,X,t.bl_tree),l(t,o-11,7));o=0,r=n,0===s?(d=138,f=3):n===s?(d=6,f=3):(d=7,f=4)}}function z(t){var e;for(y(t,t.dyn_ltree,t.l_desc.max_code),y(t,t.dyn_dtree,t.d_desc.max_code),k(t,t.bl_desc),e=H-1;e>=3&&0===t.bl_tree[2*V[e]+1];e--);return 
t.opt_len+=3*(e+1)+5+5+4,e}function B(t,e,a,i){var n;for(l(t,e-257,5),l(t,a-1,5),l(t,i-4,4),n=0;n<i;n++)l(t,t.bl_tree[2*V[n]+1],3);x(t,t.dyn_ltree,e-1),x(t,t.dyn_dtree,a-1)}function S(t){var e,a=4093624447;for(e=0;e<=31;e++,a>>>=1)if(1&a&&0!==t.dyn_ltree[2*e])return R;if(0!==t.dyn_ltree[18]||0!==t.dyn_ltree[20]||0!==t.dyn_ltree[26])return C;for(e=32;e<T;e++)if(0!==t.dyn_ltree[2*e])return C;return R}function E(t,e,a,i){l(t,(O<<1)+(i?1:0),3),m(t,e,a,!0)}var A=t("../utils/common"),Z=4,R=0,C=1,N=2,O=0,D=1,I=2,U=29,T=256,F=T+1+U,L=30,H=19,j=2*F+1,K=15,M=16,P=7,Y=256,q=16,G=17,X=18,W=[0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,0],J=[0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13],Q=[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,3,7],V=[16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15],$=new Array(2*(F+2));i($);var tt=new Array(2*L);i(tt);var et=new Array(512);i(et);var at=new Array(256);i(at);var it=new Array(U);i(it);var nt=new Array(L);i(nt);var rt,st,ot,lt=!1;a._tr_init=function(t){lt||(c(),lt=!0),t.l_desc=new r(t.dyn_ltree,rt),t.d_desc=new r(t.dyn_dtree,st),t.bl_desc=new r(t.bl_tree,ot),t.bi_buf=0,t.bi_valid=0,b(t)},a._tr_stored_block=E,a._tr_flush_block=function(t,e,a,i){var n,r,s=0;t.level>0?(t.strm.data_type===N&&(t.strm.data_type=S(t)),k(t,t.l_desc),k(t,t.d_desc),s=z(t),n=t.opt_len+3+7>>>3,(r=t.static_len+3+7>>>3)<=n&&(n=r)):n=r=a+5,a+4<=n&&-1!==e?E(t,e,a,i):t.strategy===Z||r===n?(l(t,(D<<1)+(i?1:0),3),v(t,$,tt)):(l(t,(I<<1)+(i?1:0),3),B(t,t.l_desc.max_code+1,t.d_desc.max_code+1,s+1),v(t,t.dyn_ltree,t.dyn_dtree)),b(t),i&&g(t)},a._tr_tally=function(t,e,a){return t.pending_buf[t.d_buf+2*t.last_lit]=e>>>8&255,t.pending_buf[t.d_buf+2*t.last_lit+1]=255&e,t.pending_buf[t.l_buf+t.last_lit]=255&a,t.last_lit++,0===e?t.dyn_ltree[2*a]++:(t.matches++,e--,t.dyn_ltree[2*(at[a]+T+1)]++,t.dyn_dtree[2*s(e)]++),t.last_lit===t.lit_bufsize-1},a._tr_align=function(t){l(t,D<<1,3),h(t,Y,$),f(t)}},{"../utils/common":3}],15:[function(t,e,a){"use 
strict";e.exports=function(){this.input=null,this.next_in=0,this.avail_in=0,this.total_in=0,this.output=null,this.next_out=0,this.avail_out=0,this.total_out=0,this.msg="",this.state=null,this.data_type=2,this.adler=0}},{}],"/":[function(t,e,a){"use strict";var i={};(0,t("./lib/utils/common").assign)(i,t("./lib/deflate"),t("./lib/inflate"),t("./lib/zlib/constants")),e.exports=i},{"./lib/deflate":1,"./lib/inflate":2,"./lib/utils/common":3,"./lib/zlib/constants":6}]},{},[])("/")});'use strict';tr.exportTo('tr.e.importer',function(){const GZIP_MEMBER_HEADER_ID_SIZE=3;const GZIP_HEADER_ID1=0x1f;const GZIP_HEADER_ID2=0x8b;const GZIP_DEFLATE_COMPRESSION=8;function _stringToUInt8Array(str){const array=new Uint8Array(str.length);for(let i=0;i<str.length;++i){array[i]=str.charCodeAt(i);} +return{InMemoryTraceStream,};});!function(t){if("object"==typeof exports&&"undefined"!=typeof module)module.exports=t();else if("function"==typeof define&&define.amd)define([],t);else{("undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:this).pako=t()}}(function(){return function t(e,a,i){function n(s,o){if(!a[s]){if(!e[s]){var l="function"==typeof require&&require;if(!o&&l)return l(s,!0);if(r)return r(s,!0);var h=new Error("Cannot find module '"+s+"'");throw h.code="MODULE_NOT_FOUND",h}var d=a[s]={exports:{}};e[s][0].call(d.exports,function(t){var a=e[s][1][t];return n(a||t)},d,d.exports,t,e,a,i)}return a[s].exports}for(var r="function"==typeof require&&require,s=0;s<i.length;s++)n(i[s]);return n}({1:[function(t,e,a){"use strict";function i(t){if(!(this instanceof i))return new i(t);this.options=s.assign({level:_,method:c,chunkSize:16384,windowBits:15,memLevel:8,strategy:u,to:""},t||{});var e=this.options;e.raw&&e.windowBits>0?e.windowBits=-e.windowBits:e.gzip&&e.windowBits>0&&e.windowBits<16&&(e.windowBits+=16),this.err=0,this.msg="",this.ended=!1,this.chunks=[],this.strm=new h,this.strm.avail_out=0;var 
a=r.deflateInit2(this.strm,e.level,e.method,e.windowBits,e.memLevel,e.strategy);if(a!==f)throw new Error(l[a]);if(e.header&&r.deflateSetHeader(this.strm,e.header),e.dictionary){var n;if(n="string"==typeof e.dictionary?o.string2buf(e.dictionary):"[object ArrayBuffer]"===d.call(e.dictionary)?new Uint8Array(e.dictionary):e.dictionary,(a=r.deflateSetDictionary(this.strm,n))!==f)throw new Error(l[a]);this._dict_set=!0}}function n(t,e){var a=new i(e);if(a.push(t,!0),a.err)throw a.msg||l[a.err];return a.result}var r=t("./zlib/deflate"),s=t("./utils/common"),o=t("./utils/strings"),l=t("./zlib/messages"),h=t("./zlib/zstream"),d=Object.prototype.toString,f=0,_=-1,u=0,c=8;i.prototype.push=function(t,e){var a,i,n=this.strm,l=this.options.chunkSize;if(this.ended)return!1;i=e===~~e?e:!0===e?4:0,"string"==typeof t?n.input=o.string2buf(t):"[object ArrayBuffer]"===d.call(t)?n.input=new Uint8Array(t):n.input=t,n.next_in=0,n.avail_in=n.input.length;do{if(0===n.avail_out&&(n.output=new s.Buf8(l),n.next_out=0,n.avail_out=l),1!==(a=r.deflate(n,i))&&a!==f)return this.onEnd(a),this.ended=!0,!1;0!==n.avail_out&&(0!==n.avail_in||4!==i&&2!==i)||("string"===this.options.to?this.onData(o.buf2binstring(s.shrinkBuf(n.output,n.next_out))):this.onData(s.shrinkBuf(n.output,n.next_out)))}while((n.avail_in>0||0===n.avail_out)&&1!==a);return 4===i?(a=r.deflateEnd(this.strm),this.onEnd(a),this.ended=!0,a===f):2!==i||(this.onEnd(f),n.avail_out=0,!0)},i.prototype.onData=function(t){this.chunks.push(t)},i.prototype.onEnd=function(t){t===f&&("string"===this.options.to?this.result=this.chunks.join(""):this.result=s.flattenChunks(this.chunks)),this.chunks=[],this.err=t,this.msg=this.strm.msg},a.Deflate=i,a.deflate=n,a.deflateRaw=function(t,e){return e=e||{},e.raw=!0,n(t,e)},a.gzip=function(t,e){return e=e||{},e.gzip=!0,n(t,e)}},{"./utils/common":3,"./utils/strings":4,"./zlib/deflate":8,"./zlib/messages":13,"./zlib/zstream":15}],2:[function(t,e,a){"use strict";function i(t){if(!(this instanceof i))return new 
i(t);this.options=s.assign({chunkSize:16384,windowBits:0,to:""},t||{});var e=this.options;e.raw&&e.windowBits>=0&&e.windowBits<16&&(e.windowBits=-e.windowBits,0===e.windowBits&&(e.windowBits=-15)),!(e.windowBits>=0&&e.windowBits<16)||t&&t.windowBits||(e.windowBits+=32),e.windowBits>15&&e.windowBits<48&&0==(15&e.windowBits)&&(e.windowBits|=15),this.err=0,this.msg="",this.ended=!1,this.chunks=[],this.strm=new d,this.strm.avail_out=0;var a=r.inflateInit2(this.strm,e.windowBits);if(a!==l.Z_OK)throw new Error(h[a]);this.header=new f,r.inflateGetHeader(this.strm,this.header)}function n(t,e){var a=new i(e);if(a.push(t,!0),a.err)throw a.msg||h[a.err];return a.result}var r=t("./zlib/inflate"),s=t("./utils/common"),o=t("./utils/strings"),l=t("./zlib/constants"),h=t("./zlib/messages"),d=t("./zlib/zstream"),f=t("./zlib/gzheader"),_=Object.prototype.toString;i.prototype.push=function(t,e){var a,i,n,h,d,f,u=this.strm,c=this.options.chunkSize,b=this.options.dictionary,g=!1;if(this.ended)return!1;i=e===~~e?e:!0===e?l.Z_FINISH:l.Z_NO_FLUSH,"string"==typeof t?u.input=o.binstring2buf(t):"[object ArrayBuffer]"===_.call(t)?u.input=new Uint8Array(t):u.input=t,u.next_in=0,u.avail_in=u.input.length;do{if(0===u.avail_out&&(u.output=new s.Buf8(c),u.next_out=0,u.avail_out=c),(a=r.inflate(u,l.Z_NO_FLUSH))===l.Z_NEED_DICT&&b&&(f="string"==typeof b?o.string2buf(b):"[object ArrayBuffer]"===_.call(b)?new Uint8Array(b):b,a=r.inflateSetDictionary(this.strm,f)),a===l.Z_BUF_ERROR&&!0===g&&(a=l.Z_OK,g=!1),a!==l.Z_STREAM_END&&a!==l.Z_OK)return 
this.onEnd(a),this.ended=!0,!1;u.next_out&&(0!==u.avail_out&&a!==l.Z_STREAM_END&&(0!==u.avail_in||i!==l.Z_FINISH&&i!==l.Z_SYNC_FLUSH)||("string"===this.options.to?(n=o.utf8border(u.output,u.next_out),h=u.next_out-n,d=o.buf2string(u.output,n),u.next_out=h,u.avail_out=c-h,h&&s.arraySet(u.output,u.output,n,h,0),this.onData(d)):this.onData(s.shrinkBuf(u.output,u.next_out)))),0===u.avail_in&&0===u.avail_out&&(g=!0)}while((u.avail_in>0||0===u.avail_out)&&a!==l.Z_STREAM_END);return a===l.Z_STREAM_END&&(i=l.Z_FINISH),i===l.Z_FINISH?(a=r.inflateEnd(this.strm),this.onEnd(a),this.ended=!0,a===l.Z_OK):i!==l.Z_SYNC_FLUSH||(this.onEnd(l.Z_OK),u.avail_out=0,!0)},i.prototype.onData=function(t){this.chunks.push(t)},i.prototype.onEnd=function(t){t===l.Z_OK&&("string"===this.options.to?this.result=this.chunks.join(""):this.result=s.flattenChunks(this.chunks)),this.chunks=[],this.err=t,this.msg=this.strm.msg},a.Inflate=i,a.inflate=n,a.inflateRaw=function(t,e){return e=e||{},e.raw=!0,n(t,e)},a.ungzip=n},{"./utils/common":3,"./utils/strings":4,"./zlib/constants":6,"./zlib/gzheader":9,"./zlib/inflate":11,"./zlib/messages":13,"./zlib/zstream":15}],3:[function(t,e,a){"use strict";function i(t,e){return Object.prototype.hasOwnProperty.call(t,e)}var n="undefined"!=typeof Uint8Array&&"undefined"!=typeof Uint16Array&&"undefined"!=typeof Int32Array;a.assign=function(t){for(var e=Array.prototype.slice.call(arguments,1);e.length;){var a=e.shift();if(a){if("object"!=typeof a)throw new TypeError(a+"must be non-object");for(var n in a)i(a,n)&&(t[n]=a[n])}}return t},a.shrinkBuf=function(t,e){return t.length===e?t:t.subarray?t.subarray(0,e):(t.length=e,t)};var r={arraySet:function(t,e,a,i,n){if(e.subarray&&t.subarray)t.set(e.subarray(a,a+i),n);else for(var r=0;r<i;r++)t[n+r]=e[a+r]},flattenChunks:function(t){var e,a,i,n,r,s;for(i=0,e=0,a=t.length;e<a;e++)i+=t[e].length;for(s=new Uint8Array(i),n=0,e=0,a=t.length;e<a;e++)r=t[e],s.set(r,n),n+=r.length;return s}},s={arraySet:function(t,e,a,i,n){for(var 
r=0;r<i;r++)t[n+r]=e[a+r]},flattenChunks:function(t){return[].concat.apply([],t)}};a.setTyped=function(t){t?(a.Buf8=Uint8Array,a.Buf16=Uint16Array,a.Buf32=Int32Array,a.assign(a,r)):(a.Buf8=Array,a.Buf16=Array,a.Buf32=Array,a.assign(a,s))},a.setTyped(n)},{}],4:[function(t,e,a){"use strict";function i(t,e){if(e<65537&&(t.subarray&&s||!t.subarray&&r))return String.fromCharCode.apply(null,n.shrinkBuf(t,e));for(var a="",i=0;i<e;i++)a+=String.fromCharCode(t[i]);return a}var n=t("./common"),r=!0,s=!0;try{String.fromCharCode.apply(null,[0])}catch(t){r=!1}try{String.fromCharCode.apply(null,new Uint8Array(1))}catch(t){s=!1}for(var o=new n.Buf8(256),l=0;l<256;l++)o[l]=l>=252?6:l>=248?5:l>=240?4:l>=224?3:l>=192?2:1;o[254]=o[254]=1,a.string2buf=function(t){var e,a,i,r,s,o=t.length,l=0;for(r=0;r<o;r++)55296==(64512&(a=t.charCodeAt(r)))&&r+1<o&&56320==(64512&(i=t.charCodeAt(r+1)))&&(a=65536+(a-55296<<10)+(i-56320),r++),l+=a<128?1:a<2048?2:a<65536?3:4;for(e=new n.Buf8(l),s=0,r=0;s<l;r++)55296==(64512&(a=t.charCodeAt(r)))&&r+1<o&&56320==(64512&(i=t.charCodeAt(r+1)))&&(a=65536+(a-55296<<10)+(i-56320),r++),a<128?e[s++]=a:a<2048?(e[s++]=192|a>>>6,e[s++]=128|63&a):a<65536?(e[s++]=224|a>>>12,e[s++]=128|a>>>6&63,e[s++]=128|63&a):(e[s++]=240|a>>>18,e[s++]=128|a>>>12&63,e[s++]=128|a>>>6&63,e[s++]=128|63&a);return e},a.buf2binstring=function(t){return i(t,t.length)},a.binstring2buf=function(t){for(var e=new n.Buf8(t.length),a=0,i=e.length;a<i;a++)e[a]=t.charCodeAt(a);return e},a.buf2string=function(t,e){var a,n,r,s,l=e||t.length,h=new Array(2*l);for(n=0,a=0;a<l;)if((r=t[a++])<128)h[n++]=r;else if((s=o[r])>4)h[n++]=65533,a+=s-1;else{for(r&=2===s?31:3===s?15:7;s>1&&a<l;)r=r<<6|63&t[a++],s--;s>1?h[n++]=65533:r<65536?h[n++]=r:(r-=65536,h[n++]=55296|r>>10&1023,h[n++]=56320|1023&r)}return i(h,n)},a.utf8border=function(t,e){var a;for((e=e||t.length)>t.length&&(e=t.length),a=e-1;a>=0&&128==(192&t[a]);)a--;return a<0?e:0===a?e:a+o[t[a]]>e?a:e}},{"./common":3}],5:[function(t,e,a){"use 
strict";e.exports=function(t,e,a,i){for(var n=65535&t|0,r=t>>>16&65535|0,s=0;0!==a;){a-=s=a>2e3?2e3:a;do{r=r+(n=n+e[i++]|0)|0}while(--s);n%=65521,r%=65521}return n|r<<16|0}},{}],6:[function(t,e,a){"use strict";e.exports={Z_NO_FLUSH:0,Z_PARTIAL_FLUSH:1,Z_SYNC_FLUSH:2,Z_FULL_FLUSH:3,Z_FINISH:4,Z_BLOCK:5,Z_TREES:6,Z_OK:0,Z_STREAM_END:1,Z_NEED_DICT:2,Z_ERRNO:-1,Z_STREAM_ERROR:-2,Z_DATA_ERROR:-3,Z_BUF_ERROR:-5,Z_NO_COMPRESSION:0,Z_BEST_SPEED:1,Z_BEST_COMPRESSION:9,Z_DEFAULT_COMPRESSION:-1,Z_FILTERED:1,Z_HUFFMAN_ONLY:2,Z_RLE:3,Z_FIXED:4,Z_DEFAULT_STRATEGY:0,Z_BINARY:0,Z_TEXT:1,Z_UNKNOWN:2,Z_DEFLATED:8}},{}],7:[function(t,e,a){"use strict";var i=function(){for(var t,e=[],a=0;a<256;a++){t=a;for(var i=0;i<8;i++)t=1&t?3988292384^t>>>1:t>>>1;e[a]=t}return e}();e.exports=function(t,e,a,n){var r=i,s=n+a;t^=-1;for(var o=n;o<s;o++)t=t>>>8^r[255&(t^e[o])];return-1^t}},{}],8:[function(t,e,a){"use strict";function i(t,e){return t.msg=A[e],e}function n(t){return(t<<1)-(t>4?9:0)}function r(t){for(var e=t.length;--e>=0;)t[e]=0}function s(t){var e=t.state,a=e.pending;a>t.avail_out&&(a=t.avail_out),0!==a&&(z.arraySet(t.output,e.pending_buf,e.pending_out,a,t.next_out),t.next_out+=a,e.pending_out+=a,t.total_out+=a,t.avail_out-=a,e.pending-=a,0===e.pending&&(e.pending_out=0))}function o(t,e){B._tr_flush_block(t,t.block_start>=0?t.block_start:-1,t.strstart-t.block_start,e),t.block_start=t.strstart,s(t.strm)}function l(t,e){t.pending_buf[t.pending++]=e}function h(t,e){t.pending_buf[t.pending++]=e>>>8&255,t.pending_buf[t.pending++]=255&e}function d(t,e,a,i){var n=t.avail_in;return n>i&&(n=i),0===n?0:(t.avail_in-=n,z.arraySet(e,t.input,t.next_in,n,a),1===t.state.wrap?t.adler=S(t.adler,e,n,a):2===t.state.wrap&&(t.adler=E(t.adler,e,n,a)),t.next_in+=n,t.total_in+=n,n)}function f(t,e){var 
a,i,n=t.max_chain_length,r=t.strstart,s=t.prev_length,o=t.nice_match,l=t.strstart>t.w_size-it?t.strstart-(t.w_size-it):0,h=t.window,d=t.w_mask,f=t.prev,_=t.strstart+at,u=h[r+s-1],c=h[r+s];t.prev_length>=t.good_match&&(n>>=2),o>t.lookahead&&(o=t.lookahead);do{if(a=e,h[a+s]===c&&h[a+s-1]===u&&h[a]===h[r]&&h[++a]===h[r+1]){r+=2,a++;do{}while(h[++r]===h[++a]&&h[++r]===h[++a]&&h[++r]===h[++a]&&h[++r]===h[++a]&&h[++r]===h[++a]&&h[++r]===h[++a]&&h[++r]===h[++a]&&h[++r]===h[++a]&&r<_);if(i=at-(_-r),r=_-at,i>s){if(t.match_start=e,s=i,i>=o)break;u=h[r+s-1],c=h[r+s]}}}while((e=f[e&d])>l&&0!=--n);return s<=t.lookahead?s:t.lookahead}function _(t){var e,a,i,n,r,s=t.w_size;do{if(n=t.window_size-t.lookahead-t.strstart,t.strstart>=s+(s-it)){z.arraySet(t.window,t.window,s,s,0),t.match_start-=s,t.strstart-=s,t.block_start-=s,e=a=t.hash_size;do{i=t.head[--e],t.head[e]=i>=s?i-s:0}while(--a);e=a=s;do{i=t.prev[--e],t.prev[e]=i>=s?i-s:0}while(--a);n+=s}if(0===t.strm.avail_in)break;if(a=d(t.strm,t.window,t.strstart+t.lookahead,n),t.lookahead+=a,t.lookahead+t.insert>=et)for(r=t.strstart-t.insert,t.ins_h=t.window[r],t.ins_h=(t.ins_h<<t.hash_shift^t.window[r+1])&t.hash_mask;t.insert&&(t.ins_h=(t.ins_h<<t.hash_shift^t.window[r+et-1])&t.hash_mask,t.prev[r&t.w_mask]=t.head[t.ins_h],t.head[t.ins_h]=r,r++,t.insert--,!(t.lookahead+t.insert<et)););}while(t.lookahead<it&&0!==t.strm.avail_in)}function u(t,e){for(var a,i;;){if(t.lookahead<it){if(_(t),t.lookahead<it&&e===Z)return 
_t;if(0===t.lookahead)break}if(a=0,t.lookahead>=et&&(t.ins_h=(t.ins_h<<t.hash_shift^t.window[t.strstart+et-1])&t.hash_mask,a=t.prev[t.strstart&t.w_mask]=t.head[t.ins_h],t.head[t.ins_h]=t.strstart),0!==a&&t.strstart-a<=t.w_size-it&&(t.match_length=f(t,a)),t.match_length>=et)if(i=B._tr_tally(t,t.strstart-t.match_start,t.match_length-et),t.lookahead-=t.match_length,t.match_length<=t.max_lazy_match&&t.lookahead>=et){t.match_length--;do{t.strstart++,t.ins_h=(t.ins_h<<t.hash_shift^t.window[t.strstart+et-1])&t.hash_mask,a=t.prev[t.strstart&t.w_mask]=t.head[t.ins_h],t.head[t.ins_h]=t.strstart}while(0!=--t.match_length);t.strstart++}else t.strstart+=t.match_length,t.match_length=0,t.ins_h=t.window[t.strstart],t.ins_h=(t.ins_h<<t.hash_shift^t.window[t.strstart+1])&t.hash_mask;else i=B._tr_tally(t,0,t.window[t.strstart]),t.lookahead--,t.strstart++;if(i&&(o(t,!1),0===t.strm.avail_out))return _t}return t.insert=t.strstart<et-1?t.strstart:et-1,e===N?(o(t,!0),0===t.strm.avail_out?ct:bt):t.last_lit&&(o(t,!1),0===t.strm.avail_out)?_t:ut}function c(t,e){for(var a,i,n;;){if(t.lookahead<it){if(_(t),t.lookahead<it&&e===Z)return 
_t;if(0===t.lookahead)break}if(a=0,t.lookahead>=et&&(t.ins_h=(t.ins_h<<t.hash_shift^t.window[t.strstart+et-1])&t.hash_mask,a=t.prev[t.strstart&t.w_mask]=t.head[t.ins_h],t.head[t.ins_h]=t.strstart),t.prev_length=t.match_length,t.prev_match=t.match_start,t.match_length=et-1,0!==a&&t.prev_length<t.max_lazy_match&&t.strstart-a<=t.w_size-it&&(t.match_length=f(t,a),t.match_length<=5&&(t.strategy===H||t.match_length===et&&t.strstart-t.match_start>4096)&&(t.match_length=et-1)),t.prev_length>=et&&t.match_length<=t.prev_length){n=t.strstart+t.lookahead-et,i=B._tr_tally(t,t.strstart-1-t.prev_match,t.prev_length-et),t.lookahead-=t.prev_length-1,t.prev_length-=2;do{++t.strstart<=n&&(t.ins_h=(t.ins_h<<t.hash_shift^t.window[t.strstart+et-1])&t.hash_mask,a=t.prev[t.strstart&t.w_mask]=t.head[t.ins_h],t.head[t.ins_h]=t.strstart)}while(0!=--t.prev_length);if(t.match_available=0,t.match_length=et-1,t.strstart++,i&&(o(t,!1),0===t.strm.avail_out))return _t}else if(t.match_available){if((i=B._tr_tally(t,0,t.window[t.strstart-1]))&&o(t,!1),t.strstart++,t.lookahead--,0===t.strm.avail_out)return _t}else t.match_available=1,t.strstart++,t.lookahead--}return t.match_available&&(i=B._tr_tally(t,0,t.window[t.strstart-1]),t.match_available=0),t.insert=t.strstart<et-1?t.strstart:et-1,e===N?(o(t,!0),0===t.strm.avail_out?ct:bt):t.last_lit&&(o(t,!1),0===t.strm.avail_out)?_t:ut}function b(t,e){for(var a,i,n,r,s=t.window;;){if(t.lookahead<=at){if(_(t),t.lookahead<=at&&e===Z)return 
_t;if(0===t.lookahead)break}if(t.match_length=0,t.lookahead>=et&&t.strstart>0&&(n=t.strstart-1,(i=s[n])===s[++n]&&i===s[++n]&&i===s[++n])){r=t.strstart+at;do{}while(i===s[++n]&&i===s[++n]&&i===s[++n]&&i===s[++n]&&i===s[++n]&&i===s[++n]&&i===s[++n]&&i===s[++n]&&n<r);t.match_length=at-(r-n),t.match_length>t.lookahead&&(t.match_length=t.lookahead)}if(t.match_length>=et?(a=B._tr_tally(t,1,t.match_length-et),t.lookahead-=t.match_length,t.strstart+=t.match_length,t.match_length=0):(a=B._tr_tally(t,0,t.window[t.strstart]),t.lookahead--,t.strstart++),a&&(o(t,!1),0===t.strm.avail_out))return _t}return t.insert=0,e===N?(o(t,!0),0===t.strm.avail_out?ct:bt):t.last_lit&&(o(t,!1),0===t.strm.avail_out)?_t:ut}function g(t,e){for(var a;;){if(0===t.lookahead&&(_(t),0===t.lookahead)){if(e===Z)return _t;break}if(t.match_length=0,a=B._tr_tally(t,0,t.window[t.strstart]),t.lookahead--,t.strstart++,a&&(o(t,!1),0===t.strm.avail_out))return _t}return t.insert=0,e===N?(o(t,!0),0===t.strm.avail_out?ct:bt):t.last_lit&&(o(t,!1),0===t.strm.avail_out)?_t:ut}function m(t,e,a,i,n){this.good_length=t,this.max_lazy=e,this.nice_length=a,this.max_chain=i,this.func=n}function w(t){t.window_size=2*t.w_size,r(t.head),t.max_lazy_match=x[t.level].max_lazy,t.good_match=x[t.level].good_length,t.nice_match=x[t.level].nice_length,t.max_chain_length=x[t.level].max_chain,t.strstart=0,t.block_start=0,t.lookahead=0,t.insert=0,t.match_length=t.prev_length=et-1,t.match_available=0,t.ins_h=0}function 
p(){this.strm=null,this.status=0,this.pending_buf=null,this.pending_buf_size=0,this.pending_out=0,this.pending=0,this.wrap=0,this.gzhead=null,this.gzindex=0,this.method=q,this.last_flush=-1,this.w_size=0,this.w_bits=0,this.w_mask=0,this.window=null,this.window_size=0,this.prev=null,this.head=null,this.ins_h=0,this.hash_size=0,this.hash_bits=0,this.hash_mask=0,this.hash_shift=0,this.block_start=0,this.match_length=0,this.prev_match=0,this.match_available=0,this.strstart=0,this.match_start=0,this.lookahead=0,this.prev_length=0,this.max_chain_length=0,this.max_lazy_match=0,this.level=0,this.strategy=0,this.good_match=0,this.nice_match=0,this.dyn_ltree=new z.Buf16(2*$),this.dyn_dtree=new z.Buf16(2*(2*Q+1)),this.bl_tree=new z.Buf16(2*(2*V+1)),r(this.dyn_ltree),r(this.dyn_dtree),r(this.bl_tree),this.l_desc=null,this.d_desc=null,this.bl_desc=null,this.bl_count=new z.Buf16(tt+1),this.heap=new z.Buf16(2*J+1),r(this.heap),this.heap_len=0,this.heap_max=0,this.depth=new z.Buf16(2*J+1),r(this.depth),this.l_buf=0,this.lit_bufsize=0,this.last_lit=0,this.d_buf=0,this.opt_len=0,this.static_len=0,this.matches=0,this.insert=0,this.bi_buf=0,this.bi_valid=0}function v(t){var e;return t&&t.state?(t.total_in=t.total_out=0,t.data_type=Y,e=t.state,e.pending=0,e.pending_out=0,e.wrap<0&&(e.wrap=-e.wrap),e.status=e.wrap?rt:dt,t.adler=2===e.wrap?0:1,e.last_flush=Z,B._tr_init(e),D):i(t,U)}function k(t){var e=v(t);return e===D&&w(t.state),e}function y(t,e,a,n,r,s){if(!t)return U;var o=1;if(e===L&&(e=6),n<0?(o=0,n=-n):n>15&&(o=2,n-=16),r<1||r>G||a!==q||n<8||n>15||e<0||e>9||s<0||s>M)return i(t,U);8===n&&(n=9);var l=new p;return t.state=l,l.strm=t,l.wrap=o,l.gzhead=null,l.w_bits=n,l.w_size=1<<l.w_bits,l.w_mask=l.w_size-1,l.hash_bits=r+7,l.hash_size=1<<l.hash_bits,l.hash_mask=l.hash_size-1,l.hash_shift=~~((l.hash_bits+et-1)/et),l.window=new z.Buf8(2*l.w_size),l.head=new z.Buf16(l.hash_size),l.prev=new z.Buf16(l.w_size),l.lit_bufsize=1<<r+6,l.pending_buf_size=4*l.lit_bufsize,l.pending_buf=new 
z.Buf8(l.pending_buf_size),l.d_buf=1*l.lit_bufsize,l.l_buf=3*l.lit_bufsize,l.level=e,l.strategy=s,l.method=a,k(t)}var x,z=t("../utils/common"),B=t("./trees"),S=t("./adler32"),E=t("./crc32"),A=t("./messages"),Z=0,R=1,C=3,N=4,O=5,D=0,I=1,U=-2,T=-3,F=-5,L=-1,H=1,j=2,K=3,M=4,P=0,Y=2,q=8,G=9,X=15,W=8,J=286,Q=30,V=19,$=2*J+1,tt=15,et=3,at=258,it=at+et+1,nt=32,rt=42,st=69,ot=73,lt=91,ht=103,dt=113,ft=666,_t=1,ut=2,ct=3,bt=4,gt=3;x=[new m(0,0,0,0,function(t,e){var a=65535;for(a>t.pending_buf_size-5&&(a=t.pending_buf_size-5);;){if(t.lookahead<=1){if(_(t),0===t.lookahead&&e===Z)return _t;if(0===t.lookahead)break}t.strstart+=t.lookahead,t.lookahead=0;var i=t.block_start+a;if((0===t.strstart||t.strstart>=i)&&(t.lookahead=t.strstart-i,t.strstart=i,o(t,!1),0===t.strm.avail_out))return _t;if(t.strstart-t.block_start>=t.w_size-it&&(o(t,!1),0===t.strm.avail_out))return _t}return t.insert=0,e===N?(o(t,!0),0===t.strm.avail_out?ct:bt):(t.strstart>t.block_start&&(o(t,!1),t.strm.avail_out),_t)}),new m(4,4,8,4,u),new m(4,5,16,8,u),new m(4,6,32,32,u),new m(4,4,16,16,c),new m(8,16,32,32,c),new m(8,16,128,128,c),new m(8,32,128,256,c),new m(32,128,258,1024,c),new m(32,258,258,4096,c)],a.deflateInit=function(t,e){return y(t,e,q,X,W,P)},a.deflateInit2=y,a.deflateReset=k,a.deflateResetKeep=v,a.deflateSetHeader=function(t,e){return t&&t.state?2!==t.state.wrap?U:(t.state.gzhead=e,D):U},a.deflate=function(t,e){var a,o,d,f;if(!t||!t.state||e>O||e<0)return t?i(t,U):U;if(o=t.state,!t.output||!t.input&&0!==t.avail_in||o.status===ft&&e!==N)return 
i(t,0===t.avail_out?F:U);if(o.strm=t,a=o.last_flush,o.last_flush=e,o.status===rt)if(2===o.wrap)t.adler=0,l(o,31),l(o,139),l(o,8),o.gzhead?(l(o,(o.gzhead.text?1:0)+(o.gzhead.hcrc?2:0)+(o.gzhead.extra?4:0)+(o.gzhead.name?8:0)+(o.gzhead.comment?16:0)),l(o,255&o.gzhead.time),l(o,o.gzhead.time>>8&255),l(o,o.gzhead.time>>16&255),l(o,o.gzhead.time>>24&255),l(o,9===o.level?2:o.strategy>=j||o.level<2?4:0),l(o,255&o.gzhead.os),o.gzhead.extra&&o.gzhead.extra.length&&(l(o,255&o.gzhead.extra.length),l(o,o.gzhead.extra.length>>8&255)),o.gzhead.hcrc&&(t.adler=E(t.adler,o.pending_buf,o.pending,0)),o.gzindex=0,o.status=st):(l(o,0),l(o,0),l(o,0),l(o,0),l(o,0),l(o,9===o.level?2:o.strategy>=j||o.level<2?4:0),l(o,gt),o.status=dt);else{var _=q+(o.w_bits-8<<4)<<8;_|=(o.strategy>=j||o.level<2?0:o.level<6?1:6===o.level?2:3)<<6,0!==o.strstart&&(_|=nt),_+=31-_%31,o.status=dt,h(o,_),0!==o.strstart&&(h(o,t.adler>>>16),h(o,65535&t.adler)),t.adler=1}if(o.status===st)if(o.gzhead.extra){for(d=o.pending;o.gzindex<(65535&o.gzhead.extra.length)&&(o.pending!==o.pending_buf_size||(o.gzhead.hcrc&&o.pending>d&&(t.adler=E(t.adler,o.pending_buf,o.pending-d,d)),s(t),d=o.pending,o.pending!==o.pending_buf_size));)l(o,255&o.gzhead.extra[o.gzindex]),o.gzindex++;o.gzhead.hcrc&&o.pending>d&&(t.adler=E(t.adler,o.pending_buf,o.pending-d,d)),o.gzindex===o.gzhead.extra.length&&(o.gzindex=0,o.status=ot)}else o.status=ot;if(o.status===ot)if(o.gzhead.name){d=o.pending;do{if(o.pending===o.pending_buf_size&&(o.gzhead.hcrc&&o.pending>d&&(t.adler=E(t.adler,o.pending_buf,o.pending-d,d)),s(t),d=o.pending,o.pending===o.pending_buf_size)){f=1;break}f=o.gzindex<o.gzhead.name.length?255&o.gzhead.name.charCodeAt(o.gzindex++):0,l(o,f)}while(0!==f);o.gzhead.hcrc&&o.pending>d&&(t.adler=E(t.adler,o.pending_buf,o.pending-d,d)),0===f&&(o.gzindex=0,o.status=lt)}else 
o.status=lt;if(o.status===lt)if(o.gzhead.comment){d=o.pending;do{if(o.pending===o.pending_buf_size&&(o.gzhead.hcrc&&o.pending>d&&(t.adler=E(t.adler,o.pending_buf,o.pending-d,d)),s(t),d=o.pending,o.pending===o.pending_buf_size)){f=1;break}f=o.gzindex<o.gzhead.comment.length?255&o.gzhead.comment.charCodeAt(o.gzindex++):0,l(o,f)}while(0!==f);o.gzhead.hcrc&&o.pending>d&&(t.adler=E(t.adler,o.pending_buf,o.pending-d,d)),0===f&&(o.status=ht)}else o.status=ht;if(o.status===ht&&(o.gzhead.hcrc?(o.pending+2>o.pending_buf_size&&s(t),o.pending+2<=o.pending_buf_size&&(l(o,255&t.adler),l(o,t.adler>>8&255),t.adler=0,o.status=dt)):o.status=dt),0!==o.pending){if(s(t),0===t.avail_out)return o.last_flush=-1,D}else if(0===t.avail_in&&n(e)<=n(a)&&e!==N)return i(t,F);if(o.status===ft&&0!==t.avail_in)return i(t,F);if(0!==t.avail_in||0!==o.lookahead||e!==Z&&o.status!==ft){var u=o.strategy===j?g(o,e):o.strategy===K?b(o,e):x[o.level].func(o,e);if(u!==ct&&u!==bt||(o.status=ft),u===_t||u===ct)return 0===t.avail_out&&(o.last_flush=-1),D;if(u===ut&&(e===R?B._tr_align(o):e!==O&&(B._tr_stored_block(o,0,0,!1),e===C&&(r(o.head),0===o.lookahead&&(o.strstart=0,o.block_start=0,o.insert=0))),s(t),0===t.avail_out))return o.last_flush=-1,D}return e!==N?D:o.wrap<=0?I:(2===o.wrap?(l(o,255&t.adler),l(o,t.adler>>8&255),l(o,t.adler>>16&255),l(o,t.adler>>24&255),l(o,255&t.total_in),l(o,t.total_in>>8&255),l(o,t.total_in>>16&255),l(o,t.total_in>>24&255)):(h(o,t.adler>>>16),h(o,65535&t.adler)),s(t),o.wrap>0&&(o.wrap=-o.wrap),0!==o.pending?D:I)},a.deflateEnd=function(t){var e;return t&&t.state?(e=t.state.status)!==rt&&e!==st&&e!==ot&&e!==lt&&e!==ht&&e!==dt&&e!==ft?i(t,U):(t.state=null,e===dt?i(t,T):D):U},a.deflateSetDictionary=function(t,e){var a,i,n,s,o,l,h,d,f=e.length;if(!t||!t.state)return U;if(a=t.state,2===(s=a.wrap)||1===s&&a.status!==rt||a.lookahead)return U;for(1===s&&(t.adler=S(t.adler,e,f,0)),a.wrap=0,f>=a.w_size&&(0===s&&(r(a.head),a.strstart=0,a.block_start=0,a.insert=0),d=new 
z.Buf8(a.w_size),z.arraySet(d,e,f-a.w_size,a.w_size,0),e=d,f=a.w_size),o=t.avail_in,l=t.next_in,h=t.input,t.avail_in=f,t.next_in=0,t.input=e,_(a);a.lookahead>=et;){i=a.strstart,n=a.lookahead-(et-1);do{a.ins_h=(a.ins_h<<a.hash_shift^a.window[i+et-1])&a.hash_mask,a.prev[i&a.w_mask]=a.head[a.ins_h],a.head[a.ins_h]=i,i++}while(--n);a.strstart=i,a.lookahead=et-1,_(a)}return a.strstart+=a.lookahead,a.block_start=a.strstart,a.insert=a.lookahead,a.lookahead=0,a.match_length=a.prev_length=et-1,a.match_available=0,t.next_in=l,t.input=h,t.avail_in=o,a.wrap=s,D},a.deflateInfo="pako deflate (from Nodeca project)"},{"../utils/common":3,"./adler32":5,"./crc32":7,"./messages":13,"./trees":14}],9:[function(t,e,a){"use strict";e.exports=function(){this.text=0,this.time=0,this.xflags=0,this.os=0,this.extra=null,this.extra_len=0,this.name="",this.comment="",this.hcrc=0,this.done=!1}},{}],10:[function(t,e,a){"use strict";e.exports=function(t,e){var a,i,n,r,s,o,l,h,d,f,_,u,c,b,g,m,w,p,v,k,y,x,z,B,S;a=t.state,i=t.next_in,B=t.input,n=i+(t.avail_in-5),r=t.next_out,S=t.output,s=r-(e-t.avail_out),o=r+(t.avail_out-257),l=a.dmax,h=a.wsize,d=a.whave,f=a.wnext,_=a.window,u=a.hold,c=a.bits,b=a.lencode,g=a.distcode,m=(1<<a.lenbits)-1,w=(1<<a.distbits)-1;t:do{c<15&&(u+=B[i++]<<c,c+=8,u+=B[i++]<<c,c+=8),p=b[u&m];e:for(;;){if(v=p>>>24,u>>>=v,c-=v,0===(v=p>>>16&255))S[r++]=65535&p;else{if(!(16&v)){if(0==(64&v)){p=b[(65535&p)+(u&(1<<v)-1)];continue e}if(32&v){a.mode=12;break t}t.msg="invalid literal/length code",a.mode=30;break t}k=65535&p,(v&=15)&&(c<v&&(u+=B[i++]<<c,c+=8),k+=u&(1<<v)-1,u>>>=v,c-=v),c<15&&(u+=B[i++]<<c,c+=8,u+=B[i++]<<c,c+=8),p=g[u&w];a:for(;;){if(v=p>>>24,u>>>=v,c-=v,!(16&(v=p>>>16&255))){if(0==(64&v)){p=g[(65535&p)+(u&(1<<v)-1)];continue a}t.msg="invalid distance code",a.mode=30;break t}if(y=65535&p,v&=15,c<v&&(u+=B[i++]<<c,(c+=8)<v&&(u+=B[i++]<<c,c+=8)),(y+=u&(1<<v)-1)>l){t.msg="invalid distance too far back",a.mode=30;break 
t}if(u>>>=v,c-=v,v=r-s,y>v){if((v=y-v)>d&&a.sane){t.msg="invalid distance too far back",a.mode=30;break t}if(x=0,z=_,0===f){if(x+=h-v,v<k){k-=v;do{S[r++]=_[x++]}while(--v);x=r-y,z=S}}else if(f<v){if(x+=h+f-v,(v-=f)<k){k-=v;do{S[r++]=_[x++]}while(--v);if(x=0,f<k){k-=v=f;do{S[r++]=_[x++]}while(--v);x=r-y,z=S}}}else if(x+=f-v,v<k){k-=v;do{S[r++]=_[x++]}while(--v);x=r-y,z=S}for(;k>2;)S[r++]=z[x++],S[r++]=z[x++],S[r++]=z[x++],k-=3;k&&(S[r++]=z[x++],k>1&&(S[r++]=z[x++]))}else{x=r-y;do{S[r++]=S[x++],S[r++]=S[x++],S[r++]=S[x++],k-=3}while(k>2);k&&(S[r++]=S[x++],k>1&&(S[r++]=S[x++]))}break}}break}}while(i<n&&r<o);i-=k=c>>3,u&=(1<<(c-=k<<3))-1,t.next_in=i,t.next_out=r,t.avail_in=i<n?n-i+5:5-(i-n),t.avail_out=r<o?o-r+257:257-(r-o),a.hold=u,a.bits=c}},{}],11:[function(t,e,a){"use strict";function i(t){return(t>>>24&255)+(t>>>8&65280)+((65280&t)<<8)+((255&t)<<24)}function n(){this.mode=0,this.last=!1,this.wrap=0,this.havedict=!1,this.flags=0,this.dmax=0,this.check=0,this.total=0,this.head=null,this.wbits=0,this.wsize=0,this.whave=0,this.wnext=0,this.window=null,this.hold=0,this.bits=0,this.length=0,this.offset=0,this.extra=0,this.lencode=null,this.distcode=null,this.lenbits=0,this.distbits=0,this.ncode=0,this.nlen=0,this.ndist=0,this.have=0,this.next=null,this.lens=new u.Buf16(320),this.work=new u.Buf16(288),this.lendyn=null,this.distdyn=null,this.sane=0,this.back=0,this.was=0}function r(t){var e;return t&&t.state?(e=t.state,t.total_in=t.total_out=e.total=0,t.msg="",e.wrap&&(t.adler=1&e.wrap),e.mode=N,e.last=0,e.havedict=0,e.dmax=32768,e.head=null,e.hold=0,e.bits=0,e.lencode=e.lendyn=new u.Buf32(dt),e.distcode=e.distdyn=new u.Buf32(ft),e.sane=1,e.back=-1,z):E}function s(t){var e;return t&&t.state?(e=t.state,e.wsize=0,e.whave=0,e.wnext=0,r(t)):E}function o(t,e){var a,i;return t&&t.state?(i=t.state,e<0?(a=0,e=-e):(a=1+(e>>4),e<48&&(e&=15)),e&&(e<8||e>15)?E:(null!==i.window&&i.wbits!==e&&(i.window=null),i.wrap=a,i.wbits=e,s(t))):E}function l(t,e){var a,i;return t?(i=new 
n,t.state=i,i.window=null,(a=o(t,e))!==z&&(t.state=null),a):E}function h(t){if(ut){var e;for(f=new u.Buf32(512),_=new u.Buf32(32),e=0;e<144;)t.lens[e++]=8;for(;e<256;)t.lens[e++]=9;for(;e<280;)t.lens[e++]=7;for(;e<288;)t.lens[e++]=8;for(m(p,t.lens,0,288,f,0,t.work,{bits:9}),e=0;e<32;)t.lens[e++]=5;m(v,t.lens,0,32,_,0,t.work,{bits:5}),ut=!1}t.lencode=f,t.lenbits=9,t.distcode=_,t.distbits=5}function d(t,e,a,i){var n,r=t.state;return null===r.window&&(r.wsize=1<<r.wbits,r.wnext=0,r.whave=0,r.window=new u.Buf8(r.wsize)),i>=r.wsize?(u.arraySet(r.window,e,a-r.wsize,r.wsize,0),r.wnext=0,r.whave=r.wsize):((n=r.wsize-r.wnext)>i&&(n=i),u.arraySet(r.window,e,a-i,n,r.wnext),(i-=n)?(u.arraySet(r.window,e,a-i,i,0),r.wnext=i,r.whave=r.wsize):(r.wnext+=n,r.wnext===r.wsize&&(r.wnext=0),r.whave<r.wsize&&(r.whave+=n))),0}var f,_,u=t("../utils/common"),c=t("./adler32"),b=t("./crc32"),g=t("./inffast"),m=t("./inftrees"),w=0,p=1,v=2,k=4,y=5,x=6,z=0,B=1,S=2,E=-2,A=-3,Z=-4,R=-5,C=8,N=1,O=2,D=3,I=4,U=5,T=6,F=7,L=8,H=9,j=10,K=11,M=12,P=13,Y=14,q=15,G=16,X=17,W=18,J=19,Q=20,V=21,$=22,tt=23,et=24,at=25,it=26,nt=27,rt=28,st=29,ot=30,lt=31,ht=32,dt=852,ft=592,_t=15,ut=!0;a.inflateReset=s,a.inflateReset2=o,a.inflateResetKeep=r,a.inflateInit=function(t){return l(t,_t)},a.inflateInit2=l,a.inflate=function(t,e){var a,n,r,s,o,l,f,_,dt,ft,_t,ut,ct,bt,gt,mt,wt,pt,vt,kt,yt,xt,zt,Bt,St=0,Et=new u.Buf8(4),At=[16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15];if(!t||!t.state||!t.output||!t.input&&0!==t.avail_in)return E;(a=t.state).mode===M&&(a.mode=P),o=t.next_out,r=t.output,f=t.avail_out,s=t.next_in,n=t.input,l=t.avail_in,_=a.hold,dt=a.bits,ft=l,_t=f,xt=z;t:for(;;)switch(a.mode){case N:if(0===a.wrap){a.mode=P;break}for(;dt<16;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}if(2&a.wrap&&35615===_){a.check=0,Et[0]=255&_,Et[1]=_>>>8&255,a.check=b(a.check,Et,2,0),_=0,dt=0,a.mode=O;break}if(a.flags=0,a.head&&(a.head.done=!1),!(1&a.wrap)||(((255&_)<<8)+(_>>8))%31){t.msg="incorrect header 
check",a.mode=ot;break}if((15&_)!==C){t.msg="unknown compression method",a.mode=ot;break}if(_>>>=4,dt-=4,yt=8+(15&_),0===a.wbits)a.wbits=yt;else if(yt>a.wbits){t.msg="invalid window size",a.mode=ot;break}a.dmax=1<<yt,t.adler=a.check=1,a.mode=512&_?j:M,_=0,dt=0;break;case O:for(;dt<16;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}if(a.flags=_,(255&a.flags)!==C){t.msg="unknown compression method",a.mode=ot;break}if(57344&a.flags){t.msg="unknown header flags set",a.mode=ot;break}a.head&&(a.head.text=_>>8&1),512&a.flags&&(Et[0]=255&_,Et[1]=_>>>8&255,a.check=b(a.check,Et,2,0)),_=0,dt=0,a.mode=D;case D:for(;dt<32;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}a.head&&(a.head.time=_),512&a.flags&&(Et[0]=255&_,Et[1]=_>>>8&255,Et[2]=_>>>16&255,Et[3]=_>>>24&255,a.check=b(a.check,Et,4,0)),_=0,dt=0,a.mode=I;case I:for(;dt<16;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}a.head&&(a.head.xflags=255&_,a.head.os=_>>8),512&a.flags&&(Et[0]=255&_,Et[1]=_>>>8&255,a.check=b(a.check,Et,2,0)),_=0,dt=0,a.mode=U;case U:if(1024&a.flags){for(;dt<16;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}a.length=_,a.head&&(a.head.extra_len=_),512&a.flags&&(Et[0]=255&_,Et[1]=_>>>8&255,a.check=b(a.check,Et,2,0)),_=0,dt=0}else a.head&&(a.head.extra=null);a.mode=T;case T:if(1024&a.flags&&((ut=a.length)>l&&(ut=l),ut&&(a.head&&(yt=a.head.extra_len-a.length,a.head.extra||(a.head.extra=new Array(a.head.extra_len)),u.arraySet(a.head.extra,n,s,ut,yt)),512&a.flags&&(a.check=b(a.check,n,ut,s)),l-=ut,s+=ut,a.length-=ut),a.length))break t;a.length=0,a.mode=F;case F:if(2048&a.flags){if(0===l)break t;ut=0;do{yt=n[s+ut++],a.head&&yt&&a.length<65536&&(a.head.name+=String.fromCharCode(yt))}while(yt&&ut<l);if(512&a.flags&&(a.check=b(a.check,n,ut,s)),l-=ut,s+=ut,yt)break t}else a.head&&(a.head.name=null);a.length=0,a.mode=L;case L:if(4096&a.flags){if(0===l)break t;ut=0;do{yt=n[s+ut++],a.head&&yt&&a.length<65536&&(a.head.comment+=String.fromCharCode(yt))}while(yt&&ut<l);if(512&a.flags&&(a.check=b(a.check,n,ut,s)),l-=ut,s+=ut,yt)break 
t}else a.head&&(a.head.comment=null);a.mode=H;case H:if(512&a.flags){for(;dt<16;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}if(_!==(65535&a.check)){t.msg="header crc mismatch",a.mode=ot;break}_=0,dt=0}a.head&&(a.head.hcrc=a.flags>>9&1,a.head.done=!0),t.adler=a.check=0,a.mode=M;break;case j:for(;dt<32;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}t.adler=a.check=i(_),_=0,dt=0,a.mode=K;case K:if(0===a.havedict)return t.next_out=o,t.avail_out=f,t.next_in=s,t.avail_in=l,a.hold=_,a.bits=dt,S;t.adler=a.check=1,a.mode=M;case M:if(e===y||e===x)break t;case P:if(a.last){_>>>=7&dt,dt-=7&dt,a.mode=nt;break}for(;dt<3;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}switch(a.last=1&_,_>>>=1,dt-=1,3&_){case 0:a.mode=Y;break;case 1:if(h(a),a.mode=Q,e===x){_>>>=2,dt-=2;break t}break;case 2:a.mode=X;break;case 3:t.msg="invalid block type",a.mode=ot}_>>>=2,dt-=2;break;case Y:for(_>>>=7&dt,dt-=7&dt;dt<32;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}if((65535&_)!=(_>>>16^65535)){t.msg="invalid stored block lengths",a.mode=ot;break}if(a.length=65535&_,_=0,dt=0,a.mode=q,e===x)break t;case q:a.mode=G;case G:if(ut=a.length){if(ut>l&&(ut=l),ut>f&&(ut=f),0===ut)break t;u.arraySet(r,n,s,ut,o),l-=ut,s+=ut,f-=ut,o+=ut,a.length-=ut;break}a.mode=M;break;case X:for(;dt<14;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}if(a.nlen=257+(31&_),_>>>=5,dt-=5,a.ndist=1+(31&_),_>>>=5,dt-=5,a.ncode=4+(15&_),_>>>=4,dt-=4,a.nlen>286||a.ndist>30){t.msg="too many length or distance symbols",a.mode=ot;break}a.have=0,a.mode=W;case W:for(;a.have<a.ncode;){for(;dt<3;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}a.lens[At[a.have++]]=7&_,_>>>=3,dt-=3}for(;a.have<19;)a.lens[At[a.have++]]=0;if(a.lencode=a.lendyn,a.lenbits=7,zt={bits:a.lenbits},xt=m(w,a.lens,0,19,a.lencode,0,a.work,zt),a.lenbits=zt.bits,xt){t.msg="invalid code lengths set",a.mode=ot;break}a.have=0,a.mode=J;case J:for(;a.have<a.nlen+a.ndist;){for(;St=a.lencode[_&(1<<a.lenbits)-1],gt=St>>>24,mt=St>>>16&255,wt=65535&St,!(gt<=dt);){if(0===l)break 
t;l--,_+=n[s++]<<dt,dt+=8}if(wt<16)_>>>=gt,dt-=gt,a.lens[a.have++]=wt;else{if(16===wt){for(Bt=gt+2;dt<Bt;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}if(_>>>=gt,dt-=gt,0===a.have){t.msg="invalid bit length repeat",a.mode=ot;break}yt=a.lens[a.have-1],ut=3+(3&_),_>>>=2,dt-=2}else if(17===wt){for(Bt=gt+3;dt<Bt;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}dt-=gt,yt=0,ut=3+(7&(_>>>=gt)),_>>>=3,dt-=3}else{for(Bt=gt+7;dt<Bt;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}dt-=gt,yt=0,ut=11+(127&(_>>>=gt)),_>>>=7,dt-=7}if(a.have+ut>a.nlen+a.ndist){t.msg="invalid bit length repeat",a.mode=ot;break}for(;ut--;)a.lens[a.have++]=yt}}if(a.mode===ot)break;if(0===a.lens[256]){t.msg="invalid code -- missing end-of-block",a.mode=ot;break}if(a.lenbits=9,zt={bits:a.lenbits},xt=m(p,a.lens,0,a.nlen,a.lencode,0,a.work,zt),a.lenbits=zt.bits,xt){t.msg="invalid literal/lengths set",a.mode=ot;break}if(a.distbits=6,a.distcode=a.distdyn,zt={bits:a.distbits},xt=m(v,a.lens,a.nlen,a.ndist,a.distcode,0,a.work,zt),a.distbits=zt.bits,xt){t.msg="invalid distances set",a.mode=ot;break}if(a.mode=Q,e===x)break t;case Q:a.mode=V;case V:if(l>=6&&f>=258){t.next_out=o,t.avail_out=f,t.next_in=s,t.avail_in=l,a.hold=_,a.bits=dt,g(t,_t),o=t.next_out,r=t.output,f=t.avail_out,s=t.next_in,n=t.input,l=t.avail_in,_=a.hold,dt=a.bits,a.mode===M&&(a.back=-1);break}for(a.back=0;St=a.lencode[_&(1<<a.lenbits)-1],gt=St>>>24,mt=St>>>16&255,wt=65535&St,!(gt<=dt);){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}if(mt&&0==(240&mt)){for(pt=gt,vt=mt,kt=wt;St=a.lencode[kt+((_&(1<<pt+vt)-1)>>pt)],gt=St>>>24,mt=St>>>16&255,wt=65535&St,!(pt+gt<=dt);){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}_>>>=pt,dt-=pt,a.back+=pt}if(_>>>=gt,dt-=gt,a.back+=gt,a.length=wt,0===mt){a.mode=it;break}if(32&mt){a.back=-1,a.mode=M;break}if(64&mt){t.msg="invalid literal/length code",a.mode=ot;break}a.extra=15&mt,a.mode=$;case $:if(a.extra){for(Bt=a.extra;dt<Bt;){if(0===l)break 
t;l--,_+=n[s++]<<dt,dt+=8}a.length+=_&(1<<a.extra)-1,_>>>=a.extra,dt-=a.extra,a.back+=a.extra}a.was=a.length,a.mode=tt;case tt:for(;St=a.distcode[_&(1<<a.distbits)-1],gt=St>>>24,mt=St>>>16&255,wt=65535&St,!(gt<=dt);){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}if(0==(240&mt)){for(pt=gt,vt=mt,kt=wt;St=a.distcode[kt+((_&(1<<pt+vt)-1)>>pt)],gt=St>>>24,mt=St>>>16&255,wt=65535&St,!(pt+gt<=dt);){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}_>>>=pt,dt-=pt,a.back+=pt}if(_>>>=gt,dt-=gt,a.back+=gt,64&mt){t.msg="invalid distance code",a.mode=ot;break}a.offset=wt,a.extra=15&mt,a.mode=et;case et:if(a.extra){for(Bt=a.extra;dt<Bt;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}a.offset+=_&(1<<a.extra)-1,_>>>=a.extra,dt-=a.extra,a.back+=a.extra}if(a.offset>a.dmax){t.msg="invalid distance too far back",a.mode=ot;break}a.mode=at;case at:if(0===f)break t;if(ut=_t-f,a.offset>ut){if((ut=a.offset-ut)>a.whave&&a.sane){t.msg="invalid distance too far back",a.mode=ot;break}ut>a.wnext?(ut-=a.wnext,ct=a.wsize-ut):ct=a.wnext-ut,ut>a.length&&(ut=a.length),bt=a.window}else bt=r,ct=o-a.offset,ut=a.length;ut>f&&(ut=f),f-=ut,a.length-=ut;do{r[o++]=bt[ct++]}while(--ut);0===a.length&&(a.mode=V);break;case it:if(0===f)break t;r[o++]=a.length,f--,a.mode=V;break;case nt:if(a.wrap){for(;dt<32;){if(0===l)break t;l--,_|=n[s++]<<dt,dt+=8}if(_t-=f,t.total_out+=_t,a.total+=_t,_t&&(t.adler=a.check=a.flags?b(a.check,r,_t,o-_t):c(a.check,r,_t,o-_t)),_t=f,(a.flags?_:i(_))!==a.check){t.msg="incorrect data check",a.mode=ot;break}_=0,dt=0}a.mode=rt;case rt:if(a.wrap&&a.flags){for(;dt<32;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}if(_!==(4294967295&a.total)){t.msg="incorrect length check",a.mode=ot;break}_=0,dt=0}a.mode=st;case st:xt=B;break t;case ot:xt=A;break t;case lt:return Z;case ht:default:return E}return 
t.next_out=o,t.avail_out=f,t.next_in=s,t.avail_in=l,a.hold=_,a.bits=dt,(a.wsize||_t!==t.avail_out&&a.mode<ot&&(a.mode<nt||e!==k))&&d(t,t.output,t.next_out,_t-t.avail_out)?(a.mode=lt,Z):(ft-=t.avail_in,_t-=t.avail_out,t.total_in+=ft,t.total_out+=_t,a.total+=_t,a.wrap&&_t&&(t.adler=a.check=a.flags?b(a.check,r,_t,t.next_out-_t):c(a.check,r,_t,t.next_out-_t)),t.data_type=a.bits+(a.last?64:0)+(a.mode===M?128:0)+(a.mode===Q||a.mode===q?256:0),(0===ft&&0===_t||e===k)&&xt===z&&(xt=R),xt)},a.inflateEnd=function(t){if(!t||!t.state)return E;var e=t.state;return e.window&&(e.window=null),t.state=null,z},a.inflateGetHeader=function(t,e){var a;return t&&t.state?0==(2&(a=t.state).wrap)?E:(a.head=e,e.done=!1,z):E},a.inflateSetDictionary=function(t,e){var a,i,n=e.length;return t&&t.state?0!==(a=t.state).wrap&&a.mode!==K?E:a.mode===K&&(i=1,(i=c(i,e,n,0))!==a.check)?A:d(t,e,n,n)?(a.mode=lt,Z):(a.havedict=1,z):E},a.inflateInfo="pako inflate (from Nodeca project)"},{"../utils/common":3,"./adler32":5,"./crc32":7,"./inffast":10,"./inftrees":12}],12:[function(t,e,a){"use strict";var i=t("../utils/common"),n=[3,4,5,6,7,8,9,10,11,13,15,17,19,23,27,31,35,43,51,59,67,83,99,115,131,163,195,227,258,0,0],r=[16,16,16,16,16,16,16,16,17,17,17,17,18,18,18,18,19,19,19,19,20,20,20,20,21,21,21,21,16,72,78],s=[1,2,3,4,5,7,9,13,17,25,33,49,65,97,129,193,257,385,513,769,1025,1537,2049,3073,4097,6145,8193,12289,16385,24577,0,0],o=[16,16,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,24,24,25,25,26,26,27,27,28,28,29,29,64,64];e.exports=function(t,e,a,l,h,d,f,_){var u,c,b,g,m,w,p,v,k,y=_.bits,x=0,z=0,B=0,S=0,E=0,A=0,Z=0,R=0,C=0,N=0,O=null,D=0,I=new i.Buf16(16),U=new i.Buf16(16),T=null,F=0;for(x=0;x<=15;x++)I[x]=0;for(z=0;z<l;z++)I[e[a+z]]++;for(E=y,S=15;S>=1&&0===I[S];S--);if(E>S&&(E=S),0===S)return 
h[d++]=20971520,h[d++]=20971520,_.bits=1,0;for(B=1;B<S&&0===I[B];B++);for(E<B&&(E=B),R=1,x=1;x<=15;x++)if(R<<=1,(R-=I[x])<0)return-1;if(R>0&&(0===t||1!==S))return-1;for(U[1]=0,x=1;x<15;x++)U[x+1]=U[x]+I[x];for(z=0;z<l;z++)0!==e[a+z]&&(f[U[e[a+z]]++]=z);if(0===t?(O=T=f,w=19):1===t?(O=n,D-=257,T=r,F-=257,w=256):(O=s,T=o,w=-1),N=0,z=0,x=B,m=d,A=E,Z=0,b=-1,C=1<<E,g=C-1,1===t&&C>852||2===t&&C>592)return 1;for(;;){p=x-Z,f[z]<w?(v=0,k=f[z]):f[z]>w?(v=T[F+f[z]],k=O[D+f[z]]):(v=96,k=0),u=1<<x-Z,B=c=1<<A;do{h[m+(N>>Z)+(c-=u)]=p<<24|v<<16|k|0}while(0!==c);for(u=1<<x-1;N&u;)u>>=1;if(0!==u?(N&=u-1,N+=u):N=0,z++,0==--I[x]){if(x===S)break;x=e[a+f[z]]}if(x>E&&(N&g)!==b){for(0===Z&&(Z=E),m+=B,R=1<<(A=x-Z);A+Z<S&&!((R-=I[A+Z])<=0);)A++,R<<=1;if(C+=1<<A,1===t&&C>852||2===t&&C>592)return 1;h[b=N&g]=E<<24|A<<16|m-d|0}}return 0!==N&&(h[m+N]=x-Z<<24|64<<16|0),_.bits=E,0}},{"../utils/common":3}],13:[function(t,e,a){"use strict";e.exports={2:"need dictionary",1:"stream end",0:"","-1":"file error","-2":"stream error","-3":"data error","-4":"insufficient memory","-5":"buffer error","-6":"incompatible version"}},{}],14:[function(t,e,a){"use strict";function i(t){for(var e=t.length;--e>=0;)t[e]=0}function n(t,e,a,i,n){this.static_tree=t,this.extra_bits=e,this.extra_base=a,this.elems=i,this.max_length=n,this.has_stree=t&&t.length}function r(t,e){this.dyn_tree=t,this.max_code=0,this.stat_desc=e}function s(t){return t<256?et[t]:et[256+(t>>>7)]}function o(t,e){t.pending_buf[t.pending++]=255&e,t.pending_buf[t.pending++]=e>>>8&255}function l(t,e,a){t.bi_valid>M-a?(t.bi_buf|=e<<t.bi_valid&65535,o(t,t.bi_buf),t.bi_buf=e>>M-t.bi_valid,t.bi_valid+=a-M):(t.bi_buf|=e<<t.bi_valid&65535,t.bi_valid+=a)}function h(t,e,a){l(t,a[2*e],a[2*e+1])}function d(t,e){var a=0;do{a|=1&t,t>>>=1,a<<=1}while(--e>0);return a>>>1}function f(t){16===t.bi_valid?(o(t,t.bi_buf),t.bi_buf=0,t.bi_valid=0):t.bi_valid>=8&&(t.pending_buf[t.pending++]=255&t.bi_buf,t.bi_buf>>=8,t.bi_valid-=8)}function _(t,e){var 
a,i,n,r,s,o,l=e.dyn_tree,h=e.max_code,d=e.stat_desc.static_tree,f=e.stat_desc.has_stree,_=e.stat_desc.extra_bits,u=e.stat_desc.extra_base,c=e.stat_desc.max_length,b=0;for(r=0;r<=K;r++)t.bl_count[r]=0;for(l[2*t.heap[t.heap_max]+1]=0,a=t.heap_max+1;a<j;a++)(r=l[2*l[2*(i=t.heap[a])+1]+1]+1)>c&&(r=c,b++),l[2*i+1]=r,i>h||(t.bl_count[r]++,s=0,i>=u&&(s=_[i-u]),o=l[2*i],t.opt_len+=o*(r+s),f&&(t.static_len+=o*(d[2*i+1]+s)));if(0!==b){do{for(r=c-1;0===t.bl_count[r];)r--;t.bl_count[r]--,t.bl_count[r+1]+=2,t.bl_count[c]--,b-=2}while(b>0);for(r=c;0!==r;r--)for(i=t.bl_count[r];0!==i;)(n=t.heap[--a])>h||(l[2*n+1]!==r&&(t.opt_len+=(r-l[2*n+1])*l[2*n],l[2*n+1]=r),i--)}}function u(t,e,a){var i,n,r=new Array(K+1),s=0;for(i=1;i<=K;i++)r[i]=s=s+a[i-1]<<1;for(n=0;n<=e;n++){var o=t[2*n+1];0!==o&&(t[2*n]=d(r[o]++,o))}}function c(){var t,e,a,i,r,s=new Array(K+1);for(a=0,i=0;i<U-1;i++)for(it[i]=a,t=0;t<1<<W[i];t++)at[a++]=i;for(at[a-1]=i,r=0,i=0;i<16;i++)for(nt[i]=r,t=0;t<1<<J[i];t++)et[r++]=i;for(r>>=7;i<L;i++)for(nt[i]=r<<7,t=0;t<1<<J[i]-7;t++)et[256+r++]=i;for(e=0;e<=K;e++)s[e]=0;for(t=0;t<=143;)$[2*t+1]=8,t++,s[8]++;for(;t<=255;)$[2*t+1]=9,t++,s[9]++;for(;t<=279;)$[2*t+1]=7,t++,s[7]++;for(;t<=287;)$[2*t+1]=8,t++,s[8]++;for(u($,F+1,s),t=0;t<L;t++)tt[2*t+1]=5,tt[2*t]=d(t,5);rt=new n($,W,T+1,F,K),st=new n(tt,J,0,L,K),ot=new n(new Array(0),Q,0,H,P)}function b(t){var e;for(e=0;e<F;e++)t.dyn_ltree[2*e]=0;for(e=0;e<L;e++)t.dyn_dtree[2*e]=0;for(e=0;e<H;e++)t.bl_tree[2*e]=0;t.dyn_ltree[2*Y]=1,t.opt_len=t.static_len=0,t.last_lit=t.matches=0}function g(t){t.bi_valid>8?o(t,t.bi_buf):t.bi_valid>0&&(t.pending_buf[t.pending++]=t.bi_buf),t.bi_buf=0,t.bi_valid=0}function m(t,e,a,i){g(t),i&&(o(t,a),o(t,~a)),A.arraySet(t.pending_buf,t.window,e,a,t.pending),t.pending+=a}function w(t,e,a,i){var n=2*e,r=2*a;return t[n]<t[r]||t[n]===t[r]&&i[e]<=i[a]}function p(t,e,a){for(var 
i=t.heap[a],n=a<<1;n<=t.heap_len&&(n<t.heap_len&&w(e,t.heap[n+1],t.heap[n],t.depth)&&n++,!w(e,i,t.heap[n],t.depth));)t.heap[a]=t.heap[n],a=n,n<<=1;t.heap[a]=i}function v(t,e,a){var i,n,r,o,d=0;if(0!==t.last_lit)do{i=t.pending_buf[t.d_buf+2*d]<<8|t.pending_buf[t.d_buf+2*d+1],n=t.pending_buf[t.l_buf+d],d++,0===i?h(t,n,e):(h(t,(r=at[n])+T+1,e),0!==(o=W[r])&&l(t,n-=it[r],o),h(t,r=s(--i),a),0!==(o=J[r])&&l(t,i-=nt[r],o))}while(d<t.last_lit);h(t,Y,e)}function k(t,e){var a,i,n,r=e.dyn_tree,s=e.stat_desc.static_tree,o=e.stat_desc.has_stree,l=e.stat_desc.elems,h=-1;for(t.heap_len=0,t.heap_max=j,a=0;a<l;a++)0!==r[2*a]?(t.heap[++t.heap_len]=h=a,t.depth[a]=0):r[2*a+1]=0;for(;t.heap_len<2;)r[2*(n=t.heap[++t.heap_len]=h<2?++h:0)]=1,t.depth[n]=0,t.opt_len--,o&&(t.static_len-=s[2*n+1]);for(e.max_code=h,a=t.heap_len>>1;a>=1;a--)p(t,r,a);n=l;do{a=t.heap[1],t.heap[1]=t.heap[t.heap_len--],p(t,r,1),i=t.heap[1],t.heap[--t.heap_max]=a,t.heap[--t.heap_max]=i,r[2*n]=r[2*a]+r[2*i],t.depth[n]=(t.depth[a]>=t.depth[i]?t.depth[a]:t.depth[i])+1,r[2*a+1]=r[2*i+1]=n,t.heap[1]=n++,p(t,r,1)}while(t.heap_len>=2);t.heap[--t.heap_max]=t.heap[1],_(t,e),u(r,h,t.bl_count)}function y(t,e,a){var i,n,r=-1,s=e[1],o=0,l=7,h=4;for(0===s&&(l=138,h=3),e[2*(a+1)+1]=65535,i=0;i<=a;i++)n=s,s=e[2*(i+1)+1],++o<l&&n===s||(o<h?t.bl_tree[2*n]+=o:0!==n?(n!==r&&t.bl_tree[2*n]++,t.bl_tree[2*q]++):o<=10?t.bl_tree[2*G]++:t.bl_tree[2*X]++,o=0,r=n,0===s?(l=138,h=3):n===s?(l=6,h=3):(l=7,h=4))}function x(t,e,a){var i,n,r=-1,s=e[1],o=0,d=7,f=4;for(0===s&&(d=138,f=3),i=0;i<=a;i++)if(n=s,s=e[2*(i+1)+1],!(++o<d&&n===s)){if(o<f)do{h(t,n,t.bl_tree)}while(0!=--o);else 0!==n?(n!==r&&(h(t,n,t.bl_tree),o--),h(t,q,t.bl_tree),l(t,o-3,2)):o<=10?(h(t,G,t.bl_tree),l(t,o-3,3)):(h(t,X,t.bl_tree),l(t,o-11,7));o=0,r=n,0===s?(d=138,f=3):n===s?(d=6,f=3):(d=7,f=4)}}function z(t){var e;for(y(t,t.dyn_ltree,t.l_desc.max_code),y(t,t.dyn_dtree,t.d_desc.max_code),k(t,t.bl_desc),e=H-1;e>=3&&0===t.bl_tree[2*V[e]+1];e--);return 
t.opt_len+=3*(e+1)+5+5+4,e}function B(t,e,a,i){var n;for(l(t,e-257,5),l(t,a-1,5),l(t,i-4,4),n=0;n<i;n++)l(t,t.bl_tree[2*V[n]+1],3);x(t,t.dyn_ltree,e-1),x(t,t.dyn_dtree,a-1)}function S(t){var e,a=4093624447;for(e=0;e<=31;e++,a>>>=1)if(1&a&&0!==t.dyn_ltree[2*e])return R;if(0!==t.dyn_ltree[18]||0!==t.dyn_ltree[20]||0!==t.dyn_ltree[26])return C;for(e=32;e<T;e++)if(0!==t.dyn_ltree[2*e])return C;return R}function E(t,e,a,i){l(t,(O<<1)+(i?1:0),3),m(t,e,a,!0)}var A=t("../utils/common"),Z=4,R=0,C=1,N=2,O=0,D=1,I=2,U=29,T=256,F=T+1+U,L=30,H=19,j=2*F+1,K=15,M=16,P=7,Y=256,q=16,G=17,X=18,W=[0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,0],J=[0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13],Q=[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,3,7],V=[16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15],$=new Array(2*(F+2));i($);var tt=new Array(2*L);i(tt);var et=new Array(512);i(et);var at=new Array(256);i(at);var it=new Array(U);i(it);var nt=new Array(L);i(nt);var rt,st,ot,lt=!1;a._tr_init=function(t){lt||(c(),lt=!0),t.l_desc=new r(t.dyn_ltree,rt),t.d_desc=new r(t.dyn_dtree,st),t.bl_desc=new r(t.bl_tree,ot),t.bi_buf=0,t.bi_valid=0,b(t)},a._tr_stored_block=E,a._tr_flush_block=function(t,e,a,i){var n,r,s=0;t.level>0?(t.strm.data_type===N&&(t.strm.data_type=S(t)),k(t,t.l_desc),k(t,t.d_desc),s=z(t),n=t.opt_len+3+7>>>3,(r=t.static_len+3+7>>>3)<=n&&(n=r)):n=r=a+5,a+4<=n&&-1!==e?E(t,e,a,i):t.strategy===Z||r===n?(l(t,(D<<1)+(i?1:0),3),v(t,$,tt)):(l(t,(I<<1)+(i?1:0),3),B(t,t.l_desc.max_code+1,t.d_desc.max_code+1,s+1),v(t,t.dyn_ltree,t.dyn_dtree)),b(t),i&&g(t)},a._tr_tally=function(t,e,a){return t.pending_buf[t.d_buf+2*t.last_lit]=e>>>8&255,t.pending_buf[t.d_buf+2*t.last_lit+1]=255&e,t.pending_buf[t.l_buf+t.last_lit]=255&a,t.last_lit++,0===e?t.dyn_ltree[2*a]++:(t.matches++,e--,t.dyn_ltree[2*(at[a]+T+1)]++,t.dyn_dtree[2*s(e)]++),t.last_lit===t.lit_bufsize-1},a._tr_align=function(t){l(t,D<<1,3),h(t,Y,$),f(t)}},{"../utils/common":3}],15:[function(t,e,a){"use 
strict";e.exports=function(){this.input=null,this.next_in=0,this.avail_in=0,this.total_in=0,this.output=null,this.next_out=0,this.avail_out=0,this.total_out=0,this.msg="",this.state=null,this.data_type=2,this.adler=0}},{}],"/":[function(t,e,a){"use strict";var i={};(0,t("./lib/utils/common").assign)(i,t("./lib/deflate"),t("./lib/inflate"),t("./lib/zlib/constants")),e.exports=i},{"./lib/deflate":1,"./lib/inflate":2,"./lib/utils/common":3,"./lib/zlib/constants":6}]},{},[])("/")});'use strict';tr.exportTo('tr.e.importer',function(){const GZIP_MEMBER_HEADER_ID_SIZE=3;const GZIP_HEADER_ID1=0x1f;const GZIP_HEADER_ID2=0x8b;const GZIP_DEFLATE_COMPRESSION=8;function _stringToUInt8Array(str){const array=new Uint8Array(str.length);for(let i=0;i<str.length;++i){array[i]=str.charCodeAt(i);} return array;} function GzipImporter(model,eventData){this.inflateAsTraceStream=false;if(typeof(eventData)==='string'||eventData instanceof String){eventData=_stringToUInt8Array(eventData);}else if(eventData instanceof ArrayBuffer){eventData=new Uint8Array(eventData);}else if(eventData instanceof tr.b.InMemoryTraceStream){eventData=eventData.data;this.inflateAsTraceStream_=true;}else{throw new Error('Unknown gzip data format');} this.model_=model;this.gzipData_=eventData;} @@ -5251,7 +5251,7 @@ this.model_.samples.push(new tr.model.Sample(startInMs,'V8 PC',node,this.v8_thread_,undefined,1));},processDistortion_(distortionInPicoseconds){},processPlotRange_(start,end){},processV8Version_(major,minor,build,patch,candidate){},importEvents(){const logreader=new 
tr.e.importer.v8.LogReader({'timer-event':{parsers:[null,parseInt,parseInt],processor:this.processTimerEvent_.bind(this)},'shared-library':{parsers:[null,parseInt,parseInt],processor:this.processSharedLibrary_.bind(this)},'timer-event-start':{parsers:[null,parseInt],processor:this.processTimerEventStart_.bind(this)},'timer-event-end':{parsers:[null,parseInt],processor:this.processTimerEventEnd_.bind(this)},'code-creation':{parsers:[null,parseInt,parseInt,parseInt,null,'var-args'],processor:this.processCodeCreateEvent_.bind(this)},'code-move':{parsers:[parseInt,parseInt],processor:this.processCodeMoveEvent_.bind(this)},'code-delete':{parsers:[parseInt],processor:this.processCodeDeleteEvent_.bind(this)},'cpp':{parsers:[parseInt,parseInt,null],processor:this.processCppSymbol_.bind(this)},'tick':{parsers:[parseInt,parseInt,parseInt,parseInt,parseInt,'var-args'],processor:this.processTickEvent_.bind(this)},'distortion':{parsers:[parseInt],processor:this.processDistortion_.bind(this)},'plot-range':{parsers:[parseInt,parseInt],processor:this.processPlotRange_.bind(this)},'v8-version':{parsers:[parseInt,parseInt,parseInt,parseInt,parseInt],processor:this.processV8Version_.bind(this)}});this.v8_timer_thread_=this.model_.getOrCreateProcess(-32).getOrCreateThread(1);this.v8_timer_thread_.name='V8 Timers';this.v8_thread_=this.model_.getOrCreateProcess(-32).getOrCreateThread(2);this.v8_thread_.name='V8';const lines=this.logData_.split('\n');for(let i=0;i<lines.length;i++){logreader.processLogLine(lines[i]);} function addSlices(slices,thread){for(let i=0;i<slices.length;i++){const duration=slices[i].end-slices[i].start;const slice=new tr.model.ThreadSlice('v8',slices[i].name,ColorScheme.getColorIdForGeneralPurposeString(slices[i].name),slices[i].start,{},duration);thread.sliceGroup.pushSlice(slice);addSlices(slices[i].children,thread);}} addSlices(this.v8_stack_timeline_,this.v8_thread_);}};tr.importer.Importer.register(V8LogImporter);return{V8LogImporter,};});'use 
strict';if(tr.isVinn){global.window={};} -!function(a){if("object"==typeof exports&&"undefined"!=typeof module)module.exports=a();else if("function"==typeof define&&define.amd)define([],a);else{var b;"undefined"!=typeof window?b=window:"undefined"!=typeof global?b=global:"undefined"!=typeof self&&(b=self),b.JSZip=a()}}(function(){return function a(b,c,d){function e(g,h){if(!c[g]){if(!b[g]){var i="function"==typeof require&&require;if(!h&&i)return i(g,!0);if(f)return f(g,!0);throw new Error("Cannot find module '"+g+"'")}var j=c[g]={exports:{}};b[g][0].call(j.exports,function(a){var c=b[g][1][a];return e(c?c:a)},j,j.exports,a,b,c,d)}return c[g].exports}for(var f="function"==typeof require&&require,g=0;g<d.length;g++)e(d[g]);return e}({1:[function(a,b,c){"use strict";var d="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/=";c.encode=function(a){for(var b,c,e,f,g,h,i,j="",k=0;k<a.length;)b=a.charCodeAt(k++),c=a.charCodeAt(k++),e=a.charCodeAt(k++),f=b>>2,g=(3&b)<<4|c>>4,h=(15&c)<<2|e>>6,i=63&e,isNaN(c)?h=i=64:isNaN(e)&&(i=64),j=j+d.charAt(f)+d.charAt(g)+d.charAt(h)+d.charAt(i);return j},c.decode=function(a){var b,c,e,f,g,h,i,j="",k=0;for(a=a.replace(/[^A-Za-z0-9\+\/\=]/g,"");k<a.length;)f=d.indexOf(a.charAt(k++)),g=d.indexOf(a.charAt(k++)),h=d.indexOf(a.charAt(k++)),i=d.indexOf(a.charAt(k++)),b=f<<2|g>>4,c=(15&g)<<4|h>>2,e=(3&h)<<6|i,j+=String.fromCharCode(b),64!=h&&(j+=String.fromCharCode(c)),64!=i&&(j+=String.fromCharCode(e));return j}},{}],2:[function(a,b){"use strict";function c(){this.compressedSize=0,this.uncompressedSize=0,this.crc32=0,this.compressionMethod=null,this.compressedContent=null}c.prototype={getContent:function(){return null},getCompressedContent:function(){return null}},b.exports=c},{}],3:[function(a,b,c){"use strict";c.STORE={magic:"\x00\x00",compress:function(a){return a},uncompress:function(a){return a},compressInputType:null,uncompressInputType:null},c.DEFLATE=a("./flate")},{"./flate":8}],4:[function(a,b){"use strict";var 
c=a("./utils"),d=[0,1996959894,3993919788,2567524794,124634137,1886057615,3915621685,2657392035,249268274,2044508324,3772115230,2547177864,162941995,2125561021,3887607047,2428444049,498536548,1789927666,4089016648,2227061214,450548861,1843258603,4107580753,2211677639,325883990,1684777152,4251122042,2321926636,335633487,1661365465,4195302755,2366115317,997073096,1281953886,3579855332,2724688242,1006888145,1258607687,3524101629,2768942443,901097722,1119000684,3686517206,2898065728,853044451,1172266101,3705015759,2882616665,651767980,1373503546,3369554304,3218104598,565507253,1454621731,3485111705,3099436303,671266974,1594198024,3322730930,2970347812,795835527,1483230225,3244367275,3060149565,1994146192,31158534,2563907772,4023717930,1907459465,112637215,2680153253,3904427059,2013776290,251722036,2517215374,3775830040,2137656763,141376813,2439277719,3865271297,1802195444,476864866,2238001368,4066508878,1812370925,453092731,2181625025,4111451223,1706088902,314042704,2344532202,4240017532,1658658271,366619977,2362670323,4224994405,1303535960,984961486,2747007092,3569037538,1256170817,1037604311,2765210733,3554079995,1131014506,879679996,2909243462,3663771856,1141124467,855842277,2852801631,3708648649,1342533948,654459306,3188396048,3373015174,1466479909,544179635,3110523913,3462522015,1591671054,702138776,2966460450,3352799412,1504918807,783551873,3082640443,3233442989,3988292384,2596254646,62317068,1957810842,3939845945,2647816111,81470997,1943803523,3814918930,2489596804,225274430,2053790376,3826175755,2466906013,167816743,2097651377,4027552580,2265490386,503444072,1762050814,4150417245,2154129355,426522225,1852507879,4275313526,2312317920,282753626,1742555852,4189708143,2394877945,397917763,1622183637,3604390888,2714866558,953729732,1340076626,3518719985,2797360999,1068828381,1219638859,3624741850,2936675148,906185462,1090812512,3747672003,2825379669,829329135,1181335161,3412177804,3160834842,628085408,1382605366,3423369109,3138078467,570562233,1426400815,3317316542,2
998733608,733239954,1555261956,3268935591,3050360625,752459403,1541320221,2607071920,3965973030,1969922972,40735498,2617837225,3943577151,1913087877,83908371,2512341634,3803740692,2075208622,213261112,2463272603,3855990285,2094854071,198958881,2262029012,4057260610,1759359992,534414190,2176718541,4139329115,1873836001,414664567,2282248934,4279200368,1711684554,285281116,2405801727,4167216745,1634467795,376229701,2685067896,3608007406,1308918612,956543938,2808555105,3495958263,1231636301,1047427035,2932959818,3654703836,1088359270,936918e3,2847714899,3736837829,1202900863,817233897,3183342108,3401237130,1404277552,615818150,3134207493,3453421203,1423857449,601450431,3009837614,3294710456,1567103746,711928724,3020668471,3272380065,1510334235,755167117];b.exports=function(a,b){if("undefined"==typeof a||!a.length)return 0;var e="string"!==c.getTypeOf(a);"undefined"==typeof b&&(b=0);var f=0,g=0,h=0;b=-1^b;for(var i=0,j=a.length;j>i;i++)h=e?a[i]:a.charCodeAt(i),g=255&(b^h),f=d[g],b=b>>>8^f;return-1^b}},{"./utils":21}],5:[function(a,b){"use strict";function c(){this.data=null,this.length=0,this.index=0}var d=a("./utils");c.prototype={checkOffset:function(a){this.checkIndex(this.index+a)},checkIndex:function(a){if(this.length<a||0>a)throw new Error("End of data reached (data length = "+this.length+", asked index = "+a+"). 
Corrupted zip ?")},setIndex:function(a){this.checkIndex(a),this.index=a},skip:function(a){this.setIndex(this.index+a)},byteAt:function(){},readInt:function(a){var b,c=0;for(this.checkOffset(a),b=this.index+a-1;b>=this.index;b--)c=(c<<8)+this.byteAt(b);return this.index+=a,c},readString:function(a){return d.transformTo("string",this.readData(a))},readData:function(){},lastIndexOfSignature:function(){},readDate:function(){var a=this.readInt(4);return new Date((a>>25&127)+1980,(a>>21&15)-1,a>>16&31,a>>11&31,a>>5&63,(31&a)<<1)}},b.exports=c},{"./utils":21}],6:[function(a,b,c){"use strict";c.base64=!1,c.binary=!1,c.dir=!1,c.createFolders=!1,c.date=null,c.compression=null,c.comment=null},{}],7:[function(a,b,c){"use strict";var d=a("./utils");c.string2binary=function(a){return d.string2binary(a)},c.string2Uint8Array=function(a){return d.transformTo("uint8array",a)},c.uint8Array2String=function(a){return d.transformTo("string",a)},c.string2Blob=function(a){var b=d.transformTo("arraybuffer",a);return d.arrayBuffer2Blob(b)},c.arrayBuffer2Blob=function(a){return d.arrayBuffer2Blob(a)},c.transformTo=function(a,b){return d.transformTo(a,b)},c.getTypeOf=function(a){return d.getTypeOf(a)},c.checkSupport=function(a){return d.checkSupport(a)},c.MAX_VALUE_16BITS=d.MAX_VALUE_16BITS,c.MAX_VALUE_32BITS=d.MAX_VALUE_32BITS,c.pretty=function(a){return d.pretty(a)},c.findCompression=function(a){return d.findCompression(a)},c.isRegExp=function(a){return d.isRegExp(a)}},{"./utils":21}],8:[function(a,b,c){"use strict";var d="undefined"!=typeof Uint8Array&&"undefined"!=typeof Uint16Array&&"undefined"!=typeof Uint32Array,e=a("pako");c.uncompressInputType=d?"uint8array":"array",c.compressInputType=d?"uint8array":"array",c.magic="\b\x00",c.compress=function(a){return e.deflateRaw(a)},c.uncompress=function(a){return e.inflateRaw(a)}},{pako:24}],9:[function(a,b){"use strict";function c(a,b){return this instanceof 
c?(this.files={},this.comment=null,this.root="",a&&this.load(a,b),void(this.clone=function(){var a=new c;for(var b in this)"function"!=typeof this[b]&&(a[b]=this[b]);return a})):new c(a,b)}var d=a("./base64");c.prototype=a("./object"),c.prototype.load=a("./load"),c.support=a("./support"),c.defaults=a("./defaults"),c.utils=a("./deprecatedPublicUtils"),c.base64={encode:function(a){return d.encode(a)},decode:function(a){return d.decode(a)}},c.compressions=a("./compressions"),b.exports=c},{"./base64":1,"./compressions":3,"./defaults":6,"./deprecatedPublicUtils":7,"./load":10,"./object":13,"./support":17}],10:[function(a,b){"use strict";var c=a("./base64"),d=a("./zipEntries");b.exports=function(a,b){var e,f,g,h;for(b=b||{},b.base64&&(a=c.decode(a)),f=new d(a,b),e=f.files,g=0;g<e.length;g++)h=e[g],this.file(h.fileName,h.decompressed,{binary:!0,optimizedBinaryString:!0,date:h.date,dir:h.dir,comment:h.fileComment.length?h.fileComment:null,createFolders:b.createFolders});return f.zipComment.length&&(this.comment=f.zipComment),this}},{"./base64":1,"./zipEntries":22}],11:[function(a,b){(function(a){"use strict";b.exports=function(b,c){return new a(b,c)},b.exports.test=function(b){return a.isBuffer(b)}}).call(this,"undefined"!=typeof Buffer?Buffer:void 0)},{}],12:[function(a,b){"use strict";function c(a){this.data=a,this.length=this.data.length,this.index=0}var d=a("./uint8ArrayReader");c.prototype=new d,c.prototype.readData=function(a){this.checkOffset(a);var b=this.data.slice(this.index,this.index+a);return this.index+=a,b},b.exports=c},{"./uint8ArrayReader":18}],13:[function(a,b){"use strict";var c=a("./support"),d=a("./utils"),e=a("./crc32"),f=a("./signature"),g=a("./defaults"),h=a("./base64"),i=a("./compressions"),j=a("./compressedObject"),k=a("./nodeBuffer"),l=a("./utf8"),m=a("./stringWriter"),n=a("./uint8ArrayWriter"),o=function(a){if(a._data instanceof j&&(a._data=a._data.getContent(),a.options.binary=!0,a.options.base64=!1,"uint8array"===d.getTypeOf(a._data))){var 
b=a._data;a._data=new Uint8Array(b.length),0!==b.length&&a._data.set(b,0)}return a._data},p=function(a){var b=o(a),e=d.getTypeOf(b);return"string"===e?!a.options.binary&&c.nodebuffer?k(b,"utf-8"):a.asBinary():b},q=function(a){var b=o(this);return null===b||"undefined"==typeof b?"":(this.options.base64&&(b=h.decode(b)),b=a&&this.options.binary?A.utf8decode(b):d.transformTo("string",b),a||this.options.binary||(b=d.transformTo("string",A.utf8encode(b))),b)},r=function(a,b,c){this.name=a,this.dir=c.dir,this.date=c.date,this.comment=c.comment,this._data=b,this.options=c,this._initialMetadata={dir:c.dir,date:c.date}};r.prototype={asText:function(){return q.call(this,!0)},asBinary:function(){return q.call(this,!1)},asNodeBuffer:function(){var a=p(this);return d.transformTo("nodebuffer",a)},asUint8Array:function(){var a=p(this);return d.transformTo("uint8array",a)},asArrayBuffer:function(){return this.asUint8Array().buffer}};var s=function(a,b){var c,d="";for(c=0;b>c;c++)d+=String.fromCharCode(255&a),a>>>=8;return d},t=function(){var a,b,c={};for(a=0;a<arguments.length;a++)for(b in arguments[a])arguments[a].hasOwnProperty(b)&&"undefined"==typeof c[b]&&(c[b]=arguments[a][b]);return c},u=function(a){return a=a||{},a.base64!==!0||null!==a.binary&&void 0!==a.binary||(a.binary=!0),a=t(a,g),a.date=a.date||new Date,null!==a.compression&&(a.compression=a.compression.toUpperCase()),a},v=function(a,b,c){var e,f=d.getTypeOf(b);if(c=u(c),c.createFolders&&(e=w(a))&&x.call(this,e,!0),c.dir||null===b||"undefined"==typeof b)c.base64=!1,c.binary=!1,b=null;else if("string"===f)c.binary&&!c.base64&&c.optimizedBinaryString!==!0&&(b=d.string2binary(b));else{if(c.base64=!1,c.binary=!0,!(f||b instanceof j))throw new Error("The data of '"+a+"' is in an unsupported format !");"arraybuffer"===f&&(b=d.transformTo("uint8array",b))}var g=new r(a,b,c);return this.files[a]=g,g},w=function(a){"/"==a.slice(-1)&&(a=a.substring(0,a.length-1));var b=a.lastIndexOf("/");return 
b>0?a.substring(0,b):""},x=function(a,b){return"/"!=a.slice(-1)&&(a+="/"),b="undefined"!=typeof b?b:!1,this.files[a]||v.call(this,a,null,{dir:!0,createFolders:b}),this.files[a]},y=function(a,b){var c,f=new j;return a._data instanceof j?(f.uncompressedSize=a._data.uncompressedSize,f.crc32=a._data.crc32,0===f.uncompressedSize||a.dir?(b=i.STORE,f.compressedContent="",f.crc32=0):a._data.compressionMethod===b.magic?f.compressedContent=a._data.getCompressedContent():(c=a._data.getContent(),f.compressedContent=b.compress(d.transformTo(b.compressInputType,c)))):(c=p(a),(!c||0===c.length||a.dir)&&(b=i.STORE,c=""),f.uncompressedSize=c.length,f.crc32=e(c),f.compressedContent=b.compress(d.transformTo(b.compressInputType,c))),f.compressedSize=f.compressedContent.length,f.compressionMethod=b.magic,f},z=function(a,b,c,g){var h,i,j,k,m=(c.compressedContent,d.transformTo("string",l.utf8encode(b.name))),n=b.comment||"",o=d.transformTo("string",l.utf8encode(n)),p=m.length!==b.name.length,q=o.length!==n.length,r=b.options,t="",u="",v="";j=b._initialMetadata.dir!==b.dir?b.dir:r.dir,k=b._initialMetadata.date!==b.date?b.date:r.date,h=k.getHours(),h<<=6,h|=k.getMinutes(),h<<=5,h|=k.getSeconds()/2,i=k.getFullYear()-1980,i<<=4,i|=k.getMonth()+1,i<<=5,i|=k.getDate(),p&&(u=s(1,1)+s(e(m),4)+m,t+="up"+s(u.length,2)+u),q&&(v=s(1,1)+s(this.crc32(o),4)+o,t+="uc"+s(v.length,2)+v);var w="";w+="\n\x00",w+=p||q?"\x00\b":"\x00\x00",w+=c.compressionMethod,w+=s(h,2),w+=s(i,2),w+=s(c.crc32,4),w+=s(c.compressedSize,4),w+=s(c.uncompressedSize,4),w+=s(m.length,2),w+=s(t.length,2);var x=f.LOCAL_FILE_HEADER+w+m+t,y=f.CENTRAL_FILE_HEADER+"\x00"+w+s(o.length,2)+"\x00\x00\x00\x00"+(j===!0?"\x00\x00\x00":"\x00\x00\x00\x00")+s(g,4)+m+t+o;return{fileRecord:x,dirRecord:y,compressedObject:c}},A={load:function(){throw new Error("Load method is not defined. 
Is the file jszip-load.js included ?")},filter:function(a){var b,c,d,e,f=[];for(b in this.files)this.files.hasOwnProperty(b)&&(d=this.files[b],e=new r(d.name,d._data,t(d.options)),c=b.slice(this.root.length,b.length),b.slice(0,this.root.length)===this.root&&a(c,e)&&f.push(e));return f},file:function(a,b,c){if(1===arguments.length){if(d.isRegExp(a)){var e=a;return this.filter(function(a,b){return!b.dir&&e.test(a)})}return this.filter(function(b,c){return!c.dir&&b===a})[0]||null}return a=this.root+a,v.call(this,a,b,c),this},folder:function(a){if(!a)return this;if(d.isRegExp(a))return this.filter(function(b,c){return c.dir&&a.test(b)});var b=this.root+a,c=x.call(this,b),e=this.clone();return e.root=c.name,e},remove:function(a){a=this.root+a;var b=this.files[a];if(b||("/"!=a.slice(-1)&&(a+="/"),b=this.files[a]),b&&!b.dir)delete this.files[a];else for(var c=this.filter(function(b,c){return c.name.slice(0,a.length)===a}),d=0;d<c.length;d++)delete this.files[c[d].name];return this},generate:function(a){a=t(a||{},{base64:!0,compression:"STORE",type:"base64",comment:null}),d.checkSupport(a.type);var b,c,e=[],g=0,j=0,k=d.transformTo("string",this.utf8encode(a.comment||this.comment||""));for(var l in this.files)if(this.files.hasOwnProperty(l)){var o=this.files[l],p=o.options.compression||a.compression.toUpperCase(),q=i[p];if(!q)throw new Error(p+" is not a valid compression method !");var r=y.call(this,o,q),u=z.call(this,l,o,r,g);g+=u.fileRecord.length+r.compressedSize,j+=u.dirRecord.length,e.push(u)}var v="";v=f.CENTRAL_DIRECTORY_END+"\x00\x00\x00\x00"+s(e.length,2)+s(e.length,2)+s(j,4)+s(g,4)+s(k.length,2)+k;var w=a.type.toLowerCase();for(b="uint8array"===w||"arraybuffer"===w||"blob"===w||"nodebuffer"===w?new n(g+j+v.length):new m(g+j+v.length),c=0;c<e.length;c++)b.append(e[c].fileRecord),b.append(e[c].compressedObject.compressedContent);for(c=0;c<e.length;c++)b.append(e[c].dirRecord);b.append(v);var 
x=b.finalize();switch(a.type.toLowerCase()){case"uint8array":case"arraybuffer":case"nodebuffer":return d.transformTo(a.type.toLowerCase(),x);case"blob":return d.arrayBuffer2Blob(d.transformTo("arraybuffer",x));case"base64":return a.base64?h.encode(x):x;default:return x}},crc32:function(a,b){return e(a,b)},utf8encode:function(a){return d.transformTo("string",l.utf8encode(a))},utf8decode:function(a){return l.utf8decode(a)}};b.exports=A},{"./base64":1,"./compressedObject":2,"./compressions":3,"./crc32":4,"./defaults":6,"./nodeBuffer":11,"./signature":14,"./stringWriter":16,"./support":17,"./uint8ArrayWriter":19,"./utf8":20,"./utils":21}],14:[function(a,b,c){"use strict";c.LOCAL_FILE_HEADER="PK",c.CENTRAL_FILE_HEADER="PK",c.CENTRAL_DIRECTORY_END="PK",c.ZIP64_CENTRAL_DIRECTORY_LOCATOR="PK",c.ZIP64_CENTRAL_DIRECTORY_END="PK",c.DATA_DESCRIPTOR="PK\b"},{}],15:[function(a,b){"use strict";function c(a,b){this.data=a,b||(this.data=e.string2binary(this.data)),this.length=this.data.length,this.index=0}var d=a("./dataReader"),e=a("./utils");c.prototype=new d,c.prototype.byteAt=function(a){return this.data.charCodeAt(a)},c.prototype.lastIndexOfSignature=function(a){return this.data.lastIndexOf(a)},c.prototype.readData=function(a){this.checkOffset(a);var b=this.data.slice(this.index,this.index+a);return this.index+=a,b},b.exports=c},{"./dataReader":5,"./utils":21}],16:[function(a,b){"use strict";var c=a("./utils"),d=function(){this.data=[]};d.prototype={append:function(a){a=c.transformTo("string",a),this.data.push(a)},finalize:function(){return this.data.join("")}},b.exports=d},{"./utils":21}],17:[function(a,b,c){(function(a){"use strict";if(c.base64=!0,c.array=!0,c.string=!0,c.arraybuffer="undefined"!=typeof ArrayBuffer&&"undefined"!=typeof Uint8Array,c.nodebuffer="undefined"!=typeof a,c.uint8array="undefined"!=typeof Uint8Array,"undefined"==typeof ArrayBuffer)c.blob=!1;else{var b=new ArrayBuffer(0);try{c.blob=0===new Blob([b],{type:"application/zip"}).size}catch(d){try{var 
e=window.BlobBuilder||window.WebKitBlobBuilder||window.MozBlobBuilder||window.MSBlobBuilder,f=new e;f.append(b),c.blob=0===f.getBlob("application/zip").size}catch(d){c.blob=!1}}}}).call(this,"undefined"!=typeof Buffer?Buffer:void 0)},{}],18:[function(a,b){"use strict";function c(a){a&&(this.data=a,this.length=this.data.length,this.index=0)}var d=a("./dataReader");c.prototype=new d,c.prototype.byteAt=function(a){return this.data[a]},c.prototype.lastIndexOfSignature=function(a){for(var b=a.charCodeAt(0),c=a.charCodeAt(1),d=a.charCodeAt(2),e=a.charCodeAt(3),f=this.length-4;f>=0;--f)if(this.data[f]===b&&this.data[f+1]===c&&this.data[f+2]===d&&this.data[f+3]===e)return f;return-1},c.prototype.readData=function(a){if(this.checkOffset(a),0===a)return new Uint8Array(0);var b=this.data.subarray(this.index,this.index+a);return this.index+=a,b},b.exports=c},{"./dataReader":5}],19:[function(a,b){"use strict";var c=a("./utils"),d=function(a){this.data=new Uint8Array(a),this.index=0};d.prototype={append:function(a){0!==a.length&&(a=c.transformTo("uint8array",a),this.data.set(a,this.index),this.index+=a.length)},finalize:function(){return this.data}},b.exports=d},{"./utils":21}],20:[function(a,b,c){"use strict";for(var d=a("./utils"),e=a("./support"),f=a("./nodeBuffer"),g=new Array(256),h=0;256>h;h++)g[h]=h>=252?6:h>=248?5:h>=240?4:h>=224?3:h>=192?2:1;g[254]=g[254]=1;var i=function(a){var b,c,d,f,g,h=a.length,i=0;for(f=0;h>f;f++)c=a.charCodeAt(f),55296===(64512&c)&&h>f+1&&(d=a.charCodeAt(f+1),56320===(64512&d)&&(c=65536+(c-55296<<10)+(d-56320),f++)),i+=128>c?1:2048>c?2:65536>c?3:4;for(b=e.uint8array?new Uint8Array(i):new Array(i),g=0,f=0;i>g;f++)c=a.charCodeAt(f),55296===(64512&c)&&h>f+1&&(d=a.charCodeAt(f+1),56320===(64512&d)&&(c=65536+(c-55296<<10)+(d-56320),f++)),128>c?b[g++]=c:2048>c?(b[g++]=192|c>>>6,b[g++]=128|63&c):65536>c?(b[g++]=224|c>>>12,b[g++]=128|c>>>6&63,b[g++]=128|63&c):(b[g++]=240|c>>>18,b[g++]=128|c>>>12&63,b[g++]=128|c>>>6&63,b[g++]=128|63&c);return 
b},j=function(a,b){var c;for(b=b||a.length,b>a.length&&(b=a.length),c=b-1;c>=0&&128===(192&a[c]);)c--;return 0>c?b:0===c?b:c+g[a[c]]>b?c:b},k=function(a){var b,c,e,f,h=a.length,i=new Array(2*h);for(c=0,b=0;h>b;)if(e=a[b++],128>e)i[c++]=e;else if(f=g[e],f>4)i[c++]=65533,b+=f-1;else{for(e&=2===f?31:3===f?15:7;f>1&&h>b;)e=e<<6|63&a[b++],f--;f>1?i[c++]=65533:65536>e?i[c++]=e:(e-=65536,i[c++]=55296|e>>10&1023,i[c++]=56320|1023&e)}return i.length!==c&&(i.subarray?i=i.subarray(0,c):i.length=c),d.applyFromCharCode(i)};c.utf8encode=function(a){return e.nodebuffer?f(a,"utf-8"):i(a)},c.utf8decode=function(a){if(e.nodebuffer)return d.transformTo("nodebuffer",a).toString("utf-8");a=d.transformTo(e.uint8array?"uint8array":"array",a);for(var b=[],c=0,f=a.length,g=65536;f>c;){var h=j(a,Math.min(c+g,f));b.push(e.uint8array?k(a.subarray(c,h)):k(a.slice(c,h))),c=h}return b.join("")}},{"./nodeBuffer":11,"./support":17,"./utils":21}],21:[function(a,b,c){"use strict";function d(a){return a}function e(a,b){for(var c=0;c<a.length;++c)b[c]=255&a.charCodeAt(c);return b}function f(a){var b=65536,d=[],e=a.length,f=c.getTypeOf(a),g=0,h=!0;try{switch(f){case"uint8array":String.fromCharCode.apply(null,new Uint8Array(0));break;case"nodebuffer":String.fromCharCode.apply(null,j(0))}}catch(i){h=!1}if(!h){for(var k="",l=0;l<a.length;l++)k+=String.fromCharCode(a[l]);return k}for(;e>g&&b>1;)try{d.push("array"===f||"nodebuffer"===f?String.fromCharCode.apply(null,a.slice(g,Math.min(g+b,e))):String.fromCharCode.apply(null,a.subarray(g,Math.min(g+b,e)))),g+=b}catch(i){b=Math.floor(b/2)}return d.join("")}function g(a,b){for(var c=0;c<a.length;c++)b[c]=a[c];return b}var h=a("./support"),i=a("./compressions"),j=a("./nodeBuffer");c.string2binary=function(a){for(var b="",c=0;c<a.length;c++)b+=String.fromCharCode(255&a.charCodeAt(c));return b},c.arrayBuffer2Blob=function(a){c.checkSupport("blob");try{return new Blob([a],{type:"application/zip"})}catch(b){try{var 
d=window.BlobBuilder||window.WebKitBlobBuilder||window.MozBlobBuilder||window.MSBlobBuilder,e=new d;return e.append(a),e.getBlob("application/zip")}catch(b){throw new Error("Bug : can't construct the Blob.")}}},c.applyFromCharCode=f;var k={};k.string={string:d,array:function(a){return e(a,new Array(a.length))},arraybuffer:function(a){return k.string.uint8array(a).buffer},uint8array:function(a){return e(a,new Uint8Array(a.length))},nodebuffer:function(a){return e(a,j(a.length))}},k.array={string:f,array:d,arraybuffer:function(a){return new Uint8Array(a).buffer},uint8array:function(a){return new Uint8Array(a)},nodebuffer:function(a){return j(a)}},k.arraybuffer={string:function(a){return f(new Uint8Array(a))},array:function(a){return g(new Uint8Array(a),new Array(a.byteLength))},arraybuffer:d,uint8array:function(a){return new Uint8Array(a)},nodebuffer:function(a){return j(new Uint8Array(a))}},k.uint8array={string:f,array:function(a){return g(a,new Array(a.length))},arraybuffer:function(a){return a.buffer},uint8array:d,nodebuffer:function(a){return j(a)}},k.nodebuffer={string:f,array:function(a){return g(a,new Array(a.length))},arraybuffer:function(a){return k.nodebuffer.uint8array(a).buffer},uint8array:function(a){return g(a,new Uint8Array(a.length))},nodebuffer:d},c.transformTo=function(a,b){if(b||(b=""),!a)return b;c.checkSupport(a);var d=c.getTypeOf(b),e=k[d][a](b);return e},c.getTypeOf=function(a){return"string"==typeof a?"string":"[object Array]"===Object.prototype.toString.call(a)?"array":h.nodebuffer&&j.test(a)?"nodebuffer":h.uint8array&&a instanceof Uint8Array?"uint8array":h.arraybuffer&&a instanceof ArrayBuffer?"arraybuffer":void 0},c.checkSupport=function(a){var b=h[a.toLowerCase()];if(!b)throw new Error(a+" is not supported by this browser")},c.MAX_VALUE_16BITS=65535,c.MAX_VALUE_32BITS=-1,c.pretty=function(a){var b,c,d="";for(c=0;c<(a||"").length;c++)b=a.charCodeAt(c),d+="\\x"+(16>b?"0":"")+b.toString(16).toUpperCase();return 
d},c.findCompression=function(a){for(var b in i)if(i.hasOwnProperty(b)&&i[b].magic===a)return i[b];return null},c.isRegExp=function(a){return"[object RegExp]"===Object.prototype.toString.call(a)}},{"./compressions":3,"./nodeBuffer":11,"./support":17}],22:[function(a,b){"use strict";function c(a,b){this.files=[],this.loadOptions=b,a&&this.load(a)}var d=a("./stringReader"),e=a("./nodeBufferReader"),f=a("./uint8ArrayReader"),g=a("./utils"),h=a("./signature"),i=a("./zipEntry"),j=a("./support"),k=a("./object");c.prototype={checkSignature:function(a){var b=this.reader.readString(4);if(b!==a)throw new Error("Corrupted zip or bug : unexpected signature ("+g.pretty(b)+", expected "+g.pretty(a)+")")},readBlockEndOfCentral:function(){this.diskNumber=this.reader.readInt(2),this.diskWithCentralDirStart=this.reader.readInt(2),this.centralDirRecordsOnThisDisk=this.reader.readInt(2),this.centralDirRecords=this.reader.readInt(2),this.centralDirSize=this.reader.readInt(4),this.centralDirOffset=this.reader.readInt(4),this.zipCommentLength=this.reader.readInt(2),this.zipComment=this.reader.readString(this.zipCommentLength),this.zipComment=k.utf8decode(this.zipComment)},readBlockZip64EndOfCentral:function(){this.zip64EndOfCentralSize=this.reader.readInt(8),this.versionMadeBy=this.reader.readString(2),this.versionNeeded=this.reader.readInt(2),this.diskNumber=this.reader.readInt(4),this.diskWithCentralDirStart=this.reader.readInt(4),this.centralDirRecordsOnThisDisk=this.reader.readInt(8),this.centralDirRecords=this.reader.readInt(8),this.centralDirSize=this.reader.readInt(8),this.centralDirOffset=this.reader.readInt(8),this.zip64ExtensibleData={};for(var 
a,b,c,d=this.zip64EndOfCentralSize-44,e=0;d>e;)a=this.reader.readInt(2),b=this.reader.readInt(4),c=this.reader.readString(b),this.zip64ExtensibleData[a]={id:a,length:b,value:c}},readBlockZip64EndOfCentralLocator:function(){if(this.diskWithZip64CentralDirStart=this.reader.readInt(4),this.relativeOffsetEndOfZip64CentralDir=this.reader.readInt(8),this.disksCount=this.reader.readInt(4),this.disksCount>1)throw new Error("Multi-volumes zip are not supported")},readLocalFiles:function(){var a,b;for(a=0;a<this.files.length;a++)b=this.files[a],this.reader.setIndex(b.localHeaderOffset),this.checkSignature(h.LOCAL_FILE_HEADER),b.readLocalPart(this.reader),b.handleUTF8()},readCentralDir:function(){var a;for(this.reader.setIndex(this.centralDirOffset);this.reader.readString(4)===h.CENTRAL_FILE_HEADER;)a=new i({zip64:this.zip64},this.loadOptions),a.readCentralPart(this.reader),this.files.push(a)},readEndOfCentral:function(){var a=this.reader.lastIndexOfSignature(h.CENTRAL_DIRECTORY_END);if(-1===a)throw new Error("Corrupted zip : can't find end of central directory");if(this.reader.setIndex(a),this.checkSignature(h.CENTRAL_DIRECTORY_END),this.readBlockEndOfCentral(),this.diskNumber===g.MAX_VALUE_16BITS||this.diskWithCentralDirStart===g.MAX_VALUE_16BITS||this.centralDirRecordsOnThisDisk===g.MAX_VALUE_16BITS||this.centralDirRecords===g.MAX_VALUE_16BITS||this.centralDirSize===g.MAX_VALUE_32BITS||this.centralDirOffset===g.MAX_VALUE_32BITS){if(this.zip64=!0,a=this.reader.lastIndexOfSignature(h.ZIP64_CENTRAL_DIRECTORY_LOCATOR),-1===a)throw new Error("Corrupted zip : can't find the ZIP64 end of central directory locator");this.reader.setIndex(a),this.checkSignature(h.ZIP64_CENTRAL_DIRECTORY_LOCATOR),this.readBlockZip64EndOfCentralLocator(),this.reader.setIndex(this.relativeOffsetEndOfZip64CentralDir),this.checkSignature(h.ZIP64_CENTRAL_DIRECTORY_END),this.readBlockZip64EndOfCentral()}},prepareReader:function(a){var 
b=g.getTypeOf(a);this.reader="string"!==b||j.uint8array?"nodebuffer"===b?new e(a):new f(g.transformTo("uint8array",a)):new d(a,this.loadOptions.optimizedBinaryString)},load:function(a){this.prepareReader(a),this.readEndOfCentral(),this.readCentralDir(),this.readLocalFiles()}},b.exports=c},{"./nodeBufferReader":12,"./object":13,"./signature":14,"./stringReader":15,"./support":17,"./uint8ArrayReader":18,"./utils":21,"./zipEntry":23}],23:[function(a,b){"use strict";function c(a,b){this.options=a,this.loadOptions=b}var d=a("./stringReader"),e=a("./utils"),f=a("./compressedObject"),g=a("./object");c.prototype={isEncrypted:function(){return 1===(1&this.bitFlag)},useUTF8:function(){return 2048===(2048&this.bitFlag)},prepareCompressedContent:function(a,b,c){return function(){var d=a.index;a.setIndex(b);var e=a.readData(c);return a.setIndex(d),e}},prepareContent:function(a,b,c,d,f){return function(){var a=e.transformTo(d.uncompressInputType,this.getCompressedContent()),b=d.uncompress(a);if(b.length!==f)throw new Error("Bug : uncompressed data size mismatch");return b}},readLocalPart:function(a){var b,c;if(a.skip(22),this.fileNameLength=a.readInt(2),c=a.readInt(2),this.fileName=a.readString(this.fileNameLength),a.skip(c),-1==this.compressedSize||-1==this.uncompressedSize)throw new Error("Bug or corrupted zip : didn't get enough informations from the central directory (compressedSize == -1 || uncompressedSize == -1)");if(b=e.findCompression(this.compressionMethod),null===b)throw new Error("Corrupted zip : compression "+e.pretty(this.compressionMethod)+" unknown (inner file : "+this.fileName+")");if(this.decompressed=new 
f,this.decompressed.compressedSize=this.compressedSize,this.decompressed.uncompressedSize=this.uncompressedSize,this.decompressed.crc32=this.crc32,this.decompressed.compressionMethod=this.compressionMethod,this.decompressed.getCompressedContent=this.prepareCompressedContent(a,a.index,this.compressedSize,b),this.decompressed.getContent=this.prepareContent(a,a.index,this.compressedSize,b,this.uncompressedSize),this.loadOptions.checkCRC32&&(this.decompressed=e.transformTo("string",this.decompressed.getContent()),g.crc32(this.decompressed)!==this.crc32))throw new Error("Corrupted zip : CRC32 mismatch")},readCentralPart:function(a){if(this.versionMadeBy=a.readString(2),this.versionNeeded=a.readInt(2),this.bitFlag=a.readInt(2),this.compressionMethod=a.readString(2),this.date=a.readDate(),this.crc32=a.readInt(4),this.compressedSize=a.readInt(4),this.uncompressedSize=a.readInt(4),this.fileNameLength=a.readInt(2),this.extraFieldsLength=a.readInt(2),this.fileCommentLength=a.readInt(2),this.diskNumberStart=a.readInt(2),this.internalFileAttributes=a.readInt(2),this.externalFileAttributes=a.readInt(4),this.localHeaderOffset=a.readInt(4),this.isEncrypted())throw new Error("Encrypted zip are not supported");this.fileName=a.readString(this.fileNameLength),this.readExtraFields(a),this.parseZIP64ExtraField(a),this.fileComment=a.readString(this.fileCommentLength),this.dir=16&this.externalFileAttributes?!0:!1},parseZIP64ExtraField:function(){if(this.extraFields[1]){var a=new d(this.extraFields[1].value);this.uncompressedSize===e.MAX_VALUE_32BITS&&(this.uncompressedSize=a.readInt(8)),this.compressedSize===e.MAX_VALUE_32BITS&&(this.compressedSize=a.readInt(8)),this.localHeaderOffset===e.MAX_VALUE_32BITS&&(this.localHeaderOffset=a.readInt(8)),this.diskNumberStart===e.MAX_VALUE_32BITS&&(this.diskNumberStart=a.readInt(4))}},readExtraFields:function(a){var 
b,c,d,e=a.index;for(this.extraFields=this.extraFields||{};a.index<e+this.extraFieldsLength;)b=a.readInt(2),c=a.readInt(2),d=a.readString(c),this.extraFields[b]={id:b,length:c,value:d}},handleUTF8:function(){if(this.useUTF8())this.fileName=g.utf8decode(this.fileName),this.fileComment=g.utf8decode(this.fileComment);else{var a=this.findExtraFieldUnicodePath();null!==a&&(this.fileName=a);var b=this.findExtraFieldUnicodeComment();null!==b&&(this.fileComment=b)}},findExtraFieldUnicodePath:function(){var a=this.extraFields[28789];if(a){var b=new d(a.value);return 1!==b.readInt(1)?null:g.crc32(this.fileName)!==b.readInt(4)?null:g.utf8decode(b.readString(a.length-5))}return null},findExtraFieldUnicodeComment:function(){var a=this.extraFields[25461];if(a){var b=new d(a.value);return 1!==b.readInt(1)?null:g.crc32(this.fileComment)!==b.readInt(4)?null:g.utf8decode(b.readString(a.length-5))}return null}},b.exports=c},{"./compressedObject":2,"./object":13,"./stringReader":15,"./utils":21}],24:[function(a,b){"use strict";var c=a("./lib/utils/common").assign,d=a("./lib/deflate"),e=a("./lib/inflate"),f=a("./lib/zlib/constants"),g={};c(g,d,e,f),b.exports=g},{"./lib/deflate":25,"./lib/inflate":26,"./lib/utils/common":27,"./lib/zlib/constants":30}],25:[function(a,b,c){"use strict";function d(a,b){var c=new s(b);if(c.push(a,!0),c.err)throw c.msg;return c.result}function e(a,b){return b=b||{},b.raw=!0,d(a,b)}function f(a,b){return b=b||{},b.gzip=!0,d(a,b)}var g=a("./zlib/deflate.js"),h=a("./utils/common"),i=a("./utils/strings"),j=a("./zlib/messages"),k=a("./zlib/zstream"),l=0,m=4,n=0,o=1,p=-1,q=0,r=8,s=function(a){this.options=h.assign({level:p,method:r,chunkSize:16384,windowBits:15,memLevel:8,strategy:q,to:""},a||{});var b=this.options;b.raw&&b.windowBits>0?b.windowBits=-b.windowBits:b.gzip&&b.windowBits>0&&b.windowBits<16&&(b.windowBits+=16),this.err=0,this.msg="",this.ended=!1,this.chunks=[],this.strm=new k,this.strm.avail_out=0;var 
/* continues module 25: deflateInit2 of the wrapped stream, then Deflate.push — chunked streaming loop that feeds input and drains chunkSize-sized output buffers until Z_STREAM_END */c=g.deflateInit2(this.strm,b.level,b.method,b.windowBits,b.memLevel,b.strategy);if(c!==n)throw new Error(j[c]);b.header&&g.deflateSetHeader(this.strm,b.header)};s.prototype.push=function(a,b){var c,d,e=this.strm,f=this.options.chunkSize;if(this.ended)return!1;d=b===~~b?b:b===!0?m:l,e.input="string"==typeof a?i.string2buf(a):a,e.next_in=0,e.avail_in=e.input.length;do{if(0===e.avail_out&&(e.output=new h.Buf8(f),e.next_out=0,e.avail_out=f),c=g.deflate(e,d),c!==o&&c!==n)return this.onEnd(c),this.ended=!0,!1;(0===e.avail_out||0===e.avail_in&&d===m)&&this.onData("string"===this.options.to?i.buf2binstring(h.shrinkBuf(e.output,e.next_out)):h.shrinkBuf(e.output,e.next_out))}while((e.avail_in>0||0===e.avail_out)&&c!==o);return d===m?(c=g.deflateEnd(this.strm),this.onEnd(c),this.ended=!0,c===n):!0},s.prototype.onData=function(a){this.chunks.push(a)},s.prototype.onEnd=function(a){a===n&&(this.result="string"===this.options.to?this.chunks.join(""):h.flattenChunks(this.chunks)),this.chunks=[],this.err=a,this.msg=this.strm.msg},c.Deflate=s,c.deflate=d,c.deflateRaw=e,c.gzip=f},{"./utils/common":27,"./utils/strings":28,"./zlib/deflate.js":32,"./zlib/messages":37,"./zlib/zstream":39}],26:[function(a,b,c){"use strict";/* module 26: pako inflate wrapper — Inflate class; windowBits 0 plus the +32 adjustment enables zlib/gzip auto-detection */function d(a,b){var c=new m(b);if(c.push(a,!0),c.err)throw c.msg;return c.result}function e(a,b){return b=b||{},b.raw=!0,d(a,b)}var f=a("./zlib/inflate.js"),g=a("./utils/common"),h=a("./utils/strings"),i=a("./zlib/constants"),j=a("./zlib/messages"),k=a("./zlib/zstream"),l=a("./zlib/gzheader"),m=function(a){this.options=g.assign({chunkSize:16384,windowBits:0,to:""},a||{});var b=this.options;b.raw&&b.windowBits>=0&&b.windowBits<16&&(b.windowBits=-b.windowBits,0===b.windowBits&&(b.windowBits=-15)),!(b.windowBits>=0&&b.windowBits<16)||a&&a.windowBits||(b.windowBits+=32),b.windowBits>15&&b.windowBits<48&&0===(15&b.windowBits)&&(b.windowBits|=15),this.err=0,this.msg="",this.ended=!1,this.chunks=[],this.strm=new k,this.strm.avail_out=0;var 
c=f.inflateInit2(this.strm,b.windowBits);if(c!==i.Z_OK)throw new Error(j[c]);this.header=new l,f.inflateGetHeader(this.strm,this.header)};m.prototype.push=function(a,b){var c,d,e,j,k,l=this.strm,m=this.options.chunkSize;if(this.ended)return!1;d=b===~~b?b:b===!0?i.Z_FINISH:i.Z_NO_FLUSH,l.input="string"==typeof a?h.binstring2buf(a):a,l.next_in=0,l.avail_in=l.input.length;do{if(0===l.avail_out&&(l.output=new g.Buf8(m),l.next_out=0,l.avail_out=m),c=f.inflate(l,i.Z_NO_FLUSH),c!==i.Z_STREAM_END&&c!==i.Z_OK)return this.onEnd(c),this.ended=!0,!1;l.next_out&&(0===l.avail_out||c===i.Z_STREAM_END||0===l.avail_in&&d===i.Z_FINISH)&&("string"===this.options.to?(e=h.utf8border(l.output,l.next_out),j=l.next_out-e,k=h.buf2string(l.output,e),l.next_out=j,l.avail_out=m-j,j&&g.arraySet(l.output,l.output,e,j,0),this.onData(k)):this.onData(g.shrinkBuf(l.output,l.next_out)))}while(l.avail_in>0&&c!==i.Z_STREAM_END);return c===i.Z_STREAM_END&&(d=i.Z_FINISH),d===i.Z_FINISH?(c=f.inflateEnd(this.strm),this.onEnd(c),this.ended=!0,c===i.Z_OK):!0},m.prototype.onData=function(a){this.chunks.push(a)},m.prototype.onEnd=function(a){a===i.Z_OK&&(this.result="string"===this.options.to?this.chunks.join(""):g.flattenChunks(this.chunks)),this.chunks=[],this.err=a,this.msg=this.strm.msg},c.Inflate=m,c.inflate=d,c.inflateRaw=e,c.ungzip=d},{"./utils/common":27,"./utils/strings":28,"./zlib/constants":30,"./zlib/gzheader":33,"./zlib/inflate.js":35,"./zlib/messages":37,"./zlib/zstream":39}],27:[function(a,b,c){"use strict";/* module 27: pako utils/common — assign/shrinkBuf plus arraySet/flattenChunks in typed-array and plain-array flavors, chosen by setTyped below */var d="undefined"!=typeof Uint8Array&&"undefined"!=typeof Uint16Array&&"undefined"!=typeof Int32Array;c.assign=function(a){for(var b=Array.prototype.slice.call(arguments,1);b.length;){var c=b.shift();if(c){if("object"!=typeof c)throw new TypeError(c+"must be non-object");for(var d in c)c.hasOwnProperty(d)&&(a[d]=c[d])}}return a},c.shrinkBuf=function(a,b){return a.length===b?a:a.subarray?a.subarray(0,b):(a.length=b,a)};var e={arraySet:function(a,b,c,d,e){if(b.subarray&&a.subarray)return void 
/* continues module 27: typed-array fast path for arraySet/flattenChunks, untyped fallback, and setTyped switch */a.set(b.subarray(c,c+d),e);for(var f=0;d>f;f++)a[e+f]=b[c+f]},flattenChunks:function(a){var b,c,d,e,f,g;for(d=0,b=0,c=a.length;c>b;b++)d+=a[b].length;for(g=new Uint8Array(d),e=0,b=0,c=a.length;c>b;b++)f=a[b],g.set(f,e),e+=f.length;return g}},f={arraySet:function(a,b,c,d,e){for(var f=0;d>f;f++)a[e+f]=b[c+f]},flattenChunks:function(a){return[].concat.apply([],a)}};c.setTyped=function(a){a?(c.Buf8=Uint8Array,c.Buf16=Uint16Array,c.Buf32=Int32Array,c.assign(c,e)):(c.Buf8=Array,c.Buf16=Array,c.Buf32=Array,c.assign(c,f))},c.setTyped(d)},{}],28:[function(a,b,c){"use strict";/* module 28: utils/strings — UTF-8 encode/decode between JS strings and byte buffers, binary-string conversions, and utf8border for safe chunk splitting */function d(a,b){if(65537>b&&(a.subarray&&g||!a.subarray&&f))return String.fromCharCode.apply(null,e.shrinkBuf(a,b));for(var c="",d=0;b>d;d++)c+=String.fromCharCode(a[d]);return c}var e=a("./common"),f=!0,g=!0;try{String.fromCharCode.apply(null,[0])}catch(h){f=!1}try{String.fromCharCode.apply(null,new Uint8Array(1))}catch(h){g=!1}for(var i=new e.Buf8(256),j=0;256>j;j++)i[j]=j>=252?6:j>=248?5:j>=240?4:j>=224?3:j>=192?2:1;i[254]=i[254]=1,c.string2buf=function(a){var b,c,d,f,g,h=a.length,i=0;for(f=0;h>f;f++)c=a.charCodeAt(f),55296===(64512&c)&&h>f+1&&(d=a.charCodeAt(f+1),56320===(64512&d)&&(c=65536+(c-55296<<10)+(d-56320),f++)),i+=128>c?1:2048>c?2:65536>c?3:4;for(b=new e.Buf8(i),g=0,f=0;i>g;f++)c=a.charCodeAt(f),55296===(64512&c)&&h>f+1&&(d=a.charCodeAt(f+1),56320===(64512&d)&&(c=65536+(c-55296<<10)+(d-56320),f++)),128>c?b[g++]=c:2048>c?(b[g++]=192|c>>>6,b[g++]=128|63&c):65536>c?(b[g++]=224|c>>>12,b[g++]=128|c>>>6&63,b[g++]=128|63&c):(b[g++]=240|c>>>18,b[g++]=128|c>>>12&63,b[g++]=128|c>>>6&63,b[g++]=128|63&c);return b},c.buf2binstring=function(a){return d(a,a.length)},c.binstring2buf=function(a){for(var b=new e.Buf8(a.length),c=0,d=b.length;d>c;c++)b[c]=a.charCodeAt(c);return b},c.buf2string=function(a,b){var c,e,f,g,h=b||a.length,j=new Array(2*h);for(e=0,c=0;h>c;)if(f=a[c++],128>f)j[e++]=f;else 
if(g=i[f],g>4)j[e++]=65533,c+=g-1;else{for(f&=2===g?31:3===g?15:7;g>1&&h>c;)f=f<<6|63&a[c++],g--;g>1?j[e++]=65533:65536>f?j[e++]=f:(f-=65536,j[e++]=55296|f>>10&1023,j[e++]=56320|1023&f)}return d(j,e)},c.utf8border=function(a,b){var c;for(b=b||a.length,b>a.length&&(b=a.length),c=b-1;c>=0&&128===(192&a[c]);)c--;return 0>c?b:0===c?b:c+i[a[c]]>b?c:b}},{"./common":27}],29:[function(a,b){"use strict";/* module 29: adler32 checksum (mod-65521 pair folded every 2000 bytes to avoid 32-bit overflow) */function c(a,b,c,d){for(var e=65535&a|0,f=a>>>16&65535|0,g=0;0!==c;){g=c>2e3?2e3:c,c-=g;do e=e+b[d++]|0,f=f+e|0;while(--g);e%=65521,f%=65521}return e|f<<16|0}b.exports=c},{}],30:[function(a,b){/* module 30: zlib flush/return/strategy constants */b.exports={Z_NO_FLUSH:0,Z_PARTIAL_FLUSH:1,Z_SYNC_FLUSH:2,Z_FULL_FLUSH:3,Z_FINISH:4,Z_BLOCK:5,Z_TREES:6,Z_OK:0,Z_STREAM_END:1,Z_NEED_DICT:2,Z_ERRNO:-1,Z_STREAM_ERROR:-2,Z_DATA_ERROR:-3,Z_BUF_ERROR:-5,Z_NO_COMPRESSION:0,Z_BEST_SPEED:1,Z_BEST_COMPRESSION:9,Z_DEFAULT_COMPRESSION:-1,Z_FILTERED:1,Z_HUFFMAN_ONLY:2,Z_RLE:3,Z_FIXED:4,Z_DEFAULT_STRATEGY:0,Z_BINARY:0,Z_TEXT:1,Z_UNKNOWN:2,Z_DEFLATED:8}},{}],31:[function(a,b){"use strict";/* module 31: crc32 — lazily built 256-entry table, standard reflected polynomial 0xEDB88320 */function c(){for(var a,b=[],c=0;256>c;c++){a=c;for(var d=0;8>d;d++)a=1&a?3988292384^a>>>1:a>>>1;b[c]=a}return b}function d(a,b,c,d){var f=e,g=d+c;a=-1^a;for(var h=d;g>h;h++)a=a>>>8^f[255&(a^b[h])];return-1^a}var e=c();b.exports=d},{}],32:[function(a,b,c){"use strict";/* module 32: zlib deflate core — helpers d..k: error set, rank, zero-fill, flush_pending, flush_block, put byte/short, read_buf */function d(a,b){return a.msg=G[b],b}function e(a){return(a<<1)-(a>4?9:0)}function f(a){for(var b=a.length;--b>=0;)a[b]=0}function g(a){var b=a.state,c=b.pending;c>a.avail_out&&(c=a.avail_out),0!==c&&(C.arraySet(a.output,b.pending_buf,b.pending_out,c,a.next_out),a.next_out+=c,b.pending_out+=c,a.total_out+=c,a.avail_out-=c,b.pending-=c,0===b.pending&&(b.pending_out=0))}function h(a,b){D._tr_flush_block(a,a.block_start>=0?a.block_start:-1,a.strstart-a.block_start,b),a.block_start=a.strstart,g(a.strm)}function i(a,b){a.pending_buf[a.pending++]=b}function j(a,b){a.pending_buf[a.pending++]=b>>>8&255,a.pending_buf[a.pending++]=255&b}function k(a,b,c,d){var e=a.avail_in;return 
/* continues module 32: read_buf tail (updates adler/crc per wrap mode), longest_match (l), fill_window (m), then the per-level strategies: deflate_stored (n), deflate_fast (o), deflate_slow (p), deflate_rle (q), deflate_huff (r), and lm_init (s) */e>d&&(e=d),0===e?0:(a.avail_in-=e,C.arraySet(b,a.input,a.next_in,e,c),1===a.state.wrap?a.adler=E(a.adler,b,e,c):2===a.state.wrap&&(a.adler=F(a.adler,b,e,c)),a.next_in+=e,a.total_in+=e,e)}function l(a,b){var c,d,e=a.max_chain_length,f=a.strstart,g=a.prev_length,h=a.nice_match,i=a.strstart>a.w_size-jb?a.strstart-(a.w_size-jb):0,j=a.window,k=a.w_mask,l=a.prev,m=a.strstart+ib,n=j[f+g-1],o=j[f+g];a.prev_length>=a.good_match&&(e>>=2),h>a.lookahead&&(h=a.lookahead);do if(c=b,j[c+g]===o&&j[c+g-1]===n&&j[c]===j[f]&&j[++c]===j[f+1]){f+=2,c++;do;while(j[++f]===j[++c]&&j[++f]===j[++c]&&j[++f]===j[++c]&&j[++f]===j[++c]&&j[++f]===j[++c]&&j[++f]===j[++c]&&j[++f]===j[++c]&&j[++f]===j[++c]&&m>f);if(d=ib-(m-f),f=m-ib,d>g){if(a.match_start=b,g=d,d>=h)break;n=j[f+g-1],o=j[f+g]}}while((b=l[b&k])>i&&0!==--e);return g<=a.lookahead?g:a.lookahead}function m(a){var b,c,d,e,f,g=a.w_size;do{if(e=a.window_size-a.lookahead-a.strstart,a.strstart>=g+(g-jb)){C.arraySet(a.window,a.window,g,g,0),a.match_start-=g,a.strstart-=g,a.block_start-=g,c=a.hash_size,b=c;do d=a.head[--b],a.head[b]=d>=g?d-g:0;while(--c);c=g,b=c;do d=a.prev[--b],a.prev[b]=d>=g?d-g:0;while(--c);e+=g}if(0===a.strm.avail_in)break;if(c=k(a.strm,a.window,a.strstart+a.lookahead,e),a.lookahead+=c,a.lookahead+a.insert>=hb)for(f=a.strstart-a.insert,a.ins_h=a.window[f],a.ins_h=(a.ins_h<<a.hash_shift^a.window[f+1])&a.hash_mask;a.insert&&(a.ins_h=(a.ins_h<<a.hash_shift^a.window[f+hb-1])&a.hash_mask,a.prev[f&a.w_mask]=a.head[a.ins_h],a.head[a.ins_h]=f,f++,a.insert--,!(a.lookahead+a.insert<hb)););}while(a.lookahead<jb&&0!==a.strm.avail_in)}function n(a,b){var c=65535;for(c>a.pending_buf_size-5&&(c=a.pending_buf_size-5);;){if(a.lookahead<=1){if(m(a),0===a.lookahead&&b===H)return sb;if(0===a.lookahead)break}a.strstart+=a.lookahead,a.lookahead=0;var d=a.block_start+c;if((0===a.strstart||a.strstart>=d)&&(a.lookahead=a.strstart-d,a.strstart=d,h(a,!1),0===a.strm.avail_out))return 
sb;if(a.strstart-a.block_start>=a.w_size-jb&&(h(a,!1),0===a.strm.avail_out))return sb}return a.insert=0,b===K?(h(a,!0),0===a.strm.avail_out?ub:vb):a.strstart>a.block_start&&(h(a,!1),0===a.strm.avail_out)?sb:sb}function o(a,b){for(var c,d;;){if(a.lookahead<jb){if(m(a),a.lookahead<jb&&b===H)return sb;if(0===a.lookahead)break}if(c=0,a.lookahead>=hb&&(a.ins_h=(a.ins_h<<a.hash_shift^a.window[a.strstart+hb-1])&a.hash_mask,c=a.prev[a.strstart&a.w_mask]=a.head[a.ins_h],a.head[a.ins_h]=a.strstart),0!==c&&a.strstart-c<=a.w_size-jb&&(a.match_length=l(a,c)),a.match_length>=hb)if(d=D._tr_tally(a,a.strstart-a.match_start,a.match_length-hb),a.lookahead-=a.match_length,a.match_length<=a.max_lazy_match&&a.lookahead>=hb){a.match_length--;do a.strstart++,a.ins_h=(a.ins_h<<a.hash_shift^a.window[a.strstart+hb-1])&a.hash_mask,c=a.prev[a.strstart&a.w_mask]=a.head[a.ins_h],a.head[a.ins_h]=a.strstart;while(0!==--a.match_length);a.strstart++}else a.strstart+=a.match_length,a.match_length=0,a.ins_h=a.window[a.strstart],a.ins_h=(a.ins_h<<a.hash_shift^a.window[a.strstart+1])&a.hash_mask;else d=D._tr_tally(a,0,a.window[a.strstart]),a.lookahead--,a.strstart++;if(d&&(h(a,!1),0===a.strm.avail_out))return sb}return a.insert=a.strstart<hb-1?a.strstart:hb-1,b===K?(h(a,!0),0===a.strm.avail_out?ub:vb):a.last_lit&&(h(a,!1),0===a.strm.avail_out)?sb:tb}function p(a,b){for(var c,d,e;;){if(a.lookahead<jb){if(m(a),a.lookahead<jb&&b===H)return 
sb;if(0===a.lookahead)break}if(c=0,a.lookahead>=hb&&(a.ins_h=(a.ins_h<<a.hash_shift^a.window[a.strstart+hb-1])&a.hash_mask,c=a.prev[a.strstart&a.w_mask]=a.head[a.ins_h],a.head[a.ins_h]=a.strstart),a.prev_length=a.match_length,a.prev_match=a.match_start,a.match_length=hb-1,0!==c&&a.prev_length<a.max_lazy_match&&a.strstart-c<=a.w_size-jb&&(a.match_length=l(a,c),a.match_length<=5&&(a.strategy===S||a.match_length===hb&&a.strstart-a.match_start>4096)&&(a.match_length=hb-1)),a.prev_length>=hb&&a.match_length<=a.prev_length){e=a.strstart+a.lookahead-hb,d=D._tr_tally(a,a.strstart-1-a.prev_match,a.prev_length-hb),a.lookahead-=a.prev_length-1,a.prev_length-=2;do++a.strstart<=e&&(a.ins_h=(a.ins_h<<a.hash_shift^a.window[a.strstart+hb-1])&a.hash_mask,c=a.prev[a.strstart&a.w_mask]=a.head[a.ins_h],a.head[a.ins_h]=a.strstart);while(0!==--a.prev_length);if(a.match_available=0,a.match_length=hb-1,a.strstart++,d&&(h(a,!1),0===a.strm.avail_out))return sb}else if(a.match_available){if(d=D._tr_tally(a,0,a.window[a.strstart-1]),d&&h(a,!1),a.strstart++,a.lookahead--,0===a.strm.avail_out)return sb}else a.match_available=1,a.strstart++,a.lookahead--}return a.match_available&&(d=D._tr_tally(a,0,a.window[a.strstart-1]),a.match_available=0),a.insert=a.strstart<hb-1?a.strstart:hb-1,b===K?(h(a,!0),0===a.strm.avail_out?ub:vb):a.last_lit&&(h(a,!1),0===a.strm.avail_out)?sb:tb}function q(a,b){for(var c,d,e,f,g=a.window;;){if(a.lookahead<=ib){if(m(a),a.lookahead<=ib&&b===H)return 
sb;if(0===a.lookahead)break}if(a.match_length=0,a.lookahead>=hb&&a.strstart>0&&(e=a.strstart-1,d=g[e],d===g[++e]&&d===g[++e]&&d===g[++e])){f=a.strstart+ib;do;while(d===g[++e]&&d===g[++e]&&d===g[++e]&&d===g[++e]&&d===g[++e]&&d===g[++e]&&d===g[++e]&&d===g[++e]&&f>e);a.match_length=ib-(f-e),a.match_length>a.lookahead&&(a.match_length=a.lookahead)}if(a.match_length>=hb?(c=D._tr_tally(a,1,a.match_length-hb),a.lookahead-=a.match_length,a.strstart+=a.match_length,a.match_length=0):(c=D._tr_tally(a,0,a.window[a.strstart]),a.lookahead--,a.strstart++),c&&(h(a,!1),0===a.strm.avail_out))return sb}return a.insert=0,b===K?(h(a,!0),0===a.strm.avail_out?ub:vb):a.last_lit&&(h(a,!1),0===a.strm.avail_out)?sb:tb}function r(a,b){for(var c;;){if(0===a.lookahead&&(m(a),0===a.lookahead)){if(b===H)return sb;break}if(a.match_length=0,c=D._tr_tally(a,0,a.window[a.strstart]),a.lookahead--,a.strstart++,c&&(h(a,!1),0===a.strm.avail_out))return sb}return a.insert=0,b===K?(h(a,!0),0===a.strm.avail_out?ub:vb):a.last_lit&&(h(a,!1),0===a.strm.avail_out)?sb:tb}function s(a){a.window_size=2*a.w_size,f(a.head),a.max_lazy_match=B[a.level].max_lazy,a.good_match=B[a.level].good_length,a.nice_match=B[a.level].nice_length,a.max_chain_length=B[a.level].max_chain,a.strstart=0,a.block_start=0,a.lookahead=0,a.insert=0,a.match_length=a.prev_length=hb-1,a.match_available=0,a.ins_h=0}function 
/* continues module 32: DeflateState ctor (t), deflateResetKeep/Reset (u,v), deflateSetHeader (w), deflateInit2 (x) and deflateInit (y), the main deflate() state machine (z: gzip/zlib header emission, strategy dispatch, trailer), deflateEnd (A), and the per-level config table B; then module 33 (GZheader) and the start of module 34 (inffast) */t(){this.strm=null,this.status=0,this.pending_buf=null,this.pending_buf_size=0,this.pending_out=0,this.pending=0,this.wrap=0,this.gzhead=null,this.gzindex=0,this.method=Y,this.last_flush=-1,this.w_size=0,this.w_bits=0,this.w_mask=0,this.window=null,this.window_size=0,this.prev=null,this.head=null,this.ins_h=0,this.hash_size=0,this.hash_bits=0,this.hash_mask=0,this.hash_shift=0,this.block_start=0,this.match_length=0,this.prev_match=0,this.match_available=0,this.strstart=0,this.match_start=0,this.lookahead=0,this.prev_length=0,this.max_chain_length=0,this.max_lazy_match=0,this.level=0,this.strategy=0,this.good_match=0,this.nice_match=0,this.dyn_ltree=new C.Buf16(2*fb),this.dyn_dtree=new C.Buf16(2*(2*db+1)),this.bl_tree=new C.Buf16(2*(2*eb+1)),f(this.dyn_ltree),f(this.dyn_dtree),f(this.bl_tree),this.l_desc=null,this.d_desc=null,this.bl_desc=null,this.bl_count=new C.Buf16(gb+1),this.heap=new C.Buf16(2*cb+1),f(this.heap),this.heap_len=0,this.heap_max=0,this.depth=new C.Buf16(2*cb+1),f(this.depth),this.l_buf=0,this.lit_bufsize=0,this.last_lit=0,this.d_buf=0,this.opt_len=0,this.static_len=0,this.matches=0,this.insert=0,this.bi_buf=0,this.bi_valid=0}function u(a){var b;return a&&a.state?(a.total_in=a.total_out=0,a.data_type=X,b=a.state,b.pending=0,b.pending_out=0,b.wrap<0&&(b.wrap=-b.wrap),b.status=b.wrap?lb:qb,a.adler=2===b.wrap?0:1,b.last_flush=H,D._tr_init(b),M):d(a,O)}function v(a){var b=u(a);return b===M&&s(a.state),b}function w(a,b){return a&&a.state?2!==a.state.wrap?O:(a.state.gzhead=b,M):O}function x(a,b,c,e,f,g){if(!a)return O;var h=1;if(b===R&&(b=6),0>e?(h=0,e=-e):e>15&&(h=2,e-=16),1>f||f>Z||c!==Y||8>e||e>15||0>b||b>9||0>g||g>V)return d(a,O);8===e&&(e=9);var i=new t;return a.state=i,i.strm=a,i.wrap=h,i.gzhead=null,i.w_bits=e,i.w_size=1<<i.w_bits,i.w_mask=i.w_size-1,i.hash_bits=f+7,i.hash_size=1<<i.hash_bits,i.hash_mask=i.hash_size-1,i.hash_shift=~~((i.hash_bits+hb-1)/hb),i.window=new C.Buf8(2*i.w_size),i.head=new C.Buf16(i.hash_size),i.prev=new 
C.Buf16(i.w_size),i.lit_bufsize=1<<f+6,i.pending_buf_size=4*i.lit_bufsize,i.pending_buf=new C.Buf8(i.pending_buf_size),i.d_buf=i.lit_bufsize>>1,i.l_buf=3*i.lit_bufsize,i.level=b,i.strategy=g,i.method=c,v(a)}function y(a,b){return x(a,b,Y,$,_,W)}function z(a,b){var c,h,k,l;if(!a||!a.state||b>L||0>b)return a?d(a,O):O;if(h=a.state,!a.output||!a.input&&0!==a.avail_in||h.status===rb&&b!==K)return d(a,0===a.avail_out?Q:O);if(h.strm=a,c=h.last_flush,h.last_flush=b,h.status===lb)if(2===h.wrap)a.adler=0,i(h,31),i(h,139),i(h,8),h.gzhead?(i(h,(h.gzhead.text?1:0)+(h.gzhead.hcrc?2:0)+(h.gzhead.extra?4:0)+(h.gzhead.name?8:0)+(h.gzhead.comment?16:0)),i(h,255&h.gzhead.time),i(h,h.gzhead.time>>8&255),i(h,h.gzhead.time>>16&255),i(h,h.gzhead.time>>24&255),i(h,9===h.level?2:h.strategy>=T||h.level<2?4:0),i(h,255&h.gzhead.os),h.gzhead.extra&&h.gzhead.extra.length&&(i(h,255&h.gzhead.extra.length),i(h,h.gzhead.extra.length>>8&255)),h.gzhead.hcrc&&(a.adler=F(a.adler,h.pending_buf,h.pending,0)),h.gzindex=0,h.status=mb):(i(h,0),i(h,0),i(h,0),i(h,0),i(h,0),i(h,9===h.level?2:h.strategy>=T||h.level<2?4:0),i(h,wb),h.status=qb);else{var m=Y+(h.w_bits-8<<4)<<8,n=-1;n=h.strategy>=T||h.level<2?0:h.level<6?1:6===h.level?2:3,m|=n<<6,0!==h.strstart&&(m|=kb),m+=31-m%31,h.status=qb,j(h,m),0!==h.strstart&&(j(h,a.adler>>>16),j(h,65535&a.adler)),a.adler=1}if(h.status===mb)if(h.gzhead.extra){for(k=h.pending;h.gzindex<(65535&h.gzhead.extra.length)&&(h.pending!==h.pending_buf_size||(h.gzhead.hcrc&&h.pending>k&&(a.adler=F(a.adler,h.pending_buf,h.pending-k,k)),g(a),k=h.pending,h.pending!==h.pending_buf_size));)i(h,255&h.gzhead.extra[h.gzindex]),h.gzindex++;h.gzhead.hcrc&&h.pending>k&&(a.adler=F(a.adler,h.pending_buf,h.pending-k,k)),h.gzindex===h.gzhead.extra.length&&(h.gzindex=0,h.status=nb)}else 
h.status=nb;if(h.status===nb)if(h.gzhead.name){k=h.pending;do{if(h.pending===h.pending_buf_size&&(h.gzhead.hcrc&&h.pending>k&&(a.adler=F(a.adler,h.pending_buf,h.pending-k,k)),g(a),k=h.pending,h.pending===h.pending_buf_size)){l=1;break}l=h.gzindex<h.gzhead.name.length?255&h.gzhead.name.charCodeAt(h.gzindex++):0,i(h,l)}while(0!==l);h.gzhead.hcrc&&h.pending>k&&(a.adler=F(a.adler,h.pending_buf,h.pending-k,k)),0===l&&(h.gzindex=0,h.status=ob)}else h.status=ob;if(h.status===ob)if(h.gzhead.comment){k=h.pending;do{if(h.pending===h.pending_buf_size&&(h.gzhead.hcrc&&h.pending>k&&(a.adler=F(a.adler,h.pending_buf,h.pending-k,k)),g(a),k=h.pending,h.pending===h.pending_buf_size)){l=1;break}l=h.gzindex<h.gzhead.comment.length?255&h.gzhead.comment.charCodeAt(h.gzindex++):0,i(h,l)}while(0!==l);h.gzhead.hcrc&&h.pending>k&&(a.adler=F(a.adler,h.pending_buf,h.pending-k,k)),0===l&&(h.status=pb)}else h.status=pb;if(h.status===pb&&(h.gzhead.hcrc?(h.pending+2>h.pending_buf_size&&g(a),h.pending+2<=h.pending_buf_size&&(i(h,255&a.adler),i(h,a.adler>>8&255),a.adler=0,h.status=qb)):h.status=qb),0!==h.pending){if(g(a),0===a.avail_out)return h.last_flush=-1,M}else if(0===a.avail_in&&e(b)<=e(c)&&b!==K)return d(a,Q);if(h.status===rb&&0!==a.avail_in)return d(a,Q);if(0!==a.avail_in||0!==h.lookahead||b!==H&&h.status!==rb){var o=h.strategy===T?r(h,b):h.strategy===U?q(h,b):B[h.level].func(h,b);if((o===ub||o===vb)&&(h.status=rb),o===sb||o===ub)return 0===a.avail_out&&(h.last_flush=-1),M;if(o===tb&&(b===I?D._tr_align(h):b!==L&&(D._tr_stored_block(h,0,0,!1),b===J&&(f(h.head),0===h.lookahead&&(h.strstart=0,h.block_start=0,h.insert=0))),g(a),0===a.avail_out))return h.last_flush=-1,M}return b!==K?M:h.wrap<=0?N:(2===h.wrap?(i(h,255&a.adler),i(h,a.adler>>8&255),i(h,a.adler>>16&255),i(h,a.adler>>24&255),i(h,255&a.total_in),i(h,a.total_in>>8&255),i(h,a.total_in>>16&255),i(h,a.total_in>>24&255)):(j(h,a.adler>>>16),j(h,65535&a.adler)),g(a),h.wrap>0&&(h.wrap=-h.wrap),0!==h.pending?M:N)}function A(a){var b;return 
a&&a.state?(b=a.state.status,b!==lb&&b!==mb&&b!==nb&&b!==ob&&b!==pb&&b!==qb&&b!==rb?d(a,O):(a.state=null,b===qb?d(a,P):M)):O}var B,C=a("../utils/common"),D=a("./trees"),E=a("./adler32"),F=a("./crc32"),G=a("./messages"),H=0,I=1,J=3,K=4,L=5,M=0,N=1,O=-2,P=-3,Q=-5,R=-1,S=1,T=2,U=3,V=4,W=0,X=2,Y=8,Z=9,$=15,_=8,ab=29,bb=256,cb=bb+1+ab,db=30,eb=19,fb=2*cb+1,gb=15,hb=3,ib=258,jb=ib+hb+1,kb=32,lb=42,mb=69,nb=73,ob=91,pb=103,qb=113,rb=666,sb=1,tb=2,ub=3,vb=4,wb=3,xb=function(a,b,c,d,e){this.good_length=a,this.max_lazy=b,this.nice_length=c,this.max_chain=d,this.func=e};B=[new xb(0,0,0,0,n),new xb(4,4,8,4,o),new xb(4,5,16,8,o),new xb(4,6,32,32,o),new xb(4,4,16,16,p),new xb(8,16,32,32,p),new xb(8,16,128,128,p),new xb(8,32,128,256,p),new xb(32,128,258,1024,p),new xb(32,258,258,4096,p)],c.deflateInit=y,c.deflateInit2=x,c.deflateReset=v,c.deflateResetKeep=u,c.deflateSetHeader=w,c.deflate=z,c.deflateEnd=A,c.deflateInfo="pako deflate (from Nodeca project)"},{"../utils/common":27,"./adler32":29,"./crc32":31,"./messages":37,"./trees":38}],33:[function(a,b){"use strict";/* module 33: GZheader — plain holder for gzip header fields */function c(){this.text=0,this.time=0,this.xflags=0,this.os=0,this.extra=null,this.extra_len=0,this.name="",this.comment="",this.hcrc=0,this.done=!1}b.exports=c},{}],34:[function(a,b){"use strict";/* module 34: inffast — unrolled fast inflate loop, used when enough input/output space is guaranteed */var c=30,d=12;b.exports=function(a,b){var e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u,v,w,x,y,z,A,B,C;e=a.state,f=a.next_in,B=a.input,g=f+(a.avail_in-5),h=a.next_out,C=a.output,i=h-(b-a.avail_out),j=h+(a.avail_out-257),k=e.dmax,l=e.wsize,m=e.whave,n=e.wnext,o=e.window,p=e.hold,q=e.bits,r=e.lencode,s=e.distcode,t=(1<<e.lenbits)-1,u=(1<<e.distbits)-1;a:do{15>q&&(p+=B[f++]<<q,q+=8,p+=B[f++]<<q,q+=8),v=r[p&t];b:for(;;){if(w=v>>>24,p>>>=w,q-=w,w=v>>>16&255,0===w)C[h++]=65535&v;else{if(!(16&w)){if(0===(64&w)){v=r[(65535&v)+(p&(1<<w)-1)];continue b}if(32&w){e.mode=d;break a}a.msg="invalid literal/length code",e.mode=c;break 
/* continues module 34: length/distance decode with sliding-window copy; then module 35 start */a}x=65535&v,w&=15,w&&(w>q&&(p+=B[f++]<<q,q+=8),x+=p&(1<<w)-1,p>>>=w,q-=w),15>q&&(p+=B[f++]<<q,q+=8,p+=B[f++]<<q,q+=8),v=s[p&u];c:for(;;){if(w=v>>>24,p>>>=w,q-=w,w=v>>>16&255,!(16&w)){if(0===(64&w)){v=s[(65535&v)+(p&(1<<w)-1)];continue c}a.msg="invalid distance code",e.mode=c;break a}if(y=65535&v,w&=15,w>q&&(p+=B[f++]<<q,q+=8,w>q&&(p+=B[f++]<<q,q+=8)),y+=p&(1<<w)-1,y>k){a.msg="invalid distance too far back",e.mode=c;break a}if(p>>>=w,q-=w,w=h-i,y>w){if(w=y-w,w>m&&e.correct){a.msg="invalid distance too far back",e.mode=c;break a}if(z=0,A=o,0===n){if(z+=l-w,x>w){x-=w;do C[h++]=o[z++];while(--w);z=h-y,A=C}}else if(w>n){if(z+=l+n-w,w-=n,x>w){x-=w;do C[h++]=o[z++];while(--w);if(z=0,x>n){w=n,x-=w;do C[h++]=o[z++];while(--w);z=h-y,A=C}}}else if(z+=n-w,x>w){x-=w;do C[h++]=o[z++];while(--w);z=h-y,A=C}for(;x>2;)C[h++]=A[z++],C[h++]=A[z++],C[h++]=A[z++],x-=3;x&&(C[h++]=A[z++],x>1&&(C[h++]=A[z++]))}else{z=h-y;do C[h++]=C[z++],C[h++]=C[z++],C[h++]=C[z++],x-=3;while(x>2);x&&(C[h++]=C[z++],x>1&&(C[h++]=C[z++]))}break}}break}}while(g>f&&j>h);x=q>>3,f-=x,q-=x<<3,p&=(1<<q)-1,a.next_in=f,a.next_out=h,a.avail_in=g>f?5+(g-f):5-(f-g),a.avail_out=j>h?257+(j-h):257-(h-j),e.hold=p,e.bits=q}},{}],35:[function(a,b,c){"use strict";/* module 35: zlib inflate core — byte-swap helper (d), InflateState (e), resetKeep/reset/reset2/init (f,g,h,i,j), fixedtables (k), window update (l), and the big inflate() state machine (m) */function d(a){return(a>>>24&255)+(a>>>8&65280)+((65280&a)<<8)+((255&a)<<24)}function e(){this.mode=0,this.last=!1,this.wrap=0,this.havedict=!1,this.flags=0,this.dmax=0,this.check=0,this.total=0,this.head=null,this.wbits=0,this.wsize=0,this.whave=0,this.wnext=0,this.window=null,this.hold=0,this.bits=0,this.length=0,this.offset=0,this.extra=0,this.lencode=null,this.distcode=null,this.lenbits=0,this.distbits=0,this.ncode=0,this.nlen=0,this.ndist=0,this.have=0,this.next=null,this.lens=new r.Buf16(320),this.work=new r.Buf16(288),this.lendyn=null,this.distdyn=null,this.correct=0,this.back=0,this.was=0}function f(a){var b;return 
a&&a.state?(b=a.state,a.total_in=a.total_out=b.total=0,a.msg="",b.wrap&&(a.adler=1&b.wrap),b.mode=K,b.last=0,b.havedict=0,b.dmax=32768,b.head=null,b.hold=0,b.bits=0,b.lencode=b.lendyn=new r.Buf32(ob),b.distcode=b.distdyn=new r.Buf32(pb),b.correct=1,b.back=-1,C):F}function g(a){var b;return a&&a.state?(b=a.state,b.wsize=0,b.whave=0,b.wnext=0,f(a)):F}function h(a,b){var c,d;return a&&a.state?(d=a.state,0>b?(c=0,b=-b):(c=(b>>4)+1,48>b&&(b&=15)),b&&(8>b||b>15)?F:(null!==d.window&&d.wbits!==b&&(d.window=null),d.wrap=c,d.wbits=b,g(a))):F}function i(a,b){var c,d;return a?(d=new e,a.state=d,d.window=null,c=h(a,b),c!==C&&(a.state=null),c):F}function j(a){return i(a,rb)}function k(a){if(sb){var b;for(p=new r.Buf32(512),q=new r.Buf32(32),b=0;144>b;)a.lens[b++]=8;for(;256>b;)a.lens[b++]=9;for(;280>b;)a.lens[b++]=7;for(;288>b;)a.lens[b++]=8;for(v(x,a.lens,0,288,p,0,a.work,{bits:9}),b=0;32>b;)a.lens[b++]=5;v(y,a.lens,0,32,q,0,a.work,{bits:5}),sb=!1}a.lencode=p,a.lenbits=9,a.distcode=q,a.distbits=5}function l(a,b,c,d){var e,f=a.state;return null===f.window&&(f.wsize=1<<f.wbits,f.wnext=0,f.whave=0,f.window=new r.Buf8(f.wsize)),d>=f.wsize?(r.arraySet(f.window,b,c-f.wsize,f.wsize,0),f.wnext=0,f.whave=f.wsize):(e=f.wsize-f.wnext,e>d&&(e=d),r.arraySet(f.window,b,c-d,e,f.wnext),d-=e,d?(r.arraySet(f.window,b,c-d,d,0),f.wnext=d,f.whave=f.wsize):(f.wnext+=e,f.wnext===f.wsize&&(f.wnext=0),f.whave<f.wsize&&(f.whave+=e))),0}function m(a,b){var c,e,f,g,h,i,j,m,n,o,p,q,ob,pb,qb,rb,sb,tb,ub,vb,wb,xb,yb,zb,Ab=0,Bb=new r.Buf8(4),Cb=[16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15];if(!a||!a.state||!a.output||!a.input&&0!==a.avail_in)return F;c=a.state,c.mode===V&&(c.mode=W),h=a.next_out,f=a.output,j=a.avail_out,g=a.next_in,e=a.input,i=a.avail_in,m=c.hold,n=c.bits,o=i,p=j,xb=C;a:for(;;)switch(c.mode){case K:if(0===c.wrap){c.mode=W;break}for(;16>n;){if(0===i)break 
a;i--,m+=e[g++]<<n,n+=8}if(2&c.wrap&&35615===m){c.check=0,Bb[0]=255&m,Bb[1]=m>>>8&255,c.check=t(c.check,Bb,2,0),m=0,n=0,c.mode=L;break}if(c.flags=0,c.head&&(c.head.done=!1),!(1&c.wrap)||(((255&m)<<8)+(m>>8))%31){a.msg="incorrect header check",c.mode=lb;break}if((15&m)!==J){a.msg="unknown compression method",c.mode=lb;break}if(m>>>=4,n-=4,wb=(15&m)+8,0===c.wbits)c.wbits=wb;else if(wb>c.wbits){a.msg="invalid window size",c.mode=lb;break}c.dmax=1<<wb,a.adler=c.check=1,c.mode=512&m?T:V,m=0,n=0;break;case L:for(;16>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(c.flags=m,(255&c.flags)!==J){a.msg="unknown compression method",c.mode=lb;break}if(57344&c.flags){a.msg="unknown header flags set",c.mode=lb;break}c.head&&(c.head.text=m>>8&1),512&c.flags&&(Bb[0]=255&m,Bb[1]=m>>>8&255,c.check=t(c.check,Bb,2,0)),m=0,n=0,c.mode=M;case M:for(;32>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}c.head&&(c.head.time=m),512&c.flags&&(Bb[0]=255&m,Bb[1]=m>>>8&255,Bb[2]=m>>>16&255,Bb[3]=m>>>24&255,c.check=t(c.check,Bb,4,0)),m=0,n=0,c.mode=N;case N:for(;16>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}c.head&&(c.head.xflags=255&m,c.head.os=m>>8),512&c.flags&&(Bb[0]=255&m,Bb[1]=m>>>8&255,c.check=t(c.check,Bb,2,0)),m=0,n=0,c.mode=O;case O:if(1024&c.flags){for(;16>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}c.length=m,c.head&&(c.head.extra_len=m),512&c.flags&&(Bb[0]=255&m,Bb[1]=m>>>8&255,c.check=t(c.check,Bb,2,0)),m=0,n=0}else c.head&&(c.head.extra=null);c.mode=P;case P:if(1024&c.flags&&(q=c.length,q>i&&(q=i),q&&(c.head&&(wb=c.head.extra_len-c.length,c.head.extra||(c.head.extra=new Array(c.head.extra_len)),r.arraySet(c.head.extra,e,g,q,wb)),512&c.flags&&(c.check=t(c.check,e,q,g)),i-=q,g+=q,c.length-=q),c.length))break a;c.length=0,c.mode=Q;case Q:if(2048&c.flags){if(0===i)break a;q=0;do wb=e[g+q++],c.head&&wb&&c.length<65536&&(c.head.name+=String.fromCharCode(wb));while(wb&&i>q);if(512&c.flags&&(c.check=t(c.check,e,q,g)),i-=q,g+=q,wb)break a}else c.head&&(c.head.name=null);c.length=0,c.mode=R;case 
R:if(4096&c.flags){if(0===i)break a;q=0;do wb=e[g+q++],c.head&&wb&&c.length<65536&&(c.head.comment+=String.fromCharCode(wb));while(wb&&i>q);if(512&c.flags&&(c.check=t(c.check,e,q,g)),i-=q,g+=q,wb)break a}else c.head&&(c.head.comment=null);c.mode=S;case S:if(512&c.flags){for(;16>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(m!==(65535&c.check)){a.msg="header crc mismatch",c.mode=lb;break}m=0,n=0}c.head&&(c.head.hcrc=c.flags>>9&1,c.head.done=!0),a.adler=c.check=0,c.mode=V;break;case T:for(;32>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}a.adler=c.check=d(m),m=0,n=0,c.mode=U;case U:if(0===c.havedict)return a.next_out=h,a.avail_out=j,a.next_in=g,a.avail_in=i,c.hold=m,c.bits=n,E;a.adler=c.check=1,c.mode=V;case V:if(b===A||b===B)break a;case W:if(c.last){m>>>=7&n,n-=7&n,c.mode=ib;break}for(;3>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}switch(c.last=1&m,m>>>=1,n-=1,3&m){case 0:c.mode=X;break;case 1:if(k(c),c.mode=bb,b===B){m>>>=2,n-=2;break a}break;case 2:c.mode=$;break;case 3:a.msg="invalid block type",c.mode=lb}m>>>=2,n-=2;break;case X:for(m>>>=7&n,n-=7&n;32>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if((65535&m)!==(m>>>16^65535)){a.msg="invalid stored block lengths",c.mode=lb;break}if(c.length=65535&m,m=0,n=0,c.mode=Y,b===B)break a;case Y:c.mode=Z;case Z:if(q=c.length){if(q>i&&(q=i),q>j&&(q=j),0===q)break a;r.arraySet(f,e,g,q,h),i-=q,g+=q,j-=q,h+=q,c.length-=q;break}c.mode=V;break;case $:for(;14>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(c.nlen=(31&m)+257,m>>>=5,n-=5,c.ndist=(31&m)+1,m>>>=5,n-=5,c.ncode=(15&m)+4,m>>>=4,n-=4,c.nlen>286||c.ndist>30){a.msg="too many length or distance symbols",c.mode=lb;break}c.have=0,c.mode=_;case _:for(;c.have<c.ncode;){for(;3>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}c.lens[Cb[c.have++]]=7&m,m>>>=3,n-=3}for(;c.have<19;)c.lens[Cb[c.have++]]=0;if(c.lencode=c.lendyn,c.lenbits=7,yb={bits:c.lenbits},xb=v(w,c.lens,0,19,c.lencode,0,c.work,yb),c.lenbits=yb.bits,xb){a.msg="invalid code lengths set",c.mode=lb;break}c.have=0,c.mode=ab;case 
/* continues module 35: dynamic code-lengths decode, length/distance symbol decode, window copy, stored-checksum and length verification states, then the inflate() epilogue (window update, counters, data_type); followed by module 36 (inftrees), module 37 (messages) and the start of module 38 (trees, truncated at chunk end) */ab:for(;c.have<c.nlen+c.ndist;){for(;Ab=c.lencode[m&(1<<c.lenbits)-1],qb=Ab>>>24,rb=Ab>>>16&255,sb=65535&Ab,!(n>=qb);){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(16>sb)m>>>=qb,n-=qb,c.lens[c.have++]=sb;else{if(16===sb){for(zb=qb+2;zb>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(m>>>=qb,n-=qb,0===c.have){a.msg="invalid bit length repeat",c.mode=lb;break}wb=c.lens[c.have-1],q=3+(3&m),m>>>=2,n-=2}else if(17===sb){for(zb=qb+3;zb>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}m>>>=qb,n-=qb,wb=0,q=3+(7&m),m>>>=3,n-=3}else{for(zb=qb+7;zb>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}m>>>=qb,n-=qb,wb=0,q=11+(127&m),m>>>=7,n-=7}if(c.have+q>c.nlen+c.ndist){a.msg="invalid bit length repeat",c.mode=lb;break}for(;q--;)c.lens[c.have++]=wb}}if(c.mode===lb)break;if(0===c.lens[256]){a.msg="invalid code -- missing end-of-block",c.mode=lb;break}if(c.lenbits=9,yb={bits:c.lenbits},xb=v(x,c.lens,0,c.nlen,c.lencode,0,c.work,yb),c.lenbits=yb.bits,xb){a.msg="invalid literal/lengths set",c.mode=lb;break}if(c.distbits=6,c.distcode=c.distdyn,yb={bits:c.distbits},xb=v(y,c.lens,c.nlen,c.ndist,c.distcode,0,c.work,yb),c.distbits=yb.bits,xb){a.msg="invalid distances set",c.mode=lb;break}if(c.mode=bb,b===B)break a;case bb:c.mode=cb;case cb:if(i>=6&&j>=258){a.next_out=h,a.avail_out=j,a.next_in=g,a.avail_in=i,c.hold=m,c.bits=n,u(a,p),h=a.next_out,f=a.output,j=a.avail_out,g=a.next_in,e=a.input,i=a.avail_in,m=c.hold,n=c.bits,c.mode===V&&(c.back=-1);break}for(c.back=0;Ab=c.lencode[m&(1<<c.lenbits)-1],qb=Ab>>>24,rb=Ab>>>16&255,sb=65535&Ab,!(n>=qb);){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(rb&&0===(240&rb)){for(tb=qb,ub=rb,vb=sb;Ab=c.lencode[vb+((m&(1<<tb+ub)-1)>>tb)],qb=Ab>>>24,rb=Ab>>>16&255,sb=65535&Ab,!(n>=tb+qb);){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}m>>>=tb,n-=tb,c.back+=tb}if(m>>>=qb,n-=qb,c.back+=qb,c.length=sb,0===rb){c.mode=hb;break}if(32&rb){c.back=-1,c.mode=V;break}if(64&rb){a.msg="invalid literal/length code",c.mode=lb;break}c.extra=15&rb,c.mode=db;case 
db:if(c.extra){for(zb=c.extra;zb>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}c.length+=m&(1<<c.extra)-1,m>>>=c.extra,n-=c.extra,c.back+=c.extra}c.was=c.length,c.mode=eb;case eb:for(;Ab=c.distcode[m&(1<<c.distbits)-1],qb=Ab>>>24,rb=Ab>>>16&255,sb=65535&Ab,!(n>=qb);){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(0===(240&rb)){for(tb=qb,ub=rb,vb=sb;Ab=c.distcode[vb+((m&(1<<tb+ub)-1)>>tb)],qb=Ab>>>24,rb=Ab>>>16&255,sb=65535&Ab,!(n>=tb+qb);){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}m>>>=tb,n-=tb,c.back+=tb}if(m>>>=qb,n-=qb,c.back+=qb,64&rb){a.msg="invalid distance code",c.mode=lb;break}c.offset=sb,c.extra=15&rb,c.mode=fb;case fb:if(c.extra){for(zb=c.extra;zb>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}c.offset+=m&(1<<c.extra)-1,m>>>=c.extra,n-=c.extra,c.back+=c.extra}if(c.offset>c.dmax){a.msg="invalid distance too far back",c.mode=lb;break}c.mode=gb;case gb:if(0===j)break a;if(q=p-j,c.offset>q){if(q=c.offset-q,q>c.whave&&c.correct){a.msg="invalid distance too far back",c.mode=lb;break}q>c.wnext?(q-=c.wnext,ob=c.wsize-q):ob=c.wnext-q,q>c.length&&(q=c.length),pb=c.window}else pb=f,ob=h-c.offset,q=c.length;q>j&&(q=j),j-=q,c.length-=q;do f[h++]=pb[ob++];while(--q);0===c.length&&(c.mode=cb);break;case hb:if(0===j)break a;f[h++]=c.length,j--,c.mode=cb;break;case ib:if(c.wrap){for(;32>n;){if(0===i)break a;i--,m|=e[g++]<<n,n+=8}if(p-=j,a.total_out+=p,c.total+=p,p&&(a.adler=c.check=c.flags?t(c.check,f,p,h-p):s(c.check,f,p,h-p)),p=j,(c.flags?m:d(m))!==c.check){a.msg="incorrect data check",c.mode=lb;break}m=0,n=0}c.mode=jb;case jb:if(c.wrap&&c.flags){for(;32>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(m!==(4294967295&c.total)){a.msg="incorrect length check",c.mode=lb;break}m=0,n=0}c.mode=kb;case kb:xb=D;break a;case lb:xb=G;break a;case mb:return H;case nb:default:return F}return 
a.next_out=h,a.avail_out=j,a.next_in=g,a.avail_in=i,c.hold=m,c.bits=n,(c.wsize||p!==a.avail_out&&c.mode<lb&&(c.mode<ib||b!==z))&&l(a,a.output,a.next_out,p-a.avail_out)?(c.mode=mb,H):(o-=a.avail_in,p-=a.avail_out,a.total_in+=o,a.total_out+=p,c.total+=p,c.wrap&&p&&(a.adler=c.check=c.flags?t(c.check,f,p,a.next_out-p):s(c.check,f,p,a.next_out-p)),a.data_type=c.bits+(c.last?64:0)+(c.mode===V?128:0)+(c.mode===bb||c.mode===Y?256:0),(0===o&&0===p||b===z)&&xb===C&&(xb=I),xb)}function n(a){if(!a||!a.state)return F;var b=a.state;return b.window&&(b.window=null),a.state=null,C}function o(a,b){var c;return a&&a.state?(c=a.state,0===(2&c.wrap)?F:(c.head=b,b.done=!1,C)):F}var p,q,r=a("../utils/common"),s=a("./adler32"),t=a("./crc32"),u=a("./inffast"),v=a("./inftrees"),w=0,x=1,y=2,z=4,A=5,B=6,C=0,D=1,E=2,F=-2,G=-3,H=-4,I=-5,J=8,K=1,L=2,M=3,N=4,O=5,P=6,Q=7,R=8,S=9,T=10,U=11,V=12,W=13,X=14,Y=15,Z=16,$=17,_=18,ab=19,bb=20,cb=21,db=22,eb=23,fb=24,gb=25,hb=26,ib=27,jb=28,kb=29,lb=30,mb=31,nb=32,ob=852,pb=592,qb=15,rb=qb,sb=!0;c.inflateReset=g,c.inflateReset2=h,c.inflateResetKeep=f,c.inflateInit=j,c.inflateInit2=i,c.inflate=m,c.inflateEnd=n,c.inflateGetHeader=o,c.inflateInfo="pako inflate (from Nodeca project)"},{"../utils/common":27,"./adler32":29,"./crc32":31,"./inffast":34,"./inftrees":36}],36:[function(a,b){"use strict";/* module 36: inftrees — builds Huffman decode tables (lens/dists/codes) from code-length counts */var c=a("../utils/common"),d=15,e=852,f=592,g=0,h=1,i=2,j=[3,4,5,6,7,8,9,10,11,13,15,17,19,23,27,31,35,43,51,59,67,83,99,115,131,163,195,227,258,0,0],k=[16,16,16,16,16,16,16,16,17,17,17,17,18,18,18,18,19,19,19,19,20,20,20,20,21,21,21,21,16,72,78],l=[1,2,3,4,5,7,9,13,17,25,33,49,65,97,129,193,257,385,513,769,1025,1537,2049,3073,4097,6145,8193,12289,16385,24577,0,0],m=[16,16,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,24,24,25,25,26,26,27,27,28,28,29,29,64,64];b.exports=function(a,b,n,o,p,q,r,s){var t,u,v,w,x,y,z,A,B,C=s.bits,D=0,E=0,F=0,G=0,H=0,I=0,J=0,K=0,L=0,M=0,N=null,O=0,P=new c.Buf16(d+1),Q=new 
c.Buf16(d+1),R=null,S=0;for(D=0;d>=D;D++)P[D]=0;for(E=0;o>E;E++)P[b[n+E]]++;for(H=C,G=d;G>=1&&0===P[G];G--);if(H>G&&(H=G),0===G)return p[q++]=20971520,p[q++]=20971520,s.bits=1,0;for(F=1;G>F&&0===P[F];F++);for(F>H&&(H=F),K=1,D=1;d>=D;D++)if(K<<=1,K-=P[D],0>K)return-1;if(K>0&&(a===g||1!==G))return-1;for(Q[1]=0,D=1;d>D;D++)Q[D+1]=Q[D]+P[D];for(E=0;o>E;E++)0!==b[n+E]&&(r[Q[b[n+E]]++]=E);if(a===g?(N=R=r,y=19):a===h?(N=j,O-=257,R=k,S-=257,y=256):(N=l,R=m,y=-1),M=0,E=0,D=F,x=q,I=H,J=0,v=-1,L=1<<H,w=L-1,a===h&&L>e||a===i&&L>f)return 1;for(var T=0;;){T++,z=D-J,r[E]<y?(A=0,B=r[E]):r[E]>y?(A=R[S+r[E]],B=N[O+r[E]]):(A=96,B=0),t=1<<D-J,u=1<<I,F=u;do u-=t,p[x+(M>>J)+u]=z<<24|A<<16|B|0;while(0!==u);for(t=1<<D-1;M&t;)t>>=1;if(0!==t?(M&=t-1,M+=t):M=0,E++,0===--P[D]){if(D===G)break;D=b[n+r[E]]}if(D>H&&(M&w)!==v){for(0===J&&(J=H),x+=F,I=D-J,K=1<<I;G>I+J&&(K-=P[I+J],!(0>=K));)I++,K<<=1;if(L+=1<<I,a===h&&L>e||a===i&&L>f)return 1;v=M&w,p[v]=H<<24|I<<16|x-q|0}}return 0!==M&&(p[x+M]=D-J<<24|64<<16|0),s.bits=H,0}},{"../utils/common":27}],37:[function(a,b){"use strict";/* module 37: zlib status-code -> message table */b.exports={2:"need dictionary",1:"stream end",0:"","-1":"file error","-2":"stream error","-3":"data error","-4":"insufficient memory","-5":"buffer error","-6":"incompatible version"}},{}],38:[function(a,b,c){"use strict";/* module 38: zlib trees — Huffman tree construction and bit-stream output (truncated at chunk end) */function d(a){for(var b=a.length;--b>=0;)a[b]=0}function e(a){return 256>a?gb[a]:gb[256+(a>>>7)]}function f(a,b){a.pending_buf[a.pending++]=255&b,a.pending_buf[a.pending++]=b>>>8&255}function g(a,b,c){a.bi_valid>V-c?(a.bi_buf|=b<<a.bi_valid&65535,f(a,a.bi_buf),a.bi_buf=b>>V-a.bi_valid,a.bi_valid+=c-V):(a.bi_buf|=b<<a.bi_valid&65535,a.bi_valid+=c)}function h(a,b,c){g(a,c[2*b],c[2*b+1])}function i(a,b){var c=0;do c|=1&a,a>>>=1,c<<=1;while(--b>0);return c>>>1}function j(a){16===a.bi_valid?(f(a,a.bi_buf),a.bi_buf=0,a.bi_valid=0):a.bi_valid>=8&&(a.pending_buf[a.pending++]=255&a.bi_buf,a.bi_buf>>=8,a.bi_valid-=8)}function k(a,b){var 
c,d,e,f,g,h,i=b.dyn_tree,j=b.max_code,k=b.stat_desc.static_tree,l=b.stat_desc.has_stree,m=b.stat_desc.extra_bits,n=b.stat_desc.extra_base,o=b.stat_desc.max_length,p=0;for(f=0;U>=f;f++)a.bl_count[f]=0;for(i[2*a.heap[a.heap_max]+1]=0,c=a.heap_max+1;T>c;c++)d=a.heap[c],f=i[2*i[2*d+1]+1]+1,f>o&&(f=o,p++),i[2*d+1]=f,d>j||(a.bl_count[f]++,g=0,d>=n&&(g=m[d-n]),h=i[2*d],a.opt_len+=h*(f+g),l&&(a.static_len+=h*(k[2*d+1]+g)));if(0!==p){do{for(f=o-1;0===a.bl_count[f];)f--;a.bl_count[f]--,a.bl_count[f+1]+=2,a.bl_count[o]--,p-=2}while(p>0);for(f=o;0!==f;f--)for(d=a.bl_count[f];0!==d;)e=a.heap[--c],e>j||(i[2*e+1]!==f&&(a.opt_len+=(f-i[2*e+1])*i[2*e],i[2*e+1]=f),d--)}}function l(a,b,c){var d,e,f=new Array(U+1),g=0;for(d=1;U>=d;d++)f[d]=g=g+c[d-1]<<1;for(e=0;b>=e;e++){var h=a[2*e+1];0!==h&&(a[2*e]=i(f[h]++,h))}}function m(){var a,b,c,d,e,f=new Array(U+1);for(c=0,d=0;O-1>d;d++)for(ib[d]=c,a=0;a<1<<_[d];a++)hb[c++]=d;for(hb[c-1]=d,e=0,d=0;16>d;d++)for(jb[d]=e,a=0;a<1<<ab[d];a++)gb[e++]=d;for(e>>=7;R>d;d++)for(jb[d]=e<<7,a=0;a<1<<ab[d]-7;a++)gb[256+e++]=d;for(b=0;U>=b;b++)f[b]=0;for(a=0;143>=a;)eb[2*a+1]=8,a++,f[8]++;for(;255>=a;)eb[2*a+1]=9,a++,f[9]++;for(;279>=a;)eb[2*a+1]=7,a++,f[7]++;for(;287>=a;)eb[2*a+1]=8,a++,f[8]++;for(l(eb,Q+1,f),a=0;R>a;a++)fb[2*a+1]=5,fb[2*a]=i(a,5);kb=new nb(eb,_,P+1,Q,U),lb=new nb(fb,ab,0,R,U),mb=new nb(new Array(0),bb,0,S,W)}function n(a){var b;for(b=0;Q>b;b++)a.dyn_ltree[2*b]=0;for(b=0;R>b;b++)a.dyn_dtree[2*b]=0;for(b=0;S>b;b++)a.bl_tree[2*b]=0;a.dyn_ltree[2*X]=1,a.opt_len=a.static_len=0,a.last_lit=a.matches=0}function o(a){a.bi_valid>8?f(a,a.bi_buf):a.bi_valid>0&&(a.pending_buf[a.pending++]=a.bi_buf),a.bi_buf=0,a.bi_valid=0}function p(a,b,c,d){o(a),d&&(f(a,c),f(a,~c)),E.arraySet(a.pending_buf,a.window,b,c,a.pending),a.pending+=c}function q(a,b,c,d){var e=2*b,f=2*c;return a[e]<a[f]||a[e]===a[f]&&d[b]<=d[c]}function r(a,b,c){for(var 
d=a.heap[c],e=c<<1;e<=a.heap_len&&(e<a.heap_len&&q(b,a.heap[e+1],a.heap[e],a.depth)&&e++,!q(b,d,a.heap[e],a.depth));)a.heap[c]=a.heap[e],c=e,e<<=1;a.heap[c]=d}function s(a,b,c){var d,f,i,j,k=0;if(0!==a.last_lit)do d=a.pending_buf[a.d_buf+2*k]<<8|a.pending_buf[a.d_buf+2*k+1],f=a.pending_buf[a.l_buf+k],k++,0===d?h(a,f,b):(i=hb[f],h(a,i+P+1,b),j=_[i],0!==j&&(f-=ib[i],g(a,f,j)),d--,i=e(d),h(a,i,c),j=ab[i],0!==j&&(d-=jb[i],g(a,d,j)));while(k<a.last_lit);h(a,X,b)}function t(a,b){var c,d,e,f=b.dyn_tree,g=b.stat_desc.static_tree,h=b.stat_desc.has_stree,i=b.stat_desc.elems,j=-1;for(a.heap_len=0,a.heap_max=T,c=0;i>c;c++)0!==f[2*c]?(a.heap[++a.heap_len]=j=c,a.depth[c]=0):f[2*c+1]=0;for(;a.heap_len<2;)e=a.heap[++a.heap_len]=2>j?++j:0,f[2*e]=1,a.depth[e]=0,a.opt_len--,h&&(a.static_len-=g[2*e+1]);for(b.max_code=j,c=a.heap_len>>1;c>=1;c--)r(a,f,c);e=i;do c=a.heap[1],a.heap[1]=a.heap[a.heap_len--],r(a,f,1),d=a.heap[1],a.heap[--a.heap_max]=c,a.heap[--a.heap_max]=d,f[2*e]=f[2*c]+f[2*d],a.depth[e]=(a.depth[c]>=a.depth[d]?a.depth[c]:a.depth[d])+1,f[2*c+1]=f[2*d+1]=e,a.heap[1]=e++,r(a,f,1);while(a.heap_len>=2);a.heap[--a.heap_max]=a.heap[1],k(a,b),l(f,j,a.bl_count)}function u(a,b,c){var d,e,f=-1,g=b[1],h=0,i=7,j=4;for(0===g&&(i=138,j=3),b[2*(c+1)+1]=65535,d=0;c>=d;d++)e=g,g=b[2*(d+1)+1],++h<i&&e===g||(j>h?a.bl_tree[2*e]+=h:0!==e?(e!==f&&a.bl_tree[2*e]++,a.bl_tree[2*Y]++):10>=h?a.bl_tree[2*Z]++:a.bl_tree[2*$]++,h=0,f=e,0===g?(i=138,j=3):e===g?(i=6,j=3):(i=7,j=4))}function v(a,b,c){var d,e,f=-1,i=b[1],j=0,k=7,l=4;for(0===i&&(k=138,l=3),d=0;c>=d;d++)if(e=i,i=b[2*(d+1)+1],!(++j<k&&e===i)){if(l>j){do h(a,e,a.bl_tree);while(0!==--j)}else 0!==e?(e!==f&&(h(a,e,a.bl_tree),j--),h(a,Y,a.bl_tree),g(a,j-3,2)):10>=j?(h(a,Z,a.bl_tree),g(a,j-3,3)):(h(a,$,a.bl_tree),g(a,j-11,7));j=0,f=e,0===i?(k=138,l=3):e===i?(k=6,l=3):(k=7,l=4)}}function w(a){var b;for(u(a,a.dyn_ltree,a.l_desc.max_code),u(a,a.dyn_dtree,a.d_desc.max_code),t(a,a.bl_desc),b=S-1;b>=3&&0===a.bl_tree[2*cb[b]+1];b--);return 
a.opt_len+=3*(b+1)+5+5+4,b}function x(a,b,c,d){var e;for(g(a,b-257,5),g(a,c-1,5),g(a,d-4,4),e=0;d>e;e++)g(a,a.bl_tree[2*cb[e]+1],3);v(a,a.dyn_ltree,b-1),v(a,a.dyn_dtree,c-1)}function y(a){var b,c=4093624447;for(b=0;31>=b;b++,c>>>=1)if(1&c&&0!==a.dyn_ltree[2*b])return G;if(0!==a.dyn_ltree[18]||0!==a.dyn_ltree[20]||0!==a.dyn_ltree[26])return H;for(b=32;P>b;b++)if(0!==a.dyn_ltree[2*b])return H;return G}function z(a){pb||(m(),pb=!0),a.l_desc=new ob(a.dyn_ltree,kb),a.d_desc=new ob(a.dyn_dtree,lb),a.bl_desc=new ob(a.bl_tree,mb),a.bi_buf=0,a.bi_valid=0,n(a)}function A(a,b,c,d){g(a,(J<<1)+(d?1:0),3),p(a,b,c,!0)}function B(a){g(a,K<<1,3),h(a,X,eb),j(a)}function C(a,b,c,d){var e,f,h=0;a.level>0?(a.strm.data_type===I&&(a.strm.data_type=y(a)),t(a,a.l_desc),t(a,a.d_desc),h=w(a),e=a.opt_len+3+7>>>3,f=a.static_len+3+7>>>3,e>=f&&(e=f)):e=f=c+5,e>=c+4&&-1!==b?A(a,b,c,d):a.strategy===F||f===e?(g(a,(K<<1)+(d?1:0),3),s(a,eb,fb)):(g(a,(L<<1)+(d?1:0),3),x(a,a.l_desc.max_code+1,a.d_desc.max_code+1,h+1),s(a,a.dyn_ltree,a.dyn_dtree)),n(a),d&&o(a)}function D(a,b,c){return a.pending_buf[a.d_buf+2*a.last_lit]=b>>>8&255,a.pending_buf[a.d_buf+2*a.last_lit+1]=255&b,a.pending_buf[a.l_buf+a.last_lit]=255&c,a.last_lit++,0===b?a.dyn_ltree[2*c]++:(a.matches++,b--,a.dyn_ltree[2*(hb[c]+P+1)]++,a.dyn_dtree[2*e(b)]++),a.last_lit===a.lit_bufsize-1}var E=a("../utils/common"),F=4,G=0,H=1,I=2,J=0,K=1,L=2,M=3,N=258,O=29,P=256,Q=P+1+O,R=30,S=19,T=2*Q+1,U=15,V=16,W=7,X=256,Y=16,Z=17,$=18,_=[0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,0],ab=[0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13],bb=[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,3,7],cb=[16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15],db=512,eb=new Array(2*(Q+2));d(eb);var fb=new Array(2*R);d(fb);var gb=new Array(db);d(gb);var hb=new Array(N-M+1);d(hb);var ib=new Array(O);d(ib);var jb=new Array(R);d(jb);var 
kb,lb,mb,nb=function(a,b,c,d,e){this.static_tree=a,this.extra_bits=b,this.extra_base=c,this.elems=d,this.max_length=e,this.has_stree=a&&a.length},ob=function(a,b){this.dyn_tree=a,this.max_code=0,this.stat_desc=b},pb=!1;c._tr_init=z,c._tr_stored_block=A,c._tr_flush_block=C,c._tr_tally=D,c._tr_align=B},{"../utils/common":27}],39:[function(a,b){"use strict";function c(){this.input=null,this.next_in=0,this.avail_in=0,this.total_in=0,this.output=null,this.next_out=0,this.avail_out=0,this.total_out=0,this.msg="",this.state=null,this.data_type=2,this.adler=0}b.exports=c},{}]},{},[9])(9)});'use strict';if(tr.isVinn){global.JSZip=global.window.JSZip;global.window=undefined;}else if(tr.isNode){const jsZipAbsPath=HTMLImportsLoader.hrefToAbsolutePath('/jszip.min.js');const jsZipModule=require(jsZipAbsPath);global.JSZip=jsZipModule;}'use strict';tr.exportTo('tr.e.importer',function(){function ZipImporter(model,eventData){if(eventData instanceof ArrayBuffer){eventData=new Uint8Array(eventData);} +!function(a){if("object"==typeof exports&&"undefined"!=typeof module)module.exports=a();else if("function"==typeof define&&define.amd)define([],a);else{var b;"undefined"!=typeof window?b=window:"undefined"!=typeof global?b=global:"undefined"!=typeof self&&(b=self),b.JSZip=a()}}(function(){return function a(b,c,d){function e(g,h){if(!c[g]){if(!b[g]){var i="function"==typeof require&&require;if(!h&&i)return i(g,!0);if(f)return f(g,!0);throw new Error("Cannot find module '"+g+"'")}var j=c[g]={exports:{}};b[g][0].call(j.exports,function(a){var c=b[g][1][a];return e(c?c:a)},j,j.exports,a,b,c,d)}return c[g].exports}for(var f="function"==typeof require&&require,g=0;g<d.length;g++)e(d[g]);return e}({1:[function(a,b,c){"use strict";var d="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/=";c.encode=function(a){for(var 
b,c,e,f,g,h,i,j="",k=0;k<a.length;)b=a.charCodeAt(k++),c=a.charCodeAt(k++),e=a.charCodeAt(k++),f=b>>2,g=(3&b)<<4|c>>4,h=(15&c)<<2|e>>6,i=63&e,isNaN(c)?h=i=64:isNaN(e)&&(i=64),j=j+d.charAt(f)+d.charAt(g)+d.charAt(h)+d.charAt(i);return j},c.decode=function(a){var b,c,e,f,g,h,i,j="",k=0;for(a=a.replace(/[^A-Za-z0-9\+\/\=]/g,"");k<a.length;)f=d.indexOf(a.charAt(k++)),g=d.indexOf(a.charAt(k++)),h=d.indexOf(a.charAt(k++)),i=d.indexOf(a.charAt(k++)),b=f<<2|g>>4,c=(15&g)<<4|h>>2,e=(3&h)<<6|i,j+=String.fromCharCode(b),64!=h&&(j+=String.fromCharCode(c)),64!=i&&(j+=String.fromCharCode(e));return j}},{}],2:[function(a,b){"use strict";function c(){this.compressedSize=0,this.uncompressedSize=0,this.crc32=0,this.compressionMethod=null,this.compressedContent=null}c.prototype={getContent:function(){return null},getCompressedContent:function(){return null}},b.exports=c},{}],3:[function(a,b,c){"use strict";c.STORE={magic:"\x00\x00",compress:function(a){return a},uncompress:function(a){return a},compressInputType:null,uncompressInputType:null},c.DEFLATE=a("./flate")},{"./flate":8}],4:[function(a,b){"use strict";var 
c=a("./utils"),d=[0,1996959894,3993919788,2567524794,124634137,1886057615,3915621685,2657392035,249268274,2044508324,3772115230,2547177864,162941995,2125561021,3887607047,2428444049,498536548,1789927666,4089016648,2227061214,450548861,1843258603,4107580753,2211677639,325883990,1684777152,4251122042,2321926636,335633487,1661365465,4195302755,2366115317,997073096,1281953886,3579855332,2724688242,1006888145,1258607687,3524101629,2768942443,901097722,1119000684,3686517206,2898065728,853044451,1172266101,3705015759,2882616665,651767980,1373503546,3369554304,3218104598,565507253,1454621731,3485111705,3099436303,671266974,1594198024,3322730930,2970347812,795835527,1483230225,3244367275,3060149565,1994146192,31158534,2563907772,4023717930,1907459465,112637215,2680153253,3904427059,2013776290,251722036,2517215374,3775830040,2137656763,141376813,2439277719,3865271297,1802195444,476864866,2238001368,4066508878,1812370925,453092731,2181625025,4111451223,1706088902,314042704,2344532202,4240017532,1658658271,366619977,2362670323,4224994405,1303535960,984961486,2747007092,3569037538,1256170817,1037604311,2765210733,3554079995,1131014506,879679996,2909243462,3663771856,1141124467,855842277,2852801631,3708648649,1342533948,654459306,3188396048,3373015174,1466479909,544179635,3110523913,3462522015,1591671054,702138776,2966460450,3352799412,1504918807,783551873,3082640443,3233442989,3988292384,2596254646,62317068,1957810842,3939845945,2647816111,81470997,1943803523,3814918930,2489596804,225274430,2053790376,3826175755,2466906013,167816743,2097651377,4027552580,2265490386,503444072,1762050814,4150417245,2154129355,426522225,1852507879,4275313526,2312317920,282753626,1742555852,4189708143,2394877945,397917763,1622183637,3604390888,2714866558,953729732,1340076626,3518719985,2797360999,1068828381,1219638859,3624741850,2936675148,906185462,1090812512,3747672003,2825379669,829329135,1181335161,3412177804,3160834842,628085408,1382605366,3423369109,3138078467,570562233,1426400815,3317316542,2
998733608,733239954,1555261956,3268935591,3050360625,752459403,1541320221,2607071920,3965973030,1969922972,40735498,2617837225,3943577151,1913087877,83908371,2512341634,3803740692,2075208622,213261112,2463272603,3855990285,2094854071,198958881,2262029012,4057260610,1759359992,534414190,2176718541,4139329115,1873836001,414664567,2282248934,4279200368,1711684554,285281116,2405801727,4167216745,1634467795,376229701,2685067896,3608007406,1308918612,956543938,2808555105,3495958263,1231636301,1047427035,2932959818,3654703836,1088359270,936918e3,2847714899,3736837829,1202900863,817233897,3183342108,3401237130,1404277552,615818150,3134207493,3453421203,1423857449,601450431,3009837614,3294710456,1567103746,711928724,3020668471,3272380065,1510334235,755167117];b.exports=function(a,b){if("undefined"==typeof a||!a.length)return 0;var e="string"!==c.getTypeOf(a);"undefined"==typeof b&&(b=0);var f=0,g=0,h=0;b=-1^b;for(var i=0,j=a.length;j>i;i++)h=e?a[i]:a.charCodeAt(i),g=255&(b^h),f=d[g],b=b>>>8^f;return-1^b}},{"./utils":21}],5:[function(a,b){"use strict";function c(){this.data=null,this.length=0,this.index=0}var d=a("./utils");c.prototype={checkOffset:function(a){this.checkIndex(this.index+a)},checkIndex:function(a){if(this.length<a||0>a)throw new Error("End of data reached (data length = "+this.length+", asked index = "+a+"). 
Corrupted zip ?")},setIndex:function(a){this.checkIndex(a),this.index=a},skip:function(a){this.setIndex(this.index+a)},byteAt:function(){},readInt:function(a){var b,c=0;for(this.checkOffset(a),b=this.index+a-1;b>=this.index;b--)c=(c<<8)+this.byteAt(b);return this.index+=a,c},readString:function(a){return d.transformTo("string",this.readData(a))},readData:function(){},lastIndexOfSignature:function(){},readDate:function(){var a=this.readInt(4);return new Date((a>>25&127)+1980,(a>>21&15)-1,a>>16&31,a>>11&31,a>>5&63,(31&a)<<1)}},b.exports=c},{"./utils":21}],6:[function(a,b,c){"use strict";c.base64=!1,c.binary=!1,c.dir=!1,c.createFolders=!1,c.date=null,c.compression=null,c.comment=null},{}],7:[function(a,b,c){"use strict";var d=a("./utils");c.string2binary=function(a){return d.string2binary(a)},c.string2Uint8Array=function(a){return d.transformTo("uint8array",a)},c.uint8Array2String=function(a){return d.transformTo("string",a)},c.string2Blob=function(a){var b=d.transformTo("arraybuffer",a);return d.arrayBuffer2Blob(b)},c.arrayBuffer2Blob=function(a){return d.arrayBuffer2Blob(a)},c.transformTo=function(a,b){return d.transformTo(a,b)},c.getTypeOf=function(a){return d.getTypeOf(a)},c.checkSupport=function(a){return d.checkSupport(a)},c.MAX_VALUE_16BITS=d.MAX_VALUE_16BITS,c.MAX_VALUE_32BITS=d.MAX_VALUE_32BITS,c.pretty=function(a){return d.pretty(a)},c.findCompression=function(a){return d.findCompression(a)},c.isRegExp=function(a){return d.isRegExp(a)}},{"./utils":21}],8:[function(a,b,c){"use strict";var d="undefined"!=typeof Uint8Array&&"undefined"!=typeof Uint16Array&&"undefined"!=typeof Uint32Array,e=a("pako");c.uncompressInputType=d?"uint8array":"array",c.compressInputType=d?"uint8array":"array",c.magic="\b\x00",c.compress=function(a){return e.deflateRaw(a)},c.uncompress=function(a){return e.inflateRaw(a)}},{pako:24}],9:[function(a,b){"use strict";function c(a,b){return this instanceof 
c?(this.files={},this.comment=null,this.root="",a&&this.load(a,b),void(this.clone=function(){var a=new c;for(var b in this)"function"!=typeof this[b]&&(a[b]=this[b]);return a})):new c(a,b)}var d=a("./base64");c.prototype=a("./object"),c.prototype.load=a("./load"),c.support=a("./support"),c.defaults=a("./defaults"),c.utils=a("./deprecatedPublicUtils"),c.base64={encode:function(a){return d.encode(a)},decode:function(a){return d.decode(a)}},c.compressions=a("./compressions"),b.exports=c},{"./base64":1,"./compressions":3,"./defaults":6,"./deprecatedPublicUtils":7,"./load":10,"./object":13,"./support":17}],10:[function(a,b){"use strict";var c=a("./base64"),d=a("./zipEntries");b.exports=function(a,b){var e,f,g,h;for(b=b||{},b.base64&&(a=c.decode(a)),f=new d(a,b),e=f.files,g=0;g<e.length;g++)h=e[g],this.file(h.fileName,h.decompressed,{binary:!0,optimizedBinaryString:!0,date:h.date,dir:h.dir,comment:h.fileComment.length?h.fileComment:null,createFolders:b.createFolders});return f.zipComment.length&&(this.comment=f.zipComment),this}},{"./base64":1,"./zipEntries":22}],11:[function(a,b){(function(a){"use strict";b.exports=function(b,c){return new a(b,c)},b.exports.test=function(b){return a.isBuffer(b)}}).call(this,"undefined"!=typeof Buffer?Buffer:void 0)},{}],12:[function(a,b){"use strict";function c(a){this.data=a,this.length=this.data.length,this.index=0}var d=a("./uint8ArrayReader");c.prototype=new d,c.prototype.readData=function(a){this.checkOffset(a);var b=this.data.slice(this.index,this.index+a);return this.index+=a,b},b.exports=c},{"./uint8ArrayReader":18}],13:[function(a,b){"use strict";var c=a("./support"),d=a("./utils"),e=a("./crc32"),f=a("./signature"),g=a("./defaults"),h=a("./base64"),i=a("./compressions"),j=a("./compressedObject"),k=a("./nodeBuffer"),l=a("./utf8"),m=a("./stringWriter"),n=a("./uint8ArrayWriter"),o=function(a){if(a._data instanceof j&&(a._data=a._data.getContent(),a.options.binary=!0,a.options.base64=!1,"uint8array"===d.getTypeOf(a._data))){var 
b=a._data;a._data=new Uint8Array(b.length),0!==b.length&&a._data.set(b,0)}return a._data},p=function(a){var b=o(a),e=d.getTypeOf(b);return"string"===e?!a.options.binary&&c.nodebuffer?k(b,"utf-8"):a.asBinary():b},q=function(a){var b=o(this);return null===b||"undefined"==typeof b?"":(this.options.base64&&(b=h.decode(b)),b=a&&this.options.binary?A.utf8decode(b):d.transformTo("string",b),a||this.options.binary||(b=d.transformTo("string",A.utf8encode(b))),b)},r=function(a,b,c){this.name=a,this.dir=c.dir,this.date=c.date,this.comment=c.comment,this._data=b,this.options=c,this._initialMetadata={dir:c.dir,date:c.date}};r.prototype={asText:function(){return q.call(this,!0)},asBinary:function(){return q.call(this,!1)},asNodeBuffer:function(){var a=p(this);return d.transformTo("nodebuffer",a)},asUint8Array:function(){var a=p(this);return d.transformTo("uint8array",a)},asArrayBuffer:function(){return this.asUint8Array().buffer}};var s=function(a,b){var c,d="";for(c=0;b>c;c++)d+=String.fromCharCode(255&a),a>>>=8;return d},t=function(){var a,b,c={};for(a=0;a<arguments.length;a++)for(b in arguments[a])arguments[a].hasOwnProperty(b)&&"undefined"==typeof c[b]&&(c[b]=arguments[a][b]);return c},u=function(a){return a=a||{},a.base64!==!0||null!==a.binary&&void 0!==a.binary||(a.binary=!0),a=t(a,g),a.date=a.date||new Date,null!==a.compression&&(a.compression=a.compression.toUpperCase()),a},v=function(a,b,c){var e,f=d.getTypeOf(b);if(c=u(c),c.createFolders&&(e=w(a))&&x.call(this,e,!0),c.dir||null===b||"undefined"==typeof b)c.base64=!1,c.binary=!1,b=null;else if("string"===f)c.binary&&!c.base64&&c.optimizedBinaryString!==!0&&(b=d.string2binary(b));else{if(c.base64=!1,c.binary=!0,!(f||b instanceof j))throw new Error("The data of '"+a+"' is in an unsupported format !");"arraybuffer"===f&&(b=d.transformTo("uint8array",b))}var g=new r(a,b,c);return this.files[a]=g,g},w=function(a){"/"==a.slice(-1)&&(a=a.substring(0,a.length-1));var b=a.lastIndexOf("/");return 
b>0?a.substring(0,b):""},x=function(a,b){return"/"!=a.slice(-1)&&(a+="/"),b="undefined"!=typeof b?b:!1,this.files[a]||v.call(this,a,null,{dir:!0,createFolders:b}),this.files[a]},y=function(a,b){var c,f=new j;return a._data instanceof j?(f.uncompressedSize=a._data.uncompressedSize,f.crc32=a._data.crc32,0===f.uncompressedSize||a.dir?(b=i.STORE,f.compressedContent="",f.crc32=0):a._data.compressionMethod===b.magic?f.compressedContent=a._data.getCompressedContent():(c=a._data.getContent(),f.compressedContent=b.compress(d.transformTo(b.compressInputType,c)))):(c=p(a),(!c||0===c.length||a.dir)&&(b=i.STORE,c=""),f.uncompressedSize=c.length,f.crc32=e(c),f.compressedContent=b.compress(d.transformTo(b.compressInputType,c))),f.compressedSize=f.compressedContent.length,f.compressionMethod=b.magic,f},z=function(a,b,c,g){var h,i,j,k,m=(c.compressedContent,d.transformTo("string",l.utf8encode(b.name))),n=b.comment||"",o=d.transformTo("string",l.utf8encode(n)),p=m.length!==b.name.length,q=o.length!==n.length,r=b.options,t="",u="",v="";j=b._initialMetadata.dir!==b.dir?b.dir:r.dir,k=b._initialMetadata.date!==b.date?b.date:r.date,h=k.getHours(),h<<=6,h|=k.getMinutes(),h<<=5,h|=k.getSeconds()/2,i=k.getFullYear()-1980,i<<=4,i|=k.getMonth()+1,i<<=5,i|=k.getDate(),p&&(u=s(1,1)+s(e(m),4)+m,t+="up"+s(u.length,2)+u),q&&(v=s(1,1)+s(this.crc32(o),4)+o,t+="uc"+s(v.length,2)+v);var w="";w+="\n\x00",w+=p||q?"\x00\b":"\x00\x00",w+=c.compressionMethod,w+=s(h,2),w+=s(i,2),w+=s(c.crc32,4),w+=s(c.compressedSize,4),w+=s(c.uncompressedSize,4),w+=s(m.length,2),w+=s(t.length,2);var x=f.LOCAL_FILE_HEADER+w+m+t,y=f.CENTRAL_FILE_HEADER+"\x00"+w+s(o.length,2)+"\x00\x00\x00\x00"+(j===!0?"\x00\x00\x00":"\x00\x00\x00\x00")+s(g,4)+m+t+o;return{fileRecord:x,dirRecord:y,compressedObject:c}},A={load:function(){throw new Error("Load method is not defined. 
Is the file jszip-load.js included ?")},filter:function(a){var b,c,d,e,f=[];for(b in this.files)this.files.hasOwnProperty(b)&&(d=this.files[b],e=new r(d.name,d._data,t(d.options)),c=b.slice(this.root.length,b.length),b.slice(0,this.root.length)===this.root&&a(c,e)&&f.push(e));return f},file:function(a,b,c){if(1===arguments.length){if(d.isRegExp(a)){var e=a;return this.filter(function(a,b){return!b.dir&&e.test(a)})}return this.filter(function(b,c){return!c.dir&&b===a})[0]||null}return a=this.root+a,v.call(this,a,b,c),this},folder:function(a){if(!a)return this;if(d.isRegExp(a))return this.filter(function(b,c){return c.dir&&a.test(b)});var b=this.root+a,c=x.call(this,b),e=this.clone();return e.root=c.name,e},remove:function(a){a=this.root+a;var b=this.files[a];if(b||("/"!=a.slice(-1)&&(a+="/"),b=this.files[a]),b&&!b.dir)delete this.files[a];else for(var c=this.filter(function(b,c){return c.name.slice(0,a.length)===a}),d=0;d<c.length;d++)delete this.files[c[d].name];return this},generate:function(a){a=t(a||{},{base64:!0,compression:"STORE",type:"base64",comment:null}),d.checkSupport(a.type);var b,c,e=[],g=0,j=0,k=d.transformTo("string",this.utf8encode(a.comment||this.comment||""));for(var l in this.files)if(this.files.hasOwnProperty(l)){var o=this.files[l],p=o.options.compression||a.compression.toUpperCase(),q=i[p];if(!q)throw new Error(p+" is not a valid compression method !");var r=y.call(this,o,q),u=z.call(this,l,o,r,g);g+=u.fileRecord.length+r.compressedSize,j+=u.dirRecord.length,e.push(u)}var v="";v=f.CENTRAL_DIRECTORY_END+"\x00\x00\x00\x00"+s(e.length,2)+s(e.length,2)+s(j,4)+s(g,4)+s(k.length,2)+k;var w=a.type.toLowerCase();for(b="uint8array"===w||"arraybuffer"===w||"blob"===w||"nodebuffer"===w?new n(g+j+v.length):new m(g+j+v.length),c=0;c<e.length;c++)b.append(e[c].fileRecord),b.append(e[c].compressedObject.compressedContent);for(c=0;c<e.length;c++)b.append(e[c].dirRecord);b.append(v);var 
x=b.finalize();switch(a.type.toLowerCase()){case"uint8array":case"arraybuffer":case"nodebuffer":return d.transformTo(a.type.toLowerCase(),x);case"blob":return d.arrayBuffer2Blob(d.transformTo("arraybuffer",x));case"base64":return a.base64?h.encode(x):x;default:return x}},crc32:function(a,b){return e(a,b)},utf8encode:function(a){return d.transformTo("string",l.utf8encode(a))},utf8decode:function(a){return l.utf8decode(a)}};b.exports=A},{"./base64":1,"./compressedObject":2,"./compressions":3,"./crc32":4,"./defaults":6,"./nodeBuffer":11,"./signature":14,"./stringWriter":16,"./support":17,"./uint8ArrayWriter":19,"./utf8":20,"./utils":21}],14:[function(a,b,c){"use strict";c.LOCAL_FILE_HEADER="PK",c.CENTRAL_FILE_HEADER="PK",c.CENTRAL_DIRECTORY_END="PK",c.ZIP64_CENTRAL_DIRECTORY_LOCATOR="PK",c.ZIP64_CENTRAL_DIRECTORY_END="PK",c.DATA_DESCRIPTOR="PK\b"},{}],15:[function(a,b){"use strict";function c(a,b){this.data=a,b||(this.data=e.string2binary(this.data)),this.length=this.data.length,this.index=0}var d=a("./dataReader"),e=a("./utils");c.prototype=new d,c.prototype.byteAt=function(a){return this.data.charCodeAt(a)},c.prototype.lastIndexOfSignature=function(a){return this.data.lastIndexOf(a)},c.prototype.readData=function(a){this.checkOffset(a);var b=this.data.slice(this.index,this.index+a);return this.index+=a,b},b.exports=c},{"./dataReader":5,"./utils":21}],16:[function(a,b){"use strict";var c=a("./utils"),d=function(){this.data=[]};d.prototype={append:function(a){a=c.transformTo("string",a),this.data.push(a)},finalize:function(){return this.data.join("")}},b.exports=d},{"./utils":21}],17:[function(a,b,c){(function(a){"use strict";if(c.base64=!0,c.array=!0,c.string=!0,c.arraybuffer="undefined"!=typeof ArrayBuffer&&"undefined"!=typeof Uint8Array,c.nodebuffer="undefined"!=typeof a,c.uint8array="undefined"!=typeof Uint8Array,"undefined"==typeof ArrayBuffer)c.blob=!1;else{var b=new ArrayBuffer(0);try{c.blob=0===new Blob([b],{type:"application/zip"}).size}catch(d){try{var 
e=window.BlobBuilder||window.WebKitBlobBuilder||window.MozBlobBuilder||window.MSBlobBuilder,f=new e;f.append(b),c.blob=0===f.getBlob("application/zip").size}catch(d){c.blob=!1}}}}).call(this,"undefined"!=typeof Buffer?Buffer:void 0)},{}],18:[function(a,b){"use strict";function c(a){a&&(this.data=a,this.length=this.data.length,this.index=0)}var d=a("./dataReader");c.prototype=new d,c.prototype.byteAt=function(a){return this.data[a]},c.prototype.lastIndexOfSignature=function(a){for(var b=a.charCodeAt(0),c=a.charCodeAt(1),d=a.charCodeAt(2),e=a.charCodeAt(3),f=this.length-4;f>=0;--f)if(this.data[f]===b&&this.data[f+1]===c&&this.data[f+2]===d&&this.data[f+3]===e)return f;return-1},c.prototype.readData=function(a){if(this.checkOffset(a),0===a)return new Uint8Array(0);var b=this.data.subarray(this.index,this.index+a);return this.index+=a,b},b.exports=c},{"./dataReader":5}],19:[function(a,b){"use strict";var c=a("./utils"),d=function(a){this.data=new Uint8Array(a),this.index=0};d.prototype={append:function(a){0!==a.length&&(a=c.transformTo("uint8array",a),this.data.set(a,this.index),this.index+=a.length)},finalize:function(){return this.data}},b.exports=d},{"./utils":21}],20:[function(a,b,c){"use strict";for(var d=a("./utils"),e=a("./support"),f=a("./nodeBuffer"),g=new Array(256),h=0;256>h;h++)g[h]=h>=252?6:h>=248?5:h>=240?4:h>=224?3:h>=192?2:1;g[254]=g[254]=1;var i=function(a){var b,c,d,f,g,h=a.length,i=0;for(f=0;h>f;f++)c=a.charCodeAt(f),55296===(64512&c)&&h>f+1&&(d=a.charCodeAt(f+1),56320===(64512&d)&&(c=65536+(c-55296<<10)+(d-56320),f++)),i+=128>c?1:2048>c?2:65536>c?3:4;for(b=e.uint8array?new Uint8Array(i):new Array(i),g=0,f=0;i>g;f++)c=a.charCodeAt(f),55296===(64512&c)&&h>f+1&&(d=a.charCodeAt(f+1),56320===(64512&d)&&(c=65536+(c-55296<<10)+(d-56320),f++)),128>c?b[g++]=c:2048>c?(b[g++]=192|c>>>6,b[g++]=128|63&c):65536>c?(b[g++]=224|c>>>12,b[g++]=128|c>>>6&63,b[g++]=128|63&c):(b[g++]=240|c>>>18,b[g++]=128|c>>>12&63,b[g++]=128|c>>>6&63,b[g++]=128|63&c);return 
b},j=function(a,b){var c;for(b=b||a.length,b>a.length&&(b=a.length),c=b-1;c>=0&&128===(192&a[c]);)c--;return 0>c?b:0===c?b:c+g[a[c]]>b?c:b},k=function(a){var b,c,e,f,h=a.length,i=new Array(2*h);for(c=0,b=0;h>b;)if(e=a[b++],128>e)i[c++]=e;else if(f=g[e],f>4)i[c++]=65533,b+=f-1;else{for(e&=2===f?31:3===f?15:7;f>1&&h>b;)e=e<<6|63&a[b++],f--;f>1?i[c++]=65533:65536>e?i[c++]=e:(e-=65536,i[c++]=55296|e>>10&1023,i[c++]=56320|1023&e)}return i.length!==c&&(i.subarray?i=i.subarray(0,c):i.length=c),d.applyFromCharCode(i)};c.utf8encode=function(a){return e.nodebuffer?f(a,"utf-8"):i(a)},c.utf8decode=function(a){if(e.nodebuffer)return d.transformTo("nodebuffer",a).toString("utf-8");a=d.transformTo(e.uint8array?"uint8array":"array",a);for(var b=[],c=0,f=a.length,g=65536;f>c;){var h=j(a,Math.min(c+g,f));b.push(e.uint8array?k(a.subarray(c,h)):k(a.slice(c,h))),c=h}return b.join("")}},{"./nodeBuffer":11,"./support":17,"./utils":21}],21:[function(a,b,c){"use strict";function d(a){return a}function e(a,b){for(var c=0;c<a.length;++c)b[c]=255&a.charCodeAt(c);return b}function f(a){var b=65536,d=[],e=a.length,f=c.getTypeOf(a),g=0,h=!0;try{switch(f){case"uint8array":String.fromCharCode.apply(null,new Uint8Array(0));break;case"nodebuffer":String.fromCharCode.apply(null,j(0))}}catch(i){h=!1}if(!h){for(var k="",l=0;l<a.length;l++)k+=String.fromCharCode(a[l]);return k}for(;e>g&&b>1;)try{d.push("array"===f||"nodebuffer"===f?String.fromCharCode.apply(null,a.slice(g,Math.min(g+b,e))):String.fromCharCode.apply(null,a.subarray(g,Math.min(g+b,e)))),g+=b}catch(i){b=Math.floor(b/2)}return d.join("")}function g(a,b){for(var c=0;c<a.length;c++)b[c]=a[c];return b}var h=a("./support"),i=a("./compressions"),j=a("./nodeBuffer");c.string2binary=function(a){for(var b="",c=0;c<a.length;c++)b+=String.fromCharCode(255&a.charCodeAt(c));return b},c.arrayBuffer2Blob=function(a){c.checkSupport("blob");try{return new Blob([a],{type:"application/zip"})}catch(b){try{var 
d=window.BlobBuilder||window.WebKitBlobBuilder||window.MozBlobBuilder||window.MSBlobBuilder,e=new d;return e.append(a),e.getBlob("application/zip")}catch(b){throw new Error("Bug : can't construct the Blob.")}}},c.applyFromCharCode=f;var k={};k.string={string:d,array:function(a){return e(a,new Array(a.length))},arraybuffer:function(a){return k.string.uint8array(a).buffer},uint8array:function(a){return e(a,new Uint8Array(a.length))},nodebuffer:function(a){return e(a,j(a.length))}},k.array={string:f,array:d,arraybuffer:function(a){return new Uint8Array(a).buffer},uint8array:function(a){return new Uint8Array(a)},nodebuffer:function(a){return j(a)}},k.arraybuffer={string:function(a){return f(new Uint8Array(a))},array:function(a){return g(new Uint8Array(a),new Array(a.byteLength))},arraybuffer:d,uint8array:function(a){return new Uint8Array(a)},nodebuffer:function(a){return j(new Uint8Array(a))}},k.uint8array={string:f,array:function(a){return g(a,new Array(a.length))},arraybuffer:function(a){return a.buffer},uint8array:d,nodebuffer:function(a){return j(a)}},k.nodebuffer={string:f,array:function(a){return g(a,new Array(a.length))},arraybuffer:function(a){return k.nodebuffer.uint8array(a).buffer},uint8array:function(a){return g(a,new Uint8Array(a.length))},nodebuffer:d},c.transformTo=function(a,b){if(b||(b=""),!a)return b;c.checkSupport(a);var d=c.getTypeOf(b),e=k[d][a](b);return e},c.getTypeOf=function(a){return"string"==typeof a?"string":"[object Array]"===Object.prototype.toString.call(a)?"array":h.nodebuffer&&j.test(a)?"nodebuffer":h.uint8array&&a instanceof Uint8Array?"uint8array":h.arraybuffer&&a instanceof ArrayBuffer?"arraybuffer":void 0},c.checkSupport=function(a){var b=h[a.toLowerCase()];if(!b)throw new Error(a+" is not supported by this browser")},c.MAX_VALUE_16BITS=65535,c.MAX_VALUE_32BITS=-1,c.pretty=function(a){var b,c,d="";for(c=0;c<(a||"").length;c++)b=a.charCodeAt(c),d+="\\x"+(16>b?"0":"")+b.toString(16).toUpperCase();return 
d},c.findCompression=function(a){for(var b in i)if(i.hasOwnProperty(b)&&i[b].magic===a)return i[b];return null},c.isRegExp=function(a){return"[object RegExp]"===Object.prototype.toString.call(a)}},{"./compressions":3,"./nodeBuffer":11,"./support":17}],22:[function(a,b){"use strict";function c(a,b){this.files=[],this.loadOptions=b,a&&this.load(a)}var d=a("./stringReader"),e=a("./nodeBufferReader"),f=a("./uint8ArrayReader"),g=a("./utils"),h=a("./signature"),i=a("./zipEntry"),j=a("./support"),k=a("./object");c.prototype={checkSignature:function(a){var b=this.reader.readString(4);if(b!==a)throw new Error("Corrupted zip or bug : unexpected signature ("+g.pretty(b)+", expected "+g.pretty(a)+")")},readBlockEndOfCentral:function(){this.diskNumber=this.reader.readInt(2),this.diskWithCentralDirStart=this.reader.readInt(2),this.centralDirRecordsOnThisDisk=this.reader.readInt(2),this.centralDirRecords=this.reader.readInt(2),this.centralDirSize=this.reader.readInt(4),this.centralDirOffset=this.reader.readInt(4),this.zipCommentLength=this.reader.readInt(2),this.zipComment=this.reader.readString(this.zipCommentLength),this.zipComment=k.utf8decode(this.zipComment)},readBlockZip64EndOfCentral:function(){this.zip64EndOfCentralSize=this.reader.readInt(8),this.versionMadeBy=this.reader.readString(2),this.versionNeeded=this.reader.readInt(2),this.diskNumber=this.reader.readInt(4),this.diskWithCentralDirStart=this.reader.readInt(4),this.centralDirRecordsOnThisDisk=this.reader.readInt(8),this.centralDirRecords=this.reader.readInt(8),this.centralDirSize=this.reader.readInt(8),this.centralDirOffset=this.reader.readInt(8),this.zip64ExtensibleData={};for(var 
a,b,c,d=this.zip64EndOfCentralSize-44,e=0;d>e;)a=this.reader.readInt(2),b=this.reader.readInt(4),c=this.reader.readString(b),this.zip64ExtensibleData[a]={id:a,length:b,value:c}},readBlockZip64EndOfCentralLocator:function(){if(this.diskWithZip64CentralDirStart=this.reader.readInt(4),this.relativeOffsetEndOfZip64CentralDir=this.reader.readInt(8),this.disksCount=this.reader.readInt(4),this.disksCount>1)throw new Error("Multi-volumes zip are not supported")},readLocalFiles:function(){var a,b;for(a=0;a<this.files.length;a++)b=this.files[a],this.reader.setIndex(b.localHeaderOffset),this.checkSignature(h.LOCAL_FILE_HEADER),b.readLocalPart(this.reader),b.handleUTF8()},readCentralDir:function(){var a;for(this.reader.setIndex(this.centralDirOffset);this.reader.readString(4)===h.CENTRAL_FILE_HEADER;)a=new i({zip64:this.zip64},this.loadOptions),a.readCentralPart(this.reader),this.files.push(a)},readEndOfCentral:function(){var a=this.reader.lastIndexOfSignature(h.CENTRAL_DIRECTORY_END);if(-1===a)throw new Error("Corrupted zip : can't find end of central directory");if(this.reader.setIndex(a),this.checkSignature(h.CENTRAL_DIRECTORY_END),this.readBlockEndOfCentral(),this.diskNumber===g.MAX_VALUE_16BITS||this.diskWithCentralDirStart===g.MAX_VALUE_16BITS||this.centralDirRecordsOnThisDisk===g.MAX_VALUE_16BITS||this.centralDirRecords===g.MAX_VALUE_16BITS||this.centralDirSize===g.MAX_VALUE_32BITS||this.centralDirOffset===g.MAX_VALUE_32BITS){if(this.zip64=!0,a=this.reader.lastIndexOfSignature(h.ZIP64_CENTRAL_DIRECTORY_LOCATOR),-1===a)throw new Error("Corrupted zip : can't find the ZIP64 end of central directory locator");this.reader.setIndex(a),this.checkSignature(h.ZIP64_CENTRAL_DIRECTORY_LOCATOR),this.readBlockZip64EndOfCentralLocator(),this.reader.setIndex(this.relativeOffsetEndOfZip64CentralDir),this.checkSignature(h.ZIP64_CENTRAL_DIRECTORY_END),this.readBlockZip64EndOfCentral()}},prepareReader:function(a){var 
b=g.getTypeOf(a);this.reader="string"!==b||j.uint8array?"nodebuffer"===b?new e(a):new f(g.transformTo("uint8array",a)):new d(a,this.loadOptions.optimizedBinaryString)},load:function(a){this.prepareReader(a),this.readEndOfCentral(),this.readCentralDir(),this.readLocalFiles()}},b.exports=c},{"./nodeBufferReader":12,"./object":13,"./signature":14,"./stringReader":15,"./support":17,"./uint8ArrayReader":18,"./utils":21,"./zipEntry":23}],23:[function(a,b){"use strict";function c(a,b){this.options=a,this.loadOptions=b}var d=a("./stringReader"),e=a("./utils"),f=a("./compressedObject"),g=a("./object");c.prototype={isEncrypted:function(){return 1===(1&this.bitFlag)},useUTF8:function(){return 2048===(2048&this.bitFlag)},prepareCompressedContent:function(a,b,c){return function(){var d=a.index;a.setIndex(b);var e=a.readData(c);return a.setIndex(d),e}},prepareContent:function(a,b,c,d,f){return function(){var a=e.transformTo(d.uncompressInputType,this.getCompressedContent()),b=d.uncompress(a);if(b.length!==f)throw new Error("Bug : uncompressed data size mismatch");return b}},readLocalPart:function(a){var b,c;if(a.skip(22),this.fileNameLength=a.readInt(2),c=a.readInt(2),this.fileName=a.readString(this.fileNameLength),a.skip(c),-1==this.compressedSize||-1==this.uncompressedSize)throw new Error("Bug or corrupted zip : didn't get enough informations from the central directory (compressedSize == -1 || uncompressedSize == -1)");if(b=e.findCompression(this.compressionMethod),null===b)throw new Error("Corrupted zip : compression "+e.pretty(this.compressionMethod)+" unknown (inner file : "+this.fileName+")");if(this.decompressed=new 
f,this.decompressed.compressedSize=this.compressedSize,this.decompressed.uncompressedSize=this.uncompressedSize,this.decompressed.crc32=this.crc32,this.decompressed.compressionMethod=this.compressionMethod,this.decompressed.getCompressedContent=this.prepareCompressedContent(a,a.index,this.compressedSize,b),this.decompressed.getContent=this.prepareContent(a,a.index,this.compressedSize,b,this.uncompressedSize),this.loadOptions.checkCRC32&&(this.decompressed=e.transformTo("string",this.decompressed.getContent()),g.crc32(this.decompressed)!==this.crc32))throw new Error("Corrupted zip : CRC32 mismatch")},readCentralPart:function(a){if(this.versionMadeBy=a.readString(2),this.versionNeeded=a.readInt(2),this.bitFlag=a.readInt(2),this.compressionMethod=a.readString(2),this.date=a.readDate(),this.crc32=a.readInt(4),this.compressedSize=a.readInt(4),this.uncompressedSize=a.readInt(4),this.fileNameLength=a.readInt(2),this.extraFieldsLength=a.readInt(2),this.fileCommentLength=a.readInt(2),this.diskNumberStart=a.readInt(2),this.internalFileAttributes=a.readInt(2),this.externalFileAttributes=a.readInt(4),this.localHeaderOffset=a.readInt(4),this.isEncrypted())throw new Error("Encrypted zip are not supported");this.fileName=a.readString(this.fileNameLength),this.readExtraFields(a),this.parseZIP64ExtraField(a),this.fileComment=a.readString(this.fileCommentLength),this.dir=16&this.externalFileAttributes?!0:!1},parseZIP64ExtraField:function(){if(this.extraFields[1]){var a=new d(this.extraFields[1].value);this.uncompressedSize===e.MAX_VALUE_32BITS&&(this.uncompressedSize=a.readInt(8)),this.compressedSize===e.MAX_VALUE_32BITS&&(this.compressedSize=a.readInt(8)),this.localHeaderOffset===e.MAX_VALUE_32BITS&&(this.localHeaderOffset=a.readInt(8)),this.diskNumberStart===e.MAX_VALUE_32BITS&&(this.diskNumberStart=a.readInt(4))}},readExtraFields:function(a){var 
b,c,d,e=a.index;for(this.extraFields=this.extraFields||{};a.index<e+this.extraFieldsLength;)b=a.readInt(2),c=a.readInt(2),d=a.readString(c),this.extraFields[b]={id:b,length:c,value:d}},handleUTF8:function(){if(this.useUTF8())this.fileName=g.utf8decode(this.fileName),this.fileComment=g.utf8decode(this.fileComment);else{var a=this.findExtraFieldUnicodePath();null!==a&&(this.fileName=a);var b=this.findExtraFieldUnicodeComment();null!==b&&(this.fileComment=b)}},findExtraFieldUnicodePath:function(){var a=this.extraFields[28789];if(a){var b=new d(a.value);return 1!==b.readInt(1)?null:g.crc32(this.fileName)!==b.readInt(4)?null:g.utf8decode(b.readString(a.length-5))}return null},findExtraFieldUnicodeComment:function(){var a=this.extraFields[25461];if(a){var b=new d(a.value);return 1!==b.readInt(1)?null:g.crc32(this.fileComment)!==b.readInt(4)?null:g.utf8decode(b.readString(a.length-5))}return null}},b.exports=c},{"./compressedObject":2,"./object":13,"./stringReader":15,"./utils":21}],24:[function(a,b){"use strict";var c=a("./lib/utils/common").assign,d=a("./lib/deflate"),e=a("./lib/inflate"),f=a("./lib/zlib/constants"),g={};c(g,d,e,f),b.exports=g},{"./lib/deflate":25,"./lib/inflate":26,"./lib/utils/common":27,"./lib/zlib/constants":30}],25:[function(a,b,c){"use strict";function d(a,b){var c=new s(b);if(c.push(a,!0),c.err)throw c.msg;return c.result}function e(a,b){return b=b||{},b.raw=!0,d(a,b)}function f(a,b){return b=b||{},b.gzip=!0,d(a,b)}var g=a("./zlib/deflate.js"),h=a("./utils/common"),i=a("./utils/strings"),j=a("./zlib/messages"),k=a("./zlib/zstream"),l=0,m=4,n=0,o=1,p=-1,q=0,r=8,s=function(a){this.options=h.assign({level:p,method:r,chunkSize:16384,windowBits:15,memLevel:8,strategy:q,to:""},a||{});var b=this.options;b.raw&&b.windowBits>0?b.windowBits=-b.windowBits:b.gzip&&b.windowBits>0&&b.windowBits<16&&(b.windowBits+=16),this.err=0,this.msg="",this.ended=!1,this.chunks=[],this.strm=new k,this.strm.avail_out=0;var 
c=g.deflateInit2(this.strm,b.level,b.method,b.windowBits,b.memLevel,b.strategy);if(c!==n)throw new Error(j[c]);b.header&&g.deflateSetHeader(this.strm,b.header)};s.prototype.push=function(a,b){var c,d,e=this.strm,f=this.options.chunkSize;if(this.ended)return!1;d=b===~~b?b:b===!0?m:l,e.input="string"==typeof a?i.string2buf(a):a,e.next_in=0,e.avail_in=e.input.length;do{if(0===e.avail_out&&(e.output=new h.Buf8(f),e.next_out=0,e.avail_out=f),c=g.deflate(e,d),c!==o&&c!==n)return this.onEnd(c),this.ended=!0,!1;(0===e.avail_out||0===e.avail_in&&d===m)&&this.onData("string"===this.options.to?i.buf2binstring(h.shrinkBuf(e.output,e.next_out)):h.shrinkBuf(e.output,e.next_out))}while((e.avail_in>0||0===e.avail_out)&&c!==o);return d===m?(c=g.deflateEnd(this.strm),this.onEnd(c),this.ended=!0,c===n):!0},s.prototype.onData=function(a){this.chunks.push(a)},s.prototype.onEnd=function(a){a===n&&(this.result="string"===this.options.to?this.chunks.join(""):h.flattenChunks(this.chunks)),this.chunks=[],this.err=a,this.msg=this.strm.msg},c.Deflate=s,c.deflate=d,c.deflateRaw=e,c.gzip=f},{"./utils/common":27,"./utils/strings":28,"./zlib/deflate.js":32,"./zlib/messages":37,"./zlib/zstream":39}],26:[function(a,b,c){"use strict";function d(a,b){var c=new m(b);if(c.push(a,!0),c.err)throw c.msg;return c.result}function e(a,b){return b=b||{},b.raw=!0,d(a,b)}var f=a("./zlib/inflate.js"),g=a("./utils/common"),h=a("./utils/strings"),i=a("./zlib/constants"),j=a("./zlib/messages"),k=a("./zlib/zstream"),l=a("./zlib/gzheader"),m=function(a){this.options=g.assign({chunkSize:16384,windowBits:0,to:""},a||{});var b=this.options;b.raw&&b.windowBits>=0&&b.windowBits<16&&(b.windowBits=-b.windowBits,0===b.windowBits&&(b.windowBits=-15)),!(b.windowBits>=0&&b.windowBits<16)||a&&a.windowBits||(b.windowBits+=32),b.windowBits>15&&b.windowBits<48&&0===(15&b.windowBits)&&(b.windowBits|=15),this.err=0,this.msg="",this.ended=!1,this.chunks=[],this.strm=new k,this.strm.avail_out=0;var 
c=f.inflateInit2(this.strm,b.windowBits);if(c!==i.Z_OK)throw new Error(j[c]);this.header=new l,f.inflateGetHeader(this.strm,this.header)};m.prototype.push=function(a,b){var c,d,e,j,k,l=this.strm,m=this.options.chunkSize;if(this.ended)return!1;d=b===~~b?b:b===!0?i.Z_FINISH:i.Z_NO_FLUSH,l.input="string"==typeof a?h.binstring2buf(a):a,l.next_in=0,l.avail_in=l.input.length;do{if(0===l.avail_out&&(l.output=new g.Buf8(m),l.next_out=0,l.avail_out=m),c=f.inflate(l,i.Z_NO_FLUSH),c!==i.Z_STREAM_END&&c!==i.Z_OK)return this.onEnd(c),this.ended=!0,!1;l.next_out&&(0===l.avail_out||c===i.Z_STREAM_END||0===l.avail_in&&d===i.Z_FINISH)&&("string"===this.options.to?(e=h.utf8border(l.output,l.next_out),j=l.next_out-e,k=h.buf2string(l.output,e),l.next_out=j,l.avail_out=m-j,j&&g.arraySet(l.output,l.output,e,j,0),this.onData(k)):this.onData(g.shrinkBuf(l.output,l.next_out)))}while(l.avail_in>0&&c!==i.Z_STREAM_END);return c===i.Z_STREAM_END&&(d=i.Z_FINISH),d===i.Z_FINISH?(c=f.inflateEnd(this.strm),this.onEnd(c),this.ended=!0,c===i.Z_OK):!0},m.prototype.onData=function(a){this.chunks.push(a)},m.prototype.onEnd=function(a){a===i.Z_OK&&(this.result="string"===this.options.to?this.chunks.join(""):g.flattenChunks(this.chunks)),this.chunks=[],this.err=a,this.msg=this.strm.msg},c.Inflate=m,c.inflate=d,c.inflateRaw=e,c.ungzip=d},{"./utils/common":27,"./utils/strings":28,"./zlib/constants":30,"./zlib/gzheader":33,"./zlib/inflate.js":35,"./zlib/messages":37,"./zlib/zstream":39}],27:[function(a,b,c){"use strict";var d="undefined"!=typeof Uint8Array&&"undefined"!=typeof Uint16Array&&"undefined"!=typeof Int32Array;c.assign=function(a){for(var b=Array.prototype.slice.call(arguments,1);b.length;){var c=b.shift();if(c){if("object"!=typeof c)throw new TypeError(c+"must be non-object");for(var d in c)c.hasOwnProperty(d)&&(a[d]=c[d])}}return a},c.shrinkBuf=function(a,b){return a.length===b?a:a.subarray?a.subarray(0,b):(a.length=b,a)};var e={arraySet:function(a,b,c,d,e){if(b.subarray&&a.subarray)return void 
a.set(b.subarray(c,c+d),e);for(var f=0;d>f;f++)a[e+f]=b[c+f]},flattenChunks:function(a){var b,c,d,e,f,g;for(d=0,b=0,c=a.length;c>b;b++)d+=a[b].length;for(g=new Uint8Array(d),e=0,b=0,c=a.length;c>b;b++)f=a[b],g.set(f,e),e+=f.length;return g}},f={arraySet:function(a,b,c,d,e){for(var f=0;d>f;f++)a[e+f]=b[c+f]},flattenChunks:function(a){return[].concat.apply([],a)}};c.setTyped=function(a){a?(c.Buf8=Uint8Array,c.Buf16=Uint16Array,c.Buf32=Int32Array,c.assign(c,e)):(c.Buf8=Array,c.Buf16=Array,c.Buf32=Array,c.assign(c,f))},c.setTyped(d)},{}],28:[function(a,b,c){"use strict";function d(a,b){if(65537>b&&(a.subarray&&g||!a.subarray&&f))return String.fromCharCode.apply(null,e.shrinkBuf(a,b));for(var c="",d=0;b>d;d++)c+=String.fromCharCode(a[d]);return c}var e=a("./common"),f=!0,g=!0;try{String.fromCharCode.apply(null,[0])}catch(h){f=!1}try{String.fromCharCode.apply(null,new Uint8Array(1))}catch(h){g=!1}for(var i=new e.Buf8(256),j=0;256>j;j++)i[j]=j>=252?6:j>=248?5:j>=240?4:j>=224?3:j>=192?2:1;i[254]=i[254]=1,c.string2buf=function(a){var b,c,d,f,g,h=a.length,i=0;for(f=0;h>f;f++)c=a.charCodeAt(f),55296===(64512&c)&&h>f+1&&(d=a.charCodeAt(f+1),56320===(64512&d)&&(c=65536+(c-55296<<10)+(d-56320),f++)),i+=128>c?1:2048>c?2:65536>c?3:4;for(b=new e.Buf8(i),g=0,f=0;i>g;f++)c=a.charCodeAt(f),55296===(64512&c)&&h>f+1&&(d=a.charCodeAt(f+1),56320===(64512&d)&&(c=65536+(c-55296<<10)+(d-56320),f++)),128>c?b[g++]=c:2048>c?(b[g++]=192|c>>>6,b[g++]=128|63&c):65536>c?(b[g++]=224|c>>>12,b[g++]=128|c>>>6&63,b[g++]=128|63&c):(b[g++]=240|c>>>18,b[g++]=128|c>>>12&63,b[g++]=128|c>>>6&63,b[g++]=128|63&c);return b},c.buf2binstring=function(a){return d(a,a.length)},c.binstring2buf=function(a){for(var b=new e.Buf8(a.length),c=0,d=b.length;d>c;c++)b[c]=a.charCodeAt(c);return b},c.buf2string=function(a,b){var c,e,f,g,h=b||a.length,j=new Array(2*h);for(e=0,c=0;h>c;)if(f=a[c++],128>f)j[e++]=f;else 
if(g=i[f],g>4)j[e++]=65533,c+=g-1;else{for(f&=2===g?31:3===g?15:7;g>1&&h>c;)f=f<<6|63&a[c++],g--;g>1?j[e++]=65533:65536>f?j[e++]=f:(f-=65536,j[e++]=55296|f>>10&1023,j[e++]=56320|1023&f)}return d(j,e)},c.utf8border=function(a,b){var c;for(b=b||a.length,b>a.length&&(b=a.length),c=b-1;c>=0&&128===(192&a[c]);)c--;return 0>c?b:0===c?b:c+i[a[c]]>b?c:b}},{"./common":27}],29:[function(a,b){"use strict";function c(a,b,c,d){for(var e=65535&a|0,f=a>>>16&65535|0,g=0;0!==c;){g=c>2e3?2e3:c,c-=g;do e=e+b[d++]|0,f=f+e|0;while(--g);e%=65521,f%=65521}return e|f<<16|0}b.exports=c},{}],30:[function(a,b){b.exports={Z_NO_FLUSH:0,Z_PARTIAL_FLUSH:1,Z_SYNC_FLUSH:2,Z_FULL_FLUSH:3,Z_FINISH:4,Z_BLOCK:5,Z_TREES:6,Z_OK:0,Z_STREAM_END:1,Z_NEED_DICT:2,Z_ERRNO:-1,Z_STREAM_ERROR:-2,Z_DATA_ERROR:-3,Z_BUF_ERROR:-5,Z_NO_COMPRESSION:0,Z_BEST_SPEED:1,Z_BEST_COMPRESSION:9,Z_DEFAULT_COMPRESSION:-1,Z_FILTERED:1,Z_HUFFMAN_ONLY:2,Z_RLE:3,Z_FIXED:4,Z_DEFAULT_STRATEGY:0,Z_BINARY:0,Z_TEXT:1,Z_UNKNOWN:2,Z_DEFLATED:8}},{}],31:[function(a,b){"use strict";function c(){for(var a,b=[],c=0;256>c;c++){a=c;for(var d=0;8>d;d++)a=1&a?3988292384^a>>>1:a>>>1;b[c]=a}return b}function d(a,b,c,d){var f=e,g=d+c;a=-1^a;for(var h=d;g>h;h++)a=a>>>8^f[255&(a^b[h])];return-1^a}var e=c();b.exports=d},{}],32:[function(a,b,c){"use strict";function d(a,b){return a.msg=G[b],b}function e(a){return(a<<1)-(a>4?9:0)}function f(a){for(var b=a.length;--b>=0;)a[b]=0}function g(a){var b=a.state,c=b.pending;c>a.avail_out&&(c=a.avail_out),0!==c&&(C.arraySet(a.output,b.pending_buf,b.pending_out,c,a.next_out),a.next_out+=c,b.pending_out+=c,a.total_out+=c,a.avail_out-=c,b.pending-=c,0===b.pending&&(b.pending_out=0))}function h(a,b){D._tr_flush_block(a,a.block_start>=0?a.block_start:-1,a.strstart-a.block_start,b),a.block_start=a.strstart,g(a.strm)}function i(a,b){a.pending_buf[a.pending++]=b}function j(a,b){a.pending_buf[a.pending++]=b>>>8&255,a.pending_buf[a.pending++]=255&b}function k(a,b,c,d){var e=a.avail_in;return 
e>d&&(e=d),0===e?0:(a.avail_in-=e,C.arraySet(b,a.input,a.next_in,e,c),1===a.state.wrap?a.adler=E(a.adler,b,e,c):2===a.state.wrap&&(a.adler=F(a.adler,b,e,c)),a.next_in+=e,a.total_in+=e,e)}function l(a,b){var c,d,e=a.max_chain_length,f=a.strstart,g=a.prev_length,h=a.nice_match,i=a.strstart>a.w_size-jb?a.strstart-(a.w_size-jb):0,j=a.window,k=a.w_mask,l=a.prev,m=a.strstart+ib,n=j[f+g-1],o=j[f+g];a.prev_length>=a.good_match&&(e>>=2),h>a.lookahead&&(h=a.lookahead);do if(c=b,j[c+g]===o&&j[c+g-1]===n&&j[c]===j[f]&&j[++c]===j[f+1]){f+=2,c++;do;while(j[++f]===j[++c]&&j[++f]===j[++c]&&j[++f]===j[++c]&&j[++f]===j[++c]&&j[++f]===j[++c]&&j[++f]===j[++c]&&j[++f]===j[++c]&&j[++f]===j[++c]&&m>f);if(d=ib-(m-f),f=m-ib,d>g){if(a.match_start=b,g=d,d>=h)break;n=j[f+g-1],o=j[f+g]}}while((b=l[b&k])>i&&0!==--e);return g<=a.lookahead?g:a.lookahead}function m(a){var b,c,d,e,f,g=a.w_size;do{if(e=a.window_size-a.lookahead-a.strstart,a.strstart>=g+(g-jb)){C.arraySet(a.window,a.window,g,g,0),a.match_start-=g,a.strstart-=g,a.block_start-=g,c=a.hash_size,b=c;do d=a.head[--b],a.head[b]=d>=g?d-g:0;while(--c);c=g,b=c;do d=a.prev[--b],a.prev[b]=d>=g?d-g:0;while(--c);e+=g}if(0===a.strm.avail_in)break;if(c=k(a.strm,a.window,a.strstart+a.lookahead,e),a.lookahead+=c,a.lookahead+a.insert>=hb)for(f=a.strstart-a.insert,a.ins_h=a.window[f],a.ins_h=(a.ins_h<<a.hash_shift^a.window[f+1])&a.hash_mask;a.insert&&(a.ins_h=(a.ins_h<<a.hash_shift^a.window[f+hb-1])&a.hash_mask,a.prev[f&a.w_mask]=a.head[a.ins_h],a.head[a.ins_h]=f,f++,a.insert--,!(a.lookahead+a.insert<hb)););}while(a.lookahead<jb&&0!==a.strm.avail_in)}function n(a,b){var c=65535;for(c>a.pending_buf_size-5&&(c=a.pending_buf_size-5);;){if(a.lookahead<=1){if(m(a),0===a.lookahead&&b===H)return sb;if(0===a.lookahead)break}a.strstart+=a.lookahead,a.lookahead=0;var d=a.block_start+c;if((0===a.strstart||a.strstart>=d)&&(a.lookahead=a.strstart-d,a.strstart=d,h(a,!1),0===a.strm.avail_out))return 
sb;if(a.strstart-a.block_start>=a.w_size-jb&&(h(a,!1),0===a.strm.avail_out))return sb}return a.insert=0,b===K?(h(a,!0),0===a.strm.avail_out?ub:vb):a.strstart>a.block_start&&(h(a,!1),0===a.strm.avail_out)?sb:sb}function o(a,b){for(var c,d;;){if(a.lookahead<jb){if(m(a),a.lookahead<jb&&b===H)return sb;if(0===a.lookahead)break}if(c=0,a.lookahead>=hb&&(a.ins_h=(a.ins_h<<a.hash_shift^a.window[a.strstart+hb-1])&a.hash_mask,c=a.prev[a.strstart&a.w_mask]=a.head[a.ins_h],a.head[a.ins_h]=a.strstart),0!==c&&a.strstart-c<=a.w_size-jb&&(a.match_length=l(a,c)),a.match_length>=hb)if(d=D._tr_tally(a,a.strstart-a.match_start,a.match_length-hb),a.lookahead-=a.match_length,a.match_length<=a.max_lazy_match&&a.lookahead>=hb){a.match_length--;do a.strstart++,a.ins_h=(a.ins_h<<a.hash_shift^a.window[a.strstart+hb-1])&a.hash_mask,c=a.prev[a.strstart&a.w_mask]=a.head[a.ins_h],a.head[a.ins_h]=a.strstart;while(0!==--a.match_length);a.strstart++}else a.strstart+=a.match_length,a.match_length=0,a.ins_h=a.window[a.strstart],a.ins_h=(a.ins_h<<a.hash_shift^a.window[a.strstart+1])&a.hash_mask;else d=D._tr_tally(a,0,a.window[a.strstart]),a.lookahead--,a.strstart++;if(d&&(h(a,!1),0===a.strm.avail_out))return sb}return a.insert=a.strstart<hb-1?a.strstart:hb-1,b===K?(h(a,!0),0===a.strm.avail_out?ub:vb):a.last_lit&&(h(a,!1),0===a.strm.avail_out)?sb:tb}function p(a,b){for(var c,d,e;;){if(a.lookahead<jb){if(m(a),a.lookahead<jb&&b===H)return 
sb;if(0===a.lookahead)break}if(c=0,a.lookahead>=hb&&(a.ins_h=(a.ins_h<<a.hash_shift^a.window[a.strstart+hb-1])&a.hash_mask,c=a.prev[a.strstart&a.w_mask]=a.head[a.ins_h],a.head[a.ins_h]=a.strstart),a.prev_length=a.match_length,a.prev_match=a.match_start,a.match_length=hb-1,0!==c&&a.prev_length<a.max_lazy_match&&a.strstart-c<=a.w_size-jb&&(a.match_length=l(a,c),a.match_length<=5&&(a.strategy===S||a.match_length===hb&&a.strstart-a.match_start>4096)&&(a.match_length=hb-1)),a.prev_length>=hb&&a.match_length<=a.prev_length){e=a.strstart+a.lookahead-hb,d=D._tr_tally(a,a.strstart-1-a.prev_match,a.prev_length-hb),a.lookahead-=a.prev_length-1,a.prev_length-=2;do++a.strstart<=e&&(a.ins_h=(a.ins_h<<a.hash_shift^a.window[a.strstart+hb-1])&a.hash_mask,c=a.prev[a.strstart&a.w_mask]=a.head[a.ins_h],a.head[a.ins_h]=a.strstart);while(0!==--a.prev_length);if(a.match_available=0,a.match_length=hb-1,a.strstart++,d&&(h(a,!1),0===a.strm.avail_out))return sb}else if(a.match_available){if(d=D._tr_tally(a,0,a.window[a.strstart-1]),d&&h(a,!1),a.strstart++,a.lookahead--,0===a.strm.avail_out)return sb}else a.match_available=1,a.strstart++,a.lookahead--}return a.match_available&&(d=D._tr_tally(a,0,a.window[a.strstart-1]),a.match_available=0),a.insert=a.strstart<hb-1?a.strstart:hb-1,b===K?(h(a,!0),0===a.strm.avail_out?ub:vb):a.last_lit&&(h(a,!1),0===a.strm.avail_out)?sb:tb}function q(a,b){for(var c,d,e,f,g=a.window;;){if(a.lookahead<=ib){if(m(a),a.lookahead<=ib&&b===H)return 
sb;if(0===a.lookahead)break}if(a.match_length=0,a.lookahead>=hb&&a.strstart>0&&(e=a.strstart-1,d=g[e],d===g[++e]&&d===g[++e]&&d===g[++e])){f=a.strstart+ib;do;while(d===g[++e]&&d===g[++e]&&d===g[++e]&&d===g[++e]&&d===g[++e]&&d===g[++e]&&d===g[++e]&&d===g[++e]&&f>e);a.match_length=ib-(f-e),a.match_length>a.lookahead&&(a.match_length=a.lookahead)}if(a.match_length>=hb?(c=D._tr_tally(a,1,a.match_length-hb),a.lookahead-=a.match_length,a.strstart+=a.match_length,a.match_length=0):(c=D._tr_tally(a,0,a.window[a.strstart]),a.lookahead--,a.strstart++),c&&(h(a,!1),0===a.strm.avail_out))return sb}return a.insert=0,b===K?(h(a,!0),0===a.strm.avail_out?ub:vb):a.last_lit&&(h(a,!1),0===a.strm.avail_out)?sb:tb}function r(a,b){for(var c;;){if(0===a.lookahead&&(m(a),0===a.lookahead)){if(b===H)return sb;break}if(a.match_length=0,c=D._tr_tally(a,0,a.window[a.strstart]),a.lookahead--,a.strstart++,c&&(h(a,!1),0===a.strm.avail_out))return sb}return a.insert=0,b===K?(h(a,!0),0===a.strm.avail_out?ub:vb):a.last_lit&&(h(a,!1),0===a.strm.avail_out)?sb:tb}function s(a){a.window_size=2*a.w_size,f(a.head),a.max_lazy_match=B[a.level].max_lazy,a.good_match=B[a.level].good_length,a.nice_match=B[a.level].nice_length,a.max_chain_length=B[a.level].max_chain,a.strstart=0,a.block_start=0,a.lookahead=0,a.insert=0,a.match_length=a.prev_length=hb-1,a.match_available=0,a.ins_h=0}function 
t(){this.strm=null,this.status=0,this.pending_buf=null,this.pending_buf_size=0,this.pending_out=0,this.pending=0,this.wrap=0,this.gzhead=null,this.gzindex=0,this.method=Y,this.last_flush=-1,this.w_size=0,this.w_bits=0,this.w_mask=0,this.window=null,this.window_size=0,this.prev=null,this.head=null,this.ins_h=0,this.hash_size=0,this.hash_bits=0,this.hash_mask=0,this.hash_shift=0,this.block_start=0,this.match_length=0,this.prev_match=0,this.match_available=0,this.strstart=0,this.match_start=0,this.lookahead=0,this.prev_length=0,this.max_chain_length=0,this.max_lazy_match=0,this.level=0,this.strategy=0,this.good_match=0,this.nice_match=0,this.dyn_ltree=new C.Buf16(2*fb),this.dyn_dtree=new C.Buf16(2*(2*db+1)),this.bl_tree=new C.Buf16(2*(2*eb+1)),f(this.dyn_ltree),f(this.dyn_dtree),f(this.bl_tree),this.l_desc=null,this.d_desc=null,this.bl_desc=null,this.bl_count=new C.Buf16(gb+1),this.heap=new C.Buf16(2*cb+1),f(this.heap),this.heap_len=0,this.heap_max=0,this.depth=new C.Buf16(2*cb+1),f(this.depth),this.l_buf=0,this.lit_bufsize=0,this.last_lit=0,this.d_buf=0,this.opt_len=0,this.static_len=0,this.matches=0,this.insert=0,this.bi_buf=0,this.bi_valid=0}function u(a){var b;return a&&a.state?(a.total_in=a.total_out=0,a.data_type=X,b=a.state,b.pending=0,b.pending_out=0,b.wrap<0&&(b.wrap=-b.wrap),b.status=b.wrap?lb:qb,a.adler=2===b.wrap?0:1,b.last_flush=H,D._tr_init(b),M):d(a,O)}function v(a){var b=u(a);return b===M&&s(a.state),b}function w(a,b){return a&&a.state?2!==a.state.wrap?O:(a.state.gzhead=b,M):O}function x(a,b,c,e,f,g){if(!a)return O;var h=1;if(b===R&&(b=6),0>e?(h=0,e=-e):e>15&&(h=2,e-=16),1>f||f>Z||c!==Y||8>e||e>15||0>b||b>9||0>g||g>V)return d(a,O);8===e&&(e=9);var i=new t;return a.state=i,i.strm=a,i.wrap=h,i.gzhead=null,i.w_bits=e,i.w_size=1<<i.w_bits,i.w_mask=i.w_size-1,i.hash_bits=f+7,i.hash_size=1<<i.hash_bits,i.hash_mask=i.hash_size-1,i.hash_shift=~~((i.hash_bits+hb-1)/hb),i.window=new C.Buf8(2*i.w_size),i.head=new C.Buf16(i.hash_size),i.prev=new 
C.Buf16(i.w_size),i.lit_bufsize=1<<f+6,i.pending_buf_size=4*i.lit_bufsize,i.pending_buf=new C.Buf8(i.pending_buf_size),i.d_buf=i.lit_bufsize>>1,i.l_buf=3*i.lit_bufsize,i.level=b,i.strategy=g,i.method=c,v(a)}function y(a,b){return x(a,b,Y,$,_,W)}function z(a,b){var c,h,k,l;if(!a||!a.state||b>L||0>b)return a?d(a,O):O;if(h=a.state,!a.output||!a.input&&0!==a.avail_in||h.status===rb&&b!==K)return d(a,0===a.avail_out?Q:O);if(h.strm=a,c=h.last_flush,h.last_flush=b,h.status===lb)if(2===h.wrap)a.adler=0,i(h,31),i(h,139),i(h,8),h.gzhead?(i(h,(h.gzhead.text?1:0)+(h.gzhead.hcrc?2:0)+(h.gzhead.extra?4:0)+(h.gzhead.name?8:0)+(h.gzhead.comment?16:0)),i(h,255&h.gzhead.time),i(h,h.gzhead.time>>8&255),i(h,h.gzhead.time>>16&255),i(h,h.gzhead.time>>24&255),i(h,9===h.level?2:h.strategy>=T||h.level<2?4:0),i(h,255&h.gzhead.os),h.gzhead.extra&&h.gzhead.extra.length&&(i(h,255&h.gzhead.extra.length),i(h,h.gzhead.extra.length>>8&255)),h.gzhead.hcrc&&(a.adler=F(a.adler,h.pending_buf,h.pending,0)),h.gzindex=0,h.status=mb):(i(h,0),i(h,0),i(h,0),i(h,0),i(h,0),i(h,9===h.level?2:h.strategy>=T||h.level<2?4:0),i(h,wb),h.status=qb);else{var m=Y+(h.w_bits-8<<4)<<8,n=-1;n=h.strategy>=T||h.level<2?0:h.level<6?1:6===h.level?2:3,m|=n<<6,0!==h.strstart&&(m|=kb),m+=31-m%31,h.status=qb,j(h,m),0!==h.strstart&&(j(h,a.adler>>>16),j(h,65535&a.adler)),a.adler=1}if(h.status===mb)if(h.gzhead.extra){for(k=h.pending;h.gzindex<(65535&h.gzhead.extra.length)&&(h.pending!==h.pending_buf_size||(h.gzhead.hcrc&&h.pending>k&&(a.adler=F(a.adler,h.pending_buf,h.pending-k,k)),g(a),k=h.pending,h.pending!==h.pending_buf_size));)i(h,255&h.gzhead.extra[h.gzindex]),h.gzindex++;h.gzhead.hcrc&&h.pending>k&&(a.adler=F(a.adler,h.pending_buf,h.pending-k,k)),h.gzindex===h.gzhead.extra.length&&(h.gzindex=0,h.status=nb)}else 
h.status=nb;if(h.status===nb)if(h.gzhead.name){k=h.pending;do{if(h.pending===h.pending_buf_size&&(h.gzhead.hcrc&&h.pending>k&&(a.adler=F(a.adler,h.pending_buf,h.pending-k,k)),g(a),k=h.pending,h.pending===h.pending_buf_size)){l=1;break}l=h.gzindex<h.gzhead.name.length?255&h.gzhead.name.charCodeAt(h.gzindex++):0,i(h,l)}while(0!==l);h.gzhead.hcrc&&h.pending>k&&(a.adler=F(a.adler,h.pending_buf,h.pending-k,k)),0===l&&(h.gzindex=0,h.status=ob)}else h.status=ob;if(h.status===ob)if(h.gzhead.comment){k=h.pending;do{if(h.pending===h.pending_buf_size&&(h.gzhead.hcrc&&h.pending>k&&(a.adler=F(a.adler,h.pending_buf,h.pending-k,k)),g(a),k=h.pending,h.pending===h.pending_buf_size)){l=1;break}l=h.gzindex<h.gzhead.comment.length?255&h.gzhead.comment.charCodeAt(h.gzindex++):0,i(h,l)}while(0!==l);h.gzhead.hcrc&&h.pending>k&&(a.adler=F(a.adler,h.pending_buf,h.pending-k,k)),0===l&&(h.status=pb)}else h.status=pb;if(h.status===pb&&(h.gzhead.hcrc?(h.pending+2>h.pending_buf_size&&g(a),h.pending+2<=h.pending_buf_size&&(i(h,255&a.adler),i(h,a.adler>>8&255),a.adler=0,h.status=qb)):h.status=qb),0!==h.pending){if(g(a),0===a.avail_out)return h.last_flush=-1,M}else if(0===a.avail_in&&e(b)<=e(c)&&b!==K)return d(a,Q);if(h.status===rb&&0!==a.avail_in)return d(a,Q);if(0!==a.avail_in||0!==h.lookahead||b!==H&&h.status!==rb){var o=h.strategy===T?r(h,b):h.strategy===U?q(h,b):B[h.level].func(h,b);if((o===ub||o===vb)&&(h.status=rb),o===sb||o===ub)return 0===a.avail_out&&(h.last_flush=-1),M;if(o===tb&&(b===I?D._tr_align(h):b!==L&&(D._tr_stored_block(h,0,0,!1),b===J&&(f(h.head),0===h.lookahead&&(h.strstart=0,h.block_start=0,h.insert=0))),g(a),0===a.avail_out))return h.last_flush=-1,M}return b!==K?M:h.wrap<=0?N:(2===h.wrap?(i(h,255&a.adler),i(h,a.adler>>8&255),i(h,a.adler>>16&255),i(h,a.adler>>24&255),i(h,255&a.total_in),i(h,a.total_in>>8&255),i(h,a.total_in>>16&255),i(h,a.total_in>>24&255)):(j(h,a.adler>>>16),j(h,65535&a.adler)),g(a),h.wrap>0&&(h.wrap=-h.wrap),0!==h.pending?M:N)}function A(a){var b;return 
a&&a.state?(b=a.state.status,b!==lb&&b!==mb&&b!==nb&&b!==ob&&b!==pb&&b!==qb&&b!==rb?d(a,O):(a.state=null,b===qb?d(a,P):M)):O}var B,C=a("../utils/common"),D=a("./trees"),E=a("./adler32"),F=a("./crc32"),G=a("./messages"),H=0,I=1,J=3,K=4,L=5,M=0,N=1,O=-2,P=-3,Q=-5,R=-1,S=1,T=2,U=3,V=4,W=0,X=2,Y=8,Z=9,$=15,_=8,ab=29,bb=256,cb=bb+1+ab,db=30,eb=19,fb=2*cb+1,gb=15,hb=3,ib=258,jb=ib+hb+1,kb=32,lb=42,mb=69,nb=73,ob=91,pb=103,qb=113,rb=666,sb=1,tb=2,ub=3,vb=4,wb=3,xb=function(a,b,c,d,e){this.good_length=a,this.max_lazy=b,this.nice_length=c,this.max_chain=d,this.func=e};B=[new xb(0,0,0,0,n),new xb(4,4,8,4,o),new xb(4,5,16,8,o),new xb(4,6,32,32,o),new xb(4,4,16,16,p),new xb(8,16,32,32,p),new xb(8,16,128,128,p),new xb(8,32,128,256,p),new xb(32,128,258,1024,p),new xb(32,258,258,4096,p)],c.deflateInit=y,c.deflateInit2=x,c.deflateReset=v,c.deflateResetKeep=u,c.deflateSetHeader=w,c.deflate=z,c.deflateEnd=A,c.deflateInfo="pako deflate (from Nodeca project)"},{"../utils/common":27,"./adler32":29,"./crc32":31,"./messages":37,"./trees":38}],33:[function(a,b){"use strict";function c(){this.text=0,this.time=0,this.xflags=0,this.os=0,this.extra=null,this.extra_len=0,this.name="",this.comment="",this.hcrc=0,this.done=!1}b.exports=c},{}],34:[function(a,b){"use strict";var c=30,d=12;b.exports=function(a,b){var e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u,v,w,x,y,z,A,B,C;e=a.state,f=a.next_in,B=a.input,g=f+(a.avail_in-5),h=a.next_out,C=a.output,i=h-(b-a.avail_out),j=h+(a.avail_out-257),k=e.dmax,l=e.wsize,m=e.whave,n=e.wnext,o=e.window,p=e.hold,q=e.bits,r=e.lencode,s=e.distcode,t=(1<<e.lenbits)-1,u=(1<<e.distbits)-1;a:do{15>q&&(p+=B[f++]<<q,q+=8,p+=B[f++]<<q,q+=8),v=r[p&t];b:for(;;){if(w=v>>>24,p>>>=w,q-=w,w=v>>>16&255,0===w)C[h++]=65535&v;else{if(!(16&w)){if(0===(64&w)){v=r[(65535&v)+(p&(1<<w)-1)];continue b}if(32&w){e.mode=d;break a}a.msg="invalid literal/length code",e.mode=c;break 
a}x=65535&v,w&=15,w&&(w>q&&(p+=B[f++]<<q,q+=8),x+=p&(1<<w)-1,p>>>=w,q-=w),15>q&&(p+=B[f++]<<q,q+=8,p+=B[f++]<<q,q+=8),v=s[p&u];c:for(;;){if(w=v>>>24,p>>>=w,q-=w,w=v>>>16&255,!(16&w)){if(0===(64&w)){v=s[(65535&v)+(p&(1<<w)-1)];continue c}a.msg="invalid distance code",e.mode=c;break a}if(y=65535&v,w&=15,w>q&&(p+=B[f++]<<q,q+=8,w>q&&(p+=B[f++]<<q,q+=8)),y+=p&(1<<w)-1,y>k){a.msg="invalid distance too far back",e.mode=c;break a}if(p>>>=w,q-=w,w=h-i,y>w){if(w=y-w,w>m&&e.sane){a.msg="invalid distance too far back",e.mode=c;break a}if(z=0,A=o,0===n){if(z+=l-w,x>w){x-=w;do C[h++]=o[z++];while(--w);z=h-y,A=C}}else if(w>n){if(z+=l+n-w,w-=n,x>w){x-=w;do C[h++]=o[z++];while(--w);if(z=0,x>n){w=n,x-=w;do C[h++]=o[z++];while(--w);z=h-y,A=C}}}else if(z+=n-w,x>w){x-=w;do C[h++]=o[z++];while(--w);z=h-y,A=C}for(;x>2;)C[h++]=A[z++],C[h++]=A[z++],C[h++]=A[z++],x-=3;x&&(C[h++]=A[z++],x>1&&(C[h++]=A[z++]))}else{z=h-y;do C[h++]=C[z++],C[h++]=C[z++],C[h++]=C[z++],x-=3;while(x>2);x&&(C[h++]=C[z++],x>1&&(C[h++]=C[z++]))}break}}break}}while(g>f&&j>h);x=q>>3,f-=x,q-=x<<3,p&=(1<<q)-1,a.next_in=f,a.next_out=h,a.avail_in=g>f?5+(g-f):5-(f-g),a.avail_out=j>h?257+(j-h):257-(h-j),e.hold=p,e.bits=q}},{}],35:[function(a,b,c){"use strict";function d(a){return(a>>>24&255)+(a>>>8&65280)+((65280&a)<<8)+((255&a)<<24)}function e(){this.mode=0,this.last=!1,this.wrap=0,this.havedict=!1,this.flags=0,this.dmax=0,this.check=0,this.total=0,this.head=null,this.wbits=0,this.wsize=0,this.whave=0,this.wnext=0,this.window=null,this.hold=0,this.bits=0,this.length=0,this.offset=0,this.extra=0,this.lencode=null,this.distcode=null,this.lenbits=0,this.distbits=0,this.ncode=0,this.nlen=0,this.ndist=0,this.have=0,this.next=null,this.lens=new r.Buf16(320),this.work=new r.Buf16(288),this.lendyn=null,this.distdyn=null,this.sane=0,this.back=0,this.was=0}function f(a){var b;return 
a&&a.state?(b=a.state,a.total_in=a.total_out=b.total=0,a.msg="",b.wrap&&(a.adler=1&b.wrap),b.mode=K,b.last=0,b.havedict=0,b.dmax=32768,b.head=null,b.hold=0,b.bits=0,b.lencode=b.lendyn=new r.Buf32(ob),b.distcode=b.distdyn=new r.Buf32(pb),b.sane=1,b.back=-1,C):F}function g(a){var b;return a&&a.state?(b=a.state,b.wsize=0,b.whave=0,b.wnext=0,f(a)):F}function h(a,b){var c,d;return a&&a.state?(d=a.state,0>b?(c=0,b=-b):(c=(b>>4)+1,48>b&&(b&=15)),b&&(8>b||b>15)?F:(null!==d.window&&d.wbits!==b&&(d.window=null),d.wrap=c,d.wbits=b,g(a))):F}function i(a,b){var c,d;return a?(d=new e,a.state=d,d.window=null,c=h(a,b),c!==C&&(a.state=null),c):F}function j(a){return i(a,rb)}function k(a){if(sb){var b;for(p=new r.Buf32(512),q=new r.Buf32(32),b=0;144>b;)a.lens[b++]=8;for(;256>b;)a.lens[b++]=9;for(;280>b;)a.lens[b++]=7;for(;288>b;)a.lens[b++]=8;for(v(x,a.lens,0,288,p,0,a.work,{bits:9}),b=0;32>b;)a.lens[b++]=5;v(y,a.lens,0,32,q,0,a.work,{bits:5}),sb=!1}a.lencode=p,a.lenbits=9,a.distcode=q,a.distbits=5}function l(a,b,c,d){var e,f=a.state;return null===f.window&&(f.wsize=1<<f.wbits,f.wnext=0,f.whave=0,f.window=new r.Buf8(f.wsize)),d>=f.wsize?(r.arraySet(f.window,b,c-f.wsize,f.wsize,0),f.wnext=0,f.whave=f.wsize):(e=f.wsize-f.wnext,e>d&&(e=d),r.arraySet(f.window,b,c-d,e,f.wnext),d-=e,d?(r.arraySet(f.window,b,c-d,d,0),f.wnext=d,f.whave=f.wsize):(f.wnext+=e,f.wnext===f.wsize&&(f.wnext=0),f.whave<f.wsize&&(f.whave+=e))),0}function m(a,b){var c,e,f,g,h,i,j,m,n,o,p,q,ob,pb,qb,rb,sb,tb,ub,vb,wb,xb,yb,zb,Ab=0,Bb=new r.Buf8(4),Cb=[16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15];if(!a||!a.state||!a.output||!a.input&&0!==a.avail_in)return F;c=a.state,c.mode===V&&(c.mode=W),h=a.next_out,f=a.output,j=a.avail_out,g=a.next_in,e=a.input,i=a.avail_in,m=c.hold,n=c.bits,o=i,p=j,xb=C;a:for(;;)switch(c.mode){case K:if(0===c.wrap){c.mode=W;break}for(;16>n;){if(0===i)break 
a;i--,m+=e[g++]<<n,n+=8}if(2&c.wrap&&35615===m){c.check=0,Bb[0]=255&m,Bb[1]=m>>>8&255,c.check=t(c.check,Bb,2,0),m=0,n=0,c.mode=L;break}if(c.flags=0,c.head&&(c.head.done=!1),!(1&c.wrap)||(((255&m)<<8)+(m>>8))%31){a.msg="incorrect header check",c.mode=lb;break}if((15&m)!==J){a.msg="unknown compression method",c.mode=lb;break}if(m>>>=4,n-=4,wb=(15&m)+8,0===c.wbits)c.wbits=wb;else if(wb>c.wbits){a.msg="invalid window size",c.mode=lb;break}c.dmax=1<<wb,a.adler=c.check=1,c.mode=512&m?T:V,m=0,n=0;break;case L:for(;16>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(c.flags=m,(255&c.flags)!==J){a.msg="unknown compression method",c.mode=lb;break}if(57344&c.flags){a.msg="unknown header flags set",c.mode=lb;break}c.head&&(c.head.text=m>>8&1),512&c.flags&&(Bb[0]=255&m,Bb[1]=m>>>8&255,c.check=t(c.check,Bb,2,0)),m=0,n=0,c.mode=M;case M:for(;32>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}c.head&&(c.head.time=m),512&c.flags&&(Bb[0]=255&m,Bb[1]=m>>>8&255,Bb[2]=m>>>16&255,Bb[3]=m>>>24&255,c.check=t(c.check,Bb,4,0)),m=0,n=0,c.mode=N;case N:for(;16>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}c.head&&(c.head.xflags=255&m,c.head.os=m>>8),512&c.flags&&(Bb[0]=255&m,Bb[1]=m>>>8&255,c.check=t(c.check,Bb,2,0)),m=0,n=0,c.mode=O;case O:if(1024&c.flags){for(;16>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}c.length=m,c.head&&(c.head.extra_len=m),512&c.flags&&(Bb[0]=255&m,Bb[1]=m>>>8&255,c.check=t(c.check,Bb,2,0)),m=0,n=0}else c.head&&(c.head.extra=null);c.mode=P;case P:if(1024&c.flags&&(q=c.length,q>i&&(q=i),q&&(c.head&&(wb=c.head.extra_len-c.length,c.head.extra||(c.head.extra=new Array(c.head.extra_len)),r.arraySet(c.head.extra,e,g,q,wb)),512&c.flags&&(c.check=t(c.check,e,q,g)),i-=q,g+=q,c.length-=q),c.length))break a;c.length=0,c.mode=Q;case Q:if(2048&c.flags){if(0===i)break a;q=0;do wb=e[g+q++],c.head&&wb&&c.length<65536&&(c.head.name+=String.fromCharCode(wb));while(wb&&i>q);if(512&c.flags&&(c.check=t(c.check,e,q,g)),i-=q,g+=q,wb)break a}else c.head&&(c.head.name=null);c.length=0,c.mode=R;case 
R:if(4096&c.flags){if(0===i)break a;q=0;do wb=e[g+q++],c.head&&wb&&c.length<65536&&(c.head.comment+=String.fromCharCode(wb));while(wb&&i>q);if(512&c.flags&&(c.check=t(c.check,e,q,g)),i-=q,g+=q,wb)break a}else c.head&&(c.head.comment=null);c.mode=S;case S:if(512&c.flags){for(;16>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(m!==(65535&c.check)){a.msg="header crc mismatch",c.mode=lb;break}m=0,n=0}c.head&&(c.head.hcrc=c.flags>>9&1,c.head.done=!0),a.adler=c.check=0,c.mode=V;break;case T:for(;32>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}a.adler=c.check=d(m),m=0,n=0,c.mode=U;case U:if(0===c.havedict)return a.next_out=h,a.avail_out=j,a.next_in=g,a.avail_in=i,c.hold=m,c.bits=n,E;a.adler=c.check=1,c.mode=V;case V:if(b===A||b===B)break a;case W:if(c.last){m>>>=7&n,n-=7&n,c.mode=ib;break}for(;3>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}switch(c.last=1&m,m>>>=1,n-=1,3&m){case 0:c.mode=X;break;case 1:if(k(c),c.mode=bb,b===B){m>>>=2,n-=2;break a}break;case 2:c.mode=$;break;case 3:a.msg="invalid block type",c.mode=lb}m>>>=2,n-=2;break;case X:for(m>>>=7&n,n-=7&n;32>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if((65535&m)!==(m>>>16^65535)){a.msg="invalid stored block lengths",c.mode=lb;break}if(c.length=65535&m,m=0,n=0,c.mode=Y,b===B)break a;case Y:c.mode=Z;case Z:if(q=c.length){if(q>i&&(q=i),q>j&&(q=j),0===q)break a;r.arraySet(f,e,g,q,h),i-=q,g+=q,j-=q,h+=q,c.length-=q;break}c.mode=V;break;case $:for(;14>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(c.nlen=(31&m)+257,m>>>=5,n-=5,c.ndist=(31&m)+1,m>>>=5,n-=5,c.ncode=(15&m)+4,m>>>=4,n-=4,c.nlen>286||c.ndist>30){a.msg="too many length or distance symbols",c.mode=lb;break}c.have=0,c.mode=_;case _:for(;c.have<c.ncode;){for(;3>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}c.lens[Cb[c.have++]]=7&m,m>>>=3,n-=3}for(;c.have<19;)c.lens[Cb[c.have++]]=0;if(c.lencode=c.lendyn,c.lenbits=7,yb={bits:c.lenbits},xb=v(w,c.lens,0,19,c.lencode,0,c.work,yb),c.lenbits=yb.bits,xb){a.msg="invalid code lengths set",c.mode=lb;break}c.have=0,c.mode=ab;case 
ab:for(;c.have<c.nlen+c.ndist;){for(;Ab=c.lencode[m&(1<<c.lenbits)-1],qb=Ab>>>24,rb=Ab>>>16&255,sb=65535&Ab,!(n>=qb);){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(16>sb)m>>>=qb,n-=qb,c.lens[c.have++]=sb;else{if(16===sb){for(zb=qb+2;zb>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(m>>>=qb,n-=qb,0===c.have){a.msg="invalid bit length repeat",c.mode=lb;break}wb=c.lens[c.have-1],q=3+(3&m),m>>>=2,n-=2}else if(17===sb){for(zb=qb+3;zb>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}m>>>=qb,n-=qb,wb=0,q=3+(7&m),m>>>=3,n-=3}else{for(zb=qb+7;zb>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}m>>>=qb,n-=qb,wb=0,q=11+(127&m),m>>>=7,n-=7}if(c.have+q>c.nlen+c.ndist){a.msg="invalid bit length repeat",c.mode=lb;break}for(;q--;)c.lens[c.have++]=wb}}if(c.mode===lb)break;if(0===c.lens[256]){a.msg="invalid code -- missing end-of-block",c.mode=lb;break}if(c.lenbits=9,yb={bits:c.lenbits},xb=v(x,c.lens,0,c.nlen,c.lencode,0,c.work,yb),c.lenbits=yb.bits,xb){a.msg="invalid literal/lengths set",c.mode=lb;break}if(c.distbits=6,c.distcode=c.distdyn,yb={bits:c.distbits},xb=v(y,c.lens,c.nlen,c.ndist,c.distcode,0,c.work,yb),c.distbits=yb.bits,xb){a.msg="invalid distances set",c.mode=lb;break}if(c.mode=bb,b===B)break a;case bb:c.mode=cb;case cb:if(i>=6&&j>=258){a.next_out=h,a.avail_out=j,a.next_in=g,a.avail_in=i,c.hold=m,c.bits=n,u(a,p),h=a.next_out,f=a.output,j=a.avail_out,g=a.next_in,e=a.input,i=a.avail_in,m=c.hold,n=c.bits,c.mode===V&&(c.back=-1);break}for(c.back=0;Ab=c.lencode[m&(1<<c.lenbits)-1],qb=Ab>>>24,rb=Ab>>>16&255,sb=65535&Ab,!(n>=qb);){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(rb&&0===(240&rb)){for(tb=qb,ub=rb,vb=sb;Ab=c.lencode[vb+((m&(1<<tb+ub)-1)>>tb)],qb=Ab>>>24,rb=Ab>>>16&255,sb=65535&Ab,!(n>=tb+qb);){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}m>>>=tb,n-=tb,c.back+=tb}if(m>>>=qb,n-=qb,c.back+=qb,c.length=sb,0===rb){c.mode=hb;break}if(32&rb){c.back=-1,c.mode=V;break}if(64&rb){a.msg="invalid literal/length code",c.mode=lb;break}c.extra=15&rb,c.mode=db;case 
db:if(c.extra){for(zb=c.extra;zb>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}c.length+=m&(1<<c.extra)-1,m>>>=c.extra,n-=c.extra,c.back+=c.extra}c.was=c.length,c.mode=eb;case eb:for(;Ab=c.distcode[m&(1<<c.distbits)-1],qb=Ab>>>24,rb=Ab>>>16&255,sb=65535&Ab,!(n>=qb);){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(0===(240&rb)){for(tb=qb,ub=rb,vb=sb;Ab=c.distcode[vb+((m&(1<<tb+ub)-1)>>tb)],qb=Ab>>>24,rb=Ab>>>16&255,sb=65535&Ab,!(n>=tb+qb);){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}m>>>=tb,n-=tb,c.back+=tb}if(m>>>=qb,n-=qb,c.back+=qb,64&rb){a.msg="invalid distance code",c.mode=lb;break}c.offset=sb,c.extra=15&rb,c.mode=fb;case fb:if(c.extra){for(zb=c.extra;zb>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}c.offset+=m&(1<<c.extra)-1,m>>>=c.extra,n-=c.extra,c.back+=c.extra}if(c.offset>c.dmax){a.msg="invalid distance too far back",c.mode=lb;break}c.mode=gb;case gb:if(0===j)break a;if(q=p-j,c.offset>q){if(q=c.offset-q,q>c.whave&&c.sane){a.msg="invalid distance too far back",c.mode=lb;break}q>c.wnext?(q-=c.wnext,ob=c.wsize-q):ob=c.wnext-q,q>c.length&&(q=c.length),pb=c.window}else pb=f,ob=h-c.offset,q=c.length;q>j&&(q=j),j-=q,c.length-=q;do f[h++]=pb[ob++];while(--q);0===c.length&&(c.mode=cb);break;case hb:if(0===j)break a;f[h++]=c.length,j--,c.mode=cb;break;case ib:if(c.wrap){for(;32>n;){if(0===i)break a;i--,m|=e[g++]<<n,n+=8}if(p-=j,a.total_out+=p,c.total+=p,p&&(a.adler=c.check=c.flags?t(c.check,f,p,h-p):s(c.check,f,p,h-p)),p=j,(c.flags?m:d(m))!==c.check){a.msg="incorrect data check",c.mode=lb;break}m=0,n=0}c.mode=jb;case jb:if(c.wrap&&c.flags){for(;32>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(m!==(4294967295&c.total)){a.msg="incorrect length check",c.mode=lb;break}m=0,n=0}c.mode=kb;case kb:xb=D;break a;case lb:xb=G;break a;case mb:return H;case nb:default:return F}return 
a.next_out=h,a.avail_out=j,a.next_in=g,a.avail_in=i,c.hold=m,c.bits=n,(c.wsize||p!==a.avail_out&&c.mode<lb&&(c.mode<ib||b!==z))&&l(a,a.output,a.next_out,p-a.avail_out)?(c.mode=mb,H):(o-=a.avail_in,p-=a.avail_out,a.total_in+=o,a.total_out+=p,c.total+=p,c.wrap&&p&&(a.adler=c.check=c.flags?t(c.check,f,p,a.next_out-p):s(c.check,f,p,a.next_out-p)),a.data_type=c.bits+(c.last?64:0)+(c.mode===V?128:0)+(c.mode===bb||c.mode===Y?256:0),(0===o&&0===p||b===z)&&xb===C&&(xb=I),xb)}function n(a){if(!a||!a.state)return F;var b=a.state;return b.window&&(b.window=null),a.state=null,C}function o(a,b){var c;return a&&a.state?(c=a.state,0===(2&c.wrap)?F:(c.head=b,b.done=!1,C)):F}var p,q,r=a("../utils/common"),s=a("./adler32"),t=a("./crc32"),u=a("./inffast"),v=a("./inftrees"),w=0,x=1,y=2,z=4,A=5,B=6,C=0,D=1,E=2,F=-2,G=-3,H=-4,I=-5,J=8,K=1,L=2,M=3,N=4,O=5,P=6,Q=7,R=8,S=9,T=10,U=11,V=12,W=13,X=14,Y=15,Z=16,$=17,_=18,ab=19,bb=20,cb=21,db=22,eb=23,fb=24,gb=25,hb=26,ib=27,jb=28,kb=29,lb=30,mb=31,nb=32,ob=852,pb=592,qb=15,rb=qb,sb=!0;c.inflateReset=g,c.inflateReset2=h,c.inflateResetKeep=f,c.inflateInit=j,c.inflateInit2=i,c.inflate=m,c.inflateEnd=n,c.inflateGetHeader=o,c.inflateInfo="pako inflate (from Nodeca project)"},{"../utils/common":27,"./adler32":29,"./crc32":31,"./inffast":34,"./inftrees":36}],36:[function(a,b){"use strict";var c=a("../utils/common"),d=15,e=852,f=592,g=0,h=1,i=2,j=[3,4,5,6,7,8,9,10,11,13,15,17,19,23,27,31,35,43,51,59,67,83,99,115,131,163,195,227,258,0,0],k=[16,16,16,16,16,16,16,16,17,17,17,17,18,18,18,18,19,19,19,19,20,20,20,20,21,21,21,21,16,72,78],l=[1,2,3,4,5,7,9,13,17,25,33,49,65,97,129,193,257,385,513,769,1025,1537,2049,3073,4097,6145,8193,12289,16385,24577,0,0],m=[16,16,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,24,24,25,25,26,26,27,27,28,28,29,29,64,64];b.exports=function(a,b,n,o,p,q,r,s){var t,u,v,w,x,y,z,A,B,C=s.bits,D=0,E=0,F=0,G=0,H=0,I=0,J=0,K=0,L=0,M=0,N=null,O=0,P=new c.Buf16(d+1),Q=new 
c.Buf16(d+1),R=null,S=0;for(D=0;d>=D;D++)P[D]=0;for(E=0;o>E;E++)P[b[n+E]]++;for(H=C,G=d;G>=1&&0===P[G];G--);if(H>G&&(H=G),0===G)return p[q++]=20971520,p[q++]=20971520,s.bits=1,0;for(F=1;G>F&&0===P[F];F++);for(F>H&&(H=F),K=1,D=1;d>=D;D++)if(K<<=1,K-=P[D],0>K)return-1;if(K>0&&(a===g||1!==G))return-1;for(Q[1]=0,D=1;d>D;D++)Q[D+1]=Q[D]+P[D];for(E=0;o>E;E++)0!==b[n+E]&&(r[Q[b[n+E]]++]=E);if(a===g?(N=R=r,y=19):a===h?(N=j,O-=257,R=k,S-=257,y=256):(N=l,R=m,y=-1),M=0,E=0,D=F,x=q,I=H,J=0,v=-1,L=1<<H,w=L-1,a===h&&L>e||a===i&&L>f)return 1;for(var T=0;;){T++,z=D-J,r[E]<y?(A=0,B=r[E]):r[E]>y?(A=R[S+r[E]],B=N[O+r[E]]):(A=96,B=0),t=1<<D-J,u=1<<I,F=u;do u-=t,p[x+(M>>J)+u]=z<<24|A<<16|B|0;while(0!==u);for(t=1<<D-1;M&t;)t>>=1;if(0!==t?(M&=t-1,M+=t):M=0,E++,0===--P[D]){if(D===G)break;D=b[n+r[E]]}if(D>H&&(M&w)!==v){for(0===J&&(J=H),x+=F,I=D-J,K=1<<I;G>I+J&&(K-=P[I+J],!(0>=K));)I++,K<<=1;if(L+=1<<I,a===h&&L>e||a===i&&L>f)return 1;v=M&w,p[v]=H<<24|I<<16|x-q|0}}return 0!==M&&(p[x+M]=D-J<<24|64<<16|0),s.bits=H,0}},{"../utils/common":27}],37:[function(a,b){"use strict";b.exports={2:"need dictionary",1:"stream end",0:"","-1":"file error","-2":"stream error","-3":"data error","-4":"insufficient memory","-5":"buffer error","-6":"incompatible version"}},{}],38:[function(a,b,c){"use strict";function d(a){for(var b=a.length;--b>=0;)a[b]=0}function e(a){return 256>a?gb[a]:gb[256+(a>>>7)]}function f(a,b){a.pending_buf[a.pending++]=255&b,a.pending_buf[a.pending++]=b>>>8&255}function g(a,b,c){a.bi_valid>V-c?(a.bi_buf|=b<<a.bi_valid&65535,f(a,a.bi_buf),a.bi_buf=b>>V-a.bi_valid,a.bi_valid+=c-V):(a.bi_buf|=b<<a.bi_valid&65535,a.bi_valid+=c)}function h(a,b,c){g(a,c[2*b],c[2*b+1])}function i(a,b){var c=0;do c|=1&a,a>>>=1,c<<=1;while(--b>0);return c>>>1}function j(a){16===a.bi_valid?(f(a,a.bi_buf),a.bi_buf=0,a.bi_valid=0):a.bi_valid>=8&&(a.pending_buf[a.pending++]=255&a.bi_buf,a.bi_buf>>=8,a.bi_valid-=8)}function k(a,b){var 
c,d,e,f,g,h,i=b.dyn_tree,j=b.max_code,k=b.stat_desc.static_tree,l=b.stat_desc.has_stree,m=b.stat_desc.extra_bits,n=b.stat_desc.extra_base,o=b.stat_desc.max_length,p=0;for(f=0;U>=f;f++)a.bl_count[f]=0;for(i[2*a.heap[a.heap_max]+1]=0,c=a.heap_max+1;T>c;c++)d=a.heap[c],f=i[2*i[2*d+1]+1]+1,f>o&&(f=o,p++),i[2*d+1]=f,d>j||(a.bl_count[f]++,g=0,d>=n&&(g=m[d-n]),h=i[2*d],a.opt_len+=h*(f+g),l&&(a.static_len+=h*(k[2*d+1]+g)));if(0!==p){do{for(f=o-1;0===a.bl_count[f];)f--;a.bl_count[f]--,a.bl_count[f+1]+=2,a.bl_count[o]--,p-=2}while(p>0);for(f=o;0!==f;f--)for(d=a.bl_count[f];0!==d;)e=a.heap[--c],e>j||(i[2*e+1]!==f&&(a.opt_len+=(f-i[2*e+1])*i[2*e],i[2*e+1]=f),d--)}}function l(a,b,c){var d,e,f=new Array(U+1),g=0;for(d=1;U>=d;d++)f[d]=g=g+c[d-1]<<1;for(e=0;b>=e;e++){var h=a[2*e+1];0!==h&&(a[2*e]=i(f[h]++,h))}}function m(){var a,b,c,d,e,f=new Array(U+1);for(c=0,d=0;O-1>d;d++)for(ib[d]=c,a=0;a<1<<_[d];a++)hb[c++]=d;for(hb[c-1]=d,e=0,d=0;16>d;d++)for(jb[d]=e,a=0;a<1<<ab[d];a++)gb[e++]=d;for(e>>=7;R>d;d++)for(jb[d]=e<<7,a=0;a<1<<ab[d]-7;a++)gb[256+e++]=d;for(b=0;U>=b;b++)f[b]=0;for(a=0;143>=a;)eb[2*a+1]=8,a++,f[8]++;for(;255>=a;)eb[2*a+1]=9,a++,f[9]++;for(;279>=a;)eb[2*a+1]=7,a++,f[7]++;for(;287>=a;)eb[2*a+1]=8,a++,f[8]++;for(l(eb,Q+1,f),a=0;R>a;a++)fb[2*a+1]=5,fb[2*a]=i(a,5);kb=new nb(eb,_,P+1,Q,U),lb=new nb(fb,ab,0,R,U),mb=new nb(new Array(0),bb,0,S,W)}function n(a){var b;for(b=0;Q>b;b++)a.dyn_ltree[2*b]=0;for(b=0;R>b;b++)a.dyn_dtree[2*b]=0;for(b=0;S>b;b++)a.bl_tree[2*b]=0;a.dyn_ltree[2*X]=1,a.opt_len=a.static_len=0,a.last_lit=a.matches=0}function o(a){a.bi_valid>8?f(a,a.bi_buf):a.bi_valid>0&&(a.pending_buf[a.pending++]=a.bi_buf),a.bi_buf=0,a.bi_valid=0}function p(a,b,c,d){o(a),d&&(f(a,c),f(a,~c)),E.arraySet(a.pending_buf,a.window,b,c,a.pending),a.pending+=c}function q(a,b,c,d){var e=2*b,f=2*c;return a[e]<a[f]||a[e]===a[f]&&d[b]<=d[c]}function r(a,b,c){for(var 
d=a.heap[c],e=c<<1;e<=a.heap_len&&(e<a.heap_len&&q(b,a.heap[e+1],a.heap[e],a.depth)&&e++,!q(b,d,a.heap[e],a.depth));)a.heap[c]=a.heap[e],c=e,e<<=1;a.heap[c]=d}function s(a,b,c){var d,f,i,j,k=0;if(0!==a.last_lit)do d=a.pending_buf[a.d_buf+2*k]<<8|a.pending_buf[a.d_buf+2*k+1],f=a.pending_buf[a.l_buf+k],k++,0===d?h(a,f,b):(i=hb[f],h(a,i+P+1,b),j=_[i],0!==j&&(f-=ib[i],g(a,f,j)),d--,i=e(d),h(a,i,c),j=ab[i],0!==j&&(d-=jb[i],g(a,d,j)));while(k<a.last_lit);h(a,X,b)}function t(a,b){var c,d,e,f=b.dyn_tree,g=b.stat_desc.static_tree,h=b.stat_desc.has_stree,i=b.stat_desc.elems,j=-1;for(a.heap_len=0,a.heap_max=T,c=0;i>c;c++)0!==f[2*c]?(a.heap[++a.heap_len]=j=c,a.depth[c]=0):f[2*c+1]=0;for(;a.heap_len<2;)e=a.heap[++a.heap_len]=2>j?++j:0,f[2*e]=1,a.depth[e]=0,a.opt_len--,h&&(a.static_len-=g[2*e+1]);for(b.max_code=j,c=a.heap_len>>1;c>=1;c--)r(a,f,c);e=i;do c=a.heap[1],a.heap[1]=a.heap[a.heap_len--],r(a,f,1),d=a.heap[1],a.heap[--a.heap_max]=c,a.heap[--a.heap_max]=d,f[2*e]=f[2*c]+f[2*d],a.depth[e]=(a.depth[c]>=a.depth[d]?a.depth[c]:a.depth[d])+1,f[2*c+1]=f[2*d+1]=e,a.heap[1]=e++,r(a,f,1);while(a.heap_len>=2);a.heap[--a.heap_max]=a.heap[1],k(a,b),l(f,j,a.bl_count)}function u(a,b,c){var d,e,f=-1,g=b[1],h=0,i=7,j=4;for(0===g&&(i=138,j=3),b[2*(c+1)+1]=65535,d=0;c>=d;d++)e=g,g=b[2*(d+1)+1],++h<i&&e===g||(j>h?a.bl_tree[2*e]+=h:0!==e?(e!==f&&a.bl_tree[2*e]++,a.bl_tree[2*Y]++):10>=h?a.bl_tree[2*Z]++:a.bl_tree[2*$]++,h=0,f=e,0===g?(i=138,j=3):e===g?(i=6,j=3):(i=7,j=4))}function v(a,b,c){var d,e,f=-1,i=b[1],j=0,k=7,l=4;for(0===i&&(k=138,l=3),d=0;c>=d;d++)if(e=i,i=b[2*(d+1)+1],!(++j<k&&e===i)){if(l>j){do h(a,e,a.bl_tree);while(0!==--j)}else 0!==e?(e!==f&&(h(a,e,a.bl_tree),j--),h(a,Y,a.bl_tree),g(a,j-3,2)):10>=j?(h(a,Z,a.bl_tree),g(a,j-3,3)):(h(a,$,a.bl_tree),g(a,j-11,7));j=0,f=e,0===i?(k=138,l=3):e===i?(k=6,l=3):(k=7,l=4)}}function w(a){var b;for(u(a,a.dyn_ltree,a.l_desc.max_code),u(a,a.dyn_dtree,a.d_desc.max_code),t(a,a.bl_desc),b=S-1;b>=3&&0===a.bl_tree[2*cb[b]+1];b--);return 
a.opt_len+=3*(b+1)+5+5+4,b}function x(a,b,c,d){var e;for(g(a,b-257,5),g(a,c-1,5),g(a,d-4,4),e=0;d>e;e++)g(a,a.bl_tree[2*cb[e]+1],3);v(a,a.dyn_ltree,b-1),v(a,a.dyn_dtree,c-1)}function y(a){var b,c=4093624447;for(b=0;31>=b;b++,c>>>=1)if(1&c&&0!==a.dyn_ltree[2*b])return G;if(0!==a.dyn_ltree[18]||0!==a.dyn_ltree[20]||0!==a.dyn_ltree[26])return H;for(b=32;P>b;b++)if(0!==a.dyn_ltree[2*b])return H;return G}function z(a){pb||(m(),pb=!0),a.l_desc=new ob(a.dyn_ltree,kb),a.d_desc=new ob(a.dyn_dtree,lb),a.bl_desc=new ob(a.bl_tree,mb),a.bi_buf=0,a.bi_valid=0,n(a)}function A(a,b,c,d){g(a,(J<<1)+(d?1:0),3),p(a,b,c,!0)}function B(a){g(a,K<<1,3),h(a,X,eb),j(a)}function C(a,b,c,d){var e,f,h=0;a.level>0?(a.strm.data_type===I&&(a.strm.data_type=y(a)),t(a,a.l_desc),t(a,a.d_desc),h=w(a),e=a.opt_len+3+7>>>3,f=a.static_len+3+7>>>3,e>=f&&(e=f)):e=f=c+5,e>=c+4&&-1!==b?A(a,b,c,d):a.strategy===F||f===e?(g(a,(K<<1)+(d?1:0),3),s(a,eb,fb)):(g(a,(L<<1)+(d?1:0),3),x(a,a.l_desc.max_code+1,a.d_desc.max_code+1,h+1),s(a,a.dyn_ltree,a.dyn_dtree)),n(a),d&&o(a)}function D(a,b,c){return a.pending_buf[a.d_buf+2*a.last_lit]=b>>>8&255,a.pending_buf[a.d_buf+2*a.last_lit+1]=255&b,a.pending_buf[a.l_buf+a.last_lit]=255&c,a.last_lit++,0===b?a.dyn_ltree[2*c]++:(a.matches++,b--,a.dyn_ltree[2*(hb[c]+P+1)]++,a.dyn_dtree[2*e(b)]++),a.last_lit===a.lit_bufsize-1}var E=a("../utils/common"),F=4,G=0,H=1,I=2,J=0,K=1,L=2,M=3,N=258,O=29,P=256,Q=P+1+O,R=30,S=19,T=2*Q+1,U=15,V=16,W=7,X=256,Y=16,Z=17,$=18,_=[0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,0],ab=[0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13],bb=[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,3,7],cb=[16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15],db=512,eb=new Array(2*(Q+2));d(eb);var fb=new Array(2*R);d(fb);var gb=new Array(db);d(gb);var hb=new Array(N-M+1);d(hb);var ib=new Array(O);d(ib);var jb=new Array(R);d(jb);var 
kb,lb,mb,nb=function(a,b,c,d,e){this.static_tree=a,this.extra_bits=b,this.extra_base=c,this.elems=d,this.max_length=e,this.has_stree=a&&a.length},ob=function(a,b){this.dyn_tree=a,this.max_code=0,this.stat_desc=b},pb=!1;c._tr_init=z,c._tr_stored_block=A,c._tr_flush_block=C,c._tr_tally=D,c._tr_align=B},{"../utils/common":27}],39:[function(a,b){"use strict";function c(){this.input=null,this.next_in=0,this.avail_in=0,this.total_in=0,this.output=null,this.next_out=0,this.avail_out=0,this.total_out=0,this.msg="",this.state=null,this.data_type=2,this.adler=0}b.exports=c},{}]},{},[9])(9)});'use strict';if(tr.isVinn){global.JSZip=global.window.JSZip;global.window=undefined;}else if(tr.isNode){const jsZipAbsPath=HTMLImportsLoader.hrefToAbsolutePath('/jszip.min.js');const jsZipModule=require(jsZipAbsPath);global.JSZip=jsZipModule;}'use strict';tr.exportTo('tr.e.importer',function(){function ZipImporter(model,eventData){if(eventData instanceof ArrayBuffer){eventData=new Uint8Array(eventData);} this.model_=model;this.eventData_=eventData;} ZipImporter.canImport=function(eventData){let header;if(eventData instanceof ArrayBuffer){header=new Uint8Array(eventData.slice(0,2));}else if(typeof(eventData)==='string'||eventData instanceof String){header=[eventData.charCodeAt(0),eventData.charCodeAt(1)];}else{return false;} return header[0]==='P'.charCodeAt(0)&&header[1]==='K'.charCodeAt(0);};ZipImporter.prototype={__proto__:tr.importer.Importer.prototype,get importerName(){return'ZipImporter';},isTraceDataContainer(){return true;},extractSubtraces(){const zip=new JSZip(this.eventData_);const subtraces=[];for(const idx in zip.files){subtraces.push(zip.files[idx].asBinary());} @@ -5549,7 +5549,7 @@ XMarkerAnnotationView.prototype={__proto__:tr.ui.annotations.AnnotationView.prototype,draw(ctx){const dt=this.viewport_.currentDisplayTransform;const 
viewX=dt.xWorldToView(this.annotation_.timestamp);ctx.beginPath();tr.ui.b.drawLine(ctx,viewX,0,viewX,ctx.canvas.height);ctx.strokeStyle=this.annotation_.strokeStyle;ctx.stroke();}};return{XMarkerAnnotationView,};});'use strict';tr.exportTo('tr.model',function(){function XMarkerAnnotation(timestamp){tr.model.Annotation.apply(this,arguments);this.timestamp=timestamp;this.strokeStyle='rgba(0, 0, 255, 0.5)';} XMarkerAnnotation.fromDict=function(dict){return new XMarkerAnnotation(dict.args.timestamp);};XMarkerAnnotation.prototype={__proto__:tr.model.Annotation.prototype,toDict(){return{typeName:'xmarker',args:{timestamp:this.timestamp}};},createView_(viewport){return new tr.ui.annotations.XMarkerAnnotationView(viewport,this);}};tr.model.Annotation.register(XMarkerAnnotation,{typeName:'xmarker'});return{XMarkerAnnotation,};});'use strict';tr.exportTo('tr.e.importer',function(){const Base64=tr.b.Base64;const deepCopy=tr.b.deepCopy;const ColorScheme=tr.b.ColorScheme;const HeapDumpTraceEventImporter=tr.e.importer.HeapDumpTraceEventImporter;const LegacyHeapDumpTraceEventImporter=tr.e.importer.LegacyHeapDumpTraceEventImporter;const StreamingEventExpander=tr.e.importer.StreamingEventExpander;const ProfilingDictionaryReader=tr.e.importer.ProfilingDictionaryReader;function getEventColor(event,opt_customName){if(event.cname){return ColorScheme.getColorIdForReservedName(event.cname);}else if(opt_customName||event.name){return ColorScheme.getColorIdForGeneralPurposeString(opt_customName||event.name);}} function isLegacyChromeClockSyncEvent(event){return event.name!==undefined&&event.name.startsWith(LEGACY_CHROME_CLOCK_SYNC_EVENT_NAME_PREFIX)&&((event.ph==='S')||(event.ph==='F'));} -const PRODUCER='producer';const CONSUMER='consumer';const STEP='step';const BACKGROUND=tr.model.ContainerMemoryDump.LevelOfDetail.BACKGROUND;const LIGHT=tr.model.ContainerMemoryDump.LevelOfDetail.LIGHT;const DETAILED=tr.model.ContainerMemoryDump.LevelOfDetail.DETAILED;const 
MEMORY_DUMP_LEVEL_OF_DETAIL_ORDER=[undefined,BACKGROUND,LIGHT,DETAILED];const GLOBAL_MEMORY_ALLOCATOR_DUMP_PREFIX='global/';const LEGACY_CHROME_CLOCK_SYNC_EVENT_NAME_PREFIX='ClockSyncEvent.';const BYTE_STAT_NAME_MAP={'pc':'privateCleanResident','pd':'privateDirtyResident','sc':'sharedCleanResident','sd':'sharedDirtyResident','pss':'proportionalResident','sw':'swapped'};const WEAK_MEMORY_ALLOCATOR_DUMP_FLAG=1<<0;const OBJECT_TYPE_NAME_PATTERNS=[{prefix:'const char *WOW::getStringWithTypeName() [T = ',suffix:']'},{prefix:'const char* WOW::getStringWithTypeName() [with T = ',suffix:']'},{prefix:'const char *__cdecl WOW::getStringWithTypeName<',suffix:'>(void)'}];const SUBTRACE_FIELDS=new Set(['powerTraceAsString','systemTraceEvents',]);const NON_METADATA_FIELDS=new Set(['displayTimeUnit','samples','stackFrames','traceAnnotations','traceEvents',...SUBTRACE_FIELDS]);function TraceEventImporter(model,eventData){this.hasEvents_=undefined;this.importPriority=1;this.model_=model;this.events_=undefined;this.sampleEvents_=undefined;this.stackFrameEvents_=undefined;this.stackFrameTree_=new tr.model.ProfileTree();this.subtraces_=[];this.eventsWereFromString_=false;this.softwareMeasuredCpuCount_=undefined;this.allAsyncEvents_=[];this.allFlowEvents_=[];this.allObjectEvents_=[];this.contextProcessorPerThread={};this.traceEventSampleStackFramesByName_={};this.v8ProcessCodeMaps_={};this.v8ProcessRootStackFrame_={};this.v8SamplingData_=[];this.profileTrees_=new Map();this.profileInfo_=new Map();this.legacyChromeClockSyncStartEvent_=undefined;this.legacyChromeClockSyncFinishEvent_=undefined;this.allMemoryDumpEvents_={};this.heapProfileExpander=new ProfilingDictionaryReader();this.objectTypeNameMap_={};this.clockDomainId_=tr.model.ClockDomainId.UNKNOWN_CHROME_LEGACY;this.toModelTime_=undefined;if(typeof(eventData)==='string'||eventData instanceof 
String){eventData=eventData.trim();if(eventData[0]==='['){eventData=eventData.replace(/\s*,\s*$/,'');if(eventData[eventData.length-1]!==']'){eventData=eventData+']';}} +const PRODUCER='producer';const CONSUMER='consumer';const STEP='step';const BACKGROUND=tr.model.ContainerMemoryDump.LevelOfDetail.BACKGROUND;const LIGHT=tr.model.ContainerMemoryDump.LevelOfDetail.LIGHT;const DETAILED=tr.model.ContainerMemoryDump.LevelOfDetail.DETAILED;const MEMORY_DUMP_LEVEL_OF_DETAIL_ORDER=[undefined,BACKGROUND,LIGHT,DETAILED];const GLOBAL_MEMORY_ALLOCATOR_DUMP_PREFIX='global/';const LEGACY_CHROME_CLOCK_SYNC_EVENT_NAME_PREFIX='ClockSyncEvent.';const BYTE_STAT_NAME_MAP={'pc':'privateCleanResident','pd':'privateDirtyResident','sc':'sharedCleanResident','sd':'sharedDirtyResident','pss':'proportionalResident','sw':'swapped'};const WEAK_MEMORY_ALLOCATOR_DUMP_FLAG=1<<0;const OBJECT_TYPE_NAME_PATTERNS=[{prefix:'const char *WTF::getStringWithTypeName() [T = ',suffix:']'},{prefix:'const char* WTF::getStringWithTypeName() [with T = ',suffix:']'},{prefix:'const char *__cdecl WTF::getStringWithTypeName<',suffix:'>(void)'}];const SUBTRACE_FIELDS=new Set(['powerTraceAsString','systemTraceEvents',]);const NON_METADATA_FIELDS=new Set(['displayTimeUnit','samples','stackFrames','traceAnnotations','traceEvents',...SUBTRACE_FIELDS]);function TraceEventImporter(model,eventData){this.hasEvents_=undefined;this.importPriority=1;this.model_=model;this.events_=undefined;this.sampleEvents_=undefined;this.stackFrameEvents_=undefined;this.stackFrameTree_=new tr.model.ProfileTree();this.subtraces_=[];this.eventsWereFromString_=false;this.softwareMeasuredCpuCount_=undefined;this.allAsyncEvents_=[];this.allFlowEvents_=[];this.allObjectEvents_=[];this.contextProcessorPerThread={};this.traceEventSampleStackFramesByName_={};this.v8ProcessCodeMaps_={};this.v8ProcessRootStackFrame_={};this.v8SamplingData_=[];this.profileTrees_=new Map();this.profileInfo_=new 
Map();this.legacyChromeClockSyncStartEvent_=undefined;this.legacyChromeClockSyncFinishEvent_=undefined;this.allMemoryDumpEvents_={};this.heapProfileExpander=new ProfilingDictionaryReader();this.objectTypeNameMap_={};this.clockDomainId_=tr.model.ClockDomainId.UNKNOWN_CHROME_LEGACY;this.toModelTime_=undefined;if(typeof(eventData)==='string'||eventData instanceof String){eventData=eventData.trim();if(eventData[0]==='['){eventData=eventData.replace(/\s*,\s*$/,'');if(eventData[eventData.length-1]!==']'){eventData=eventData+']';}} this.events_=JSON.parse(eventData);this.eventsWereFromString_=true;}else{this.events_=eventData;} if(this.events_.traceEvents){const container=this.events_;this.events_=this.events_.traceEvents;for(const subtraceField of SUBTRACE_FIELDS){if(container[subtraceField]){this.storeSubtrace_(container[subtraceField]);}} this.storeSamples_(container.samples);this.storeStackFrames_(container.stackFrames);this.storeDisplayTimeUnit_(container.displayTimeUnit);this.storeTraceAnnotations_(container.traceAnnotations);this.storeMetadata_(container);}else if(this.events_ instanceof tr.b.TraceStream){const parser=oboe().node('{cat ph}',function(e){return oboe.drop;}).node('!.powerTraceAsString',this.storeSubtrace_.bind(this)).node('!.systemTraceEvents',this.storeSubtrace_.bind(this)).node('!.samples',this.storeSamples_.bind(this)).node('!.stackFrames',this.storeStackFrames_.bind(this)).node('!.displayTimeUnit',this.storeDisplayTimeUnit_.bind(this)).node('!.traceAnnotations',this.storeTraceAnnotations_.bind(this)).done(this.storeMetadata_.bind(this));this.events_.rewind();while(this.events_.hasData){parser.write(this.events_.readNumBytes());} @@ -6970,7 +6970,7 @@ get length(){return this._diagnostics.length;}*[Symbol.iterator](){for(const diagnostic of this._diagnostics)yield diagnostic;} asDictInto_(d){d.diagnostics=this._diagnostics.map(d=>d.asDictOrReference());} static fromDict(d){return new UnmergeableDiagnosticSet(d.diagnostics.map(d=>((typeof 
d==='string')?new tr.v.d.DiagnosticRef(d):tr.v.d.Diagnostic.fromDict(d))));}} -tr.v.d.Diagnostic.register(UnmergeableDiagnosticSet,{elementName:'tr-v-ui-unmergeable-diagnostic-set-span'});return{UnmergeableDiagnosticSet,};});'use strict';tr.exportTo('tr.v.d',function(){const RESERVED_INFOS={ANGLE_REVISIONS:{name:'angleRevisions',type:tr.v.d.GenericSet},ARCHITECTURES:{name:'architectures',type:tr.v.d.GenericSet},BENCHMARKS:{name:'benchmarks',type:tr.v.d.GenericSet},BENCHMARK_START:{name:'benchmarkStart',type:tr.v.d.DateRange},BENCHMARK_DESCRIPTIONS:{name:'benchmarkDescriptions',type:tr.v.d.GenericSet},BOTS:{name:'bots',type:tr.v.d.GenericSet},BUG_COMPONENTS:{name:'bugComponents',type:tr.v.d.GenericSet},BUILDS:{name:'builds',type:tr.v.d.GenericSet},CATAPULT_REVISIONS:{name:'catapultRevisions',type:tr.v.d.GenericSet},CHROMIUM_COMMIT_POSITIONS:{name:'chromiumCommitPositions',type:tr.v.d.GenericSet},CHROMIUM_REVISIONS:{name:'chromiumRevisions',type:tr.v.d.GenericSet},DEVICE_IDS:{name:'deviceIds',type:tr.v.d.GenericSet},GPUS:{name:'gpus',type:tr.v.d.GenericSet},GROUPING_PATH:{name:'groupingPath',type:tr.v.d.GroupingPath},IS_REFERENCE_BUILD:{name:'isReferenceBuild',type:tr.v.d.GenericSet},LABELS:{name:'labels',type:tr.v.d.GenericSet},LOG_URLS:{name:'logUrls',type:tr.v.d.GenericSet},PRIMARYS:{name:'primarys',type:tr.v.d.GenericSet},MEMORY_AMOUNTS:{name:'memoryAmounts',type:tr.v.d.GenericSet},MERGED_FROM:{name:'mergedFrom',type:tr.v.d.RelatedHistogramMap},MERGED_TO:{name:'mergedTo',type:tr.v.d.RelatedHistogramMap},OS_NAMES:{name:'osNames',type:tr.v.d.GenericSet},OS_VERSIONS:{name:'osVersions',type:tr.v.d.GenericSet},OWNERS:{name:'owners',type:tr.v.d.GenericSet},PRODUCT_VERSIONS:{name:'productVersions',type:tr.v.d.GenericSet},RELATED_NAMES:{name:'relatedNames',type:tr.v.d.GenericSet},SKIA_REVISIONS:{name:'skiaRevisions',type:tr.v.d.GenericSet},STORIES:{name:'stories',type:tr.v.d.GenericSet},STORYSET_REPEATS:{name:'storysetRepeats',type:tr.v.d.GenericSet},STORY_TAGS:{name:'sto
ryTags',type:tr.v.d.GenericSet},TAG_MAP:{name:'tagmap',type:tr.v.d.TagMap},TRACE_START:{name:'traceStart',type:tr.v.d.DateRange},TRACE_URLS:{name:'traceUrls',type:tr.v.d.GenericSet},V8_COMMIT_POSITIONS:{name:'v8CommitPositions',type:tr.v.d.DateRange},V8_REVISIONS:{name:'v8Revisions',type:tr.v.d.GenericSet},WEBRTC_REVISIONS:{name:'webrtcRevisions',type:tr.v.d.GenericSet},};const RESERVED_NAMES={};const RESERVED_NAMES_TO_TYPES=new Map();for(const[codename,info]of Object.entries(RESERVED_INFOS)){RESERVED_NAMES[codename]=info.name;if(RESERVED_NAMES_TO_TYPES.has(info.name)){throw new Error(`Duplicate reserved name "${info.name}"`);} +tr.v.d.Diagnostic.register(UnmergeableDiagnosticSet,{elementName:'tr-v-ui-unmergeable-diagnostic-set-span'});return{UnmergeableDiagnosticSet,};});'use strict';tr.exportTo('tr.v.d',function(){const RESERVED_INFOS={ANGLE_REVISIONS:{name:'angleRevisions',type:tr.v.d.GenericSet},ARCHITECTURES:{name:'architectures',type:tr.v.d.GenericSet},BENCHMARKS:{name:'benchmarks',type:tr.v.d.GenericSet},BENCHMARK_START:{name:'benchmarkStart',type:tr.v.d.DateRange},BENCHMARK_DESCRIPTIONS:{name:'benchmarkDescriptions',type:tr.v.d.GenericSet},BOTS:{name:'bots',type:tr.v.d.GenericSet},BUG_COMPONENTS:{name:'bugComponents',type:tr.v.d.GenericSet},BUILDS:{name:'builds',type:tr.v.d.GenericSet},CATAPULT_REVISIONS:{name:'catapultRevisions',type:tr.v.d.GenericSet},CHROMIUM_COMMIT_POSITIONS:{name:'chromiumCommitPositions',type:tr.v.d.GenericSet},CHROMIUM_REVISIONS:{name:'chromiumRevisions',type:tr.v.d.GenericSet},DEVICE_IDS:{name:'deviceIds',type:tr.v.d.GenericSet},GPUS:{name:'gpus',type:tr.v.d.GenericSet},GROUPING_PATH:{name:'groupingPath',type:tr.v.d.GroupingPath},IS_REFERENCE_BUILD:{name:'isReferenceBuild',type:tr.v.d.GenericSet},LABELS:{name:'labels',type:tr.v.d.GenericSet},LOG_URLS:{name:'logUrls',type:tr.v.d.GenericSet},MASTERS:{name:'masters',type:tr.v.d.GenericSet},MEMORY_AMOUNTS:{name:'memoryAmounts',type:tr.v.d.GenericSet},MERGED_FROM:{name:'mergedFrom',type:t
r.v.d.RelatedHistogramMap},MERGED_TO:{name:'mergedTo',type:tr.v.d.RelatedHistogramMap},OS_NAMES:{name:'osNames',type:tr.v.d.GenericSet},OS_VERSIONS:{name:'osVersions',type:tr.v.d.GenericSet},OWNERS:{name:'owners',type:tr.v.d.GenericSet},PRODUCT_VERSIONS:{name:'productVersions',type:tr.v.d.GenericSet},RELATED_NAMES:{name:'relatedNames',type:tr.v.d.GenericSet},SKIA_REVISIONS:{name:'skiaRevisions',type:tr.v.d.GenericSet},STORIES:{name:'stories',type:tr.v.d.GenericSet},STORYSET_REPEATS:{name:'storysetRepeats',type:tr.v.d.GenericSet},STORY_TAGS:{name:'storyTags',type:tr.v.d.GenericSet},TAG_MAP:{name:'tagmap',type:tr.v.d.TagMap},TRACE_START:{name:'traceStart',type:tr.v.d.DateRange},TRACE_URLS:{name:'traceUrls',type:tr.v.d.GenericSet},V8_COMMIT_POSITIONS:{name:'v8CommitPositions',type:tr.v.d.DateRange},V8_REVISIONS:{name:'v8Revisions',type:tr.v.d.GenericSet},WEBRTC_REVISIONS:{name:'webrtcRevisions',type:tr.v.d.GenericSet},};const RESERVED_NAMES={};const RESERVED_NAMES_TO_TYPES=new Map();for(const[codename,info]of Object.entries(RESERVED_INFOS)){RESERVED_NAMES[codename]=info.name;if(RESERVED_NAMES_TO_TYPES.has(info.name)){throw new Error(`Duplicate reserved name "${info.name}"`);} RESERVED_NAMES_TO_TYPES.set(info.name,info.type);} const RESERVED_NAMES_SET=new Set(Object.values(RESERVED_NAMES));return{RESERVED_INFOS,RESERVED_NAMES,RESERVED_NAMES_SET,RESERVED_NAMES_TO_TYPES,};});'use strict';tr.exportTo('tr.v.d',function(){class DiagnosticMap extends Map{constructor(opt_allowReservedNames){super();if(opt_allowReservedNames===undefined){opt_allowReservedNames=true;} this.allowReservedNames_=opt_allowReservedNames;} @@ -7244,10 +7244,10 @@ if(dict.callback===undefined){throw new Error('callback must be given');} this.eventType_=dict.eventType;this.keyCodes_=[];if(dict.keyCode){this.pushKeyCode_(dict.keyCode);}else if(dict.keyCodes){dict.keyCodes.forEach(this.pushKeyCode_,this);} 
this.useCapture_=!!dict.useCapture;this.callback_=dict.callback;this.thisArg_=dict.thisArg!==undefined?dict.thisArg:undefined;this.helpText_=dict.helpText!==undefined?dict.helpText:undefined;} -HotKey.prototype={get eventType(){return this.eventType_;},get keyCodes(){return this.keyCodes_;},get helpText(){return this.helpText_;},call(e){this.callback_.call(this.thisArg_,e);},pushKeyCode_(keyCode){this.keyCodes_.push(keyCode);}};return{HotKey,};});'use strict';Polymer({is:'tv-ui-b-hotkey-controller',created(){this.isAttached_=false;this.globalMode_=false;this.coupledToParentController_=undefined;this.curHost_=undefined;this.childControllers_=[];this.bubblingKeyDownHotKeys_={};this.capturingKeyDownHotKeys_={};this.bubblingKeyPressHotKeys_={};this.capturingKeyPressHotKeys_={};this.onBubblingKeyDown_=this.onKey_.bind(this,false);this.onCapturingKeyDown_=this.onKey_.bind(this,true);this.onBubblingKeyPress_=this.onKey_.bind(this,false);this.onCapturingKeyPress_=this.onKey_.bind(this,true);},attached(){this.isAttached_=true;const host=this.findHost_();if(host.__hotkeyController){throw new Error('Multiple hotkey controllers attached to this host');} +HotKey.prototype={get eventType(){return this.eventType_;},get keyCodes(){return this.keyCodes_;},get helpText(){return this.helpText_;},call(e){this.callback_.call(this.thisArg_,e);},pushKeyCode_(keyCode){this.keyCodes_.push(keyCode);}};return{HotKey,};});'use 
strict';Polymer({is:'tv-ui-b-hotkey-controller',created(){this.isAttached_=false;this.globalMode_=false;this.slavedToParentController_=undefined;this.curHost_=undefined;this.childControllers_=[];this.bubblingKeyDownHotKeys_={};this.capturingKeyDownHotKeys_={};this.bubblingKeyPressHotKeys_={};this.capturingKeyPressHotKeys_={};this.onBubblingKeyDown_=this.onKey_.bind(this,false);this.onCapturingKeyDown_=this.onKey_.bind(this,true);this.onBubblingKeyPress_=this.onKey_.bind(this,false);this.onCapturingKeyPress_=this.onKey_.bind(this,true);},attached(){this.isAttached_=true;const host=this.findHost_();if(host.__hotkeyController){throw new Error('Multiple hotkey controllers attached to this host');} host.__hotkeyController=this;this.curHost_=host;let parentElement;if(host.parentElement){parentElement=host.parentElement;}else{parentElement=Polymer.dom(host).parentNode.host;} -const parentController=tr.b.getHotkeyControllerForElement(parentElement);if(parentController){this.coupledToParentController_=parentController;parentController.addChildController_(this);return;} -host.addEventListener('keydown',this.onBubblingKeyDown_,false);host.addEventListener('keydown',this.onCapturingKeyDown_,true);host.addEventListener('keypress',this.onBubblingKeyPress_,false);host.addEventListener('keypress',this.onCapturingKeyPress_,true);},detached(){this.isAttached_=false;const host=this.curHost_;if(!host)return;delete host.__hotkeyController;this.curHost_=undefined;if(this.coupledToParentController_){this.coupledToParentController_.removeChildController_(this);this.coupledToParentController_=undefined;return;} +const parentController=tr.b.getHotkeyControllerForElement(parentElement);if(parentController){this.slavedToParentController_=parentController;parentController.addChildController_(this);return;} 
+host.addEventListener('keydown',this.onBubblingKeyDown_,false);host.addEventListener('keydown',this.onCapturingKeyDown_,true);host.addEventListener('keypress',this.onBubblingKeyPress_,false);host.addEventListener('keypress',this.onCapturingKeyPress_,true);},detached(){this.isAttached_=false;const host=this.curHost_;if(!host)return;delete host.__hotkeyController;this.curHost_=undefined;if(this.slavedToParentController_){this.slavedToParentController_.removeChildController_(this);this.slavedToParentController_=undefined;return;} host.removeEventListener('keydown',this.onBubblingKeyDown_,false);host.removeEventListener('keydown',this.onCapturingKeyDown_,true);host.removeEventListener('keypress',this.onBubblingKeyPress_,false);host.removeEventListener('keypress',this.onCapturingKeyPress_,true);},addChildController_(controller){const i=this.childControllers_.indexOf(controller);if(i!==-1){throw new Error('Controller already registered');} this.childControllers_.push(controller);},removeChildController_(controller){const i=this.childControllers_.indexOf(controller);if(i===-1){throw new Error('Controller not registered');} this.childControllers_.splice(i,1);return controller;},getKeyMapForEventType_(eventType,useCapture){if(eventType==='keydown'){if(!useCapture){return this.bubblingKeyDownHotKeys_;} @@ -7262,7 +7262,7 @@ keyMap[keyCode]=hotKey;} for(let i=0;i<hotKey.keyCodes.length;i++){const keyCode=hotKey.keyCodes[i];delete keyMap[keyCode];} return hotKey;},get globalMode(){return this.globalMode_;},set globalMode(globalMode){const wasAttached=this.isAttached_;if(wasAttached){this.detached();} -this.globalMode_=!!globalMode;if(wasAttached){this.attached();}},get topmostConroller_(){if(this.coupledToParentController_){return this.coupledToParentController_.topmostConroller_;} +this.globalMode_=!!globalMode;if(wasAttached){this.attached();}},get topmostConroller_(){if(this.slavedToParentController_){return this.slavedToParentController_.topmostConroller_;} return 
this;},childRequestsGeneralFocus(child){const topmost=this.topmostConroller_;if(topmost.curHost_){if(topmost.curHost_.hasAttribute('tabIndex')){topmost.curHost_.focus();}else{if(document.activeElement){document.activeElement.blur();}}}else{if(document.activeElement){document.activeElement.blur();}}},childRequestsBlur(child){child.blur();const topmost=this.topmostConroller_;if(topmost.curHost_){topmost.curHost_.focus();}},findHost_(){if(this.globalMode_)return document.body;if(this.parentElement)return this.parentElement;if(!Polymer.dom(this).parentNode)return this.host;let node=this.parentNode;while(Polymer.dom(node).parentNode)node=Polymer.dom(node).parentNode;return node.host;},appendMatchingHotKeysTo_(matchedHotKeys,useCapture,e){const localKeyMap=this.getKeyMapForEventType_(e.type,useCapture);const localHotKey=localKeyMap[e.keyCode];if(localHotKey){matchedHotKeys.push(localHotKey);} for(let i=0;i<this.childControllers_.length;i++){const controller=this.childControllers_[i];controller.appendMatchingHotKeysTo_(matchedHotKeys,useCapture,e);}},onKey_(useCapture,e){if(!useCapture&&e.path[0].tagName==='INPUT')return;let sortedControllers;const matchedHotKeys=[];this.appendMatchingHotKeysTo_(matchedHotKeys,useCapture,e);if(matchedHotKeys.length===0)return false;if(matchedHotKeys.length>1){throw new Error('More than one hotKey is currently unsupported');} const hotKey=matchedHotKeys[0];let prevented=0;prevented|=hotKey.call(e);return!prevented&&e.defaultPrevented;}});'use strict';tr.exportTo('tr.b',function(){function getHotkeyControllerForElement(refElement){let curElement=refElement;while(curElement){if(curElement.tagName==='tv-ui-b-hotkey-controller'){return curElement;} @@ -7614,7 +7614,7 @@ const ans={supported:false};for(const proc of Object.values(m.processes)){proc.objects.iterObjectInstances(function(instance){if(instance instanceof BlameContextInstance){ans.supported=true;}});} if(!ans.supported){ans.reason='No frame data available';} return ans;},get 
currentRangeOfInterest(){if(this.rangeOfInterest_.isEmpty){return this.model_.bounds;} -return this.rangeOfInterest_;},get rangeOfInterest(){return this.rangeOfInterest_;},set rangeOfInterest(rangeOfInterest){this.rangeOfInterest_=rangeOfInterest;this.updateContents_();},get selection(){},set selection(_){},get textLabel(){return'Frame Data';},get model(){return this.model_;},set model(model){this.model_=model;this.updateContents_();}});tr.ui.side_panel.SidePanelRegistry.register(function(){return document.createElement('tr-ui-e-s-frame-data-side-panel');});});'use strict';Polymer({is:'tr-ui-b-chart-legend-key',ready(){this.$.checkbox.addEventListener('change',this.onCheckboxChange_.bind(this));},onCheckboxChange_(){tr.b.dispatchSimpleEvent(this,tr.ui.b.DataSeriesEnableChangeEventType,true,false,{key:Polymer.dom(this).textContent,enabled:this.enabled});},set textContent(t){Polymer.dom(this.$.label).textContent=t;Polymer.dom(this.$.link).textContent=t;this.updateContents_();},set width(w){w-=20;this.$.link.style.width=w+'px';this.$.label.style.width=w+'px';},get textContent(){return Polymer.dom(this.$.label).textContent;},set optional(optional){this.$.checkbox.style.visibility=optional?'visible':'hidden';},get optional(){return this.$.checkbox.style.visibility==='visible';},set enabled(enabled){this.$.checkbox.checked=enabled?'checked':'';},get enabled(){return this.$.checkbox.checked;},set color(c){this.$.label.style.color=c;this.$.link.color=c;},set target(target){this.$.link.setSelectionAndContent(target,Polymer.dom(this.$.label).textContent);this.updateContents_();},get target(){return this.$.link.selection;},set title(title){this.$.link.title=title;},updateContents_(){this.$.link.style.display=this.target?'':'none';this.$.label.style.display=this.target?'none':'';this.$.label.htmlFor=this.optional?'checkbox':'';}});'use strict';(function(window){window.define=function(x){window.d3=x;};window.define.amd=true;})(this);!function(){function n(n){return 
null!=n&&!isNaN(n)}function t(n){return n.length}function e(n){for(var t=1;n*t%1;)t*=10;return t}function r(n,t){try{for(var e in t)Object.defineProperty(n.prototype,e,{value:t[e],enumerable:!1})}catch(r){n.prototype=t}}function u(){}function i(n){return aa+n in this}function o(n){return n=aa+n,n in this&&delete this[n]}function a(){var n=[];return this.forEach(function(t){n.push(t)}),n}function c(){var n=0;for(var t in this)t.charCodeAt(0)===ca&&++n;return n}function s(){for(var n in this)if(n.charCodeAt(0)===ca)return!1;return!0}function l(){}function f(n,t,e){return function(){var r=e.apply(t,arguments);return r===t?n:r}}function h(n,t){if(t in n)return t;t=t.charAt(0).toUpperCase()+t.substring(1);for(var e=0,r=sa.length;r>e;++e){var u=sa[e]+t;if(u in n)return u}}function g(){}function p(){}function v(n){function t(){for(var t,r=e,u=-1,i=r.length;++u<i;)(t=r[u].on)&&t.apply(this,arguments);return n}var e=[],r=new u;return t.on=function(t,u){var i,o=r.get(t);return arguments.length<2?o&&o.on:(o&&(o.on=null,e=e.slice(0,i=e.indexOf(o)).concat(e.slice(i+1)),r.remove(t)),u&&e.push(r.set(t,{on:u})),n)},t}function d(){Xo.event.preventDefault()}function m(){for(var n,t=Xo.event;n=t.sourceEvent;)t=n;return t}function y(n){for(var t=new p,e=0,r=arguments.length;++e<r;)t[arguments[e]]=v(t);return t.of=function(e,r){return function(u){try{var i=u.sourceEvent=Xo.event;u.target=n,Xo.event=u,t[u.type].apply(e,r)}finally{Xo.event=i}}},t}function x(n){return fa(n,da),n}function M(n){return"function"==typeof n?n:function(){return ha(n,this)}}function _(n){return"function"==typeof n?n:function(){return ga(n,this)}}function b(n,t){function e(){this.removeAttribute(n)}function r(){this.removeAttributeNS(n.space,n.local)}function u(){this.setAttribute(n,t)}function i(){this.setAttributeNS(n.space,n.local,t)}function o(){var e=t.apply(this,arguments);null==e?this.removeAttribute(n):this.setAttribute(n,e)}function a(){var 
e=t.apply(this,arguments);null==e?this.removeAttributeNS(n.space,n.local):this.setAttributeNS(n.space,n.local,e)}return n=Xo.ns.qualify(n),null==t?n.local?r:e:"function"==typeof t?n.local?a:o:n.local?i:u}function w(n){return n.trim().replace(/\s+/g," ")}function S(n){return new RegExp("(?:^|\\s+)"+Xo.requote(n)+"(?:\\s+|$)","g")}function k(n){return n.trim().split(/^|\s+/)}function E(n,t){function e(){for(var e=-1;++e<u;)n[e](this,t)}function r(){for(var e=-1,r=t.apply(this,arguments);++e<u;)n[e](this,r)}n=k(n).map(A);var u=n.length;return"function"==typeof t?r:e}function A(n){var t=S(n);return function(e,r){if(u=e.classList)return r?u.add(n):u.remove(n);var u=e.getAttribute("class")||"";r?(t.lastIndex=0,t.test(u)||e.setAttribute("class",w(u+" "+n))):e.setAttribute("class",w(u.replace(t," ")))}}function C(n,t,e){function r(){this.style.removeProperty(n)}function u(){this.style.setProperty(n,t,e)}function i(){var r=t.apply(this,arguments);null==r?this.style.removeProperty(n):this.style.setProperty(n,r,e)}return null==t?r:"function"==typeof t?i:u}function N(n,t){function e(){delete this[n]}function r(){this[n]=t}function u(){var e=t.apply(this,arguments);null==e?delete this[n]:this[n]=e}return null==t?e:"function"==typeof t?u:r}function L(n){return"function"==typeof n?n:(n=Xo.ns.qualify(n)).local?function(){return this.ownerDocument.createElementNS(n.space,n.local)}:function(){return this.ownerDocument.createElementNS(this.namespaceURI,n)}}function T(n){return{__data__:n}}function q(n){return function(){return va(this,n)}}function z(n){return arguments.length||(n=Xo.ascending),function(t,e){return t&&e?n(t.__data__,e.__data__):!t-!e}}function R(n,t){for(var e=0,r=n.length;r>e;e++)for(var u,i=n[e],o=0,a=i.length;a>o;o++)(u=i[o])&&t(u,o,e);return n}function D(n){return fa(n,ya),n}function P(n){var t,e;return function(r,u,i){var o,a=n[i].update,c=a.length;for(i!=e&&(e=i,t=0),u>=t&&(t=u+1);!(o=a[t])&&++t<c;);return o}}function U(){var 
n=this.__transition__;n&&++n.active}function j(n,t,e){function r(){var t=this[o];t&&(this.removeEventListener(n,t,t.$),delete this[o])}function u(){var u=c(t,Bo(arguments));r.call(this),this.addEventListener(n,this[o]=u,u.$=e),u._=t}function i(){var t,e=new RegExp("^__on([^.]+)"+Xo.requote(n)+"$");for(var r in this)if(t=r.match(e)){var u=this[r];this.removeEventListener(t[1],u,u.$),delete this[r]}}var o="__on"+n,a=n.indexOf("."),c=H;a>0&&(n=n.substring(0,a));var s=Ma.get(n);return s&&(n=s,c=F),a?t?u:r:t?g:i}function H(n,t){return function(e){var r=Xo.event;Xo.event=e,t[0]=this.__data__;try{n.apply(this,t)}finally{Xo.event=r}}}function F(n,t){var e=H(n,t);return function(n){var t=this,r=n.relatedTarget;r&&(r===t||8&r.compareDocumentPosition(t))||e.call(t,n)}}function O(){var n=".dragsuppress-"+ ++ba,t="click"+n,e=Xo.select(Go).on("touchmove"+n,d).on("dragstart"+n,d).on("selectstart"+n,d);if(_a){var r=Jo.style,u=r[_a];r[_a]="none"}return function(i){function o(){e.on(t,null)}e.on(n,null),_a&&(r[_a]=u),i&&(e.on(t,function(){d(),o()},!0),setTimeout(o,0))}}function Y(n,t){t.changedTouches&&(t=t.changedTouches[0]);var e=n.ownerSVGElement||n;if(e.createSVGPoint){var r=e.createSVGPoint();if(0>wa&&(Go.scrollX||Go.scrollY)){e=Xo.select("body").append("svg").style({position:"absolute",top:0,left:0,margin:0,padding:0,border:"none"},"important");var u=e[0][0].getScreenCTM();wa=!(u.f||u.e),e.remove()}return wa?(r.x=t.pageX,r.y=t.pageY):(r.x=t.clientX,r.y=t.clientY),r=r.matrixTransform(n.getScreenCTM().inverse()),[r.x,r.y]}var i=n.getBoundingClientRect();return[t.clientX-i.left-n.clientLeft,t.clientY-i.top-n.clientTop]}function I(n){return n>0?1:0>n?-1:0}function Z(n,t,e){return(t[0]-n[0])*(e[1]-n[1])-(t[1]-n[1])*(e[0]-n[0])}function V(n){return n>1?0:-1>n?Sa:Math.acos(n)}function X(n){return n>1?Ea:-1>n?-Ea:Math.asin(n)}function $(n){return((n=Math.exp(n))-1/n)/2}function B(n){return((n=Math.exp(n))+1/n)/2}function W(n){return((n=Math.exp(2*n))-1)/(n+1)}function 
J(n){return(n=Math.sin(n/2))*n}function G(){}function K(n,t,e){return new Q(n,t,e)}function Q(n,t,e){this.h=n,this.s=t,this.l=e}function nt(n,t,e){function r(n){return n>360?n-=360:0>n&&(n+=360),60>n?i+(o-i)*n/60:180>n?o:240>n?i+(o-i)*(240-n)/60:i}function u(n){return Math.round(255*r(n))}var i,o;return n=isNaN(n)?0:(n%=360)<0?n+360:n,t=isNaN(t)?0:0>t?0:t>1?1:t,e=0>e?0:e>1?1:e,o=.5>=e?e*(1+t):e+t-e*t,i=2*e-o,gt(u(n+120),u(n),u(n-120))}function tt(n,t,e){return new et(n,t,e)}function et(n,t,e){this.h=n,this.c=t,this.l=e}function rt(n,t,e){return isNaN(n)&&(n=0),isNaN(t)&&(t=0),ut(e,Math.cos(n*=Na)*t,Math.sin(n)*t)}function ut(n,t,e){return new it(n,t,e)}function it(n,t,e){this.l=n,this.a=t,this.b=e}function ot(n,t,e){var r=(n+16)/116,u=r+t/500,i=r-e/200;return u=ct(u)*Fa,r=ct(r)*Oa,i=ct(i)*Ya,gt(lt(3.2404542*u-1.5371385*r-.4985314*i),lt(-.969266*u+1.8760108*r+.041556*i),lt(.0556434*u-.2040259*r+1.0572252*i))}function at(n,t,e){return n>0?tt(Math.atan2(e,t)*La,Math.sqrt(t*t+e*e),n):tt(0/0,0/0,n)}function ct(n){return n>.206893034?n*n*n:(n-4/29)/7.787037}function st(n){return n>.008856?Math.pow(n,1/3):7.787037*n+4/29}function lt(n){return Math.round(255*(.00304>=n?12.92*n:1.055*Math.pow(n,1/2.4)-.055))}function ft(n){return gt(n>>16,255&n>>8,255&n)}function ht(n){return ft(n)+""}function gt(n,t,e){return new pt(n,t,e)}function pt(n,t,e){this.r=n,this.g=t,this.b=e}function vt(n){return 16>n?"0"+Math.max(0,n).toString(16):Math.min(255,n).toString(16)}function dt(n,t,e){var r,u,i,o,a=0,c=0,s=0;if(u=/([a-z]+)\((.*)\)/i.exec(n))switch(i=u[2].split(","),u[1]){case"hsl":return e(parseFloat(i[0]),parseFloat(i[1])/100,parseFloat(i[2])/100);case"rgb":return t(Mt(i[0]),Mt(i[1]),Mt(i[2]))}return(o=Va.get(n))?t(o.r,o.g,o.b):(null!=n&&"#"===n.charAt(0)&&(r=parseInt(n.substring(1),16),isNaN(r)||(4===n.length?(a=(3840&r)>>4,a=a>>4|a,c=240&r,c=c>>4|c,s=15&r,s=s<<4|s):7===n.length&&(a=(16711680&r)>>16,c=(65280&r)>>8,s=255&r))),t(a,c,s))}function mt(n,t,e){var 
r,u,i=Math.min(n/=255,t/=255,e/=255),o=Math.max(n,t,e),a=o-i,c=(o+i)/2;return a?(u=.5>c?a/(o+i):a/(2-o-i),r=n==o?(t-e)/a+(e>t?6:0):t==o?(e-n)/a+2:(n-t)/a+4,r*=60):(r=0/0,u=c>0&&1>c?0:r),K(r,u,c)}function yt(n,t,e){n=xt(n),t=xt(t),e=xt(e);var r=st((.4124564*n+.3575761*t+.1804375*e)/Fa),u=st((.2126729*n+.7151522*t+.072175*e)/Oa),i=st((.0193339*n+.119192*t+.9503041*e)/Ya);return ut(116*u-16,500*(r-u),200*(u-i))}function xt(n){return(n/=255)<=.04045?n/12.92:Math.pow((n+.055)/1.055,2.4)}function Mt(n){var t=parseFloat(n);return"%"===n.charAt(n.length-1)?Math.round(2.55*t):t}function _t(n){return"function"==typeof n?n:function(){return n}}function bt(n){return n}function wt(n){return function(t,e,r){return 2===arguments.length&&"function"==typeof e&&(r=e,e=null),St(t,e,n,r)}}function St(n,t,e,r){function u(){var n,t=c.status;if(!t&&c.responseText||t>=200&&300>t||304===t){try{n=e.call(i,c)}catch(r){return o.error.call(i,r),void 0}o.load.call(i,n)}else o.error.call(i,c)}var i={},o=Xo.dispatch("beforesend","progress","load","error"),a={},c=new XMLHttpRequest,s=null;return!Go.XDomainRequest||"withCredentials"in c||!/^(http(s)?:)?\/\//.test(n)||(c=new XDomainRequest),"onload"in c?c.onload=c.onerror=u:c.onreadystatechange=function(){c.readyState>3&&u()},c.onprogress=function(n){var t=Xo.event;Xo.event=n;try{o.progress.call(i,c)}finally{Xo.event=t}},i.header=function(n,t){return n=(n+"").toLowerCase(),arguments.length<2?a[n]:(null==t?delete a[n]:a[n]=t+"",i)},i.mimeType=function(n){return arguments.length?(t=null==n?null:n+"",i):t},i.responseType=function(n){return arguments.length?(s=n,i):s},i.response=function(n){return e=n,i},["get","post"].forEach(function(n){i[n]=function(){return i.send.apply(i,[n].concat(Bo(arguments)))}}),i.send=function(e,r,u){if(2===arguments.length&&"function"==typeof r&&(u=r,r=null),c.open(e,n,!0),null==t||"accept"in a||(a.accept=t+",*/*"),c.setRequestHeader)for(var l in a)c.setRequestHeader(l,a[l]);return 
null!=t&&c.overrideMimeType&&c.overrideMimeType(t),null!=s&&(c.responseType=s),null!=u&&i.on("error",u).on("load",function(n){u(null,n)}),o.beforesend.call(i,c),c.send(null==r?null:r),i},i.abort=function(){return c.abort(),i},Xo.rebind(i,o,"on"),null==r?i:i.get(kt(r))}function kt(n){return 1===n.length?function(t,e){n(null==t?e:null)}:n}function Et(){var n=At(),t=Ct()-n;t>24?(isFinite(t)&&(clearTimeout(Wa),Wa=setTimeout(Et,t)),Ba=0):(Ba=1,Ga(Et))}function At(){var n=Date.now();for(Ja=Xa;Ja;)n>=Ja.t&&(Ja.f=Ja.c(n-Ja.t)),Ja=Ja.n;return n}function Ct(){for(var n,t=Xa,e=1/0;t;)t.f?t=n?n.n=t.n:Xa=t.n:(t.t<e&&(e=t.t),t=(n=t).n);return $a=n,e}function Nt(n,t){return t-(n?Math.ceil(Math.log(n)/Math.LN10):1)}function Lt(n,t){var e=Math.pow(10,3*oa(8-t));return{scale:t>8?function(n){return n/e}:function(n){return n*e},symbol:n}}function Tt(n){var t=n.decimal,e=n.thousands,r=n.grouping,u=n.currency,i=r?function(n){for(var t=n.length,u=[],i=0,o=r[0];t>0&&o>0;)u.push(n.substring(t-=o,t+o)),o=r[i=(i+1)%r.length];return u.reverse().join(e)}:bt;return function(n){var e=Qa.exec(n),r=e[1]||" ",o=e[2]||">",a=e[3]||"",c=e[4]||"",s=e[5],l=+e[6],f=e[7],h=e[8],g=e[9],p=1,v="",d="",m=!1;switch(h&&(h=+h.substring(1)),(s||"0"===r&&"="===o)&&(s=r="0",o="=",f&&(l-=Math.floor((l-1)/4))),g){case"n":f=!0,g="g";break;case"%":p=100,d="%",g="f";break;case"p":p=100,d="%",g="r";break;case"b":case"o":case"x":case"X":"#"===c&&(v="0"+g.toLowerCase());case"c":case"d":m=!0,h=0;break;case"s":p=-1,g="r"}"$"===c&&(v=u[0],d=u[1]),"r"!=g||h||(g="g"),null!=h&&("g"==g?h=Math.max(1,Math.min(21,h)):("e"==g||"f"==g)&&(h=Math.max(0,Math.min(20,h)))),g=nc.get(g)||qt;var y=s&&f;return function(n){var e=d;if(m&&n%1)return"";var u=0>n||0===n&&0>1/n?(n=-n,"-"):a;if(0>p){var c=Xo.formatPrefix(n,h);n=c.scale(n),e=c.symbol+d}else n*=p;n=g(n,h);var x=n.lastIndexOf("."),M=0>x?n:n.substring(0,x),_=0>x?"":t+n.substring(x+1);!s&&f&&(M=i(M));var b=v.length+M.length+_.length+(y?0:u.length),w=l>b?new 
Array(b=l-b+1).join(r):"";return y&&(M=i(w+M)),u+=v,n=M+_,("<"===o?u+n+w:">"===o?w+u+n:"^"===o?w.substring(0,b>>=1)+u+n+w.substring(b):u+(y?n:w+n))+e}}}function qt(n){return n+""}function zt(){this._=new Date(arguments.length>1?Date.UTC.apply(this,arguments):arguments[0])}function Rt(n,t,e){function r(t){var e=n(t),r=i(e,1);return r-t>t-e?e:r}function u(e){return t(e=n(new ec(e-1)),1),e}function i(n,e){return t(n=new ec(+n),e),n}function o(n,r,i){var o=u(n),a=[];if(i>1)for(;r>o;)e(o)%i||a.push(new Date(+o)),t(o,1);else for(;r>o;)a.push(new Date(+o)),t(o,1);return a}function a(n,t,e){try{ec=zt;var r=new zt;return r._=n,o(r,t,e)}finally{ec=Date}}n.floor=n,n.round=r,n.ceil=u,n.offset=i,n.range=o;var c=n.utc=Dt(n);return c.floor=c,c.round=Dt(r),c.ceil=Dt(u),c.offset=Dt(i),c.range=a,n}function Dt(n){return function(t,e){try{ec=zt;var r=new zt;return r._=t,n(r,e)._}finally{ec=Date}}}function Pt(n){function t(n){function t(t){for(var e,u,i,o=[],a=-1,c=0;++a<r;)37===n.charCodeAt(a)&&(o.push(n.substring(c,a)),null!=(u=uc[e=n.charAt(++a)])&&(e=n.charAt(++a)),(i=C[e])&&(e=i(t,null==u?"e"===e?" 
":"0":u)),o.push(e),c=a+1);return o.push(n.substring(c,a)),o.join("")}var r=n.length;return t.parse=function(t){var r={y:1900,m:0,d:1,H:0,M:0,S:0,L:0,Z:null},u=e(r,n,t,0);if(u!=t.length)return null;"p"in r&&(r.H=r.H%12+12*r.p);var i=null!=r.Z&&ec!==zt,o=new(i?zt:ec);return"j"in r?o.setFullYear(r.y,0,r.j):"w"in r&&("W"in r||"U"in r)?(o.setFullYear(r.y,0,1),o.setFullYear(r.y,0,"W"in r?(r.w+6)%7+7*r.W-(o.getDay()+5)%7:r.w+7*r.U-(o.getDay()+6)%7)):o.setFullYear(r.y,r.m,r.d),o.setHours(r.H+Math.floor(r.Z/100),r.M+r.Z%100,r.S,r.L),i?o._:o},t.toString=function(){return n},t}function e(n,t,e,r){for(var u,i,o,a=0,c=t.length,s=e.length;c>a;){if(r>=s)return-1;if(u=t.charCodeAt(a++),37===u){if(o=t.charAt(a++),i=N[o in uc?t.charAt(a++):o],!i||(r=i(n,e,r))<0)return-1}else if(u!=e.charCodeAt(r++))return-1}return r}function r(n,t,e){b.lastIndex=0;var r=b.exec(t.substring(e));return r?(n.w=w.get(r[0].toLowerCase()),e+r[0].length):-1}function u(n,t,e){M.lastIndex=0;var r=M.exec(t.substring(e));return r?(n.w=_.get(r[0].toLowerCase()),e+r[0].length):-1}function i(n,t,e){E.lastIndex=0;var r=E.exec(t.substring(e));return r?(n.m=A.get(r[0].toLowerCase()),e+r[0].length):-1}function o(n,t,e){S.lastIndex=0;var r=S.exec(t.substring(e));return r?(n.m=k.get(r[0].toLowerCase()),e+r[0].length):-1}function a(n,t,r){return e(n,C.c.toString(),t,r)}function c(n,t,r){return e(n,C.x.toString(),t,r)}function s(n,t,r){return e(n,C.X.toString(),t,r)}function l(n,t,e){var r=x.get(t.substring(e,e+=2).toLowerCase());return null==r?-1:(n.p=r,e)}var f=n.dateTime,h=n.date,g=n.time,p=n.periods,v=n.days,d=n.shortDays,m=n.months,y=n.shortMonths;t.utc=function(n){function e(n){try{ec=zt;var t=new ec;return t._=n,r(t)}finally{ec=Date}}var r=t(n);return e.parse=function(n){try{ec=zt;var t=r.parse(n);return t&&t._}finally{ec=Date}},e.toString=r.toString,e},t.multi=t.utc.multi=ee;var x=Xo.map(),M=jt(v),_=Ht(v),b=jt(d),w=Ht(d),S=jt(m),k=Ht(m),E=jt(y),A=Ht(y);p.forEach(function(n,t){x.set(n.toLowerCase(),t)});var 
C={a:function(n){return d[n.getDay()]},A:function(n){return v[n.getDay()]},b:function(n){return y[n.getMonth()]},B:function(n){return m[n.getMonth()]},c:t(f),d:function(n,t){return Ut(n.getDate(),t,2)},e:function(n,t){return Ut(n.getDate(),t,2)},H:function(n,t){return Ut(n.getHours(),t,2)},I:function(n,t){return Ut(n.getHours()%12||12,t,2)},j:function(n,t){return Ut(1+tc.dayOfYear(n),t,3)},L:function(n,t){return Ut(n.getMilliseconds(),t,3)},m:function(n,t){return Ut(n.getMonth()+1,t,2)},M:function(n,t){return Ut(n.getMinutes(),t,2)},p:function(n){return p[+(n.getHours()>=12)]},S:function(n,t){return Ut(n.getSeconds(),t,2)},U:function(n,t){return Ut(tc.sundayOfYear(n),t,2)},w:function(n){return n.getDay()},W:function(n,t){return Ut(tc.mondayOfYear(n),t,2)},x:t(h),X:t(g),y:function(n,t){return Ut(n.getFullYear()%100,t,2)},Y:function(n,t){return Ut(n.getFullYear()%1e4,t,4)},Z:ne,"%":function(){return"%"}},N={a:r,A:u,b:i,B:o,c:a,d:Bt,e:Bt,H:Jt,I:Jt,j:Wt,L:Qt,m:$t,M:Gt,p:l,S:Kt,U:Ot,w:Ft,W:Yt,x:c,X:s,y:Zt,Y:It,Z:Vt,"%":te};return t}function Ut(n,t,e){var r=0>n?"-":"",u=(r?-n:n)+"",i=u.length;return r+(e>i?new Array(e-i+1).join(t)+u:u)}function jt(n){return new RegExp("^(?:"+n.map(Xo.requote).join("|")+")","i")}function Ht(n){for(var t=new u,e=-1,r=n.length;++e<r;)t.set(n[e].toLowerCase(),e);return t}function Ft(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+1));return r?(n.w=+r[0],e+r[0].length):-1}function Ot(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e));return r?(n.U=+r[0],e+r[0].length):-1}function Yt(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e));return r?(n.W=+r[0],e+r[0].length):-1}function It(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+4));return r?(n.y=+r[0],e+r[0].length):-1}function Zt(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+2));return r?(n.y=Xt(+r[0]),e+r[0].length):-1}function Vt(n,t,e){return/^[+-]\d{4}$/.test(t=t.substring(e,e+5))?(n.Z=+t,e+5):-1}function Xt(n){return n+(n>68?1900:2e3)}function 
$t(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+2));return r?(n.m=r[0]-1,e+r[0].length):-1}function Bt(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+2));return r?(n.d=+r[0],e+r[0].length):-1}function Wt(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+3));return r?(n.j=+r[0],e+r[0].length):-1}function Jt(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+2));return r?(n.H=+r[0],e+r[0].length):-1}function Gt(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+2));return r?(n.M=+r[0],e+r[0].length):-1}function Kt(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+2));return r?(n.S=+r[0],e+r[0].length):-1}function Qt(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+3));return r?(n.L=+r[0],e+r[0].length):-1}function ne(n){var t=n.getTimezoneOffset(),e=t>0?"-":"+",r=~~(oa(t)/60),u=oa(t)%60;return e+Ut(r,"0",2)+Ut(u,"0",2)}function te(n,t,e){oc.lastIndex=0;var r=oc.exec(t.substring(e,e+1));return r?e+r[0].length:-1}function ee(n){for(var t=n.length,e=-1;++e<t;)n[e][0]=this(n[e][0]);return function(t){for(var e=0,r=n[e];!r[1](t);)r=n[++e];return r[0](t)}}function re(){}function ue(n,t,e){var r=e.s=n+t,u=r-n,i=r-u;e.t=n-i+(t-u)}function ie(n,t){n&&lc.hasOwnProperty(n.type)&&lc[n.type](n,t)}function oe(n,t,e){var r,u=-1,i=n.length-e;for(t.lineStart();++u<i;)r=n[u],t.point(r[0],r[1],r[2]);t.lineEnd()}function ae(n,t){var e=-1,r=n.length;for(t.polygonStart();++e<r;)oe(n[e],t,1);t.polygonEnd()}function ce(){function n(n,t){n*=Na,t=t*Na/2+Sa/4;var e=n-r,o=e>=0?1:-1,a=o*e,c=Math.cos(t),s=Math.sin(t),l=i*s,f=u*c+l*Math.cos(a),h=l*o*Math.sin(a);hc.add(Math.atan2(h,f)),r=n,u=c,i=s}var t,e,r,u,i;gc.point=function(o,a){gc.point=n,r=(t=o)*Na,u=Math.cos(a=(e=a)*Na/2+Sa/4),i=Math.sin(a)},gc.lineEnd=function(){n(t,e)}}function se(n){var t=n[0],e=n[1],r=Math.cos(e);return[r*Math.cos(t),r*Math.sin(t),Math.sin(e)]}function le(n,t){return n[0]*t[0]+n[1]*t[1]+n[2]*t[2]}function fe(n,t){return[n[1]*t[2]-n[2]*t[1],n[2]*t[0]-n[0]*t[2],n[0]*t[1]-n[1]*t[0]]}function 
they(n,t){n[0]+=t[0],n[1]+=t[1],n[2]+=t[2]}function ge(n,t){return[n[0]*t,n[1]*t,n[2]*t]}function pe(n){var t=Math.sqrt(n[0]*n[0]+n[1]*n[1]+n[2]*n[2]);n[0]/=t,n[1]/=t,n[2]/=t}function ve(n){return[Math.atan2(n[1],n[0]),X(n[2])]}function de(n,t){return oa(n[0]-t[0])<Aa&&oa(n[1]-t[1])<Aa}function me(n,t){n*=Na;var e=Math.cos(t*=Na);ye(e*Math.cos(n),e*Math.sin(n),Math.sin(t))}function ye(n,t,e){++pc,dc+=(n-dc)/pc,mc+=(t-mc)/pc,yc+=(e-yc)/pc}function xe(){function n(n,u){n*=Na;var i=Math.cos(u*=Na),o=i*Math.cos(n),a=i*Math.sin(n),c=Math.sin(u),s=Math.atan2(Math.sqrt((s=e*c-r*a)*s+(s=r*o-t*c)*s+(s=t*a-e*o)*s),t*o+e*a+r*c);vc+=s,xc+=s*(t+(t=o)),Mc+=s*(e+(e=a)),_c+=s*(r+(r=c)),ye(t,e,r)}var t,e,r;kc.point=function(u,i){u*=Na;var o=Math.cos(i*=Na);t=o*Math.cos(u),e=o*Math.sin(u),r=Math.sin(i),kc.point=n,ye(t,e,r)}}function Me(){kc.point=me}function _e(){function n(n,t){n*=Na;var e=Math.cos(t*=Na),o=e*Math.cos(n),a=e*Math.sin(n),c=Math.sin(t),s=u*c-i*a,l=i*o-r*c,f=r*a-u*o,h=Math.sqrt(s*s+l*l+f*f),g=r*o+u*a+i*c,p=h&&-V(g)/h,v=Math.atan2(h,g);bc+=p*s,wc+=p*l,Sc+=p*f,vc+=v,xc+=v*(r+(r=o)),Mc+=v*(u+(u=a)),_c+=v*(i+(i=c)),ye(r,u,i)}var t,e,r,u,i;kc.point=function(o,a){t=o,e=a,kc.point=n,o*=Na;var c=Math.cos(a*=Na);r=c*Math.cos(o),u=c*Math.sin(o),i=Math.sin(a),ye(r,u,i)},kc.lineEnd=function(){n(t,e),kc.lineEnd=Me,kc.point=me}}function be(){return!0}function we(n,t,e,r,u){var i=[],o=[];if(n.forEach(function(n){if(!((t=n.length-1)<=0)){var t,e=n[0],r=n[t];if(de(e,r)){u.lineStart();for(var a=0;t>a;++a)u.point((e=n[a])[0],e[1]);return u.lineEnd(),void 0}var c=new ke(e,n,null,!0),s=new ke(e,null,c,!1);c.o=s,i.push(c),o.push(s),c=new ke(r,n,null,!1),s=new ke(r,null,c,!0),c.o=s,i.push(c),o.push(s)}}),o.sort(t),Se(i),Se(o),i.length){for(var a=0,c=e,s=o.length;s>a;++a)o[a].e=c=!c;for(var l,f,h=i[0];;){for(var g=h,p=!0;g.v;)if((g=g.n)===h)return;l=g.z,u.lineStart();do{if(g.v=g.o.v=!0,g.e){if(p)for(var a=0,s=l.length;s>a;++a)u.point((f=l[a])[0],f[1]);else 
r(g.x,g.n.x,1,u);g=g.n}else{if(p){l=g.p.z;for(var a=l.length-1;a>=0;--a)u.point((f=l[a])[0],f[1])}else r(g.x,g.p.x,-1,u);g=g.p}g=g.o,l=g.z,p=!p}while(!g.v);u.lineEnd()}}}function Se(n){if(t=n.length){for(var t,e,r=0,u=n[0];++r<t;)u.n=e=n[r],e.p=u,u=e;u.n=e=n[0],e.p=u}}function ke(n,t,e,r){this.x=n,this.z=t,this.o=e,this.e=r,this.v=!1,this.n=this.p=null}function Ee(n,t,e,r){return function(u,i){function o(t,e){var r=u(t,e);n(t=r[0],e=r[1])&&i.point(t,e)}function a(n,t){var e=u(n,t);d.point(e[0],e[1])}function c(){y.point=a,d.lineStart()}function s(){y.point=o,d.lineEnd()}function l(n,t){v.push([n,t]);var e=u(n,t);M.point(e[0],e[1])}function f(){M.lineStart(),v=[]}function h(){l(v[0][0],v[0][1]),M.lineEnd();var n,t=M.clean(),e=x.buffer(),r=e.length;if(v.pop(),p.push(v),v=null,r){if(1&t){n=e[0];var u,r=n.length-1,o=-1;for(i.lineStart();++o<r;)i.point((u=n[o])[0],u[1]);return i.lineEnd(),void 0}r>1&&2&t&&e.push(e.pop().concat(e.shift())),g.push(e.filter(Ae))}}var g,p,v,d=t(i),m=u.invert(r[0],r[1]),y={point:o,lineStart:c,lineEnd:s,polygonStart:function(){y.point=l,y.lineStart=f,y.lineEnd=h,g=[],p=[],i.polygonStart()},polygonEnd:function(){y.point=o,y.lineStart=c,y.lineEnd=s,g=Xo.merge(g);var n=Le(m,p);g.length?we(g,Ne,n,e,i):n&&(i.lineStart(),e(null,null,1,i),i.lineEnd()),i.polygonEnd(),g=p=null},sphere:function(){i.polygonStart(),i.lineStart(),e(null,null,1,i),i.lineEnd(),i.polygonEnd()}},x=Ce(),M=t(x);return y}}function Ae(n){return n.length>1}function Ce(){var n,t=[];return{lineStart:function(){t.push(n=[])},point:function(t,e){n.push([t,e])},lineEnd:g,buffer:function(){var e=t;return t=[],n=null,e},rejoin:function(){t.length>1&&t.push(t.pop().concat(t.shift()))}}}function Ne(n,t){return((n=n.x)[0]<0?n[1]-Ea-Aa:Ea-n[1])-((t=t.x)[0]<0?t[1]-Ea-Aa:Ea-t[1])}function Le(n,t){var e=n[0],r=n[1],u=[Math.sin(e),-Math.cos(e),0],i=0,o=0;hc.reset();for(var a=0,c=t.length;c>a;++a){var s=t[a],l=s.length;if(l)for(var 
/* Continuation of spherical point-in-polygon: accumulates signed area (hc) and winding count (o) per ring edge, testing meridian crossings against the reference point; Te = antimeridian line clipper - splits lines that cross +-PI, emitting pole-latitude joins via qe (latitude where a great arc crosses a given meridian pair); ze = interpolator that traces the antimeridian/pole boundary between two clip points; Re = small-circle (clipAngle) clipper start: t() tests visibility against cos(radius) i. */f=s[0],h=f[0],g=f[1]/2+Sa/4,p=Math.sin(g),v=Math.cos(g),d=1;;){d===l&&(d=0),n=s[d];var m=n[0],y=n[1]/2+Sa/4,x=Math.sin(y),M=Math.cos(y),_=m-h,b=_>=0?1:-1,w=b*_,S=w>Sa,k=p*x;if(hc.add(Math.atan2(k*b*Math.sin(w),v*M+k*Math.cos(w))),i+=S?_+b*ka:_,S^h>=e^m>=e){var E=fe(se(f),se(n));pe(E);var A=fe(u,E);pe(A);var C=(S^_>=0?-1:1)*X(A[2]);(r>C||r===C&&(E[0]||E[1]))&&(o+=S^_>=0?1:-1)}if(!d++)break;h=m,p=x,v=M,f=n}}return(-Aa>i||Aa>i&&0>hc)^1&o}function Te(n){var t,e=0/0,r=0/0,u=0/0;return{lineStart:function(){n.lineStart(),t=1},point:function(i,o){var a=i>0?Sa:-Sa,c=oa(i-e);oa(c-Sa)<Aa?(n.point(e,r=(r+o)/2>0?Ea:-Ea),n.point(u,r),n.lineEnd(),n.lineStart(),n.point(a,r),n.point(i,r),t=0):u!==a&&c>=Sa&&(oa(e-u)<Aa&&(e-=u*Aa),oa(i-a)<Aa&&(i-=a*Aa),r=qe(e,r,i,o),n.point(u,r),n.lineEnd(),n.lineStart(),n.point(a,r),t=0),n.point(e=i,r=o),u=a},lineEnd:function(){n.lineEnd(),e=r=0/0},clean:function(){return 2-t}}}function qe(n,t,e,r){var u,i,o=Math.sin(n-e);return oa(o)>Aa?Math.atan((Math.sin(t)*(i=Math.cos(r))*Math.sin(e)-Math.sin(r)*(u=Math.cos(t))*Math.sin(n))/(u*i*o)):(t+r)/2}function ze(n,t,e,r){var u;if(null==n)u=e*Ea,r.point(-Sa,u),r.point(0,u),r.point(Sa,u),r.point(Sa,0),r.point(Sa,-u),r.point(0,-u),r.point(-Sa,-u),r.point(-Sa,0),r.point(-Sa,u);else if(oa(n[0]-t[0])>Aa){var i=n[0]<t[0]?Sa:-Sa;u=e*i/2,r.point(-i,u),r.point(0,u),r.point(i,u)}else r.point(t[0],t[1])}function Re(n){function t(n,t){return Math.cos(n)*Math.cos(t)>i}function e(n){var e,i,c,s,l;return{lineStart:function(){s=c=!1,l=1},point:function(f,h){var g,p=[f,h],v=t(f,h),d=o?v?0:u(f,h):v?u(f+(0>f?Sa:-Sa),h):0;if(!e&&(s=c=v)&&n.lineStart(),v!==c&&(g=r(e,p),(de(e,g)||de(p,g))&&(p[0]+=Aa,p[1]+=Aa,v=t(p[0],p[1]))),v!==c)l=0,v?(n.lineStart(),g=r(p,e),n.point(g[0],g[1])):(g=r(e,p),n.point(g[0],g[1]),n.lineEnd()),e=g;else if(a&&e&&o^v){var 
/* Continuation of clipAngle: r(a,b[,two]) intersects a segment with the clip circle using cartesian cross/dot algebra (returns one point, or both roots when the third arg is set); u(lon,lat) computes a 4-bit region code used to skip impossible re-entries; De = Liang-Barsky style parametric clip of a segment {a,b} against the rectangle [n,t]x[e,r], trimming endpoints in place. */m;d&i||!(m=r(p,e,!0))||(l=0,o?(n.lineStart(),n.point(m[0][0],m[0][1]),n.point(m[1][0],m[1][1]),n.lineEnd()):(n.point(m[1][0],m[1][1]),n.lineEnd(),n.lineStart(),n.point(m[0][0],m[0][1])))}!v||e&&de(e,p)||n.point(p[0],p[1]),e=p,c=v,i=d},lineEnd:function(){c&&n.lineEnd(),e=null},clean:function(){return l|(s&&c)<<1}}}function r(n,t,e){var r=se(n),u=se(t),o=[1,0,0],a=fe(r,u),c=le(a,a),s=a[0],l=c-s*s;if(!l)return!e&&n;var f=i*c/l,h=-i*s/l,g=fe(o,a),p=ge(o,f),v=ge(a,h);they(p,v);var d=g,m=le(p,d),y=le(d,d),x=m*m-y*(le(p,p)-1);if(!(0>x)){var M=Math.sqrt(x),_=ge(d,(-m-M)/y);if(they(_,p),_=ve(_),!e)return _;var b,w=n[0],S=t[0],k=n[1],E=t[1];w>S&&(b=w,w=S,S=b);var A=S-w,C=oa(A-Sa)<Aa,N=C||Aa>A;if(!C&&k>E&&(b=k,k=E,E=b),N?C?k+E>0^_[1]<(oa(_[0]-w)<Aa?k:E):k<=_[1]&&_[1]<=E:A>Sa^(w<=_[0]&&_[0]<=S)){var L=ge(d,(-m+M)/y);return they(L,p),[_,ve(L)]}}}function u(t,e){var r=o?n:Sa-n,u=0;return-r>t?u|=1:t>r&&(u|=2),-r>e?u|=4:e>r&&(u|=8),u}var i=Math.cos(n),o=i>0,a=oa(i)>Aa,c=cr(n,6*Na);return Ee(t,e,c,o?[0,-n]:[-Sa,n-Sa])}function De(n,t,e,r){return function(u){var i,o=u.a,a=u.b,c=o.x,s=o.y,l=a.x,f=a.y,h=0,g=1,p=l-c,v=f-s;if(i=n-c,p||!(i>0)){if(i/=p,0>p){if(h>i)return;g>i&&(g=i)}else if(p>0){if(i>g)return;i>h&&(h=i)}if(i=e-c,p||!(0>i)){if(i/=p,0>p){if(i>g)return;i>h&&(h=i)}else if(p>0){if(h>i)return;g>i&&(g=i)}if(i=t-s,v||!(i>0)){if(i/=v,0>v){if(h>i)return;g>i&&(g=i)}else if(v>0){if(i>g)return;i>h&&(h=i)}if(i=r-s,v||!(0>i)){if(i/=v,0>v){if(i>g)return;i>h&&(h=i)}else if(v>0){if(h>i)return;g>i&&(g=i)}return h>0&&(u.a={x:c+h*p,y:s+h*v}),1>g&&(u.b={x:c+g*p,y:s+g*v}),u}}}}}}function Pe(n,t,e,r){function u(r,u){return oa(r[0]-n)<Aa?u>0?0:3:oa(r[0]-e)<Aa?u>0?2:1:oa(r[1]-t)<Aa?u>0?1:0:u>0?3:2}function i(n,t){return o(n.x,t.x)}function o(n,t){var e=u(n,1),r=u(t,1);return e!==r?e-r:0===e?t[1]-n[1]:1===e?n[0]-t[0]:2===e?n[1]-t[1]:t[0]-n[0]}return function(a){function c(n){for(var t=0,e=d.length,r=n[1],u=0;e>u;++u)for(var 
/* Continuation of rectangle clip (Pe): c = even-odd point-in-rings test, s = interpolation along box corners, l = point-in-extent test, p clamps coordinates and emits clipped segments via the De clipper; Ue composes two point transforms (with composed invert when both provide one); je wraps a conic raw with a .parallels accessor (degrees); "They" = conic equal-area raw projection with invert (identifier looks auto-renamed from "He" - do not rename, external callers may use it); Fe accumulates planar ring area via the cross-product running sum Nc. */i,o=1,a=d[u],c=a.length,s=a[0];c>o;++o)i=a[o],s[1]<=r?i[1]>r&&Z(s,i,n)>0&&++t:i[1]<=r&&Z(s,i,n)<0&&--t,s=i;return 0!==t}function s(i,a,c,s){var l=0,f=0;if(null==i||(l=u(i,c))!==(f=u(a,c))||o(i,a)<0^c>0){do s.point(0===l||3===l?n:e,l>1?r:t);while((l=(l+c+4)%4)!==f)}else s.point(a[0],a[1])}function l(u,i){return u>=n&&e>=u&&i>=t&&r>=i}function f(n,t){l(n,t)&&a.point(n,t)}function h(){N.point=p,d&&d.push(m=[]),S=!0,w=!1,_=b=0/0}function g(){v&&(p(y,x),M&&w&&A.rejoin(),v.push(A.buffer())),N.point=f,w&&a.lineEnd()}function p(n,t){n=Math.max(-Ac,Math.min(Ac,n)),t=Math.max(-Ac,Math.min(Ac,t));var e=l(n,t);if(d&&m.push([n,t]),S)y=n,x=t,M=e,S=!1,e&&(a.lineStart(),a.point(n,t));else if(e&&w)a.point(n,t);else{var r={a:{x:_,y:b},b:{x:n,y:t}};C(r)?(w||(a.lineStart(),a.point(r.a.x,r.a.y)),a.point(r.b.x,r.b.y),e||a.lineEnd(),k=!1):e&&(a.lineStart(),a.point(n,t),k=!1)}_=n,b=t,w=e}var v,d,m,y,x,M,_,b,w,S,k,E=a,A=Ce(),C=De(n,t,e,r),N={point:f,lineStart:h,lineEnd:g,polygonStart:function(){a=A,v=[],d=[],k=!0},polygonEnd:function(){a=E,v=Xo.merge(v);var t=c([n,r]),e=k&&t,u=v.length;(e||u)&&(a.polygonStart(),e&&(a.lineStart(),s(null,null,1,a),a.lineEnd()),u&&we(v,i,t,s,a),a.polygonEnd()),v=d=m=null}};return N}}function Ue(n,t){function e(e,r){return e=n(e,r),t(e[0],e[1])}return n.invert&&t.invert&&(e.invert=function(e,r){return e=t.invert(e,r),e&&n.invert(e[0],e[1])}),e}function je(n){var t=0,e=Sa/3,r=nr(n),u=r(t,e);return u.parallels=function(n){return arguments.length?r(t=n[0]*Sa/180,e=n[1]*Sa/180):[180*(t/Sa),180*(e/Sa)]},u}function They(n,t){function e(n,t){var e=Math.sqrt(i-2*u*Math.sin(t))/u;return[e*Math.sin(n*=u),o-e*Math.cos(n)]}var r=Math.sin(n),u=(r+Math.sin(t))/2,i=1+r*(2*u-r),o=Math.sqrt(i)/u;return e.invert=function(n,t){var e=o-t;return[Math.atan2(n,e)/u,X((i-(n*n+e*e)*u*u)/(2*u))]},e}function Fe(){function n(n,t){Nc+=u*n-r*t,r=n,u=t}var t,e,r,u;Rc.point=function(i,o){Rc.point=n,t=r=i,e=u=o},Rc.lineEnd=function(){n(t,e)}}function 
/* Oe updates global bounding-box extremes (Lc/qc/Tc/zc); Ye = SVG path-string sink: buffers M/L/Z commands into an array, points rendered as small circles via Ie (two-arc path of radius n); Ze/Ve/Xe/$e accumulate planar centroids for points, line segments (length-weighted), and rings (area-weighted, Sc holds 3x signed area); Be = canvas-context sink mirroring Ye with moveTo/lineTo/arc/closePath, pointRadius-configurable. */Oe(n,t){Lc>n&&(Lc=n),n>qc&&(qc=n),Tc>t&&(Tc=t),t>zc&&(zc=t)}function Ye(){function n(n,t){o.push("M",n,",",t,i)}function t(n,t){o.push("M",n,",",t),a.point=e}function e(n,t){o.push("L",n,",",t)}function r(){a.point=n}function u(){o.push("Z")}var i=Ie(4.5),o=[],a={point:n,lineStart:function(){a.point=t},lineEnd:r,polygonStart:function(){a.lineEnd=u},polygonEnd:function(){a.lineEnd=r,a.point=n},pointRadius:function(n){return i=Ie(n),a},result:function(){if(o.length){var n=o.join("");return o=[],n}}};return a}function Ie(n){return"m0,"+n+"a"+n+","+n+" 0 1,1 0,"+-2*n+"a"+n+","+n+" 0 1,1 0,"+2*n+"z"}function Ze(n,t){dc+=n,mc+=t,++yc}function Ve(){function n(n,r){var u=n-t,i=r-e,o=Math.sqrt(u*u+i*i);xc+=o*(t+n)/2,Mc+=o*(e+r)/2,_c+=o,Ze(t=n,e=r)}var t,e;Pc.point=function(r,u){Pc.point=n,Ze(t=r,e=u)}}function Xe(){Pc.point=Ze}function $e(){function n(n,t){var e=n-r,i=t-u,o=Math.sqrt(e*e+i*i);xc+=o*(r+n)/2,Mc+=o*(u+t)/2,_c+=o,o=u*n-r*t,bc+=o*(r+n),wc+=o*(u+t),Sc+=3*o,Ze(r=n,u=t)}var t,e,r,u;Pc.point=function(i,o){Pc.point=n,Ze(t=r=i,e=u=o)},Pc.lineEnd=function(){n(t,e)}}function Be(n){function t(t,e){n.moveTo(t,e),n.arc(t,e,o,0,ka)}function e(t,e){n.moveTo(t,e),a.point=r}function r(t,e){n.lineTo(t,e)}function u(){a.point=t}function i(){n.closePath()}var o=4.5,a={point:t,lineStart:function(){a.point=e},lineEnd:u,polygonStart:function(){a.lineEnd=i},polygonEnd:function(){a.lineEnd=u,a.point=t},pointRadius:function(n){return o=n,a},result:g};return a}function We(n){function t(n){return(a?r:e)(n)}function e(t){return Ke(t,function(e,r){e=n(e,r),t.point(e[0],e[1])})}function r(t){function e(e,r){e=n(e,r),t.point(e[0],e[1])}function r(){x=0/0,S.point=i,t.lineStart()}function i(e,r){var i=se([e,r]),o=n(e,r);u(x,M,y,_,b,w,x=o[0],M=o[1],y=e,_=i[0],b=i[1],w=i[2],a,t),t.point(x,M)}function o(){S.point=e,t.lineEnd()}function c(){r(),S.point=s,S.lineEnd=l}function s(n,t){i(f=n,h=t),g=x,p=M,v=_,d=b,m=w,S.point=i}function l(){u(x,M,y,_,b,w,g,p,f,v,d,m,a,t),S.lineEnd=o,o()}var 
/* Continuation of We (adaptive resampling): u() recursively bisects a projected segment while the midpoint deviates from the chord by more than the precision threshold i (also checks angular distance via the cos(30deg) bound o), up to depth a=16; precision accessor converts pixels to squared threshold; Je wraps a radians-projection for path rendering; Ge = stream-wrapper ctor; Ke builds a stream forwarding all events but overriding point; Qe/nr = projection factory start: t projects degrees to screen, e inverts, r recomputes the composed rotate+project transform and translation offsets. */f,h,g,p,v,d,m,y,x,M,_,b,w,S={point:e,lineStart:r,lineEnd:o,polygonStart:function(){t.polygonStart(),S.lineStart=c},polygonEnd:function(){t.polygonEnd(),S.lineStart=r}};return S}function u(t,e,r,a,c,s,l,f,h,g,p,v,d,m){var y=l-t,x=f-e,M=y*y+x*x;if(M>4*i&&d--){var _=a+g,b=c+p,w=s+v,S=Math.sqrt(_*_+b*b+w*w),k=Math.asin(w/=S),E=oa(oa(w)-1)<Aa||oa(r-h)<Aa?(r+h)/2:Math.atan2(b,_),A=n(E,k),C=A[0],N=A[1],L=C-t,T=N-e,q=x*L-y*T;(q*q/M>i||oa((y*L+x*T)/M-.5)>.3||o>a*g+c*p+s*v)&&(u(t,e,r,a,c,s,C,N,E,_/=S,b/=S,w,d,m),m.point(C,N),u(C,N,E,_,b,w,l,f,h,g,p,v,d,m))}}var i=.5,o=Math.cos(30*Na),a=16;return t.precision=function(n){return arguments.length?(a=(i=n*n)>0&&16,t):Math.sqrt(i)},t}function Je(n){var t=We(function(t,e){return n([t*La,e*La])});return function(n){return tr(t(n))}}function Ge(n){this.stream=n}function Ke(n,t){return{point:t,sphere:function(){n.sphere()},lineStart:function(){n.lineStart()},lineEnd:function(){n.lineEnd()},polygonStart:function(){n.polygonStart()},polygonEnd:function(){n.polygonEnd()}}}function Qe(n){return nr(function(){return n})()}function nr(n){function t(n){return n=a(n[0]*Na,n[1]*Na),[n[0]*h+c,s-n[1]*h]}function e(n){return n=a.invert((n[0]-c)/h,(s-n[1])/h),n&&[n[0]*La,n[1]*La]}function r(){a=Ue(o=ur(m,y,x),i);var n=i(v,d);return c=g-n[0]*h,s=p+n[1]*h,u()}function u(){return l&&(l.valid=!1,l=null),t}var i,o,a,c,s,l,f=We(function(n,t){return n=i(n,t),[n[0]*h+c,s-n[1]*h]}),h=150,g=480,p=250,v=0,d=0,m=0,y=0,x=0,M=Ec,_=bt,b=null,w=null;return t.stream=function(n){return l&&(l.valid=!1),l=tr(M(o,f(_(n)))),l.valid=!0,l},t.clipAngle=function(n){return arguments.length?(M=null==n?(b=n,Ec):Re((b=+n)*Na),u()):b},t.clipExtent=function(n){return arguments.length?(w=n,_=n?Pe(n[0][0],n[0][1],n[1][0],n[1][1]):bt,u()):w},t.scale=function(n){return arguments.length?(h=+n,r()):h},t.translate=function(n){return arguments.length?(g=+n[0],p=+n[1],r()):[g,p]},t.center=function(n){return 
/* Continuation of the projection factory: center/rotate accessors convert degrees to radians and rebuild the transform; tr adapts a radians stream to degrees input; er = identity (equirectangular) raw; rr normalizes longitude into [-PI,PI]; ur composes longitude rotation (or) with phi/gamma rotation (ar), both invertible; cr emits points along a small circle of radius n stepped by t radians, between angles resolved by sr (angle of a point around the circle); lr/fr = graticule meridian/parallel coordinate generators; hr/gr = link source/target accessors; pr = great-arc (slerp) interpolator start. */arguments.length?(v=n[0]%360*Na,d=n[1]%360*Na,r()):[v*La,d*La]},t.rotate=function(n){return arguments.length?(m=n[0]%360*Na,y=n[1]%360*Na,x=n.length>2?n[2]%360*Na:0,r()):[m*La,y*La,x*La]},Xo.rebind(t,f,"precision"),function(){return i=n.apply(this,arguments),t.invert=i.invert&&e,r()}}function tr(n){return Ke(n,function(t,e){n.point(t*Na,e*Na)})}function er(n,t){return[n,t]}function rr(n,t){return[n>Sa?n-ka:-Sa>n?n+ka:n,t]}function ur(n,t,e){return n?t||e?Ue(or(n),ar(t,e)):or(n):t||e?ar(t,e):rr}function ir(n){return function(t,e){return t+=n,[t>Sa?t-ka:-Sa>t?t+ka:t,e]}}function or(n){var t=ir(n);return t.invert=ir(-n),t}function ar(n,t){function e(n,t){var e=Math.cos(t),a=Math.cos(n)*e,c=Math.sin(n)*e,s=Math.sin(t),l=s*r+a*u;return[Math.atan2(c*i-l*o,a*r-s*u),X(l*i+c*o)]}var r=Math.cos(n),u=Math.sin(n),i=Math.cos(t),o=Math.sin(t);return e.invert=function(n,t){var e=Math.cos(t),a=Math.cos(n)*e,c=Math.sin(n)*e,s=Math.sin(t),l=s*i-c*o;return[Math.atan2(c*i+s*o,a*r+l*u),X(l*r-a*u)]},e}function cr(n,t){var e=Math.cos(n),r=Math.sin(n);return function(u,i,o,a){var c=o*t;null!=u?(u=sr(e,u),i=sr(e,i),(o>0?i>u:u>i)&&(u+=o*ka)):(u=n+o*ka,i=n-.5*c);for(var s,l=u;o>0?l>i:i>l;l-=c)a.point((s=ve([e,-r*Math.cos(l),-r*Math.sin(l)]))[0],s[1])}}function sr(n,t){var e=se(t);e[0]-=n,pe(e);var r=V(-e[1]);return((-e[2]<0?-r:r)+2*Math.PI-Aa)%(2*Math.PI)}function lr(n,t,e){var r=Xo.range(n,t-Aa,e).concat(t);return function(n){return r.map(function(t){return[n,t]})}}function fr(n,t,e){var r=Xo.range(n,t-Aa,e).concat(t);return function(n){return r.map(function(t){return[t,n]})}}function hr(n){return n.source}function gr(n){return n.target}function pr(n,t,e,r){var u=Math.cos(t),i=Math.sin(t),o=Math.cos(r),a=Math.sin(r),c=u*Math.cos(n),s=u*Math.sin(n),l=o*Math.cos(e),f=o*Math.sin(e),h=2*Math.asin(Math.sqrt(J(r-t)+u*o*J(e-n))),g=1/Math.sin(h),p=h?function(n){var 
/* Continuation of great-arc interpolation: spherical linear interpolation between cartesian endpoints (degenerate case returns the start point); vr = spherical arc-length accumulator stream (sums haversine-style segment angles into Uc); dr builds an azimuthal projection from forward/inverse radius functions; mr = conic conformal raw with invert (falls back to mercator xr when parallels coincide at the equator case i=0); yr = conic equidistant raw (falls back to equirectangular er for tiny u); xr = mercator raw; Mr wraps a projection so clipExtent auto-tracks scale/translate when not user-set; _r = transverse mercator raw; br/wr = point x/y accessors. */t=Math.sin(n*=h)*g,e=Math.sin(h-n)*g,r=e*c+t*l,u=e*s+t*f,o=e*i+t*a;return[Math.atan2(u,r)*La,Math.atan2(o,Math.sqrt(r*r+u*u))*La]}:function(){return[n*La,t*La]};return p.distance=h,p}function vr(){function n(n,u){var i=Math.sin(u*=Na),o=Math.cos(u),a=oa((n*=Na)-t),c=Math.cos(a);Uc+=Math.atan2(Math.sqrt((a=o*Math.sin(a))*a+(a=r*i-e*o*c)*a),e*i+r*o*c),t=n,e=i,r=o}var t,e,r;jc.point=function(u,i){t=u*Na,e=Math.sin(i*=Na),r=Math.cos(i),jc.point=n},jc.lineEnd=function(){jc.point=jc.lineEnd=g}}function dr(n,t){function e(t,e){var r=Math.cos(t),u=Math.cos(e),i=n(r*u);return[i*u*Math.sin(t),i*Math.sin(e)]}return e.invert=function(n,e){var r=Math.sqrt(n*n+e*e),u=t(r),i=Math.sin(u),o=Math.cos(u);return[Math.atan2(n*i,r*o),Math.asin(r&&e*i/r)]},e}function mr(n,t){function e(n,t){var e=oa(oa(t)-Ea)<Aa?0:o/Math.pow(u(t),i);return[e*Math.sin(i*n),o-e*Math.cos(i*n)]}var r=Math.cos(n),u=function(n){return Math.tan(Sa/4+n/2)},i=n===t?Math.sin(n):Math.log(r/Math.cos(t))/Math.log(u(t)/u(n)),o=r*Math.pow(u(n),i)/i;return i?(e.invert=function(n,t){var e=o-t,r=I(i)*Math.sqrt(n*n+e*e);return[Math.atan2(n,e)/i,2*Math.atan(Math.pow(o/r,1/i))-Ea]},e):xr}function yr(n,t){function e(n,t){var e=i-t;return[e*Math.sin(u*n),i-e*Math.cos(u*n)]}var r=Math.cos(n),u=n===t?Math.sin(n):(r-Math.cos(t))/(t-n),i=r/u+n;return oa(u)<Aa?er:(e.invert=function(n,t){var e=i-t;return[Math.atan2(n,e)/u,i-I(u)*Math.sqrt(n*n+e*e)]},e)}function xr(n,t){return[n,Math.log(Math.tan(Sa/4+t/2))]}function Mr(n){var t,e=Qe(n),r=e.scale,u=e.translate,i=e.clipExtent;return e.scale=function(){var n=r.apply(e,arguments);return n===e?t?e.clipExtent(null):e:n},e.translate=function(){var n=u.apply(e,arguments);return n===e?t?e.clipExtent(null):e:n},e.clipExtent=function(n){var o=i.apply(e,arguments);if(o===e){if(t=null==n){var a=Sa*r(),c=u();i([[c[0]-a,c[1]-a],[c[0]+a,c[1]+a]])}}else t&&(o=null);return o},e.clipExtent(null)}function _r(n,t){return[Math.log(Math.tan(Sa/4+t/2)),-n]}function br(n){return n[0]}function wr(n){return 
/* Sr = monotone half-hull scan (returns index stack of hull points, Graham style using orientation test Z); kr = lexicographic point compare; Er = orientation predicate; Ar = line-line intersection point; Cr tests whether a ring is closed; Nr/Lr/Tr = Voronoi beach-arc ctor, pooled alloc, and free; qr processes a circle event: removes all arcs collapsing to the same vertex and joins their edges at {x,cy}; zr inserts a new beach-section for site n into the beach tree ($c), splitting or appending and creating edges/circumcenters; Rr/Dr = left/right breakpoint x-coordinate of an arc at sweep-line y (parabola intersection). */n[1]}function Sr(n){for(var t=n.length,e=[0,1],r=2,u=2;t>u;u++){for(;r>1&&Z(n[e[r-2]],n[e[r-1]],n[u])<=0;)--r;e[r++]=u}return e.slice(0,r)}function kr(n,t){return n[0]-t[0]||n[1]-t[1]}function Er(n,t,e){return(e[0]-t[0])*(n[1]-t[1])<(e[1]-t[1])*(n[0]-t[0])}function Ar(n,t,e,r){var u=n[0],i=e[0],o=t[0]-u,a=r[0]-i,c=n[1],s=e[1],l=t[1]-c,f=r[1]-s,h=(a*(c-s)-f*(u-i))/(f*o-a*l);return[u+h*o,c+h*l]}function Cr(n){var t=n[0],e=n[n.length-1];return!(t[0]-e[0]||t[1]-e[1])}function Nr(){Jr(this),this.edge=this.site=this.circle=null}function Lr(n){var t=Jc.pop()||new Nr;return t.site=n,t}function Tr(n){Or(n),$c.remove(n),Jc.push(n),Jr(n)}function qr(n){var t=n.circle,e=t.x,r=t.cy,u={x:e,y:r},i=n.P,o=n.N,a=[n];Tr(n);for(var c=i;c.circle&&oa(e-c.circle.x)<Aa&&oa(r-c.circle.cy)<Aa;)i=c.P,a.unshift(c),Tr(c),c=i;a.unshift(c),Or(c);for(var s=o;s.circle&&oa(e-s.circle.x)<Aa&&oa(r-s.circle.cy)<Aa;)o=s.N,a.push(s),Tr(s),s=o;a.push(s),Or(s);var l,f=a.length;for(l=1;f>l;++l)s=a[l],c=a[l-1],$r(s.edge,c.site,s.site,u);c=a[0],s=a[f-1],s.edge=Vr(c.site,s.site,null,u),Fr(c),Fr(s)}function zr(n){for(var t,e,r,u,i=n.x,o=n.y,a=$c._;a;)if(r=Rr(a,o)-i,r>Aa)a=a.L;else{if(u=i-Dr(a,o),!(u>Aa)){r>-Aa?(t=a.P,e=a):u>-Aa?(t=a,e=a.N):t=e=a;break}if(!a.R){t=a;break}a=a.R}var c=Lr(n);if($c.insert(t,c),t||e){if(t===e)return Or(t),e=Lr(t.site),$c.insert(c,e),c.edge=e.edge=Vr(t.site,c.site),Fr(t),Fr(e),void 0;if(!e)return c.edge=Vr(t.site,c.site),void 0;Or(t),Or(e);var s=t.site,l=s.x,f=s.y,h=n.x-l,g=n.y-f,p=e.site,v=p.x-l,d=p.y-f,m=2*(h*d-g*v),y=h*h+g*g,x=v*v+d*d,M={x:(d*y-g*x)/m+l,y:(h*x-v*y)/m+f};$r(e.edge,s,p,M),c.edge=Vr(s,n,null,M),e.edge=Vr(n,p,null,M),Fr(t),Fr(e)}}function Rr(n,t){var e=n.site,r=e.x,u=e.y,i=u-t;if(!i)return r;var o=n.P;if(!o)return-1/0;e=o.site;var a=e.x,c=e.y,s=c-t;if(!s)return a;var l=a-r,f=1/i-1/s,h=l/s;return f?(-h+Math.sqrt(h*h-2*f*(l*l/(-2*s)-c+s/2+u-i/2)))/f+r:(r+a)/2}function Dr(n,t){var e=n.N;if(e)return Rr(e,t);var r=n.site;return r.y===t?r.x:1/0}function 
/* Pr = Voronoi cell ctor (site + half-edge list); Ur walks each prepared cell's half-edges and, where consecutive edges do not meet (gap on the clip border), inserts synthetic border half-edges following the bounding-box corners; jr sorts half-edges by descending angle; Hr = circle-event ctor; Fr computes the circumcircle of an arc and its two neighbors and inserts a circle event (keyed by bottom y) into the event tree Wc, tracking the minimum in Bc; Or removes an arc's pending circle event; Yr drops edges that clip away entirely; Ir completes a half-infinite edge against the bounding box using the perpendicular bisector of its two sites (handles vertical, shallow and steep slopes separately). */Pr(n){this.site=n,this.edges=[]}function Ur(n){for(var t,e,r,u,i,o,a,c,s,l,f=n[0][0],h=n[1][0],g=n[0][1],p=n[1][1],v=Xc,d=v.length;d--;)if(i=v[d],i&&i.prepare())for(a=i.edges,c=a.length,o=0;c>o;)l=a[o].end(),r=l.x,u=l.y,s=a[++o%c].start(),t=s.x,e=s.y,(oa(r-t)>Aa||oa(u-e)>Aa)&&(a.splice(o,0,new Br(Xr(i.site,l,oa(r-f)<Aa&&p-u>Aa?{x:f,y:oa(t-f)<Aa?e:p}:oa(u-p)<Aa&&h-r>Aa?{x:oa(e-p)<Aa?t:h,y:p}:oa(r-h)<Aa&&u-g>Aa?{x:h,y:oa(t-h)<Aa?e:g}:oa(u-g)<Aa&&r-f>Aa?{x:oa(e-g)<Aa?t:f,y:g}:null),i.site,null)),++c)}function jr(n,t){return t.angle-n.angle}function Hr(){Jr(this),this.x=this.y=this.arc=this.site=this.cy=null}function Fr(n){var t=n.P,e=n.N;if(t&&e){var r=t.site,u=n.site,i=e.site;if(r!==i){var o=u.x,a=u.y,c=r.x-o,s=r.y-a,l=i.x-o,f=i.y-a,h=2*(c*f-s*l);if(!(h>=-Ca)){var g=c*c+s*s,p=l*l+f*f,v=(f*g-s*p)/h,d=(c*p-l*g)/h,f=d+a,m=Gc.pop()||new Hr;m.arc=n,m.site=u,m.x=v+o,m.y=f+Math.sqrt(v*v+d*d),m.cy=f,n.circle=m;for(var y=null,x=Wc._;x;)if(m.y<x.y||m.y===x.y&&m.x<=x.x){if(!x.L){y=x.P;break}x=x.L}else{if(!x.R){y=x;break}x=x.R}Wc.insert(y,m),y||(Bc=m)}}}}function Or(n){var t=n.circle;t&&(t.P||(Bc=t.N),Wc.remove(t),Gc.push(t),Jr(t),n.circle=null)}function Yr(n){for(var t,e=Vc,r=De(n[0][0],n[0][1],n[1][0],n[1][1]),u=e.length;u--;)t=e[u],(!Ir(t,n)||!r(t)||oa(t.a.x-t.b.x)<Aa&&oa(t.a.y-t.b.y)<Aa)&&(t.a=t.b=null,e.splice(u,1))}function Ir(n,t){var e=n.b;if(e)return!0;var r,u,i=n.a,o=t[0][0],a=t[1][0],c=t[0][1],s=t[1][1],l=n.l,f=n.r,h=l.x,g=l.y,p=f.x,v=f.y,d=(h+p)/2,m=(g+v)/2;if(v===g){if(o>d||d>=a)return;if(h>p){if(i){if(i.y>=s)return}else i={x:d,y:c};e={x:d,y:s}}else{if(i){if(i.y<c)return}else i={x:d,y:s};e={x:d,y:c}}}else if(r=(h-p)/(v-g),u=m-r*d,-1>r||r>1)if(h>p){if(i){if(i.y>=s)return}else i={x:(c-u)/r,y:c};e={x:(s-u)/r,y:s}}else{if(i){if(i.y<c)return}else i={x:(s-u)/r,y:s};e={x:(c-u)/r,y:c}}else if(v>g){if(i){if(i.x>=a)return}else i={x:o,y:r*o+u};e={x:a,y:r*a+u}}else{if(i){if(i.x<o)return}else i={x:a,y:r*a+u};e={x:o,y:r*o+u}}return n.a=i,n.b=e,!0}function 
/* Zr = Voronoi edge ctor (left/right sites, endpoints a/b); Vr creates an edge, registers it on both cells, optionally setting endpoints via $r; Xr creates a clipped border edge; $r sets the correct endpoint depending on orientation; Br = directed half-edge with angle; Wr = red-black tree ctor; Jr nulls a node's links; Gr/Kr = left/right tree rotations; Qr = leftmost node; nu = Fortune sweep main loop: pops the lower of next site vs. next circle event, adding beach sections (zr) or collapsing them (qr), then clips and closes cells; tu = site ordering (top-to-bottom, left-to-right); eu = orientation helper; ru/uu = x/y accessors; iu = empty quadtree node; ou = recursive quadtree visit with quadrant bounds; au = rgb channel interpolator returning hex strings; cu = per-key object interpolator; su = numeric interpolator; lu = number-aware string interpolator start. */Zr(n,t){this.l=n,this.r=t,this.a=this.b=null}function Vr(n,t,e,r){var u=new Zr(n,t);return Vc.push(u),e&&$r(u,n,t,e),r&&$r(u,t,n,r),Xc[n.i].edges.push(new Br(u,n,t)),Xc[t.i].edges.push(new Br(u,t,n)),u}function Xr(n,t,e){var r=new Zr(n,null);return r.a=t,r.b=e,Vc.push(r),r}function $r(n,t,e,r){n.a||n.b?n.l===e?n.b=r:n.a=r:(n.a=r,n.l=t,n.r=e)}function Br(n,t,e){var r=n.a,u=n.b;this.edge=n,this.site=t,this.angle=e?Math.atan2(e.y-t.y,e.x-t.x):n.l===t?Math.atan2(u.x-r.x,r.y-u.y):Math.atan2(r.x-u.x,u.y-r.y)}function Wr(){this._=null}function Jr(n){n.U=n.C=n.L=n.R=n.P=n.N=null}function Gr(n,t){var e=t,r=t.R,u=e.U;u?u.L===e?u.L=r:u.R=r:n._=r,r.U=u,e.U=r,e.R=r.L,e.R&&(e.R.U=e),r.L=e}function Kr(n,t){var e=t,r=t.L,u=e.U;u?u.L===e?u.L=r:u.R=r:n._=r,r.U=u,e.U=r,e.L=r.R,e.L&&(e.L.U=e),r.R=e}function Qr(n){for(;n.L;)n=n.L;return n}function nu(n,t){var e,r,u,i=n.sort(tu).pop();for(Vc=[],Xc=new Array(n.length),$c=new Wr,Wc=new Wr;;)if(u=Bc,i&&(!u||i.y<u.y||i.y===u.y&&i.x<u.x))(i.x!==e||i.y!==r)&&(Xc[i.i]=new Pr(i),zr(i),e=i.x,r=i.y),i=n.pop();else{if(!u)break;qr(u.arc)}t&&(Yr(t),Ur(t));var o={cells:Xc,edges:Vc};return $c=Wc=Vc=Xc=null,o}function tu(n,t){return t.y-n.y||t.x-n.x}function eu(n,t,e){return(n.x-e.x)*(t.y-n.y)-(n.x-t.x)*(e.y-n.y)}function ru(n){return n.x}function uu(n){return n.y}function iu(){return{leaf:!0,nodes:[],point:null,x:null,y:null}}function ou(n,t,e,r,u,i){if(!n(t,e,r,u,i)){var o=.5*(e+u),a=.5*(r+i),c=t.nodes;c[0]&&ou(n,c[0],e,r,o,a),c[1]&&ou(n,c[1],o,r,u,a),c[2]&&ou(n,c[2],e,a,o,i),c[3]&&ou(n,c[3],o,a,u,i)}}function au(n,t){n=Xo.rgb(n),t=Xo.rgb(t);var e=n.r,r=n.g,u=n.b,i=t.r-e,o=t.g-r,a=t.b-u;return function(n){return"#"+vt(Math.round(e+i*n))+vt(Math.round(r+o*n))+vt(Math.round(u+a*n))}}function cu(n,t){var e,r={},u={};for(e in n)e in t?r[e]=fu(n[e],t[e]):u[e]=n[e];for(e in t)e in n||(u[e]=t[e]);return function(n){for(e in r)u[e]=r[e](n);return u}}function su(n,t){return t-=n=+n,function(e){return n+t*e}}function lu(n,t){var 
/* Continuation of string interpolation: tokenizes both strings with the number regex Qc, pairs up numeric runs (interpolated via su) and literal runs (kept static), coalescing adjacent static parts; fu tries registered interpolators (Xo.interpolators) from last to first; hu = element-wise array interpolator padded with the longer array's tail; gu clamps an ease to [0,1]; pu/vu = ease-out and ease-in-out combinators; du/mu = quad/cubic; yu = cubic-in-out variant; xu = pow; Mu = sin; _u = exp; bu = circle; wu = elastic (amplitude n, period t); Su = back (overshoot n); ku = bounce start. */e,r,u,i,o,a=0,c=0,s=[],l=[];for(n+="",t+="",Qc.lastIndex=0,r=0;e=Qc.exec(t);++r)e.index&&s.push(t.substring(a,c=e.index)),l.push({i:s.length,x:e[0]}),s.push(null),a=Qc.lastIndex;for(a<t.length&&s.push(t.substring(a)),r=0,i=l.length;(e=Qc.exec(n))&&i>r;++r)if(o=l[r],o.x==e[0]){if(o.i)if(null==s[o.i+1])for(s[o.i-1]+=o.x,s.splice(o.i,1),u=r+1;i>u;++u)l[u].i--;else for(s[o.i-1]+=o.x+s[o.i+1],s.splice(o.i,2),u=r+1;i>u;++u)l[u].i-=2;else if(null==s[o.i+1])s[o.i]=o.x;else for(s[o.i]=o.x+s[o.i+1],s.splice(o.i+1,1),u=r+1;i>u;++u)l[u].i--;l.splice(r,1),i--,r--}else o.x=su(parseFloat(e[0]),parseFloat(o.x));for(;i>r;)o=l.pop(),null==s[o.i+1]?s[o.i]=o.x:(s[o.i]=o.x+s[o.i+1],s.splice(o.i+1,1)),i--;return 1===s.length?null==s[0]?(o=l[0].x,function(n){return o(n)+""}):function(){return t}:function(n){for(r=0;i>r;++r)s[(o=l[r]).i]=o.x(n);return s.join("")}}function fu(n,t){for(var e,r=Xo.interpolators.length;--r>=0&&!(e=Xo.interpolators[r](n,t)););return e}function hu(n,t){var e,r=[],u=[],i=n.length,o=t.length,a=Math.min(n.length,t.length);for(e=0;a>e;++e)r.push(fu(n[e],t[e]));for(;i>e;++e)u[e]=n[e];for(;o>e;++e)u[e]=t[e];return function(n){for(e=0;a>e;++e)u[e]=r[e](n);return u}}function gu(n){return function(t){return 0>=t?0:t>=1?1:n(t)}}function pu(n){return function(t){return 1-n(1-t)}}function vu(n){return function(t){return.5*(.5>t?n(2*t):2-n(2-2*t))}}function du(n){return n*n}function mu(n){return n*n*n}function yu(n){if(0>=n)return 0;if(n>=1)return 1;var t=n*n,e=t*n;return 4*(.5>n?e:3*(n-t)+e-.75)}function xu(n){return function(t){return Math.pow(t,n)}}function Mu(n){return 1-Math.cos(n*Ea)}function _u(n){return Math.pow(2,10*(n-1))}function bu(n){return 1-Math.sqrt(1-n*n)}function wu(n,t){var e;return arguments.length<2&&(t=.45),arguments.length?e=t/ka*Math.asin(1/n):(n=1,e=t/4),function(r){return 1+n*Math.pow(2,-10*r)*Math.sin((r-e)*ka/t)}}function Su(n){return n||(n=1.70158),function(t){return t*t*((n+1)*t-n)}}function ku(n){return 
/* Continuation: bounce easing as a four-piece parabola; Eu/Au/Cu = hcl/hsl/lab color-space interpolators (hue takes the shorter way around 360, NaN channels fall back to the other endpoint); Nu = numeric interpolator that rounds its output; Lu decomposes a 2D affine matrix {a..f} into translate, rotate, scale and skewX components (flipping basis vectors when the determinant is negative); Tu = 2-vector dot product; qu normalizes a 2-vector in place, returning its prior length; zu adds a scaled vector; Ru = CSS/SVG transform-string interpolator start (decomposes both transforms via Lu). */1/2.75>n?7.5625*n*n:2/2.75>n?7.5625*(n-=1.5/2.75)*n+.75:2.5/2.75>n?7.5625*(n-=2.25/2.75)*n+.9375:7.5625*(n-=2.625/2.75)*n+.984375}function Eu(n,t){n=Xo.hcl(n),t=Xo.hcl(t);var e=n.h,r=n.c,u=n.l,i=t.h-e,o=t.c-r,a=t.l-u;return isNaN(o)&&(o=0,r=isNaN(r)?t.c:r),isNaN(i)?(i=0,e=isNaN(e)?t.h:e):i>180?i-=360:-180>i&&(i+=360),function(n){return rt(e+i*n,r+o*n,u+a*n)+""}}function Au(n,t){n=Xo.hsl(n),t=Xo.hsl(t);var e=n.h,r=n.s,u=n.l,i=t.h-e,o=t.s-r,a=t.l-u;return isNaN(o)&&(o=0,r=isNaN(r)?t.s:r),isNaN(i)?(i=0,e=isNaN(e)?t.h:e):i>180?i-=360:-180>i&&(i+=360),function(n){return nt(e+i*n,r+o*n,u+a*n)+""}}function Cu(n,t){n=Xo.lab(n),t=Xo.lab(t);var e=n.l,r=n.a,u=n.b,i=t.l-e,o=t.a-r,a=t.b-u;return function(n){return ot(e+i*n,r+o*n,u+a*n)+""}}function Nu(n,t){return t-=n,function(e){return Math.round(n+t*e)}}function Lu(n){var t=[n.a,n.b],e=[n.c,n.d],r=qu(t),u=Tu(t,e),i=qu(zu(e,t,-u))||0;t[0]*e[1]<e[0]*t[1]&&(t[0]*=-1,t[1]*=-1,r*=-1,u*=-1),this.rotate=(r?Math.atan2(t[1],t[0]):Math.atan2(-e[0],e[1]))*La,this.translate=[n.e,n.f],this.scale=[r,i],this.skew=i?Math.atan2(u,i)*La:0}function Tu(n,t){return n[0]*t[0]+n[1]*t[1]}function qu(n){var t=Math.sqrt(Tu(n,n));return t&&(n[0]/=t,n[1]/=t),t}function zu(n,t,e){return n[0]+=e*t[0],n[1]+=e*t[1],n}function Ru(n,t){var e,r=[],u=[],i=Xo.transform(n),o=Xo.transform(t),a=i.translate,c=o.translate,s=i.rotate,l=o.rotate,f=i.skew,h=o.skew,g=i.scale,p=o.scale;return 
/* Continuation of transform interpolation: emits tweened translate()/rotate()/skewX()/scale() string segments only for components that differ, rotating through the shorter angular arc; Du/Pu = uninterpolate factories (unclamped / clamped to [0,1]); Uu = node path from source up to the common ancestor then down to target (bundle layout style); ju collects a node's ancestor chain; Hu = lowest common ancestor by popping matched prefixes; Fu/Ou/Yu/Iu set/clear force-layout fixed-state bits (drag and mouse-over flags); Zu = Barnes-Hut pass: accumulates charge and center-of-charge over the quadtree, jittering coincident leaf points; Vu rebinds hierarchy accessors onto a layout; Xu/$u = children/value accessors; Bu = descending value comparator; Wu = links start. */a[0]!=c[0]||a[1]!=c[1]?(r.push("translate(",null,",",null,")"),u.push({i:1,x:su(a[0],c[0])},{i:3,x:su(a[1],c[1])})):c[0]||c[1]?r.push("translate("+c+")"):r.push(""),s!=l?(s-l>180?l+=360:l-s>180&&(s+=360),u.push({i:r.push(r.pop()+"rotate(",null,")")-2,x:su(s,l)})):l&&r.push(r.pop()+"rotate("+l+")"),f!=h?u.push({i:r.push(r.pop()+"skewX(",null,")")-2,x:su(f,h)}):h&&r.push(r.pop()+"skewX("+h+")"),g[0]!=p[0]||g[1]!=p[1]?(e=r.push(r.pop()+"scale(",null,",",null,")"),u.push({i:e-4,x:su(g[0],p[0])},{i:e-2,x:su(g[1],p[1])})):(1!=p[0]||1!=p[1])&&r.push(r.pop()+"scale("+p+")"),e=u.length,function(n){for(var t,i=-1;++i<e;)r[(t=u[i]).i]=t.x(n);return r.join("")}}function Du(n,t){return t=t-(n=+n)?1/(t-n):0,function(e){return(e-n)*t}}function Pu(n,t){return t=t-(n=+n)?1/(t-n):0,function(e){return Math.max(0,Math.min(1,(e-n)*t))}}function Uu(n){for(var t=n.source,e=n.target,r=Hu(t,e),u=[t];t!==r;)t=t.parent,u.push(t);for(var i=u.length;e!==r;)u.splice(i,0,e),e=e.parent;return u}function ju(n){for(var t=[],e=n.parent;null!=e;)t.push(n),n=e,e=e.parent;return t.push(n),t}function Hu(n,t){if(n===t)return n;for(var e=ju(n),r=ju(t),u=e.pop(),i=r.pop(),o=null;u===i;)o=u,u=e.pop(),i=r.pop();return o}function Fu(n){n.fixed|=2}function Ou(n){n.fixed&=-7}function Yu(n){n.fixed|=4,n.px=n.x,n.py=n.y}function Iu(n){n.fixed&=-5}function Zu(n,t,e){var r=0,u=0;if(n.charge=0,!n.leaf)for(var i,o=n.nodes,a=o.length,c=-1;++c<a;)i=o[c],null!=i&&(Zu(i,t,e),n.charge+=i.charge,r+=i.charge*i.cx,u+=i.charge*i.cy);if(n.point){n.leaf||(n.point.x+=Math.random()-.5,n.point.y+=Math.random()-.5);var s=t*e[n.point.index];n.charge+=n.pointCharge=s,r+=s*n.point.x,u+=s*n.point.y}n.cx=r/n.charge,n.cy=u/n.charge}function Vu(n,t){return Xo.rebind(n,t,"sort","children","value"),n.nodes=n,n.links=Wu,n}function Xu(n){return n.children}function $u(n){return n.value}function Bu(n,t){return t.value-n.value}function Wu(n){return 
/* Continuation: Wu flattens nodes into {source: parent, target: child} link objects; Ju/Gu = x/y accessors; Ku writes stack baseline y0/y onto a point; Qu = identity layer order; ni = zero offset row; ti = index of the layer with the largest first value (inside-out ordering helper); ei/ri = sum of layer values; ui/ii = histogram threshold generators (Sturges rule / n uniform bins over a range); oi = [min,max] extent; ai = tree separation (1 for siblings, 2 otherwise); ci/si = leftmost/rightmost child or thread; li recursively finds the extreme descendant by comparator; fi/hi/gi = x and depth comparators; pi = post-order-ish traversal passing each node its previous sibling; vi distributes accumulated shift/change corrections across children; di moves a subtree by spacing siblings (Buchheim-style); mi picks the greatest distinct ancestor; yi = ascending value comparator; xi/Mi insert/remove a circle in the pack front chain; _i = circle overlap test with a small tolerance factor; bi = circle packing placement start. */Xo.merge(n.map(function(n){return(n.children||[]).map(function(t){return{source:n,target:t}})}))}function Ju(n){return n.x}function Gu(n){return n.y}function Ku(n,t,e){n.y0=t,n.y=e}function Qu(n){return Xo.range(n.length)}function ni(n){for(var t=-1,e=n[0].length,r=[];++t<e;)r[t]=0;return r}function ti(n){for(var t,e=1,r=0,u=n[0][1],i=n.length;i>e;++e)(t=n[e][1])>u&&(r=e,u=t);return r}function ei(n){return n.reduce(ri,0)}function ri(n,t){return n+t[1]}function ui(n,t){return ii(n,Math.ceil(Math.log(t.length)/Math.LN2+1))}function ii(n,t){for(var e=-1,r=+n[0],u=(n[1]-r)/t,i=[];++e<=t;)i[e]=u*e+r;return i}function oi(n){return[Xo.min(n),Xo.max(n)]}function ai(n,t){return n.parent==t.parent?1:2}function ci(n){var t=n.children;return t&&t.length?t[0]:n._tree.thread}function si(n){var t,e=n.children;return e&&(t=e.length)?e[t-1]:n._tree.thread}function li(n,t){var e=n.children;if(e&&(u=e.length))for(var r,u,i=-1;++i<u;)t(r=li(e[i],t),n)>0&&(n=r);return n}function fi(n,t){return n.x-t.x}function hi(n,t){return t.x-n.x}function gi(n,t){return n.depth-t.depth}function pi(n,t){function e(n,r){var u=n.children;if(u&&(o=u.length))for(var i,o,a=null,c=-1;++c<o;)i=u[c],e(i,a),a=i;t(n,r)}e(n,null)}function vi(n){for(var t,e=0,r=0,u=n.children,i=u.length;--i>=0;)t=u[i]._tree,t.prelim+=e,t.mod+=e,e+=t.shift+(r+=t.change)}function di(n,t,e){n=n._tree,t=t._tree;var r=e/(t.number-n.number);n.change+=r,t.change-=r,t.shift+=e,t.prelim+=e,t.mod+=e}function mi(n,t,e){return n._tree.ancestor.parent==t.parent?n._tree.ancestor:e}function yi(n,t){return n.value-t.value}function xi(n,t){var e=n._pack_next;n._pack_next=t,t._pack_prev=n,t._pack_next=e,e._pack_prev=t}function Mi(n,t){n._pack_next=t,t._pack_prev=n}function _i(n,t){var e=t.x-n.x,r=t.y-n.y,u=n.r+t.r;return.999*u*u>e*e+r*r}function bi(n){function t(n){l=Math.min(n.x-n.r,l),f=Math.max(n.x+n.r,f),h=Math.min(n.y-n.r,h),g=Math.max(n.y+n.r,g)}if((e=n.children)&&(s=e.length)){var 
/* Continuation of pack layout: places the first three circles mutually tangent, then walks the front chain placing each new circle tangent to two neighbors (Ei), backtracking and splicing the chain (Mi) on overlap (_i), finally recentering children and computing the enclosing radius; wi/Si init/strip the chain link fields; ki recursively scales and translates a packed subtree; Ei solves the two-tangent-circles placement by the law of cosines; Ai = cluster depth (1 + max child y); Ci = mean x of children; Ni/Li = leftmost/rightmost leaf; Ti copies treemap node geometry; qi applies padding [top,right,bottom,left] to a treemap rect, collapsing negative sizes; zi = ordered 2-element extent; Ri = scale range extent; Di = bilinear scale (deinterpolate domain, reinterpolate range); Pi nices a 2-element domain in place with floor/ceil; Ui = floor/ceil pair rounding to multiples of n; ji = polylinear scale start. */e,r,u,i,o,a,c,s,l=1/0,f=-1/0,h=1/0,g=-1/0;if(e.forEach(wi),r=e[0],r.x=-r.r,r.y=0,t(r),s>1&&(u=e[1],u.x=u.r,u.y=0,t(u),s>2))for(i=e[2],Ei(r,u,i),t(i),xi(r,i),r._pack_prev=i,xi(i,u),u=r._pack_next,o=3;s>o;o++){Ei(r,u,i=e[o]);var p=0,v=1,d=1;for(a=u._pack_next;a!==u;a=a._pack_next,v++)if(_i(a,i)){p=1;break}if(1==p)for(c=r._pack_prev;c!==a._pack_prev&&!_i(c,i);c=c._pack_prev,d++);p?(d>v||v==d&&u.r<r.r?Mi(r,u=a):Mi(r=c,u),o--):(xi(r,i),u=i,t(i))}var m=(l+f)/2,y=(h+g)/2,x=0;for(o=0;s>o;o++)i=e[o],i.x-=m,i.y-=y,x=Math.max(x,i.r+Math.sqrt(i.x*i.x+i.y*i.y));n.r=x,e.forEach(Si)}}function wi(n){n._pack_next=n._pack_prev=n}function Si(n){delete n._pack_next,delete n._pack_prev}function ki(n,t,e,r){var u=n.children;if(n.x=t+=r*n.x,n.y=e+=r*n.y,n.r*=r,u)for(var i=-1,o=u.length;++i<o;)ki(u[i],t,e,r)}function Ei(n,t,e){var r=n.r+e.r,u=t.x-n.x,i=t.y-n.y;if(r&&(u||i)){var o=t.r+e.r,a=u*u+i*i;o*=o,r*=r;var c=.5+(r-o)/(2*a),s=Math.sqrt(Math.max(0,2*o*(r+a)-(r-=a)*r-o*o))/(2*a);e.x=n.x+c*u+s*i,e.y=n.y+c*i-s*u}else e.x=n.x+r,e.y=n.y}function Ai(n){return 1+Xo.max(n,function(n){return n.y})}function Ci(n){return n.reduce(function(n,t){return n+t.x},0)/n.length}function Ni(n){var t=n.children;return t&&t.length?Ni(t[0]):n}function Li(n){var t,e=n.children;return e&&(t=e.length)?Li(e[t-1]):n}function Ti(n){return{x:n.x,y:n.y,dx:n.dx,dy:n.dy}}function qi(n,t){var e=n.x+t[3],r=n.y+t[0],u=n.dx-t[1]-t[3],i=n.dy-t[0]-t[2];return 0>u&&(e+=u/2,u=0),0>i&&(r+=i/2,i=0),{x:e,y:r,dx:u,dy:i}}function zi(n){var t=n[0],e=n[n.length-1];return e>t?[t,e]:[e,t]}function Ri(n){return n.rangeExtent?n.rangeExtent():zi(n.range())}function Di(n,t,e,r){var u=e(n[0],n[1]),i=r(t[0],t[1]);return function(n){return i(u(n))}}function Pi(n,t){var e,r=0,u=n.length-1,i=n[r],o=n[u];return i>o&&(e=r,r=u,u=e,e=i,i=o,o=e),n[r]=t.floor(i),n[u]=t.ceil(o),n}function Ui(n){return n?{floor:function(t){return Math.floor(t/n)*n},ceil:function(t){return Math.ceil(t/n)*n}}:ls}function ji(n,t,e,r){var 
/* Continuation: ji builds a piecewise (polylinear) scale - one sub-interpolator per domain segment, selected by bisect; Hi = linear-scale core providing invert/domain/range/rangeRound/clamp/interpolate/ticks/tickFormat/nice/copy, choosing bilinear vs polylinear based on domain length; Fi rebinds range methods from an underlying linear scale; Oi nices a domain to the tick step; Yi computes [start, stop, step] tick parameters from a power-of-ten heuristic; Ii materializes ticks via Xo.range; Zi builds a tick number format (respecting an explicit format string's precision); Vi/Xi = decimal precision helpers; $i = log scale start (supports negative-domain logs via sign flip). */u=[],i=[],o=0,a=Math.min(n.length,t.length)-1;for(n[a]<n[0]&&(n=n.slice().reverse(),t=t.slice().reverse());++o<=a;)u.push(e(n[o-1],n[o])),i.push(r(t[o-1],t[o]));return function(t){var e=Xo.bisect(n,t,1,a)-1;return i[e](u[e](t))}}function Hi(n,t,e,r){function u(){var u=Math.min(n.length,t.length)>2?ji:Di,c=r?Pu:Du;return o=u(n,t,c,e),a=u(t,n,c,fu),i}function i(n){return o(n)}var o,a;return i.invert=function(n){return a(n)},i.domain=function(t){return arguments.length?(n=t.map(Number),u()):n},i.range=function(n){return arguments.length?(t=n,u()):t},i.rangeRound=function(n){return i.range(n).interpolate(Nu)},i.clamp=function(n){return arguments.length?(r=n,u()):r},i.interpolate=function(n){return arguments.length?(e=n,u()):e},i.ticks=function(t){return Ii(n,t)},i.tickFormat=function(t,e){return Zi(n,t,e)},i.nice=function(t){return Oi(n,t),u()},i.copy=function(){return Hi(n,t,e,r)},u()}function Fi(n,t){return Xo.rebind(n,t,"range","rangeRound","interpolate","clamp")}function Oi(n,t){return Pi(n,Ui(Yi(n,t)[2]))}function Yi(n,t){null==t&&(t=10);var e=zi(n),r=e[1]-e[0],u=Math.pow(10,Math.floor(Math.log(r/t)/Math.LN10)),i=t/r*u;return.15>=i?u*=10:.35>=i?u*=5:.75>=i&&(u*=2),e[0]=Math.ceil(e[0]/u)*u,e[1]=Math.floor(e[1]/u)*u+.5*u,e[2]=u,e}function Ii(n,t){return Xo.range.apply(Xo,Yi(n,t))}function Zi(n,t,e){var r=Yi(n,t);return Xo.format(e?e.replace(Qa,function(n,t,e,u,i,o,a,c,s,l){return[t,e,u,i,o,a,c,s||"."+Xi(l,r),l].join("")}):",."+Vi(r[2])+"f")}function Vi(n){return-Math.floor(Math.log(n)/Math.LN10+.01)}function Xi(n,t){var e=Vi(t[2]);return n in fs?Math.abs(e-Vi(Math.max(Math.abs(t[0]),Math.abs(t[1]))))+ +("e"!==n):e-2*("%"===n)}function $i(n,t,e,r){function u(n){return(e?Math.log(0>n?0:n):-Math.log(n>0?0:-n))/Math.log(t)}function i(n){return e?Math.pow(t,n):-Math.pow(t,-n)}function o(t){return n(u(t))}return o.invert=function(t){return i(n.invert(t))},o.domain=function(t){return 
/* Continuation of log scale: domain tracks sign (e flag), base accessor, nice snaps to integer powers, ticks enumerates sub-decade multiples between floor/ceil log bounds, tickFormat thins labels by a density threshold with epsilon-adjusted rounding per sign; Bi = power scale applying exponent t via Wi (sign-preserving power) before a linear scale; Ji = ordinal scale start: domain map lazily assigns indices, range modes replayed via the stored {t, a} descriptor. */arguments.length?(e=t[0]>=0,n.domain((r=t.map(Number)).map(u)),o):r},o.base=function(e){return arguments.length?(t=+e,n.domain(r.map(u)),o):t},o.nice=function(){var t=Pi(r.map(u),e?Math:gs);return n.domain(t),r=t.map(i),o},o.ticks=function(){var n=zi(r),o=[],a=n[0],c=n[1],s=Math.floor(u(a)),l=Math.ceil(u(c)),f=t%1?2:t;if(isFinite(l-s)){if(e){for(;l>s;s++)for(var h=1;f>h;h++)o.push(i(s)*h);o.push(i(s))}else for(o.push(i(s));s++<l;)for(var h=f-1;h>0;h--)o.push(i(s)*h);for(s=0;o[s]<a;s++);for(l=o.length;o[l-1]>c;l--);o=o.slice(s,l)}return o},o.tickFormat=function(n,t){if(!arguments.length)return hs;arguments.length<2?t=hs:"function"!=typeof t&&(t=Xo.format(t));var r,a=Math.max(.1,n/o.ticks().length),c=e?(r=1e-12,Math.ceil):(r=-1e-12,Math.floor);return function(n){return n/i(c(u(n)+r))<=a?t(n):""}},o.copy=function(){return $i(n.copy(),t,e,r)},Fi(o,n)}function Bi(n,t,e){function r(t){return n(u(t))}var u=Wi(t),i=Wi(1/t);return r.invert=function(t){return i(n.invert(t))},r.domain=function(t){return arguments.length?(n.domain((e=t.map(Number)).map(u)),r):e},r.ticks=function(n){return Ii(e,n)},r.tickFormat=function(n,t){return Zi(e,n,t)},r.nice=function(n){return r.domain(Oi(e,n))},r.exponent=function(o){return arguments.length?(u=Wi(t=o),i=Wi(1/t),n.domain(e.map(u)),r):t},r.copy=function(){return Bi(n.copy(),t,e)},Fi(r,n)}function Wi(n){return function(t){return 0>t?-Math.pow(-t,n):Math.pow(t,n)}}function Ji(n,t){function e(e){return o[((i.get(e)||"range"===t.t&&i.set(e,n.push(e)))-1)%o.length]}function r(t,e){return Xo.range(n.length).map(function(n){return t+e*n})}var i,o,a;return e.domain=function(r){if(!arguments.length)return n;n=[],i=new u;for(var o,a=-1,c=r.length;++a<c;)i.has(o=r[a])||i.set(o,n.push(o));return e[t.t].apply(e,t.a)},e.range=function(n){return arguments.length?(o=n,a=0,t={t:"range",a:arguments},e):o},e.rangePoints=function(u,i){arguments.length<2&&(i=0);var c=u[0],s=u[1],l=(s-c)/(Math.max(1,n.length-1)+i);return 
o=r(n.length<2?(c+s)/2:c+l*i/2,l),a=0,t={t:"rangePoints",a:arguments},e},e.rangeBands=function(u,i,c){arguments.length<2&&(i=0),arguments.length<3&&(c=i);var s=u[1]<u[0],l=u[s-0],f=u[1-s],h=(f-l)/(n.length-i+2*c);return o=r(l+h*c,h),s&&o.reverse(),a=h*(1-i),t={t:"rangeBands",a:arguments},e},e.rangeRoundBands=function(u,i,c){arguments.length<2&&(i=0),arguments.length<3&&(c=i);var s=u[1]<u[0],l=u[s-0],f=u[1-s],h=Math.floor((f-l)/(n.length-i+2*c)),g=f-l-(n.length-i)*h;return o=r(l+Math.round(g/2),h),s&&o.reverse(),a=Math.round(h*(1-i)),t={t:"rangeRoundBands",a:arguments},e},e.rangeBand=function(){return a},e.rangeExtent=function(){return zi(t.a[0])},e.copy=function(){return Ji(n,t)},e.domain(n)}function Gi(n,t){function e(){var e=0,i=t.length;for(u=[];++e<i;)u[e-1]=Xo.quantile(n,e/i);return r}function r(n){return isNaN(n=+n)?void 0:t[Xo.bisect(u,n)]}var u;return r.domain=function(t){return arguments.length?(n=t.filter(function(n){return!isNaN(n)}).sort(Xo.ascending),e()):n},r.range=function(n){return arguments.length?(t=n,e()):t},r.quantiles=function(){return u},r.invertExtent=function(e){return e=t.indexOf(e),0>e?[0/0,0/0]:[e>0?u[e-1]:n[0],e<u.length?u[e]:n[n.length-1]]},r.copy=function(){return Gi(n,t)},e()}function Ki(n,t,e){function r(t){return e[Math.max(0,Math.min(o,Math.floor(i*(t-n))))]}function u(){return i=e.length/(t-n),o=e.length-1,r}var i,o;return r.domain=function(e){return arguments.length?(n=+e[0],t=+e[e.length-1],u()):[n,t]},r.range=function(n){return arguments.length?(e=n,u()):e},r.invertExtent=function(t){return t=e.indexOf(t),t=0>t?0/0:t/i+n,[t,t+1/i]},r.copy=function(){return Ki(n,t,e)},u()}function Qi(n,t){function e(e){return e>=e?t[Xo.bisect(n,e)]:void 0}return e.domain=function(t){return arguments.length?(n=t,e):n},e.range=function(n){return arguments.length?(t=n,e):t},e.invertExtent=function(e){return e=t.indexOf(e),[n[e-1],n[e]]},e.copy=function(){return Qi(n,t)},e}function no(n){function t(n){return+n}return 
t.invert=t,t.domain=t.range=function(e){return arguments.length?(n=e.map(t),t):n},t.ticks=function(t){return Ii(n,t)},t.tickFormat=function(t,e){return Zi(n,t,e)},t.copy=function(){return no(n)},t}function to(n){return n.innerRadius}function eo(n){return n.outerRadius}function ro(n){return n.startAngle}function uo(n){return n.endAngle}function io(n){function t(t){function o(){s.push("M",i(n(l),a))}for(var c,s=[],l=[],f=-1,h=t.length,g=_t(e),p=_t(r);++f<h;)u.call(this,c=t[f],f)?l.push([+g.call(this,c,f),+p.call(this,c,f)]):l.length&&(o(),l=[]);return l.length&&o(),s.length?s.join(""):null}var e=br,r=wr,u=be,i=oo,o=i.key,a=.7;return t.x=function(n){return arguments.length?(e=n,t):e},t.y=function(n){return arguments.length?(r=n,t):r},t.defined=function(n){return arguments.length?(u=n,t):u},t.interpolate=function(n){return arguments.length?(o="function"==typeof n?i=n:(i=Ms.get(n)||oo).key,t):o},t.tension=function(n){return arguments.length?(a=n,t):a},t}function oo(n){return n.join("L")}function ao(n){return oo(n)+"Z"}function co(n){for(var t=0,e=n.length,r=n[0],u=[r[0],",",r[1]];++t<e;)u.push("H",(r[0]+(r=n[t])[0])/2,"V",r[1]);return e>1&&u.push("H",r[0]),u.join("")}function so(n){for(var t=0,e=n.length,r=n[0],u=[r[0],",",r[1]];++t<e;)u.push("V",(r=n[t])[1],"H",r[0]);return u.join("")}function lo(n){for(var t=0,e=n.length,r=n[0],u=[r[0],",",r[1]];++t<e;)u.push("H",(r=n[t])[0],"V",r[1]);return u.join("")}function fo(n,t){return n.length<4?oo(n):n[1]+po(n.slice(1,n.length-1),vo(n,t))}function ho(n,t){return n.length<3?oo(n):n[0]+po((n.push(n[0]),n),vo([n[n.length-2]].concat(n,[n[1]]),t))}function go(n,t){return n.length<3?oo(n):n[0]+po(n,vo(n,t))}function po(n,t){if(t.length<1||n.length!=t.length&&n.length!=t.length+2)return oo(n);var 
e=n.length!=t.length,r="",u=n[0],i=n[1],o=t[0],a=o,c=1;if(e&&(r+="Q"+(i[0]-2*o[0]/3)+","+(i[1]-2*o[1]/3)+","+i[0]+","+i[1],u=n[1],c=2),t.length>1){a=t[1],i=n[c],c++,r+="C"+(u[0]+o[0])+","+(u[1]+o[1])+","+(i[0]-a[0])+","+(i[1]-a[1])+","+i[0]+","+i[1];for(var s=2;s<t.length;s++,c++)i=n[c],a=t[s],r+="S"+(i[0]-a[0])+","+(i[1]-a[1])+","+i[0]+","+i[1]}if(e){var l=n[c];r+="Q"+(i[0]+2*a[0]/3)+","+(i[1]+2*a[1]/3)+","+l[0]+","+l[1]}return r}function vo(n,t){for(var e,r=[],u=(1-t)/2,i=n[0],o=n[1],a=1,c=n.length;++a<c;)e=i,i=o,o=n[a],r.push([u*(o[0]-e[0]),u*(o[1]-e[1])]);return r}function mo(n){if(n.length<3)return oo(n);var t=1,e=n.length,r=n[0],u=r[0],i=r[1],o=[u,u,u,(r=n[1])[0]],a=[i,i,i,r[1]],c=[u,",",i,"L",_o(ws,o),",",_o(ws,a)];for(n.push(n[e-1]);++t<=e;)r=n[t],o.shift(),o.push(r[0]),a.shift(),a.push(r[1]),bo(c,o,a);return n.pop(),c.push("L",r),c.join("")}function yo(n){if(n.length<4)return oo(n);for(var t,e=[],r=-1,u=n.length,i=[0],o=[0];++r<3;)t=n[r],i.push(t[0]),o.push(t[1]);for(e.push(_o(ws,i)+","+_o(ws,o)),--r;++r<u;)t=n[r],i.shift(),i.push(t[0]),o.shift(),o.push(t[1]),bo(e,i,o);return e.join("")}function xo(n){for(var t,e,r=-1,u=n.length,i=u+4,o=[],a=[];++r<4;)e=n[r%u],o.push(e[0]),a.push(e[1]);for(t=[_o(ws,o),",",_o(ws,a)],--r;++r<i;)e=n[r%u],o.shift(),o.push(e[0]),a.shift(),a.push(e[1]),bo(t,o,a);return t.join("")}function Mo(n,t){var e=n.length-1;if(e)for(var r,u,i=n[0][0],o=n[0][1],a=n[e][0]-i,c=n[e][1]-o,s=-1;++s<=e;)r=n[s],u=s/e,r[0]=t*r[0]+(1-t)*(i+u*a),r[1]=t*r[1]+(1-t)*(o+u*c);return mo(n)}function _o(n,t){return n[0]*t[0]+n[1]*t[1]+n[2]*t[2]+n[3]*t[3]}function bo(n,t,e){n.push("C",_o(_s,t),",",_o(_s,e),",",_o(bs,t),",",_o(bs,e),",",_o(ws,t),",",_o(ws,e))}function wo(n,t){return(t[1]-n[1])/(t[0]-n[0])}function So(n){for(var t=0,e=n.length-1,r=[],u=n[0],i=n[1],o=r[0]=wo(u,i);++t<e;)r[t]=(o+(o=wo(u=i,i=n[t+1])))/2;return r[t]=o,r}function ko(n){for(var 
t,e,r,u,i=[],o=So(n),a=-1,c=n.length-1;++a<c;)t=wo(n[a],n[a+1]),oa(t)<Aa?o[a]=o[a+1]=0:(e=o[a]/t,r=o[a+1]/t,u=e*e+r*r,u>9&&(u=3*t/Math.sqrt(u),o[a]=u*e,o[a+1]=u*r));for(a=-1;++a<=c;)u=(n[Math.min(c,a+1)][0]-n[Math.max(0,a-1)][0])/(6*(1+o[a]*o[a])),i.push([u||0,o[a]*u||0]);return i}function Eo(n){return n.length<3?oo(n):n[0]+po(n,ko(n))}function Ao(n){for(var t,e,r,u=-1,i=n.length;++u<i;)t=n[u],e=t[0],r=t[1]+ys,t[0]=e*Math.cos(r),t[1]=e*Math.sin(r);return n}function Co(n){function t(t){function c(){v.push("M",a(n(m),f),l,s(n(d.reverse()),f),"Z")}for(var h,g,p,v=[],d=[],m=[],y=-1,x=t.length,M=_t(e),_=_t(u),b=e===r?function(){return g}:_t(r),w=u===i?function(){return p}:_t(i);++y<x;)o.call(this,h=t[y],y)?(d.push([g=+M.call(this,h,y),p=+_.call(this,h,y)]),m.push([+b.call(this,h,y),+w.call(this,h,y)])):d.length&&(c(),d=[],m=[]);return d.length&&c(),v.length?v.join(""):null}var e=br,r=br,u=0,i=wr,o=be,a=oo,c=a.key,s=a,l="L",f=.7;return t.x=function(n){return arguments.length?(e=r=n,t):r},t.x0=function(n){return arguments.length?(e=n,t):e},t.x1=function(n){return arguments.length?(r=n,t):r},t.y=function(n){return arguments.length?(u=i=n,t):i},t.y0=function(n){return arguments.length?(u=n,t):u},t.y1=function(n){return arguments.length?(i=n,t):i},t.defined=function(n){return arguments.length?(o=n,t):o},t.interpolate=function(n){return arguments.length?(c="function"==typeof n?a=n:(a=Ms.get(n)||oo).key,s=a.reverse||a,l=a.closed?"M":"L",t):c},t.tension=function(n){return arguments.length?(f=n,t):f},t}function No(n){return n.radius}function Lo(n){return[n.x,n.y]}function To(n){return function(){var t=n.apply(this,arguments),e=t[0],r=t[1]+ys;return[e*Math.cos(r),e*Math.sin(r)]}}function qo(){return 64}function zo(){return"circle"}function Ro(n){var t=Math.sqrt(n/Sa);return"M0,"+t+"A"+t+","+t+" 0 1,1 0,"+-t+"A"+t+","+t+" 0 1,1 0,"+t+"Z"}function Do(n,t){return fa(n,Ns),n.id=t,n}function Po(n,t,e,r){var u=n.id;return R(n,"function"==typeof 
e?function(n,i,o){n.__transition__[u].tween.set(t,r(e.call(n,n.__data__,i,o)))}:(e=r(e),function(n){n.__transition__[u].tween.set(t,e)}))}function Uo(n){return null==n&&(n=""),function(){this.textContent=n}}function jo(n,t,e,r){var i=n.__transition__||(n.__transition__={active:0,count:0}),o=i[e];if(!o){var a=r.time;o=i[e]={tween:new u,time:a,ease:r.ease,delay:r.delay,duration:r.duration},++i.count,Xo.timer(function(r){function u(r){return i.active>e?s():(i.active=e,o.event&&o.event.start.call(n,l,t),o.tween.forEach(function(e,r){(r=r.call(n,l,t))&&v.push(r)}),Xo.timer(function(){return p.c=c(r||1)?be:c,1},0,a),void 0)}function c(r){if(i.active!==e)return s();for(var u=r/g,a=f(u),c=v.length;c>0;)v[--c].call(n,a);return u>=1?(o.event&&o.event.end.call(n,l,t),s()):void 0}function s(){return--i.count?delete i[e]:delete n.__transition__,1}var l=n.__data__,f=o.ease,h=o.delay,g=o.duration,p=Ja,v=[];return p.t=h+a,r>=h?u(r-h):(p.c=u,void 0)},0,a)}}function Ho(n,t){n.attr("transform",function(n){return"translate("+t(n)+",0)"})}function Fo(n,t){n.attr("transform",function(n){return"translate(0,"+t(n)+")"})}function Oo(n){return n.toISOString()}function Yo(n,t,e){function r(t){return n(t)}function u(n,e){var r=n[1]-n[0],u=r/e,i=Xo.bisect(js,u);return i==js.length?[t.year,Yi(n.map(function(n){return n/31536e6}),e)[2]]:i?t[u/js[i-1]<js[i]/u?i-1:i]:[Os,Yi(n,e)[2]]}return r.invert=function(t){return Io(n.invert(t))},r.domain=function(t){return arguments.length?(n.domain(t),r):n.domain().map(Io)},r.nice=function(n,t){function e(e){return!isNaN(e)&&!n.range(e,Io(+e+1),t).length}var i=r.domain(),o=zi(i),a=null==n?u(o,10):"number"==typeof n&&u(o,n);return a&&(n=a[0],t=a[1]),r.domain(Pi(i,t>1?{floor:function(t){for(;e(t=n.floor(t));)t=Io(t-1);return t},ceil:function(t){for(;e(t=n.ceil(t));)t=Io(+t+1);return t}}:n))},r.ticks=function(n,t){var e=zi(r.domain()),i=null==n?u(e,10):"number"==typeof n?u(e,n):!n.range&&[{range:n},t];return 
i&&(n=i[0],t=i[1]),n.range(e[0],Io(+e[1]+1),1>t?1:t)},r.tickFormat=function(){return e},r.copy=function(){return Yo(n.copy(),t,e)},Fi(r,n)}function Io(n){return new Date(n)}function Zo(n){return JSON.parse(n.responseText)}function Vo(n){var t=Wo.createRange();return t.selectNode(Wo.body),t.createContextualFragment(n.responseText)}var Xo={version:"3.4.3"};Date.now||(Date.now=function(){return+new Date});var $o=[].slice,Bo=function(n){return $o.call(n)},Wo=document,Jo=Wo.documentElement,Go=window;try{Bo(Jo.childNodes)[0].nodeType}catch(Ko){Bo=function(n){for(var t=n.length,e=new Array(t);t--;)e[t]=n[t];return e}}try{Wo.createElement("div").style.setProperty("opacity",0,"")}catch(Qo){var na=Go.Element.prototype,ta=na.setAttribute,ea=na.setAttributeNS,ra=Go.CSSStyleDeclaration.prototype,ua=ra.setProperty;na.setAttribute=function(n,t){ta.call(this,n,t+"")},na.setAttributeNS=function(n,t,e){ea.call(this,n,t,e+"")},ra.setProperty=function(n,t,e){ua.call(this,n,t+"",e)}}Xo.ascending=function(n,t){return t>n?-1:n>t?1:n>=t?0:0/0},Xo.descending=function(n,t){return n>t?-1:t>n?1:t>=n?0:0/0},Xo.min=function(n,t){var e,r,u=-1,i=n.length;if(1===arguments.length){for(;++u<i&&!(null!=(e=n[u])&&e>=e);)e=void 0;for(;++u<i;)null!=(r=n[u])&&e>r&&(e=r)}else{for(;++u<i&&!(null!=(e=t.call(n,n[u],u))&&e>=e);)e=void 0;for(;++u<i;)null!=(r=t.call(n,n[u],u))&&e>r&&(e=r)}return e},Xo.max=function(n,t){var e,r,u=-1,i=n.length;if(1===arguments.length){for(;++u<i&&!(null!=(e=n[u])&&e>=e);)e=void 0;for(;++u<i;)null!=(r=n[u])&&r>e&&(e=r)}else{for(;++u<i&&!(null!=(e=t.call(n,n[u],u))&&e>=e);)e=void 0;for(;++u<i;)null!=(r=t.call(n,n[u],u))&&r>e&&(e=r)}return e},Xo.extent=function(n,t){var e,r,u,i=-1,o=n.length;if(1===arguments.length){for(;++i<o&&!(null!=(e=u=n[i])&&e>=e);)e=u=void 0;for(;++i<o;)null!=(r=n[i])&&(e>r&&(e=r),r>u&&(u=r))}else{for(;++i<o&&!(null!=(e=u=t.call(n,n[i],i))&&e>=e);)e=void 0;for(;++i<o;)null!=(r=t.call(n,n[i],i))&&(e>r&&(e=r),r>u&&(u=r))}return[e,u]},Xo.sum=function(n,t){var 
e,r=0,u=n.length,i=-1;if(1===arguments.length)for(;++i<u;)isNaN(e=+n[i])||(r+=e);else for(;++i<u;)isNaN(e=+t.call(n,n[i],i))||(r+=e);return r},Xo.mean=function(t,e){var r,u=t.length,i=0,o=-1,a=0;if(1===arguments.length)for(;++o<u;)n(r=t[o])&&(i+=(r-i)/++a);else for(;++o<u;)n(r=e.call(t,t[o],o))&&(i+=(r-i)/++a);return a?i:void 0},Xo.quantile=function(n,t){var e=(n.length-1)*t+1,r=Math.floor(e),u=+n[r-1],i=e-r;return i?u+i*(n[r]-u):u},Xo.median=function(t,e){return arguments.length>1&&(t=t.map(e)),t=t.filter(n),t.length?Xo.quantile(t.sort(Xo.ascending),.5):void 0},Xo.bisector=function(n){return{left:function(t,e,r,u){for(arguments.length<3&&(r=0),arguments.length<4&&(u=t.length);u>r;){var i=r+u>>>1;n.call(t,t[i],i)<e?r=i+1:u=i}return r},right:function(t,e,r,u){for(arguments.length<3&&(r=0),arguments.length<4&&(u=t.length);u>r;){var i=r+u>>>1;e<n.call(t,t[i],i)?u=i:r=i+1}return r}}};var ia=Xo.bisector(function(n){return n});Xo.bisectLeft=ia.left,Xo.bisect=Xo.bisectRight=ia.right,Xo.shuffle=function(n){for(var t,e,r=n.length;r;)e=0|Math.random()*r--,t=n[r],n[r]=n[e],n[e]=t;return n},Xo.permute=function(n,t){for(var e=t.length,r=new Array(e);e--;)r[e]=n[t[e]];return r},Xo.pairs=function(n){for(var t,e=0,r=n.length-1,u=n[0],i=new Array(0>r?0:r);r>e;)i[e]=[t=u,u=n[++e]];return i},Xo.zip=function(){if(!(u=arguments.length))return[];for(var n=-1,e=Xo.min(arguments,t),r=new Array(e);++n<e;)for(var u,i=-1,o=r[n]=new Array(u);++i<u;)o[i]=arguments[i][n];return r},Xo.transpose=function(n){return Xo.zip.apply(Xo,n)},Xo.keys=function(n){var t=[];for(var e in n)t.push(e);return t},Xo.values=function(n){var t=[];for(var e in n)t.push(n[e]);return t},Xo.entries=function(n){var t=[];for(var e in n)t.push({key:e,value:n[e]});return t},Xo.merge=function(n){for(var t,e,r,u=n.length,i=-1,o=0;++i<u;)o+=n[i].length;for(e=new Array(o);--u>=0;)for(r=n[u],t=r.length;--t>=0;)e[--o]=r[t];return e};var 
oa=Math.abs;Xo.range=function(n,t,r){if(arguments.length<3&&(r=1,arguments.length<2&&(t=n,n=0)),1/0===(t-n)/r)throw new Error("infinite range");var u,i=[],o=e(oa(r)),a=-1;if(n*=o,t*=o,r*=o,0>r)for(;(u=n+r*++a)>t;)i.push(u/o);else for(;(u=n+r*++a)<t;)i.push(u/o);return i},Xo.map=function(n){var t=new u;if(n instanceof u)n.forEach(function(n,e){t.set(n,e)});else for(var e in n)t.set(e,n[e]);return t},r(u,{has:i,get:function(n){return this[aa+n]},set:function(n,t){return this[aa+n]=t},remove:o,keys:a,values:function(){var n=[];return this.forEach(function(t,e){n.push(e)}),n},entries:function(){var n=[];return this.forEach(function(t,e){n.push({key:t,value:e})}),n},size:c,empty:s,forEach:function(n){for(var t in this)t.charCodeAt(0)===ca&&n.call(this,t.substring(1),this[t])}});var aa="\x00",ca=aa.charCodeAt(0);Xo.nest=function(){function n(t,a,c){if(c>=o.length)return r?r.call(i,a):e?a.sort(e):a;for(var s,l,f,h,g=-1,p=a.length,v=o[c++],d=new u;++g<p;)(h=d.get(s=v(l=a[g])))?h.push(l):d.set(s,[l]);return t?(l=t(),f=function(e,r){l.set(e,n(t,r,c))}):(l={},f=function(e,r){l[e]=n(t,r,c)}),d.forEach(f),l}function t(n,e){if(e>=o.length)return n;var r=[],u=a[e++];return n.forEach(function(n,u){r.push({key:n,values:t(u,e)})}),u?r.sort(function(n,t){return u(n.key,t.key)}):r}var e,r,i={},o=[],a=[];return i.map=function(t,e){return n(e,t,0)},i.entries=function(e){return t(n(Xo.map,e,0),0)},i.key=function(n){return o.push(n),i},i.sortKeys=function(n){return a[o.length-1]=n,i},i.sortValues=function(n){return e=n,i},i.rollup=function(n){return r=n,i},i},Xo.set=function(n){var t=new l;if(n)for(var e=0,r=n.length;r>e;++e)t.add(n[e]);return t},r(l,{has:i,add:function(n){return this[aa+n]=!0,n},remove:function(n){return n=aa+n,n in this&&delete this[n]},values:a,size:c,empty:s,forEach:function(n){for(var t in this)t.charCodeAt(0)===ca&&n.call(this,t.substring(1))}}),Xo.behavior={},Xo.rebind=function(n,t){for(var e,r=1,u=arguments.length;++r<u;)n[e=arguments[r]]=f(n,t,t[e]);return n};var 
sa=["webkit","ms","moz","Moz","o","O"];Xo.dispatch=function(){for(var n=new p,t=-1,e=arguments.length;++t<e;)n[arguments[t]]=v(n);return n},p.prototype.on=function(n,t){var e=n.indexOf("."),r="";if(e>=0&&(r=n.substring(e+1),n=n.substring(0,e)),n)return arguments.length<2?this[n].on(r):this[n].on(r,t);if(2===arguments.length){if(null==t)for(n in this)this.hasOwnProperty(n)&&this[n].on(r,null);return this}},Xo.event=null,Xo.requote=function(n){return n.replace(la,"\\$&")};var la=/[\\\^\$\*\+\?\|\[\]\(\)\.\{\}]/g,fa={}.__proto__?function(n,t){n.__proto__=t}:function(n,t){for(var e in t)n[e]=t[e]},ha=function(n,t){return t.querySelector(n)},ga=function(n,t){return t.querySelectorAll(n)},pa=Jo[h(Jo,"matchesSelector")],va=function(n,t){return pa.call(n,t)};"function"==typeof Sizzle&&(ha=function(n,t){return Sizzle(n,t)[0]||null},ga=Sizzle,va=Sizzle.matchesSelector),Xo.selection=function(){return xa};var da=Xo.selection.prototype=[];da.select=function(n){var t,e,r,u,i=[];n=M(n);for(var o=-1,a=this.length;++o<a;){i.push(t=[]),t.parentNode=(r=this[o]).parentNode;for(var c=-1,s=r.length;++c<s;)(u=r[c])?(t.push(e=n.call(u,u.__data__,c,o)),e&&"__data__"in u&&(e.__data__=u.__data__)):t.push(null)}return x(i)},da.selectAll=function(n){var t,e,r=[];n=_(n);for(var u=-1,i=this.length;++u<i;)for(var o=this[u],a=-1,c=o.length;++a<c;)(e=o[a])&&(r.push(t=Bo(n.call(e,e.__data__,a,u))),t.parentNode=e);return x(r)};var ma={svg:"http://www.w3.org/2000/svg",xhtml:"http://www.w3.org/1999/xhtml",xlink:"http://www.w3.org/1999/xlink",xml:"http://www.w3.org/XML/1998/namespace",xmlns:"http://www.w3.org/2000/xmlns/"};Xo.ns={prefix:ma,qualify:function(n){var t=n.indexOf(":"),e=n;return t>=0&&(e=n.substring(0,t),n=n.substring(t+1)),ma.hasOwnProperty(e)?{space:ma[e],local:n}:n}},da.attr=function(n,t){if(arguments.length<2){if("string"==typeof n){var e=this.node();return n=Xo.ns.qualify(n),n.local?e.getAttributeNS(n.space,n.local):e.getAttribute(n)}for(t in n)this.each(b(t,n[t]));return this}return 
this.each(b(n,t))},da.classed=function(n,t){if(arguments.length<2){if("string"==typeof n){var e=this.node(),r=(n=k(n)).length,u=-1;if(t=e.classList){for(;++u<r;)if(!t.contains(n[u]))return!1}else for(t=e.getAttribute("class");++u<r;)if(!S(n[u]).test(t))return!1;return!0}for(t in n)this.each(E(t,n[t]));return this}return this.each(E(n,t))},da.style=function(n,t,e){var r=arguments.length;if(3>r){if("string"!=typeof n){2>r&&(t="");for(e in n)this.each(C(e,n[e],t));return this}if(2>r)return Go.getComputedStyle(this.node(),null).getPropertyValue(n);e=""}return this.each(C(n,t,e))},da.property=function(n,t){if(arguments.length<2){if("string"==typeof n)return this.node()[n];for(t in n)this.each(N(t,n[t]));return this}return this.each(N(n,t))},da.text=function(n){return arguments.length?this.each("function"==typeof n?function(){var t=n.apply(this,arguments);this.textContent=null==t?"":t}:null==n?function(){this.textContent=""}:function(){this.textContent=n}):this.node().textContent},da.html=function(n){return arguments.length?this.each("function"==typeof n?function(){var t=n.apply(this,arguments);this.innerHTML=null==t?"":t}:null==n?function(){this.innerHTML=""}:function(){this.innerHTML=n}):this.node().innerHTML},da.append=function(n){return n=L(n),this.select(function(){return this.appendChild(n.apply(this,arguments))})},da.insert=function(n,t){return n=L(n),t=M(t),this.select(function(){return this.insertBefore(n.apply(this,arguments),t.apply(this,arguments)||null)})},da.remove=function(){return this.each(function(){var n=this.parentNode;n&&n.removeChild(this)})},da.data=function(n,t){function e(n,e){var r,i,o,a=n.length,f=e.length,h=Math.min(a,f),g=new Array(f),p=new Array(f),v=new Array(a);if(t){var d,m=new u,y=new 
u,x=[];for(r=-1;++r<a;)d=t.call(i=n[r],i.__data__,r),m.has(d)?v[r]=i:m.set(d,i),x.push(d);for(r=-1;++r<f;)d=t.call(e,o=e[r],r),(i=m.get(d))?(g[r]=i,i.__data__=o):y.has(d)||(p[r]=T(o)),y.set(d,o),m.remove(d);for(r=-1;++r<a;)m.has(x[r])&&(v[r]=n[r])}else{for(r=-1;++r<h;)i=n[r],o=e[r],i?(i.__data__=o,g[r]=i):p[r]=T(o);for(;f>r;++r)p[r]=T(e[r]);for(;a>r;++r)v[r]=n[r]}p.update=g,p.parentNode=g.parentNode=v.parentNode=n.parentNode,c.push(p),s.push(g),l.push(v)}var r,i,o=-1,a=this.length;if(!arguments.length){for(n=new Array(a=(r=this[0]).length);++o<a;)(i=r[o])&&(n[o]=i.__data__);return n}var c=D([]),s=x([]),l=x([]);if("function"==typeof n)for(;++o<a;)e(r=this[o],n.call(r,r.parentNode.__data__,o));else for(;++o<a;)e(r=this[o],n);return s.enter=function(){return c},s.exit=function(){return l},s},da.datum=function(n){return arguments.length?this.property("__data__",n):this.property("__data__")},da.filter=function(n){var t,e,r,u=[];"function"!=typeof n&&(n=q(n));for(var i=0,o=this.length;o>i;i++){u.push(t=[]),t.parentNode=(e=this[i]).parentNode;for(var a=0,c=e.length;c>a;a++)(r=e[a])&&n.call(r,r.__data__,a,i)&&t.push(r)}return x(u)},da.order=function(){for(var n=-1,t=this.length;++n<t;)for(var e,r=this[n],u=r.length-1,i=r[u];--u>=0;)(e=r[u])&&(i&&i!==e.nextSibling&&i.parentNode.insertBefore(e,i),i=e);return this},da.sort=function(n){n=z.apply(this,arguments);for(var t=-1,e=this.length;++t<e;)this[t].sort(n);return this.order()},da.each=function(n){return R(this,function(t,e,r){n.call(t,t.__data__,e,r)})},da.call=function(n){var t=Bo(arguments);return n.apply(t[0]=this,t),this},da.empty=function(){return!this.node()},da.node=function(){for(var n=0,t=this.length;t>n;n++)for(var e=this[n],r=0,u=e.length;u>r;r++){var i=e[r];if(i)return i}return null},da.size=function(){var n=0;return this.each(function(){++n}),n};var 
ya=[];Xo.selection.enter=D,Xo.selection.enter.prototype=ya,ya.append=da.append,ya.empty=da.empty,ya.node=da.node,ya.call=da.call,ya.size=da.size,ya.select=function(n){for(var t,e,r,u,i,o=[],a=-1,c=this.length;++a<c;){r=(u=this[a]).update,o.push(t=[]),t.parentNode=u.parentNode;for(var s=-1,l=u.length;++s<l;)(i=u[s])?(t.push(r[s]=e=n.call(u.parentNode,i.__data__,s,a)),e.__data__=i.__data__):t.push(null)}return x(o)},ya.insert=function(n,t){return arguments.length<2&&(t=P(this)),da.insert.call(this,n,t)},da.transition=function(){for(var n,t,e=ks||++Ls,r=[],u=Es||{time:Date.now(),ease:yu,delay:0,duration:250},i=-1,o=this.length;++i<o;){r.push(n=[]);for(var a=this[i],c=-1,s=a.length;++c<s;)(t=a[c])&&jo(t,c,e,u),n.push(t)}return Do(r,e)},da.interrupt=function(){return this.each(U)},Xo.select=function(n){var t=["string"==typeof n?ha(n,Wo):n];return t.parentNode=Jo,x([t])},Xo.selectAll=function(n){var t=Bo("string"==typeof n?ga(n,Wo):n);return t.parentNode=Jo,x([t])};var xa=Xo.select(Jo);da.on=function(n,t,e){var r=arguments.length;if(3>r){if("string"!=typeof n){2>r&&(t=!1);for(e in n)this.each(j(e,n[e],t));return this}if(2>r)return(r=this.node()["__on"+n])&&r._;e=!1}return this.each(j(n,t,e))};var Ma=Xo.map({mouseenter:"mouseover",mouseleave:"mouseout"});Ma.forEach(function(n){"on"+n in Wo&&Ma.remove(n)});var _a="onselectstart"in Wo?null:h(Jo.style,"userSelect"),ba=0;Xo.mouse=function(n){return Y(n,m())};var wa=/WebKit/.test(Go.navigator.userAgent)?-1:0;Xo.touches=function(n,t){return arguments.length<2&&(t=m().touches),t?Bo(t).map(function(t){var e=Y(n,t);return e.identifier=t.identifier,e}):[]},Xo.behavior.drag=function(){function n(){this.on("mousedown.drag",o).on("touchstart.drag",a)}function t(){return Xo.event.changedTouches[0].identifier}function e(n,t){return Xo.touches(n).filter(function(n){return n.identifier===t})[0]}function r(n,t,e,r){return function(){function o(){var 
n=t(l,g),e=n[0]-v[0],r=n[1]-v[1];d|=e|r,v=n,f({type:"drag",x:n[0]+c[0],y:n[1]+c[1],dx:e,dy:r})}function a(){m.on(e+"."+p,null).on(r+"."+p,null),y(d&&Xo.event.target===h),f({type:"dragend"})}var c,s=this,l=s.parentNode,f=u.of(s,arguments),h=Xo.event.target,g=n(),p=null==g?"drag":"drag-"+g,v=t(l,g),d=0,m=Xo.select(Go).on(e+"."+p,o).on(r+"."+p,a),y=O();i?(c=i.apply(s,arguments),c=[c.x-v[0],c.y-v[1]]):c=[0,0],f({type:"dragstart"})}}var u=y(n,"drag","dragstart","dragend"),i=null,o=r(g,Xo.mouse,"mousemove","mouseup"),a=r(t,e,"touchmove","touchend");return n.origin=function(t){return arguments.length?(i=t,n):i},Xo.rebind(n,u,"on")};var Sa=Math.PI,ka=2*Sa,Ea=Sa/2,Aa=1e-6,Ca=Aa*Aa,Na=Sa/180,La=180/Sa,Ta=Math.SQRT2,qa=2,za=4;Xo.interpolateZoom=function(n,t){function e(n){var t=n*y;if(m){var e=B(v),o=i/(qa*h)*(e*W(Ta*t+v)-$(v));return[r+o*s,u+o*l,i*e/B(Ta*t+v)]}return[r+n*s,u+n*l,i*Math.exp(Ta*t)]}var r=n[0],u=n[1],i=n[2],o=t[0],a=t[1],c=t[2],s=o-r,l=a-u,f=s*s+l*l,h=Math.sqrt(f),g=(c*c-i*i+za*f)/(2*i*qa*h),p=(c*c-i*i-za*f)/(2*c*qa*h),v=Math.log(Math.sqrt(g*g+1)-g),d=Math.log(Math.sqrt(p*p+1)-p),m=d-v,y=(m||Math.log(c/i))/Ta;return e.duration=1e3*y,e},Xo.behavior.zoom=function(){function n(n){n.on(A,s).on(Pa+".zoom",f).on(C,h).on("dblclick.zoom",g).on(L,l)}function t(n){return[(n[0]-S.x)/S.k,(n[1]-S.y)/S.k]}function e(n){return[n[0]*S.k+S.x,n[1]*S.k+S.y]}function r(n){S.k=Math.max(E[0],Math.min(E[1],n))}function u(n,t){t=e(t),S.x+=n[0]-t[0],S.y+=n[1]-t[1]}function i(){_&&_.domain(M.range().map(function(n){return(n-S.x)/S.k}).map(M.invert)),w&&w.domain(b.range().map(function(n){return(n-S.y)/S.k}).map(b.invert))}function o(n){n({type:"zoomstart"})}function a(n){i(),n({type:"zoom",scale:S.k,translate:[S.x,S.y]})}function c(n){n({type:"zoomend"})}function s(){function n(){l=1,u(Xo.mouse(r),g),a(i)}function e(){f.on(C,Go===r?h:null).on(N,null),p(l&&Xo.event.target===s),c(i)}var 
r=this,i=T.of(r,arguments),s=Xo.event.target,l=0,f=Xo.select(Go).on(C,n).on(N,e),g=t(Xo.mouse(r)),p=O();U.call(r),o(i)}function l(){function n(){var n=Xo.touches(g);return h=S.k,n.forEach(function(n){n.identifier in v&&(v[n.identifier]=t(n))}),n}function e(){for(var t=Xo.event.changedTouches,e=0,i=t.length;i>e;++e)v[t[e].identifier]=null;var o=n(),c=Date.now();if(1===o.length){if(500>c-x){var s=o[0],l=v[s.identifier];r(2*S.k),u(s,l),d(),a(p)}x=c}else if(o.length>1){var s=o[0],f=o[1],h=s[0]-f[0],g=s[1]-f[1];m=h*h+g*g}}function i(){for(var n,t,e,i,o=Xo.touches(g),c=0,s=o.length;s>c;++c,i=null)if(e=o[c],i=v[e.identifier]){if(t)break;n=e,t=i}if(i){var l=(l=e[0]-n[0])*l+(l=e[1]-n[1])*l,f=m&&Math.sqrt(l/m);n=[(n[0]+e[0])/2,(n[1]+e[1])/2],t=[(t[0]+i[0])/2,(t[1]+i[1])/2],r(f*h)}x=null,u(n,t),a(p)}function f(){if(Xo.event.touches.length){for(var t=Xo.event.changedTouches,e=0,r=t.length;r>e;++e)delete v[t[e].identifier];for(var u in v)return void n()}b.on(M,null).on(_,null),w.on(A,s).on(L,l),k(),c(p)}var h,g=this,p=T.of(g,arguments),v={},m=0,y=Xo.event.changedTouches[0].identifier,M="touchmove.zoom-"+y,_="touchend.zoom-"+y,b=Xo.select(Go).on(M,i).on(_,f),w=Xo.select(g).on(A,null).on(L,e),k=O();U.call(g),e(),o(p)}function f(){var n=T.of(this,arguments);m?clearTimeout(m):(U.call(this),o(n)),m=setTimeout(function(){m=null,c(n)},50),d();var e=v||Xo.mouse(this);p||(p=t(e)),r(Math.pow(2,.002*Ra())*S.k),u(e,p),a(n)}function h(){p=null}function g(){var n=T.of(this,arguments),e=Xo.mouse(this),i=t(e),s=Math.log(S.k)/Math.LN2;o(n),r(Math.pow(2,Xo.event.shiftKey?Math.ceil(s)-1:Math.floor(s)+1)),u(e,i),a(n),c(n)}var p,v,m,x,M,_,b,w,S={x:0,y:0,k:1},k=[960,500],E=Da,A="mousedown.zoom",C="mousemove.zoom",N="mouseup.zoom",L="touchstart.zoom",T=y(n,"zoomstart","zoom","zoomend");return n.event=function(n){n.each(function(){var n=T.of(this,arguments),t=S;ks?Xo.select(this).transition().each("start.zoom",function(){S=this.__chart__||{x:0,y:0,k:1},o(n)}).tween("zoom:zoom",function(){var 
e=k[0],r=k[1],u=e/2,i=r/2,o=Xo.interpolateZoom([(u-S.x)/S.k,(i-S.y)/S.k,e/S.k],[(u-t.x)/t.k,(i-t.y)/t.k,e/t.k]);return function(t){var r=o(t),c=e/r[2];this.__chart__=S={x:u-r[0]*c,y:i-r[1]*c,k:c},a(n)}}).each("end.zoom",function(){c(n)}):(this.__chart__=S,o(n),a(n),c(n))})},n.translate=function(t){return arguments.length?(S={x:+t[0],y:+t[1],k:S.k},i(),n):[S.x,S.y]},n.scale=function(t){return arguments.length?(S={x:S.x,y:S.y,k:+t},i(),n):S.k},n.scaleExtent=function(t){return arguments.length?(E=null==t?Da:[+t[0],+t[1]],n):E},n.center=function(t){return arguments.length?(v=t&&[+t[0],+t[1]],n):v},n.size=function(t){return arguments.length?(k=t&&[+t[0],+t[1]],n):k},n.x=function(t){return arguments.length?(_=t,M=t.copy(),S={x:0,y:0,k:1},n):_},n.y=function(t){return arguments.length?(w=t,b=t.copy(),S={x:0,y:0,k:1},n):w},Xo.rebind(n,T,"on")};var Ra,Da=[0,1/0],Pa="onwheel"in Wo?(Ra=function(){return-Xo.event.deltaY*(Xo.event.deltaMode?120:1)},"wheel"):"onmousewheel"in Wo?(Ra=function(){return Xo.event.wheelDelta},"mousewheel"):(Ra=function(){return-Xo.event.detail},"MozMousePixelScroll");G.prototype.toString=function(){return this.rgb()+""},Xo.hsl=function(n,t,e){return 1===arguments.length?n instanceof Q?K(n.h,n.s,n.l):dt(""+n,mt,K):K(+n,+t,+e)};var Ua=Q.prototype=new G;Ua.brighter=function(n){return n=Math.pow(.7,arguments.length?n:1),K(this.h,this.s,this.l/n)},Ua.darker=function(n){return n=Math.pow(.7,arguments.length?n:1),K(this.h,this.s,n*this.l)},Ua.rgb=function(){return nt(this.h,this.s,this.l)},Xo.hcl=function(n,t,e){return 1===arguments.length?n instanceof et?tt(n.h,n.c,n.l):n instanceof it?at(n.l,n.a,n.b):at((n=yt((n=Xo.rgb(n)).r,n.g,n.b)).l,n.a,n.b):tt(+n,+t,+e)};var ja=et.prototype=new G;ja.brighter=function(n){return tt(this.h,this.c,Math.min(100,this.l+Ha*(arguments.length?n:1)))},ja.darker=function(n){return tt(this.h,this.c,Math.max(0,this.l-Ha*(arguments.length?n:1)))},ja.rgb=function(){return rt(this.h,this.c,this.l).rgb()},Xo.lab=function(n,t,e){return 
1===arguments.length?n instanceof it?ut(n.l,n.a,n.b):n instanceof et?rt(n.l,n.c,n.h):yt((n=Xo.rgb(n)).r,n.g,n.b):ut(+n,+t,+e)};var Ha=18,Fa=.95047,Oa=1,Ya=1.08883,Ia=it.prototype=new G;Ia.brighter=function(n){return ut(Math.min(100,this.l+Ha*(arguments.length?n:1)),this.a,this.b)},Ia.darker=function(n){return ut(Math.max(0,this.l-Ha*(arguments.length?n:1)),this.a,this.b)},Ia.rgb=function(){return ot(this.l,this.a,this.b)},Xo.rgb=function(n,t,e){return 1===arguments.length?n instanceof pt?gt(n.r,n.g,n.b):dt(""+n,gt,nt):gt(~~n,~~t,~~e)};var Za=pt.prototype=new G;Za.brighter=function(n){n=Math.pow(.7,arguments.length?n:1);var t=this.r,e=this.g,r=this.b,u=30;return t||e||r?(t&&u>t&&(t=u),e&&u>e&&(e=u),r&&u>r&&(r=u),gt(Math.min(255,~~(t/n)),Math.min(255,~~(e/n)),Math.min(255,~~(r/n)))):gt(u,u,u)},Za.darker=function(n){return n=Math.pow(.7,arguments.length?n:1),gt(~~(n*this.r),~~(n*this.g),~~(n*this.b))},Za.hsl=function(){return mt(this.r,this.g,this.b)},Za.toString=function(){return"#"+vt(this.r)+vt(this.g)+vt(this.b)};var 
Va=Xo.map({aliceblue:15792383,antiquewhite:16444375,aqua:65535,aquamarine:8388564,azure:15794175,beige:16119260,bisque:16770244,black:0,blanchedalmond:16772045,blue:255,blueviolet:9055202,brown:10824234,burlywood:14596231,cadetblue:6266528,chartreuse:8388352,chocolate:13789470,coral:16744272,cornflowerblue:6591981,cornsilk:16775388,crimson:14423100,cyan:65535,darkblue:139,darkcyan:35723,darkgoldenrod:12092939,darkgray:11119017,darkgreen:25600,darkgrey:11119017,darkkhaki:12433259,darkmagenta:9109643,darkolivegreen:5597999,darkorange:16747520,darkorchid:10040012,darkred:9109504,darksalmon:15308410,darkseagreen:9419919,darkslateblue:4734347,darkslategray:3100495,darkslategrey:3100495,darkturquoise:52945,darkviolet:9699539,deeppink:16716947,deepskyblue:49151,dimgray:6908265,dimgrey:6908265,dodgerblue:2003199,firebrick:11674146,floralwhite:16775920,forestgreen:2263842,fuchsia:16711935,gainsboro:14474460,ghostwhite:16316671,gold:16766720,goldenrod:14329120,gray:8421504,green:32768,greenyellow:11403055,grey:8421504,honeydew:15794160,hotpink:16738740,indianred:13458524,indigo:4915330,ivory:16777200,khaki:15787660,lavender:15132410,lavenderblush:16773365,lawngreen:8190976,lemonchiffon:16775885,lightblue:11393254,lightcoral:15761536,lightcyan:14745599,lightgoldenrodyellow:16448210,lightgray:13882323,lightgreen:9498256,lightgrey:13882323,lightpink:16758465,lightsalmon:16752762,lightseagreen:2142890,lightskyblue:8900346,lightslategray:7833753,lightslategrey:7833753,lightsteelblue:11584734,lightyellow:16777184,lime:65280,limegreen:3329330,linen:16445670,magenta:16711935,maroon:8388608,mediumaquamarine:6737322,mediumblue:205,mediumorchid:12211667,mediumpurple:9662683,mediumseagreen:3978097,mediumslateblue:8087790,mediumspringgreen:64154,mediumturquoise:4772300,mediumvioletred:13047173,midnightblue:1644912,mintcream:16121850,mistyrose:16770273,moccasin:16770229,navajowhite:16768685,navy:128,oldlace:16643558,olive:8421376,olivedrab:7048739,orange:16753920,orangered:16729344,orchid:
14315734,palegoldenrod:15657130,palegreen:10025880,paleturquoise:11529966,palevioletred:14381203,papayawhip:16773077,peachpuff:16767673,peru:13468991,pink:16761035,plum:14524637,powderblue:11591910,purple:8388736,red:16711680,rosybrown:12357519,royalblue:4286945,saddlebrown:9127187,salmon:16416882,sandybrown:16032864,seagreen:3050327,seashell:16774638,sienna:10506797,silver:12632256,skyblue:8900331,slateblue:6970061,slategray:7372944,slategrey:7372944,snow:16775930,springgreen:65407,steelblue:4620980,tan:13808780,teal:32896,thistle:14204888,tomato:16737095,turquoise:4251856,violet:15631086,wheat:16113331,white:16777215,whitesmoke:16119285,yellow:16776960,yellowgreen:10145074});Va.forEach(function(n,t){Va.set(n,ft(t))}),Xo.functor=_t,Xo.xhr=wt(bt),Xo.dsv=function(n,t){function e(n,e,i){arguments.length<3&&(i=e,e=null);var o=St(n,t,null==e?r:u(e),i);return o.row=function(n){return arguments.length?o.response(null==(e=n)?r:u(n)):e},o}function r(n){return e.parse(n.responseText)}function u(n){return function(t){return e.parse(t.responseText,n)}}function i(t){return t.map(o).join(n)}function o(n){return a.test(n)?'"'+n.replace(/\"/g,'""')+'"':n}var a=new RegExp('["'+n+"\n]"),c=n.charCodeAt(0);return e.parse=function(n,t){var r;return e.parseRows(n,function(n,e){if(r)return r(n,e-1);var u=new Function("d","return {"+n.map(function(n,t){return JSON.stringify(n)+": d["+t+"]"}).join(",")+"}");r=t?function(n,e){return t(u(n),e)}:u})},e.parseRows=function(n,t){function e(){if(l>=s)return o;if(u)return u=!1,i;var t=l;if(34===n.charCodeAt(t)){for(var e=t;e++<s;)if(34===n.charCodeAt(e)){if(34!==n.charCodeAt(e+1))break;++e}l=e+2;var r=n.charCodeAt(e+1);return 13===r?(u=!0,10===n.charCodeAt(e+2)&&++l):10===r&&(u=!0),n.substring(t+1,e).replace(/""/g,'"')}for(;s>l;){var r=n.charCodeAt(l++),a=1;if(10===r)u=!0;else if(13===r)u=!0,10===n.charCodeAt(l)&&(++l,++a);else if(r!==c)continue;return n.substring(t,l-a)}return n.substring(t)}for(var 
r,u,i={},o={},a=[],s=n.length,l=0,f=0;(r=e())!==o;){for(var h=[];r!==i&&r!==o;)h.push(r),r=e();(!t||(h=t(h,f++)))&&a.push(h)}return a},e.format=function(t){if(Array.isArray(t[0]))return e.formatRows(t);var r=new l,u=[];return t.forEach(function(n){for(var t in n)r.has(t)||u.push(r.add(t))}),[u.map(o).join(n)].concat(t.map(function(t){return u.map(function(n){return o(t[n])}).join(n)})).join("\n")},e.formatRows=function(n){return n.map(i).join("\n")},e},Xo.csv=Xo.dsv(",","text/csv"),Xo.tsv=Xo.dsv(" ","text/tab-separated-values");var Xa,$a,Ba,Wa,Ja,Ga=Go[h(Go,"requestAnimationFrame")]||function(n){setTimeout(n,17)};Xo.timer=function(n,t,e){var r=arguments.length;2>r&&(t=0),3>r&&(e=Date.now());var u=e+t,i={c:n,t:u,f:!1,n:null};$a?$a.n=i:Xa=i,$a=i,Ba||(Wa=clearTimeout(Wa),Ba=1,Ga(Et))},Xo.timer.flush=function(){At(),Ct()},Xo.round=function(n,t){return t?Math.round(n*(t=Math.pow(10,t)))/t:Math.round(n)};var Ka=["y","z","a","f","p","n","\xb5","m","","k","M","G","T","P","E","Z","Y"].map(Lt);Xo.formatPrefix=function(n,t){var e=0;return n&&(0>n&&(n*=-1),t&&(n=Xo.round(n,Nt(n,t))),e=1+Math.floor(1e-12+Math.log(n)/Math.LN10),e=Math.max(-24,Math.min(24,3*Math.floor((0>=e?e+1:e-1)/3)))),Ka[8+e/3]};var Qa=/(?:([^{])?([<>=^]))?([+\- ])?([$#])?(0)?(\d+)?(,)?(\.-?\d+)?([a-z%])?/i,nc=Xo.map({b:function(n){return n.toString(2)},c:function(n){return String.fromCharCode(n)},o:function(n){return n.toString(8)},x:function(n){return n.toString(16)},X:function(n){return n.toString(16).toUpperCase()},g:function(n,t){return n.toPrecision(t)},e:function(n,t){return n.toExponential(t)},f:function(n,t){return n.toFixed(t)},r:function(n,t){return(n=Xo.round(n,Nt(n,t))).toFixed(Math.max(0,Math.min(20,Nt(n*(1+1e-15),t))))}}),tc=Xo.time={},ec=Date;zt.prototype={getDate:function(){return this._.getUTCDate()},getDay:function(){return this._.getUTCDay()},getFullYear:function(){return this._.getUTCFullYear()},getHours:function(){return this._.getUTCHours()},getMilliseconds:function(){return 
this._.getUTCMilliseconds()},getMinutes:function(){return this._.getUTCMinutes()},getMonth:function(){return this._.getUTCMonth()},getSeconds:function(){return this._.getUTCSeconds()},getTime:function(){return this._.getTime()},getTimezoneOffset:function(){return 0},valueOf:function(){return this._.valueOf()},setDate:function(){rc.setUTCDate.apply(this._,arguments)},setDay:function(){rc.setUTCDay.apply(this._,arguments)},setFullYear:function(){rc.setUTCFullYear.apply(this._,arguments)},setHours:function(){rc.setUTCHours.apply(this._,arguments)},setMilliseconds:function(){rc.setUTCMilliseconds.apply(this._,arguments)},setMinutes:function(){rc.setUTCMinutes.apply(this._,arguments)},setMonth:function(){rc.setUTCMonth.apply(this._,arguments)},setSeconds:function(){rc.setUTCSeconds.apply(this._,arguments)},setTime:function(){rc.setTime.apply(this._,arguments)}};var rc=Date.prototype;tc.year=Rt(function(n){return n=tc.day(n),n.setMonth(0,1),n},function(n,t){n.setFullYear(n.getFullYear()+t)},function(n){return n.getFullYear()}),tc.years=tc.year.range,tc.years.utc=tc.year.utc.range,tc.day=Rt(function(n){var t=new ec(2e3,0);return t.setFullYear(n.getFullYear(),n.getMonth(),n.getDate()),t},function(n,t){n.setDate(n.getDate()+t)},function(n){return n.getDate()-1}),tc.days=tc.day.range,tc.days.utc=tc.day.utc.range,tc.dayOfYear=function(n){var t=tc.year(n);return Math.floor((n-t-6e4*(n.getTimezoneOffset()-t.getTimezoneOffset()))/864e5)},["sunday","monday","tuesday","wednesday","thursday","friday","saturday"].forEach(function(n,t){t=7-t;var e=tc[n]=Rt(function(n){return(n=tc.day(n)).setDate(n.getDate()-(n.getDay()+t)%7),n},function(n,t){n.setDate(n.getDate()+7*Math.floor(t))},function(n){var e=tc.year(n).getDay();return Math.floor((tc.dayOfYear(n)+(e+t)%7)/7)-(e!==t)});tc[n+"s"]=e.range,tc[n+"s"].utc=e.utc.range,tc[n+"OfYear"]=function(n){var e=tc.year(n).getDay();return 
Math.floor((tc.dayOfYear(n)+(e+t)%7)/7)}}),tc.week=tc.sunday,tc.weeks=tc.sunday.range,tc.weeks.utc=tc.sunday.utc.range,tc.weekOfYear=tc.sundayOfYear;var uc={"-":"",_:" ",0:"0"},ic=/^\s*\d+/,oc=/^%/;Xo.locale=function(n){return{numberFormat:Tt(n),timeFormat:Pt(n)}};var ac=Xo.locale({decimal:".",thousands:",",grouping:[3],currency:["$",""],dateTime:"%a %b %e %X %Y",date:"%m/%d/%Y",time:"%H:%M:%S",periods:["AM","PM"],days:["Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"],shortDays:["Sun","Mon","Tue","Wed","Thu","Fri","Sat"],months:["January","February","March","April","May","June","July","August","September","October","November","December"],shortMonths:["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"]});Xo.format=ac.numberFormat,Xo.geo={},re.prototype={s:0,t:0,add:function(n){ue(n,this.t,cc),ue(cc.s,this.s,this),this.s?this.t+=cc.t:this.s=cc.t},reset:function(){this.s=this.t=0},valueOf:function(){return this.s}};var cc=new re;Xo.geo.stream=function(n,t){n&&sc.hasOwnProperty(n.type)?sc[n.type](n,t):ie(n,t)};var sc={Feature:function(n,t){ie(n.geometry,t)},FeatureCollection:function(n,t){for(var e=n.features,r=-1,u=e.length;++r<u;)ie(e[r].geometry,t)}},lc={Sphere:function(n,t){t.sphere()},Point:function(n,t){n=n.coordinates,t.point(n[0],n[1],n[2])},MultiPoint:function(n,t){for(var e=n.coordinates,r=-1,u=e.length;++r<u;)n=e[r],t.point(n[0],n[1],n[2])},LineString:function(n,t){oe(n.coordinates,t,0)},MultiLineString:function(n,t){for(var e=n.coordinates,r=-1,u=e.length;++r<u;)oe(e[r],t,0)},Polygon:function(n,t){ae(n.coordinates,t)},MultiPolygon:function(n,t){for(var e=n.coordinates,r=-1,u=e.length;++r<u;)ae(e[r],t)},GeometryCollection:function(n,t){for(var e=n.geometries,r=-1,u=e.length;++r<u;)ie(e[r],t)}};Xo.geo.area=function(n){return fc=0,Xo.geo.stream(n,gc),fc};var fc,hc=new re,gc={sphere:function(){fc+=4*Sa},point:g,lineStart:g,lineEnd:g,polygonStart:function(){hc.reset(),gc.lineStart=ce},polygonEnd:function(){var 
// Tail of the spherical-area stream (gc), then d3.geo.bounds / d3.geo.centroid.
n=2*hc;fc+=0>n?4*Sa+n:n,gc.lineStart=gc.lineEnd=gc.point=g}};
// d3.geo.bounds: streams a GeoJSON object and returns [[west,south],[east,north]],
// handling antimeridian wrap-around by merging candidate longitude ranges.
Xo.geo.bounds=function(){function n(n,t){x.push(M=[l=n,h=n]),f>t&&(f=t),t>g&&(g=t)}function t(t,e){var r=se([t*Na,e*Na]);if(m){var u=fe(m,r),i=[u[1],-u[0],0],o=fe(i,u);pe(o),o=ve(o);var c=t-p,s=c>0?1:-1,v=o[0]*La*s,d=oa(c)>180;if(d^(v>s*p&&s*t>v)){var y=o[1]*La;y>g&&(g=y)}else if(v=(v+360)%360-180,d^(v>s*p&&s*t>v)){var y=-o[1]*La;f>y&&(f=y)}else f>e&&(f=e),e>g&&(g=e);d?p>t?a(l,t)>a(l,h)&&(h=t):a(t,h)>a(l,h)&&(l=t):h>=l?(l>t&&(l=t),t>h&&(h=t)):t>p?a(l,t)>a(l,h)&&(h=t):a(t,h)>a(l,h)&&(l=t)}else n(t,e);m=r,p=t}function e(){_.point=t}function r(){M[0]=l,M[1]=h,_.point=n,m=null}function u(n,e){if(m){var r=n-p;y+=oa(r)>180?r+(r>0?360:-360):r}else v=n,d=e;gc.point(n,e),t(n,e)}function i(){gc.lineStart()}function o(){u(v,d),gc.lineEnd(),oa(y)>Aa&&(l=-(h=180)),M[0]=l,M[1]=h,m=null}function a(n,t){return(t-=n)<0?t+360:t}function c(n,t){return n[0]-t[0]}function s(n,t){return t[0]<=t[1]?t[0]<=n&&n<=t[1]:n<t[0]||t[1]<n}var l,f,h,g,p,v,d,m,y,x,M,_={point:n,lineStart:e,lineEnd:r,polygonStart:function(){_.point=u,_.lineStart=i,_.lineEnd=o,y=0,gc.polygonStart()},polygonEnd:function(){gc.polygonEnd(),_.point=n,_.lineStart=e,_.lineEnd=r,0>hc?(l=-(h=180),f=-(g=90)):y>Aa?g=90:-Aa>y&&(f=-90),M[0]=l,M[1]=h}};return function(n){g=h=-(l=f=1/0),x=[],Xo.geo.stream(n,_);var t=x.length;if(t){x.sort(c);for(var e,r=1,u=x[0],i=[u];t>r;++r)e=x[r],s(e[0],u)||s(e[1],u)?(a(u[0],e[1])>a(u[0],u[1])&&(u[1]=e[1]),a(e[0],u[1])>a(u[0],u[1])&&(u[0]=e[0])):i.push(u=e);for(var o,e,p=-1/0,t=i.length-1,r=0,u=i[t];t>=r;u=e,++r)e=i[r],(o=a(u[1],e[0]))>p&&(p=o,l=e[0],h=u[1])}return x=M=null,1/0===l||1/0===f?[[0/0,0/0],[0/0,0/0]]:[[l,f],[h,g]]}}(),
// d3.geo.centroid: falls back from areal to linear to point moments as magnitudes vanish.
Xo.geo.centroid=function(n){pc=vc=dc=mc=yc=xc=Mc=_c=bc=wc=Sc=0,Xo.geo.stream(n,kc);var t=bc,e=wc,r=Sc,u=t*t+e*e+r*r;return Ca>u&&(t=xc,e=Mc,r=_c,Aa>vc&&(t=dc,e=mc,r=yc),u=t*t+e*e+r*r,Ca>u)?[0/0,0/0]:[Math.atan2(e,t)*La,X(r/Math.sqrt(u))*La]};var 
pc,vc,dc,mc,yc,xc,Mc,_c,bc,wc,Sc,kc={sphere:g,point:me,lineStart:xe,lineEnd:Me,polygonStart:function(){kc.lineStart=_e},polygonEnd:function(){kc.lineStart=xe}},Ec=Ee(be,Te,ze,[-Sa,-Sa/2]),Ac=1e9;Xo.geo.clipExtent=function(){var n,t,e,r,u,i,o={stream:function(n){return u&&(u.valid=!1),u=i(n),u.valid=!0,u},extent:function(a){return arguments.length?(i=Pe(n=+a[0][0],t=+a[0][1],e=+a[1][0],r=+a[1][1]),u&&(u.valid=!1,u=null),o):[[n,t],[e,r]]}};return o.extent([[0,0],[960,500]])},(Xo.geo.conicEqualArea=function(){return je(They)}).raw=They,Xo.geo.albers=function(){return Xo.geo.conicEqualArea().rotate([96,0]).center([-.6,38.7]).parallels([29.5,45.5]).scale(1070)},Xo.geo.albersUsa=function(){function n(n){var i=n[0],o=n[1];return t=null,e(i,o),t||(r(i,o),t)||u(i,o),t}var t,e,r,u,i=Xo.geo.albers(),o=Xo.geo.conicEqualArea().rotate([154,0]).center([-2,58.5]).parallels([55,65]),a=Xo.geo.conicEqualArea().rotate([157,0]).center([-3,19.9]).parallels([8,18]),c={point:function(n,e){t=[n,e]}};return n.invert=function(n){var t=i.scale(),e=i.translate(),r=(n[0]-e[0])/t,u=(n[1]-e[1])/t;return(u>=.12&&.234>u&&r>=-.425&&-.214>r?o:u>=.166&&.234>u&&r>=-.214&&-.115>r?a:i).invert(n)},n.stream=function(n){var t=i.stream(n),e=o.stream(n),r=a.stream(n);return{point:function(n,u){t.point(n,u),e.point(n,u),r.point(n,u)},sphere:function(){t.sphere(),e.sphere(),r.sphere()},lineStart:function(){t.lineStart(),e.lineStart(),r.lineStart()},lineEnd:function(){t.lineEnd(),e.lineEnd(),r.lineEnd()},polygonStart:function(){t.polygonStart(),e.polygonStart(),r.polygonStart()},polygonEnd:function(){t.polygonEnd(),e.polygonEnd(),r.polygonEnd()}}},n.precision=function(t){return arguments.length?(i.precision(t),o.precision(t),a.precision(t),n):i.precision()},n.scale=function(t){return arguments.length?(i.scale(t),o.scale(.35*t),a.scale(t),n.translate(i.translate())):i.scale()},n.translate=function(t){if(!arguments.length)return i.translate();var s=i.scale(),l=+t[0],f=+t[1];return 
e=i.translate(t).clipExtent([[l-.455*s,f-.238*s],[l+.455*s,f+.238*s]]).stream(c).point,r=o.translate([l-.307*s,f+.201*s]).clipExtent([[l-.425*s+Aa,f+.12*s+Aa],[l-.214*s-Aa,f+.234*s-Aa]]).stream(c).point,u=a.translate([l-.205*s,f+.212*s]).clipExtent([[l-.214*s+Aa,f+.166*s+Aa],[l-.115*s-Aa,f+.234*s-Aa]]).stream(c).point,n},n.scale(1070)};var Cc,Nc,Lc,Tc,qc,zc,Rc={point:g,lineStart:g,lineEnd:g,polygonStart:function(){Nc=0,Rc.lineStart=Fe},polygonEnd:function(){Rc.lineStart=Rc.lineEnd=Rc.point=g,Cc+=oa(Nc/2)}},Dc={point:Oe,lineStart:g,lineEnd:g,polygonStart:g,polygonEnd:g},Pc={point:Ze,lineStart:Ve,lineEnd:Xe,polygonStart:function(){Pc.lineStart=$e},polygonEnd:function(){Pc.point=Ze,Pc.lineStart=Ve,Pc.lineEnd=Xe}};Xo.geo.path=function(){function n(n){return n&&("function"==typeof a&&i.pointRadius(+a.apply(this,arguments)),o&&o.valid||(o=u(i)),Xo.geo.stream(n,o)),i.result()}function t(){return o=null,n}var e,r,u,i,o,a=4.5;return n.area=function(n){return Cc=0,Xo.geo.stream(n,u(Rc)),Cc},n.centroid=function(n){return dc=mc=yc=xc=Mc=_c=bc=wc=Sc=0,Xo.geo.stream(n,u(Pc)),Sc?[bc/Sc,wc/Sc]:_c?[xc/_c,Mc/_c]:yc?[dc/yc,mc/yc]:[0/0,0/0]},n.bounds=function(n){return qc=zc=-(Lc=Tc=1/0),Xo.geo.stream(n,u(Dc)),[[Lc,Tc],[qc,zc]]},n.projection=function(n){return arguments.length?(u=(e=n)?n.stream||Je(n):bt,t()):e},n.context=function(n){return arguments.length?(i=null==(r=n)?new Ye:new Be(n),"function"!=typeof a&&i.pointRadius(a),t()):r},n.pointRadius=function(t){return arguments.length?(a="function"==typeof t?t:(i.pointRadius(+t),+t),n):a},n.projection(Xo.geo.albersUsa()).context(null)},Xo.geo.transform=function(n){return{stream:function(t){var e=new Ge(t);for(var r in n)e[r]=n[r];return 
e}}},Ge.prototype={point:function(n,t){this.stream.point(n,t)},sphere:function(){this.stream.sphere()},lineStart:function(){this.stream.lineStart()},lineEnd:function(){this.stream.lineEnd()},polygonStart:function(){this.stream.polygonStart()},polygonEnd:function(){this.stream.polygonEnd()}},Xo.geo.projection=Qe,Xo.geo.projectionMutator=nr,(Xo.geo.equirectangular=function(){return Qe(er)}).raw=er.invert=er,Xo.geo.rotation=function(n){function t(t){return t=n(t[0]*Na,t[1]*Na),t[0]*=La,t[1]*=La,t}return n=ur(n[0]%360*Na,n[1]*Na,n.length>2?n[2]*Na:0),t.invert=function(t){return t=n.invert(t[0]*Na,t[1]*Na),t[0]*=La,t[1]*=La,t},t},rr.invert=er,Xo.geo.circle=function(){function n(){var n="function"==typeof r?r.apply(this,arguments):r,t=ur(-n[0]*Na,-n[1]*Na,0).invert,u=[];return e(null,null,1,{point:function(n,e){u.push(n=t(n,e)),n[0]*=La,n[1]*=La}}),{type:"Polygon",coordinates:[u]}}var t,e,r=[0,0],u=6;return n.origin=function(t){return arguments.length?(r=t,n):r},n.angle=function(r){return arguments.length?(e=cr((t=+r)*Na,u*Na),n):t},n.precision=function(r){return arguments.length?(e=cr(t*Na,(u=+r)*Na),n):u},n.angle(90)},Xo.geo.distance=function(n,t){var e,r=(t[0]-n[0])*Na,u=n[1]*Na,i=t[1]*Na,o=Math.sin(r),a=Math.cos(r),c=Math.sin(u),s=Math.cos(u),l=Math.sin(i),f=Math.cos(i);return Math.atan2(Math.sqrt((e=f*o)*e+(e=s*l-c*f*a)*e),c*l+s*f*a)},Xo.geo.graticule=function(){function n(){return{type:"MultiLineString",coordinates:t()}}function t(){return Xo.range(Math.ceil(i/d)*d,u,d).map(h).concat(Xo.range(Math.ceil(s/m)*m,c,m).map(g)).concat(Xo.range(Math.ceil(r/p)*p,e,p).filter(function(n){return oa(n%d)>Aa}).map(l)).concat(Xo.range(Math.ceil(a/v)*v,o,v).filter(function(n){return oa(n%m)>Aa}).map(f))}var e,r,u,i,o,a,c,s,l,f,h,g,p=10,v=p,d=90,m=360,y=2.5;return n.lines=function(){return 
t().map(function(n){return{type:"LineString",coordinates:n}})},n.outline=function(){return{type:"Polygon",coordinates:[h(i).concat(g(c).slice(1),h(u).reverse().slice(1),g(s).reverse().slice(1))]}},n.extent=function(t){return arguments.length?n.majorExtent(t).minorExtent(t):n.minorExtent()},n.majorExtent=function(t){return arguments.length?(i=+t[0][0],u=+t[1][0],s=+t[0][1],c=+t[1][1],i>u&&(t=i,i=u,u=t),s>c&&(t=s,s=c,c=t),n.precision(y)):[[i,s],[u,c]]},n.minorExtent=function(t){return arguments.length?(r=+t[0][0],e=+t[1][0],a=+t[0][1],o=+t[1][1],r>e&&(t=r,r=e,e=t),a>o&&(t=a,a=o,o=t),n.precision(y)):[[r,a],[e,o]]},n.step=function(t){return arguments.length?n.majorStep(t).minorStep(t):n.minorStep()},n.majorStep=function(t){return arguments.length?(d=+t[0],m=+t[1],n):[d,m]},n.minorStep=function(t){return arguments.length?(p=+t[0],v=+t[1],n):[p,v]},n.precision=function(t){return arguments.length?(y=+t,l=lr(a,o,90),f=fr(r,e,y),h=lr(s,c,90),g=fr(i,u,y),n):y},n.majorExtent([[-180,-90+Aa],[180,90-Aa]]).minorExtent([[-180,-80-Aa],[180,80+Aa]])},Xo.geo.greatArc=function(){function n(){return{type:"LineString",coordinates:[t||r.apply(this,arguments),e||u.apply(this,arguments)]}}var t,e,r=hr,u=gr;return n.distance=function(){return Xo.geo.distance(t||r.apply(this,arguments),e||u.apply(this,arguments))},n.source=function(e){return arguments.length?(r=e,t="function"==typeof e?null:e,n):r},n.target=function(t){return arguments.length?(u=t,e="function"==typeof t?null:t,n):u},n.precision=function(){return arguments.length?n:0},n},Xo.geo.interpolate=function(n,t){return pr(n[0]*Na,n[1]*Na,t[0]*Na,t[1]*Na)},Xo.geo.length=function(n){return Uc=0,Xo.geo.stream(n,jc),Uc};var Uc,jc={sphere:g,point:g,lineStart:vr,lineEnd:g,polygonStart:g,polygonEnd:g},Hc=dr(function(n){return Math.sqrt(2/(1+n))},function(n){return 2*Math.asin(n/2)});(Xo.geo.azimuthalEqualArea=function(){return Qe(Hc)}).raw=Hc;var Fc=dr(function(n){var t=Math.acos(n);return 
t&&t/Math.sin(t)},bt);(Xo.geo.azimuthalEquidistant=function(){return Qe(Fc)}).raw=Fc,(Xo.geo.conicConformal=function(){return je(mr)}).raw=mr,(Xo.geo.conicEquidistant=function(){return je(yr)}).raw=yr;var Oc=dr(function(n){return 1/n},Math.atan);(Xo.geo.gnomonic=function(){return Qe(Oc)}).raw=Oc,xr.invert=function(n,t){return[n,2*Math.atan(Math.exp(t))-Ea]},(Xo.geo.mercator=function(){return Mr(xr)}).raw=xr;var Yc=dr(function(){return 1},Math.asin);(Xo.geo.orthographic=function(){return Qe(Yc)}).raw=Yc;var Ic=dr(function(n){return 1/(1+n)},function(n){return 2*Math.atan(n)});(Xo.geo.stereographic=function(){return Qe(Ic)}).raw=Ic,_r.invert=function(n,t){return[-t,2*Math.atan(Math.exp(n))-Ea]},(Xo.geo.transverseMercator=function(){var n=Mr(_r),t=n.center,e=n.rotate;return n.center=function(n){return n?t([-n[1],n[0]]):(n=t(),[-n[1],n[0]])},n.rotate=function(n){return n?e([n[0],n[1],n.length>2?n[2]+90:90]):(n=e(),[n[0],n[1],n[2]-90])},n.rotate([0,0])}).raw=_r,Xo.geom={},Xo.geom.hull=function(n){function t(n){if(n.length<3)return[];var t,u=_t(e),i=_t(r),o=n.length,a=[],c=[];for(t=0;o>t;t++)a.push([+u.call(this,n[t],t),+i.call(this,n[t],t),t]);for(a.sort(kr),t=0;o>t;t++)c.push([a[t][0],-a[t][1]]);var s=Sr(a),l=Sr(c),f=l[0]===s[0],h=l[l.length-1]===s[s.length-1],g=[];for(t=s.length-1;t>=0;--t)g.push(n[a[s[t]][2]]);for(t=+f;t<l.length-h;++t)g.push(n[a[l[t]][2]]);return g}var e=br,r=wr;return arguments.length?t(n):(t.x=function(n){return arguments.length?(e=n,t):e},t.y=function(n){return arguments.length?(r=n,t):r},t)},Xo.geom.polygon=function(n){return fa(n,Zc),n};var Zc=Xo.geom.polygon.prototype=[];Zc.area=function(){for(var n,t=-1,e=this.length,r=this[e-1],u=0;++t<e;)n=r,r=this[t],u+=n[1]*r[0]-n[0]*r[1];return.5*u},Zc.centroid=function(n){var t,e,r=-1,u=this.length,i=0,o=0,a=this[u-1];for(arguments.length||(n=-1/(6*this.area()));++r<u;)t=a,a=this[r],e=t[0]*a[1]-a[0]*t[1],i+=(t[0]+a[0])*e,o+=(t[1]+a[1])*e;return[i*n,o*n]},Zc.clip=function(n){for(var 
// Tail of Zc.clip (Sutherland–Hodgman polygon clipping), then the Voronoi support
// structures: cell edge preparation (Pr), half-edge endpoints (Br), and the
// red-black tree (Wr) used by the Fortune's-algorithm beach line.
t,e,r,u,i,o,a=Cr(n),c=-1,s=this.length-Cr(this),l=this[s-1];++c<s;){for(t=n.slice(),n.length=0,u=this[c],i=t[(r=t.length-a)-1],e=-1;++e<r;)o=t[e],Er(o,l,u)?(Er(i,l,u)||n.push(Ar(i,o,l,u)),n.push(o)):Er(i,l,u)&&n.push(Ar(i,o,l,u)),i=o;a&&n.push(n[0]),l=u}return n};var Vc,Xc,$c,Bc,Wc,Jc=[],Gc=[];Pr.prototype.prepare=function(){for(var n,t=this.edges,e=t.length;e--;)n=t[e].edge,n.b&&n.a||t.splice(e,1);return t.sort(jr),t.length},Br.prototype={start:function(){return this.edge.l===this.site?this.edge.a:this.edge.b},end:function(){return this.edge.l===this.site?this.edge.b:this.edge.a}},
// Wr: red-black tree. Node fields: P/N = prev/next in threaded order, L/R = children,
// U = parent, C = color flag (true = red); Gr/Kr are the rotation helpers.
Wr.prototype={insert:function(n,t){var e,r,u;if(n){if(t.P=n,t.N=n.N,n.N&&(n.N.P=t),n.N=t,n.R){for(n=n.R;n.L;)n=n.L;n.L=t}else n.R=t;e=n}else this._?(n=Qr(this._),t.P=null,t.N=n,n.P=n.L=t,e=n):(t.P=t.N=null,this._=t,e=null);for(t.L=t.R=null,t.U=e,t.C=!0,n=t;e&&e.C;)r=e.U,e===r.L?(u=r.R,u&&u.C?(e.C=u.C=!1,r.C=!0,n=r):(n===e.R&&(Gr(this,e),n=e,e=n.U),e.C=!1,r.C=!0,Kr(this,r))):(u=r.L,u&&u.C?(e.C=u.C=!1,r.C=!0,n=r):(n===e.L&&(Kr(this,e),n=e,e=n.U),e.C=!1,r.C=!0,Gr(this,r))),e=n.U;this._.C=!1},remove:function(n){n.N&&(n.N.P=n.P),n.P&&(n.P.N=n.N),n.N=n.P=null;var t,e,r,u=n.U,i=n.L,o=n.R;if(e=i?o?Qr(o):i:o,u?u.L===n?u.L=e:u.R=e:this._=e,i&&o?(r=e.C,e.C=n.C,e.L=i,i.U=e,e!==o?(u=e.U,e.U=n.U,n=e.R,u.L=n,e.R=o,o.U=e):(e.U=u,u=e,n=e.R)):(r=n.C,n=e),n&&(n.U=u),!r){if(n&&n.C)return n.C=!1,void 0;do{if(n===this._)break;if(n===u.L){if(t=u.R,t.C&&(t.C=!1,u.C=!0,Gr(this,u),t=u.R),t.L&&t.L.C||t.R&&t.R.C){t.R&&t.R.C||(t.L.C=!1,t.C=!0,Kr(this,t),t=u.R),t.C=u.C,u.C=t.R.C=!1,Gr(this,u),n=this._;break}}else if(t=u.L,t.C&&(t.C=!1,u.C=!0,Kr(this,u),t=u.L),t.L&&t.L.C||t.R&&t.R.C){t.L&&t.L.C||(t.R.C=!1,t.C=!0,Gr(this,t),t=u.L),t.C=u.C,u.C=t.L.C=!1,Kr(this,u),n=this._;break}t.C=!0,n=u,u=u.U}while(!n.C);n&&(n.C=!1)}}},
// d3.geom.voronoi: builds clipped cell polygons keyed by input point index.
Xo.geom.voronoi=function(n){function t(n){var t=new Array(n.length),r=a[0][0],u=a[0][1],i=a[1][0],o=a[1][1];return nu(e(n),a).cells.forEach(function(e,a){var 
// Tail of d3.geom.voronoi (cell -> polygon, links/triangles accessors), then
// d3.geom.delaunay and the d3.geom.quadtree builder.
c=e.edges,s=e.site,l=t[a]=c.length?c.map(function(n){var t=n.start();return[t.x,t.y]}):s.x>=r&&s.x<=i&&s.y>=u&&s.y<=o?[[r,o],[i,o],[i,u],[r,u]]:[];l.point=n[a]}),t}function e(n){return n.map(function(n,t){return{x:Math.round(i(n,t)/Aa)*Aa,y:Math.round(o(n,t)/Aa)*Aa,i:t}})}var r=br,u=wr,i=r,o=u,a=Kc;return n?t(n):(t.links=function(n){return nu(e(n)).edges.filter(function(n){return n.l&&n.r}).map(function(t){return{source:n[t.l.i],target:n[t.r.i]}})},t.triangles=function(n){var t=[];return nu(e(n)).cells.forEach(function(e,r){for(var u,i,o=e.site,a=e.edges.sort(jr),c=-1,s=a.length,l=a[s-1].edge,f=l.l===o?l.r:l.l;++c<s;)u=l,i=f,l=a[c].edge,f=l.l===o?l.r:l.l,r<i.i&&r<f.i&&eu(o,i,f)<0&&t.push([n[r],n[i.i],n[f.i]])}),t},t.x=function(n){return arguments.length?(i=_t(r=n),t):r},t.y=function(n){return arguments.length?(o=_t(u=n),t):u},t.clipExtent=function(n){return arguments.length?(a=null==n?Kc:n,t):a===Kc?null:a},t.size=function(n){return arguments.length?t.clipExtent(n&&[[0,0],n]):a===Kc?null:a&&a[1]},t)};var Kc=[[-1e6,-1e6],[1e6,1e6]];Xo.geom.delaunay=function(n){return Xo.geom.voronoi().triangles(n)},
// d3.geom.quadtree: inner `i` inserts a point, splitting leaves that already hold a
// distinct point; `s` descends into the matching quadrant; the extent is squared
// (w>S branch below) so quadrants stay similar in both axes.
Xo.geom.quadtree=function(n,t,e,r,u){function i(n){function i(n,t,e,r,u,i,o,a){if(!isNaN(e)&&!isNaN(r))if(n.leaf){var c=n.x,l=n.y;if(null!=c)if(oa(c-e)+oa(l-r)<.01)s(n,t,e,r,u,i,o,a);else{var f=n.point;n.x=n.y=n.point=null,s(n,f,c,l,u,i,o,a),s(n,t,e,r,u,i,o,a)}else n.x=e,n.y=r,n.point=t}else s(n,t,e,r,u,i,o,a)}function s(n,t,e,r,u,o,a,c){var s=.5*(u+a),l=.5*(o+c),f=e>=s,h=r>=l,g=(h<<1)+f;n.leaf=!1,n=n.nodes[g]||(n.nodes[g]=iu()),f?u=s:a=s,h?o=l:c=l,i(n,t,e,r,u,o,a,c)}var l,f,h,g,p,v,d,m,y,x=_t(a),M=_t(c);if(null!=t)v=t,d=e,m=r,y=u;else if(m=y=-(v=d=1/0),f=[],h=[],p=n.length,o)for(g=0;p>g;++g)l=n[g],l.x<v&&(v=l.x),l.y<d&&(d=l.y),l.x>m&&(m=l.x),l.y>y&&(y=l.y),f.push(l.x),h.push(l.y);else for(g=0;p>g;++g){var _=+x(l=n[g],g),b=+M(l,g);v>_&&(v=_),d>b&&(d=b),_>m&&(m=_),b>y&&(y=b),f.push(_),h.push(b)}var w=m-v,S=y-d;w>S?y=d+w:m=v+S;var 
// Tail of d3.geom.quadtree (root node with add/visit), then the interpolator suite,
// easing registry, and the DOM-backed d3.transform parser.
k=iu();if(k.add=function(n){i(k,n,+x(n,++g),+M(n,g),v,d,m,y)},k.visit=function(n){ou(n,k,v,d,m,y)},g=-1,null==t){for(;++g<p;)i(k,n[g],f[g],h[g],v,d,m,y);--g}else n.forEach(k.add);return f=h=n=l=null,k}var o,a=br,c=wr;return(o=arguments.length)?(a=ru,c=uu,3===o&&(u=e,r=t,e=t=0),i(n)):(i.x=function(n){return arguments.length?(a=n,i):a},i.y=function(n){return arguments.length?(c=n,i):c},i.extent=function(n){return arguments.length?(null==n?t=e=r=u=null:(t=+n[0][0],e=+n[0][1],r=+n[1][0],u=+n[1][1]),i):null==t?null:[[t,e],[r,u]]},i.size=function(n){return arguments.length?(null==n?t=e=r=u=null:(t=e=0,r=+n[0],u=+n[1]),i):null==t?null:[r-t,u-e]},i)},
// Interpolators: the dispatcher picks rgb for color-like strings (named colors via Va,
// or #/rgb(/hsl( prefixes), string interpolation otherwise, array/object recursion, or
// plain numeric interpolation.
Xo.interpolateRgb=au,Xo.interpolateObject=cu,Xo.interpolateNumber=su,Xo.interpolateString=lu;var Qc=/[-+]?(?:\d+\.?\d*|\.?\d+)(?:[eE][-+]?\d+)?/g;Xo.interpolate=fu,Xo.interpolators=[function(n,t){var e=typeof t;return("string"===e?Va.has(t)||/^(#|rgb\(|hsl\()/.test(t)?au:lu:t instanceof G?au:"object"===e?Array.isArray(t)?hu:cu:su)(n,t)}],Xo.interpolateArray=hu;var ns=function(){return bt},ts=Xo.map({linear:ns,poly:xu,quad:function(){return du},cubic:function(){return mu},sin:function(){return Mu},exp:function(){return _u},circle:function(){return bu},elastic:wu,back:Su,bounce:function(){return ku}}),es=Xo.map({"in":bt,out:pu,"in-out":vu,"out-in":function(n){return vu(pu(n))}});
// d3.ease("name-mode", ...args): splits on "-" into curve family and direction modifier.
Xo.ease=function(n){var t=n.indexOf("-"),e=t>=0?n.substring(0,t):n,r=t>=0?n.substring(t+1):"in";return e=ts.get(e)||ns,r=es.get(r)||bt,gu(r(e.apply(null,$o.call(arguments,1))))},Xo.interpolateHcl=Eu,Xo.interpolateHsl=Au,Xo.interpolateLab=Cu,Xo.interpolateRound=Nu,
// d3.transform: lazily replaces itself with a closure over a detached SVG <g>, letting
// the browser's own parser consolidate the transform attribute into a matrix.
Xo.transform=function(n){var t=Wo.createElementNS(Xo.ns.prefix.svg,"g");return(Xo.transform=function(n){if(null!=n){t.setAttribute("transform",n);var e=t.transform.baseVal.consolidate()}return new Lu(e?e.matrix:rs)})(n)},Lu.prototype.toString=function(){return"translate("+this.translate+")rotate("+this.rotate+")skewX("+this.skew+")scale("+this.scale+")"};var 
// Identity matrix default for d3.transform, then the layout module: bundle, chord,
// and the head of the force-directed layout.
rs={a:1,b:0,c:0,d:1,e:0,f:0};Xo.interpolateTransform=Ru,Xo.layout={},Xo.layout.bundle=function(){return function(n){for(var t=[],e=-1,r=n.length;++e<r;)t.push(Uu(n[e]));return t}},
// d3.layout.chord: computes chord/group angular spans from a square matrix, with
// optional group/subgroup/chord sort comparators and inter-group padding `l`.
Xo.layout.chord=function(){function n(){var n,s,f,h,g,p={},v=[],d=Xo.range(i),m=[];for(e=[],r=[],n=0,h=-1;++h<i;){for(s=0,g=-1;++g<i;)s+=u[h][g];v.push(s),m.push(Xo.range(i)),n+=s}for(o&&d.sort(function(n,t){return o(v[n],v[t])}),a&&m.forEach(function(n,t){n.sort(function(n,e){return a(u[t][n],u[t][e])})}),n=(ka-l*i)/n,s=0,h=-1;++h<i;){for(f=s,g=-1;++g<i;){var y=d[h],x=m[y][g],M=u[y][x],_=s,b=s+=M*n;p[y+"-"+x]={index:y,subindex:x,startAngle:_,endAngle:b,value:M}}r[y]={index:y,startAngle:f,endAngle:s,value:(s-f)/n},s+=l}for(h=-1;++h<i;)for(g=h-1;++g<i;){var w=p[h+"-"+g],S=p[g+"-"+h];(w.value||S.value)&&e.push(w.value<S.value?{source:S,target:w}:{source:w,target:S})}c&&t()}function t(){e.sort(function(n,t){return c((n.source.value+n.target.value)/2,(t.source.value+t.target.value)/2)})}var e,r,u,i,o,a,c,s={},l=0;return s.matrix=function(n){return arguments.length?(i=(u=n)&&u.length,e=r=null,s):u},s.padding=function(n){return arguments.length?(l=n,e=r=null,s):l},s.sortGroups=function(n){return arguments.length?(o=n,e=r=null,s):o},s.sortSubgroups=function(n){return arguments.length?(a=n,e=null,s):a},s.sortChords=function(n){return arguments.length?(c=n,e&&t(),s):c},s.chords=function(){return e||n(),e},s.groups=function(){return r||n(),r},s},
// d3.layout.force: inner `n(node)` builds the Barnes–Hut quadtree visitor for charge
// forces (theta^2 in `d`, chargeDistance^2 in `p`); `t` is the drag handler.
Xo.layout.force=function(){function n(n){return function(t,e,r,u){if(t.point!==n){var i=t.cx-n.x,o=t.cy-n.y,a=u-e,c=i*i+o*o;if(c>a*a/d){if(p>c){var s=t.charge/c;n.px-=i*s,n.py-=o*s}return!0}if(t.point&&c&&p>c){var s=t.pointCharge/c;n.px-=i*s,n.py-=o*s}}return!t.charge}}function t(n){n.px=Xo.event.x,n.py=Xo.event.y,a.resume()}var e,r,u,i,o,a={},c=Xo.dispatch("start","tick","end"),s=[1,1],l=.9,f=us,h=is,g=-30,p=os,v=.1,d=.64,m=[],y=[];return a.tick=function(){if((r*=.99)<.005)return c.end({type:"end",alpha:r=0}),!0;var 
// Body of force.tick: link (spring) forces, gravity toward the center, Barnes–Hut
// charge repulsion via quadtree visit, then Verlet-style position integration with
// friction; followed by the chainable accessors and force.start's position seeding.
t,e,a,f,h,p,d,x,M,_=m.length,b=y.length;for(e=0;b>e;++e)a=y[e],f=a.source,h=a.target,x=h.x-f.x,M=h.y-f.y,(p=x*x+M*M)&&(p=r*i[e]*((p=Math.sqrt(p))-u[e])/p,x*=p,M*=p,h.x-=x*(d=f.weight/(h.weight+f.weight)),h.y-=M*d,f.x+=x*(d=1-d),f.y+=M*d);if((d=r*v)&&(x=s[0]/2,M=s[1]/2,e=-1,d))for(;++e<_;)a=m[e],a.x+=(x-a.x)*d,a.y+=(M-a.y)*d;if(g)for(Zu(t=Xo.geom.quadtree(m),r,o),e=-1;++e<_;)(a=m[e]).fixed||t.visit(n(a));for(e=-1;++e<_;)a=m[e],a.fixed?(a.x=a.px,a.y=a.py):(a.x-=(a.px-(a.px=a.x))*l,a.y-=(a.py-(a.py=a.y))*l);c.tick({type:"tick",alpha:r})},a.nodes=function(n){return arguments.length?(m=n,a):m},a.links=function(n){return arguments.length?(y=n,a):y},a.size=function(n){return arguments.length?(s=n,a):s},a.linkDistance=function(n){return arguments.length?(f="function"==typeof n?n:+n,a):f},a.distance=a.linkDistance,a.linkStrength=function(n){return arguments.length?(h="function"==typeof n?n:+n,a):h},a.friction=function(n){return arguments.length?(l=+n,a):l},a.charge=function(n){return arguments.length?(g="function"==typeof n?n:+n,a):g},a.chargeDistance=function(n){return arguments.length?(p=n*n,a):Math.sqrt(p)},a.gravity=function(n){return arguments.length?(v=+n,a):v},a.theta=function(n){return arguments.length?(d=n*n,a):Math.sqrt(d)},
// alpha(n): setting a positive alpha (re)starts the simulation via Xo.timer(a.tick).
a.alpha=function(n){return arguments.length?(n=+n,r?r=n>0?n:0:n>0&&(c.start({type:"start",alpha:r=n}),Xo.timer(a.tick)),a):r},
// force.start: resolves numeric link endpoints to node objects, computes weights, and
// seeds missing coordinates from linked neighbors (inner `n`) or at random.
a.start=function(){function n(n,r){if(!e){for(e=new Array(c),a=0;c>a;++a)e[a]=[];for(a=0;s>a;++a){var u=y[a];e[u.source.index].push(u.target),e[u.target.index].push(u.source)}}for(var i,o=e[t],a=-1,s=o.length;++a<s;)if(!isNaN(i=o[a][n]))return i;return Math.random()*r}var t,e,r,c=m.length,l=y.length,p=s[0],v=s[1];for(t=0;c>t;++t)(r=m[t]).index=t,r.weight=0;for(t=0;l>t;++t)r=y[t],"number"==typeof r.source&&(r.source=m[r.source]),"number"==typeof 
// Tail of force.start (materialize per-link distance/strength and per-node charge
// arrays, then resume), force.drag wiring, and the hierarchy/partition layouts.
r.target&&(r.target=m[r.target]),++r.source.weight,++r.target.weight;for(t=0;c>t;++t)r=m[t],isNaN(r.x)&&(r.x=n("x",p)),isNaN(r.y)&&(r.y=n("y",v)),isNaN(r.px)&&(r.px=r.x),isNaN(r.py)&&(r.py=r.y);if(u=[],"function"==typeof f)for(t=0;l>t;++t)u[t]=+f.call(this,y[t],t);else for(t=0;l>t;++t)u[t]=f;if(i=[],"function"==typeof h)for(t=0;l>t;++t)i[t]=+h.call(this,y[t],t);else for(t=0;l>t;++t)i[t]=h;if(o=[],"function"==typeof g)for(t=0;c>t;++t)o[t]=+g.call(this,m[t],t);else for(t=0;c>t;++t)o[t]=g;return a.resume()},a.resume=function(){return a.alpha(.1)},a.stop=function(){return a.alpha(0)},a.drag=function(){return e||(e=Xo.behavior.drag().origin(bt).on("dragstart.force",Fu).on("drag.force",t).on("dragend.force",Ou)),arguments.length?(this.on("mouseover.force",Yu).on("mouseout.force",Iu).call(e),void 0):e},Xo.rebind(a,c,"on")};var us=20,is=1,os=1/0;
// d3.layout.hierarchy: `n` recursively flattens a root into `a` while assigning depth,
// children, parent links and (optionally) accumulated values; `t` re-sums values.
Xo.layout.hierarchy=function(){function n(t,o,a){var c=u.call(e,t,o);if(t.depth=o,a.push(t),c&&(s=c.length)){for(var s,l,f=-1,h=t.children=new Array(s),g=0,p=o+1;++f<s;)l=h[f]=n(c[f],p,a),l.parent=t,g+=l.value;r&&h.sort(r),i&&(t.value=g)}else delete t.children,i&&(t.value=+i.call(e,t,o)||0);return t}function t(n,r){var u=n.children,o=0;if(u&&(a=u.length))for(var a,c=-1,s=r+1;++c<a;)o+=t(u[c],s);else i&&(o=+i.call(e,n,r)||0);return i&&(n.value=o),o}function e(t){var e=[];return n(t,0,e),e}var r=Bu,u=Xu,i=$u;return e.sort=function(n){return arguments.length?(r=n,e):r},e.children=function(n){return arguments.length?(u=n,e):u},e.value=function(n){return arguments.length?(i=n,e):i},e.revalue=function(n){return t(n,0),n},e},
// d3.layout.partition: x spans proportional to value, y = depth * row height.
Xo.layout.partition=function(){function n(t,e,r,u){var i=t.children;if(t.x=e,t.y=t.depth*u,t.dx=r,t.dy=u,i&&(o=i.length)){var o,a,c,s=-1;for(r=t.value?r/t.value:0;++s<o;)n(a=i[s],e,c=a.value*r,u),e+=c}}function t(n){var e=n.children,r=0;if(e&&(u=e.length))for(var u,i=-1;++i<u;)r=Math.max(r,t(e[i]));return 1+r}function e(e,i){var o=r.call(this,e,i);return n(o[0],0,u[0],u[1]/t(o[0])),o}var 
// Tail of d3.layout.partition, then d3.layout.pie and d3.layout.stack with its
// ordering strategies ("inside-out", "reverse", "default").
r=Xo.layout.hierarchy(),u=[1,1];return e.size=function(n){return arguments.length?(u=n,e):u},Vu(e,r)},
// d3.layout.pie: maps data to {data,value,startAngle,endAngle} arcs; default sort
// (the `as` sentinel) is descending by value.
Xo.layout.pie=function(){function n(i){var o=i.map(function(e,r){return+t.call(n,e,r)}),a=+("function"==typeof r?r.apply(this,arguments):r),c=(("function"==typeof u?u.apply(this,arguments):u)-a)/Xo.sum(o),s=Xo.range(i.length);null!=e&&s.sort(e===as?function(n,t){return o[t]-o[n]}:function(n,t){return e(i[n],i[t])});var l=[];return s.forEach(function(n){var t;l[n]={data:i[n],value:t=o[n],startAngle:a,endAngle:a+=t*c}}),l}var t=Number,e=as,r=0,u=ka;return n.value=function(e){return arguments.length?(t=e,n):t},n.sort=function(t){return arguments.length?(e=t,n):e},n.startAngle=function(t){return arguments.length?(r=t,n):r},n.endAngle=function(t){return arguments.length?(u=t,n):u},n};var as={};
// d3.layout.stack: permutes series by the order strategy, computes baselines via the
// offset strategy, then writes stacked y0 values through the `out` setter.
Xo.layout.stack=function(){function n(a,c){var s=a.map(function(e,r){return t.call(n,e,r)}),l=s.map(function(t){return t.map(function(t,e){return[i.call(n,t,e),o.call(n,t,e)]})}),f=e.call(n,l,c);s=Xo.permute(s,f),l=Xo.permute(l,f);var h,g,p,v=r.call(n,l,c),d=s.length,m=s[0].length;for(g=0;m>g;++g)for(u.call(n,s[0][g],p=v[g],l[0][g][1]),h=1;d>h;++h)u.call(n,s[h][g],p+=l[h-1][g][1],l[h][g][1]);return a}var t=bt,e=Qu,r=ni,u=Ku,i=Ju,o=Gu;return n.values=function(e){return arguments.length?(t=e,n):t},n.order=function(t){return arguments.length?(e="function"==typeof t?t:cs.get(t)||Qu,n):e},n.offset=function(t){return arguments.length?(r="function"==typeof t?t:ss.get(t)||ni,n):r},n.x=function(t){return arguments.length?(i=t,n):i},n.y=function(t){return arguments.length?(o=t,n):o},n.out=function(t){return arguments.length?(u=t,n):u},n};var cs=Xo.map({"inside-out":function(n){var t,e,r=n.length,u=n.map(ti),i=n.map(ei),o=Xo.range(r).sort(function(n,t){return u[n]-u[t]}),a=0,c=0,s=[],l=[];for(t=0;r>t;++t)e=o[t],c>a?(a+=i[e],s.push(e)):(c+=i[e],l.push(e));return l.reverse().concat(s)},reverse:function(n){return Xo.range(n.length).reverse()},"default":Qu}),ss=Xo.map({silhouette:function(n){var 
// Stack offset strategies (silhouette / wiggle / expand / zero), d3.layout.histogram,
// and the head of d3.layout.tree (Buchheim–Jünger–Leipert first walk).
t,e,r,u=n.length,i=n[0].length,o=[],a=0,c=[];for(e=0;i>e;++e){for(t=0,r=0;u>t;t++)r+=n[t][e][1];r>a&&(a=r),o.push(r)}for(e=0;i>e;++e)c[e]=(a-o[e])/2;return c},wiggle:function(n){var t,e,r,u,i,o,a,c,s,l=n.length,f=n[0],h=f.length,g=[];for(g[0]=c=s=0,e=1;h>e;++e){for(t=0,u=0;l>t;++t)u+=n[t][e][1];for(t=0,i=0,a=f[e][0]-f[e-1][0];l>t;++t){for(r=0,o=(n[t][e][1]-n[t][e-1][1])/(2*a);t>r;++r)o+=(n[r][e][1]-n[r][e-1][1])/a;i+=o*n[t][e][1]}g[e]=c-=u?i/u*a:0,s>c&&(s=c)}for(e=0;h>e;++e)g[e]-=s;return g},expand:function(n){var t,e,r,u=n.length,i=n[0].length,o=1/u,a=[];for(e=0;i>e;++e){for(t=0,r=0;u>t;t++)r+=n[t][e][1];if(r)for(t=0;u>t;t++)n[t][e][1]/=r;else for(t=0;u>t;t++)n[t][e][1]=o}for(e=0;i>e;++e)a[e]=0;return a},zero:ni});
// d3.layout.histogram: bins values into `range`, counting frequency or (when
// frequency(false)) probability 1/n per value.
Xo.layout.histogram=function(){function n(n,i){for(var o,a,c=[],s=n.map(e,this),l=r.call(this,s,i),f=u.call(this,l,s,i),i=-1,h=s.length,g=f.length-1,p=t?1:1/h;++i<g;)o=c[i]=[],o.dx=f[i+1]-(o.x=f[i]),o.y=0;if(g>0)for(i=-1;++i<h;)a=s[i],a>=l[0]&&a<=l[1]&&(o=c[Xo.bisect(f,a,1,g)-1],o.y+=p,o.push(n[i]));return c}var t=!0,e=Number,r=oi,u=ui;return n.value=function(t){return arguments.length?(e=t,n):e},n.range=function(t){return arguments.length?(r=_t(t),n):r},n.bins=function(t){return arguments.length?(u="number"==typeof t?function(n){return ii(n,t)}:_t(t),n):u},n.frequency=function(e){return arguments.length?(t=!!e,n):t},n},
// d3.layout.tree: `o` is the bottom-up first walk setting prelim/mod on each node's
// `_tree` record; `a` the top-down second walk; `c` (continued on the next physical
// line) resolves subtree conflicts via the ancestor/thread machinery.
Xo.layout.tree=function(){function n(n,i){function o(n,t){var r=n.children,u=n._tree;if(r&&(i=r.length)){for(var i,a,s,l=r[0],f=l,h=-1;++h<i;)s=r[h],o(s,a),f=c(s,a,f),a=s;vi(n);var g=.5*(l._tree.prelim+s._tree.prelim);t?(u.prelim=t._tree.prelim+e(n,t),u.mod=u.prelim-g):u.prelim=g}else t&&(u.prelim=t._tree.prelim+e(n,t))}function a(n,t){n.x=n._tree.prelim+t;var e=n.children;if(e&&(r=e.length)){var r,u=-1;for(t+=n._tree.mod;++u<r;)a(e[u],t)}}function c(n,t,r){if(t){for(var 
u,i=n,o=n,a=t,c=n.parent.children[0],s=i._tree.mod,l=o._tree.mod,f=a._tree.mod,h=c._tree.mod;a=si(a),i=ci(i),a&&i;)c=ci(c),o=si(o),o._tree.ancestor=n,u=a._tree.prelim+f-i._tree.prelim-s+e(a,i),u>0&&(di(mi(a,n,r),n,u),s+=u,l+=u),f+=a._tree.mod,s+=i._tree.mod,h+=c._tree.mod,l+=o._tree.mod;a&&!si(o)&&(o._tree.thread=a,o._tree.mod+=f-l),i&&!ci(c)&&(c._tree.thread=i,c._tree.mod+=s-h,r=n)}return r}var s=t.call(this,n,i),l=s[0];pi(l,function(n,t){n._tree={ancestor:n,prelim:0,mod:0,change:0,shift:0,number:t?t._tree.number+1:0}}),o(l),a(l,-l._tree.prelim);var f=li(l,hi),h=li(l,fi),g=li(l,gi),p=f.x-e(f,h)/2,v=h.x+e(h,f)/2,d=g.depth||1;return pi(l,u?function(n){n.x*=r[0],n.y=n.depth*r[1],delete n._tree}:function(n){n.x=(n.x-p)/(v-p)*r[0],n.y=n.depth/d*r[1],delete n._tree}),s}var t=Xo.layout.hierarchy().sort(null).value(null),e=ai,r=[1,1],u=!1;return n.separation=function(t){return arguments.length?(e=t,n):e},n.size=function(t){return arguments.length?(u=null==(r=t),n):u?null:r},n.nodeSize=function(t){return arguments.length?(u=null!=(r=t),n):u?r:null},Vu(n,t)},Xo.layout.pack=function(){function n(n,i){var o=e.call(this,n,i),a=o[0],c=u[0],s=u[1],l=null==t?Math.sqrt:"function"==typeof t?t:function(){return t};if(a.x=a.y=0,pi(a,function(n){n.r=+l(n.value)}),pi(a,bi),r){var f=r*(t?1:Math.max(2*a.r/c,2*a.r/s))/2;pi(a,function(n){n.r+=f}),pi(a,bi),pi(a,function(n){n.r-=f})}return ki(a,c/2,s/2,t?1:1/Math.max(2*a.r/c,2*a.r/s)),o}var t,e=Xo.layout.hierarchy().sort(yi),r=0,u=[1,1];return n.size=function(t){return arguments.length?(u=t,n):u},n.radius=function(e){return arguments.length?(t=null==e||"function"==typeof e?e:+e,n):t},n.padding=function(t){return arguments.length?(r=+t,n):r},Vu(n,e)},Xo.layout.cluster=function(){function n(n,i){var o,a=t.call(this,n,i),c=a[0],s=0;pi(c,function(n){var t=n.children;t&&t.length?(n.x=Ci(t),n.y=Ai(t)):(n.x=o?s+=e(n,o):0,n.y=0,o=n)});var l=Ni(c),f=Li(c),h=l.x-e(l,f)/2,g=f.x+e(f,l)/2;return 
pi(c,u?function(n){n.x=(n.x-c.x)*r[0],n.y=(c.y-n.y)*r[1]}:function(n){n.x=(n.x-h)/(g-h)*r[0],n.y=(1-(c.y?n.y/c.y:1))*r[1]}),a}var t=Xo.layout.hierarchy().sort(null).value(null),e=ai,r=[1,1],u=!1;return n.separation=function(t){return arguments.length?(e=t,n):e},n.size=function(t){return arguments.length?(u=null==(r=t),n):u?null:r},n.nodeSize=function(t){return arguments.length?(u=null!=(r=t),n):u?r:null},Vu(n,t)},Xo.layout.treemap=function(){function n(n,t){for(var e,r,u=-1,i=n.length;++u<i;)r=(e=n[u]).value*(0>t?0:t),e.area=isNaN(r)||0>=r?0:r}function t(e){var i=e.children;if(i&&i.length){var o,a,c,s=f(e),l=[],h=i.slice(),p=1/0,v="slice"===g?s.dx:"dice"===g?s.dy:"slice-dice"===g?1&e.depth?s.dy:s.dx:Math.min(s.dx,s.dy);for(n(h,s.dx*s.dy/e.value),l.area=0;(c=h.length)>0;)l.push(o=h[c-1]),l.area+=o.area,"squarify"!==g||(a=r(l,v))<=p?(h.pop(),p=a):(l.area-=l.pop().area,u(l,v,s,!1),v=Math.min(s.dx,s.dy),l.length=l.area=0,p=1/0);l.length&&(u(l,v,s,!0),l.length=l.area=0),i.forEach(t)}}function e(t){var r=t.children;if(r&&r.length){var i,o=f(t),a=r.slice(),c=[];for(n(a,o.dx*o.dy/t.value),c.area=0;i=a.pop();)c.push(i),c.area+=i.area,null!=i.z&&(u(c,i.z?o.dx:o.dy,o,!a.length),c.length=c.area=0);r.forEach(e)}}function r(n,t){for(var e,r=n.area,u=0,i=1/0,o=-1,a=n.length;++o<a;)(e=n[o].area)&&(i>e&&(i=e),e>u&&(u=e));return r*=r,t*=t,r?Math.max(t*u*p/r,r/(t*i*p)):1/0}function u(n,t,e,r){var u,i=-1,o=n.length,a=e.x,s=e.y,l=t?c(n.area/t):0;if(t==e.dx){for((r||l>e.dy)&&(l=e.dy);++i<o;)u=n[i],u.x=a,u.y=s,u.dy=l,a+=u.dx=Math.min(e.x+e.dx-a,l?c(u.area/l):0);u.z=!0,u.dx+=e.x+e.dx-a,e.y+=l,e.dy-=l}else{for((r||l>e.dx)&&(l=e.dx);++i<o;)u=n[i],u.x=a,u.y=s,u.dx=l,s+=u.dy=Math.min(e.y+e.dy-s,l?c(u.area/l):0);u.z=!1,u.dy+=e.y+e.dy-s,e.x+=l,e.dx-=l}}function i(r){var u=o||a(r),i=u[0];return i.x=0,i.y=0,i.dx=s[0],i.dy=s[1],o&&a.revalue(i),n([i],i.dx*i.dy/i.value),(o?e:t)(i),h&&(o=u),u}var o,a=Xo.layout.hierarchy(),c=Math.round,s=[1,1],l=null,f=Ti,h=!1,g="squarify",p=.5*(1+Math.sqrt(5));return 
i.size=function(n){return arguments.length?(s=n,i):s},i.padding=function(n){function t(t){var e=n.call(i,t,t.depth);return null==e?Ti(t):qi(t,"number"==typeof e?[e,e,e,e]:e)}function e(t){return qi(t,n)}if(!arguments.length)return l;var r;return f=null==(l=n)?Ti:"function"==(r=typeof n)?t:"number"===r?(n=[n,n,n,n],e):e,i},i.round=function(n){return arguments.length?(c=n?Math.round:Number,i):c!=Number},i.sticky=function(n){return arguments.length?(h=n,o=null,i):h},i.ratio=function(n){return arguments.length?(p=n,i):p},i.mode=function(n){return arguments.length?(g=n+"",i):g},Vu(i,a)},Xo.random={normal:function(n,t){var e=arguments.length;return 2>e&&(t=1),1>e&&(n=0),function(){var e,r,u;do e=2*Math.random()-1,r=2*Math.random()-1,u=e*e+r*r;while(!u||u>1);return n+t*e*Math.sqrt(-2*Math.log(u)/u)}},logNormal:function(){var n=Xo.random.normal.apply(Xo,arguments);return function(){return Math.exp(n())}},bates:function(n){var t=Xo.random.irwinHall(n);return function(){return t()/n}},irwinHall:function(n){return function(){for(var t=0,e=0;n>e;e++)t+=Math.random();return t}}},Xo.scale={};var ls={floor:bt,ceil:bt};Xo.scale.linear=function(){return Hi([0,1],[0,1],fu,!1)};var fs={s:1,g:1,p:1,r:1,e:1};Xo.scale.log=function(){return $i(Xo.scale.linear().domain([0,1]),10,!0,[1,10])};var hs=Xo.format(".0e"),gs={floor:function(n){return-Math.ceil(-n)},ceil:function(n){return-Math.floor(-n)}};Xo.scale.pow=function(){return Bi(Xo.scale.linear(),1,[0,1])},Xo.scale.sqrt=function(){return Xo.scale.pow().exponent(.5)},Xo.scale.ordinal=function(){return Ji([],{t:"range",a:[[]]})},Xo.scale.category10=function(){return Xo.scale.ordinal().range(ps)},Xo.scale.category20=function(){return Xo.scale.ordinal().range(vs)},Xo.scale.category20b=function(){return Xo.scale.ordinal().range(ds)},Xo.scale.category20c=function(){return Xo.scale.ordinal().range(ms)};var 
ps=[2062260,16744206,2924588,14034728,9725885,9197131,14907330,8355711,12369186,1556175].map(ht),vs=[2062260,11454440,16744206,16759672,2924588,10018698,14034728,16750742,9725885,12955861,9197131,12885140,14907330,16234194,8355711,13092807,12369186,14408589,1556175,10410725].map(ht),ds=[3750777,5395619,7040719,10264286,6519097,9216594,11915115,13556636,9202993,12426809,15186514,15190932,8666169,11356490,14049643,15177372,8077683,10834324,13528509,14589654].map(ht),ms=[3244733,7057110,10406625,13032431,15095053,16616764,16625259,16634018,3253076,7652470,10607003,13101504,7695281,10394312,12369372,14342891,6513507,9868950,12434877,14277081].map(ht);Xo.scale.quantile=function(){return Gi([],[])},Xo.scale.quantize=function(){return Ki(0,1,[0,1])},Xo.scale.threshold=function(){return Qi([.5],[0,1])},Xo.scale.identity=function(){return no([0,1])},Xo.svg={},Xo.svg.arc=function(){function n(){var n=t.apply(this,arguments),i=e.apply(this,arguments),o=r.apply(this,arguments)+ys,a=u.apply(this,arguments)+ys,c=(o>a&&(c=o,o=a,a=c),a-o),s=Sa>c?"0":"1",l=Math.cos(o),f=Math.sin(o),h=Math.cos(a),g=Math.sin(a);return c>=xs?n?"M0,"+i+"A"+i+","+i+" 0 1,1 0,"+-i+"A"+i+","+i+" 0 1,1 0,"+i+"M0,"+n+"A"+n+","+n+" 0 1,0 0,"+-n+"A"+n+","+n+" 0 1,0 0,"+n+"Z":"M0,"+i+"A"+i+","+i+" 0 1,1 0,"+-i+"A"+i+","+i+" 0 1,1 0,"+i+"Z":n?"M"+i*l+","+i*f+"A"+i+","+i+" 0 "+s+",1 "+i*h+","+i*g+"L"+n*h+","+n*g+"A"+n+","+n+" 0 "+s+",0 "+n*l+","+n*f+"Z":"M"+i*l+","+i*f+"A"+i+","+i+" 0 "+s+",1 "+i*h+","+i*g+"L0,0"+"Z"}var t=to,e=eo,r=ro,u=uo;return n.innerRadius=function(e){return arguments.length?(t=_t(e),n):t},n.outerRadius=function(t){return arguments.length?(e=_t(t),n):e},n.startAngle=function(t){return arguments.length?(r=_t(t),n):r},n.endAngle=function(t){return arguments.length?(u=_t(t),n):u},n.centroid=function(){var n=(t.apply(this,arguments)+e.apply(this,arguments))/2,i=(r.apply(this,arguments)+u.apply(this,arguments))/2+ys;return[Math.cos(i)*n,Math.sin(i)*n]},n};var 
ys=-Ea,xs=ka-Aa;Xo.svg.line=function(){return io(bt)};var Ms=Xo.map({linear:oo,"linear-closed":ao,step:co,"step-before":so,"step-after":lo,basis:mo,"basis-open":yo,"basis-closed":xo,bundle:Mo,cardinal:go,"cardinal-open":fo,"cardinal-closed":ho,monotone:Eo});Ms.forEach(function(n,t){t.key=n,t.closed=/-closed$/.test(n)});var _s=[0,2/3,1/3,0],bs=[0,1/3,2/3,0],ws=[0,1/6,2/3,1/6];Xo.svg.line.radial=function(){var n=io(Ao);return n.radius=n.x,delete n.x,n.angle=n.y,delete n.y,n},so.reverse=lo,lo.reverse=so,Xo.svg.area=function(){return Co(bt)},Xo.svg.area.radial=function(){var n=Co(Ao);return n.radius=n.x,delete n.x,n.innerRadius=n.x0,delete n.x0,n.outerRadius=n.x1,delete n.x1,n.angle=n.y,delete n.y,n.startAngle=n.y0,delete n.y0,n.endAngle=n.y1,delete n.y1,n},Xo.svg.chord=function(){function n(n,a){var c=t(this,i,n,a),s=t(this,o,n,a);return"M"+c.p0+r(c.r,c.p1,c.a1-c.a0)+(e(c,s)?u(c.r,c.p1,c.r,c.p0):u(c.r,c.p1,s.r,s.p0)+r(s.r,s.p1,s.a1-s.a0)+u(s.r,s.p1,c.r,c.p0))+"Z"}function t(n,t,e,r){var u=t.call(n,e,r),i=a.call(n,u,r),o=c.call(n,u,r)+ys,l=s.call(n,u,r)+ys;return{r:i,a0:o,a1:l,p0:[i*Math.cos(o),i*Math.sin(o)],p1:[i*Math.cos(l),i*Math.sin(l)]}}function e(n,t){return n.a0==t.a0&&n.a1==t.a1}function r(n,t,e){return"A"+n+","+n+" 0 "+ +(e>Sa)+",1 "+t}function u(n,t,e,r){return"Q 0,0 "+r}var i=hr,o=gr,a=No,c=ro,s=uo;return n.radius=function(t){return arguments.length?(a=_t(t),n):a},n.source=function(t){return arguments.length?(i=_t(t),n):i},n.target=function(t){return arguments.length?(o=_t(t),n):o},n.startAngle=function(t){return arguments.length?(c=_t(t),n):c},n.endAngle=function(t){return arguments.length?(s=_t(t),n):s},n},Xo.svg.diagonal=function(){function n(n,u){var i=t.call(this,n,u),o=e.call(this,n,u),a=(i.y+o.y)/2,c=[i,{x:i.x,y:a},{x:o.x,y:a},o];return c=c.map(r),"M"+c[0]+"C"+c[1]+" "+c[2]+" "+c[3]}var t=hr,e=gr,r=Lo;return n.source=function(e){return arguments.length?(t=_t(e),n):t},n.target=function(t){return 
arguments.length?(e=_t(t),n):e},n.projection=function(t){return arguments.length?(r=t,n):r},n},Xo.svg.diagonal.radial=function(){var n=Xo.svg.diagonal(),t=Lo,e=n.projection;return n.projection=function(n){return arguments.length?e(To(t=n)):t},n},Xo.svg.symbol=function(){function n(n,r){return(Ss.get(t.call(this,n,r))||Ro)(e.call(this,n,r))}var t=zo,e=qo;return n.type=function(e){return arguments.length?(t=_t(e),n):t},n.size=function(t){return arguments.length?(e=_t(t),n):e},n};var Ss=Xo.map({circle:Ro,cross:function(n){var t=Math.sqrt(n/5)/2;return"M"+-3*t+","+-t+"H"+-t+"V"+-3*t+"H"+t+"V"+-t+"H"+3*t+"V"+t+"H"+t+"V"+3*t+"H"+-t+"V"+t+"H"+-3*t+"Z"},diamond:function(n){var t=Math.sqrt(n/(2*Cs)),e=t*Cs;return"M0,"+-t+"L"+e+",0"+" 0,"+t+" "+-e+",0"+"Z"},square:function(n){var t=Math.sqrt(n)/2;return"M"+-t+","+-t+"L"+t+","+-t+" "+t+","+t+" "+-t+","+t+"Z"},"triangle-down":function(n){var t=Math.sqrt(n/As),e=t*As/2;return"M0,"+e+"L"+t+","+-e+" "+-t+","+-e+"Z"},"triangle-up":function(n){var t=Math.sqrt(n/As),e=t*As/2;return"M0,"+-e+"L"+t+","+e+" "+-t+","+e+"Z"}});Xo.svg.symbolTypes=Ss.keys();var ks,Es,As=Math.sqrt(3),Cs=Math.tan(30*Na),Ns=[],Ls=0;Ns.call=da.call,Ns.empty=da.empty,Ns.node=da.node,Ns.size=da.size,Xo.transition=function(n){return arguments.length?ks?n.transition():n:xa.transition()},Xo.transition.prototype=Ns,Ns.select=function(n){var t,e,r,u=this.id,i=[];n=M(n);for(var o=-1,a=this.length;++o<a;){i.push(t=[]);for(var c=this[o],s=-1,l=c.length;++s<l;)(r=c[s])&&(e=n.call(r,r.__data__,s,o))?("__data__"in r&&(e.__data__=r.__data__),jo(e,s,u,r.__transition__[u]),t.push(e)):t.push(null)}return Do(i,u)},Ns.selectAll=function(n){var t,e,r,u,i,o=this.id,a=[];n=_(n);for(var c=-1,s=this.length;++c<s;)for(var l=this[c],f=-1,h=l.length;++f<h;)if(r=l[f]){i=r.__transition__[o],e=n.call(r,r.__data__,f,c),a.push(t=[]);for(var g=-1,p=e.length;++g<p;)(u=e[g])&&jo(u,g,o,i),t.push(u)}return Do(a,o)},Ns.filter=function(n){var t,e,r,u=[];"function"!=typeof n&&(n=q(n));for(var 
i=0,o=this.length;o>i;i++){u.push(t=[]);for(var e=this[i],a=0,c=e.length;c>a;a++)(r=e[a])&&n.call(r,r.__data__,a,i)&&t.push(r)}return Do(u,this.id)},Ns.tween=function(n,t){var e=this.id;return arguments.length<2?this.node().__transition__[e].tween.get(n):R(this,null==t?function(t){t.__transition__[e].tween.remove(n)}:function(r){r.__transition__[e].tween.set(n,t)})},Ns.attr=function(n,t){function e(){this.removeAttribute(a)}function r(){this.removeAttributeNS(a.space,a.local)}function u(n){return null==n?e:(n+="",function(){var t,e=this.getAttribute(a);return e!==n&&(t=o(e,n),function(n){this.setAttribute(a,t(n))})})}function i(n){return null==n?r:(n+="",function(){var t,e=this.getAttributeNS(a.space,a.local);return e!==n&&(t=o(e,n),function(n){this.setAttributeNS(a.space,a.local,t(n))})})}if(arguments.length<2){for(t in n)this.attr(t,n[t]);return this}var o="transform"==n?Ru:fu,a=Xo.ns.qualify(n);return Po(this,"attr."+n,t,a.local?i:u)},Ns.attrTween=function(n,t){function e(n,e){var r=t.call(this,n,e,this.getAttribute(u));return r&&function(n){this.setAttribute(u,r(n))}}function r(n,e){var r=t.call(this,n,e,this.getAttributeNS(u.space,u.local));return r&&function(n){this.setAttributeNS(u.space,u.local,r(n))}}var u=Xo.ns.qualify(n);return this.tween("attr."+n,u.local?r:e)},Ns.style=function(n,t,e){function r(){this.style.removeProperty(n)}function u(t){return null==t?r:(t+="",function(){var r,u=Go.getComputedStyle(this,null).getPropertyValue(n);return u!==t&&(r=fu(u,t),function(t){this.style.setProperty(n,r(t),e)})})}var i=arguments.length;if(3>i){if("string"!=typeof n){2>i&&(t="");for(e in n)this.style(e,n[e],t);return this}e=""}return Po(this,"style."+n,t,u)},Ns.styleTween=function(n,t,e){function r(r,u){var i=t.call(this,r,u,Go.getComputedStyle(this,null).getPropertyValue(n));return i&&function(t){this.style.setProperty(n,i(t),e)}}return arguments.length<3&&(e=""),this.tween("style."+n,r)},Ns.text=function(n){return 
Po(this,"text",n,Uo)},Ns.remove=function(){return this.each("end.transition",function(){var n;this.__transition__.count<2&&(n=this.parentNode)&&n.removeChild(this)})},Ns.ease=function(n){var t=this.id;return arguments.length<1?this.node().__transition__[t].ease:("function"!=typeof n&&(n=Xo.ease.apply(Xo,arguments)),R(this,function(e){e.__transition__[t].ease=n}))},Ns.delay=function(n){var t=this.id;return R(this,"function"==typeof n?function(e,r,u){e.__transition__[t].delay=+n.call(e,e.__data__,r,u)}:(n=+n,function(e){e.__transition__[t].delay=n}))},Ns.duration=function(n){var t=this.id;return R(this,"function"==typeof n?function(e,r,u){e.__transition__[t].duration=Math.max(1,n.call(e,e.__data__,r,u))}:(n=Math.max(1,n),function(e){e.__transition__[t].duration=n}))},Ns.each=function(n,t){var e=this.id;if(arguments.length<2){var r=Es,u=ks;ks=e,R(this,function(t,r,u){Es=t.__transition__[e],n.call(t,t.__data__,r,u)}),Es=r,ks=u}else R(this,function(r){var u=r.__transition__[e];(u.event||(u.event=Xo.dispatch("start","end"))).on(n,t)});return this},Ns.transition=function(){for(var n,t,e,r,u=this.id,i=++Ls,o=[],a=0,c=this.length;c>a;a++){o.push(n=[]);for(var t=this[a],s=0,l=t.length;l>s;s++)(e=t[s])&&(r=Object.create(e.__transition__[u]),r.delay+=r.duration,jo(e,s,i,r)),n.push(e)}return Do(o,i)},Xo.svg.axis=function(){function n(n){n.each(function(){var n,s=Xo.select(this),l=this.__chart__||e,f=this.__chart__=e.copy(),h=null==c?f.ticks?f.ticks.apply(f,a):f.domain():c,g=null==t?f.tickFormat?f.tickFormat.apply(f,a):bt:t,p=s.selectAll(".tick").data(h,f),v=p.enter().insert("g",".domain").attr("class","tick").style("opacity",Aa),d=Xo.transition(p.exit()).style("opacity",Aa).remove(),m=Xo.transition(p).style("opacity",1),y=Ri(f),x=s.selectAll(".domain").data([0]),M=(x.enter().append("path").attr("class","domain"),Xo.transition(x));v.append("line"),v.append("text");var 
_=v.select("line"),b=m.select("line"),w=p.select("text").text(g),S=v.select("text"),k=m.select("text");switch(r){case"bottom":n=Ho,_.attr("y2",u),S.attr("y",Math.max(u,0)+o),b.attr("x2",0).attr("y2",u),k.attr("x",0).attr("y",Math.max(u,0)+o),w.attr("dy",".71em").style("text-anchor","middle"),M.attr("d","M"+y[0]+","+i+"V0H"+y[1]+"V"+i);break;case"top":n=Ho,_.attr("y2",-u),S.attr("y",-(Math.max(u,0)+o)),b.attr("x2",0).attr("y2",-u),k.attr("x",0).attr("y",-(Math.max(u,0)+o)),w.attr("dy","0em").style("text-anchor","middle"),M.attr("d","M"+y[0]+","+-i+"V0H"+y[1]+"V"+-i);break;case"left":n=Fo,_.attr("x2",-u),S.attr("x",-(Math.max(u,0)+o)),b.attr("x2",-u).attr("y2",0),k.attr("x",-(Math.max(u,0)+o)).attr("y",0),w.attr("dy",".32em").style("text-anchor","end"),M.attr("d","M"+-i+","+y[0]+"H0V"+y[1]+"H"+-i);break;case"right":n=Fo,_.attr("x2",u),S.attr("x",Math.max(u,0)+o),b.attr("x2",u).attr("y2",0),k.attr("x",Math.max(u,0)+o).attr("y",0),w.attr("dy",".32em").style("text-anchor","start"),M.attr("d","M"+i+","+y[0]+"H0V"+y[1]+"H"+i)}if(f.rangeBand){var E=f,A=E.rangeBand()/2;l=f=function(n){return E(n)+A}}else l.rangeBand?l=f:d.call(n,f);v.call(n,l),m.call(n,f)})}var t,e=Xo.scale.linear(),r=Ts,u=6,i=6,o=3,a=[10],c=null;return n.scale=function(t){return arguments.length?(e=t,n):e},n.orient=function(t){return arguments.length?(r=t in qs?t+"":Ts,n):r},n.ticks=function(){return arguments.length?(a=arguments,n):a},n.tickValues=function(t){return arguments.length?(c=t,n):c},n.tickFormat=function(e){return arguments.length?(t=e,n):t},n.tickSize=function(t){var e=arguments.length;return e?(u=+t,i=+arguments[e-1],n):u},n.innerTickSize=function(t){return arguments.length?(u=+t,n):u},n.outerTickSize=function(t){return arguments.length?(i=+t,n):i},n.tickPadding=function(t){return arguments.length?(o=+t,n):o},n.tickSubdivide=function(){return arguments.length&&n},n};var Ts="bottom",qs={top:1,right:1,bottom:1,left:1};Xo.svg.brush=function(){function n(i){i.each(function(){var 
i=Xo.select(this).style("pointer-events","all").style("-webkit-tap-highlight-color","rgba(0,0,0,0)").on("mousedown.brush",u).on("touchstart.brush",u),o=i.selectAll(".background").data([0]);o.enter().append("rect").attr("class","background").style("visibility","hidden").style("cursor","crosshair"),i.selectAll(".extent").data([0]).enter().append("rect").attr("class","extent").style("cursor","move");var a=i.selectAll(".resize").data(p,bt);a.exit().remove(),a.enter().append("g").attr("class",function(n){return"resize "+n}).style("cursor",function(n){return zs[n]}).append("rect").attr("x",function(n){return/[ew]$/.test(n)?-3:null}).attr("y",function(n){return/^[ns]/.test(n)?-3:null}).attr("width",6).attr("height",6).style("visibility","hidden"),a.style("display",n.empty()?"none":null);var l,f=Xo.transition(i),h=Xo.transition(o);c&&(l=Ri(c),h.attr("x",l[0]).attr("width",l[1]-l[0]),e(f)),s&&(l=Ri(s),h.attr("y",l[0]).attr("height",l[1]-l[0]),r(f)),t(f)})}function t(n){n.selectAll(".resize").attr("transform",function(n){return"translate("+l[+/e$/.test(n)]+","+f[+/^s/.test(n)]+")"})}function e(n){n.select(".extent").attr("x",l[0]),n.selectAll(".extent,.n>rect,.s>rect").attr("width",l[1]-l[0])}function r(n){n.select(".extent").attr("y",f[0]),n.selectAll(".extent,.e>rect,.w>rect").attr("height",f[1]-f[0])}function u(){function u(){32==Xo.event.keyCode&&(C||(x=null,L[0]-=l[1],L[1]-=f[1],C=2),d())}function p(){32==Xo.event.keyCode&&2==C&&(L[0]+=l[1],L[1]+=f[1],C=0,d())}function v(){var n=Xo.mouse(_),u=!1;M&&(n[0]+=M[0],n[1]+=M[1]),C||(Xo.event.altKey?(x||(x=[(l[0]+l[1])/2,(f[0]+f[1])/2]),L[0]=l[+(n[0]<x[0])],L[1]=f[+(n[1]<x[1])]):x=null),E&&m(n,c,0)&&(e(S),u=!0),A&&m(n,s,1)&&(r(S),u=!0),u&&(t(S),w({type:"brush",mode:C?"move":"resize"}))}function m(n,t,e){var r,u,a=Ri(t),c=a[0],s=a[1],p=L[e],v=e?f:l,d=v[1]-v[0];return 
C&&(c-=p,s-=d+p),r=(e?g:h)?Math.max(c,Math.min(s,n[e])):n[e],C?u=(r+=p)+d:(x&&(p=Math.max(c,Math.min(s,2*x[e]-r))),r>p?(u=r,r=p):u=p),v[0]!=r||v[1]!=u?(e?o=null:i=null,v[0]=r,v[1]=u,!0):void 0}function y(){v(),S.style("pointer-events","all").selectAll(".resize").style("display",n.empty()?"none":null),Xo.select("body").style("cursor",null),T.on("mousemove.brush",null).on("mouseup.brush",null).on("touchmove.brush",null).on("touchend.brush",null).on("keydown.brush",null).on("keyup.brush",null),N(),w({type:"brushend"})}var x,M,_=this,b=Xo.select(Xo.event.target),w=a.of(_,arguments),S=Xo.select(_),k=b.datum(),E=!/^(n|s)$/.test(k)&&c,A=!/^(e|w)$/.test(k)&&s,C=b.classed("extent"),N=O(),L=Xo.mouse(_),T=Xo.select(Go).on("keydown.brush",u).on("keyup.brush",p);if(Xo.event.changedTouches?T.on("touchmove.brush",v).on("touchend.brush",y):T.on("mousemove.brush",v).on("mouseup.brush",y),S.interrupt().selectAll("*").interrupt(),C)L[0]=l[0]-L[0],L[1]=f[0]-L[1];else if(k){var q=+/w$/.test(k),z=+/^n/.test(k);M=[l[1-q]-L[0],f[1-z]-L[1]],L[0]=l[q],L[1]=f[z]}else Xo.event.altKey&&(x=L.slice());S.style("pointer-events","none").selectAll(".resize").style("display",null),Xo.select("body").style("cursor",b.style("cursor")),w({type:"brushstart"}),v()}var i,o,a=y(n,"brushstart","brush","brushend"),c=null,s=null,l=[0,0],f=[0,0],h=!0,g=!0,p=Rs[0];return n.event=function(n){n.each(function(){var n=a.of(this,arguments),t={x:l,y:f,i:i,j:o},e=this.__chart__||t;this.__chart__=t,ks?Xo.select(this).transition().each("start.brush",function(){i=e.i,o=e.j,l=e.x,f=e.y,n({type:"brushstart"})}).tween("brush:brush",function(){var e=hu(l,t.x),r=hu(f,t.y);return i=o=null,function(u){l=t.x=e(u),f=t.y=r(u),n({type:"brush",mode:"resize"})}}).each("end.brush",function(){i=t.i,o=t.j,n({type:"brush",mode:"resize"}),n({type:"brushend"})}):(n({type:"brushstart"}),n({type:"brush",mode:"resize"}),n({type:"brushend"}))})},n.x=function(t){return arguments.length?(c=t,p=Rs[!c<<1|!s],n):c},n.y=function(t){return 
arguments.length?(s=t,p=Rs[!c<<1|!s],n):s},n.clamp=function(t){return arguments.length?(c&&s?(h=!!t[0],g=!!t[1]):c?h=!!t:s&&(g=!!t),n):c&&s?[h,g]:c?h:s?g:null},n.extent=function(t){var e,r,u,a,h;return arguments.length?(c&&(e=t[0],r=t[1],s&&(e=e[0],r=r[0]),i=[e,r],c.invert&&(e=c(e),r=c(r)),e>r&&(h=e,e=r,r=h),(e!=l[0]||r!=l[1])&&(l=[e,r])),s&&(u=t[0],a=t[1],c&&(u=u[1],a=a[1]),o=[u,a],s.invert&&(u=s(u),a=s(a)),u>a&&(h=u,u=a,a=h),(u!=f[0]||a!=f[1])&&(f=[u,a])),n):(c&&(i?(e=i[0],r=i[1]):(e=l[0],r=l[1],c.invert&&(e=c.invert(e),r=c.invert(r)),e>r&&(h=e,e=r,r=h))),s&&(o?(u=o[0],a=o[1]):(u=f[0],a=f[1],s.invert&&(u=s.invert(u),a=s.invert(a)),u>a&&(h=u,u=a,a=h))),c&&s?[[e,u],[r,a]]:c?[e,r]:s&&[u,a])},n.clear=function(){return n.empty()||(l=[0,0],f=[0,0],i=o=null),n},n.empty=function(){return!!c&&l[0]==l[1]||!!s&&f[0]==f[1]},Xo.rebind(n,a,"on")};var zs={n:"ns-resize",e:"ew-resize",s:"ns-resize",w:"ew-resize",nw:"nwse-resize",ne:"nesw-resize",se:"nwse-resize",sw:"nesw-resize"},Rs=[["n","e","s","w","nw","ne","se","sw"],["e","w"],["n","s"],[]],Ds=tc.format=ac.timeFormat,Ps=Ds.utc,Us=Ps("%Y-%m-%dT%H:%M:%S.%LZ");Ds.iso=Date.prototype.toISOString&&+new Date("2000-01-01T00:00:00.000Z")?Oo:Us,Oo.parse=function(n){var t=new Date(n);return isNaN(t)?null:t},Oo.toString=Us.toString,tc.second=Rt(function(n){return new ec(1e3*Math.floor(n/1e3))},function(n,t){n.setTime(n.getTime()+1e3*Math.floor(t))},function(n){return n.getSeconds()}),tc.seconds=tc.second.range,tc.seconds.utc=tc.second.utc.range,tc.minute=Rt(function(n){return new ec(6e4*Math.floor(n/6e4))},function(n,t){n.setTime(n.getTime()+6e4*Math.floor(t))},function(n){return n.getMinutes()}),tc.minutes=tc.minute.range,tc.minutes.utc=tc.minute.utc.range,tc.hour=Rt(function(n){var t=n.getTimezoneOffset()/60;return new ec(36e5*(Math.floor(n/36e5-t)+t))},function(n,t){n.setTime(n.getTime()+36e5*Math.floor(t))},function(n){return n.getHours()}),tc.hours=tc.hour.range,tc.hours.utc=tc.hour.utc.range,tc.month=Rt(function(n){return 
n=tc.day(n),n.setDate(1),n},function(n,t){n.setMonth(n.getMonth()+t)},function(n){return n.getMonth()}),tc.months=tc.month.range,tc.months.utc=tc.month.utc.range;var js=[1e3,5e3,15e3,3e4,6e4,3e5,9e5,18e5,36e5,108e5,216e5,432e5,864e5,1728e5,6048e5,2592e6,7776e6,31536e6],Hs=[[tc.second,1],[tc.second,5],[tc.second,15],[tc.second,30],[tc.minute,1],[tc.minute,5],[tc.minute,15],[tc.minute,30],[tc.hour,1],[tc.hour,3],[tc.hour,6],[tc.hour,12],[tc.day,1],[tc.day,2],[tc.week,1],[tc.month,1],[tc.month,3],[tc.year,1]],Fs=Ds.multi([[".%L",function(n){return n.getMilliseconds()}],[":%S",function(n){return n.getSeconds()}],["%I:%M",function(n){return n.getMinutes()}],["%I %p",function(n){return n.getHours()}],["%a %d",function(n){return n.getDay()&&1!=n.getDate()}],["%b %d",function(n){return 1!=n.getDate()}],["%B",function(n){return n.getMonth()}],["%Y",be]]),Os={range:function(n,t,e){return Xo.range(Math.ceil(n/e)*e,+t,e).map(Io)},floor:bt,ceil:bt};Hs.year=tc.year,tc.scale=function(){return Yo(Xo.scale.linear(),Hs,Fs)};var Ys=Hs.map(function(n){return[n[0].utc,n[1]]}),Is=Ps.multi([[".%L",function(n){return n.getUTCMilliseconds()}],[":%S",function(n){return n.getUTCSeconds()}],["%I:%M",function(n){return n.getUTCMinutes()}],["%I %p",function(n){return n.getUTCHours()}],["%a %d",function(n){return n.getUTCDay()&&1!=n.getUTCDate()}],["%b %d",function(n){return 1!=n.getUTCDate()}],["%B",function(n){return n.getUTCMonth()}],["%Y",be]]);Ys.year=tc.year.utc,tc.scale.utc=function(){return Yo(Xo.scale.linear(),Ys,Is)},Xo.text=wt(function(n){return n.responseText}),Xo.json=function(n,t){return St(n,"application/json",Zo,t)},Xo.html=function(n,t){return St(n,"text/html",Vo,t)},Xo.xml=wt(function(n){return n.responseXML}),"function"==typeof define&&define.amd?define(Xo):"object"==typeof module&&module.exports?module.exports=Xo:this.d3=Xo}();'use strict';(function(window){window.define=undefined;}).call(this,this);'use strict';tr.exportTo('tr.ui.b',function(){const 
DataSeriesEnableChangeEventType='data-series-enabled-change';const THIS_DOC=document.currentScript.ownerDocument;const svgNS='http://www.w3.org/2000/svg';const ColorScheme=tr.b.ColorScheme;function getColorOfKey(key,selected){let id=ColorScheme.getColorIdForGeneralPurposeString(key);if(selected){id+=ColorScheme.properties.brightenedOffsets[0];} +return this.rangeOfInterest_;},get rangeOfInterest(){return this.rangeOfInterest_;},set rangeOfInterest(rangeOfInterest){this.rangeOfInterest_=rangeOfInterest;this.updateContents_();},get selection(){},set selection(_){},get textLabel(){return'Frame Data';},get model(){return this.model_;},set model(model){this.model_=model;this.updateContents_();}});tr.ui.side_panel.SidePanelRegistry.register(function(){return document.createElement('tr-ui-e-s-frame-data-side-panel');});});'use strict';Polymer({is:'tr-ui-b-chart-legend-key',ready(){this.$.checkbox.addEventListener('change',this.onCheckboxChange_.bind(this));},onCheckboxChange_(){tr.b.dispatchSimpleEvent(this,tr.ui.b.DataSeriesEnableChangeEventType,true,false,{key:Polymer.dom(this).textContent,enabled:this.enabled});},set textContent(t){Polymer.dom(this.$.label).textContent=t;Polymer.dom(this.$.link).textContent=t;this.updateContents_();},set width(w){w-=20;this.$.link.style.width=w+'px';this.$.label.style.width=w+'px';},get textContent(){return Polymer.dom(this.$.label).textContent;},set optional(optional){this.$.checkbox.style.visibility=optional?'visible':'hidden';},get optional(){return this.$.checkbox.style.visibility==='visible';},set enabled(enabled){this.$.checkbox.checked=enabled?'checked':'';},get enabled(){return this.$.checkbox.checked;},set color(c){this.$.label.style.color=c;this.$.link.color=c;},set target(target){this.$.link.setSelectionAndContent(target,Polymer.dom(this.$.label).textContent);this.updateContents_();},get target(){return this.$.link.selection;},set 
title(title){this.$.link.title=title;},updateContents_(){this.$.link.style.display=this.target?'':'none';this.$.label.style.display=this.target?'none':'';this.$.label.htmlFor=this.optional?'checkbox':'';}});'use strict';(function(window){window.define=function(x){window.d3=x;};window.define.amd=true;})(this);!function(){function n(n){return null!=n&&!isNaN(n)}function t(n){return n.length}function e(n){for(var t=1;n*t%1;)t*=10;return t}function r(n,t){try{for(var e in t)Object.defineProperty(n.prototype,e,{value:t[e],enumerable:!1})}catch(r){n.prototype=t}}function u(){}function i(n){return aa+n in this}function o(n){return n=aa+n,n in this&&delete this[n]}function a(){var n=[];return this.forEach(function(t){n.push(t)}),n}function c(){var n=0;for(var t in this)t.charCodeAt(0)===ca&&++n;return n}function s(){for(var n in this)if(n.charCodeAt(0)===ca)return!1;return!0}function l(){}function f(n,t,e){return function(){var r=e.apply(t,arguments);return r===t?n:r}}function h(n,t){if(t in n)return t;t=t.charAt(0).toUpperCase()+t.substring(1);for(var e=0,r=sa.length;r>e;++e){var u=sa[e]+t;if(u in n)return u}}function g(){}function p(){}function v(n){function t(){for(var t,r=e,u=-1,i=r.length;++u<i;)(t=r[u].on)&&t.apply(this,arguments);return n}var e=[],r=new u;return t.on=function(t,u){var i,o=r.get(t);return arguments.length<2?o&&o.on:(o&&(o.on=null,e=e.slice(0,i=e.indexOf(o)).concat(e.slice(i+1)),r.remove(t)),u&&e.push(r.set(t,{on:u})),n)},t}function d(){Xo.event.preventDefault()}function m(){for(var n,t=Xo.event;n=t.sourceEvent;)t=n;return t}function y(n){for(var t=new p,e=0,r=arguments.length;++e<r;)t[arguments[e]]=v(t);return t.of=function(e,r){return function(u){try{var i=u.sourceEvent=Xo.event;u.target=n,Xo.event=u,t[u.type].apply(e,r)}finally{Xo.event=i}}},t}function x(n){return fa(n,da),n}function M(n){return"function"==typeof n?n:function(){return ha(n,this)}}function _(n){return"function"==typeof n?n:function(){return ga(n,this)}}function b(n,t){function 
e(){this.removeAttribute(n)}function r(){this.removeAttributeNS(n.space,n.local)}function u(){this.setAttribute(n,t)}function i(){this.setAttributeNS(n.space,n.local,t)}function o(){var e=t.apply(this,arguments);null==e?this.removeAttribute(n):this.setAttribute(n,e)}function a(){var e=t.apply(this,arguments);null==e?this.removeAttributeNS(n.space,n.local):this.setAttributeNS(n.space,n.local,e)}return n=Xo.ns.qualify(n),null==t?n.local?r:e:"function"==typeof t?n.local?a:o:n.local?i:u}function w(n){return n.trim().replace(/\s+/g," ")}function S(n){return new RegExp("(?:^|\\s+)"+Xo.requote(n)+"(?:\\s+|$)","g")}function k(n){return n.trim().split(/^|\s+/)}function E(n,t){function e(){for(var e=-1;++e<u;)n[e](this,t)}function r(){for(var e=-1,r=t.apply(this,arguments);++e<u;)n[e](this,r)}n=k(n).map(A);var u=n.length;return"function"==typeof t?r:e}function A(n){var t=S(n);return function(e,r){if(u=e.classList)return r?u.add(n):u.remove(n);var u=e.getAttribute("class")||"";r?(t.lastIndex=0,t.test(u)||e.setAttribute("class",w(u+" "+n))):e.setAttribute("class",w(u.replace(t," ")))}}function C(n,t,e){function r(){this.style.removeProperty(n)}function u(){this.style.setProperty(n,t,e)}function i(){var r=t.apply(this,arguments);null==r?this.style.removeProperty(n):this.style.setProperty(n,r,e)}return null==t?r:"function"==typeof t?i:u}function N(n,t){function e(){delete this[n]}function r(){this[n]=t}function u(){var e=t.apply(this,arguments);null==e?delete this[n]:this[n]=e}return null==t?e:"function"==typeof t?u:r}function L(n){return"function"==typeof n?n:(n=Xo.ns.qualify(n)).local?function(){return this.ownerDocument.createElementNS(n.space,n.local)}:function(){return this.ownerDocument.createElementNS(this.namespaceURI,n)}}function T(n){return{__data__:n}}function q(n){return function(){return va(this,n)}}function z(n){return arguments.length||(n=Xo.ascending),function(t,e){return t&&e?n(t.__data__,e.__data__):!t-!e}}function R(n,t){for(var e=0,r=n.length;r>e;e++)for(var 
u,i=n[e],o=0,a=i.length;a>o;o++)(u=i[o])&&t(u,o,e);return n}function D(n){return fa(n,ya),n}function P(n){var t,e;return function(r,u,i){var o,a=n[i].update,c=a.length;for(i!=e&&(e=i,t=0),u>=t&&(t=u+1);!(o=a[t])&&++t<c;);return o}}function U(){var n=this.__transition__;n&&++n.active}function j(n,t,e){function r(){var t=this[o];t&&(this.removeEventListener(n,t,t.$),delete this[o])}function u(){var u=c(t,Bo(arguments));r.call(this),this.addEventListener(n,this[o]=u,u.$=e),u._=t}function i(){var t,e=new RegExp("^__on([^.]+)"+Xo.requote(n)+"$");for(var r in this)if(t=r.match(e)){var u=this[r];this.removeEventListener(t[1],u,u.$),delete this[r]}}var o="__on"+n,a=n.indexOf("."),c=H;a>0&&(n=n.substring(0,a));var s=Ma.get(n);return s&&(n=s,c=F),a?t?u:r:t?g:i}function H(n,t){return function(e){var r=Xo.event;Xo.event=e,t[0]=this.__data__;try{n.apply(this,t)}finally{Xo.event=r}}}function F(n,t){var e=H(n,t);return function(n){var t=this,r=n.relatedTarget;r&&(r===t||8&r.compareDocumentPosition(t))||e.call(t,n)}}function O(){var n=".dragsuppress-"+ ++ba,t="click"+n,e=Xo.select(Go).on("touchmove"+n,d).on("dragstart"+n,d).on("selectstart"+n,d);if(_a){var r=Jo.style,u=r[_a];r[_a]="none"}return function(i){function o(){e.on(t,null)}e.on(n,null),_a&&(r[_a]=u),i&&(e.on(t,function(){d(),o()},!0),setTimeout(o,0))}}function Y(n,t){t.changedTouches&&(t=t.changedTouches[0]);var e=n.ownerSVGElement||n;if(e.createSVGPoint){var r=e.createSVGPoint();if(0>wa&&(Go.scrollX||Go.scrollY)){e=Xo.select("body").append("svg").style({position:"absolute",top:0,left:0,margin:0,padding:0,border:"none"},"important");var u=e[0][0].getScreenCTM();wa=!(u.f||u.e),e.remove()}return wa?(r.x=t.pageX,r.y=t.pageY):(r.x=t.clientX,r.y=t.clientY),r=r.matrixTransform(n.getScreenCTM().inverse()),[r.x,r.y]}var i=n.getBoundingClientRect();return[t.clientX-i.left-n.clientLeft,t.clientY-i.top-n.clientTop]}function I(n){return n>0?1:0>n?-1:0}function Z(n,t,e){return(t[0]-n[0])*(e[1]-n[1])-(t[1]-n[1])*(e[0]-n[0])}function 
V(n){return n>1?0:-1>n?Sa:Math.acos(n)}function X(n){return n>1?Ea:-1>n?-Ea:Math.asin(n)}function $(n){return((n=Math.exp(n))-1/n)/2}function B(n){return((n=Math.exp(n))+1/n)/2}function W(n){return((n=Math.exp(2*n))-1)/(n+1)}function J(n){return(n=Math.sin(n/2))*n}function G(){}function K(n,t,e){return new Q(n,t,e)}function Q(n,t,e){this.h=n,this.s=t,this.l=e}function nt(n,t,e){function r(n){return n>360?n-=360:0>n&&(n+=360),60>n?i+(o-i)*n/60:180>n?o:240>n?i+(o-i)*(240-n)/60:i}function u(n){return Math.round(255*r(n))}var i,o;return n=isNaN(n)?0:(n%=360)<0?n+360:n,t=isNaN(t)?0:0>t?0:t>1?1:t,e=0>e?0:e>1?1:e,o=.5>=e?e*(1+t):e+t-e*t,i=2*e-o,gt(u(n+120),u(n),u(n-120))}function tt(n,t,e){return new et(n,t,e)}function et(n,t,e){this.h=n,this.c=t,this.l=e}function rt(n,t,e){return isNaN(n)&&(n=0),isNaN(t)&&(t=0),ut(e,Math.cos(n*=Na)*t,Math.sin(n)*t)}function ut(n,t,e){return new it(n,t,e)}function it(n,t,e){this.l=n,this.a=t,this.b=e}function ot(n,t,e){var r=(n+16)/116,u=r+t/500,i=r-e/200;return u=ct(u)*Fa,r=ct(r)*Oa,i=ct(i)*Ya,gt(lt(3.2404542*u-1.5371385*r-.4985314*i),lt(-.969266*u+1.8760108*r+.041556*i),lt(.0556434*u-.2040259*r+1.0572252*i))}function at(n,t,e){return n>0?tt(Math.atan2(e,t)*La,Math.sqrt(t*t+e*e),n):tt(0/0,0/0,n)}function ct(n){return n>.206893034?n*n*n:(n-4/29)/7.787037}function st(n){return n>.008856?Math.pow(n,1/3):7.787037*n+4/29}function lt(n){return Math.round(255*(.00304>=n?12.92*n:1.055*Math.pow(n,1/2.4)-.055))}function ft(n){return gt(n>>16,255&n>>8,255&n)}function ht(n){return ft(n)+""}function gt(n,t,e){return new pt(n,t,e)}function pt(n,t,e){this.r=n,this.g=t,this.b=e}function vt(n){return 16>n?"0"+Math.max(0,n).toString(16):Math.min(255,n).toString(16)}function dt(n,t,e){var r,u,i,o,a=0,c=0,s=0;if(u=/([a-z]+)\((.*)\)/i.exec(n))switch(i=u[2].split(","),u[1]){case"hsl":return e(parseFloat(i[0]),parseFloat(i[1])/100,parseFloat(i[2])/100);case"rgb":return 
t(Mt(i[0]),Mt(i[1]),Mt(i[2]))}return(o=Va.get(n))?t(o.r,o.g,o.b):(null!=n&&"#"===n.charAt(0)&&(r=parseInt(n.substring(1),16),isNaN(r)||(4===n.length?(a=(3840&r)>>4,a=a>>4|a,c=240&r,c=c>>4|c,s=15&r,s=s<<4|s):7===n.length&&(a=(16711680&r)>>16,c=(65280&r)>>8,s=255&r))),t(a,c,s))}function mt(n,t,e){var r,u,i=Math.min(n/=255,t/=255,e/=255),o=Math.max(n,t,e),a=o-i,c=(o+i)/2;return a?(u=.5>c?a/(o+i):a/(2-o-i),r=n==o?(t-e)/a+(e>t?6:0):t==o?(e-n)/a+2:(n-t)/a+4,r*=60):(r=0/0,u=c>0&&1>c?0:r),K(r,u,c)}function yt(n,t,e){n=xt(n),t=xt(t),e=xt(e);var r=st((.4124564*n+.3575761*t+.1804375*e)/Fa),u=st((.2126729*n+.7151522*t+.072175*e)/Oa),i=st((.0193339*n+.119192*t+.9503041*e)/Ya);return ut(116*u-16,500*(r-u),200*(u-i))}function xt(n){return(n/=255)<=.04045?n/12.92:Math.pow((n+.055)/1.055,2.4)}function Mt(n){var t=parseFloat(n);return"%"===n.charAt(n.length-1)?Math.round(2.55*t):t}function _t(n){return"function"==typeof n?n:function(){return n}}function bt(n){return n}function wt(n){return function(t,e,r){return 2===arguments.length&&"function"==typeof e&&(r=e,e=null),St(t,e,n,r)}}function St(n,t,e,r){function u(){var n,t=c.status;if(!t&&c.responseText||t>=200&&300>t||304===t){try{n=e.call(i,c)}catch(r){return o.error.call(i,r),void 0}o.load.call(i,n)}else o.error.call(i,c)}var i={},o=Xo.dispatch("beforesend","progress","load","error"),a={},c=new XMLHttpRequest,s=null;return!Go.XDomainRequest||"withCredentials"in c||!/^(http(s)?:)?\/\//.test(n)||(c=new XDomainRequest),"onload"in c?c.onload=c.onerror=u:c.onreadystatechange=function(){c.readyState>3&&u()},c.onprogress=function(n){var t=Xo.event;Xo.event=n;try{o.progress.call(i,c)}finally{Xo.event=t}},i.header=function(n,t){return n=(n+"").toLowerCase(),arguments.length<2?a[n]:(null==t?delete a[n]:a[n]=t+"",i)},i.mimeType=function(n){return arguments.length?(t=null==n?null:n+"",i):t},i.responseType=function(n){return arguments.length?(s=n,i):s},i.response=function(n){return 
e=n,i},["get","post"].forEach(function(n){i[n]=function(){return i.send.apply(i,[n].concat(Bo(arguments)))}}),i.send=function(e,r,u){if(2===arguments.length&&"function"==typeof r&&(u=r,r=null),c.open(e,n,!0),null==t||"accept"in a||(a.accept=t+",*/*"),c.setRequestHeader)for(var l in a)c.setRequestHeader(l,a[l]);return null!=t&&c.overrideMimeType&&c.overrideMimeType(t),null!=s&&(c.responseType=s),null!=u&&i.on("error",u).on("load",function(n){u(null,n)}),o.beforesend.call(i,c),c.send(null==r?null:r),i},i.abort=function(){return c.abort(),i},Xo.rebind(i,o,"on"),null==r?i:i.get(kt(r))}function kt(n){return 1===n.length?function(t,e){n(null==t?e:null)}:n}function Et(){var n=At(),t=Ct()-n;t>24?(isFinite(t)&&(clearTimeout(Wa),Wa=setTimeout(Et,t)),Ba=0):(Ba=1,Ga(Et))}function At(){var n=Date.now();for(Ja=Xa;Ja;)n>=Ja.t&&(Ja.f=Ja.c(n-Ja.t)),Ja=Ja.n;return n}function Ct(){for(var n,t=Xa,e=1/0;t;)t.f?t=n?n.n=t.n:Xa=t.n:(t.t<e&&(e=t.t),t=(n=t).n);return $a=n,e}function Nt(n,t){return t-(n?Math.ceil(Math.log(n)/Math.LN10):1)}function Lt(n,t){var e=Math.pow(10,3*oa(8-t));return{scale:t>8?function(n){return n/e}:function(n){return n*e},symbol:n}}function Tt(n){var t=n.decimal,e=n.thousands,r=n.grouping,u=n.currency,i=r?function(n){for(var t=n.length,u=[],i=0,o=r[0];t>0&&o>0;)u.push(n.substring(t-=o,t+o)),o=r[i=(i+1)%r.length];return u.reverse().join(e)}:bt;return function(n){var e=Qa.exec(n),r=e[1]||" ",o=e[2]||">",a=e[3]||"",c=e[4]||"",s=e[5],l=+e[6],f=e[7],h=e[8],g=e[9],p=1,v="",d="",m=!1;switch(h&&(h=+h.substring(1)),(s||"0"===r&&"="===o)&&(s=r="0",o="=",f&&(l-=Math.floor((l-1)/4))),g){case"n":f=!0,g="g";break;case"%":p=100,d="%",g="f";break;case"p":p=100,d="%",g="r";break;case"b":case"o":case"x":case"X":"#"===c&&(v="0"+g.toLowerCase());case"c":case"d":m=!0,h=0;break;case"s":p=-1,g="r"}"$"===c&&(v=u[0],d=u[1]),"r"!=g||h||(g="g"),null!=h&&("g"==g?h=Math.max(1,Math.min(21,h)):("e"==g||"f"==g)&&(h=Math.max(0,Math.min(20,h)))),g=nc.get(g)||qt;var y=s&&f;return function(n){var 
e=d;if(m&&n%1)return"";var u=0>n||0===n&&0>1/n?(n=-n,"-"):a;if(0>p){var c=Xo.formatPrefix(n,h);n=c.scale(n),e=c.symbol+d}else n*=p;n=g(n,h);var x=n.lastIndexOf("."),M=0>x?n:n.substring(0,x),_=0>x?"":t+n.substring(x+1);!s&&f&&(M=i(M));var b=v.length+M.length+_.length+(y?0:u.length),w=l>b?new Array(b=l-b+1).join(r):"";return y&&(M=i(w+M)),u+=v,n=M+_,("<"===o?u+n+w:">"===o?w+u+n:"^"===o?w.substring(0,b>>=1)+u+n+w.substring(b):u+(y?n:w+n))+e}}}function qt(n){return n+""}function zt(){this._=new Date(arguments.length>1?Date.UTC.apply(this,arguments):arguments[0])}function Rt(n,t,e){function r(t){var e=n(t),r=i(e,1);return r-t>t-e?e:r}function u(e){return t(e=n(new ec(e-1)),1),e}function i(n,e){return t(n=new ec(+n),e),n}function o(n,r,i){var o=u(n),a=[];if(i>1)for(;r>o;)e(o)%i||a.push(new Date(+o)),t(o,1);else for(;r>o;)a.push(new Date(+o)),t(o,1);return a}function a(n,t,e){try{ec=zt;var r=new zt;return r._=n,o(r,t,e)}finally{ec=Date}}n.floor=n,n.round=r,n.ceil=u,n.offset=i,n.range=o;var c=n.utc=Dt(n);return c.floor=c,c.round=Dt(r),c.ceil=Dt(u),c.offset=Dt(i),c.range=a,n}function Dt(n){return function(t,e){try{ec=zt;var r=new zt;return r._=t,n(r,e)._}finally{ec=Date}}}function Pt(n){function t(n){function t(t){for(var e,u,i,o=[],a=-1,c=0;++a<r;)37===n.charCodeAt(a)&&(o.push(n.substring(c,a)),null!=(u=uc[e=n.charAt(++a)])&&(e=n.charAt(++a)),(i=C[e])&&(e=i(t,null==u?"e"===e?" 
":"0":u)),o.push(e),c=a+1);return o.push(n.substring(c,a)),o.join("")}var r=n.length;return t.parse=function(t){var r={y:1900,m:0,d:1,H:0,M:0,S:0,L:0,Z:null},u=e(r,n,t,0);if(u!=t.length)return null;"p"in r&&(r.H=r.H%12+12*r.p);var i=null!=r.Z&&ec!==zt,o=new(i?zt:ec);return"j"in r?o.setFullYear(r.y,0,r.j):"w"in r&&("W"in r||"U"in r)?(o.setFullYear(r.y,0,1),o.setFullYear(r.y,0,"W"in r?(r.w+6)%7+7*r.W-(o.getDay()+5)%7:r.w+7*r.U-(o.getDay()+6)%7)):o.setFullYear(r.y,r.m,r.d),o.setHours(r.H+Math.floor(r.Z/100),r.M+r.Z%100,r.S,r.L),i?o._:o},t.toString=function(){return n},t}function e(n,t,e,r){for(var u,i,o,a=0,c=t.length,s=e.length;c>a;){if(r>=s)return-1;if(u=t.charCodeAt(a++),37===u){if(o=t.charAt(a++),i=N[o in uc?t.charAt(a++):o],!i||(r=i(n,e,r))<0)return-1}else if(u!=e.charCodeAt(r++))return-1}return r}function r(n,t,e){b.lastIndex=0;var r=b.exec(t.substring(e));return r?(n.w=w.get(r[0].toLowerCase()),e+r[0].length):-1}function u(n,t,e){M.lastIndex=0;var r=M.exec(t.substring(e));return r?(n.w=_.get(r[0].toLowerCase()),e+r[0].length):-1}function i(n,t,e){E.lastIndex=0;var r=E.exec(t.substring(e));return r?(n.m=A.get(r[0].toLowerCase()),e+r[0].length):-1}function o(n,t,e){S.lastIndex=0;var r=S.exec(t.substring(e));return r?(n.m=k.get(r[0].toLowerCase()),e+r[0].length):-1}function a(n,t,r){return e(n,C.c.toString(),t,r)}function c(n,t,r){return e(n,C.x.toString(),t,r)}function s(n,t,r){return e(n,C.X.toString(),t,r)}function l(n,t,e){var r=x.get(t.substring(e,e+=2).toLowerCase());return null==r?-1:(n.p=r,e)}var f=n.dateTime,h=n.date,g=n.time,p=n.periods,v=n.days,d=n.shortDays,m=n.months,y=n.shortMonths;t.utc=function(n){function e(n){try{ec=zt;var t=new ec;return t._=n,r(t)}finally{ec=Date}}var r=t(n);return e.parse=function(n){try{ec=zt;var t=r.parse(n);return t&&t._}finally{ec=Date}},e.toString=r.toString,e},t.multi=t.utc.multi=ee;var x=Xo.map(),M=jt(v),_=Ht(v),b=jt(d),w=Ht(d),S=jt(m),k=Ht(m),E=jt(y),A=Ht(y);p.forEach(function(n,t){x.set(n.toLowerCase(),t)});var 
C={a:function(n){return d[n.getDay()]},A:function(n){return v[n.getDay()]},b:function(n){return y[n.getMonth()]},B:function(n){return m[n.getMonth()]},c:t(f),d:function(n,t){return Ut(n.getDate(),t,2)},e:function(n,t){return Ut(n.getDate(),t,2)},H:function(n,t){return Ut(n.getHours(),t,2)},I:function(n,t){return Ut(n.getHours()%12||12,t,2)},j:function(n,t){return Ut(1+tc.dayOfYear(n),t,3)},L:function(n,t){return Ut(n.getMilliseconds(),t,3)},m:function(n,t){return Ut(n.getMonth()+1,t,2)},M:function(n,t){return Ut(n.getMinutes(),t,2)},p:function(n){return p[+(n.getHours()>=12)]},S:function(n,t){return Ut(n.getSeconds(),t,2)},U:function(n,t){return Ut(tc.sundayOfYear(n),t,2)},w:function(n){return n.getDay()},W:function(n,t){return Ut(tc.mondayOfYear(n),t,2)},x:t(h),X:t(g),y:function(n,t){return Ut(n.getFullYear()%100,t,2)},Y:function(n,t){return Ut(n.getFullYear()%1e4,t,4)},Z:ne,"%":function(){return"%"}},N={a:r,A:u,b:i,B:o,c:a,d:Bt,e:Bt,H:Jt,I:Jt,j:Wt,L:Qt,m:$t,M:Gt,p:l,S:Kt,U:Ot,w:Ft,W:Yt,x:c,X:s,y:Zt,Y:It,Z:Vt,"%":te};return t}function Ut(n,t,e){var r=0>n?"-":"",u=(r?-n:n)+"",i=u.length;return r+(e>i?new Array(e-i+1).join(t)+u:u)}function jt(n){return new RegExp("^(?:"+n.map(Xo.requote).join("|")+")","i")}function Ht(n){for(var t=new u,e=-1,r=n.length;++e<r;)t.set(n[e].toLowerCase(),e);return t}function Ft(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+1));return r?(n.w=+r[0],e+r[0].length):-1}function Ot(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e));return r?(n.U=+r[0],e+r[0].length):-1}function Yt(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e));return r?(n.W=+r[0],e+r[0].length):-1}function It(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+4));return r?(n.y=+r[0],e+r[0].length):-1}function Zt(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+2));return r?(n.y=Xt(+r[0]),e+r[0].length):-1}function Vt(n,t,e){return/^[+-]\d{4}$/.test(t=t.substring(e,e+5))?(n.Z=+t,e+5):-1}function Xt(n){return n+(n>68?1900:2e3)}function 
$t(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+2));return r?(n.m=r[0]-1,e+r[0].length):-1}function Bt(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+2));return r?(n.d=+r[0],e+r[0].length):-1}function Wt(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+3));return r?(n.j=+r[0],e+r[0].length):-1}function Jt(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+2));return r?(n.H=+r[0],e+r[0].length):-1}function Gt(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+2));return r?(n.M=+r[0],e+r[0].length):-1}function Kt(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+2));return r?(n.S=+r[0],e+r[0].length):-1}function Qt(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+3));return r?(n.L=+r[0],e+r[0].length):-1}function ne(n){var t=n.getTimezoneOffset(),e=t>0?"-":"+",r=~~(oa(t)/60),u=oa(t)%60;return e+Ut(r,"0",2)+Ut(u,"0",2)}function te(n,t,e){oc.lastIndex=0;var r=oc.exec(t.substring(e,e+1));return r?e+r[0].length:-1}function ee(n){for(var t=n.length,e=-1;++e<t;)n[e][0]=this(n[e][0]);return function(t){for(var e=0,r=n[e];!r[1](t);)r=n[++e];return r[0](t)}}function re(){}function ue(n,t,e){var r=e.s=n+t,u=r-n,i=r-u;e.t=n-i+(t-u)}function ie(n,t){n&&lc.hasOwnProperty(n.type)&&lc[n.type](n,t)}function oe(n,t,e){var r,u=-1,i=n.length-e;for(t.lineStart();++u<i;)r=n[u],t.point(r[0],r[1],r[2]);t.lineEnd()}function ae(n,t){var e=-1,r=n.length;for(t.polygonStart();++e<r;)oe(n[e],t,1);t.polygonEnd()}function ce(){function n(n,t){n*=Na,t=t*Na/2+Sa/4;var e=n-r,o=e>=0?1:-1,a=o*e,c=Math.cos(t),s=Math.sin(t),l=i*s,f=u*c+l*Math.cos(a),h=l*o*Math.sin(a);hc.add(Math.atan2(h,f)),r=n,u=c,i=s}var t,e,r,u,i;gc.point=function(o,a){gc.point=n,r=(t=o)*Na,u=Math.cos(a=(e=a)*Na/2+Sa/4),i=Math.sin(a)},gc.lineEnd=function(){n(t,e)}}function se(n){var t=n[0],e=n[1],r=Math.cos(e);return[r*Math.cos(t),r*Math.sin(t),Math.sin(e)]}function le(n,t){return n[0]*t[0]+n[1]*t[1]+n[2]*t[2]}function fe(n,t){return[n[1]*t[2]-n[2]*t[1],n[2]*t[0]-n[0]*t[2],n[0]*t[1]-n[1]*t[0]]}function 
he(n,t){n[0]+=t[0],n[1]+=t[1],n[2]+=t[2]}function ge(n,t){return[n[0]*t,n[1]*t,n[2]*t]}function pe(n){var t=Math.sqrt(n[0]*n[0]+n[1]*n[1]+n[2]*n[2]);n[0]/=t,n[1]/=t,n[2]/=t}function ve(n){return[Math.atan2(n[1],n[0]),X(n[2])]}function de(n,t){return oa(n[0]-t[0])<Aa&&oa(n[1]-t[1])<Aa}function me(n,t){n*=Na;var e=Math.cos(t*=Na);ye(e*Math.cos(n),e*Math.sin(n),Math.sin(t))}function ye(n,t,e){++pc,dc+=(n-dc)/pc,mc+=(t-mc)/pc,yc+=(e-yc)/pc}function xe(){function n(n,u){n*=Na;var i=Math.cos(u*=Na),o=i*Math.cos(n),a=i*Math.sin(n),c=Math.sin(u),s=Math.atan2(Math.sqrt((s=e*c-r*a)*s+(s=r*o-t*c)*s+(s=t*a-e*o)*s),t*o+e*a+r*c);vc+=s,xc+=s*(t+(t=o)),Mc+=s*(e+(e=a)),_c+=s*(r+(r=c)),ye(t,e,r)}var t,e,r;kc.point=function(u,i){u*=Na;var o=Math.cos(i*=Na);t=o*Math.cos(u),e=o*Math.sin(u),r=Math.sin(i),kc.point=n,ye(t,e,r)}}function Me(){kc.point=me}function _e(){function n(n,t){n*=Na;var e=Math.cos(t*=Na),o=e*Math.cos(n),a=e*Math.sin(n),c=Math.sin(t),s=u*c-i*a,l=i*o-r*c,f=r*a-u*o,h=Math.sqrt(s*s+l*l+f*f),g=r*o+u*a+i*c,p=h&&-V(g)/h,v=Math.atan2(h,g);bc+=p*s,wc+=p*l,Sc+=p*f,vc+=v,xc+=v*(r+(r=o)),Mc+=v*(u+(u=a)),_c+=v*(i+(i=c)),ye(r,u,i)}var t,e,r,u,i;kc.point=function(o,a){t=o,e=a,kc.point=n,o*=Na;var c=Math.cos(a*=Na);r=c*Math.cos(o),u=c*Math.sin(o),i=Math.sin(a),ye(r,u,i)},kc.lineEnd=function(){n(t,e),kc.lineEnd=Me,kc.point=me}}function be(){return!0}function we(n,t,e,r,u){var i=[],o=[];if(n.forEach(function(n){if(!((t=n.length-1)<=0)){var t,e=n[0],r=n[t];if(de(e,r)){u.lineStart();for(var a=0;t>a;++a)u.point((e=n[a])[0],e[1]);return u.lineEnd(),void 0}var c=new ke(e,n,null,!0),s=new ke(e,null,c,!1);c.o=s,i.push(c),o.push(s),c=new ke(r,n,null,!1),s=new ke(r,null,c,!0),c.o=s,i.push(c),o.push(s)}}),o.sort(t),Se(i),Se(o),i.length){for(var a=0,c=e,s=o.length;s>a;++a)o[a].e=c=!c;for(var l,f,h=i[0];;){for(var g=h,p=!0;g.v;)if((g=g.n)===h)return;l=g.z,u.lineStart();do{if(g.v=g.o.v=!0,g.e){if(p)for(var a=0,s=l.length;s>a;++a)u.point((f=l[a])[0],f[1]);else 
r(g.x,g.n.x,1,u);g=g.n}else{if(p){l=g.p.z;for(var a=l.length-1;a>=0;--a)u.point((f=l[a])[0],f[1])}else r(g.x,g.p.x,-1,u);g=g.p}g=g.o,l=g.z,p=!p}while(!g.v);u.lineEnd()}}}function Se(n){if(t=n.length){for(var t,e,r=0,u=n[0];++r<t;)u.n=e=n[r],e.p=u,u=e;u.n=e=n[0],e.p=u}}function ke(n,t,e,r){this.x=n,this.z=t,this.o=e,this.e=r,this.v=!1,this.n=this.p=null}function Ee(n,t,e,r){return function(u,i){function o(t,e){var r=u(t,e);n(t=r[0],e=r[1])&&i.point(t,e)}function a(n,t){var e=u(n,t);d.point(e[0],e[1])}function c(){y.point=a,d.lineStart()}function s(){y.point=o,d.lineEnd()}function l(n,t){v.push([n,t]);var e=u(n,t);M.point(e[0],e[1])}function f(){M.lineStart(),v=[]}function h(){l(v[0][0],v[0][1]),M.lineEnd();var n,t=M.clean(),e=x.buffer(),r=e.length;if(v.pop(),p.push(v),v=null,r){if(1&t){n=e[0];var u,r=n.length-1,o=-1;for(i.lineStart();++o<r;)i.point((u=n[o])[0],u[1]);return i.lineEnd(),void 0}r>1&&2&t&&e.push(e.pop().concat(e.shift())),g.push(e.filter(Ae))}}var g,p,v,d=t(i),m=u.invert(r[0],r[1]),y={point:o,lineStart:c,lineEnd:s,polygonStart:function(){y.point=l,y.lineStart=f,y.lineEnd=h,g=[],p=[],i.polygonStart()},polygonEnd:function(){y.point=o,y.lineStart=c,y.lineEnd=s,g=Xo.merge(g);var n=Le(m,p);g.length?we(g,Ne,n,e,i):n&&(i.lineStart(),e(null,null,1,i),i.lineEnd()),i.polygonEnd(),g=p=null},sphere:function(){i.polygonStart(),i.lineStart(),e(null,null,1,i),i.lineEnd(),i.polygonEnd()}},x=Ce(),M=t(x);return y}}function Ae(n){return n.length>1}function Ce(){var n,t=[];return{lineStart:function(){t.push(n=[])},point:function(t,e){n.push([t,e])},lineEnd:g,buffer:function(){var e=t;return t=[],n=null,e},rejoin:function(){t.length>1&&t.push(t.pop().concat(t.shift()))}}}function Ne(n,t){return((n=n.x)[0]<0?n[1]-Ea-Aa:Ea-n[1])-((t=t.x)[0]<0?t[1]-Ea-Aa:Ea-t[1])}function Le(n,t){var e=n[0],r=n[1],u=[Math.sin(e),-Math.cos(e),0],i=0,o=0;hc.reset();for(var a=0,c=t.length;c>a;++a){var s=t[a],l=s.length;if(l)for(var 
f=s[0],h=f[0],g=f[1]/2+Sa/4,p=Math.sin(g),v=Math.cos(g),d=1;;){d===l&&(d=0),n=s[d];var m=n[0],y=n[1]/2+Sa/4,x=Math.sin(y),M=Math.cos(y),_=m-h,b=_>=0?1:-1,w=b*_,S=w>Sa,k=p*x;if(hc.add(Math.atan2(k*b*Math.sin(w),v*M+k*Math.cos(w))),i+=S?_+b*ka:_,S^h>=e^m>=e){var E=fe(se(f),se(n));pe(E);var A=fe(u,E);pe(A);var C=(S^_>=0?-1:1)*X(A[2]);(r>C||r===C&&(E[0]||E[1]))&&(o+=S^_>=0?1:-1)}if(!d++)break;h=m,p=x,v=M,f=n}}return(-Aa>i||Aa>i&&0>hc)^1&o}function Te(n){var t,e=0/0,r=0/0,u=0/0;return{lineStart:function(){n.lineStart(),t=1},point:function(i,o){var a=i>0?Sa:-Sa,c=oa(i-e);oa(c-Sa)<Aa?(n.point(e,r=(r+o)/2>0?Ea:-Ea),n.point(u,r),n.lineEnd(),n.lineStart(),n.point(a,r),n.point(i,r),t=0):u!==a&&c>=Sa&&(oa(e-u)<Aa&&(e-=u*Aa),oa(i-a)<Aa&&(i-=a*Aa),r=qe(e,r,i,o),n.point(u,r),n.lineEnd(),n.lineStart(),n.point(a,r),t=0),n.point(e=i,r=o),u=a},lineEnd:function(){n.lineEnd(),e=r=0/0},clean:function(){return 2-t}}}function qe(n,t,e,r){var u,i,o=Math.sin(n-e);return oa(o)>Aa?Math.atan((Math.sin(t)*(i=Math.cos(r))*Math.sin(e)-Math.sin(r)*(u=Math.cos(t))*Math.sin(n))/(u*i*o)):(t+r)/2}function ze(n,t,e,r){var u;if(null==n)u=e*Ea,r.point(-Sa,u),r.point(0,u),r.point(Sa,u),r.point(Sa,0),r.point(Sa,-u),r.point(0,-u),r.point(-Sa,-u),r.point(-Sa,0),r.point(-Sa,u);else if(oa(n[0]-t[0])>Aa){var i=n[0]<t[0]?Sa:-Sa;u=e*i/2,r.point(-i,u),r.point(0,u),r.point(i,u)}else r.point(t[0],t[1])}function Re(n){function t(n,t){return Math.cos(n)*Math.cos(t)>i}function e(n){var e,i,c,s,l;return{lineStart:function(){s=c=!1,l=1},point:function(f,h){var g,p=[f,h],v=t(f,h),d=o?v?0:u(f,h):v?u(f+(0>f?Sa:-Sa),h):0;if(!e&&(s=c=v)&&n.lineStart(),v!==c&&(g=r(e,p),(de(e,g)||de(p,g))&&(p[0]+=Aa,p[1]+=Aa,v=t(p[0],p[1]))),v!==c)l=0,v?(n.lineStart(),g=r(p,e),n.point(g[0],g[1])):(g=r(e,p),n.point(g[0],g[1]),n.lineEnd()),e=g;else if(a&&e&&o^v){var 
m;d&i||!(m=r(p,e,!0))||(l=0,o?(n.lineStart(),n.point(m[0][0],m[0][1]),n.point(m[1][0],m[1][1]),n.lineEnd()):(n.point(m[1][0],m[1][1]),n.lineEnd(),n.lineStart(),n.point(m[0][0],m[0][1])))}!v||e&&de(e,p)||n.point(p[0],p[1]),e=p,c=v,i=d},lineEnd:function(){c&&n.lineEnd(),e=null},clean:function(){return l|(s&&c)<<1}}}function r(n,t,e){var r=se(n),u=se(t),o=[1,0,0],a=fe(r,u),c=le(a,a),s=a[0],l=c-s*s;if(!l)return!e&&n;var f=i*c/l,h=-i*s/l,g=fe(o,a),p=ge(o,f),v=ge(a,h);he(p,v);var d=g,m=le(p,d),y=le(d,d),x=m*m-y*(le(p,p)-1);if(!(0>x)){var M=Math.sqrt(x),_=ge(d,(-m-M)/y);if(he(_,p),_=ve(_),!e)return _;var b,w=n[0],S=t[0],k=n[1],E=t[1];w>S&&(b=w,w=S,S=b);var A=S-w,C=oa(A-Sa)<Aa,N=C||Aa>A;if(!C&&k>E&&(b=k,k=E,E=b),N?C?k+E>0^_[1]<(oa(_[0]-w)<Aa?k:E):k<=_[1]&&_[1]<=E:A>Sa^(w<=_[0]&&_[0]<=S)){var L=ge(d,(-m+M)/y);return he(L,p),[_,ve(L)]}}}function u(t,e){var r=o?n:Sa-n,u=0;return-r>t?u|=1:t>r&&(u|=2),-r>e?u|=4:e>r&&(u|=8),u}var i=Math.cos(n),o=i>0,a=oa(i)>Aa,c=cr(n,6*Na);return Ee(t,e,c,o?[0,-n]:[-Sa,n-Sa])}function De(n,t,e,r){return function(u){var i,o=u.a,a=u.b,c=o.x,s=o.y,l=a.x,f=a.y,h=0,g=1,p=l-c,v=f-s;if(i=n-c,p||!(i>0)){if(i/=p,0>p){if(h>i)return;g>i&&(g=i)}else if(p>0){if(i>g)return;i>h&&(h=i)}if(i=e-c,p||!(0>i)){if(i/=p,0>p){if(i>g)return;i>h&&(h=i)}else if(p>0){if(h>i)return;g>i&&(g=i)}if(i=t-s,v||!(i>0)){if(i/=v,0>v){if(h>i)return;g>i&&(g=i)}else if(v>0){if(i>g)return;i>h&&(h=i)}if(i=r-s,v||!(0>i)){if(i/=v,0>v){if(i>g)return;i>h&&(h=i)}else if(v>0){if(h>i)return;g>i&&(g=i)}return h>0&&(u.a={x:c+h*p,y:s+h*v}),1>g&&(u.b={x:c+g*p,y:s+g*v}),u}}}}}}function Pe(n,t,e,r){function u(r,u){return oa(r[0]-n)<Aa?u>0?0:3:oa(r[0]-e)<Aa?u>0?2:1:oa(r[1]-t)<Aa?u>0?1:0:u>0?3:2}function i(n,t){return o(n.x,t.x)}function o(n,t){var e=u(n,1),r=u(t,1);return e!==r?e-r:0===e?t[1]-n[1]:1===e?n[0]-t[0]:2===e?n[1]-t[1]:t[0]-n[0]}return function(a){function c(n){for(var t=0,e=d.length,r=n[1],u=0;e>u;++u)for(var 
i,o=1,a=d[u],c=a.length,s=a[0];c>o;++o)i=a[o],s[1]<=r?i[1]>r&&Z(s,i,n)>0&&++t:i[1]<=r&&Z(s,i,n)<0&&--t,s=i;return 0!==t}function s(i,a,c,s){var l=0,f=0;if(null==i||(l=u(i,c))!==(f=u(a,c))||o(i,a)<0^c>0){do s.point(0===l||3===l?n:e,l>1?r:t);while((l=(l+c+4)%4)!==f)}else s.point(a[0],a[1])}function l(u,i){return u>=n&&e>=u&&i>=t&&r>=i}function f(n,t){l(n,t)&&a.point(n,t)}function h(){N.point=p,d&&d.push(m=[]),S=!0,w=!1,_=b=0/0}function g(){v&&(p(y,x),M&&w&&A.rejoin(),v.push(A.buffer())),N.point=f,w&&a.lineEnd()}function p(n,t){n=Math.max(-Ac,Math.min(Ac,n)),t=Math.max(-Ac,Math.min(Ac,t));var e=l(n,t);if(d&&m.push([n,t]),S)y=n,x=t,M=e,S=!1,e&&(a.lineStart(),a.point(n,t));else if(e&&w)a.point(n,t);else{var r={a:{x:_,y:b},b:{x:n,y:t}};C(r)?(w||(a.lineStart(),a.point(r.a.x,r.a.y)),a.point(r.b.x,r.b.y),e||a.lineEnd(),k=!1):e&&(a.lineStart(),a.point(n,t),k=!1)}_=n,b=t,w=e}var v,d,m,y,x,M,_,b,w,S,k,E=a,A=Ce(),C=De(n,t,e,r),N={point:f,lineStart:h,lineEnd:g,polygonStart:function(){a=A,v=[],d=[],k=!0},polygonEnd:function(){a=E,v=Xo.merge(v);var t=c([n,r]),e=k&&t,u=v.length;(e||u)&&(a.polygonStart(),e&&(a.lineStart(),s(null,null,1,a),a.lineEnd()),u&&we(v,i,t,s,a),a.polygonEnd()),v=d=m=null}};return N}}function Ue(n,t){function e(e,r){return e=n(e,r),t(e[0],e[1])}return n.invert&&t.invert&&(e.invert=function(e,r){return e=t.invert(e,r),e&&n.invert(e[0],e[1])}),e}function je(n){var t=0,e=Sa/3,r=nr(n),u=r(t,e);return u.parallels=function(n){return arguments.length?r(t=n[0]*Sa/180,e=n[1]*Sa/180):[180*(t/Sa),180*(e/Sa)]},u}function He(n,t){function e(n,t){var e=Math.sqrt(i-2*u*Math.sin(t))/u;return[e*Math.sin(n*=u),o-e*Math.cos(n)]}var r=Math.sin(n),u=(r+Math.sin(t))/2,i=1+r*(2*u-r),o=Math.sqrt(i)/u;return e.invert=function(n,t){var e=o-t;return[Math.atan2(n,e)/u,X((i-(n*n+e*e)*u*u)/(2*u))]},e}function Fe(){function n(n,t){Nc+=u*n-r*t,r=n,u=t}var t,e,r,u;Rc.point=function(i,o){Rc.point=n,t=r=i,e=u=o},Rc.lineEnd=function(){n(t,e)}}function 
Oe(n,t){Lc>n&&(Lc=n),n>qc&&(qc=n),Tc>t&&(Tc=t),t>zc&&(zc=t)}function Ye(){function n(n,t){o.push("M",n,",",t,i)}function t(n,t){o.push("M",n,",",t),a.point=e}function e(n,t){o.push("L",n,",",t)}function r(){a.point=n}function u(){o.push("Z")}var i=Ie(4.5),o=[],a={point:n,lineStart:function(){a.point=t},lineEnd:r,polygonStart:function(){a.lineEnd=u},polygonEnd:function(){a.lineEnd=r,a.point=n},pointRadius:function(n){return i=Ie(n),a},result:function(){if(o.length){var n=o.join("");return o=[],n}}};return a}function Ie(n){return"m0,"+n+"a"+n+","+n+" 0 1,1 0,"+-2*n+"a"+n+","+n+" 0 1,1 0,"+2*n+"z"}function Ze(n,t){dc+=n,mc+=t,++yc}function Ve(){function n(n,r){var u=n-t,i=r-e,o=Math.sqrt(u*u+i*i);xc+=o*(t+n)/2,Mc+=o*(e+r)/2,_c+=o,Ze(t=n,e=r)}var t,e;Pc.point=function(r,u){Pc.point=n,Ze(t=r,e=u)}}function Xe(){Pc.point=Ze}function $e(){function n(n,t){var e=n-r,i=t-u,o=Math.sqrt(e*e+i*i);xc+=o*(r+n)/2,Mc+=o*(u+t)/2,_c+=o,o=u*n-r*t,bc+=o*(r+n),wc+=o*(u+t),Sc+=3*o,Ze(r=n,u=t)}var t,e,r,u;Pc.point=function(i,o){Pc.point=n,Ze(t=r=i,e=u=o)},Pc.lineEnd=function(){n(t,e)}}function Be(n){function t(t,e){n.moveTo(t,e),n.arc(t,e,o,0,ka)}function e(t,e){n.moveTo(t,e),a.point=r}function r(t,e){n.lineTo(t,e)}function u(){a.point=t}function i(){n.closePath()}var o=4.5,a={point:t,lineStart:function(){a.point=e},lineEnd:u,polygonStart:function(){a.lineEnd=i},polygonEnd:function(){a.lineEnd=u,a.point=t},pointRadius:function(n){return o=n,a},result:g};return a}function We(n){function t(n){return(a?r:e)(n)}function e(t){return Ke(t,function(e,r){e=n(e,r),t.point(e[0],e[1])})}function r(t){function e(e,r){e=n(e,r),t.point(e[0],e[1])}function r(){x=0/0,S.point=i,t.lineStart()}function i(e,r){var i=se([e,r]),o=n(e,r);u(x,M,y,_,b,w,x=o[0],M=o[1],y=e,_=i[0],b=i[1],w=i[2],a,t),t.point(x,M)}function o(){S.point=e,t.lineEnd()}function c(){r(),S.point=s,S.lineEnd=l}function s(n,t){i(f=n,h=t),g=x,p=M,v=_,d=b,m=w,S.point=i}function l(){u(x,M,y,_,b,w,g,p,f,v,d,m,a,t),S.lineEnd=o,o()}var 
f,h,g,p,v,d,m,y,x,M,_,b,w,S={point:e,lineStart:r,lineEnd:o,polygonStart:function(){t.polygonStart(),S.lineStart=c},polygonEnd:function(){t.polygonEnd(),S.lineStart=r}};return S}function u(t,e,r,a,c,s,l,f,h,g,p,v,d,m){var y=l-t,x=f-e,M=y*y+x*x;if(M>4*i&&d--){var _=a+g,b=c+p,w=s+v,S=Math.sqrt(_*_+b*b+w*w),k=Math.asin(w/=S),E=oa(oa(w)-1)<Aa||oa(r-h)<Aa?(r+h)/2:Math.atan2(b,_),A=n(E,k),C=A[0],N=A[1],L=C-t,T=N-e,q=x*L-y*T;(q*q/M>i||oa((y*L+x*T)/M-.5)>.3||o>a*g+c*p+s*v)&&(u(t,e,r,a,c,s,C,N,E,_/=S,b/=S,w,d,m),m.point(C,N),u(C,N,E,_,b,w,l,f,h,g,p,v,d,m))}}var i=.5,o=Math.cos(30*Na),a=16;return t.precision=function(n){return arguments.length?(a=(i=n*n)>0&&16,t):Math.sqrt(i)},t}function Je(n){var t=We(function(t,e){return n([t*La,e*La])});return function(n){return tr(t(n))}}function Ge(n){this.stream=n}function Ke(n,t){return{point:t,sphere:function(){n.sphere()},lineStart:function(){n.lineStart()},lineEnd:function(){n.lineEnd()},polygonStart:function(){n.polygonStart()},polygonEnd:function(){n.polygonEnd()}}}function Qe(n){return nr(function(){return n})()}function nr(n){function t(n){return n=a(n[0]*Na,n[1]*Na),[n[0]*h+c,s-n[1]*h]}function e(n){return n=a.invert((n[0]-c)/h,(s-n[1])/h),n&&[n[0]*La,n[1]*La]}function r(){a=Ue(o=ur(m,y,x),i);var n=i(v,d);return c=g-n[0]*h,s=p+n[1]*h,u()}function u(){return l&&(l.valid=!1,l=null),t}var i,o,a,c,s,l,f=We(function(n,t){return n=i(n,t),[n[0]*h+c,s-n[1]*h]}),h=150,g=480,p=250,v=0,d=0,m=0,y=0,x=0,M=Ec,_=bt,b=null,w=null;return t.stream=function(n){return l&&(l.valid=!1),l=tr(M(o,f(_(n)))),l.valid=!0,l},t.clipAngle=function(n){return arguments.length?(M=null==n?(b=n,Ec):Re((b=+n)*Na),u()):b},t.clipExtent=function(n){return arguments.length?(w=n,_=n?Pe(n[0][0],n[0][1],n[1][0],n[1][1]):bt,u()):w},t.scale=function(n){return arguments.length?(h=+n,r()):h},t.translate=function(n){return arguments.length?(g=+n[0],p=+n[1],r()):[g,p]},t.center=function(n){return 
arguments.length?(v=n[0]%360*Na,d=n[1]%360*Na,r()):[v*La,d*La]},t.rotate=function(n){return arguments.length?(m=n[0]%360*Na,y=n[1]%360*Na,x=n.length>2?n[2]%360*Na:0,r()):[m*La,y*La,x*La]},Xo.rebind(t,f,"precision"),function(){return i=n.apply(this,arguments),t.invert=i.invert&&e,r()}}function tr(n){return Ke(n,function(t,e){n.point(t*Na,e*Na)})}function er(n,t){return[n,t]}function rr(n,t){return[n>Sa?n-ka:-Sa>n?n+ka:n,t]}function ur(n,t,e){return n?t||e?Ue(or(n),ar(t,e)):or(n):t||e?ar(t,e):rr}function ir(n){return function(t,e){return t+=n,[t>Sa?t-ka:-Sa>t?t+ka:t,e]}}function or(n){var t=ir(n);return t.invert=ir(-n),t}function ar(n,t){function e(n,t){var e=Math.cos(t),a=Math.cos(n)*e,c=Math.sin(n)*e,s=Math.sin(t),l=s*r+a*u;return[Math.atan2(c*i-l*o,a*r-s*u),X(l*i+c*o)]}var r=Math.cos(n),u=Math.sin(n),i=Math.cos(t),o=Math.sin(t);return e.invert=function(n,t){var e=Math.cos(t),a=Math.cos(n)*e,c=Math.sin(n)*e,s=Math.sin(t),l=s*i-c*o;return[Math.atan2(c*i+s*o,a*r+l*u),X(l*r-a*u)]},e}function cr(n,t){var e=Math.cos(n),r=Math.sin(n);return function(u,i,o,a){var c=o*t;null!=u?(u=sr(e,u),i=sr(e,i),(o>0?i>u:u>i)&&(u+=o*ka)):(u=n+o*ka,i=n-.5*c);for(var s,l=u;o>0?l>i:i>l;l-=c)a.point((s=ve([e,-r*Math.cos(l),-r*Math.sin(l)]))[0],s[1])}}function sr(n,t){var e=se(t);e[0]-=n,pe(e);var r=V(-e[1]);return((-e[2]<0?-r:r)+2*Math.PI-Aa)%(2*Math.PI)}function lr(n,t,e){var r=Xo.range(n,t-Aa,e).concat(t);return function(n){return r.map(function(t){return[n,t]})}}function fr(n,t,e){var r=Xo.range(n,t-Aa,e).concat(t);return function(n){return r.map(function(t){return[t,n]})}}function hr(n){return n.source}function gr(n){return n.target}function pr(n,t,e,r){var u=Math.cos(t),i=Math.sin(t),o=Math.cos(r),a=Math.sin(r),c=u*Math.cos(n),s=u*Math.sin(n),l=o*Math.cos(e),f=o*Math.sin(e),h=2*Math.asin(Math.sqrt(J(r-t)+u*o*J(e-n))),g=1/Math.sin(h),p=h?function(n){var 
t=Math.sin(n*=h)*g,e=Math.sin(h-n)*g,r=e*c+t*l,u=e*s+t*f,o=e*i+t*a;return[Math.atan2(u,r)*La,Math.atan2(o,Math.sqrt(r*r+u*u))*La]}:function(){return[n*La,t*La]};return p.distance=h,p}function vr(){function n(n,u){var i=Math.sin(u*=Na),o=Math.cos(u),a=oa((n*=Na)-t),c=Math.cos(a);Uc+=Math.atan2(Math.sqrt((a=o*Math.sin(a))*a+(a=r*i-e*o*c)*a),e*i+r*o*c),t=n,e=i,r=o}var t,e,r;jc.point=function(u,i){t=u*Na,e=Math.sin(i*=Na),r=Math.cos(i),jc.point=n},jc.lineEnd=function(){jc.point=jc.lineEnd=g}}function dr(n,t){function e(t,e){var r=Math.cos(t),u=Math.cos(e),i=n(r*u);return[i*u*Math.sin(t),i*Math.sin(e)]}return e.invert=function(n,e){var r=Math.sqrt(n*n+e*e),u=t(r),i=Math.sin(u),o=Math.cos(u);return[Math.atan2(n*i,r*o),Math.asin(r&&e*i/r)]},e}function mr(n,t){function e(n,t){var e=oa(oa(t)-Ea)<Aa?0:o/Math.pow(u(t),i);return[e*Math.sin(i*n),o-e*Math.cos(i*n)]}var r=Math.cos(n),u=function(n){return Math.tan(Sa/4+n/2)},i=n===t?Math.sin(n):Math.log(r/Math.cos(t))/Math.log(u(t)/u(n)),o=r*Math.pow(u(n),i)/i;return i?(e.invert=function(n,t){var e=o-t,r=I(i)*Math.sqrt(n*n+e*e);return[Math.atan2(n,e)/i,2*Math.atan(Math.pow(o/r,1/i))-Ea]},e):xr}function yr(n,t){function e(n,t){var e=i-t;return[e*Math.sin(u*n),i-e*Math.cos(u*n)]}var r=Math.cos(n),u=n===t?Math.sin(n):(r-Math.cos(t))/(t-n),i=r/u+n;return oa(u)<Aa?er:(e.invert=function(n,t){var e=i-t;return[Math.atan2(n,e)/u,i-I(u)*Math.sqrt(n*n+e*e)]},e)}function xr(n,t){return[n,Math.log(Math.tan(Sa/4+t/2))]}function Mr(n){var t,e=Qe(n),r=e.scale,u=e.translate,i=e.clipExtent;return e.scale=function(){var n=r.apply(e,arguments);return n===e?t?e.clipExtent(null):e:n},e.translate=function(){var n=u.apply(e,arguments);return n===e?t?e.clipExtent(null):e:n},e.clipExtent=function(n){var o=i.apply(e,arguments);if(o===e){if(t=null==n){var a=Sa*r(),c=u();i([[c[0]-a,c[1]-a],[c[0]+a,c[1]+a]])}}else t&&(o=null);return o},e.clipExtent(null)}function _r(n,t){return[Math.log(Math.tan(Sa/4+t/2)),-n]}function br(n){return n[0]}function wr(n){return 
n[1]}function Sr(n){for(var t=n.length,e=[0,1],r=2,u=2;t>u;u++){for(;r>1&&Z(n[e[r-2]],n[e[r-1]],n[u])<=0;)--r;e[r++]=u}return e.slice(0,r)}function kr(n,t){return n[0]-t[0]||n[1]-t[1]}function Er(n,t,e){return(e[0]-t[0])*(n[1]-t[1])<(e[1]-t[1])*(n[0]-t[0])}function Ar(n,t,e,r){var u=n[0],i=e[0],o=t[0]-u,a=r[0]-i,c=n[1],s=e[1],l=t[1]-c,f=r[1]-s,h=(a*(c-s)-f*(u-i))/(f*o-a*l);return[u+h*o,c+h*l]}function Cr(n){var t=n[0],e=n[n.length-1];return!(t[0]-e[0]||t[1]-e[1])}function Nr(){Jr(this),this.edge=this.site=this.circle=null}function Lr(n){var t=Jc.pop()||new Nr;return t.site=n,t}function Tr(n){Or(n),$c.remove(n),Jc.push(n),Jr(n)}function qr(n){var t=n.circle,e=t.x,r=t.cy,u={x:e,y:r},i=n.P,o=n.N,a=[n];Tr(n);for(var c=i;c.circle&&oa(e-c.circle.x)<Aa&&oa(r-c.circle.cy)<Aa;)i=c.P,a.unshift(c),Tr(c),c=i;a.unshift(c),Or(c);for(var s=o;s.circle&&oa(e-s.circle.x)<Aa&&oa(r-s.circle.cy)<Aa;)o=s.N,a.push(s),Tr(s),s=o;a.push(s),Or(s);var l,f=a.length;for(l=1;f>l;++l)s=a[l],c=a[l-1],$r(s.edge,c.site,s.site,u);c=a[0],s=a[f-1],s.edge=Vr(c.site,s.site,null,u),Fr(c),Fr(s)}function zr(n){for(var t,e,r,u,i=n.x,o=n.y,a=$c._;a;)if(r=Rr(a,o)-i,r>Aa)a=a.L;else{if(u=i-Dr(a,o),!(u>Aa)){r>-Aa?(t=a.P,e=a):u>-Aa?(t=a,e=a.N):t=e=a;break}if(!a.R){t=a;break}a=a.R}var c=Lr(n);if($c.insert(t,c),t||e){if(t===e)return Or(t),e=Lr(t.site),$c.insert(c,e),c.edge=e.edge=Vr(t.site,c.site),Fr(t),Fr(e),void 0;if(!e)return c.edge=Vr(t.site,c.site),void 0;Or(t),Or(e);var s=t.site,l=s.x,f=s.y,h=n.x-l,g=n.y-f,p=e.site,v=p.x-l,d=p.y-f,m=2*(h*d-g*v),y=h*h+g*g,x=v*v+d*d,M={x:(d*y-g*x)/m+l,y:(h*x-v*y)/m+f};$r(e.edge,s,p,M),c.edge=Vr(s,n,null,M),e.edge=Vr(n,p,null,M),Fr(t),Fr(e)}}function Rr(n,t){var e=n.site,r=e.x,u=e.y,i=u-t;if(!i)return r;var o=n.P;if(!o)return-1/0;e=o.site;var a=e.x,c=e.y,s=c-t;if(!s)return a;var l=a-r,f=1/i-1/s,h=l/s;return f?(-h+Math.sqrt(h*h-2*f*(l*l/(-2*s)-c+s/2+u-i/2)))/f+r:(r+a)/2}function Dr(n,t){var e=n.N;if(e)return Rr(e,t);var r=n.site;return r.y===t?r.x:1/0}function 
Pr(n){this.site=n,this.edges=[]}function Ur(n){for(var t,e,r,u,i,o,a,c,s,l,f=n[0][0],h=n[1][0],g=n[0][1],p=n[1][1],v=Xc,d=v.length;d--;)if(i=v[d],i&&i.prepare())for(a=i.edges,c=a.length,o=0;c>o;)l=a[o].end(),r=l.x,u=l.y,s=a[++o%c].start(),t=s.x,e=s.y,(oa(r-t)>Aa||oa(u-e)>Aa)&&(a.splice(o,0,new Br(Xr(i.site,l,oa(r-f)<Aa&&p-u>Aa?{x:f,y:oa(t-f)<Aa?e:p}:oa(u-p)<Aa&&h-r>Aa?{x:oa(e-p)<Aa?t:h,y:p}:oa(r-h)<Aa&&u-g>Aa?{x:h,y:oa(t-h)<Aa?e:g}:oa(u-g)<Aa&&r-f>Aa?{x:oa(e-g)<Aa?t:f,y:g}:null),i.site,null)),++c)}function jr(n,t){return t.angle-n.angle}function Hr(){Jr(this),this.x=this.y=this.arc=this.site=this.cy=null}function Fr(n){var t=n.P,e=n.N;if(t&&e){var r=t.site,u=n.site,i=e.site;if(r!==i){var o=u.x,a=u.y,c=r.x-o,s=r.y-a,l=i.x-o,f=i.y-a,h=2*(c*f-s*l);if(!(h>=-Ca)){var g=c*c+s*s,p=l*l+f*f,v=(f*g-s*p)/h,d=(c*p-l*g)/h,f=d+a,m=Gc.pop()||new Hr;m.arc=n,m.site=u,m.x=v+o,m.y=f+Math.sqrt(v*v+d*d),m.cy=f,n.circle=m;for(var y=null,x=Wc._;x;)if(m.y<x.y||m.y===x.y&&m.x<=x.x){if(!x.L){y=x.P;break}x=x.L}else{if(!x.R){y=x;break}x=x.R}Wc.insert(y,m),y||(Bc=m)}}}}function Or(n){var t=n.circle;t&&(t.P||(Bc=t.N),Wc.remove(t),Gc.push(t),Jr(t),n.circle=null)}function Yr(n){for(var t,e=Vc,r=De(n[0][0],n[0][1],n[1][0],n[1][1]),u=e.length;u--;)t=e[u],(!Ir(t,n)||!r(t)||oa(t.a.x-t.b.x)<Aa&&oa(t.a.y-t.b.y)<Aa)&&(t.a=t.b=null,e.splice(u,1))}function Ir(n,t){var e=n.b;if(e)return!0;var r,u,i=n.a,o=t[0][0],a=t[1][0],c=t[0][1],s=t[1][1],l=n.l,f=n.r,h=l.x,g=l.y,p=f.x,v=f.y,d=(h+p)/2,m=(g+v)/2;if(v===g){if(o>d||d>=a)return;if(h>p){if(i){if(i.y>=s)return}else i={x:d,y:c};e={x:d,y:s}}else{if(i){if(i.y<c)return}else i={x:d,y:s};e={x:d,y:c}}}else if(r=(h-p)/(v-g),u=m-r*d,-1>r||r>1)if(h>p){if(i){if(i.y>=s)return}else i={x:(c-u)/r,y:c};e={x:(s-u)/r,y:s}}else{if(i){if(i.y<c)return}else i={x:(s-u)/r,y:s};e={x:(c-u)/r,y:c}}else if(v>g){if(i){if(i.x>=a)return}else i={x:o,y:r*o+u};e={x:a,y:r*a+u}}else{if(i){if(i.x<o)return}else i={x:a,y:r*a+u};e={x:o,y:r*o+u}}return n.a=i,n.b=e,!0}function 
Zr(n,t){this.l=n,this.r=t,this.a=this.b=null}function Vr(n,t,e,r){var u=new Zr(n,t);return Vc.push(u),e&&$r(u,n,t,e),r&&$r(u,t,n,r),Xc[n.i].edges.push(new Br(u,n,t)),Xc[t.i].edges.push(new Br(u,t,n)),u}function Xr(n,t,e){var r=new Zr(n,null);return r.a=t,r.b=e,Vc.push(r),r}function $r(n,t,e,r){n.a||n.b?n.l===e?n.b=r:n.a=r:(n.a=r,n.l=t,n.r=e)}function Br(n,t,e){var r=n.a,u=n.b;this.edge=n,this.site=t,this.angle=e?Math.atan2(e.y-t.y,e.x-t.x):n.l===t?Math.atan2(u.x-r.x,r.y-u.y):Math.atan2(r.x-u.x,u.y-r.y)}function Wr(){this._=null}function Jr(n){n.U=n.C=n.L=n.R=n.P=n.N=null}function Gr(n,t){var e=t,r=t.R,u=e.U;u?u.L===e?u.L=r:u.R=r:n._=r,r.U=u,e.U=r,e.R=r.L,e.R&&(e.R.U=e),r.L=e}function Kr(n,t){var e=t,r=t.L,u=e.U;u?u.L===e?u.L=r:u.R=r:n._=r,r.U=u,e.U=r,e.L=r.R,e.L&&(e.L.U=e),r.R=e}function Qr(n){for(;n.L;)n=n.L;return n}function nu(n,t){var e,r,u,i=n.sort(tu).pop();for(Vc=[],Xc=new Array(n.length),$c=new Wr,Wc=new Wr;;)if(u=Bc,i&&(!u||i.y<u.y||i.y===u.y&&i.x<u.x))(i.x!==e||i.y!==r)&&(Xc[i.i]=new Pr(i),zr(i),e=i.x,r=i.y),i=n.pop();else{if(!u)break;qr(u.arc)}t&&(Yr(t),Ur(t));var o={cells:Xc,edges:Vc};return $c=Wc=Vc=Xc=null,o}function tu(n,t){return t.y-n.y||t.x-n.x}function eu(n,t,e){return(n.x-e.x)*(t.y-n.y)-(n.x-t.x)*(e.y-n.y)}function ru(n){return n.x}function uu(n){return n.y}function iu(){return{leaf:!0,nodes:[],point:null,x:null,y:null}}function ou(n,t,e,r,u,i){if(!n(t,e,r,u,i)){var o=.5*(e+u),a=.5*(r+i),c=t.nodes;c[0]&&ou(n,c[0],e,r,o,a),c[1]&&ou(n,c[1],o,r,u,a),c[2]&&ou(n,c[2],e,a,o,i),c[3]&&ou(n,c[3],o,a,u,i)}}function au(n,t){n=Xo.rgb(n),t=Xo.rgb(t);var e=n.r,r=n.g,u=n.b,i=t.r-e,o=t.g-r,a=t.b-u;return function(n){return"#"+vt(Math.round(e+i*n))+vt(Math.round(r+o*n))+vt(Math.round(u+a*n))}}function cu(n,t){var e,r={},u={};for(e in n)e in t?r[e]=fu(n[e],t[e]):u[e]=n[e];for(e in t)e in n||(u[e]=t[e]);return function(n){for(e in r)u[e]=r[e](n);return u}}function su(n,t){return t-=n=+n,function(e){return n+t*e}}function lu(n,t){var 
e,r,u,i,o,a=0,c=0,s=[],l=[];for(n+="",t+="",Qc.lastIndex=0,r=0;e=Qc.exec(t);++r)e.index&&s.push(t.substring(a,c=e.index)),l.push({i:s.length,x:e[0]}),s.push(null),a=Qc.lastIndex;for(a<t.length&&s.push(t.substring(a)),r=0,i=l.length;(e=Qc.exec(n))&&i>r;++r)if(o=l[r],o.x==e[0]){if(o.i)if(null==s[o.i+1])for(s[o.i-1]+=o.x,s.splice(o.i,1),u=r+1;i>u;++u)l[u].i--;else for(s[o.i-1]+=o.x+s[o.i+1],s.splice(o.i,2),u=r+1;i>u;++u)l[u].i-=2;else if(null==s[o.i+1])s[o.i]=o.x;else for(s[o.i]=o.x+s[o.i+1],s.splice(o.i+1,1),u=r+1;i>u;++u)l[u].i--;l.splice(r,1),i--,r--}else o.x=su(parseFloat(e[0]),parseFloat(o.x));for(;i>r;)o=l.pop(),null==s[o.i+1]?s[o.i]=o.x:(s[o.i]=o.x+s[o.i+1],s.splice(o.i+1,1)),i--;return 1===s.length?null==s[0]?(o=l[0].x,function(n){return o(n)+""}):function(){return t}:function(n){for(r=0;i>r;++r)s[(o=l[r]).i]=o.x(n);return s.join("")}}function fu(n,t){for(var e,r=Xo.interpolators.length;--r>=0&&!(e=Xo.interpolators[r](n,t)););return e}function hu(n,t){var e,r=[],u=[],i=n.length,o=t.length,a=Math.min(n.length,t.length);for(e=0;a>e;++e)r.push(fu(n[e],t[e]));for(;i>e;++e)u[e]=n[e];for(;o>e;++e)u[e]=t[e];return function(n){for(e=0;a>e;++e)u[e]=r[e](n);return u}}function gu(n){return function(t){return 0>=t?0:t>=1?1:n(t)}}function pu(n){return function(t){return 1-n(1-t)}}function vu(n){return function(t){return.5*(.5>t?n(2*t):2-n(2-2*t))}}function du(n){return n*n}function mu(n){return n*n*n}function yu(n){if(0>=n)return 0;if(n>=1)return 1;var t=n*n,e=t*n;return 4*(.5>n?e:3*(n-t)+e-.75)}function xu(n){return function(t){return Math.pow(t,n)}}function Mu(n){return 1-Math.cos(n*Ea)}function _u(n){return Math.pow(2,10*(n-1))}function bu(n){return 1-Math.sqrt(1-n*n)}function wu(n,t){var e;return arguments.length<2&&(t=.45),arguments.length?e=t/ka*Math.asin(1/n):(n=1,e=t/4),function(r){return 1+n*Math.pow(2,-10*r)*Math.sin((r-e)*ka/t)}}function Su(n){return n||(n=1.70158),function(t){return t*t*((n+1)*t-n)}}function ku(n){return 
1/2.75>n?7.5625*n*n:2/2.75>n?7.5625*(n-=1.5/2.75)*n+.75:2.5/2.75>n?7.5625*(n-=2.25/2.75)*n+.9375:7.5625*(n-=2.625/2.75)*n+.984375}function Eu(n,t){n=Xo.hcl(n),t=Xo.hcl(t);var e=n.h,r=n.c,u=n.l,i=t.h-e,o=t.c-r,a=t.l-u;return isNaN(o)&&(o=0,r=isNaN(r)?t.c:r),isNaN(i)?(i=0,e=isNaN(e)?t.h:e):i>180?i-=360:-180>i&&(i+=360),function(n){return rt(e+i*n,r+o*n,u+a*n)+""}}function Au(n,t){n=Xo.hsl(n),t=Xo.hsl(t);var e=n.h,r=n.s,u=n.l,i=t.h-e,o=t.s-r,a=t.l-u;return isNaN(o)&&(o=0,r=isNaN(r)?t.s:r),isNaN(i)?(i=0,e=isNaN(e)?t.h:e):i>180?i-=360:-180>i&&(i+=360),function(n){return nt(e+i*n,r+o*n,u+a*n)+""}}function Cu(n,t){n=Xo.lab(n),t=Xo.lab(t);var e=n.l,r=n.a,u=n.b,i=t.l-e,o=t.a-r,a=t.b-u;return function(n){return ot(e+i*n,r+o*n,u+a*n)+""}}function Nu(n,t){return t-=n,function(e){return Math.round(n+t*e)}}function Lu(n){var t=[n.a,n.b],e=[n.c,n.d],r=qu(t),u=Tu(t,e),i=qu(zu(e,t,-u))||0;t[0]*e[1]<e[0]*t[1]&&(t[0]*=-1,t[1]*=-1,r*=-1,u*=-1),this.rotate=(r?Math.atan2(t[1],t[0]):Math.atan2(-e[0],e[1]))*La,this.translate=[n.e,n.f],this.scale=[r,i],this.skew=i?Math.atan2(u,i)*La:0}function Tu(n,t){return n[0]*t[0]+n[1]*t[1]}function qu(n){var t=Math.sqrt(Tu(n,n));return t&&(n[0]/=t,n[1]/=t),t}function zu(n,t,e){return n[0]+=e*t[0],n[1]+=e*t[1],n}function Ru(n,t){var e,r=[],u=[],i=Xo.transform(n),o=Xo.transform(t),a=i.translate,c=o.translate,s=i.rotate,l=o.rotate,f=i.skew,h=o.skew,g=i.scale,p=o.scale;return 
a[0]!=c[0]||a[1]!=c[1]?(r.push("translate(",null,",",null,")"),u.push({i:1,x:su(a[0],c[0])},{i:3,x:su(a[1],c[1])})):c[0]||c[1]?r.push("translate("+c+")"):r.push(""),s!=l?(s-l>180?l+=360:l-s>180&&(s+=360),u.push({i:r.push(r.pop()+"rotate(",null,")")-2,x:su(s,l)})):l&&r.push(r.pop()+"rotate("+l+")"),f!=h?u.push({i:r.push(r.pop()+"skewX(",null,")")-2,x:su(f,h)}):h&&r.push(r.pop()+"skewX("+h+")"),g[0]!=p[0]||g[1]!=p[1]?(e=r.push(r.pop()+"scale(",null,",",null,")"),u.push({i:e-4,x:su(g[0],p[0])},{i:e-2,x:su(g[1],p[1])})):(1!=p[0]||1!=p[1])&&r.push(r.pop()+"scale("+p+")"),e=u.length,function(n){for(var t,i=-1;++i<e;)r[(t=u[i]).i]=t.x(n);return r.join("")}}function Du(n,t){return t=t-(n=+n)?1/(t-n):0,function(e){return(e-n)*t}}function Pu(n,t){return t=t-(n=+n)?1/(t-n):0,function(e){return Math.max(0,Math.min(1,(e-n)*t))}}function Uu(n){for(var t=n.source,e=n.target,r=Hu(t,e),u=[t];t!==r;)t=t.parent,u.push(t);for(var i=u.length;e!==r;)u.splice(i,0,e),e=e.parent;return u}function ju(n){for(var t=[],e=n.parent;null!=e;)t.push(n),n=e,e=e.parent;return t.push(n),t}function Hu(n,t){if(n===t)return n;for(var e=ju(n),r=ju(t),u=e.pop(),i=r.pop(),o=null;u===i;)o=u,u=e.pop(),i=r.pop();return o}function Fu(n){n.fixed|=2}function Ou(n){n.fixed&=-7}function Yu(n){n.fixed|=4,n.px=n.x,n.py=n.y}function Iu(n){n.fixed&=-5}function Zu(n,t,e){var r=0,u=0;if(n.charge=0,!n.leaf)for(var i,o=n.nodes,a=o.length,c=-1;++c<a;)i=o[c],null!=i&&(Zu(i,t,e),n.charge+=i.charge,r+=i.charge*i.cx,u+=i.charge*i.cy);if(n.point){n.leaf||(n.point.x+=Math.random()-.5,n.point.y+=Math.random()-.5);var s=t*e[n.point.index];n.charge+=n.pointCharge=s,r+=s*n.point.x,u+=s*n.point.y}n.cx=r/n.charge,n.cy=u/n.charge}function Vu(n,t){return Xo.rebind(n,t,"sort","children","value"),n.nodes=n,n.links=Wu,n}function Xu(n){return n.children}function $u(n){return n.value}function Bu(n,t){return t.value-n.value}function Wu(n){return 
Xo.merge(n.map(function(n){return(n.children||[]).map(function(t){return{source:n,target:t}})}))}function Ju(n){return n.x}function Gu(n){return n.y}function Ku(n,t,e){n.y0=t,n.y=e}function Qu(n){return Xo.range(n.length)}function ni(n){for(var t=-1,e=n[0].length,r=[];++t<e;)r[t]=0;return r}function ti(n){for(var t,e=1,r=0,u=n[0][1],i=n.length;i>e;++e)(t=n[e][1])>u&&(r=e,u=t);return r}function ei(n){return n.reduce(ri,0)}function ri(n,t){return n+t[1]}function ui(n,t){return ii(n,Math.ceil(Math.log(t.length)/Math.LN2+1))}function ii(n,t){for(var e=-1,r=+n[0],u=(n[1]-r)/t,i=[];++e<=t;)i[e]=u*e+r;return i}function oi(n){return[Xo.min(n),Xo.max(n)]}function ai(n,t){return n.parent==t.parent?1:2}function ci(n){var t=n.children;return t&&t.length?t[0]:n._tree.thread}function si(n){var t,e=n.children;return e&&(t=e.length)?e[t-1]:n._tree.thread}function li(n,t){var e=n.children;if(e&&(u=e.length))for(var r,u,i=-1;++i<u;)t(r=li(e[i],t),n)>0&&(n=r);return n}function fi(n,t){return n.x-t.x}function hi(n,t){return t.x-n.x}function gi(n,t){return n.depth-t.depth}function pi(n,t){function e(n,r){var u=n.children;if(u&&(o=u.length))for(var i,o,a=null,c=-1;++c<o;)i=u[c],e(i,a),a=i;t(n,r)}e(n,null)}function vi(n){for(var t,e=0,r=0,u=n.children,i=u.length;--i>=0;)t=u[i]._tree,t.prelim+=e,t.mod+=e,e+=t.shift+(r+=t.change)}function di(n,t,e){n=n._tree,t=t._tree;var r=e/(t.number-n.number);n.change+=r,t.change-=r,t.shift+=e,t.prelim+=e,t.mod+=e}function mi(n,t,e){return n._tree.ancestor.parent==t.parent?n._tree.ancestor:e}function yi(n,t){return n.value-t.value}function xi(n,t){var e=n._pack_next;n._pack_next=t,t._pack_prev=n,t._pack_next=e,e._pack_prev=t}function Mi(n,t){n._pack_next=t,t._pack_prev=n}function _i(n,t){var e=t.x-n.x,r=t.y-n.y,u=n.r+t.r;return.999*u*u>e*e+r*r}function bi(n){function t(n){l=Math.min(n.x-n.r,l),f=Math.max(n.x+n.r,f),h=Math.min(n.y-n.r,h),g=Math.max(n.y+n.r,g)}if((e=n.children)&&(s=e.length)){var 
e,r,u,i,o,a,c,s,l=1/0,f=-1/0,h=1/0,g=-1/0;if(e.forEach(wi),r=e[0],r.x=-r.r,r.y=0,t(r),s>1&&(u=e[1],u.x=u.r,u.y=0,t(u),s>2))for(i=e[2],Ei(r,u,i),t(i),xi(r,i),r._pack_prev=i,xi(i,u),u=r._pack_next,o=3;s>o;o++){Ei(r,u,i=e[o]);var p=0,v=1,d=1;for(a=u._pack_next;a!==u;a=a._pack_next,v++)if(_i(a,i)){p=1;break}if(1==p)for(c=r._pack_prev;c!==a._pack_prev&&!_i(c,i);c=c._pack_prev,d++);p?(d>v||v==d&&u.r<r.r?Mi(r,u=a):Mi(r=c,u),o--):(xi(r,i),u=i,t(i))}var m=(l+f)/2,y=(h+g)/2,x=0;for(o=0;s>o;o++)i=e[o],i.x-=m,i.y-=y,x=Math.max(x,i.r+Math.sqrt(i.x*i.x+i.y*i.y));n.r=x,e.forEach(Si)}}function wi(n){n._pack_next=n._pack_prev=n}function Si(n){delete n._pack_next,delete n._pack_prev}function ki(n,t,e,r){var u=n.children;if(n.x=t+=r*n.x,n.y=e+=r*n.y,n.r*=r,u)for(var i=-1,o=u.length;++i<o;)ki(u[i],t,e,r)}function Ei(n,t,e){var r=n.r+e.r,u=t.x-n.x,i=t.y-n.y;if(r&&(u||i)){var o=t.r+e.r,a=u*u+i*i;o*=o,r*=r;var c=.5+(r-o)/(2*a),s=Math.sqrt(Math.max(0,2*o*(r+a)-(r-=a)*r-o*o))/(2*a);e.x=n.x+c*u+s*i,e.y=n.y+c*i-s*u}else e.x=n.x+r,e.y=n.y}function Ai(n){return 1+Xo.max(n,function(n){return n.y})}function Ci(n){return n.reduce(function(n,t){return n+t.x},0)/n.length}function Ni(n){var t=n.children;return t&&t.length?Ni(t[0]):n}function Li(n){var t,e=n.children;return e&&(t=e.length)?Li(e[t-1]):n}function Ti(n){return{x:n.x,y:n.y,dx:n.dx,dy:n.dy}}function qi(n,t){var e=n.x+t[3],r=n.y+t[0],u=n.dx-t[1]-t[3],i=n.dy-t[0]-t[2];return 0>u&&(e+=u/2,u=0),0>i&&(r+=i/2,i=0),{x:e,y:r,dx:u,dy:i}}function zi(n){var t=n[0],e=n[n.length-1];return e>t?[t,e]:[e,t]}function Ri(n){return n.rangeExtent?n.rangeExtent():zi(n.range())}function Di(n,t,e,r){var u=e(n[0],n[1]),i=r(t[0],t[1]);return function(n){return i(u(n))}}function Pi(n,t){var e,r=0,u=n.length-1,i=n[r],o=n[u];return i>o&&(e=r,r=u,u=e,e=i,i=o,o=e),n[r]=t.floor(i),n[u]=t.ceil(o),n}function Ui(n){return n?{floor:function(t){return Math.floor(t/n)*n},ceil:function(t){return Math.ceil(t/n)*n}}:ls}function ji(n,t,e,r){var 
u=[],i=[],o=0,a=Math.min(n.length,t.length)-1;for(n[a]<n[0]&&(n=n.slice().reverse(),t=t.slice().reverse());++o<=a;)u.push(e(n[o-1],n[o])),i.push(r(t[o-1],t[o]));return function(t){var e=Xo.bisect(n,t,1,a)-1;return i[e](u[e](t))}}function Hi(n,t,e,r){function u(){var u=Math.min(n.length,t.length)>2?ji:Di,c=r?Pu:Du;return o=u(n,t,c,e),a=u(t,n,c,fu),i}function i(n){return o(n)}var o,a;return i.invert=function(n){return a(n)},i.domain=function(t){return arguments.length?(n=t.map(Number),u()):n},i.range=function(n){return arguments.length?(t=n,u()):t},i.rangeRound=function(n){return i.range(n).interpolate(Nu)},i.clamp=function(n){return arguments.length?(r=n,u()):r},i.interpolate=function(n){return arguments.length?(e=n,u()):e},i.ticks=function(t){return Ii(n,t)},i.tickFormat=function(t,e){return Zi(n,t,e)},i.nice=function(t){return Oi(n,t),u()},i.copy=function(){return Hi(n,t,e,r)},u()}function Fi(n,t){return Xo.rebind(n,t,"range","rangeRound","interpolate","clamp")}function Oi(n,t){return Pi(n,Ui(Yi(n,t)[2]))}function Yi(n,t){null==t&&(t=10);var e=zi(n),r=e[1]-e[0],u=Math.pow(10,Math.floor(Math.log(r/t)/Math.LN10)),i=t/r*u;return.15>=i?u*=10:.35>=i?u*=5:.75>=i&&(u*=2),e[0]=Math.ceil(e[0]/u)*u,e[1]=Math.floor(e[1]/u)*u+.5*u,e[2]=u,e}function Ii(n,t){return Xo.range.apply(Xo,Yi(n,t))}function Zi(n,t,e){var r=Yi(n,t);return Xo.format(e?e.replace(Qa,function(n,t,e,u,i,o,a,c,s,l){return[t,e,u,i,o,a,c,s||"."+Xi(l,r),l].join("")}):",."+Vi(r[2])+"f")}function Vi(n){return-Math.floor(Math.log(n)/Math.LN10+.01)}function Xi(n,t){var e=Vi(t[2]);return n in fs?Math.abs(e-Vi(Math.max(Math.abs(t[0]),Math.abs(t[1]))))+ +("e"!==n):e-2*("%"===n)}function $i(n,t,e,r){function u(n){return(e?Math.log(0>n?0:n):-Math.log(n>0?0:-n))/Math.log(t)}function i(n){return e?Math.pow(t,n):-Math.pow(t,-n)}function o(t){return n(u(t))}return o.invert=function(t){return i(n.invert(t))},o.domain=function(t){return 
arguments.length?(e=t[0]>=0,n.domain((r=t.map(Number)).map(u)),o):r},o.base=function(e){return arguments.length?(t=+e,n.domain(r.map(u)),o):t},o.nice=function(){var t=Pi(r.map(u),e?Math:gs);return n.domain(t),r=t.map(i),o},o.ticks=function(){var n=zi(r),o=[],a=n[0],c=n[1],s=Math.floor(u(a)),l=Math.ceil(u(c)),f=t%1?2:t;if(isFinite(l-s)){if(e){for(;l>s;s++)for(var h=1;f>h;h++)o.push(i(s)*h);o.push(i(s))}else for(o.push(i(s));s++<l;)for(var h=f-1;h>0;h--)o.push(i(s)*h);for(s=0;o[s]<a;s++);for(l=o.length;o[l-1]>c;l--);o=o.slice(s,l)}return o},o.tickFormat=function(n,t){if(!arguments.length)return hs;arguments.length<2?t=hs:"function"!=typeof t&&(t=Xo.format(t));var r,a=Math.max(.1,n/o.ticks().length),c=e?(r=1e-12,Math.ceil):(r=-1e-12,Math.floor);return function(n){return n/i(c(u(n)+r))<=a?t(n):""}},o.copy=function(){return $i(n.copy(),t,e,r)},Fi(o,n)}function Bi(n,t,e){function r(t){return n(u(t))}var u=Wi(t),i=Wi(1/t);return r.invert=function(t){return i(n.invert(t))},r.domain=function(t){return arguments.length?(n.domain((e=t.map(Number)).map(u)),r):e},r.ticks=function(n){return Ii(e,n)},r.tickFormat=function(n,t){return Zi(e,n,t)},r.nice=function(n){return r.domain(Oi(e,n))},r.exponent=function(o){return arguments.length?(u=Wi(t=o),i=Wi(1/t),n.domain(e.map(u)),r):t},r.copy=function(){return Bi(n.copy(),t,e)},Fi(r,n)}function Wi(n){return function(t){return 0>t?-Math.pow(-t,n):Math.pow(t,n)}}function Ji(n,t){function e(e){return o[((i.get(e)||"range"===t.t&&i.set(e,n.push(e)))-1)%o.length]}function r(t,e){return Xo.range(n.length).map(function(n){return t+e*n})}var i,o,a;return e.domain=function(r){if(!arguments.length)return n;n=[],i=new u;for(var o,a=-1,c=r.length;++a<c;)i.has(o=r[a])||i.set(o,n.push(o));return e[t.t].apply(e,t.a)},e.range=function(n){return arguments.length?(o=n,a=0,t={t:"range",a:arguments},e):o},e.rangePoints=function(u,i){arguments.length<2&&(i=0);var c=u[0],s=u[1],l=(s-c)/(Math.max(1,n.length-1)+i);return 
o=r(n.length<2?(c+s)/2:c+l*i/2,l),a=0,t={t:"rangePoints",a:arguments},e},e.rangeBands=function(u,i,c){arguments.length<2&&(i=0),arguments.length<3&&(c=i);var s=u[1]<u[0],l=u[s-0],f=u[1-s],h=(f-l)/(n.length-i+2*c);return o=r(l+h*c,h),s&&o.reverse(),a=h*(1-i),t={t:"rangeBands",a:arguments},e},e.rangeRoundBands=function(u,i,c){arguments.length<2&&(i=0),arguments.length<3&&(c=i);var s=u[1]<u[0],l=u[s-0],f=u[1-s],h=Math.floor((f-l)/(n.length-i+2*c)),g=f-l-(n.length-i)*h;return o=r(l+Math.round(g/2),h),s&&o.reverse(),a=Math.round(h*(1-i)),t={t:"rangeRoundBands",a:arguments},e},e.rangeBand=function(){return a},e.rangeExtent=function(){return zi(t.a[0])},e.copy=function(){return Ji(n,t)},e.domain(n)}function Gi(n,t){function e(){var e=0,i=t.length;for(u=[];++e<i;)u[e-1]=Xo.quantile(n,e/i);return r}function r(n){return isNaN(n=+n)?void 0:t[Xo.bisect(u,n)]}var u;return r.domain=function(t){return arguments.length?(n=t.filter(function(n){return!isNaN(n)}).sort(Xo.ascending),e()):n},r.range=function(n){return arguments.length?(t=n,e()):t},r.quantiles=function(){return u},r.invertExtent=function(e){return e=t.indexOf(e),0>e?[0/0,0/0]:[e>0?u[e-1]:n[0],e<u.length?u[e]:n[n.length-1]]},r.copy=function(){return Gi(n,t)},e()}function Ki(n,t,e){function r(t){return e[Math.max(0,Math.min(o,Math.floor(i*(t-n))))]}function u(){return i=e.length/(t-n),o=e.length-1,r}var i,o;return r.domain=function(e){return arguments.length?(n=+e[0],t=+e[e.length-1],u()):[n,t]},r.range=function(n){return arguments.length?(e=n,u()):e},r.invertExtent=function(t){return t=e.indexOf(t),t=0>t?0/0:t/i+n,[t,t+1/i]},r.copy=function(){return Ki(n,t,e)},u()}function Qi(n,t){function e(e){return e>=e?t[Xo.bisect(n,e)]:void 0}return e.domain=function(t){return arguments.length?(n=t,e):n},e.range=function(n){return arguments.length?(t=n,e):t},e.invertExtent=function(e){return e=t.indexOf(e),[n[e-1],n[e]]},e.copy=function(){return Qi(n,t)},e}function no(n){function t(n){return+n}return 
t.invert=t,t.domain=t.range=function(e){return arguments.length?(n=e.map(t),t):n},t.ticks=function(t){return Ii(n,t)},t.tickFormat=function(t,e){return Zi(n,t,e)},t.copy=function(){return no(n)},t}function to(n){return n.innerRadius}function eo(n){return n.outerRadius}function ro(n){return n.startAngle}function uo(n){return n.endAngle}function io(n){function t(t){function o(){s.push("M",i(n(l),a))}for(var c,s=[],l=[],f=-1,h=t.length,g=_t(e),p=_t(r);++f<h;)u.call(this,c=t[f],f)?l.push([+g.call(this,c,f),+p.call(this,c,f)]):l.length&&(o(),l=[]);return l.length&&o(),s.length?s.join(""):null}var e=br,r=wr,u=be,i=oo,o=i.key,a=.7;return t.x=function(n){return arguments.length?(e=n,t):e},t.y=function(n){return arguments.length?(r=n,t):r},t.defined=function(n){return arguments.length?(u=n,t):u},t.interpolate=function(n){return arguments.length?(o="function"==typeof n?i=n:(i=Ms.get(n)||oo).key,t):o},t.tension=function(n){return arguments.length?(a=n,t):a},t}function oo(n){return n.join("L")}function ao(n){return oo(n)+"Z"}function co(n){for(var t=0,e=n.length,r=n[0],u=[r[0],",",r[1]];++t<e;)u.push("H",(r[0]+(r=n[t])[0])/2,"V",r[1]);return e>1&&u.push("H",r[0]),u.join("")}function so(n){for(var t=0,e=n.length,r=n[0],u=[r[0],",",r[1]];++t<e;)u.push("V",(r=n[t])[1],"H",r[0]);return u.join("")}function lo(n){for(var t=0,e=n.length,r=n[0],u=[r[0],",",r[1]];++t<e;)u.push("H",(r=n[t])[0],"V",r[1]);return u.join("")}function fo(n,t){return n.length<4?oo(n):n[1]+po(n.slice(1,n.length-1),vo(n,t))}function ho(n,t){return n.length<3?oo(n):n[0]+po((n.push(n[0]),n),vo([n[n.length-2]].concat(n,[n[1]]),t))}function go(n,t){return n.length<3?oo(n):n[0]+po(n,vo(n,t))}function po(n,t){if(t.length<1||n.length!=t.length&&n.length!=t.length+2)return oo(n);var 
e=n.length!=t.length,r="",u=n[0],i=n[1],o=t[0],a=o,c=1;if(e&&(r+="Q"+(i[0]-2*o[0]/3)+","+(i[1]-2*o[1]/3)+","+i[0]+","+i[1],u=n[1],c=2),t.length>1){a=t[1],i=n[c],c++,r+="C"+(u[0]+o[0])+","+(u[1]+o[1])+","+(i[0]-a[0])+","+(i[1]-a[1])+","+i[0]+","+i[1];for(var s=2;s<t.length;s++,c++)i=n[c],a=t[s],r+="S"+(i[0]-a[0])+","+(i[1]-a[1])+","+i[0]+","+i[1]}if(e){var l=n[c];r+="Q"+(i[0]+2*a[0]/3)+","+(i[1]+2*a[1]/3)+","+l[0]+","+l[1]}return r}function vo(n,t){for(var e,r=[],u=(1-t)/2,i=n[0],o=n[1],a=1,c=n.length;++a<c;)e=i,i=o,o=n[a],r.push([u*(o[0]-e[0]),u*(o[1]-e[1])]);return r}function mo(n){if(n.length<3)return oo(n);var t=1,e=n.length,r=n[0],u=r[0],i=r[1],o=[u,u,u,(r=n[1])[0]],a=[i,i,i,r[1]],c=[u,",",i,"L",_o(ws,o),",",_o(ws,a)];for(n.push(n[e-1]);++t<=e;)r=n[t],o.shift(),o.push(r[0]),a.shift(),a.push(r[1]),bo(c,o,a);return n.pop(),c.push("L",r),c.join("")}function yo(n){if(n.length<4)return oo(n);for(var t,e=[],r=-1,u=n.length,i=[0],o=[0];++r<3;)t=n[r],i.push(t[0]),o.push(t[1]);for(e.push(_o(ws,i)+","+_o(ws,o)),--r;++r<u;)t=n[r],i.shift(),i.push(t[0]),o.shift(),o.push(t[1]),bo(e,i,o);return e.join("")}function xo(n){for(var t,e,r=-1,u=n.length,i=u+4,o=[],a=[];++r<4;)e=n[r%u],o.push(e[0]),a.push(e[1]);for(t=[_o(ws,o),",",_o(ws,a)],--r;++r<i;)e=n[r%u],o.shift(),o.push(e[0]),a.shift(),a.push(e[1]),bo(t,o,a);return t.join("")}function Mo(n,t){var e=n.length-1;if(e)for(var r,u,i=n[0][0],o=n[0][1],a=n[e][0]-i,c=n[e][1]-o,s=-1;++s<=e;)r=n[s],u=s/e,r[0]=t*r[0]+(1-t)*(i+u*a),r[1]=t*r[1]+(1-t)*(o+u*c);return mo(n)}function _o(n,t){return n[0]*t[0]+n[1]*t[1]+n[2]*t[2]+n[3]*t[3]}function bo(n,t,e){n.push("C",_o(_s,t),",",_o(_s,e),",",_o(bs,t),",",_o(bs,e),",",_o(ws,t),",",_o(ws,e))}function wo(n,t){return(t[1]-n[1])/(t[0]-n[0])}function So(n){for(var t=0,e=n.length-1,r=[],u=n[0],i=n[1],o=r[0]=wo(u,i);++t<e;)r[t]=(o+(o=wo(u=i,i=n[t+1])))/2;return r[t]=o,r}function ko(n){for(var 
t,e,r,u,i=[],o=So(n),a=-1,c=n.length-1;++a<c;)t=wo(n[a],n[a+1]),oa(t)<Aa?o[a]=o[a+1]=0:(e=o[a]/t,r=o[a+1]/t,u=e*e+r*r,u>9&&(u=3*t/Math.sqrt(u),o[a]=u*e,o[a+1]=u*r));for(a=-1;++a<=c;)u=(n[Math.min(c,a+1)][0]-n[Math.max(0,a-1)][0])/(6*(1+o[a]*o[a])),i.push([u||0,o[a]*u||0]);return i}function Eo(n){return n.length<3?oo(n):n[0]+po(n,ko(n))}function Ao(n){for(var t,e,r,u=-1,i=n.length;++u<i;)t=n[u],e=t[0],r=t[1]+ys,t[0]=e*Math.cos(r),t[1]=e*Math.sin(r);return n}function Co(n){function t(t){function c(){v.push("M",a(n(m),f),l,s(n(d.reverse()),f),"Z")}for(var h,g,p,v=[],d=[],m=[],y=-1,x=t.length,M=_t(e),_=_t(u),b=e===r?function(){return g}:_t(r),w=u===i?function(){return p}:_t(i);++y<x;)o.call(this,h=t[y],y)?(d.push([g=+M.call(this,h,y),p=+_.call(this,h,y)]),m.push([+b.call(this,h,y),+w.call(this,h,y)])):d.length&&(c(),d=[],m=[]);return d.length&&c(),v.length?v.join(""):null}var e=br,r=br,u=0,i=wr,o=be,a=oo,c=a.key,s=a,l="L",f=.7;return t.x=function(n){return arguments.length?(e=r=n,t):r},t.x0=function(n){return arguments.length?(e=n,t):e},t.x1=function(n){return arguments.length?(r=n,t):r},t.y=function(n){return arguments.length?(u=i=n,t):i},t.y0=function(n){return arguments.length?(u=n,t):u},t.y1=function(n){return arguments.length?(i=n,t):i},t.defined=function(n){return arguments.length?(o=n,t):o},t.interpolate=function(n){return arguments.length?(c="function"==typeof n?a=n:(a=Ms.get(n)||oo).key,s=a.reverse||a,l=a.closed?"M":"L",t):c},t.tension=function(n){return arguments.length?(f=n,t):f},t}function No(n){return n.radius}function Lo(n){return[n.x,n.y]}function To(n){return function(){var t=n.apply(this,arguments),e=t[0],r=t[1]+ys;return[e*Math.cos(r),e*Math.sin(r)]}}function qo(){return 64}function zo(){return"circle"}function Ro(n){var t=Math.sqrt(n/Sa);return"M0,"+t+"A"+t+","+t+" 0 1,1 0,"+-t+"A"+t+","+t+" 0 1,1 0,"+t+"Z"}function Do(n,t){return fa(n,Ns),n.id=t,n}function Po(n,t,e,r){var u=n.id;return R(n,"function"==typeof 
e?function(n,i,o){n.__transition__[u].tween.set(t,r(e.call(n,n.__data__,i,o)))}:(e=r(e),function(n){n.__transition__[u].tween.set(t,e)}))}function Uo(n){return null==n&&(n=""),function(){this.textContent=n}}function jo(n,t,e,r){var i=n.__transition__||(n.__transition__={active:0,count:0}),o=i[e];if(!o){var a=r.time;o=i[e]={tween:new u,time:a,ease:r.ease,delay:r.delay,duration:r.duration},++i.count,Xo.timer(function(r){function u(r){return i.active>e?s():(i.active=e,o.event&&o.event.start.call(n,l,t),o.tween.forEach(function(e,r){(r=r.call(n,l,t))&&v.push(r)}),Xo.timer(function(){return p.c=c(r||1)?be:c,1},0,a),void 0)}function c(r){if(i.active!==e)return s();for(var u=r/g,a=f(u),c=v.length;c>0;)v[--c].call(n,a);return u>=1?(o.event&&o.event.end.call(n,l,t),s()):void 0}function s(){return--i.count?delete i[e]:delete n.__transition__,1}var l=n.__data__,f=o.ease,h=o.delay,g=o.duration,p=Ja,v=[];return p.t=h+a,r>=h?u(r-h):(p.c=u,void 0)},0,a)}}function Ho(n,t){n.attr("transform",function(n){return"translate("+t(n)+",0)"})}function Fo(n,t){n.attr("transform",function(n){return"translate(0,"+t(n)+")"})}function Oo(n){return n.toISOString()}function Yo(n,t,e){function r(t){return n(t)}function u(n,e){var r=n[1]-n[0],u=r/e,i=Xo.bisect(js,u);return i==js.length?[t.year,Yi(n.map(function(n){return n/31536e6}),e)[2]]:i?t[u/js[i-1]<js[i]/u?i-1:i]:[Os,Yi(n,e)[2]]}return r.invert=function(t){return Io(n.invert(t))},r.domain=function(t){return arguments.length?(n.domain(t),r):n.domain().map(Io)},r.nice=function(n,t){function e(e){return!isNaN(e)&&!n.range(e,Io(+e+1),t).length}var i=r.domain(),o=zi(i),a=null==n?u(o,10):"number"==typeof n&&u(o,n);return a&&(n=a[0],t=a[1]),r.domain(Pi(i,t>1?{floor:function(t){for(;e(t=n.floor(t));)t=Io(t-1);return t},ceil:function(t){for(;e(t=n.ceil(t));)t=Io(+t+1);return t}}:n))},r.ticks=function(n,t){var e=zi(r.domain()),i=null==n?u(e,10):"number"==typeof n?u(e,n):!n.range&&[{range:n},t];return 
i&&(n=i[0],t=i[1]),n.range(e[0],Io(+e[1]+1),1>t?1:t)},r.tickFormat=function(){return e},r.copy=function(){return Yo(n.copy(),t,e)},Fi(r,n)}function Io(n){return new Date(n)}function Zo(n){return JSON.parse(n.responseText)}function Vo(n){var t=Wo.createRange();return t.selectNode(Wo.body),t.createContextualFragment(n.responseText)}var Xo={version:"3.4.3"};Date.now||(Date.now=function(){return+new Date});var $o=[].slice,Bo=function(n){return $o.call(n)},Wo=document,Jo=Wo.documentElement,Go=window;try{Bo(Jo.childNodes)[0].nodeType}catch(Ko){Bo=function(n){for(var t=n.length,e=new Array(t);t--;)e[t]=n[t];return e}}try{Wo.createElement("div").style.setProperty("opacity",0,"")}catch(Qo){var na=Go.Element.prototype,ta=na.setAttribute,ea=na.setAttributeNS,ra=Go.CSSStyleDeclaration.prototype,ua=ra.setProperty;na.setAttribute=function(n,t){ta.call(this,n,t+"")},na.setAttributeNS=function(n,t,e){ea.call(this,n,t,e+"")},ra.setProperty=function(n,t,e){ua.call(this,n,t+"",e)}}Xo.ascending=function(n,t){return t>n?-1:n>t?1:n>=t?0:0/0},Xo.descending=function(n,t){return n>t?-1:t>n?1:t>=n?0:0/0},Xo.min=function(n,t){var e,r,u=-1,i=n.length;if(1===arguments.length){for(;++u<i&&!(null!=(e=n[u])&&e>=e);)e=void 0;for(;++u<i;)null!=(r=n[u])&&e>r&&(e=r)}else{for(;++u<i&&!(null!=(e=t.call(n,n[u],u))&&e>=e);)e=void 0;for(;++u<i;)null!=(r=t.call(n,n[u],u))&&e>r&&(e=r)}return e},Xo.max=function(n,t){var e,r,u=-1,i=n.length;if(1===arguments.length){for(;++u<i&&!(null!=(e=n[u])&&e>=e);)e=void 0;for(;++u<i;)null!=(r=n[u])&&r>e&&(e=r)}else{for(;++u<i&&!(null!=(e=t.call(n,n[u],u))&&e>=e);)e=void 0;for(;++u<i;)null!=(r=t.call(n,n[u],u))&&r>e&&(e=r)}return e},Xo.extent=function(n,t){var e,r,u,i=-1,o=n.length;if(1===arguments.length){for(;++i<o&&!(null!=(e=u=n[i])&&e>=e);)e=u=void 0;for(;++i<o;)null!=(r=n[i])&&(e>r&&(e=r),r>u&&(u=r))}else{for(;++i<o&&!(null!=(e=u=t.call(n,n[i],i))&&e>=e);)e=void 0;for(;++i<o;)null!=(r=t.call(n,n[i],i))&&(e>r&&(e=r),r>u&&(u=r))}return[e,u]},Xo.sum=function(n,t){var 
e,r=0,u=n.length,i=-1;if(1===arguments.length)for(;++i<u;)isNaN(e=+n[i])||(r+=e);else for(;++i<u;)isNaN(e=+t.call(n,n[i],i))||(r+=e);return r},Xo.mean=function(t,e){var r,u=t.length,i=0,o=-1,a=0;if(1===arguments.length)for(;++o<u;)n(r=t[o])&&(i+=(r-i)/++a);else for(;++o<u;)n(r=e.call(t,t[o],o))&&(i+=(r-i)/++a);return a?i:void 0},Xo.quantile=function(n,t){var e=(n.length-1)*t+1,r=Math.floor(e),u=+n[r-1],i=e-r;return i?u+i*(n[r]-u):u},Xo.median=function(t,e){return arguments.length>1&&(t=t.map(e)),t=t.filter(n),t.length?Xo.quantile(t.sort(Xo.ascending),.5):void 0},Xo.bisector=function(n){return{left:function(t,e,r,u){for(arguments.length<3&&(r=0),arguments.length<4&&(u=t.length);u>r;){var i=r+u>>>1;n.call(t,t[i],i)<e?r=i+1:u=i}return r},right:function(t,e,r,u){for(arguments.length<3&&(r=0),arguments.length<4&&(u=t.length);u>r;){var i=r+u>>>1;e<n.call(t,t[i],i)?u=i:r=i+1}return r}}};var ia=Xo.bisector(function(n){return n});Xo.bisectLeft=ia.left,Xo.bisect=Xo.bisectRight=ia.right,Xo.shuffle=function(n){for(var t,e,r=n.length;r;)e=0|Math.random()*r--,t=n[r],n[r]=n[e],n[e]=t;return n},Xo.permute=function(n,t){for(var e=t.length,r=new Array(e);e--;)r[e]=n[t[e]];return r},Xo.pairs=function(n){for(var t,e=0,r=n.length-1,u=n[0],i=new Array(0>r?0:r);r>e;)i[e]=[t=u,u=n[++e]];return i},Xo.zip=function(){if(!(u=arguments.length))return[];for(var n=-1,e=Xo.min(arguments,t),r=new Array(e);++n<e;)for(var u,i=-1,o=r[n]=new Array(u);++i<u;)o[i]=arguments[i][n];return r},Xo.transpose=function(n){return Xo.zip.apply(Xo,n)},Xo.keys=function(n){var t=[];for(var e in n)t.push(e);return t},Xo.values=function(n){var t=[];for(var e in n)t.push(n[e]);return t},Xo.entries=function(n){var t=[];for(var e in n)t.push({key:e,value:n[e]});return t},Xo.merge=function(n){for(var t,e,r,u=n.length,i=-1,o=0;++i<u;)o+=n[i].length;for(e=new Array(o);--u>=0;)for(r=n[u],t=r.length;--t>=0;)e[--o]=r[t];return e};var 
oa=Math.abs;Xo.range=function(n,t,r){if(arguments.length<3&&(r=1,arguments.length<2&&(t=n,n=0)),1/0===(t-n)/r)throw new Error("infinite range");var u,i=[],o=e(oa(r)),a=-1;if(n*=o,t*=o,r*=o,0>r)for(;(u=n+r*++a)>t;)i.push(u/o);else for(;(u=n+r*++a)<t;)i.push(u/o);return i},Xo.map=function(n){var t=new u;if(n instanceof u)n.forEach(function(n,e){t.set(n,e)});else for(var e in n)t.set(e,n[e]);return t},r(u,{has:i,get:function(n){return this[aa+n]},set:function(n,t){return this[aa+n]=t},remove:o,keys:a,values:function(){var n=[];return this.forEach(function(t,e){n.push(e)}),n},entries:function(){var n=[];return this.forEach(function(t,e){n.push({key:t,value:e})}),n},size:c,empty:s,forEach:function(n){for(var t in this)t.charCodeAt(0)===ca&&n.call(this,t.substring(1),this[t])}});var aa="\x00",ca=aa.charCodeAt(0);Xo.nest=function(){function n(t,a,c){if(c>=o.length)return r?r.call(i,a):e?a.sort(e):a;for(var s,l,f,h,g=-1,p=a.length,v=o[c++],d=new u;++g<p;)(h=d.get(s=v(l=a[g])))?h.push(l):d.set(s,[l]);return t?(l=t(),f=function(e,r){l.set(e,n(t,r,c))}):(l={},f=function(e,r){l[e]=n(t,r,c)}),d.forEach(f),l}function t(n,e){if(e>=o.length)return n;var r=[],u=a[e++];return n.forEach(function(n,u){r.push({key:n,values:t(u,e)})}),u?r.sort(function(n,t){return u(n.key,t.key)}):r}var e,r,i={},o=[],a=[];return i.map=function(t,e){return n(e,t,0)},i.entries=function(e){return t(n(Xo.map,e,0),0)},i.key=function(n){return o.push(n),i},i.sortKeys=function(n){return a[o.length-1]=n,i},i.sortValues=function(n){return e=n,i},i.rollup=function(n){return r=n,i},i},Xo.set=function(n){var t=new l;if(n)for(var e=0,r=n.length;r>e;++e)t.add(n[e]);return t},r(l,{has:i,add:function(n){return this[aa+n]=!0,n},remove:function(n){return n=aa+n,n in this&&delete this[n]},values:a,size:c,empty:s,forEach:function(n){for(var t in this)t.charCodeAt(0)===ca&&n.call(this,t.substring(1))}}),Xo.behavior={},Xo.rebind=function(n,t){for(var e,r=1,u=arguments.length;++r<u;)n[e=arguments[r]]=f(n,t,t[e]);return n};var 
sa=["webkit","ms","moz","Moz","o","O"];Xo.dispatch=function(){for(var n=new p,t=-1,e=arguments.length;++t<e;)n[arguments[t]]=v(n);return n},p.prototype.on=function(n,t){var e=n.indexOf("."),r="";if(e>=0&&(r=n.substring(e+1),n=n.substring(0,e)),n)return arguments.length<2?this[n].on(r):this[n].on(r,t);if(2===arguments.length){if(null==t)for(n in this)this.hasOwnProperty(n)&&this[n].on(r,null);return this}},Xo.event=null,Xo.requote=function(n){return n.replace(la,"\\$&")};var la=/[\\\^\$\*\+\?\|\[\]\(\)\.\{\}]/g,fa={}.__proto__?function(n,t){n.__proto__=t}:function(n,t){for(var e in t)n[e]=t[e]},ha=function(n,t){return t.querySelector(n)},ga=function(n,t){return t.querySelectorAll(n)},pa=Jo[h(Jo,"matchesSelector")],va=function(n,t){return pa.call(n,t)};"function"==typeof Sizzle&&(ha=function(n,t){return Sizzle(n,t)[0]||null},ga=Sizzle,va=Sizzle.matchesSelector),Xo.selection=function(){return xa};var da=Xo.selection.prototype=[];da.select=function(n){var t,e,r,u,i=[];n=M(n);for(var o=-1,a=this.length;++o<a;){i.push(t=[]),t.parentNode=(r=this[o]).parentNode;for(var c=-1,s=r.length;++c<s;)(u=r[c])?(t.push(e=n.call(u,u.__data__,c,o)),e&&"__data__"in u&&(e.__data__=u.__data__)):t.push(null)}return x(i)},da.selectAll=function(n){var t,e,r=[];n=_(n);for(var u=-1,i=this.length;++u<i;)for(var o=this[u],a=-1,c=o.length;++a<c;)(e=o[a])&&(r.push(t=Bo(n.call(e,e.__data__,a,u))),t.parentNode=e);return x(r)};var ma={svg:"http://www.w3.org/2000/svg",xhtml:"http://www.w3.org/1999/xhtml",xlink:"http://www.w3.org/1999/xlink",xml:"http://www.w3.org/XML/1998/namespace",xmlns:"http://www.w3.org/2000/xmlns/"};Xo.ns={prefix:ma,qualify:function(n){var t=n.indexOf(":"),e=n;return t>=0&&(e=n.substring(0,t),n=n.substring(t+1)),ma.hasOwnProperty(e)?{space:ma[e],local:n}:n}},da.attr=function(n,t){if(arguments.length<2){if("string"==typeof n){var e=this.node();return n=Xo.ns.qualify(n),n.local?e.getAttributeNS(n.space,n.local):e.getAttribute(n)}for(t in n)this.each(b(t,n[t]));return this}return 
this.each(b(n,t))},da.classed=function(n,t){if(arguments.length<2){if("string"==typeof n){var e=this.node(),r=(n=k(n)).length,u=-1;if(t=e.classList){for(;++u<r;)if(!t.contains(n[u]))return!1}else for(t=e.getAttribute("class");++u<r;)if(!S(n[u]).test(t))return!1;return!0}for(t in n)this.each(E(t,n[t]));return this}return this.each(E(n,t))},da.style=function(n,t,e){var r=arguments.length;if(3>r){if("string"!=typeof n){2>r&&(t="");for(e in n)this.each(C(e,n[e],t));return this}if(2>r)return Go.getComputedStyle(this.node(),null).getPropertyValue(n);e=""}return this.each(C(n,t,e))},da.property=function(n,t){if(arguments.length<2){if("string"==typeof n)return this.node()[n];for(t in n)this.each(N(t,n[t]));return this}return this.each(N(n,t))},da.text=function(n){return arguments.length?this.each("function"==typeof n?function(){var t=n.apply(this,arguments);this.textContent=null==t?"":t}:null==n?function(){this.textContent=""}:function(){this.textContent=n}):this.node().textContent},da.html=function(n){return arguments.length?this.each("function"==typeof n?function(){var t=n.apply(this,arguments);this.innerHTML=null==t?"":t}:null==n?function(){this.innerHTML=""}:function(){this.innerHTML=n}):this.node().innerHTML},da.append=function(n){return n=L(n),this.select(function(){return this.appendChild(n.apply(this,arguments))})},da.insert=function(n,t){return n=L(n),t=M(t),this.select(function(){return this.insertBefore(n.apply(this,arguments),t.apply(this,arguments)||null)})},da.remove=function(){return this.each(function(){var n=this.parentNode;n&&n.removeChild(this)})},da.data=function(n,t){function e(n,e){var r,i,o,a=n.length,f=e.length,h=Math.min(a,f),g=new Array(f),p=new Array(f),v=new Array(a);if(t){var d,m=new u,y=new 
u,x=[];for(r=-1;++r<a;)d=t.call(i=n[r],i.__data__,r),m.has(d)?v[r]=i:m.set(d,i),x.push(d);for(r=-1;++r<f;)d=t.call(e,o=e[r],r),(i=m.get(d))?(g[r]=i,i.__data__=o):y.has(d)||(p[r]=T(o)),y.set(d,o),m.remove(d);for(r=-1;++r<a;)m.has(x[r])&&(v[r]=n[r])}else{for(r=-1;++r<h;)i=n[r],o=e[r],i?(i.__data__=o,g[r]=i):p[r]=T(o);for(;f>r;++r)p[r]=T(e[r]);for(;a>r;++r)v[r]=n[r]}p.update=g,p.parentNode=g.parentNode=v.parentNode=n.parentNode,c.push(p),s.push(g),l.push(v)}var r,i,o=-1,a=this.length;if(!arguments.length){for(n=new Array(a=(r=this[0]).length);++o<a;)(i=r[o])&&(n[o]=i.__data__);return n}var c=D([]),s=x([]),l=x([]);if("function"==typeof n)for(;++o<a;)e(r=this[o],n.call(r,r.parentNode.__data__,o));else for(;++o<a;)e(r=this[o],n);return s.enter=function(){return c},s.exit=function(){return l},s},da.datum=function(n){return arguments.length?this.property("__data__",n):this.property("__data__")},da.filter=function(n){var t,e,r,u=[];"function"!=typeof n&&(n=q(n));for(var i=0,o=this.length;o>i;i++){u.push(t=[]),t.parentNode=(e=this[i]).parentNode;for(var a=0,c=e.length;c>a;a++)(r=e[a])&&n.call(r,r.__data__,a,i)&&t.push(r)}return x(u)},da.order=function(){for(var n=-1,t=this.length;++n<t;)for(var e,r=this[n],u=r.length-1,i=r[u];--u>=0;)(e=r[u])&&(i&&i!==e.nextSibling&&i.parentNode.insertBefore(e,i),i=e);return this},da.sort=function(n){n=z.apply(this,arguments);for(var t=-1,e=this.length;++t<e;)this[t].sort(n);return this.order()},da.each=function(n){return R(this,function(t,e,r){n.call(t,t.__data__,e,r)})},da.call=function(n){var t=Bo(arguments);return n.apply(t[0]=this,t),this},da.empty=function(){return!this.node()},da.node=function(){for(var n=0,t=this.length;t>n;n++)for(var e=this[n],r=0,u=e.length;u>r;r++){var i=e[r];if(i)return i}return null},da.size=function(){var n=0;return this.each(function(){++n}),n};var 
ya=[];Xo.selection.enter=D,Xo.selection.enter.prototype=ya,ya.append=da.append,ya.empty=da.empty,ya.node=da.node,ya.call=da.call,ya.size=da.size,ya.select=function(n){for(var t,e,r,u,i,o=[],a=-1,c=this.length;++a<c;){r=(u=this[a]).update,o.push(t=[]),t.parentNode=u.parentNode;for(var s=-1,l=u.length;++s<l;)(i=u[s])?(t.push(r[s]=e=n.call(u.parentNode,i.__data__,s,a)),e.__data__=i.__data__):t.push(null)}return x(o)},ya.insert=function(n,t){return arguments.length<2&&(t=P(this)),da.insert.call(this,n,t)},da.transition=function(){for(var n,t,e=ks||++Ls,r=[],u=Es||{time:Date.now(),ease:yu,delay:0,duration:250},i=-1,o=this.length;++i<o;){r.push(n=[]);for(var a=this[i],c=-1,s=a.length;++c<s;)(t=a[c])&&jo(t,c,e,u),n.push(t)}return Do(r,e)},da.interrupt=function(){return this.each(U)},Xo.select=function(n){var t=["string"==typeof n?ha(n,Wo):n];return t.parentNode=Jo,x([t])},Xo.selectAll=function(n){var t=Bo("string"==typeof n?ga(n,Wo):n);return t.parentNode=Jo,x([t])};var xa=Xo.select(Jo);da.on=function(n,t,e){var r=arguments.length;if(3>r){if("string"!=typeof n){2>r&&(t=!1);for(e in n)this.each(j(e,n[e],t));return this}if(2>r)return(r=this.node()["__on"+n])&&r._;e=!1}return this.each(j(n,t,e))};var Ma=Xo.map({mouseenter:"mouseover",mouseleave:"mouseout"});Ma.forEach(function(n){"on"+n in Wo&&Ma.remove(n)});var _a="onselectstart"in Wo?null:h(Jo.style,"userSelect"),ba=0;Xo.mouse=function(n){return Y(n,m())};var wa=/WebKit/.test(Go.navigator.userAgent)?-1:0;Xo.touches=function(n,t){return arguments.length<2&&(t=m().touches),t?Bo(t).map(function(t){var e=Y(n,t);return e.identifier=t.identifier,e}):[]},Xo.behavior.drag=function(){function n(){this.on("mousedown.drag",o).on("touchstart.drag",a)}function t(){return Xo.event.changedTouches[0].identifier}function e(n,t){return Xo.touches(n).filter(function(n){return n.identifier===t})[0]}function r(n,t,e,r){return function(){function o(){var 
n=t(l,g),e=n[0]-v[0],r=n[1]-v[1];d|=e|r,v=n,f({type:"drag",x:n[0]+c[0],y:n[1]+c[1],dx:e,dy:r})}function a(){m.on(e+"."+p,null).on(r+"."+p,null),y(d&&Xo.event.target===h),f({type:"dragend"})}var c,s=this,l=s.parentNode,f=u.of(s,arguments),h=Xo.event.target,g=n(),p=null==g?"drag":"drag-"+g,v=t(l,g),d=0,m=Xo.select(Go).on(e+"."+p,o).on(r+"."+p,a),y=O();i?(c=i.apply(s,arguments),c=[c.x-v[0],c.y-v[1]]):c=[0,0],f({type:"dragstart"})}}var u=y(n,"drag","dragstart","dragend"),i=null,o=r(g,Xo.mouse,"mousemove","mouseup"),a=r(t,e,"touchmove","touchend");return n.origin=function(t){return arguments.length?(i=t,n):i},Xo.rebind(n,u,"on")};var Sa=Math.PI,ka=2*Sa,Ea=Sa/2,Aa=1e-6,Ca=Aa*Aa,Na=Sa/180,La=180/Sa,Ta=Math.SQRT2,qa=2,za=4;Xo.interpolateZoom=function(n,t){function e(n){var t=n*y;if(m){var e=B(v),o=i/(qa*h)*(e*W(Ta*t+v)-$(v));return[r+o*s,u+o*l,i*e/B(Ta*t+v)]}return[r+n*s,u+n*l,i*Math.exp(Ta*t)]}var r=n[0],u=n[1],i=n[2],o=t[0],a=t[1],c=t[2],s=o-r,l=a-u,f=s*s+l*l,h=Math.sqrt(f),g=(c*c-i*i+za*f)/(2*i*qa*h),p=(c*c-i*i-za*f)/(2*c*qa*h),v=Math.log(Math.sqrt(g*g+1)-g),d=Math.log(Math.sqrt(p*p+1)-p),m=d-v,y=(m||Math.log(c/i))/Ta;return e.duration=1e3*y,e},Xo.behavior.zoom=function(){function n(n){n.on(A,s).on(Pa+".zoom",f).on(C,h).on("dblclick.zoom",g).on(L,l)}function t(n){return[(n[0]-S.x)/S.k,(n[1]-S.y)/S.k]}function e(n){return[n[0]*S.k+S.x,n[1]*S.k+S.y]}function r(n){S.k=Math.max(E[0],Math.min(E[1],n))}function u(n,t){t=e(t),S.x+=n[0]-t[0],S.y+=n[1]-t[1]}function i(){_&&_.domain(M.range().map(function(n){return(n-S.x)/S.k}).map(M.invert)),w&&w.domain(b.range().map(function(n){return(n-S.y)/S.k}).map(b.invert))}function o(n){n({type:"zoomstart"})}function a(n){i(),n({type:"zoom",scale:S.k,translate:[S.x,S.y]})}function c(n){n({type:"zoomend"})}function s(){function n(){l=1,u(Xo.mouse(r),g),a(i)}function e(){f.on(C,Go===r?h:null).on(N,null),p(l&&Xo.event.target===s),c(i)}var 
r=this,i=T.of(r,arguments),s=Xo.event.target,l=0,f=Xo.select(Go).on(C,n).on(N,e),g=t(Xo.mouse(r)),p=O();U.call(r),o(i)}function l(){function n(){var n=Xo.touches(g);return h=S.k,n.forEach(function(n){n.identifier in v&&(v[n.identifier]=t(n))}),n}function e(){for(var t=Xo.event.changedTouches,e=0,i=t.length;i>e;++e)v[t[e].identifier]=null;var o=n(),c=Date.now();if(1===o.length){if(500>c-x){var s=o[0],l=v[s.identifier];r(2*S.k),u(s,l),d(),a(p)}x=c}else if(o.length>1){var s=o[0],f=o[1],h=s[0]-f[0],g=s[1]-f[1];m=h*h+g*g}}function i(){for(var n,t,e,i,o=Xo.touches(g),c=0,s=o.length;s>c;++c,i=null)if(e=o[c],i=v[e.identifier]){if(t)break;n=e,t=i}if(i){var l=(l=e[0]-n[0])*l+(l=e[1]-n[1])*l,f=m&&Math.sqrt(l/m);n=[(n[0]+e[0])/2,(n[1]+e[1])/2],t=[(t[0]+i[0])/2,(t[1]+i[1])/2],r(f*h)}x=null,u(n,t),a(p)}function f(){if(Xo.event.touches.length){for(var t=Xo.event.changedTouches,e=0,r=t.length;r>e;++e)delete v[t[e].identifier];for(var u in v)return void n()}b.on(M,null).on(_,null),w.on(A,s).on(L,l),k(),c(p)}var h,g=this,p=T.of(g,arguments),v={},m=0,y=Xo.event.changedTouches[0].identifier,M="touchmove.zoom-"+y,_="touchend.zoom-"+y,b=Xo.select(Go).on(M,i).on(_,f),w=Xo.select(g).on(A,null).on(L,e),k=O();U.call(g),e(),o(p)}function f(){var n=T.of(this,arguments);m?clearTimeout(m):(U.call(this),o(n)),m=setTimeout(function(){m=null,c(n)},50),d();var e=v||Xo.mouse(this);p||(p=t(e)),r(Math.pow(2,.002*Ra())*S.k),u(e,p),a(n)}function h(){p=null}function g(){var n=T.of(this,arguments),e=Xo.mouse(this),i=t(e),s=Math.log(S.k)/Math.LN2;o(n),r(Math.pow(2,Xo.event.shiftKey?Math.ceil(s)-1:Math.floor(s)+1)),u(e,i),a(n),c(n)}var p,v,m,x,M,_,b,w,S={x:0,y:0,k:1},k=[960,500],E=Da,A="mousedown.zoom",C="mousemove.zoom",N="mouseup.zoom",L="touchstart.zoom",T=y(n,"zoomstart","zoom","zoomend");return n.event=function(n){n.each(function(){var n=T.of(this,arguments),t=S;ks?Xo.select(this).transition().each("start.zoom",function(){S=this.__chart__||{x:0,y:0,k:1},o(n)}).tween("zoom:zoom",function(){var 
e=k[0],r=k[1],u=e/2,i=r/2,o=Xo.interpolateZoom([(u-S.x)/S.k,(i-S.y)/S.k,e/S.k],[(u-t.x)/t.k,(i-t.y)/t.k,e/t.k]);return function(t){var r=o(t),c=e/r[2];this.__chart__=S={x:u-r[0]*c,y:i-r[1]*c,k:c},a(n)}}).each("end.zoom",function(){c(n)}):(this.__chart__=S,o(n),a(n),c(n))})},n.translate=function(t){return arguments.length?(S={x:+t[0],y:+t[1],k:S.k},i(),n):[S.x,S.y]},n.scale=function(t){return arguments.length?(S={x:S.x,y:S.y,k:+t},i(),n):S.k},n.scaleExtent=function(t){return arguments.length?(E=null==t?Da:[+t[0],+t[1]],n):E},n.center=function(t){return arguments.length?(v=t&&[+t[0],+t[1]],n):v},n.size=function(t){return arguments.length?(k=t&&[+t[0],+t[1]],n):k},n.x=function(t){return arguments.length?(_=t,M=t.copy(),S={x:0,y:0,k:1},n):_},n.y=function(t){return arguments.length?(w=t,b=t.copy(),S={x:0,y:0,k:1},n):w},Xo.rebind(n,T,"on")};var Ra,Da=[0,1/0],Pa="onwheel"in Wo?(Ra=function(){return-Xo.event.deltaY*(Xo.event.deltaMode?120:1)},"wheel"):"onmousewheel"in Wo?(Ra=function(){return Xo.event.wheelDelta},"mousewheel"):(Ra=function(){return-Xo.event.detail},"MozMousePixelScroll");G.prototype.toString=function(){return this.rgb()+""},Xo.hsl=function(n,t,e){return 1===arguments.length?n instanceof Q?K(n.h,n.s,n.l):dt(""+n,mt,K):K(+n,+t,+e)};var Ua=Q.prototype=new G;Ua.brighter=function(n){return n=Math.pow(.7,arguments.length?n:1),K(this.h,this.s,this.l/n)},Ua.darker=function(n){return n=Math.pow(.7,arguments.length?n:1),K(this.h,this.s,n*this.l)},Ua.rgb=function(){return nt(this.h,this.s,this.l)},Xo.hcl=function(n,t,e){return 1===arguments.length?n instanceof et?tt(n.h,n.c,n.l):n instanceof it?at(n.l,n.a,n.b):at((n=yt((n=Xo.rgb(n)).r,n.g,n.b)).l,n.a,n.b):tt(+n,+t,+e)};var ja=et.prototype=new G;ja.brighter=function(n){return tt(this.h,this.c,Math.min(100,this.l+Ha*(arguments.length?n:1)))},ja.darker=function(n){return tt(this.h,this.c,Math.max(0,this.l-Ha*(arguments.length?n:1)))},ja.rgb=function(){return rt(this.h,this.c,this.l).rgb()},Xo.lab=function(n,t,e){return 
1===arguments.length?n instanceof it?ut(n.l,n.a,n.b):n instanceof et?rt(n.l,n.c,n.h):yt((n=Xo.rgb(n)).r,n.g,n.b):ut(+n,+t,+e)};var Ha=18,Fa=.95047,Oa=1,Ya=1.08883,Ia=it.prototype=new G;Ia.brighter=function(n){return ut(Math.min(100,this.l+Ha*(arguments.length?n:1)),this.a,this.b)},Ia.darker=function(n){return ut(Math.max(0,this.l-Ha*(arguments.length?n:1)),this.a,this.b)},Ia.rgb=function(){return ot(this.l,this.a,this.b)},Xo.rgb=function(n,t,e){return 1===arguments.length?n instanceof pt?gt(n.r,n.g,n.b):dt(""+n,gt,nt):gt(~~n,~~t,~~e)};var Za=pt.prototype=new G;Za.brighter=function(n){n=Math.pow(.7,arguments.length?n:1);var t=this.r,e=this.g,r=this.b,u=30;return t||e||r?(t&&u>t&&(t=u),e&&u>e&&(e=u),r&&u>r&&(r=u),gt(Math.min(255,~~(t/n)),Math.min(255,~~(e/n)),Math.min(255,~~(r/n)))):gt(u,u,u)},Za.darker=function(n){return n=Math.pow(.7,arguments.length?n:1),gt(~~(n*this.r),~~(n*this.g),~~(n*this.b))},Za.hsl=function(){return mt(this.r,this.g,this.b)},Za.toString=function(){return"#"+vt(this.r)+vt(this.g)+vt(this.b)};var 
Va=Xo.map({aliceblue:15792383,antiquewhite:16444375,aqua:65535,aquamarine:8388564,azure:15794175,beige:16119260,bisque:16770244,black:0,blanchedalmond:16772045,blue:255,blueviolet:9055202,brown:10824234,burlywood:14596231,cadetblue:6266528,chartreuse:8388352,chocolate:13789470,coral:16744272,cornflowerblue:6591981,cornsilk:16775388,crimson:14423100,cyan:65535,darkblue:139,darkcyan:35723,darkgoldenrod:12092939,darkgray:11119017,darkgreen:25600,darkgrey:11119017,darkkhaki:12433259,darkmagenta:9109643,darkolivegreen:5597999,darkorange:16747520,darkorchid:10040012,darkred:9109504,darksalmon:15308410,darkseagreen:9419919,darkslateblue:4734347,darkslategray:3100495,darkslategrey:3100495,darkturquoise:52945,darkviolet:9699539,deeppink:16716947,deepskyblue:49151,dimgray:6908265,dimgrey:6908265,dodgerblue:2003199,firebrick:11674146,floralwhite:16775920,forestgreen:2263842,fuchsia:16711935,gainsboro:14474460,ghostwhite:16316671,gold:16766720,goldenrod:14329120,gray:8421504,green:32768,greenyellow:11403055,grey:8421504,honeydew:15794160,hotpink:16738740,indianred:13458524,indigo:4915330,ivory:16777200,khaki:15787660,lavender:15132410,lavenderblush:16773365,lawngreen:8190976,lemonchiffon:16775885,lightblue:11393254,lightcoral:15761536,lightcyan:14745599,lightgoldenrodyellow:16448210,lightgray:13882323,lightgreen:9498256,lightgrey:13882323,lightpink:16758465,lightsalmon:16752762,lightseagreen:2142890,lightskyblue:8900346,lightslategray:7833753,lightslategrey:7833753,lightsteelblue:11584734,lightyellow:16777184,lime:65280,limegreen:3329330,linen:16445670,magenta:16711935,maroon:8388608,mediumaquamarine:6737322,mediumblue:205,mediumorchid:12211667,mediumpurple:9662683,mediumseagreen:3978097,mediumslateblue:8087790,mediumspringgreen:64154,mediumturquoise:4772300,mediumvioletred:13047173,midnightblue:1644912,mintcream:16121850,mistyrose:16770273,moccasin:16770229,navajowhite:16768685,navy:128,oldlace:16643558,olive:8421376,olivedrab:7048739,orange:16753920,orangered:16729344,orchid:
14315734,palegoldenrod:15657130,palegreen:10025880,paleturquoise:11529966,palevioletred:14381203,papayawhip:16773077,peachpuff:16767673,peru:13468991,pink:16761035,plum:14524637,powderblue:11591910,purple:8388736,red:16711680,rosybrown:12357519,royalblue:4286945,saddlebrown:9127187,salmon:16416882,sandybrown:16032864,seagreen:3050327,seashell:16774638,sienna:10506797,silver:12632256,skyblue:8900331,slateblue:6970061,slategray:7372944,slategrey:7372944,snow:16775930,springgreen:65407,steelblue:4620980,tan:13808780,teal:32896,thistle:14204888,tomato:16737095,turquoise:4251856,violet:15631086,wheat:16113331,white:16777215,whitesmoke:16119285,yellow:16776960,yellowgreen:10145074});Va.forEach(function(n,t){Va.set(n,ft(t))}),Xo.functor=_t,Xo.xhr=wt(bt),Xo.dsv=function(n,t){function e(n,e,i){arguments.length<3&&(i=e,e=null);var o=St(n,t,null==e?r:u(e),i);return o.row=function(n){return arguments.length?o.response(null==(e=n)?r:u(n)):e},o}function r(n){return e.parse(n.responseText)}function u(n){return function(t){return e.parse(t.responseText,n)}}function i(t){return t.map(o).join(n)}function o(n){return a.test(n)?'"'+n.replace(/\"/g,'""')+'"':n}var a=new RegExp('["'+n+"\n]"),c=n.charCodeAt(0);return e.parse=function(n,t){var r;return e.parseRows(n,function(n,e){if(r)return r(n,e-1);var u=new Function("d","return {"+n.map(function(n,t){return JSON.stringify(n)+": d["+t+"]"}).join(",")+"}");r=t?function(n,e){return t(u(n),e)}:u})},e.parseRows=function(n,t){function e(){if(l>=s)return o;if(u)return u=!1,i;var t=l;if(34===n.charCodeAt(t)){for(var e=t;e++<s;)if(34===n.charCodeAt(e)){if(34!==n.charCodeAt(e+1))break;++e}l=e+2;var r=n.charCodeAt(e+1);return 13===r?(u=!0,10===n.charCodeAt(e+2)&&++l):10===r&&(u=!0),n.substring(t+1,e).replace(/""/g,'"')}for(;s>l;){var r=n.charCodeAt(l++),a=1;if(10===r)u=!0;else if(13===r)u=!0,10===n.charCodeAt(l)&&(++l,++a);else if(r!==c)continue;return n.substring(t,l-a)}return n.substring(t)}for(var 
r,u,i={},o={},a=[],s=n.length,l=0,f=0;(r=e())!==o;){for(var h=[];r!==i&&r!==o;)h.push(r),r=e();(!t||(h=t(h,f++)))&&a.push(h)}return a},e.format=function(t){if(Array.isArray(t[0]))return e.formatRows(t);var r=new l,u=[];return t.forEach(function(n){for(var t in n)r.has(t)||u.push(r.add(t))}),[u.map(o).join(n)].concat(t.map(function(t){return u.map(function(n){return o(t[n])}).join(n)})).join("\n")},e.formatRows=function(n){return n.map(i).join("\n")},e},Xo.csv=Xo.dsv(",","text/csv"),Xo.tsv=Xo.dsv(" ","text/tab-separated-values");var Xa,$a,Ba,Wa,Ja,Ga=Go[h(Go,"requestAnimationFrame")]||function(n){setTimeout(n,17)};Xo.timer=function(n,t,e){var r=arguments.length;2>r&&(t=0),3>r&&(e=Date.now());var u=e+t,i={c:n,t:u,f:!1,n:null};$a?$a.n=i:Xa=i,$a=i,Ba||(Wa=clearTimeout(Wa),Ba=1,Ga(Et))},Xo.timer.flush=function(){At(),Ct()},Xo.round=function(n,t){return t?Math.round(n*(t=Math.pow(10,t)))/t:Math.round(n)};var Ka=["y","z","a","f","p","n","\xb5","m","","k","M","G","T","P","E","Z","Y"].map(Lt);Xo.formatPrefix=function(n,t){var e=0;return n&&(0>n&&(n*=-1),t&&(n=Xo.round(n,Nt(n,t))),e=1+Math.floor(1e-12+Math.log(n)/Math.LN10),e=Math.max(-24,Math.min(24,3*Math.floor((0>=e?e+1:e-1)/3)))),Ka[8+e/3]};var Qa=/(?:([^{])?([<>=^]))?([+\- ])?([$#])?(0)?(\d+)?(,)?(\.-?\d+)?([a-z%])?/i,nc=Xo.map({b:function(n){return n.toString(2)},c:function(n){return String.fromCharCode(n)},o:function(n){return n.toString(8)},x:function(n){return n.toString(16)},X:function(n){return n.toString(16).toUpperCase()},g:function(n,t){return n.toPrecision(t)},e:function(n,t){return n.toExponential(t)},f:function(n,t){return n.toFixed(t)},r:function(n,t){return(n=Xo.round(n,Nt(n,t))).toFixed(Math.max(0,Math.min(20,Nt(n*(1+1e-15),t))))}}),tc=Xo.time={},ec=Date;zt.prototype={getDate:function(){return this._.getUTCDate()},getDay:function(){return this._.getUTCDay()},getFullYear:function(){return this._.getUTCFullYear()},getHours:function(){return this._.getUTCHours()},getMilliseconds:function(){return 
this._.getUTCMilliseconds()},getMinutes:function(){return this._.getUTCMinutes()},getMonth:function(){return this._.getUTCMonth()},getSeconds:function(){return this._.getUTCSeconds()},getTime:function(){return this._.getTime()},getTimezoneOffset:function(){return 0},valueOf:function(){return this._.valueOf()},setDate:function(){rc.setUTCDate.apply(this._,arguments)},setDay:function(){rc.setUTCDay.apply(this._,arguments)},setFullYear:function(){rc.setUTCFullYear.apply(this._,arguments)},setHours:function(){rc.setUTCHours.apply(this._,arguments)},setMilliseconds:function(){rc.setUTCMilliseconds.apply(this._,arguments)},setMinutes:function(){rc.setUTCMinutes.apply(this._,arguments)},setMonth:function(){rc.setUTCMonth.apply(this._,arguments)},setSeconds:function(){rc.setUTCSeconds.apply(this._,arguments)},setTime:function(){rc.setTime.apply(this._,arguments)}};var rc=Date.prototype;tc.year=Rt(function(n){return n=tc.day(n),n.setMonth(0,1),n},function(n,t){n.setFullYear(n.getFullYear()+t)},function(n){return n.getFullYear()}),tc.years=tc.year.range,tc.years.utc=tc.year.utc.range,tc.day=Rt(function(n){var t=new ec(2e3,0);return t.setFullYear(n.getFullYear(),n.getMonth(),n.getDate()),t},function(n,t){n.setDate(n.getDate()+t)},function(n){return n.getDate()-1}),tc.days=tc.day.range,tc.days.utc=tc.day.utc.range,tc.dayOfYear=function(n){var t=tc.year(n);return Math.floor((n-t-6e4*(n.getTimezoneOffset()-t.getTimezoneOffset()))/864e5)},["sunday","monday","tuesday","wednesday","thursday","friday","saturday"].forEach(function(n,t){t=7-t;var e=tc[n]=Rt(function(n){return(n=tc.day(n)).setDate(n.getDate()-(n.getDay()+t)%7),n},function(n,t){n.setDate(n.getDate()+7*Math.floor(t))},function(n){var e=tc.year(n).getDay();return Math.floor((tc.dayOfYear(n)+(e+t)%7)/7)-(e!==t)});tc[n+"s"]=e.range,tc[n+"s"].utc=e.utc.range,tc[n+"OfYear"]=function(n){var e=tc.year(n).getDay();return 
Math.floor((tc.dayOfYear(n)+(e+t)%7)/7)}}),tc.week=tc.sunday,tc.weeks=tc.sunday.range,tc.weeks.utc=tc.sunday.utc.range,tc.weekOfYear=tc.sundayOfYear;var uc={"-":"",_:" ",0:"0"},ic=/^\s*\d+/,oc=/^%/;Xo.locale=function(n){return{numberFormat:Tt(n),timeFormat:Pt(n)}};var ac=Xo.locale({decimal:".",thousands:",",grouping:[3],currency:["$",""],dateTime:"%a %b %e %X %Y",date:"%m/%d/%Y",time:"%H:%M:%S",periods:["AM","PM"],days:["Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"],shortDays:["Sun","Mon","Tue","Wed","Thu","Fri","Sat"],months:["January","February","March","April","May","June","July","August","September","October","November","December"],shortMonths:["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"]});Xo.format=ac.numberFormat,Xo.geo={},re.prototype={s:0,t:0,add:function(n){ue(n,this.t,cc),ue(cc.s,this.s,this),this.s?this.t+=cc.t:this.s=cc.t},reset:function(){this.s=this.t=0},valueOf:function(){return this.s}};var cc=new re;Xo.geo.stream=function(n,t){n&&sc.hasOwnProperty(n.type)?sc[n.type](n,t):ie(n,t)};var sc={Feature:function(n,t){ie(n.geometry,t)},FeatureCollection:function(n,t){for(var e=n.features,r=-1,u=e.length;++r<u;)ie(e[r].geometry,t)}},lc={Sphere:function(n,t){t.sphere()},Point:function(n,t){n=n.coordinates,t.point(n[0],n[1],n[2])},MultiPoint:function(n,t){for(var e=n.coordinates,r=-1,u=e.length;++r<u;)n=e[r],t.point(n[0],n[1],n[2])},LineString:function(n,t){oe(n.coordinates,t,0)},MultiLineString:function(n,t){for(var e=n.coordinates,r=-1,u=e.length;++r<u;)oe(e[r],t,0)},Polygon:function(n,t){ae(n.coordinates,t)},MultiPolygon:function(n,t){for(var e=n.coordinates,r=-1,u=e.length;++r<u;)ae(e[r],t)},GeometryCollection:function(n,t){for(var e=n.geometries,r=-1,u=e.length;++r<u;)ie(e[r],t)}};Xo.geo.area=function(n){return fc=0,Xo.geo.stream(n,gc),fc};var fc,hc=new re,gc={sphere:function(){fc+=4*Sa},point:g,lineStart:g,lineEnd:g,polygonStart:function(){hc.reset(),gc.lineStart=ce},polygonEnd:function(){var 
n=2*hc;fc+=0>n?4*Sa+n:n,gc.lineStart=gc.lineEnd=gc.point=g}};Xo.geo.bounds=function(){function n(n,t){x.push(M=[l=n,h=n]),f>t&&(f=t),t>g&&(g=t)}function t(t,e){var r=se([t*Na,e*Na]);if(m){var u=fe(m,r),i=[u[1],-u[0],0],o=fe(i,u);pe(o),o=ve(o);var c=t-p,s=c>0?1:-1,v=o[0]*La*s,d=oa(c)>180;if(d^(v>s*p&&s*t>v)){var y=o[1]*La;y>g&&(g=y)}else if(v=(v+360)%360-180,d^(v>s*p&&s*t>v)){var y=-o[1]*La;f>y&&(f=y)}else f>e&&(f=e),e>g&&(g=e);d?p>t?a(l,t)>a(l,h)&&(h=t):a(t,h)>a(l,h)&&(l=t):h>=l?(l>t&&(l=t),t>h&&(h=t)):t>p?a(l,t)>a(l,h)&&(h=t):a(t,h)>a(l,h)&&(l=t)}else n(t,e);m=r,p=t}function e(){_.point=t}function r(){M[0]=l,M[1]=h,_.point=n,m=null}function u(n,e){if(m){var r=n-p;y+=oa(r)>180?r+(r>0?360:-360):r}else v=n,d=e;gc.point(n,e),t(n,e)}function i(){gc.lineStart()}function o(){u(v,d),gc.lineEnd(),oa(y)>Aa&&(l=-(h=180)),M[0]=l,M[1]=h,m=null}function a(n,t){return(t-=n)<0?t+360:t}function c(n,t){return n[0]-t[0]}function s(n,t){return t[0]<=t[1]?t[0]<=n&&n<=t[1]:n<t[0]||t[1]<n}var l,f,h,g,p,v,d,m,y,x,M,_={point:n,lineStart:e,lineEnd:r,polygonStart:function(){_.point=u,_.lineStart=i,_.lineEnd=o,y=0,gc.polygonStart()},polygonEnd:function(){gc.polygonEnd(),_.point=n,_.lineStart=e,_.lineEnd=r,0>hc?(l=-(h=180),f=-(g=90)):y>Aa?g=90:-Aa>y&&(f=-90),M[0]=l,M[1]=h}};return function(n){g=h=-(l=f=1/0),x=[],Xo.geo.stream(n,_);var t=x.length;if(t){x.sort(c);for(var e,r=1,u=x[0],i=[u];t>r;++r)e=x[r],s(e[0],u)||s(e[1],u)?(a(u[0],e[1])>a(u[0],u[1])&&(u[1]=e[1]),a(e[0],u[1])>a(u[0],u[1])&&(u[0]=e[0])):i.push(u=e);for(var o,e,p=-1/0,t=i.length-1,r=0,u=i[t];t>=r;u=e,++r)e=i[r],(o=a(u[1],e[0]))>p&&(p=o,l=e[0],h=u[1])}return x=M=null,1/0===l||1/0===f?[[0/0,0/0],[0/0,0/0]]:[[l,f],[h,g]]}}(),Xo.geo.centroid=function(n){pc=vc=dc=mc=yc=xc=Mc=_c=bc=wc=Sc=0,Xo.geo.stream(n,kc);var t=bc,e=wc,r=Sc,u=t*t+e*e+r*r;return Ca>u&&(t=xc,e=Mc,r=_c,Aa>vc&&(t=dc,e=mc,r=yc),u=t*t+e*e+r*r,Ca>u)?[0/0,0/0]:[Math.atan2(e,t)*La,X(r/Math.sqrt(u))*La]};var 
pc,vc,dc,mc,yc,xc,Mc,_c,bc,wc,Sc,kc={sphere:g,point:me,lineStart:xe,lineEnd:Me,polygonStart:function(){kc.lineStart=_e},polygonEnd:function(){kc.lineStart=xe}},Ec=Ee(be,Te,ze,[-Sa,-Sa/2]),Ac=1e9;Xo.geo.clipExtent=function(){var n,t,e,r,u,i,o={stream:function(n){return u&&(u.valid=!1),u=i(n),u.valid=!0,u},extent:function(a){return arguments.length?(i=Pe(n=+a[0][0],t=+a[0][1],e=+a[1][0],r=+a[1][1]),u&&(u.valid=!1,u=null),o):[[n,t],[e,r]]}};return o.extent([[0,0],[960,500]])},(Xo.geo.conicEqualArea=function(){return je(He)}).raw=He,Xo.geo.albers=function(){return Xo.geo.conicEqualArea().rotate([96,0]).center([-.6,38.7]).parallels([29.5,45.5]).scale(1070)},Xo.geo.albersUsa=function(){function n(n){var i=n[0],o=n[1];return t=null,e(i,o),t||(r(i,o),t)||u(i,o),t}var t,e,r,u,i=Xo.geo.albers(),o=Xo.geo.conicEqualArea().rotate([154,0]).center([-2,58.5]).parallels([55,65]),a=Xo.geo.conicEqualArea().rotate([157,0]).center([-3,19.9]).parallels([8,18]),c={point:function(n,e){t=[n,e]}};return n.invert=function(n){var t=i.scale(),e=i.translate(),r=(n[0]-e[0])/t,u=(n[1]-e[1])/t;return(u>=.12&&.234>u&&r>=-.425&&-.214>r?o:u>=.166&&.234>u&&r>=-.214&&-.115>r?a:i).invert(n)},n.stream=function(n){var t=i.stream(n),e=o.stream(n),r=a.stream(n);return{point:function(n,u){t.point(n,u),e.point(n,u),r.point(n,u)},sphere:function(){t.sphere(),e.sphere(),r.sphere()},lineStart:function(){t.lineStart(),e.lineStart(),r.lineStart()},lineEnd:function(){t.lineEnd(),e.lineEnd(),r.lineEnd()},polygonStart:function(){t.polygonStart(),e.polygonStart(),r.polygonStart()},polygonEnd:function(){t.polygonEnd(),e.polygonEnd(),r.polygonEnd()}}},n.precision=function(t){return arguments.length?(i.precision(t),o.precision(t),a.precision(t),n):i.precision()},n.scale=function(t){return arguments.length?(i.scale(t),o.scale(.35*t),a.scale(t),n.translate(i.translate())):i.scale()},n.translate=function(t){if(!arguments.length)return i.translate();var s=i.scale(),l=+t[0],f=+t[1];return 
e=i.translate(t).clipExtent([[l-.455*s,f-.238*s],[l+.455*s,f+.238*s]]).stream(c).point,r=o.translate([l-.307*s,f+.201*s]).clipExtent([[l-.425*s+Aa,f+.12*s+Aa],[l-.214*s-Aa,f+.234*s-Aa]]).stream(c).point,u=a.translate([l-.205*s,f+.212*s]).clipExtent([[l-.214*s+Aa,f+.166*s+Aa],[l-.115*s-Aa,f+.234*s-Aa]]).stream(c).point,n},n.scale(1070)};var Cc,Nc,Lc,Tc,qc,zc,Rc={point:g,lineStart:g,lineEnd:g,polygonStart:function(){Nc=0,Rc.lineStart=Fe},polygonEnd:function(){Rc.lineStart=Rc.lineEnd=Rc.point=g,Cc+=oa(Nc/2)}},Dc={point:Oe,lineStart:g,lineEnd:g,polygonStart:g,polygonEnd:g},Pc={point:Ze,lineStart:Ve,lineEnd:Xe,polygonStart:function(){Pc.lineStart=$e},polygonEnd:function(){Pc.point=Ze,Pc.lineStart=Ve,Pc.lineEnd=Xe}};Xo.geo.path=function(){function n(n){return n&&("function"==typeof a&&i.pointRadius(+a.apply(this,arguments)),o&&o.valid||(o=u(i)),Xo.geo.stream(n,o)),i.result()}function t(){return o=null,n}var e,r,u,i,o,a=4.5;return n.area=function(n){return Cc=0,Xo.geo.stream(n,u(Rc)),Cc},n.centroid=function(n){return dc=mc=yc=xc=Mc=_c=bc=wc=Sc=0,Xo.geo.stream(n,u(Pc)),Sc?[bc/Sc,wc/Sc]:_c?[xc/_c,Mc/_c]:yc?[dc/yc,mc/yc]:[0/0,0/0]},n.bounds=function(n){return qc=zc=-(Lc=Tc=1/0),Xo.geo.stream(n,u(Dc)),[[Lc,Tc],[qc,zc]]},n.projection=function(n){return arguments.length?(u=(e=n)?n.stream||Je(n):bt,t()):e},n.context=function(n){return arguments.length?(i=null==(r=n)?new Ye:new Be(n),"function"!=typeof a&&i.pointRadius(a),t()):r},n.pointRadius=function(t){return arguments.length?(a="function"==typeof t?t:(i.pointRadius(+t),+t),n):a},n.projection(Xo.geo.albersUsa()).context(null)},Xo.geo.transform=function(n){return{stream:function(t){var e=new Ge(t);for(var r in n)e[r]=n[r];return 
e}}},Ge.prototype={point:function(n,t){this.stream.point(n,t)},sphere:function(){this.stream.sphere()},lineStart:function(){this.stream.lineStart()},lineEnd:function(){this.stream.lineEnd()},polygonStart:function(){this.stream.polygonStart()},polygonEnd:function(){this.stream.polygonEnd()}},Xo.geo.projection=Qe,Xo.geo.projectionMutator=nr,(Xo.geo.equirectangular=function(){return Qe(er)}).raw=er.invert=er,Xo.geo.rotation=function(n){function t(t){return t=n(t[0]*Na,t[1]*Na),t[0]*=La,t[1]*=La,t}return n=ur(n[0]%360*Na,n[1]*Na,n.length>2?n[2]*Na:0),t.invert=function(t){return t=n.invert(t[0]*Na,t[1]*Na),t[0]*=La,t[1]*=La,t},t},rr.invert=er,Xo.geo.circle=function(){function n(){var n="function"==typeof r?r.apply(this,arguments):r,t=ur(-n[0]*Na,-n[1]*Na,0).invert,u=[];return e(null,null,1,{point:function(n,e){u.push(n=t(n,e)),n[0]*=La,n[1]*=La}}),{type:"Polygon",coordinates:[u]}}var t,e,r=[0,0],u=6;return n.origin=function(t){return arguments.length?(r=t,n):r},n.angle=function(r){return arguments.length?(e=cr((t=+r)*Na,u*Na),n):t},n.precision=function(r){return arguments.length?(e=cr(t*Na,(u=+r)*Na),n):u},n.angle(90)},Xo.geo.distance=function(n,t){var e,r=(t[0]-n[0])*Na,u=n[1]*Na,i=t[1]*Na,o=Math.sin(r),a=Math.cos(r),c=Math.sin(u),s=Math.cos(u),l=Math.sin(i),f=Math.cos(i);return Math.atan2(Math.sqrt((e=f*o)*e+(e=s*l-c*f*a)*e),c*l+s*f*a)},Xo.geo.graticule=function(){function n(){return{type:"MultiLineString",coordinates:t()}}function t(){return Xo.range(Math.ceil(i/d)*d,u,d).map(h).concat(Xo.range(Math.ceil(s/m)*m,c,m).map(g)).concat(Xo.range(Math.ceil(r/p)*p,e,p).filter(function(n){return oa(n%d)>Aa}).map(l)).concat(Xo.range(Math.ceil(a/v)*v,o,v).filter(function(n){return oa(n%m)>Aa}).map(f))}var e,r,u,i,o,a,c,s,l,f,h,g,p=10,v=p,d=90,m=360,y=2.5;return n.lines=function(){return 
t().map(function(n){return{type:"LineString",coordinates:n}})},n.outline=function(){return{type:"Polygon",coordinates:[h(i).concat(g(c).slice(1),h(u).reverse().slice(1),g(s).reverse().slice(1))]}},n.extent=function(t){return arguments.length?n.majorExtent(t).minorExtent(t):n.minorExtent()},n.majorExtent=function(t){return arguments.length?(i=+t[0][0],u=+t[1][0],s=+t[0][1],c=+t[1][1],i>u&&(t=i,i=u,u=t),s>c&&(t=s,s=c,c=t),n.precision(y)):[[i,s],[u,c]]},n.minorExtent=function(t){return arguments.length?(r=+t[0][0],e=+t[1][0],a=+t[0][1],o=+t[1][1],r>e&&(t=r,r=e,e=t),a>o&&(t=a,a=o,o=t),n.precision(y)):[[r,a],[e,o]]},n.step=function(t){return arguments.length?n.majorStep(t).minorStep(t):n.minorStep()},n.majorStep=function(t){return arguments.length?(d=+t[0],m=+t[1],n):[d,m]},n.minorStep=function(t){return arguments.length?(p=+t[0],v=+t[1],n):[p,v]},n.precision=function(t){return arguments.length?(y=+t,l=lr(a,o,90),f=fr(r,e,y),h=lr(s,c,90),g=fr(i,u,y),n):y},n.majorExtent([[-180,-90+Aa],[180,90-Aa]]).minorExtent([[-180,-80-Aa],[180,80+Aa]])},Xo.geo.greatArc=function(){function n(){return{type:"LineString",coordinates:[t||r.apply(this,arguments),e||u.apply(this,arguments)]}}var t,e,r=hr,u=gr;return n.distance=function(){return Xo.geo.distance(t||r.apply(this,arguments),e||u.apply(this,arguments))},n.source=function(e){return arguments.length?(r=e,t="function"==typeof e?null:e,n):r},n.target=function(t){return arguments.length?(u=t,e="function"==typeof t?null:t,n):u},n.precision=function(){return arguments.length?n:0},n},Xo.geo.interpolate=function(n,t){return pr(n[0]*Na,n[1]*Na,t[0]*Na,t[1]*Na)},Xo.geo.length=function(n){return Uc=0,Xo.geo.stream(n,jc),Uc};var Uc,jc={sphere:g,point:g,lineStart:vr,lineEnd:g,polygonStart:g,polygonEnd:g},Hc=dr(function(n){return Math.sqrt(2/(1+n))},function(n){return 2*Math.asin(n/2)});(Xo.geo.azimuthalEqualArea=function(){return Qe(Hc)}).raw=Hc;var Fc=dr(function(n){var t=Math.acos(n);return 
t&&t/Math.sin(t)},bt);(Xo.geo.azimuthalEquidistant=function(){return Qe(Fc)}).raw=Fc,(Xo.geo.conicConformal=function(){return je(mr)}).raw=mr,(Xo.geo.conicEquidistant=function(){return je(yr)}).raw=yr;var Oc=dr(function(n){return 1/n},Math.atan);(Xo.geo.gnomonic=function(){return Qe(Oc)}).raw=Oc,xr.invert=function(n,t){return[n,2*Math.atan(Math.exp(t))-Ea]},(Xo.geo.mercator=function(){return Mr(xr)}).raw=xr;var Yc=dr(function(){return 1},Math.asin);(Xo.geo.orthographic=function(){return Qe(Yc)}).raw=Yc;var Ic=dr(function(n){return 1/(1+n)},function(n){return 2*Math.atan(n)});(Xo.geo.stereographic=function(){return Qe(Ic)}).raw=Ic,_r.invert=function(n,t){return[-t,2*Math.atan(Math.exp(n))-Ea]},(Xo.geo.transverseMercator=function(){var n=Mr(_r),t=n.center,e=n.rotate;return n.center=function(n){return n?t([-n[1],n[0]]):(n=t(),[-n[1],n[0]])},n.rotate=function(n){return n?e([n[0],n[1],n.length>2?n[2]+90:90]):(n=e(),[n[0],n[1],n[2]-90])},n.rotate([0,0])}).raw=_r,Xo.geom={},Xo.geom.hull=function(n){function t(n){if(n.length<3)return[];var t,u=_t(e),i=_t(r),o=n.length,a=[],c=[];for(t=0;o>t;t++)a.push([+u.call(this,n[t],t),+i.call(this,n[t],t),t]);for(a.sort(kr),t=0;o>t;t++)c.push([a[t][0],-a[t][1]]);var s=Sr(a),l=Sr(c),f=l[0]===s[0],h=l[l.length-1]===s[s.length-1],g=[];for(t=s.length-1;t>=0;--t)g.push(n[a[s[t]][2]]);for(t=+f;t<l.length-h;++t)g.push(n[a[l[t]][2]]);return g}var e=br,r=wr;return arguments.length?t(n):(t.x=function(n){return arguments.length?(e=n,t):e},t.y=function(n){return arguments.length?(r=n,t):r},t)},Xo.geom.polygon=function(n){return fa(n,Zc),n};var Zc=Xo.geom.polygon.prototype=[];Zc.area=function(){for(var n,t=-1,e=this.length,r=this[e-1],u=0;++t<e;)n=r,r=this[t],u+=n[1]*r[0]-n[0]*r[1];return.5*u},Zc.centroid=function(n){var t,e,r=-1,u=this.length,i=0,o=0,a=this[u-1];for(arguments.length||(n=-1/(6*this.area()));++r<u;)t=a,a=this[r],e=t[0]*a[1]-a[0]*t[1],i+=(t[0]+a[0])*e,o+=(t[1]+a[1])*e;return[i*n,o*n]},Zc.clip=function(n){for(var 
t,e,r,u,i,o,a=Cr(n),c=-1,s=this.length-Cr(this),l=this[s-1];++c<s;){for(t=n.slice(),n.length=0,u=this[c],i=t[(r=t.length-a)-1],e=-1;++e<r;)o=t[e],Er(o,l,u)?(Er(i,l,u)||n.push(Ar(i,o,l,u)),n.push(o)):Er(i,l,u)&&n.push(Ar(i,o,l,u)),i=o;a&&n.push(n[0]),l=u}return n};var Vc,Xc,$c,Bc,Wc,Jc=[],Gc=[];Pr.prototype.prepare=function(){for(var n,t=this.edges,e=t.length;e--;)n=t[e].edge,n.b&&n.a||t.splice(e,1);return t.sort(jr),t.length},Br.prototype={start:function(){return this.edge.l===this.site?this.edge.a:this.edge.b},end:function(){return this.edge.l===this.site?this.edge.b:this.edge.a}},Wr.prototype={insert:function(n,t){var e,r,u;if(n){if(t.P=n,t.N=n.N,n.N&&(n.N.P=t),n.N=t,n.R){for(n=n.R;n.L;)n=n.L;n.L=t}else n.R=t;e=n}else this._?(n=Qr(this._),t.P=null,t.N=n,n.P=n.L=t,e=n):(t.P=t.N=null,this._=t,e=null);for(t.L=t.R=null,t.U=e,t.C=!0,n=t;e&&e.C;)r=e.U,e===r.L?(u=r.R,u&&u.C?(e.C=u.C=!1,r.C=!0,n=r):(n===e.R&&(Gr(this,e),n=e,e=n.U),e.C=!1,r.C=!0,Kr(this,r))):(u=r.L,u&&u.C?(e.C=u.C=!1,r.C=!0,n=r):(n===e.L&&(Kr(this,e),n=e,e=n.U),e.C=!1,r.C=!0,Gr(this,r))),e=n.U;this._.C=!1},remove:function(n){n.N&&(n.N.P=n.P),n.P&&(n.P.N=n.N),n.N=n.P=null;var t,e,r,u=n.U,i=n.L,o=n.R;if(e=i?o?Qr(o):i:o,u?u.L===n?u.L=e:u.R=e:this._=e,i&&o?(r=e.C,e.C=n.C,e.L=i,i.U=e,e!==o?(u=e.U,e.U=n.U,n=e.R,u.L=n,e.R=o,o.U=e):(e.U=u,u=e,n=e.R)):(r=n.C,n=e),n&&(n.U=u),!r){if(n&&n.C)return n.C=!1,void 0;do{if(n===this._)break;if(n===u.L){if(t=u.R,t.C&&(t.C=!1,u.C=!0,Gr(this,u),t=u.R),t.L&&t.L.C||t.R&&t.R.C){t.R&&t.R.C||(t.L.C=!1,t.C=!0,Kr(this,t),t=u.R),t.C=u.C,u.C=t.R.C=!1,Gr(this,u),n=this._;break}}else if(t=u.L,t.C&&(t.C=!1,u.C=!0,Kr(this,u),t=u.L),t.L&&t.L.C||t.R&&t.R.C){t.L&&t.L.C||(t.R.C=!1,t.C=!0,Gr(this,t),t=u.L),t.C=u.C,u.C=t.L.C=!1,Kr(this,u),n=this._;break}t.C=!0,n=u,u=u.U}while(!n.C);n&&(n.C=!1)}}},Xo.geom.voronoi=function(n){function t(n){var t=new Array(n.length),r=a[0][0],u=a[0][1],i=a[1][0],o=a[1][1];return nu(e(n),a).cells.forEach(function(e,a){var 
c=e.edges,s=e.site,l=t[a]=c.length?c.map(function(n){var t=n.start();return[t.x,t.y]}):s.x>=r&&s.x<=i&&s.y>=u&&s.y<=o?[[r,o],[i,o],[i,u],[r,u]]:[];l.point=n[a]}),t}function e(n){return n.map(function(n,t){return{x:Math.round(i(n,t)/Aa)*Aa,y:Math.round(o(n,t)/Aa)*Aa,i:t}})}var r=br,u=wr,i=r,o=u,a=Kc;return n?t(n):(t.links=function(n){return nu(e(n)).edges.filter(function(n){return n.l&&n.r}).map(function(t){return{source:n[t.l.i],target:n[t.r.i]}})},t.triangles=function(n){var t=[];return nu(e(n)).cells.forEach(function(e,r){for(var u,i,o=e.site,a=e.edges.sort(jr),c=-1,s=a.length,l=a[s-1].edge,f=l.l===o?l.r:l.l;++c<s;)u=l,i=f,l=a[c].edge,f=l.l===o?l.r:l.l,r<i.i&&r<f.i&&eu(o,i,f)<0&&t.push([n[r],n[i.i],n[f.i]])}),t},t.x=function(n){return arguments.length?(i=_t(r=n),t):r},t.y=function(n){return arguments.length?(o=_t(u=n),t):u},t.clipExtent=function(n){return arguments.length?(a=null==n?Kc:n,t):a===Kc?null:a},t.size=function(n){return arguments.length?t.clipExtent(n&&[[0,0],n]):a===Kc?null:a&&a[1]},t)};var Kc=[[-1e6,-1e6],[1e6,1e6]];Xo.geom.delaunay=function(n){return Xo.geom.voronoi().triangles(n)},Xo.geom.quadtree=function(n,t,e,r,u){function i(n){function i(n,t,e,r,u,i,o,a){if(!isNaN(e)&&!isNaN(r))if(n.leaf){var c=n.x,l=n.y;if(null!=c)if(oa(c-e)+oa(l-r)<.01)s(n,t,e,r,u,i,o,a);else{var f=n.point;n.x=n.y=n.point=null,s(n,f,c,l,u,i,o,a),s(n,t,e,r,u,i,o,a)}else n.x=e,n.y=r,n.point=t}else s(n,t,e,r,u,i,o,a)}function s(n,t,e,r,u,o,a,c){var s=.5*(u+a),l=.5*(o+c),f=e>=s,h=r>=l,g=(h<<1)+f;n.leaf=!1,n=n.nodes[g]||(n.nodes[g]=iu()),f?u=s:a=s,h?o=l:c=l,i(n,t,e,r,u,o,a,c)}var l,f,h,g,p,v,d,m,y,x=_t(a),M=_t(c);if(null!=t)v=t,d=e,m=r,y=u;else if(m=y=-(v=d=1/0),f=[],h=[],p=n.length,o)for(g=0;p>g;++g)l=n[g],l.x<v&&(v=l.x),l.y<d&&(d=l.y),l.x>m&&(m=l.x),l.y>y&&(y=l.y),f.push(l.x),h.push(l.y);else for(g=0;p>g;++g){var _=+x(l=n[g],g),b=+M(l,g);v>_&&(v=_),d>b&&(d=b),_>m&&(m=_),b>y&&(y=b),f.push(_),h.push(b)}var w=m-v,S=y-d;w>S?y=d+w:m=v+S;var 
k=iu();if(k.add=function(n){i(k,n,+x(n,++g),+M(n,g),v,d,m,y)},k.visit=function(n){ou(n,k,v,d,m,y)},g=-1,null==t){for(;++g<p;)i(k,n[g],f[g],h[g],v,d,m,y);--g}else n.forEach(k.add);return f=h=n=l=null,k}var o,a=br,c=wr;return(o=arguments.length)?(a=ru,c=uu,3===o&&(u=e,r=t,e=t=0),i(n)):(i.x=function(n){return arguments.length?(a=n,i):a},i.y=function(n){return arguments.length?(c=n,i):c},i.extent=function(n){return arguments.length?(null==n?t=e=r=u=null:(t=+n[0][0],e=+n[0][1],r=+n[1][0],u=+n[1][1]),i):null==t?null:[[t,e],[r,u]]},i.size=function(n){return arguments.length?(null==n?t=e=r=u=null:(t=e=0,r=+n[0],u=+n[1]),i):null==t?null:[r-t,u-e]},i)},Xo.interpolateRgb=au,Xo.interpolateObject=cu,Xo.interpolateNumber=su,Xo.interpolateString=lu;var Qc=/[-+]?(?:\d+\.?\d*|\.?\d+)(?:[eE][-+]?\d+)?/g;Xo.interpolate=fu,Xo.interpolators=[function(n,t){var e=typeof t;return("string"===e?Va.has(t)||/^(#|rgb\(|hsl\()/.test(t)?au:lu:t instanceof G?au:"object"===e?Array.isArray(t)?hu:cu:su)(n,t)}],Xo.interpolateArray=hu;var ns=function(){return bt},ts=Xo.map({linear:ns,poly:xu,quad:function(){return du},cubic:function(){return mu},sin:function(){return Mu},exp:function(){return _u},circle:function(){return bu},elastic:wu,back:Su,bounce:function(){return ku}}),es=Xo.map({"in":bt,out:pu,"in-out":vu,"out-in":function(n){return vu(pu(n))}});Xo.ease=function(n){var t=n.indexOf("-"),e=t>=0?n.substring(0,t):n,r=t>=0?n.substring(t+1):"in";return e=ts.get(e)||ns,r=es.get(r)||bt,gu(r(e.apply(null,$o.call(arguments,1))))},Xo.interpolateHcl=Eu,Xo.interpolateHsl=Au,Xo.interpolateLab=Cu,Xo.interpolateRound=Nu,Xo.transform=function(n){var t=Wo.createElementNS(Xo.ns.prefix.svg,"g");return(Xo.transform=function(n){if(null!=n){t.setAttribute("transform",n);var e=t.transform.baseVal.consolidate()}return new Lu(e?e.matrix:rs)})(n)},Lu.prototype.toString=function(){return"translate("+this.translate+")rotate("+this.rotate+")skewX("+this.skew+")scale("+this.scale+")"};var 
rs={a:1,b:0,c:0,d:1,e:0,f:0};Xo.interpolateTransform=Ru,Xo.layout={},Xo.layout.bundle=function(){return function(n){for(var t=[],e=-1,r=n.length;++e<r;)t.push(Uu(n[e]));return t}},Xo.layout.chord=function(){function n(){var n,s,f,h,g,p={},v=[],d=Xo.range(i),m=[];for(e=[],r=[],n=0,h=-1;++h<i;){for(s=0,g=-1;++g<i;)s+=u[h][g];v.push(s),m.push(Xo.range(i)),n+=s}for(o&&d.sort(function(n,t){return o(v[n],v[t])}),a&&m.forEach(function(n,t){n.sort(function(n,e){return a(u[t][n],u[t][e])})}),n=(ka-l*i)/n,s=0,h=-1;++h<i;){for(f=s,g=-1;++g<i;){var y=d[h],x=m[y][g],M=u[y][x],_=s,b=s+=M*n;p[y+"-"+x]={index:y,subindex:x,startAngle:_,endAngle:b,value:M}}r[y]={index:y,startAngle:f,endAngle:s,value:(s-f)/n},s+=l}for(h=-1;++h<i;)for(g=h-1;++g<i;){var w=p[h+"-"+g],S=p[g+"-"+h];(w.value||S.value)&&e.push(w.value<S.value?{source:S,target:w}:{source:w,target:S})}c&&t()}function t(){e.sort(function(n,t){return c((n.source.value+n.target.value)/2,(t.source.value+t.target.value)/2)})}var e,r,u,i,o,a,c,s={},l=0;return s.matrix=function(n){return arguments.length?(i=(u=n)&&u.length,e=r=null,s):u},s.padding=function(n){return arguments.length?(l=n,e=r=null,s):l},s.sortGroups=function(n){return arguments.length?(o=n,e=r=null,s):o},s.sortSubgroups=function(n){return arguments.length?(a=n,e=null,s):a},s.sortChords=function(n){return arguments.length?(c=n,e&&t(),s):c},s.chords=function(){return e||n(),e},s.groups=function(){return r||n(),r},s},Xo.layout.force=function(){function n(n){return function(t,e,r,u){if(t.point!==n){var i=t.cx-n.x,o=t.cy-n.y,a=u-e,c=i*i+o*o;if(c>a*a/d){if(p>c){var s=t.charge/c;n.px-=i*s,n.py-=o*s}return!0}if(t.point&&c&&p>c){var s=t.pointCharge/c;n.px-=i*s,n.py-=o*s}}return!t.charge}}function t(n){n.px=Xo.event.x,n.py=Xo.event.y,a.resume()}var e,r,u,i,o,a={},c=Xo.dispatch("start","tick","end"),s=[1,1],l=.9,f=us,h=is,g=-30,p=os,v=.1,d=.64,m=[],y=[];return a.tick=function(){if((r*=.99)<.005)return c.end({type:"end",alpha:r=0}),!0;var 
t,e,a,f,h,p,d,x,M,_=m.length,b=y.length;for(e=0;b>e;++e)a=y[e],f=a.source,h=a.target,x=h.x-f.x,M=h.y-f.y,(p=x*x+M*M)&&(p=r*i[e]*((p=Math.sqrt(p))-u[e])/p,x*=p,M*=p,h.x-=x*(d=f.weight/(h.weight+f.weight)),h.y-=M*d,f.x+=x*(d=1-d),f.y+=M*d);if((d=r*v)&&(x=s[0]/2,M=s[1]/2,e=-1,d))for(;++e<_;)a=m[e],a.x+=(x-a.x)*d,a.y+=(M-a.y)*d;if(g)for(Zu(t=Xo.geom.quadtree(m),r,o),e=-1;++e<_;)(a=m[e]).fixed||t.visit(n(a));for(e=-1;++e<_;)a=m[e],a.fixed?(a.x=a.px,a.y=a.py):(a.x-=(a.px-(a.px=a.x))*l,a.y-=(a.py-(a.py=a.y))*l);c.tick({type:"tick",alpha:r})},a.nodes=function(n){return arguments.length?(m=n,a):m},a.links=function(n){return arguments.length?(y=n,a):y},a.size=function(n){return arguments.length?(s=n,a):s},a.linkDistance=function(n){return arguments.length?(f="function"==typeof n?n:+n,a):f},a.distance=a.linkDistance,a.linkStrength=function(n){return arguments.length?(h="function"==typeof n?n:+n,a):h},a.friction=function(n){return arguments.length?(l=+n,a):l},a.charge=function(n){return arguments.length?(g="function"==typeof n?n:+n,a):g},a.chargeDistance=function(n){return arguments.length?(p=n*n,a):Math.sqrt(p)},a.gravity=function(n){return arguments.length?(v=+n,a):v},a.theta=function(n){return arguments.length?(d=n*n,a):Math.sqrt(d)},a.alpha=function(n){return arguments.length?(n=+n,r?r=n>0?n:0:n>0&&(c.start({type:"start",alpha:r=n}),Xo.timer(a.tick)),a):r},a.start=function(){function n(n,r){if(!e){for(e=new Array(c),a=0;c>a;++a)e[a]=[];for(a=0;s>a;++a){var u=y[a];e[u.source.index].push(u.target),e[u.target.index].push(u.source)}}for(var i,o=e[t],a=-1,s=o.length;++a<s;)if(!isNaN(i=o[a][n]))return i;return Math.random()*r}var t,e,r,c=m.length,l=y.length,p=s[0],v=s[1];for(t=0;c>t;++t)(r=m[t]).index=t,r.weight=0;for(t=0;l>t;++t)r=y[t],"number"==typeof r.source&&(r.source=m[r.source]),"number"==typeof 
r.target&&(r.target=m[r.target]),++r.source.weight,++r.target.weight;for(t=0;c>t;++t)r=m[t],isNaN(r.x)&&(r.x=n("x",p)),isNaN(r.y)&&(r.y=n("y",v)),isNaN(r.px)&&(r.px=r.x),isNaN(r.py)&&(r.py=r.y);if(u=[],"function"==typeof f)for(t=0;l>t;++t)u[t]=+f.call(this,y[t],t);else for(t=0;l>t;++t)u[t]=f;if(i=[],"function"==typeof h)for(t=0;l>t;++t)i[t]=+h.call(this,y[t],t);else for(t=0;l>t;++t)i[t]=h;if(o=[],"function"==typeof g)for(t=0;c>t;++t)o[t]=+g.call(this,m[t],t);else for(t=0;c>t;++t)o[t]=g;return a.resume()},a.resume=function(){return a.alpha(.1)},a.stop=function(){return a.alpha(0)},a.drag=function(){return e||(e=Xo.behavior.drag().origin(bt).on("dragstart.force",Fu).on("drag.force",t).on("dragend.force",Ou)),arguments.length?(this.on("mouseover.force",Yu).on("mouseout.force",Iu).call(e),void 0):e},Xo.rebind(a,c,"on")};var us=20,is=1,os=1/0;Xo.layout.hierarchy=function(){function n(t,o,a){var c=u.call(e,t,o);if(t.depth=o,a.push(t),c&&(s=c.length)){for(var s,l,f=-1,h=t.children=new Array(s),g=0,p=o+1;++f<s;)l=h[f]=n(c[f],p,a),l.parent=t,g+=l.value;r&&h.sort(r),i&&(t.value=g)}else delete t.children,i&&(t.value=+i.call(e,t,o)||0);return t}function t(n,r){var u=n.children,o=0;if(u&&(a=u.length))for(var a,c=-1,s=r+1;++c<a;)o+=t(u[c],s);else i&&(o=+i.call(e,n,r)||0);return i&&(n.value=o),o}function e(t){var e=[];return n(t,0,e),e}var r=Bu,u=Xu,i=$u;return e.sort=function(n){return arguments.length?(r=n,e):r},e.children=function(n){return arguments.length?(u=n,e):u},e.value=function(n){return arguments.length?(i=n,e):i},e.revalue=function(n){return t(n,0),n},e},Xo.layout.partition=function(){function n(t,e,r,u){var i=t.children;if(t.x=e,t.y=t.depth*u,t.dx=r,t.dy=u,i&&(o=i.length)){var o,a,c,s=-1;for(r=t.value?r/t.value:0;++s<o;)n(a=i[s],e,c=a.value*r,u),e+=c}}function t(n){var e=n.children,r=0;if(e&&(u=e.length))for(var u,i=-1;++i<u;)r=Math.max(r,t(e[i]));return 1+r}function e(e,i){var o=r.call(this,e,i);return n(o[0],0,u[0],u[1]/t(o[0])),o}var 
r=Xo.layout.hierarchy(),u=[1,1];return e.size=function(n){return arguments.length?(u=n,e):u},Vu(e,r)},Xo.layout.pie=function(){function n(i){var o=i.map(function(e,r){return+t.call(n,e,r)}),a=+("function"==typeof r?r.apply(this,arguments):r),c=(("function"==typeof u?u.apply(this,arguments):u)-a)/Xo.sum(o),s=Xo.range(i.length);null!=e&&s.sort(e===as?function(n,t){return o[t]-o[n]}:function(n,t){return e(i[n],i[t])});var l=[];return s.forEach(function(n){var t;l[n]={data:i[n],value:t=o[n],startAngle:a,endAngle:a+=t*c}}),l}var t=Number,e=as,r=0,u=ka;return n.value=function(e){return arguments.length?(t=e,n):t},n.sort=function(t){return arguments.length?(e=t,n):e},n.startAngle=function(t){return arguments.length?(r=t,n):r},n.endAngle=function(t){return arguments.length?(u=t,n):u},n};var as={};Xo.layout.stack=function(){function n(a,c){var s=a.map(function(e,r){return t.call(n,e,r)}),l=s.map(function(t){return t.map(function(t,e){return[i.call(n,t,e),o.call(n,t,e)]})}),f=e.call(n,l,c);s=Xo.permute(s,f),l=Xo.permute(l,f);var h,g,p,v=r.call(n,l,c),d=s.length,m=s[0].length;for(g=0;m>g;++g)for(u.call(n,s[0][g],p=v[g],l[0][g][1]),h=1;d>h;++h)u.call(n,s[h][g],p+=l[h-1][g][1],l[h][g][1]);return a}var t=bt,e=Qu,r=ni,u=Ku,i=Ju,o=Gu;return n.values=function(e){return arguments.length?(t=e,n):t},n.order=function(t){return arguments.length?(e="function"==typeof t?t:cs.get(t)||Qu,n):e},n.offset=function(t){return arguments.length?(r="function"==typeof t?t:ss.get(t)||ni,n):r},n.x=function(t){return arguments.length?(i=t,n):i},n.y=function(t){return arguments.length?(o=t,n):o},n.out=function(t){return arguments.length?(u=t,n):u},n};var cs=Xo.map({"inside-out":function(n){var t,e,r=n.length,u=n.map(ti),i=n.map(ei),o=Xo.range(r).sort(function(n,t){return u[n]-u[t]}),a=0,c=0,s=[],l=[];for(t=0;r>t;++t)e=o[t],c>a?(a+=i[e],s.push(e)):(c+=i[e],l.push(e));return l.reverse().concat(s)},reverse:function(n){return Xo.range(n.length).reverse()},"default":Qu}),ss=Xo.map({silhouette:function(n){var 
t,e,r,u=n.length,i=n[0].length,o=[],a=0,c=[];for(e=0;i>e;++e){for(t=0,r=0;u>t;t++)r+=n[t][e][1];r>a&&(a=r),o.push(r)}for(e=0;i>e;++e)c[e]=(a-o[e])/2;return c},wiggle:function(n){var t,e,r,u,i,o,a,c,s,l=n.length,f=n[0],h=f.length,g=[];for(g[0]=c=s=0,e=1;h>e;++e){for(t=0,u=0;l>t;++t)u+=n[t][e][1];for(t=0,i=0,a=f[e][0]-f[e-1][0];l>t;++t){for(r=0,o=(n[t][e][1]-n[t][e-1][1])/(2*a);t>r;++r)o+=(n[r][e][1]-n[r][e-1][1])/a;i+=o*n[t][e][1]}g[e]=c-=u?i/u*a:0,s>c&&(s=c)}for(e=0;h>e;++e)g[e]-=s;return g},expand:function(n){var t,e,r,u=n.length,i=n[0].length,o=1/u,a=[];for(e=0;i>e;++e){for(t=0,r=0;u>t;t++)r+=n[t][e][1];if(r)for(t=0;u>t;t++)n[t][e][1]/=r;else for(t=0;u>t;t++)n[t][e][1]=o}for(e=0;i>e;++e)a[e]=0;return a},zero:ni});Xo.layout.histogram=function(){function n(n,i){for(var o,a,c=[],s=n.map(e,this),l=r.call(this,s,i),f=u.call(this,l,s,i),i=-1,h=s.length,g=f.length-1,p=t?1:1/h;++i<g;)o=c[i]=[],o.dx=f[i+1]-(o.x=f[i]),o.y=0;if(g>0)for(i=-1;++i<h;)a=s[i],a>=l[0]&&a<=l[1]&&(o=c[Xo.bisect(f,a,1,g)-1],o.y+=p,o.push(n[i]));return c}var t=!0,e=Number,r=oi,u=ui;return n.value=function(t){return arguments.length?(e=t,n):e},n.range=function(t){return arguments.length?(r=_t(t),n):r},n.bins=function(t){return arguments.length?(u="number"==typeof t?function(n){return ii(n,t)}:_t(t),n):u},n.frequency=function(e){return arguments.length?(t=!!e,n):t},n},Xo.layout.tree=function(){function n(n,i){function o(n,t){var r=n.children,u=n._tree;if(r&&(i=r.length)){for(var i,a,s,l=r[0],f=l,h=-1;++h<i;)s=r[h],o(s,a),f=c(s,a,f),a=s;vi(n);var g=.5*(l._tree.prelim+s._tree.prelim);t?(u.prelim=t._tree.prelim+e(n,t),u.mod=u.prelim-g):u.prelim=g}else t&&(u.prelim=t._tree.prelim+e(n,t))}function a(n,t){n.x=n._tree.prelim+t;var e=n.children;if(e&&(r=e.length)){var r,u=-1;for(t+=n._tree.mod;++u<r;)a(e[u],t)}}function c(n,t,r){if(t){for(var 
u,i=n,o=n,a=t,c=n.parent.children[0],s=i._tree.mod,l=o._tree.mod,f=a._tree.mod,h=c._tree.mod;a=si(a),i=ci(i),a&&i;)c=ci(c),o=si(o),o._tree.ancestor=n,u=a._tree.prelim+f-i._tree.prelim-s+e(a,i),u>0&&(di(mi(a,n,r),n,u),s+=u,l+=u),f+=a._tree.mod,s+=i._tree.mod,h+=c._tree.mod,l+=o._tree.mod;a&&!si(o)&&(o._tree.thread=a,o._tree.mod+=f-l),i&&!ci(c)&&(c._tree.thread=i,c._tree.mod+=s-h,r=n)}return r}var s=t.call(this,n,i),l=s[0];pi(l,function(n,t){n._tree={ancestor:n,prelim:0,mod:0,change:0,shift:0,number:t?t._tree.number+1:0}}),o(l),a(l,-l._tree.prelim);var f=li(l,hi),h=li(l,fi),g=li(l,gi),p=f.x-e(f,h)/2,v=h.x+e(h,f)/2,d=g.depth||1;return pi(l,u?function(n){n.x*=r[0],n.y=n.depth*r[1],delete n._tree}:function(n){n.x=(n.x-p)/(v-p)*r[0],n.y=n.depth/d*r[1],delete n._tree}),s}var t=Xo.layout.hierarchy().sort(null).value(null),e=ai,r=[1,1],u=!1;return n.separation=function(t){return arguments.length?(e=t,n):e},n.size=function(t){return arguments.length?(u=null==(r=t),n):u?null:r},n.nodeSize=function(t){return arguments.length?(u=null!=(r=t),n):u?r:null},Vu(n,t)},Xo.layout.pack=function(){function n(n,i){var o=e.call(this,n,i),a=o[0],c=u[0],s=u[1],l=null==t?Math.sqrt:"function"==typeof t?t:function(){return t};if(a.x=a.y=0,pi(a,function(n){n.r=+l(n.value)}),pi(a,bi),r){var f=r*(t?1:Math.max(2*a.r/c,2*a.r/s))/2;pi(a,function(n){n.r+=f}),pi(a,bi),pi(a,function(n){n.r-=f})}return ki(a,c/2,s/2,t?1:1/Math.max(2*a.r/c,2*a.r/s)),o}var t,e=Xo.layout.hierarchy().sort(yi),r=0,u=[1,1];return n.size=function(t){return arguments.length?(u=t,n):u},n.radius=function(e){return arguments.length?(t=null==e||"function"==typeof e?e:+e,n):t},n.padding=function(t){return arguments.length?(r=+t,n):r},Vu(n,e)},Xo.layout.cluster=function(){function n(n,i){var o,a=t.call(this,n,i),c=a[0],s=0;pi(c,function(n){var t=n.children;t&&t.length?(n.x=Ci(t),n.y=Ai(t)):(n.x=o?s+=e(n,o):0,n.y=0,o=n)});var l=Ni(c),f=Li(c),h=l.x-e(l,f)/2,g=f.x+e(f,l)/2;return 
pi(c,u?function(n){n.x=(n.x-c.x)*r[0],n.y=(c.y-n.y)*r[1]}:function(n){n.x=(n.x-h)/(g-h)*r[0],n.y=(1-(c.y?n.y/c.y:1))*r[1]}),a}var t=Xo.layout.hierarchy().sort(null).value(null),e=ai,r=[1,1],u=!1;return n.separation=function(t){return arguments.length?(e=t,n):e},n.size=function(t){return arguments.length?(u=null==(r=t),n):u?null:r},n.nodeSize=function(t){return arguments.length?(u=null!=(r=t),n):u?r:null},Vu(n,t)},Xo.layout.treemap=function(){function n(n,t){for(var e,r,u=-1,i=n.length;++u<i;)r=(e=n[u]).value*(0>t?0:t),e.area=isNaN(r)||0>=r?0:r}function t(e){var i=e.children;if(i&&i.length){var o,a,c,s=f(e),l=[],h=i.slice(),p=1/0,v="slice"===g?s.dx:"dice"===g?s.dy:"slice-dice"===g?1&e.depth?s.dy:s.dx:Math.min(s.dx,s.dy);for(n(h,s.dx*s.dy/e.value),l.area=0;(c=h.length)>0;)l.push(o=h[c-1]),l.area+=o.area,"squarify"!==g||(a=r(l,v))<=p?(h.pop(),p=a):(l.area-=l.pop().area,u(l,v,s,!1),v=Math.min(s.dx,s.dy),l.length=l.area=0,p=1/0);l.length&&(u(l,v,s,!0),l.length=l.area=0),i.forEach(t)}}function e(t){var r=t.children;if(r&&r.length){var i,o=f(t),a=r.slice(),c=[];for(n(a,o.dx*o.dy/t.value),c.area=0;i=a.pop();)c.push(i),c.area+=i.area,null!=i.z&&(u(c,i.z?o.dx:o.dy,o,!a.length),c.length=c.area=0);r.forEach(e)}}function r(n,t){for(var e,r=n.area,u=0,i=1/0,o=-1,a=n.length;++o<a;)(e=n[o].area)&&(i>e&&(i=e),e>u&&(u=e));return r*=r,t*=t,r?Math.max(t*u*p/r,r/(t*i*p)):1/0}function u(n,t,e,r){var u,i=-1,o=n.length,a=e.x,s=e.y,l=t?c(n.area/t):0;if(t==e.dx){for((r||l>e.dy)&&(l=e.dy);++i<o;)u=n[i],u.x=a,u.y=s,u.dy=l,a+=u.dx=Math.min(e.x+e.dx-a,l?c(u.area/l):0);u.z=!0,u.dx+=e.x+e.dx-a,e.y+=l,e.dy-=l}else{for((r||l>e.dx)&&(l=e.dx);++i<o;)u=n[i],u.x=a,u.y=s,u.dx=l,s+=u.dy=Math.min(e.y+e.dy-s,l?c(u.area/l):0);u.z=!1,u.dy+=e.y+e.dy-s,e.x+=l,e.dx-=l}}function i(r){var u=o||a(r),i=u[0];return i.x=0,i.y=0,i.dx=s[0],i.dy=s[1],o&&a.revalue(i),n([i],i.dx*i.dy/i.value),(o?e:t)(i),h&&(o=u),u}var o,a=Xo.layout.hierarchy(),c=Math.round,s=[1,1],l=null,f=Ti,h=!1,g="squarify",p=.5*(1+Math.sqrt(5));return 
i.size=function(n){return arguments.length?(s=n,i):s},i.padding=function(n){function t(t){var e=n.call(i,t,t.depth);return null==e?Ti(t):qi(t,"number"==typeof e?[e,e,e,e]:e)}function e(t){return qi(t,n)}if(!arguments.length)return l;var r;return f=null==(l=n)?Ti:"function"==(r=typeof n)?t:"number"===r?(n=[n,n,n,n],e):e,i},i.round=function(n){return arguments.length?(c=n?Math.round:Number,i):c!=Number},i.sticky=function(n){return arguments.length?(h=n,o=null,i):h},i.ratio=function(n){return arguments.length?(p=n,i):p},i.mode=function(n){return arguments.length?(g=n+"",i):g},Vu(i,a)},Xo.random={normal:function(n,t){var e=arguments.length;return 2>e&&(t=1),1>e&&(n=0),function(){var e,r,u;do e=2*Math.random()-1,r=2*Math.random()-1,u=e*e+r*r;while(!u||u>1);return n+t*e*Math.sqrt(-2*Math.log(u)/u)}},logNormal:function(){var n=Xo.random.normal.apply(Xo,arguments);return function(){return Math.exp(n())}},bates:function(n){var t=Xo.random.irwinHall(n);return function(){return t()/n}},irwinHall:function(n){return function(){for(var t=0,e=0;n>e;e++)t+=Math.random();return t}}},Xo.scale={};var ls={floor:bt,ceil:bt};Xo.scale.linear=function(){return Hi([0,1],[0,1],fu,!1)};var fs={s:1,g:1,p:1,r:1,e:1};Xo.scale.log=function(){return $i(Xo.scale.linear().domain([0,1]),10,!0,[1,10])};var hs=Xo.format(".0e"),gs={floor:function(n){return-Math.ceil(-n)},ceil:function(n){return-Math.floor(-n)}};Xo.scale.pow=function(){return Bi(Xo.scale.linear(),1,[0,1])},Xo.scale.sqrt=function(){return Xo.scale.pow().exponent(.5)},Xo.scale.ordinal=function(){return Ji([],{t:"range",a:[[]]})},Xo.scale.category10=function(){return Xo.scale.ordinal().range(ps)},Xo.scale.category20=function(){return Xo.scale.ordinal().range(vs)},Xo.scale.category20b=function(){return Xo.scale.ordinal().range(ds)},Xo.scale.category20c=function(){return Xo.scale.ordinal().range(ms)};var 
ps=[2062260,16744206,2924588,14034728,9725885,9197131,14907330,8355711,12369186,1556175].map(ht),vs=[2062260,11454440,16744206,16759672,2924588,10018698,14034728,16750742,9725885,12955861,9197131,12885140,14907330,16234194,8355711,13092807,12369186,14408589,1556175,10410725].map(ht),ds=[3750777,5395619,7040719,10264286,6519097,9216594,11915115,13556636,9202993,12426809,15186514,15190932,8666169,11356490,14049643,15177372,8077683,10834324,13528509,14589654].map(ht),ms=[3244733,7057110,10406625,13032431,15095053,16616764,16625259,16634018,3253076,7652470,10607003,13101504,7695281,10394312,12369372,14342891,6513507,9868950,12434877,14277081].map(ht);Xo.scale.quantile=function(){return Gi([],[])},Xo.scale.quantize=function(){return Ki(0,1,[0,1])},Xo.scale.threshold=function(){return Qi([.5],[0,1])},Xo.scale.identity=function(){return no([0,1])},Xo.svg={},Xo.svg.arc=function(){function n(){var n=t.apply(this,arguments),i=e.apply(this,arguments),o=r.apply(this,arguments)+ys,a=u.apply(this,arguments)+ys,c=(o>a&&(c=o,o=a,a=c),a-o),s=Sa>c?"0":"1",l=Math.cos(o),f=Math.sin(o),h=Math.cos(a),g=Math.sin(a);return c>=xs?n?"M0,"+i+"A"+i+","+i+" 0 1,1 0,"+-i+"A"+i+","+i+" 0 1,1 0,"+i+"M0,"+n+"A"+n+","+n+" 0 1,0 0,"+-n+"A"+n+","+n+" 0 1,0 0,"+n+"Z":"M0,"+i+"A"+i+","+i+" 0 1,1 0,"+-i+"A"+i+","+i+" 0 1,1 0,"+i+"Z":n?"M"+i*l+","+i*f+"A"+i+","+i+" 0 "+s+",1 "+i*h+","+i*g+"L"+n*h+","+n*g+"A"+n+","+n+" 0 "+s+",0 "+n*l+","+n*f+"Z":"M"+i*l+","+i*f+"A"+i+","+i+" 0 "+s+",1 "+i*h+","+i*g+"L0,0"+"Z"}var t=to,e=eo,r=ro,u=uo;return n.innerRadius=function(e){return arguments.length?(t=_t(e),n):t},n.outerRadius=function(t){return arguments.length?(e=_t(t),n):e},n.startAngle=function(t){return arguments.length?(r=_t(t),n):r},n.endAngle=function(t){return arguments.length?(u=_t(t),n):u},n.centroid=function(){var n=(t.apply(this,arguments)+e.apply(this,arguments))/2,i=(r.apply(this,arguments)+u.apply(this,arguments))/2+ys;return[Math.cos(i)*n,Math.sin(i)*n]},n};var 
ys=-Ea,xs=ka-Aa;Xo.svg.line=function(){return io(bt)};var Ms=Xo.map({linear:oo,"linear-closed":ao,step:co,"step-before":so,"step-after":lo,basis:mo,"basis-open":yo,"basis-closed":xo,bundle:Mo,cardinal:go,"cardinal-open":fo,"cardinal-closed":ho,monotone:Eo});Ms.forEach(function(n,t){t.key=n,t.closed=/-closed$/.test(n)});var _s=[0,2/3,1/3,0],bs=[0,1/3,2/3,0],ws=[0,1/6,2/3,1/6];Xo.svg.line.radial=function(){var n=io(Ao);return n.radius=n.x,delete n.x,n.angle=n.y,delete n.y,n},so.reverse=lo,lo.reverse=so,Xo.svg.area=function(){return Co(bt)},Xo.svg.area.radial=function(){var n=Co(Ao);return n.radius=n.x,delete n.x,n.innerRadius=n.x0,delete n.x0,n.outerRadius=n.x1,delete n.x1,n.angle=n.y,delete n.y,n.startAngle=n.y0,delete n.y0,n.endAngle=n.y1,delete n.y1,n},Xo.svg.chord=function(){function n(n,a){var c=t(this,i,n,a),s=t(this,o,n,a);return"M"+c.p0+r(c.r,c.p1,c.a1-c.a0)+(e(c,s)?u(c.r,c.p1,c.r,c.p0):u(c.r,c.p1,s.r,s.p0)+r(s.r,s.p1,s.a1-s.a0)+u(s.r,s.p1,c.r,c.p0))+"Z"}function t(n,t,e,r){var u=t.call(n,e,r),i=a.call(n,u,r),o=c.call(n,u,r)+ys,l=s.call(n,u,r)+ys;return{r:i,a0:o,a1:l,p0:[i*Math.cos(o),i*Math.sin(o)],p1:[i*Math.cos(l),i*Math.sin(l)]}}function e(n,t){return n.a0==t.a0&&n.a1==t.a1}function r(n,t,e){return"A"+n+","+n+" 0 "+ +(e>Sa)+",1 "+t}function u(n,t,e,r){return"Q 0,0 "+r}var i=hr,o=gr,a=No,c=ro,s=uo;return n.radius=function(t){return arguments.length?(a=_t(t),n):a},n.source=function(t){return arguments.length?(i=_t(t),n):i},n.target=function(t){return arguments.length?(o=_t(t),n):o},n.startAngle=function(t){return arguments.length?(c=_t(t),n):c},n.endAngle=function(t){return arguments.length?(s=_t(t),n):s},n},Xo.svg.diagonal=function(){function n(n,u){var i=t.call(this,n,u),o=e.call(this,n,u),a=(i.y+o.y)/2,c=[i,{x:i.x,y:a},{x:o.x,y:a},o];return c=c.map(r),"M"+c[0]+"C"+c[1]+" "+c[2]+" "+c[3]}var t=hr,e=gr,r=Lo;return n.source=function(e){return arguments.length?(t=_t(e),n):t},n.target=function(t){return 
arguments.length?(e=_t(t),n):e},n.projection=function(t){return arguments.length?(r=t,n):r},n},Xo.svg.diagonal.radial=function(){var n=Xo.svg.diagonal(),t=Lo,e=n.projection;return n.projection=function(n){return arguments.length?e(To(t=n)):t},n},Xo.svg.symbol=function(){function n(n,r){return(Ss.get(t.call(this,n,r))||Ro)(e.call(this,n,r))}var t=zo,e=qo;return n.type=function(e){return arguments.length?(t=_t(e),n):t},n.size=function(t){return arguments.length?(e=_t(t),n):e},n};var Ss=Xo.map({circle:Ro,cross:function(n){var t=Math.sqrt(n/5)/2;return"M"+-3*t+","+-t+"H"+-t+"V"+-3*t+"H"+t+"V"+-t+"H"+3*t+"V"+t+"H"+t+"V"+3*t+"H"+-t+"V"+t+"H"+-3*t+"Z"},diamond:function(n){var t=Math.sqrt(n/(2*Cs)),e=t*Cs;return"M0,"+-t+"L"+e+",0"+" 0,"+t+" "+-e+",0"+"Z"},square:function(n){var t=Math.sqrt(n)/2;return"M"+-t+","+-t+"L"+t+","+-t+" "+t+","+t+" "+-t+","+t+"Z"},"triangle-down":function(n){var t=Math.sqrt(n/As),e=t*As/2;return"M0,"+e+"L"+t+","+-e+" "+-t+","+-e+"Z"},"triangle-up":function(n){var t=Math.sqrt(n/As),e=t*As/2;return"M0,"+-e+"L"+t+","+e+" "+-t+","+e+"Z"}});Xo.svg.symbolTypes=Ss.keys();var ks,Es,As=Math.sqrt(3),Cs=Math.tan(30*Na),Ns=[],Ls=0;Ns.call=da.call,Ns.empty=da.empty,Ns.node=da.node,Ns.size=da.size,Xo.transition=function(n){return arguments.length?ks?n.transition():n:xa.transition()},Xo.transition.prototype=Ns,Ns.select=function(n){var t,e,r,u=this.id,i=[];n=M(n);for(var o=-1,a=this.length;++o<a;){i.push(t=[]);for(var c=this[o],s=-1,l=c.length;++s<l;)(r=c[s])&&(e=n.call(r,r.__data__,s,o))?("__data__"in r&&(e.__data__=r.__data__),jo(e,s,u,r.__transition__[u]),t.push(e)):t.push(null)}return Do(i,u)},Ns.selectAll=function(n){var t,e,r,u,i,o=this.id,a=[];n=_(n);for(var c=-1,s=this.length;++c<s;)for(var l=this[c],f=-1,h=l.length;++f<h;)if(r=l[f]){i=r.__transition__[o],e=n.call(r,r.__data__,f,c),a.push(t=[]);for(var g=-1,p=e.length;++g<p;)(u=e[g])&&jo(u,g,o,i),t.push(u)}return Do(a,o)},Ns.filter=function(n){var t,e,r,u=[];"function"!=typeof n&&(n=q(n));for(var 
i=0,o=this.length;o>i;i++){u.push(t=[]);for(var e=this[i],a=0,c=e.length;c>a;a++)(r=e[a])&&n.call(r,r.__data__,a,i)&&t.push(r)}return Do(u,this.id)},Ns.tween=function(n,t){var e=this.id;return arguments.length<2?this.node().__transition__[e].tween.get(n):R(this,null==t?function(t){t.__transition__[e].tween.remove(n)}:function(r){r.__transition__[e].tween.set(n,t)})},Ns.attr=function(n,t){function e(){this.removeAttribute(a)}function r(){this.removeAttributeNS(a.space,a.local)}function u(n){return null==n?e:(n+="",function(){var t,e=this.getAttribute(a);return e!==n&&(t=o(e,n),function(n){this.setAttribute(a,t(n))})})}function i(n){return null==n?r:(n+="",function(){var t,e=this.getAttributeNS(a.space,a.local);return e!==n&&(t=o(e,n),function(n){this.setAttributeNS(a.space,a.local,t(n))})})}if(arguments.length<2){for(t in n)this.attr(t,n[t]);return this}var o="transform"==n?Ru:fu,a=Xo.ns.qualify(n);return Po(this,"attr."+n,t,a.local?i:u)},Ns.attrTween=function(n,t){function e(n,e){var r=t.call(this,n,e,this.getAttribute(u));return r&&function(n){this.setAttribute(u,r(n))}}function r(n,e){var r=t.call(this,n,e,this.getAttributeNS(u.space,u.local));return r&&function(n){this.setAttributeNS(u.space,u.local,r(n))}}var u=Xo.ns.qualify(n);return this.tween("attr."+n,u.local?r:e)},Ns.style=function(n,t,e){function r(){this.style.removeProperty(n)}function u(t){return null==t?r:(t+="",function(){var r,u=Go.getComputedStyle(this,null).getPropertyValue(n);return u!==t&&(r=fu(u,t),function(t){this.style.setProperty(n,r(t),e)})})}var i=arguments.length;if(3>i){if("string"!=typeof n){2>i&&(t="");for(e in n)this.style(e,n[e],t);return this}e=""}return Po(this,"style."+n,t,u)},Ns.styleTween=function(n,t,e){function r(r,u){var i=t.call(this,r,u,Go.getComputedStyle(this,null).getPropertyValue(n));return i&&function(t){this.style.setProperty(n,i(t),e)}}return arguments.length<3&&(e=""),this.tween("style."+n,r)},Ns.text=function(n){return 
Po(this,"text",n,Uo)},Ns.remove=function(){return this.each("end.transition",function(){var n;this.__transition__.count<2&&(n=this.parentNode)&&n.removeChild(this)})},Ns.ease=function(n){var t=this.id;return arguments.length<1?this.node().__transition__[t].ease:("function"!=typeof n&&(n=Xo.ease.apply(Xo,arguments)),R(this,function(e){e.__transition__[t].ease=n}))},Ns.delay=function(n){var t=this.id;return R(this,"function"==typeof n?function(e,r,u){e.__transition__[t].delay=+n.call(e,e.__data__,r,u)}:(n=+n,function(e){e.__transition__[t].delay=n}))},Ns.duration=function(n){var t=this.id;return R(this,"function"==typeof n?function(e,r,u){e.__transition__[t].duration=Math.max(1,n.call(e,e.__data__,r,u))}:(n=Math.max(1,n),function(e){e.__transition__[t].duration=n}))},Ns.each=function(n,t){var e=this.id;if(arguments.length<2){var r=Es,u=ks;ks=e,R(this,function(t,r,u){Es=t.__transition__[e],n.call(t,t.__data__,r,u)}),Es=r,ks=u}else R(this,function(r){var u=r.__transition__[e];(u.event||(u.event=Xo.dispatch("start","end"))).on(n,t)});return this},Ns.transition=function(){for(var n,t,e,r,u=this.id,i=++Ls,o=[],a=0,c=this.length;c>a;a++){o.push(n=[]);for(var t=this[a],s=0,l=t.length;l>s;s++)(e=t[s])&&(r=Object.create(e.__transition__[u]),r.delay+=r.duration,jo(e,s,i,r)),n.push(e)}return Do(o,i)},Xo.svg.axis=function(){function n(n){n.each(function(){var n,s=Xo.select(this),l=this.__chart__||e,f=this.__chart__=e.copy(),h=null==c?f.ticks?f.ticks.apply(f,a):f.domain():c,g=null==t?f.tickFormat?f.tickFormat.apply(f,a):bt:t,p=s.selectAll(".tick").data(h,f),v=p.enter().insert("g",".domain").attr("class","tick").style("opacity",Aa),d=Xo.transition(p.exit()).style("opacity",Aa).remove(),m=Xo.transition(p).style("opacity",1),y=Ri(f),x=s.selectAll(".domain").data([0]),M=(x.enter().append("path").attr("class","domain"),Xo.transition(x));v.append("line"),v.append("text");var 
_=v.select("line"),b=m.select("line"),w=p.select("text").text(g),S=v.select("text"),k=m.select("text");switch(r){case"bottom":n=Ho,_.attr("y2",u),S.attr("y",Math.max(u,0)+o),b.attr("x2",0).attr("y2",u),k.attr("x",0).attr("y",Math.max(u,0)+o),w.attr("dy",".71em").style("text-anchor","middle"),M.attr("d","M"+y[0]+","+i+"V0H"+y[1]+"V"+i);break;case"top":n=Ho,_.attr("y2",-u),S.attr("y",-(Math.max(u,0)+o)),b.attr("x2",0).attr("y2",-u),k.attr("x",0).attr("y",-(Math.max(u,0)+o)),w.attr("dy","0em").style("text-anchor","middle"),M.attr("d","M"+y[0]+","+-i+"V0H"+y[1]+"V"+-i);break;case"left":n=Fo,_.attr("x2",-u),S.attr("x",-(Math.max(u,0)+o)),b.attr("x2",-u).attr("y2",0),k.attr("x",-(Math.max(u,0)+o)).attr("y",0),w.attr("dy",".32em").style("text-anchor","end"),M.attr("d","M"+-i+","+y[0]+"H0V"+y[1]+"H"+-i);break;case"right":n=Fo,_.attr("x2",u),S.attr("x",Math.max(u,0)+o),b.attr("x2",u).attr("y2",0),k.attr("x",Math.max(u,0)+o).attr("y",0),w.attr("dy",".32em").style("text-anchor","start"),M.attr("d","M"+i+","+y[0]+"H0V"+y[1]+"H"+i)}if(f.rangeBand){var E=f,A=E.rangeBand()/2;l=f=function(n){return E(n)+A}}else l.rangeBand?l=f:d.call(n,f);v.call(n,l),m.call(n,f)})}var t,e=Xo.scale.linear(),r=Ts,u=6,i=6,o=3,a=[10],c=null;return n.scale=function(t){return arguments.length?(e=t,n):e},n.orient=function(t){return arguments.length?(r=t in qs?t+"":Ts,n):r},n.ticks=function(){return arguments.length?(a=arguments,n):a},n.tickValues=function(t){return arguments.length?(c=t,n):c},n.tickFormat=function(e){return arguments.length?(t=e,n):t},n.tickSize=function(t){var e=arguments.length;return e?(u=+t,i=+arguments[e-1],n):u},n.innerTickSize=function(t){return arguments.length?(u=+t,n):u},n.outerTickSize=function(t){return arguments.length?(i=+t,n):i},n.tickPadding=function(t){return arguments.length?(o=+t,n):o},n.tickSubdivide=function(){return arguments.length&&n},n};var Ts="bottom",qs={top:1,right:1,bottom:1,left:1};Xo.svg.brush=function(){function n(i){i.each(function(){var 
i=Xo.select(this).style("pointer-events","all").style("-webkit-tap-highlight-color","rgba(0,0,0,0)").on("mousedown.brush",u).on("touchstart.brush",u),o=i.selectAll(".background").data([0]);o.enter().append("rect").attr("class","background").style("visibility","hidden").style("cursor","crosshair"),i.selectAll(".extent").data([0]).enter().append("rect").attr("class","extent").style("cursor","move");var a=i.selectAll(".resize").data(p,bt);a.exit().remove(),a.enter().append("g").attr("class",function(n){return"resize "+n}).style("cursor",function(n){return zs[n]}).append("rect").attr("x",function(n){return/[ew]$/.test(n)?-3:null}).attr("y",function(n){return/^[ns]/.test(n)?-3:null}).attr("width",6).attr("height",6).style("visibility","hidden"),a.style("display",n.empty()?"none":null);var l,f=Xo.transition(i),h=Xo.transition(o);c&&(l=Ri(c),h.attr("x",l[0]).attr("width",l[1]-l[0]),e(f)),s&&(l=Ri(s),h.attr("y",l[0]).attr("height",l[1]-l[0]),r(f)),t(f)})}function t(n){n.selectAll(".resize").attr("transform",function(n){return"translate("+l[+/e$/.test(n)]+","+f[+/^s/.test(n)]+")"})}function e(n){n.select(".extent").attr("x",l[0]),n.selectAll(".extent,.n>rect,.s>rect").attr("width",l[1]-l[0])}function r(n){n.select(".extent").attr("y",f[0]),n.selectAll(".extent,.e>rect,.w>rect").attr("height",f[1]-f[0])}function u(){function u(){32==Xo.event.keyCode&&(C||(x=null,L[0]-=l[1],L[1]-=f[1],C=2),d())}function p(){32==Xo.event.keyCode&&2==C&&(L[0]+=l[1],L[1]+=f[1],C=0,d())}function v(){var n=Xo.mouse(_),u=!1;M&&(n[0]+=M[0],n[1]+=M[1]),C||(Xo.event.altKey?(x||(x=[(l[0]+l[1])/2,(f[0]+f[1])/2]),L[0]=l[+(n[0]<x[0])],L[1]=f[+(n[1]<x[1])]):x=null),E&&m(n,c,0)&&(e(S),u=!0),A&&m(n,s,1)&&(r(S),u=!0),u&&(t(S),w({type:"brush",mode:C?"move":"resize"}))}function m(n,t,e){var r,u,a=Ri(t),c=a[0],s=a[1],p=L[e],v=e?f:l,d=v[1]-v[0];return 
C&&(c-=p,s-=d+p),r=(e?g:h)?Math.max(c,Math.min(s,n[e])):n[e],C?u=(r+=p)+d:(x&&(p=Math.max(c,Math.min(s,2*x[e]-r))),r>p?(u=r,r=p):u=p),v[0]!=r||v[1]!=u?(e?o=null:i=null,v[0]=r,v[1]=u,!0):void 0}function y(){v(),S.style("pointer-events","all").selectAll(".resize").style("display",n.empty()?"none":null),Xo.select("body").style("cursor",null),T.on("mousemove.brush",null).on("mouseup.brush",null).on("touchmove.brush",null).on("touchend.brush",null).on("keydown.brush",null).on("keyup.brush",null),N(),w({type:"brushend"})}var x,M,_=this,b=Xo.select(Xo.event.target),w=a.of(_,arguments),S=Xo.select(_),k=b.datum(),E=!/^(n|s)$/.test(k)&&c,A=!/^(e|w)$/.test(k)&&s,C=b.classed("extent"),N=O(),L=Xo.mouse(_),T=Xo.select(Go).on("keydown.brush",u).on("keyup.brush",p);if(Xo.event.changedTouches?T.on("touchmove.brush",v).on("touchend.brush",y):T.on("mousemove.brush",v).on("mouseup.brush",y),S.interrupt().selectAll("*").interrupt(),C)L[0]=l[0]-L[0],L[1]=f[0]-L[1];else if(k){var q=+/w$/.test(k),z=+/^n/.test(k);M=[l[1-q]-L[0],f[1-z]-L[1]],L[0]=l[q],L[1]=f[z]}else Xo.event.altKey&&(x=L.slice());S.style("pointer-events","none").selectAll(".resize").style("display",null),Xo.select("body").style("cursor",b.style("cursor")),w({type:"brushstart"}),v()}var i,o,a=y(n,"brushstart","brush","brushend"),c=null,s=null,l=[0,0],f=[0,0],h=!0,g=!0,p=Rs[0];return n.event=function(n){n.each(function(){var n=a.of(this,arguments),t={x:l,y:f,i:i,j:o},e=this.__chart__||t;this.__chart__=t,ks?Xo.select(this).transition().each("start.brush",function(){i=e.i,o=e.j,l=e.x,f=e.y,n({type:"brushstart"})}).tween("brush:brush",function(){var e=hu(l,t.x),r=hu(f,t.y);return i=o=null,function(u){l=t.x=e(u),f=t.y=r(u),n({type:"brush",mode:"resize"})}}).each("end.brush",function(){i=t.i,o=t.j,n({type:"brush",mode:"resize"}),n({type:"brushend"})}):(n({type:"brushstart"}),n({type:"brush",mode:"resize"}),n({type:"brushend"}))})},n.x=function(t){return arguments.length?(c=t,p=Rs[!c<<1|!s],n):c},n.y=function(t){return 
arguments.length?(s=t,p=Rs[!c<<1|!s],n):s},n.clamp=function(t){return arguments.length?(c&&s?(h=!!t[0],g=!!t[1]):c?h=!!t:s&&(g=!!t),n):c&&s?[h,g]:c?h:s?g:null},n.extent=function(t){var e,r,u,a,h;return arguments.length?(c&&(e=t[0],r=t[1],s&&(e=e[0],r=r[0]),i=[e,r],c.invert&&(e=c(e),r=c(r)),e>r&&(h=e,e=r,r=h),(e!=l[0]||r!=l[1])&&(l=[e,r])),s&&(u=t[0],a=t[1],c&&(u=u[1],a=a[1]),o=[u,a],s.invert&&(u=s(u),a=s(a)),u>a&&(h=u,u=a,a=h),(u!=f[0]||a!=f[1])&&(f=[u,a])),n):(c&&(i?(e=i[0],r=i[1]):(e=l[0],r=l[1],c.invert&&(e=c.invert(e),r=c.invert(r)),e>r&&(h=e,e=r,r=h))),s&&(o?(u=o[0],a=o[1]):(u=f[0],a=f[1],s.invert&&(u=s.invert(u),a=s.invert(a)),u>a&&(h=u,u=a,a=h))),c&&s?[[e,u],[r,a]]:c?[e,r]:s&&[u,a])},n.clear=function(){return n.empty()||(l=[0,0],f=[0,0],i=o=null),n},n.empty=function(){return!!c&&l[0]==l[1]||!!s&&f[0]==f[1]},Xo.rebind(n,a,"on")};var zs={n:"ns-resize",e:"ew-resize",s:"ns-resize",w:"ew-resize",nw:"nwse-resize",ne:"nesw-resize",se:"nwse-resize",sw:"nesw-resize"},Rs=[["n","e","s","w","nw","ne","se","sw"],["e","w"],["n","s"],[]],Ds=tc.format=ac.timeFormat,Ps=Ds.utc,Us=Ps("%Y-%m-%dT%H:%M:%S.%LZ");Ds.iso=Date.prototype.toISOString&&+new Date("2000-01-01T00:00:00.000Z")?Oo:Us,Oo.parse=function(n){var t=new Date(n);return isNaN(t)?null:t},Oo.toString=Us.toString,tc.second=Rt(function(n){return new ec(1e3*Math.floor(n/1e3))},function(n,t){n.setTime(n.getTime()+1e3*Math.floor(t))},function(n){return n.getSeconds()}),tc.seconds=tc.second.range,tc.seconds.utc=tc.second.utc.range,tc.minute=Rt(function(n){return new ec(6e4*Math.floor(n/6e4))},function(n,t){n.setTime(n.getTime()+6e4*Math.floor(t))},function(n){return n.getMinutes()}),tc.minutes=tc.minute.range,tc.minutes.utc=tc.minute.utc.range,tc.hour=Rt(function(n){var t=n.getTimezoneOffset()/60;return new ec(36e5*(Math.floor(n/36e5-t)+t))},function(n,t){n.setTime(n.getTime()+36e5*Math.floor(t))},function(n){return n.getHours()}),tc.hours=tc.hour.range,tc.hours.utc=tc.hour.utc.range,tc.month=Rt(function(n){return 
n=tc.day(n),n.setDate(1),n},function(n,t){n.setMonth(n.getMonth()+t)},function(n){return n.getMonth()}),tc.months=tc.month.range,tc.months.utc=tc.month.utc.range;var js=[1e3,5e3,15e3,3e4,6e4,3e5,9e5,18e5,36e5,108e5,216e5,432e5,864e5,1728e5,6048e5,2592e6,7776e6,31536e6],Hs=[[tc.second,1],[tc.second,5],[tc.second,15],[tc.second,30],[tc.minute,1],[tc.minute,5],[tc.minute,15],[tc.minute,30],[tc.hour,1],[tc.hour,3],[tc.hour,6],[tc.hour,12],[tc.day,1],[tc.day,2],[tc.week,1],[tc.month,1],[tc.month,3],[tc.year,1]],Fs=Ds.multi([[".%L",function(n){return n.getMilliseconds()}],[":%S",function(n){return n.getSeconds()}],["%I:%M",function(n){return n.getMinutes()}],["%I %p",function(n){return n.getHours()}],["%a %d",function(n){return n.getDay()&&1!=n.getDate()}],["%b %d",function(n){return 1!=n.getDate()}],["%B",function(n){return n.getMonth()}],["%Y",be]]),Os={range:function(n,t,e){return Xo.range(Math.ceil(n/e)*e,+t,e).map(Io)},floor:bt,ceil:bt};Hs.year=tc.year,tc.scale=function(){return Yo(Xo.scale.linear(),Hs,Fs)};var Ys=Hs.map(function(n){return[n[0].utc,n[1]]}),Is=Ps.multi([[".%L",function(n){return n.getUTCMilliseconds()}],[":%S",function(n){return n.getUTCSeconds()}],["%I:%M",function(n){return n.getUTCMinutes()}],["%I %p",function(n){return n.getUTCHours()}],["%a %d",function(n){return n.getUTCDay()&&1!=n.getUTCDate()}],["%b %d",function(n){return 1!=n.getUTCDate()}],["%B",function(n){return n.getUTCMonth()}],["%Y",be]]);Ys.year=tc.year.utc,tc.scale.utc=function(){return Yo(Xo.scale.linear(),Ys,Is)},Xo.text=wt(function(n){return n.responseText}),Xo.json=function(n,t){return St(n,"application/json",Zo,t)},Xo.html=function(n,t){return St(n,"text/html",Vo,t)},Xo.xml=wt(function(n){return n.responseXML}),"function"==typeof define&&define.amd?define(Xo):"object"==typeof module&&module.exports?module.exports=Xo:this.d3=Xo}();'use strict';(function(window){window.define=undefined;}).call(this,this);'use strict';tr.exportTo('tr.ui.b',function(){const 
DataSeriesEnableChangeEventType='data-series-enabled-change';const THIS_DOC=document.currentScript.ownerDocument;const svgNS='http://www.w3.org/2000/svg';const ColorScheme=tr.b.ColorScheme;function getColorOfKey(key,selected){let id=ColorScheme.getColorIdForGeneralPurposeString(key);if(selected){id+=ColorScheme.properties.brightenedOffsets[0];} return ColorScheme.colorsAsStrings[id];} function getSVGTextSize(parentNode,text,opt_callback,opt_this){const textNode=document.createElementNS('http://www.w3.org/2000/svg','text');textNode.setAttributeNS(null,'x',0);textNode.setAttributeNS(null,'y',0);textNode.setAttributeNS(null,'fill','black');textNode.appendChild(document.createTextNode(text));parentNode.appendChild(textNode);if(opt_callback){opt_callback.call(opt_this||parentNode,textNode);} const width=textNode.getComputedTextLength();const height=textNode.getBBox().height;parentNode.removeChild(textNode);return{width,height};} @@ -8068,7 +8068,7 @@ return snapshot;} function findAllEvents(rendererHelper,category,title){const targetEvents=[];for(const ev of rendererHelper.process.getDescendantEvents()){if(!hasCategoryAndName(ev,category,title))continue;targetEvents.push(ev);} return targetEvents;} -const URL_EXCLUSION=['','about:blank','data:text/html,pluginplaceholderdata','chrome-error://chromewebdata/'];function shouldIgnoreURL(url){return URL_EXCLUSION.includes(url);} +const URL_BLACKLIST=['','about:blank','data:text/html,pluginplaceholderdata','chrome-error://chromewebdata/'];function shouldIgnoreURL(url){return URL_BLACKLIST.includes(url);} function collectTimeToEvent(category,eventName,rendererHelper,frameToNavStartEvents){const targetEvents=findAllEvents(rendererHelper,category,eventName);const samples=[];for(const ev of targetEvents){if(rendererHelper.isTelemetryInternalEvent(ev))continue;const frameIdRef=ev.args.frame;const 
snapshot=findFrameLoaderSnapshotAt(rendererHelper,frameIdRef,ev.start);if(snapshot===undefined||!snapshot.args.isLoadingMainFrame)continue;const url=snapshot.args.documentLoaderURL;if(shouldIgnoreURL(url))continue;const navigationStartEvent=EventFinderUtils.findLastEventStartingOnOrBeforeTimestamp(frameToNavStartEvents.get(frameIdRef)||[],ev.start);if(navigationStartEvent===undefined)continue;const navStartToEventRange=tr.b.math.Range.fromExplicitRange(navigationStartEvent.start,ev.start);const networkEvents=getNetworkEventsInRange(rendererHelper.process,navStartToEventRange);const breakdownTree=tr.metrics.sh.generateWallClockTimeBreakdownTree(rendererHelper.mainThread,networkEvents,navStartToEventRange);samples.push({value:navStartToEventRange.duration,breakdownTree,diagnostics:{breakdown:createBreakdownDiagnostic(breakdownTree),url:new tr.v.d.GenericSet([url]),Start:new RelatedEventSet(navigationStartEvent),End:new RelatedEventSet(ev)}});} return samples;} function addFirstMeaningfulPaintSample(samples,rendererHelper,navigationStart,fmpMarkerEvent,url){const navStartToFMPRange=tr.b.math.Range.fromExplicitRange(navigationStart.start,fmpMarkerEvent.start);const networkEvents=getNetworkEventsInRange(rendererHelper.process,navStartToFMPRange);const timeToFirstMeaningfulPaint=navStartToFMPRange.duration;const breakdownTree=tr.metrics.sh.generateWallClockTimeBreakdownTree(rendererHelper.mainThread,networkEvents,navStartToFMPRange);samples.push({value:timeToFirstMeaningfulPaint,breakdownTree,diagnostics:{breakdown:createBreakdownDiagnostic(breakdownTree),start:new RelatedEventSet(navigationStart),end:new RelatedEventSet(fmpMarkerEvent),infos:new tr.v.d.GenericSet([{url,pid:rendererHelper.pid,start:navigationStart.start,fmp:fmpMarkerEvent.start,}]),}});} @@ -8083,7 +8083,7 @@ function addSamplesToHistogram(samples,histogram,histograms){for(const sample of 
samples){histogram.addSample(sample.value,sample.diagnostics);if(histogram.name!=='timeToFirstContentfulPaint')continue;if(!sample.breakdownTree)continue;for(const[category,breakdown]of Object.entries(sample.breakdownTree)){const relatedName=`${histogram.name}:${category}`;let relatedHist=histograms.getHistogramsNamed(relatedName)[0];if(!relatedHist){relatedHist=histograms.createHistogram(relatedName,histogram.unit,[],{binBoundaries:LOADING_METRIC_BOUNDARIES,summaryOptions:{count:false,max:false,min:false,sum:false,},});let relatedNames=histogram.diagnostics.get('breakdown');if(!relatedNames){relatedNames=new tr.v.d.RelatedNameMap();histogram.diagnostics.set('breakdown',relatedNames);} relatedNames.set(category,relatedName);} relatedHist.addSample(breakdown.total,{breakdown:tr.v.d.Breakdown.fromEntries(Object.entries(breakdown.events)),});}}} -function loadingMetric(histograms,model){const firstPaintHistogram=histograms.createHistogram('timeToFirstPaint',timeDurationInMs_smallerIsBetter,[],{binBoundaries:LOADING_METRIC_BOUNDARIES,description:'time to first paint',summaryOptions:SUMMARY_OPTIONS,});const firstContentfulPaintHistogram=histograms.createHistogram('timeToFirstContentfulPaint',timeDurationInMs_smallerIsBetter,[],{binBoundaries:LOADING_METRIC_BOUNDARIES,description:'time to first contentful paint',summaryOptions:SUMMARY_OPTIONS,});const onLoadHistogram=histograms.createHistogram('timeToOnload',timeDurationInMs_smallerIsBetter,[],{binBoundaries:LOADING_METRIC_BOUNDARIES,description:'time to onload. 
'+'This is temporary metric used for PCv1/v2 correctness checking',summaryOptions:SUMMARY_OPTIONS,});const firstMeaningfulPaintHistogram=histograms.createHistogram('timeToFirstMeaningfulPaint',timeDurationInMs_smallerIsBetter,[],{binBoundaries:LOADING_METRIC_BOUNDARIES,description:'time to first meaningful paint',summaryOptions:SUMMARY_OPTIONS,});const firstMeaningfulPaintCpuTimeHistogram=histograms.createHistogram('cpuTimeToFirstMeaningfulPaint',timeDurationInMs_smallerIsBetter,[],{binBoundaries:LOADING_METRIC_BOUNDARIES,description:'CPU time to first meaningful paint',summaryOptions:SUMMARY_OPTIONS,});const timeToInteractiveHistogram=histograms.createHistogram('timeToInteractive',timeDurationInMs_smallerIsBetter,[],{binBoundaries:TIME_TO_INTERACTIVE_BOUNDARIES,description:'Time to Interactive',summaryOptions:SUMMARY_OPTIONS,});const timeToFirstCpuIdleHistogram=histograms.createHistogram('timeToFirstCpuIdle',timeDurationInMs_smallerIsBetter,[],{binBoundaries:TIME_TO_INTERACTIVE_BOUNDARIES,description:'Time to First CPU Idle',summaryOptions:SUMMARY_OPTIONS,});const chromeHelper=model.getOrCreateHelper(tr.model.helpers.ChromeModelHelper);for(const pid in chromeHelper.rendererHelpers){const rendererHelper=chromeHelper.rendererHelpers[pid];if(rendererHelper.isChromeTracingUI)continue;const 
samplesSet=collectLoadingMetricsForRenderer(rendererHelper);addSamplesToHistogram(samplesSet.firstPaintSamples,firstPaintHistogram,histograms);addSamplesToHistogram(samplesSet.firstContentfulPaintSamples,firstContentfulPaintHistogram,histograms);addSamplesToHistogram(samplesSet.onLoadSamples,onLoadHistogram,histograms);addSamplesToHistogram(samplesSet.firstMeaningfulPaintSamples,firstMeaningfulPaintHistogram,histograms);addSamplesToHistogram(samplesSet.firstMeaningfulPaintCpuTimeSamples,firstMeaningfulPaintCpuTimeHistogram,histograms);addSamplesToHistogram(samplesSet.interactiveSamples,timeToInteractiveHistogram,histograms);addSamplesToHistogram(samplesSet.firstCpuIdleSamples,timeToFirstCpuIdleHistogram,histograms);}} +function loadingMetric(histograms,model){const firstPaintHistogram=histograms.createHistogram('timeToFirstPaint',timeDurationInMs_smallerIsBetter,[],{binBoundaries:LOADING_METRIC_BOUNDARIES,description:'time to first paint',summaryOptions:SUMMARY_OPTIONS,});const firstContentfulPaintHistogram=histograms.createHistogram('timeToFirstContentfulPaint',timeDurationInMs_smallerIsBetter,[],{binBoundaries:LOADING_METRIC_BOUNDARIES,description:'time to first contentful paint',summaryOptions:SUMMARY_OPTIONS,});const onLoadHistogram=histograms.createHistogram('timeToOnload',timeDurationInMs_smallerIsBetter,[],{binBoundaries:LOADING_METRIC_BOUNDARIES,description:'time to onload. 
'+'This is temporary metric used for PCv1/v2 sanity checking',summaryOptions:SUMMARY_OPTIONS,});const firstMeaningfulPaintHistogram=histograms.createHistogram('timeToFirstMeaningfulPaint',timeDurationInMs_smallerIsBetter,[],{binBoundaries:LOADING_METRIC_BOUNDARIES,description:'time to first meaningful paint',summaryOptions:SUMMARY_OPTIONS,});const firstMeaningfulPaintCpuTimeHistogram=histograms.createHistogram('cpuTimeToFirstMeaningfulPaint',timeDurationInMs_smallerIsBetter,[],{binBoundaries:LOADING_METRIC_BOUNDARIES,description:'CPU time to first meaningful paint',summaryOptions:SUMMARY_OPTIONS,});const timeToInteractiveHistogram=histograms.createHistogram('timeToInteractive',timeDurationInMs_smallerIsBetter,[],{binBoundaries:TIME_TO_INTERACTIVE_BOUNDARIES,description:'Time to Interactive',summaryOptions:SUMMARY_OPTIONS,});const timeToFirstCpuIdleHistogram=histograms.createHistogram('timeToFirstCpuIdle',timeDurationInMs_smallerIsBetter,[],{binBoundaries:TIME_TO_INTERACTIVE_BOUNDARIES,description:'Time to First CPU Idle',summaryOptions:SUMMARY_OPTIONS,});const chromeHelper=model.getOrCreateHelper(tr.model.helpers.ChromeModelHelper);for(const pid in chromeHelper.rendererHelpers){const rendererHelper=chromeHelper.rendererHelpers[pid];if(rendererHelper.isChromeTracingUI)continue;const 
samplesSet=collectLoadingMetricsForRenderer(rendererHelper);addSamplesToHistogram(samplesSet.firstPaintSamples,firstPaintHistogram,histograms);addSamplesToHistogram(samplesSet.firstContentfulPaintSamples,firstContentfulPaintHistogram,histograms);addSamplesToHistogram(samplesSet.onLoadSamples,onLoadHistogram,histograms);addSamplesToHistogram(samplesSet.firstMeaningfulPaintSamples,firstMeaningfulPaintHistogram,histograms);addSamplesToHistogram(samplesSet.firstMeaningfulPaintCpuTimeSamples,firstMeaningfulPaintCpuTimeHistogram,histograms);addSamplesToHistogram(samplesSet.interactiveSamples,timeToInteractiveHistogram,histograms);addSamplesToHistogram(samplesSet.firstCpuIdleSamples,timeToFirstCpuIdleHistogram,histograms);}} tr.metrics.MetricRegistry.register(loadingMetric);return{loadingMetric,getNetworkEventsInRange,collectLoadingMetricsForRenderer,};});'use strict';tr.exportTo('tr.metrics',function(){const SPA_NAVIGATION_START_TO_FIRST_PAINT_DURATION_BIN_BOUNDARY=tr.v.HistogramBinBoundaries.createExponential(1,1000,50);function spaNavigationMetric(histograms,model){const histogram=new tr.v.Histogram('spaNavigationStartToFpDuration',tr.b.Unit.byName.timeDurationInMs_smallerIsBetter,SPA_NAVIGATION_START_TO_FIRST_PAINT_DURATION_BIN_BOUNDARY);histogram.description='Latency between the input event causing'+' a SPA navigation and the first paint event after it';histogram.customizeSummaryOptions({count:false,sum:false,});const modelHelper=model.getOrCreateHelper(tr.model.helpers.ChromeModelHelper);if(!modelHelper){return;} const rendererHelpers=modelHelper.rendererHelpers;if(!rendererHelpers){return;} const browserHelper=modelHelper.browserHelper;for(const rendererHelper of Object.values(rendererHelpers)){const spaNavigations=tr.metrics.findSpaNavigationsOnRenderer(rendererHelper,browserHelper);for(const spaNav of spaNavigations){let beginTs=0;if(spaNav.navStartCandidates.inputLatencyAsyncSlice){const 
beginData=spaNav.navStartCandidates.inputLatencyAsyncSlice.args.data;beginTs=model.convertTimestampToModelTime('traceEventClock',beginData.INPUT_EVENT_LATENCY_BEGIN_RWH_COMPONENT.time);}else{beginTs=spaNav.navStartCandidates.goToIndexSlice.start;} @@ -8117,7 +8117,7 @@ const benchmarks=hist.diagnostics.get(tr.v.d.RESERVED_NAMES.BENCHMARKS);const start=hist.diagnostics.get(tr.v.d.RESERVED_NAMES.BENCHMARK_START);if(benchmarks===undefined){if(start===undefined)return'Value';return start.toString();} const benchmarksStr=Array.from(benchmarks).join('\n');if(start===undefined)return benchmarksStr;return benchmarksStr+'\n'+start.toString();});class GenericSetGrouping extends HistogramGrouping{constructor(name){super(name,undefined);this.callback_=this.compute_.bind(this);} compute_(hist){const diag=hist.diagnostics.get(this.key);if(diag===undefined)return'';const parts=Array.from(diag);parts.sort();return parts.join(',');}} -GenericSetGrouping.NAMES=[tr.v.d.RESERVED_NAMES.ARCHITECTURES,tr.v.d.RESERVED_NAMES.BENCHMARKS,tr.v.d.RESERVED_NAMES.BOTS,tr.v.d.RESERVED_NAMES.BUILDS,tr.v.d.RESERVED_NAMES.DEVICE_IDS,tr.v.d.RESERVED_NAMES.PRIMARYS,tr.v.d.RESERVED_NAMES.MEMORY_AMOUNTS,tr.v.d.RESERVED_NAMES.OS_NAMES,tr.v.d.RESERVED_NAMES.OS_VERSIONS,tr.v.d.RESERVED_NAMES.PRODUCT_VERSIONS,tr.v.d.RESERVED_NAMES.STORIES,tr.v.d.RESERVED_NAMES.STORYSET_REPEATS,tr.v.d.RESERVED_NAMES.STORY_TAGS,];for(const name of GenericSetGrouping.NAMES){new GenericSetGrouping(name);} +GenericSetGrouping.NAMES=[tr.v.d.RESERVED_NAMES.ARCHITECTURES,tr.v.d.RESERVED_NAMES.BENCHMARKS,tr.v.d.RESERVED_NAMES.BOTS,tr.v.d.RESERVED_NAMES.BUILDS,tr.v.d.RESERVED_NAMES.DEVICE_IDS,tr.v.d.RESERVED_NAMES.MASTERS,tr.v.d.RESERVED_NAMES.MEMORY_AMOUNTS,tr.v.d.RESERVED_NAMES.OS_NAMES,tr.v.d.RESERVED_NAMES.OS_VERSIONS,tr.v.d.RESERVED_NAMES.PRODUCT_VERSIONS,tr.v.d.RESERVED_NAMES.STORIES,tr.v.d.RESERVED_NAMES.STORYSET_REPEATS,tr.v.d.RESERVED_NAMES.STORY_TAGS,];for(const name of GenericSetGrouping.NAMES){new 
GenericSetGrouping(name);} class DateRangeGrouping extends HistogramGrouping{constructor(name){super(name,undefined);this.callback_=this.compute_.bind(this);} compute_(hist){const diag=hist.diagnostics.get(this.key);if(diag===undefined)return'';return diag.toString();}} DateRangeGrouping.NAMES=[tr.v.d.RESERVED_NAMES.BENCHMARK_START,tr.v.d.RESERVED_NAMES.TRACE_START,];for(const name of DateRangeGrouping.NAMES){new DateRangeGrouping(name);} @@ -10022,10 +10022,10 @@ root 522 2 0 0 rescuer_thread 0 S [sb-1] 5 root 523 2 0 0 ngd_slim_rx_msgq_thread 0 S [ngd_rx_thread1] 5 root 524 2 0 0 irq_thread 0 S [irq/309-mnh-rea] 5 -root 525 2 0 0 ngd_notify_partners 0 S [ngd_notify_sl1] 5 +root 525 2 0 0 ngd_notify_slaves 0 S [ngd_notify_sl1] 5 root 526 2 0 0 rescuer_thread 0 S [sb-3] 5 root 527 2 0 0 ngd_slim_rx_msgq_thread 0 S [ngd_rx_thread3] 5 -root 528 2 0 0 ngd_notify_partners 0 S [ngd_notify_sl3] 5 +root 528 2 0 0 ngd_notify_slaves 0 S [ngd_notify_sl3] 5 root 529 2 0 0 rescuer_thread 0 S [tbn_event_proce] 5 root 530 2 0 0 irq_thread 0 S [irq/79-qpnp_fla] 5 root 531 2 0 0 irq_thread 0 S [irq/78-qpnp_fla] 5 @@ -10055,7 +10055,7 @@ logd 571 1 26316 9760 SyS_rt_sigsuspend 0 S logd logd root 572 2 0 0 worker_thread 0 S [kworker/6:1H] 5 system 573 1 15872 2472 do_wait 0 S qseecomd qseecomd -system 574 1 14352 2684 binder_ioctl 0 S [email protected] [email protected] +system 574 1 14352 2684 binder_ioctl 0 S [email protected] [email protected] root 576 2 0 0 kthread_worker_fn 0 S [sugov:0] 5 root 577 2 0 0 kthread_worker_fn 0 S [sugov:4] 5 root 578 2 0 0 worker_thread 0 S [kworker/0:1H] 5 @@ -10069,7 +10069,7 @@ system 609 1 11184 1824 binder_ioctl 0 S vndservicemanager vndservicemanager root 611 1 12512 2596 binder_ioctl 0 S [email protected] [email protected] system 612 1 13964 2776 binder_ioctl 0 S [email protected] [email protected] -hsm 613 1 2127100 4308 binder_ioctl 0 S [email protected] [email protected] +hsm 613 1 2127100 4308 binder_ioctl 0 S [email protected] [email 
protected] hsm 614 1 17568 2056 binder_ioctl 0 S citadeld citadeld system 616 1 20044 3444 do_sys_poll 0 S sscrpcd sscrpcd system 619 1 2146004 8052 binder_ioctl 0 S [email protected] [email protected] @@ -10751,7 +10751,7 @@ logd 571 590 logd.auditd root 572 572 kworker/6:1H system 573 573 qseecomd -system 574 574 [email protected] +system 574 574 [email protected] root 576 576 sugov:0 root 577 577 sugov:4 root 578 578 kworker/0:1H @@ -10779,7 +10779,7 @@ system 609 609 vndservicemanag root 611 611 [email protected] system 612 612 [email protected] -hsm 613 613 [email protected] +hsm 613 613 [email protected] hsm 614 614 citadeld hsm 614 627 citadeld hsm 614 628 Binder:614_1 @@ -12980,10 +12980,10 @@ root 522 2 0 0 rescuer_thread 0 S [sb-1] 5 root 523 2 0 0 ngd_slim_rx_msgq_thread 0 S [ngd_rx_thread1] 5 root 524 2 0 0 irq_thread 0 S [irq/309-mnh-rea] 5 -root 525 2 0 0 ngd_notify_partners 0 S [ngd_notify_sl1] 5 +root 525 2 0 0 ngd_notify_slaves 0 S [ngd_notify_sl1] 5 root 526 2 0 0 rescuer_thread 0 S [sb-3] 5 root 527 2 0 0 ngd_slim_rx_msgq_thread 0 S [ngd_rx_thread3] 5 -root 528 2 0 0 ngd_notify_partners 0 S [ngd_notify_sl3] 5 +root 528 2 0 0 ngd_notify_slaves 0 S [ngd_notify_sl3] 5 root 529 2 0 0 rescuer_thread 0 S [tbn_event_proce] 5 root 530 2 0 0 irq_thread 0 S [irq/79-qpnp_fla] 5 root 531 2 0 0 irq_thread 0 S [irq/78-qpnp_fla] 5 @@ -13013,7 +13013,7 @@ logd 571 1 26316 9760 SyS_rt_sigsuspend 0 S logd logd root 572 2 0 0 worker_thread 0 S [kworker/6:1H] 5 system 573 1 15872 2472 do_wait 0 S qseecomd qseecomd -system 574 1 14352 2684 binder_ioctl 0 S [email protected] [email protected] +system 574 1 14352 2684 binder_ioctl 0 S [email protected] [email protected] root 576 2 0 0 kthread_worker_fn 0 S [sugov:0] 5 root 577 2 0 0 kthread_worker_fn 0 S [sugov:4] 5 root 578 2 0 0 worker_thread 0 S [kworker/0:1H] 5 @@ -13027,7 +13027,7 @@ system 609 1 11184 1824 binder_ioctl 0 S vndservicemanager vndservicemanager root 611 1 12512 2596 binder_ioctl 0 S [email 
protected] [email protected] system 612 1 13964 2776 binder_ioctl 0 S [email protected] [email protected] -hsm 613 1 2127100 4308 binder_ioctl 0 S [email protected] [email protected] +hsm 613 1 2127100 4308 binder_ioctl 0 S [email protected] [email protected] hsm 614 1 17568 2056 binder_ioctl 0 S citadeld citadeld system 616 1 20044 3444 do_sys_poll 0 S sscrpcd sscrpcd system 619 1 2146004 8036 binder_ioctl 0 S [email protected] [email protected] @@ -13708,7 +13708,7 @@ logd 571 590 logd.auditd root 572 572 kworker/6:1H system 573 573 qseecomd -system 574 574 [email protected] +system 574 574 [email protected] root 576 576 sugov:0 root 577 577 sugov:4 root 578 578 kworker/0:1H @@ -13736,7 +13736,7 @@ system 609 609 vndservicemanag root 611 611 [email protected] system 612 612 [email protected] -hsm 613 613 [email protected] +hsm 613 613 [email protected] hsm 614 614 citadeld hsm 614 627 citadeld hsm 614 628 Binder:614_1
diff --git a/tools/systrace_parser/parser/test/omr1.html b/tools/systrace_parser/parser/test/omr1.html index 7c17331..be86b77 100755 --- a/tools/systrace_parser/parser/test/omr1.html +++ b/tools/systrace_parser/parser/test/omr1.html
@@ -4928,7 +4928,7 @@ visitedDomainIds.add(current.domainId);const outgoingTransformers=this.transformerMapByDomainId_[current.domainId];if(!outgoingTransformers)continue;for(const outgoingDomainId in outgoingTransformers){const toNextDomainTransformer=outgoingTransformers[outgoingDomainId];const toCurrentDomainTransformer=current.transformer;queue.push({domainId:outgoingDomainId,transformer:Transformer.compose(toNextDomainTransformer,toCurrentDomainTransformer)});}} return undefined;},selectModelDomainId_(){this.ensureAllDomainsAreConnected_();for(const chromeDomainId of POSSIBLE_CHROME_CLOCK_DOMAINS){if(this.domainsSeen_.has(chromeDomainId)){this.modelDomainId_=chromeDomainId;return;}} const domainsSeenArray=Array.from(this.domainsSeen_);domainsSeenArray.sort();this.modelDomainId_=domainsSeenArray[0];},ensureAllDomainsAreConnected_(){let firstDomainId=undefined;for(const domainId of this.domainsSeen_){if(!firstDomainId){firstDomainId=domainId;continue;} -if(!this.getTransformerBetween_(firstDomainId,domainId)){throw new Error('Unable to select a primary clock domain because no '+'path can be found from "'+firstDomainId+'" to "'+domainId+'".');}} +if(!this.getTransformerBetween_(firstDomainId,domainId)){throw new Error('Unable to select a master clock domain because no '+'path can be found from "'+firstDomainId+'" to "'+domainId+'".');}} return true;},onDomainSeen_(domainId){if(domainId===ClockDomainId.UNKNOWN_CHROME_LEGACY&&!this.domainsSeen_.has(ClockDomainId.UNKNOWN_CHROME_LEGACY)){for(const chromeDomainId of POSSIBLE_CHROME_CLOCK_DOMAINS){if(chromeDomainId===ClockDomainId.UNKNOWN_CHROME_LEGACY){continue;} this.collapseDomains_(ClockDomainId.UNKNOWN_CHROME_LEGACY,chromeDomainId);}} this.domainsSeen_.add(domainId);},onSyncCompleted_(marker1,marker2){const forwardTransformer=Transformer.fromMarkers(marker1,marker2);const backwardTransformer=Transformer.fromMarkers(marker2,marker1);const 
existingTransformer=this.getOrCreateTransformerMap_(marker1.domainId)[marker2.domainId];if(!existingTransformer||forwardTransformer.error<existingTransformer.error){this.getOrCreateTransformerMap_(marker1.domainId)[marker2.domainId]=forwardTransformer;this.getOrCreateTransformerMap_(marker2.domainId)[marker1.domainId]=backwardTransformer;}},collapseDomains_(domain1Id,domain2Id){this.getOrCreateTransformerMap_(domain1Id)[domain2Id]=this.getOrCreateTransformerMap_(domain2Id)[domain1Id]=Transformer.IDENTITY;},getOrCreateTransformerMap_(domainId){if(!this.transformerMapByDomainId_[domainId]){this.transformerMapByDomainId_[domainId]={};} @@ -5311,7 +5311,7 @@ readToOffset_(offset){const out=InMemoryTraceStream.uint8ArrayToString_(this.data_.subarray(this.cursor_,offset));this.cursor_=offset;return out;} static uint8ArrayToString_(arr){const c=[];for(let i=0;i<arr.length;i+=MAX_FUNCTION_ARGS_COUNT){c.push(String.fromCharCode(...arr.subarray(i,i+MAX_FUNCTION_ARGS_COUNT)));} return c.join('');}} -return{InMemoryTraceStream,};});!function(a){if("object"==typeof exports&&"undefined"!=typeof module)module.exports=a();else if("function"==typeof define&&define.amd)define([],a);else{var b;"undefined"!=typeof window?b=window:"undefined"!=typeof global?b=global:"undefined"!=typeof self&&(b=self),b.JSZip=a()}}(function(){return function a(b,c,d){function e(g,h){if(!c[g]){if(!b[g]){var i="function"==typeof require&&require;if(!h&&i)return i(g,!0);if(f)return f(g,!0);throw new Error("Cannot find module '"+g+"'")}var j=c[g]={exports:{}};b[g][0].call(j.exports,function(a){var c=b[g][1][a];return e(c?c:a)},j,j.exports,a,b,c,d)}return c[g].exports}for(var f="function"==typeof require&&require,g=0;g<d.length;g++)e(d[g]);return e}({1:[function(a,b,c){"use strict";var d="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/=";c.encode=function(a){for(var 
b,c,e,f,g,h,i,j="",k=0;k<a.length;)b=a.charCodeAt(k++),c=a.charCodeAt(k++),e=a.charCodeAt(k++),f=b>>2,g=(3&b)<<4|c>>4,h=(15&c)<<2|e>>6,i=63&e,isNaN(c)?h=i=64:isNaN(e)&&(i=64),j=j+d.charAt(f)+d.charAt(g)+d.charAt(h)+d.charAt(i);return j},c.decode=function(a){var b,c,e,f,g,h,i,j="",k=0;for(a=a.replace(/[^A-Za-z0-9\+\/\=]/g,"");k<a.length;)f=d.indexOf(a.charAt(k++)),g=d.indexOf(a.charAt(k++)),h=d.indexOf(a.charAt(k++)),i=d.indexOf(a.charAt(k++)),b=f<<2|g>>4,c=(15&g)<<4|h>>2,e=(3&h)<<6|i,j+=String.fromCharCode(b),64!=h&&(j+=String.fromCharCode(c)),64!=i&&(j+=String.fromCharCode(e));return j}},{}],2:[function(a,b){"use strict";function c(){this.compressedSize=0,this.uncompressedSize=0,this.crc32=0,this.compressionMethod=null,this.compressedContent=null}c.prototype={getContent:function(){return null},getCompressedContent:function(){return null}},b.exports=c},{}],3:[function(a,b,c){"use strict";c.STORE={magic:"\x00\x00",compress:function(a){return a},uncompress:function(a){return a},compressInputType:null,uncompressInputType:null},c.DEFLATE=a("./flate")},{"./flate":8}],4:[function(a,b){"use strict";var 
c=a("./utils"),d=[0,1996959894,3993919788,2567524794,124634137,1886057615,3915621685,2657392035,249268274,2044508324,3772115230,2547177864,162941995,2125561021,3887607047,2428444049,498536548,1789927666,4089016648,2227061214,450548861,1843258603,4107580753,2211677639,325883990,1684777152,4251122042,2321926636,335633487,1661365465,4195302755,2366115317,997073096,1281953886,3579855332,2724688242,1006888145,1258607687,3524101629,2768942443,901097722,1119000684,3686517206,2898065728,853044451,1172266101,3705015759,2882616665,651767980,1373503546,3369554304,3218104598,565507253,1454621731,3485111705,3099436303,671266974,1594198024,3322730930,2970347812,795835527,1483230225,3244367275,3060149565,1994146192,31158534,2563907772,4023717930,1907459465,112637215,2680153253,3904427059,2013776290,251722036,2517215374,3775830040,2137656763,141376813,2439277719,3865271297,1802195444,476864866,2238001368,4066508878,1812370925,453092731,2181625025,4111451223,1706088902,314042704,2344532202,4240017532,1658658271,366619977,2362670323,4224994405,1303535960,984961486,2747007092,3569037538,1256170817,1037604311,2765210733,3554079995,1131014506,879679996,2909243462,3663771856,1141124467,855842277,2852801631,3708648649,1342533948,654459306,3188396048,3373015174,1466479909,544179635,3110523913,3462522015,1591671054,702138776,2966460450,3352799412,1504918807,783551873,3082640443,3233442989,3988292384,2596254646,62317068,1957810842,3939845945,2647816111,81470997,1943803523,3814918930,2489596804,225274430,2053790376,3826175755,2466906013,167816743,2097651377,4027552580,2265490386,503444072,1762050814,4150417245,2154129355,426522225,1852507879,4275313526,2312317920,282753626,1742555852,4189708143,2394877945,397917763,1622183637,3604390888,2714866558,953729732,1340076626,3518719985,2797360999,1068828381,1219638859,3624741850,2936675148,906185462,1090812512,3747672003,2825379669,829329135,1181335161,3412177804,3160834842,628085408,1382605366,3423369109,3138078467,570562233,1426400815,3317316542,2
998733608,733239954,1555261956,3268935591,3050360625,752459403,1541320221,2607071920,3965973030,1969922972,40735498,2617837225,3943577151,1913087877,83908371,2512341634,3803740692,2075208622,213261112,2463272603,3855990285,2094854071,198958881,2262029012,4057260610,1759359992,534414190,2176718541,4139329115,1873836001,414664567,2282248934,4279200368,1711684554,285281116,2405801727,4167216745,1634467795,376229701,2685067896,3608007406,1308918612,956543938,2808555105,3495958263,1231636301,1047427035,2932959818,3654703836,1088359270,936918e3,2847714899,3736837829,1202900863,817233897,3183342108,3401237130,1404277552,615818150,3134207493,3453421203,1423857449,601450431,3009837614,3294710456,1567103746,711928724,3020668471,3272380065,1510334235,755167117];b.exports=function(a,b){if("undefined"==typeof a||!a.length)return 0;var e="string"!==c.getTypeOf(a);"undefined"==typeof b&&(b=0);var f=0,g=0,h=0;b=-1^b;for(var i=0,j=a.length;j>i;i++)h=e?a[i]:a.charCodeAt(i),g=255&(b^h),f=d[g],b=b>>>8^f;return-1^b}},{"./utils":21}],5:[function(a,b){"use strict";function c(){this.data=null,this.length=0,this.index=0}var d=a("./utils");c.prototype={checkOffset:function(a){this.checkIndex(this.index+a)},checkIndex:function(a){if(this.length<a||0>a)throw new Error("End of data reached (data length = "+this.length+", asked index = "+a+"). 
Corrupted zip ?")},setIndex:function(a){this.checkIndex(a),this.index=a},skip:function(a){this.setIndex(this.index+a)},byteAt:function(){},readInt:function(a){var b,c=0;for(this.checkOffset(a),b=this.index+a-1;b>=this.index;b--)c=(c<<8)+this.byteAt(b);return this.index+=a,c},readString:function(a){return d.transformTo("string",this.readData(a))},readData:function(){},lastIndexOfSignature:function(){},readDate:function(){var a=this.readInt(4);return new Date((a>>25&127)+1980,(a>>21&15)-1,a>>16&31,a>>11&31,a>>5&63,(31&a)<<1)}},b.exports=c},{"./utils":21}],6:[function(a,b,c){"use strict";c.base64=!1,c.binary=!1,c.dir=!1,c.createFolders=!1,c.date=null,c.compression=null,c.comment=null},{}],7:[function(a,b,c){"use strict";var d=a("./utils");c.string2binary=function(a){return d.string2binary(a)},c.string2Uint8Array=function(a){return d.transformTo("uint8array",a)},c.uint8Array2String=function(a){return d.transformTo("string",a)},c.string2Blob=function(a){var b=d.transformTo("arraybuffer",a);return d.arrayBuffer2Blob(b)},c.arrayBuffer2Blob=function(a){return d.arrayBuffer2Blob(a)},c.transformTo=function(a,b){return d.transformTo(a,b)},c.getTypeOf=function(a){return d.getTypeOf(a)},c.checkSupport=function(a){return d.checkSupport(a)},c.MAX_VALUE_16BITS=d.MAX_VALUE_16BITS,c.MAX_VALUE_32BITS=d.MAX_VALUE_32BITS,c.pretty=function(a){return d.pretty(a)},c.findCompression=function(a){return d.findCompression(a)},c.isRegExp=function(a){return d.isRegExp(a)}},{"./utils":21}],8:[function(a,b,c){"use strict";var d="undefined"!=typeof Uint8Array&&"undefined"!=typeof Uint16Array&&"undefined"!=typeof Uint32Array,e=a("pako");c.uncompressInputType=d?"uint8array":"array",c.compressInputType=d?"uint8array":"array",c.magic="\b\x00",c.compress=function(a){return e.deflateRaw(a)},c.uncompress=function(a){return e.inflateRaw(a)}},{pako:24}],9:[function(a,b){"use strict";function c(a,b){return this instanceof 
c?(this.files={},this.comment=null,this.root="",a&&this.load(a,b),void(this.clone=function(){var a=new c;for(var b in this)"function"!=typeof this[b]&&(a[b]=this[b]);return a})):new c(a,b)}var d=a("./base64");c.prototype=a("./object"),c.prototype.load=a("./load"),c.support=a("./support"),c.defaults=a("./defaults"),c.utils=a("./deprecatedPublicUtils"),c.base64={encode:function(a){return d.encode(a)},decode:function(a){return d.decode(a)}},c.compressions=a("./compressions"),b.exports=c},{"./base64":1,"./compressions":3,"./defaults":6,"./deprecatedPublicUtils":7,"./load":10,"./object":13,"./support":17}],10:[function(a,b){"use strict";var c=a("./base64"),d=a("./zipEntries");b.exports=function(a,b){var e,f,g,h;for(b=b||{},b.base64&&(a=c.decode(a)),f=new d(a,b),e=f.files,g=0;g<e.length;g++)h=e[g],this.file(h.fileName,h.decompressed,{binary:!0,optimizedBinaryString:!0,date:h.date,dir:h.dir,comment:h.fileComment.length?h.fileComment:null,createFolders:b.createFolders});return f.zipComment.length&&(this.comment=f.zipComment),this}},{"./base64":1,"./zipEntries":22}],11:[function(a,b){(function(a){"use strict";b.exports=function(b,c){return new a(b,c)},b.exports.test=function(b){return a.isBuffer(b)}}).call(this,"undefined"!=typeof Buffer?Buffer:void 0)},{}],12:[function(a,b){"use strict";function c(a){this.data=a,this.length=this.data.length,this.index=0}var d=a("./uint8ArrayReader");c.prototype=new d,c.prototype.readData=function(a){this.checkOffset(a);var b=this.data.slice(this.index,this.index+a);return this.index+=a,b},b.exports=c},{"./uint8ArrayReader":18}],13:[function(a,b){"use strict";var c=a("./support"),d=a("./utils"),e=a("./crc32"),f=a("./signature"),g=a("./defaults"),h=a("./base64"),i=a("./compressions"),j=a("./compressedObject"),k=a("./nodeBuffer"),l=a("./utf8"),m=a("./stringWriter"),n=a("./uint8ArrayWriter"),o=function(a){if(a._data instanceof j&&(a._data=a._data.getContent(),a.options.binary=!0,a.options.base64=!1,"uint8array"===d.getTypeOf(a._data))){var 
b=a._data;a._data=new Uint8Array(b.length),0!==b.length&&a._data.set(b,0)}return a._data},p=function(a){var b=o(a),e=d.getTypeOf(b);return"string"===e?!a.options.binary&&c.nodebuffer?k(b,"utf-8"):a.asBinary():b},q=function(a){var b=o(this);return null===b||"undefined"==typeof b?"":(this.options.base64&&(b=h.decode(b)),b=a&&this.options.binary?A.utf8decode(b):d.transformTo("string",b),a||this.options.binary||(b=d.transformTo("string",A.utf8encode(b))),b)},r=function(a,b,c){this.name=a,this.dir=c.dir,this.date=c.date,this.comment=c.comment,this._data=b,this.options=c,this._initialMetadata={dir:c.dir,date:c.date}};r.prototype={asText:function(){return q.call(this,!0)},asBinary:function(){return q.call(this,!1)},asNodeBuffer:function(){var a=p(this);return d.transformTo("nodebuffer",a)},asUint8Array:function(){var a=p(this);return d.transformTo("uint8array",a)},asArrayBuffer:function(){return this.asUint8Array().buffer}};var s=function(a,b){var c,d="";for(c=0;b>c;c++)d+=String.fromCharCode(255&a),a>>>=8;return d},t=function(){var a,b,c={};for(a=0;a<arguments.length;a++)for(b in arguments[a])arguments[a].hasOwnProperty(b)&&"undefined"==typeof c[b]&&(c[b]=arguments[a][b]);return c},u=function(a){return a=a||{},a.base64!==!0||null!==a.binary&&void 0!==a.binary||(a.binary=!0),a=t(a,g),a.date=a.date||new Date,null!==a.compression&&(a.compression=a.compression.toUpperCase()),a},v=function(a,b,c){var e,f=d.getTypeOf(b);if(c=u(c),c.createFolders&&(e=w(a))&&x.call(this,e,!0),c.dir||null===b||"undefined"==typeof b)c.base64=!1,c.binary=!1,b=null;else if("string"===f)c.binary&&!c.base64&&c.optimizedBinaryString!==!0&&(b=d.string2binary(b));else{if(c.base64=!1,c.binary=!0,!(f||b instanceof j))throw new Error("The data of '"+a+"' is in an unsupported format !");"arraybuffer"===f&&(b=d.transformTo("uint8array",b))}var g=new r(a,b,c);return this.files[a]=g,g},w=function(a){"/"==a.slice(-1)&&(a=a.substring(0,a.length-1));var b=a.lastIndexOf("/");return 
b>0?a.substring(0,b):""},x=function(a,b){return"/"!=a.slice(-1)&&(a+="/"),b="undefined"!=typeof b?b:!1,this.files[a]||v.call(this,a,null,{dir:!0,createFolders:b}),this.files[a]},y=function(a,b){var c,f=new j;return a._data instanceof j?(f.uncompressedSize=a._data.uncompressedSize,f.crc32=a._data.crc32,0===f.uncompressedSize||a.dir?(b=i.STORE,f.compressedContent="",f.crc32=0):a._data.compressionMethod===b.magic?f.compressedContent=a._data.getCompressedContent():(c=a._data.getContent(),f.compressedContent=b.compress(d.transformTo(b.compressInputType,c)))):(c=p(a),(!c||0===c.length||a.dir)&&(b=i.STORE,c=""),f.uncompressedSize=c.length,f.crc32=e(c),f.compressedContent=b.compress(d.transformTo(b.compressInputType,c))),f.compressedSize=f.compressedContent.length,f.compressionMethod=b.magic,f},z=function(a,b,c,g){var h,i,j,k,m=(c.compressedContent,d.transformTo("string",l.utf8encode(b.name))),n=b.comment||"",o=d.transformTo("string",l.utf8encode(n)),p=m.length!==b.name.length,q=o.length!==n.length,r=b.options,t="",u="",v="";j=b._initialMetadata.dir!==b.dir?b.dir:r.dir,k=b._initialMetadata.date!==b.date?b.date:r.date,h=k.getHours(),h<<=6,h|=k.getMinutes(),h<<=5,h|=k.getSeconds()/2,i=k.getFullYear()-1980,i<<=4,i|=k.getMonth()+1,i<<=5,i|=k.getDate(),p&&(u=s(1,1)+s(e(m),4)+m,t+="up"+s(u.length,2)+u),q&&(v=s(1,1)+s(this.crc32(o),4)+o,t+="uc"+s(v.length,2)+v);var w="";w+="\n\x00",w+=p||q?"\x00\b":"\x00\x00",w+=c.compressionMethod,w+=s(h,2),w+=s(i,2),w+=s(c.crc32,4),w+=s(c.compressedSize,4),w+=s(c.uncompressedSize,4),w+=s(m.length,2),w+=s(t.length,2);var x=f.LOCAL_FILE_HEADER+w+m+t,y=f.CENTRAL_FILE_HEADER+"\x00"+w+s(o.length,2)+"\x00\x00\x00\x00"+(j===!0?"\x00\x00\x00":"\x00\x00\x00\x00")+s(g,4)+m+t+o;return{fileRecord:x,dirRecord:y,compressedObject:c}},A={load:function(){throw new Error("Load method is not defined. 
Is the file jszip-load.js included ?")},filter:function(a){var b,c,d,e,f=[];for(b in this.files)this.files.hasOwnProperty(b)&&(d=this.files[b],e=new r(d.name,d._data,t(d.options)),c=b.slice(this.root.length,b.length),b.slice(0,this.root.length)===this.root&&a(c,e)&&f.push(e));return f},file:function(a,b,c){if(1===arguments.length){if(d.isRegExp(a)){var e=a;return this.filter(function(a,b){return!b.dir&&e.test(a)})}return this.filter(function(b,c){return!c.dir&&b===a})[0]||null}return a=this.root+a,v.call(this,a,b,c),this},folder:function(a){if(!a)return this;if(d.isRegExp(a))return this.filter(function(b,c){return c.dir&&a.test(b)});var b=this.root+a,c=x.call(this,b),e=this.clone();return e.root=c.name,e},remove:function(a){a=this.root+a;var b=this.files[a];if(b||("/"!=a.slice(-1)&&(a+="/"),b=this.files[a]),b&&!b.dir)delete this.files[a];else for(var c=this.filter(function(b,c){return c.name.slice(0,a.length)===a}),d=0;d<c.length;d++)delete this.files[c[d].name];return this},generate:function(a){a=t(a||{},{base64:!0,compression:"STORE",type:"base64",comment:null}),d.checkSupport(a.type);var b,c,e=[],g=0,j=0,k=d.transformTo("string",this.utf8encode(a.comment||this.comment||""));for(var l in this.files)if(this.files.hasOwnProperty(l)){var o=this.files[l],p=o.options.compression||a.compression.toUpperCase(),q=i[p];if(!q)throw new Error(p+" is not a valid compression method !");var r=y.call(this,o,q),u=z.call(this,l,o,r,g);g+=u.fileRecord.length+r.compressedSize,j+=u.dirRecord.length,e.push(u)}var v="";v=f.CENTRAL_DIRECTORY_END+"\x00\x00\x00\x00"+s(e.length,2)+s(e.length,2)+s(j,4)+s(g,4)+s(k.length,2)+k;var w=a.type.toLowerCase();for(b="uint8array"===w||"arraybuffer"===w||"blob"===w||"nodebuffer"===w?new n(g+j+v.length):new m(g+j+v.length),c=0;c<e.length;c++)b.append(e[c].fileRecord),b.append(e[c].compressedObject.compressedContent);for(c=0;c<e.length;c++)b.append(e[c].dirRecord);b.append(v);var 
x=b.finalize();switch(a.type.toLowerCase()){case"uint8array":case"arraybuffer":case"nodebuffer":return d.transformTo(a.type.toLowerCase(),x);case"blob":return d.arrayBuffer2Blob(d.transformTo("arraybuffer",x));case"base64":return a.base64?h.encode(x):x;default:return x}},crc32:function(a,b){return e(a,b)},utf8encode:function(a){return d.transformTo("string",l.utf8encode(a))},utf8decode:function(a){return l.utf8decode(a)}};b.exports=A},{"./base64":1,"./compressedObject":2,"./compressions":3,"./crc32":4,"./defaults":6,"./nodeBuffer":11,"./signature":14,"./stringWriter":16,"./support":17,"./uint8ArrayWriter":19,"./utf8":20,"./utils":21}],14:[function(a,b,c){"use strict";c.LOCAL_FILE_HEADER="PK",c.CENTRAL_FILE_HEADER="PK",c.CENTRAL_DIRECTORY_END="PK",c.ZIP64_CENTRAL_DIRECTORY_LOCATOR="PK",c.ZIP64_CENTRAL_DIRECTORY_END="PK",c.DATA_DESCRIPTOR="PK\b"},{}],15:[function(a,b){"use strict";function c(a,b){this.data=a,b||(this.data=e.string2binary(this.data)),this.length=this.data.length,this.index=0}var d=a("./dataReader"),e=a("./utils");c.prototype=new d,c.prototype.byteAt=function(a){return this.data.charCodeAt(a)},c.prototype.lastIndexOfSignature=function(a){return this.data.lastIndexOf(a)},c.prototype.readData=function(a){this.checkOffset(a);var b=this.data.slice(this.index,this.index+a);return this.index+=a,b},b.exports=c},{"./dataReader":5,"./utils":21}],16:[function(a,b){"use strict";var c=a("./utils"),d=function(){this.data=[]};d.prototype={append:function(a){a=c.transformTo("string",a),this.data.push(a)},finalize:function(){return this.data.join("")}},b.exports=d},{"./utils":21}],17:[function(a,b,c){(function(a){"use strict";if(c.base64=!0,c.array=!0,c.string=!0,c.arraybuffer="undefined"!=typeof ArrayBuffer&&"undefined"!=typeof Uint8Array,c.nodebuffer="undefined"!=typeof a,c.uint8array="undefined"!=typeof Uint8Array,"undefined"==typeof ArrayBuffer)c.blob=!1;else{var b=new ArrayBuffer(0);try{c.blob=0===new Blob([b],{type:"application/zip"}).size}catch(d){try{var 
e=window.BlobBuilder||window.WebKitBlobBuilder||window.MozBlobBuilder||window.MSBlobBuilder,f=new e;f.append(b),c.blob=0===f.getBlob("application/zip").size}catch(d){c.blob=!1}}}}).call(this,"undefined"!=typeof Buffer?Buffer:void 0)},{}],18:[function(a,b){"use strict";function c(a){a&&(this.data=a,this.length=this.data.length,this.index=0)}var d=a("./dataReader");c.prototype=new d,c.prototype.byteAt=function(a){return this.data[a]},c.prototype.lastIndexOfSignature=function(a){for(var b=a.charCodeAt(0),c=a.charCodeAt(1),d=a.charCodeAt(2),e=a.charCodeAt(3),f=this.length-4;f>=0;--f)if(this.data[f]===b&&this.data[f+1]===c&&this.data[f+2]===d&&this.data[f+3]===e)return f;return-1},c.prototype.readData=function(a){if(this.checkOffset(a),0===a)return new Uint8Array(0);var b=this.data.subarray(this.index,this.index+a);return this.index+=a,b},b.exports=c},{"./dataReader":5}],19:[function(a,b){"use strict";var c=a("./utils"),d=function(a){this.data=new Uint8Array(a),this.index=0};d.prototype={append:function(a){0!==a.length&&(a=c.transformTo("uint8array",a),this.data.set(a,this.index),this.index+=a.length)},finalize:function(){return this.data}},b.exports=d},{"./utils":21}],20:[function(a,b,c){"use strict";for(var d=a("./utils"),e=a("./support"),f=a("./nodeBuffer"),g=new Array(256),h=0;256>h;h++)g[h]=h>=252?6:h>=248?5:h>=240?4:h>=224?3:h>=192?2:1;g[254]=g[254]=1;var i=function(a){var b,c,d,f,g,h=a.length,i=0;for(f=0;h>f;f++)c=a.charCodeAt(f),55296===(64512&c)&&h>f+1&&(d=a.charCodeAt(f+1),56320===(64512&d)&&(c=65536+(c-55296<<10)+(d-56320),f++)),i+=128>c?1:2048>c?2:65536>c?3:4;for(b=e.uint8array?new Uint8Array(i):new Array(i),g=0,f=0;i>g;f++)c=a.charCodeAt(f),55296===(64512&c)&&h>f+1&&(d=a.charCodeAt(f+1),56320===(64512&d)&&(c=65536+(c-55296<<10)+(d-56320),f++)),128>c?b[g++]=c:2048>c?(b[g++]=192|c>>>6,b[g++]=128|63&c):65536>c?(b[g++]=224|c>>>12,b[g++]=128|c>>>6&63,b[g++]=128|63&c):(b[g++]=240|c>>>18,b[g++]=128|c>>>12&63,b[g++]=128|c>>>6&63,b[g++]=128|63&c);return 
b},j=function(a,b){var c;for(b=b||a.length,b>a.length&&(b=a.length),c=b-1;c>=0&&128===(192&a[c]);)c--;return 0>c?b:0===c?b:c+g[a[c]]>b?c:b},k=function(a){var b,c,e,f,h=a.length,i=new Array(2*h);for(c=0,b=0;h>b;)if(e=a[b++],128>e)i[c++]=e;else if(f=g[e],f>4)i[c++]=65533,b+=f-1;else{for(e&=2===f?31:3===f?15:7;f>1&&h>b;)e=e<<6|63&a[b++],f--;f>1?i[c++]=65533:65536>e?i[c++]=e:(e-=65536,i[c++]=55296|e>>10&1023,i[c++]=56320|1023&e)}return i.length!==c&&(i.subarray?i=i.subarray(0,c):i.length=c),d.applyFromCharCode(i)};c.utf8encode=function(a){return e.nodebuffer?f(a,"utf-8"):i(a)},c.utf8decode=function(a){if(e.nodebuffer)return d.transformTo("nodebuffer",a).toString("utf-8");a=d.transformTo(e.uint8array?"uint8array":"array",a);for(var b=[],c=0,f=a.length,g=65536;f>c;){var h=j(a,Math.min(c+g,f));b.push(e.uint8array?k(a.subarray(c,h)):k(a.slice(c,h))),c=h}return b.join("")}},{"./nodeBuffer":11,"./support":17,"./utils":21}],21:[function(a,b,c){"use strict";function d(a){return a}function e(a,b){for(var c=0;c<a.length;++c)b[c]=255&a.charCodeAt(c);return b}function f(a){var b=65536,d=[],e=a.length,f=c.getTypeOf(a),g=0,h=!0;try{switch(f){case"uint8array":String.fromCharCode.apply(null,new Uint8Array(0));break;case"nodebuffer":String.fromCharCode.apply(null,j(0))}}catch(i){h=!1}if(!h){for(var k="",l=0;l<a.length;l++)k+=String.fromCharCode(a[l]);return k}for(;e>g&&b>1;)try{d.push("array"===f||"nodebuffer"===f?String.fromCharCode.apply(null,a.slice(g,Math.min(g+b,e))):String.fromCharCode.apply(null,a.subarray(g,Math.min(g+b,e)))),g+=b}catch(i){b=Math.floor(b/2)}return d.join("")}function g(a,b){for(var c=0;c<a.length;c++)b[c]=a[c];return b}var h=a("./support"),i=a("./compressions"),j=a("./nodeBuffer");c.string2binary=function(a){for(var b="",c=0;c<a.length;c++)b+=String.fromCharCode(255&a.charCodeAt(c));return b},c.arrayBuffer2Blob=function(a){c.checkSupport("blob");try{return new Blob([a],{type:"application/zip"})}catch(b){try{var 
d=window.BlobBuilder||window.WebKitBlobBuilder||window.MozBlobBuilder||window.MSBlobBuilder,e=new d;return e.append(a),e.getBlob("application/zip")}catch(b){throw new Error("Bug : can't construct the Blob.")}}},c.applyFromCharCode=f;var k={};k.string={string:d,array:function(a){return e(a,new Array(a.length))},arraybuffer:function(a){return k.string.uint8array(a).buffer},uint8array:function(a){return e(a,new Uint8Array(a.length))},nodebuffer:function(a){return e(a,j(a.length))}},k.array={string:f,array:d,arraybuffer:function(a){return new Uint8Array(a).buffer},uint8array:function(a){return new Uint8Array(a)},nodebuffer:function(a){return j(a)}},k.arraybuffer={string:function(a){return f(new Uint8Array(a))},array:function(a){return g(new Uint8Array(a),new Array(a.byteLength))},arraybuffer:d,uint8array:function(a){return new Uint8Array(a)},nodebuffer:function(a){return j(new Uint8Array(a))}},k.uint8array={string:f,array:function(a){return g(a,new Array(a.length))},arraybuffer:function(a){return a.buffer},uint8array:d,nodebuffer:function(a){return j(a)}},k.nodebuffer={string:f,array:function(a){return g(a,new Array(a.length))},arraybuffer:function(a){return k.nodebuffer.uint8array(a).buffer},uint8array:function(a){return g(a,new Uint8Array(a.length))},nodebuffer:d},c.transformTo=function(a,b){if(b||(b=""),!a)return b;c.checkSupport(a);var d=c.getTypeOf(b),e=k[d][a](b);return e},c.getTypeOf=function(a){return"string"==typeof a?"string":"[object Array]"===Object.prototype.toString.call(a)?"array":h.nodebuffer&&j.test(a)?"nodebuffer":h.uint8array&&a instanceof Uint8Array?"uint8array":h.arraybuffer&&a instanceof ArrayBuffer?"arraybuffer":void 0},c.checkSupport=function(a){var b=h[a.toLowerCase()];if(!b)throw new Error(a+" is not supported by this browser")},c.MAX_VALUE_16BITS=65535,c.MAX_VALUE_32BITS=-1,c.pretty=function(a){var b,c,d="";for(c=0;c<(a||"").length;c++)b=a.charCodeAt(c),d+="\\x"+(16>b?"0":"")+b.toString(16).toUpperCase();return 
d},c.findCompression=function(a){for(var b in i)if(i.hasOwnProperty(b)&&i[b].magic===a)return i[b];return null},c.isRegExp=function(a){return"[object RegExp]"===Object.prototype.toString.call(a)}},{"./compressions":3,"./nodeBuffer":11,"./support":17}],22:[function(a,b){"use strict";function c(a,b){this.files=[],this.loadOptions=b,a&&this.load(a)}var d=a("./stringReader"),e=a("./nodeBufferReader"),f=a("./uint8ArrayReader"),g=a("./utils"),h=a("./signature"),i=a("./zipEntry"),j=a("./support"),k=a("./object");c.prototype={checkSignature:function(a){var b=this.reader.readString(4);if(b!==a)throw new Error("Corrupted zip or bug : unexpected signature ("+g.pretty(b)+", expected "+g.pretty(a)+")")},readBlockEndOfCentral:function(){this.diskNumber=this.reader.readInt(2),this.diskWithCentralDirStart=this.reader.readInt(2),this.centralDirRecordsOnThisDisk=this.reader.readInt(2),this.centralDirRecords=this.reader.readInt(2),this.centralDirSize=this.reader.readInt(4),this.centralDirOffset=this.reader.readInt(4),this.zipCommentLength=this.reader.readInt(2),this.zipComment=this.reader.readString(this.zipCommentLength),this.zipComment=k.utf8decode(this.zipComment)},readBlockZip64EndOfCentral:function(){this.zip64EndOfCentralSize=this.reader.readInt(8),this.versionMadeBy=this.reader.readString(2),this.versionNeeded=this.reader.readInt(2),this.diskNumber=this.reader.readInt(4),this.diskWithCentralDirStart=this.reader.readInt(4),this.centralDirRecordsOnThisDisk=this.reader.readInt(8),this.centralDirRecords=this.reader.readInt(8),this.centralDirSize=this.reader.readInt(8),this.centralDirOffset=this.reader.readInt(8),this.zip64ExtensibleData={};for(var 
a,b,c,d=this.zip64EndOfCentralSize-44,e=0;d>e;)a=this.reader.readInt(2),b=this.reader.readInt(4),c=this.reader.readString(b),this.zip64ExtensibleData[a]={id:a,length:b,value:c}},readBlockZip64EndOfCentralLocator:function(){if(this.diskWithZip64CentralDirStart=this.reader.readInt(4),this.relativeOffsetEndOfZip64CentralDir=this.reader.readInt(8),this.disksCount=this.reader.readInt(4),this.disksCount>1)throw new Error("Multi-volumes zip are not supported")},readLocalFiles:function(){var a,b;for(a=0;a<this.files.length;a++)b=this.files[a],this.reader.setIndex(b.localHeaderOffset),this.checkSignature(h.LOCAL_FILE_HEADER),b.readLocalPart(this.reader),b.handleUTF8()},readCentralDir:function(){var a;for(this.reader.setIndex(this.centralDirOffset);this.reader.readString(4)===h.CENTRAL_FILE_HEADER;)a=new i({zip64:this.zip64},this.loadOptions),a.readCentralPart(this.reader),this.files.push(a)},readEndOfCentral:function(){var a=this.reader.lastIndexOfSignature(h.CENTRAL_DIRECTORY_END);if(-1===a)throw new Error("Corrupted zip : can't find end of central directory");if(this.reader.setIndex(a),this.checkSignature(h.CENTRAL_DIRECTORY_END),this.readBlockEndOfCentral(),this.diskNumber===g.MAX_VALUE_16BITS||this.diskWithCentralDirStart===g.MAX_VALUE_16BITS||this.centralDirRecordsOnThisDisk===g.MAX_VALUE_16BITS||this.centralDirRecords===g.MAX_VALUE_16BITS||this.centralDirSize===g.MAX_VALUE_32BITS||this.centralDirOffset===g.MAX_VALUE_32BITS){if(this.zip64=!0,a=this.reader.lastIndexOfSignature(h.ZIP64_CENTRAL_DIRECTORY_LOCATOR),-1===a)throw new Error("Corrupted zip : can't find the ZIP64 end of central directory locator");this.reader.setIndex(a),this.checkSignature(h.ZIP64_CENTRAL_DIRECTORY_LOCATOR),this.readBlockZip64EndOfCentralLocator(),this.reader.setIndex(this.relativeOffsetEndOfZip64CentralDir),this.checkSignature(h.ZIP64_CENTRAL_DIRECTORY_END),this.readBlockZip64EndOfCentral()}},prepareReader:function(a){var 
b=g.getTypeOf(a);this.reader="string"!==b||j.uint8array?"nodebuffer"===b?new e(a):new f(g.transformTo("uint8array",a)):new d(a,this.loadOptions.optimizedBinaryString)},load:function(a){this.prepareReader(a),this.readEndOfCentral(),this.readCentralDir(),this.readLocalFiles()}},b.exports=c},{"./nodeBufferReader":12,"./object":13,"./signature":14,"./stringReader":15,"./support":17,"./uint8ArrayReader":18,"./utils":21,"./zipEntry":23}],23:[function(a,b){"use strict";function c(a,b){this.options=a,this.loadOptions=b}var d=a("./stringReader"),e=a("./utils"),f=a("./compressedObject"),g=a("./object");c.prototype={isEncrypted:function(){return 1===(1&this.bitFlag)},useUTF8:function(){return 2048===(2048&this.bitFlag)},prepareCompressedContent:function(a,b,c){return function(){var d=a.index;a.setIndex(b);var e=a.readData(c);return a.setIndex(d),e}},prepareContent:function(a,b,c,d,f){return function(){var a=e.transformTo(d.uncompressInputType,this.getCompressedContent()),b=d.uncompress(a);if(b.length!==f)throw new Error("Bug : uncompressed data size mismatch");return b}},readLocalPart:function(a){var b,c;if(a.skip(22),this.fileNameLength=a.readInt(2),c=a.readInt(2),this.fileName=a.readString(this.fileNameLength),a.skip(c),-1==this.compressedSize||-1==this.uncompressedSize)throw new Error("Bug or corrupted zip : didn't get enough informations from the central directory (compressedSize == -1 || uncompressedSize == -1)");if(b=e.findCompression(this.compressionMethod),null===b)throw new Error("Corrupted zip : compression "+e.pretty(this.compressionMethod)+" unknown (inner file : "+this.fileName+")");if(this.decompressed=new 
f,this.decompressed.compressedSize=this.compressedSize,this.decompressed.uncompressedSize=this.uncompressedSize,this.decompressed.crc32=this.crc32,this.decompressed.compressionMethod=this.compressionMethod,this.decompressed.getCompressedContent=this.prepareCompressedContent(a,a.index,this.compressedSize,b),this.decompressed.getContent=this.prepareContent(a,a.index,this.compressedSize,b,this.uncompressedSize),this.loadOptions.checkCRC32&&(this.decompressed=e.transformTo("string",this.decompressed.getContent()),g.crc32(this.decompressed)!==this.crc32))throw new Error("Corrupted zip : CRC32 mismatch")},readCentralPart:function(a){if(this.versionMadeBy=a.readString(2),this.versionNeeded=a.readInt(2),this.bitFlag=a.readInt(2),this.compressionMethod=a.readString(2),this.date=a.readDate(),this.crc32=a.readInt(4),this.compressedSize=a.readInt(4),this.uncompressedSize=a.readInt(4),this.fileNameLength=a.readInt(2),this.extraFieldsLength=a.readInt(2),this.fileCommentLength=a.readInt(2),this.diskNumberStart=a.readInt(2),this.internalFileAttributes=a.readInt(2),this.externalFileAttributes=a.readInt(4),this.localHeaderOffset=a.readInt(4),this.isEncrypted())throw new Error("Encrypted zip are not supported");this.fileName=a.readString(this.fileNameLength),this.readExtraFields(a),this.parseZIP64ExtraField(a),this.fileComment=a.readString(this.fileCommentLength),this.dir=16&this.externalFileAttributes?!0:!1},parseZIP64ExtraField:function(){if(this.extraFields[1]){var a=new d(this.extraFields[1].value);this.uncompressedSize===e.MAX_VALUE_32BITS&&(this.uncompressedSize=a.readInt(8)),this.compressedSize===e.MAX_VALUE_32BITS&&(this.compressedSize=a.readInt(8)),this.localHeaderOffset===e.MAX_VALUE_32BITS&&(this.localHeaderOffset=a.readInt(8)),this.diskNumberStart===e.MAX_VALUE_32BITS&&(this.diskNumberStart=a.readInt(4))}},readExtraFields:function(a){var 
b,c,d,e=a.index;for(this.extraFields=this.extraFields||{};a.index<e+this.extraFieldsLength;)b=a.readInt(2),c=a.readInt(2),d=a.readString(c),this.extraFields[b]={id:b,length:c,value:d}},handleUTF8:function(){if(this.useUTF8())this.fileName=g.utf8decode(this.fileName),this.fileComment=g.utf8decode(this.fileComment);else{var a=this.findExtraFieldUnicodePath();null!==a&&(this.fileName=a);var b=this.findExtraFieldUnicodeComment();null!==b&&(this.fileComment=b)}},findExtraFieldUnicodePath:function(){var a=this.extraFields[28789];if(a){var b=new d(a.value);return 1!==b.readInt(1)?null:g.crc32(this.fileName)!==b.readInt(4)?null:g.utf8decode(b.readString(a.length-5))}return null},findExtraFieldUnicodeComment:function(){var a=this.extraFields[25461];if(a){var b=new d(a.value);return 1!==b.readInt(1)?null:g.crc32(this.fileComment)!==b.readInt(4)?null:g.utf8decode(b.readString(a.length-5))}return null}},b.exports=c},{"./compressedObject":2,"./object":13,"./stringReader":15,"./utils":21}],24:[function(a,b){"use strict";var c=a("./lib/utils/common").assign,d=a("./lib/deflate"),e=a("./lib/inflate"),f=a("./lib/zlib/constants"),g={};c(g,d,e,f),b.exports=g},{"./lib/deflate":25,"./lib/inflate":26,"./lib/utils/common":27,"./lib/zlib/constants":30}],25:[function(a,b,c){"use strict";function d(a,b){var c=new s(b);if(c.push(a,!0),c.err)throw c.msg;return c.result}function e(a,b){return b=b||{},b.raw=!0,d(a,b)}function f(a,b){return b=b||{},b.gzip=!0,d(a,b)}var g=a("./zlib/deflate.js"),h=a("./utils/common"),i=a("./utils/strings"),j=a("./zlib/messages"),k=a("./zlib/zstream"),l=0,m=4,n=0,o=1,p=-1,q=0,r=8,s=function(a){this.options=h.assign({level:p,method:r,chunkSize:16384,windowBits:15,memLevel:8,strategy:q,to:""},a||{});var b=this.options;b.raw&&b.windowBits>0?b.windowBits=-b.windowBits:b.gzip&&b.windowBits>0&&b.windowBits<16&&(b.windowBits+=16),this.err=0,this.msg="",this.ended=!1,this.chunks=[],this.strm=new k,this.strm.avail_out=0;var 
c=g.deflateInit2(this.strm,b.level,b.method,b.windowBits,b.memLevel,b.strategy);if(c!==n)throw new Error(j[c]);b.header&&g.deflateSetHeader(this.strm,b.header)};s.prototype.push=function(a,b){var c,d,e=this.strm,f=this.options.chunkSize;if(this.ended)return!1;d=b===~~b?b:b===!0?m:l,e.input="string"==typeof a?i.string2buf(a):a,e.next_in=0,e.avail_in=e.input.length;do{if(0===e.avail_out&&(e.output=new h.Buf8(f),e.next_out=0,e.avail_out=f),c=g.deflate(e,d),c!==o&&c!==n)return this.onEnd(c),this.ended=!0,!1;(0===e.avail_out||0===e.avail_in&&d===m)&&this.onData("string"===this.options.to?i.buf2binstring(h.shrinkBuf(e.output,e.next_out)):h.shrinkBuf(e.output,e.next_out))}while((e.avail_in>0||0===e.avail_out)&&c!==o);return d===m?(c=g.deflateEnd(this.strm),this.onEnd(c),this.ended=!0,c===n):!0},s.prototype.onData=function(a){this.chunks.push(a)},s.prototype.onEnd=function(a){a===n&&(this.result="string"===this.options.to?this.chunks.join(""):h.flattenChunks(this.chunks)),this.chunks=[],this.err=a,this.msg=this.strm.msg},c.Deflate=s,c.deflate=d,c.deflateRaw=e,c.gzip=f},{"./utils/common":27,"./utils/strings":28,"./zlib/deflate.js":32,"./zlib/messages":37,"./zlib/zstream":39}],26:[function(a,b,c){"use strict";function d(a,b){var c=new m(b);if(c.push(a,!0),c.err)throw c.msg;return c.result}function e(a,b){return b=b||{},b.raw=!0,d(a,b)}var f=a("./zlib/inflate.js"),g=a("./utils/common"),h=a("./utils/strings"),i=a("./zlib/constants"),j=a("./zlib/messages"),k=a("./zlib/zstream"),l=a("./zlib/gzheader"),m=function(a){this.options=g.assign({chunkSize:16384,windowBits:0,to:""},a||{});var b=this.options;b.raw&&b.windowBits>=0&&b.windowBits<16&&(b.windowBits=-b.windowBits,0===b.windowBits&&(b.windowBits=-15)),!(b.windowBits>=0&&b.windowBits<16)||a&&a.windowBits||(b.windowBits+=32),b.windowBits>15&&b.windowBits<48&&0===(15&b.windowBits)&&(b.windowBits|=15),this.err=0,this.msg="",this.ended=!1,this.chunks=[],this.strm=new k,this.strm.avail_out=0;var 
c=f.inflateInit2(this.strm,b.windowBits);if(c!==i.Z_OK)throw new Error(j[c]);this.header=new l,f.inflateGetHeader(this.strm,this.header)};m.prototype.push=function(a,b){var c,d,e,j,k,l=this.strm,m=this.options.chunkSize;if(this.ended)return!1;d=b===~~b?b:b===!0?i.Z_FINISH:i.Z_NO_FLUSH,l.input="string"==typeof a?h.binstring2buf(a):a,l.next_in=0,l.avail_in=l.input.length;do{if(0===l.avail_out&&(l.output=new g.Buf8(m),l.next_out=0,l.avail_out=m),c=f.inflate(l,i.Z_NO_FLUSH),c!==i.Z_STREAM_END&&c!==i.Z_OK)return this.onEnd(c),this.ended=!0,!1;l.next_out&&(0===l.avail_out||c===i.Z_STREAM_END||0===l.avail_in&&d===i.Z_FINISH)&&("string"===this.options.to?(e=h.utf8border(l.output,l.next_out),j=l.next_out-e,k=h.buf2string(l.output,e),l.next_out=j,l.avail_out=m-j,j&&g.arraySet(l.output,l.output,e,j,0),this.onData(k)):this.onData(g.shrinkBuf(l.output,l.next_out)))}while(l.avail_in>0&&c!==i.Z_STREAM_END);return c===i.Z_STREAM_END&&(d=i.Z_FINISH),d===i.Z_FINISH?(c=f.inflateEnd(this.strm),this.onEnd(c),this.ended=!0,c===i.Z_OK):!0},m.prototype.onData=function(a){this.chunks.push(a)},m.prototype.onEnd=function(a){a===i.Z_OK&&(this.result="string"===this.options.to?this.chunks.join(""):g.flattenChunks(this.chunks)),this.chunks=[],this.err=a,this.msg=this.strm.msg},c.Inflate=m,c.inflate=d,c.inflateRaw=e,c.ungzip=d},{"./utils/common":27,"./utils/strings":28,"./zlib/constants":30,"./zlib/gzheader":33,"./zlib/inflate.js":35,"./zlib/messages":37,"./zlib/zstream":39}],27:[function(a,b,c){"use strict";var d="undefined"!=typeof Uint8Array&&"undefined"!=typeof Uint16Array&&"undefined"!=typeof Int32Array;c.assign=function(a){for(var b=Array.prototype.slice.call(arguments,1);b.length;){var c=b.shift();if(c){if("object"!=typeof c)throw new TypeError(c+"must be non-object");for(var d in c)c.hasOwnProperty(d)&&(a[d]=c[d])}}return a},c.shrinkBuf=function(a,b){return a.length===b?a:a.subarray?a.subarray(0,b):(a.length=b,a)};var e={arraySet:function(a,b,c,d,e){if(b.subarray&&a.subarray)return void 
a.set(b.subarray(c,c+d),e);for(var f=0;d>f;f++)a[e+f]=b[c+f]},flattenChunks:function(a){var b,c,d,e,f,g;for(d=0,b=0,c=a.length;c>b;b++)d+=a[b].length;for(g=new Uint8Array(d),e=0,b=0,c=a.length;c>b;b++)f=a[b],g.set(f,e),e+=f.length;return g}},f={arraySet:function(a,b,c,d,e){for(var f=0;d>f;f++)a[e+f]=b[c+f]},flattenChunks:function(a){return[].concat.apply([],a)}};c.setTyped=function(a){a?(c.Buf8=Uint8Array,c.Buf16=Uint16Array,c.Buf32=Int32Array,c.assign(c,e)):(c.Buf8=Array,c.Buf16=Array,c.Buf32=Array,c.assign(c,f))},c.setTyped(d)},{}],28:[function(a,b,c){"use strict";function d(a,b){if(65537>b&&(a.subarray&&g||!a.subarray&&f))return String.fromCharCode.apply(null,e.shrinkBuf(a,b));for(var c="",d=0;b>d;d++)c+=String.fromCharCode(a[d]);return c}var e=a("./common"),f=!0,g=!0;try{String.fromCharCode.apply(null,[0])}catch(h){f=!1}try{String.fromCharCode.apply(null,new Uint8Array(1))}catch(h){g=!1}for(var i=new e.Buf8(256),j=0;256>j;j++)i[j]=j>=252?6:j>=248?5:j>=240?4:j>=224?3:j>=192?2:1;i[254]=i[254]=1,c.string2buf=function(a){var b,c,d,f,g,h=a.length,i=0;for(f=0;h>f;f++)c=a.charCodeAt(f),55296===(64512&c)&&h>f+1&&(d=a.charCodeAt(f+1),56320===(64512&d)&&(c=65536+(c-55296<<10)+(d-56320),f++)),i+=128>c?1:2048>c?2:65536>c?3:4;for(b=new e.Buf8(i),g=0,f=0;i>g;f++)c=a.charCodeAt(f),55296===(64512&c)&&h>f+1&&(d=a.charCodeAt(f+1),56320===(64512&d)&&(c=65536+(c-55296<<10)+(d-56320),f++)),128>c?b[g++]=c:2048>c?(b[g++]=192|c>>>6,b[g++]=128|63&c):65536>c?(b[g++]=224|c>>>12,b[g++]=128|c>>>6&63,b[g++]=128|63&c):(b[g++]=240|c>>>18,b[g++]=128|c>>>12&63,b[g++]=128|c>>>6&63,b[g++]=128|63&c);return b},c.buf2binstring=function(a){return d(a,a.length)},c.binstring2buf=function(a){for(var b=new e.Buf8(a.length),c=0,d=b.length;d>c;c++)b[c]=a.charCodeAt(c);return b},c.buf2string=function(a,b){var c,e,f,g,h=b||a.length,j=new Array(2*h);for(e=0,c=0;h>c;)if(f=a[c++],128>f)j[e++]=f;else 
if(g=i[f],g>4)j[e++]=65533,c+=g-1;else{for(f&=2===g?31:3===g?15:7;g>1&&h>c;)f=f<<6|63&a[c++],g--;g>1?j[e++]=65533:65536>f?j[e++]=f:(f-=65536,j[e++]=55296|f>>10&1023,j[e++]=56320|1023&f)}return d(j,e)},c.utf8border=function(a,b){var c;for(b=b||a.length,b>a.length&&(b=a.length),c=b-1;c>=0&&128===(192&a[c]);)c--;return 0>c?b:0===c?b:c+i[a[c]]>b?c:b}},{"./common":27}],29:[function(a,b){"use strict";function c(a,b,c,d){for(var e=65535&a|0,f=a>>>16&65535|0,g=0;0!==c;){g=c>2e3?2e3:c,c-=g;do e=e+b[d++]|0,f=f+e|0;while(--g);e%=65521,f%=65521}return e|f<<16|0}b.exports=c},{}],30:[function(a,b){b.exports={Z_NO_FLUSH:0,Z_PARTIAL_FLUSH:1,Z_SYNC_FLUSH:2,Z_FULL_FLUSH:3,Z_FINISH:4,Z_BLOCK:5,Z_TREES:6,Z_OK:0,Z_STREAM_END:1,Z_NEED_DICT:2,Z_ERRNO:-1,Z_STREAM_ERROR:-2,Z_DATA_ERROR:-3,Z_BUF_ERROR:-5,Z_NO_COMPRESSION:0,Z_BEST_SPEED:1,Z_BEST_COMPRESSION:9,Z_DEFAULT_COMPRESSION:-1,Z_FILTERED:1,Z_HUFFMAN_ONLY:2,Z_RLE:3,Z_FIXED:4,Z_DEFAULT_STRATEGY:0,Z_BINARY:0,Z_TEXT:1,Z_UNKNOWN:2,Z_DEFLATED:8}},{}],31:[function(a,b){"use strict";function c(){for(var a,b=[],c=0;256>c;c++){a=c;for(var d=0;8>d;d++)a=1&a?3988292384^a>>>1:a>>>1;b[c]=a}return b}function d(a,b,c,d){var f=e,g=d+c;a=-1^a;for(var h=d;g>h;h++)a=a>>>8^f[255&(a^b[h])];return-1^a}var e=c();b.exports=d},{}],32:[function(a,b,c){"use strict";function d(a,b){return a.msg=G[b],b}function e(a){return(a<<1)-(a>4?9:0)}function f(a){for(var b=a.length;--b>=0;)a[b]=0}function g(a){var b=a.state,c=b.pending;c>a.avail_out&&(c=a.avail_out),0!==c&&(C.arraySet(a.output,b.pending_buf,b.pending_out,c,a.next_out),a.next_out+=c,b.pending_out+=c,a.total_out+=c,a.avail_out-=c,b.pending-=c,0===b.pending&&(b.pending_out=0))}function h(a,b){D._tr_flush_block(a,a.block_start>=0?a.block_start:-1,a.strstart-a.block_start,b),a.block_start=a.strstart,g(a.strm)}function i(a,b){a.pending_buf[a.pending++]=b}function j(a,b){a.pending_buf[a.pending++]=b>>>8&255,a.pending_buf[a.pending++]=255&b}function k(a,b,c,d){var e=a.avail_in;return 
e>d&&(e=d),0===e?0:(a.avail_in-=e,C.arraySet(b,a.input,a.next_in,e,c),1===a.state.wrap?a.adler=E(a.adler,b,e,c):2===a.state.wrap&&(a.adler=F(a.adler,b,e,c)),a.next_in+=e,a.total_in+=e,e)}function l(a,b){var c,d,e=a.max_chain_length,f=a.strstart,g=a.prev_length,h=a.nice_match,i=a.strstart>a.w_size-jb?a.strstart-(a.w_size-jb):0,j=a.window,k=a.w_mask,l=a.prev,m=a.strstart+ib,n=j[f+g-1],o=j[f+g];a.prev_length>=a.good_match&&(e>>=2),h>a.lookahead&&(h=a.lookahead);do if(c=b,j[c+g]===o&&j[c+g-1]===n&&j[c]===j[f]&&j[++c]===j[f+1]){f+=2,c++;do;while(j[++f]===j[++c]&&j[++f]===j[++c]&&j[++f]===j[++c]&&j[++f]===j[++c]&&j[++f]===j[++c]&&j[++f]===j[++c]&&j[++f]===j[++c]&&j[++f]===j[++c]&&m>f);if(d=ib-(m-f),f=m-ib,d>g){if(a.match_start=b,g=d,d>=h)break;n=j[f+g-1],o=j[f+g]}}while((b=l[b&k])>i&&0!==--e);return g<=a.lookahead?g:a.lookahead}function m(a){var b,c,d,e,f,g=a.w_size;do{if(e=a.window_size-a.lookahead-a.strstart,a.strstart>=g+(g-jb)){C.arraySet(a.window,a.window,g,g,0),a.match_start-=g,a.strstart-=g,a.block_start-=g,c=a.hash_size,b=c;do d=a.head[--b],a.head[b]=d>=g?d-g:0;while(--c);c=g,b=c;do d=a.prev[--b],a.prev[b]=d>=g?d-g:0;while(--c);e+=g}if(0===a.strm.avail_in)break;if(c=k(a.strm,a.window,a.strstart+a.lookahead,e),a.lookahead+=c,a.lookahead+a.insert>=hb)for(f=a.strstart-a.insert,a.ins_h=a.window[f],a.ins_h=(a.ins_h<<a.hash_shift^a.window[f+1])&a.hash_mask;a.insert&&(a.ins_h=(a.ins_h<<a.hash_shift^a.window[f+hb-1])&a.hash_mask,a.prev[f&a.w_mask]=a.head[a.ins_h],a.head[a.ins_h]=f,f++,a.insert--,!(a.lookahead+a.insert<hb)););}while(a.lookahead<jb&&0!==a.strm.avail_in)}function n(a,b){var c=65535;for(c>a.pending_buf_size-5&&(c=a.pending_buf_size-5);;){if(a.lookahead<=1){if(m(a),0===a.lookahead&&b===H)return sb;if(0===a.lookahead)break}a.strstart+=a.lookahead,a.lookahead=0;var d=a.block_start+c;if((0===a.strstart||a.strstart>=d)&&(a.lookahead=a.strstart-d,a.strstart=d,h(a,!1),0===a.strm.avail_out))return 
sb;if(a.strstart-a.block_start>=a.w_size-jb&&(h(a,!1),0===a.strm.avail_out))return sb}return a.insert=0,b===K?(h(a,!0),0===a.strm.avail_out?ub:vb):a.strstart>a.block_start&&(h(a,!1),0===a.strm.avail_out)?sb:sb}function o(a,b){for(var c,d;;){if(a.lookahead<jb){if(m(a),a.lookahead<jb&&b===H)return sb;if(0===a.lookahead)break}if(c=0,a.lookahead>=hb&&(a.ins_h=(a.ins_h<<a.hash_shift^a.window[a.strstart+hb-1])&a.hash_mask,c=a.prev[a.strstart&a.w_mask]=a.head[a.ins_h],a.head[a.ins_h]=a.strstart),0!==c&&a.strstart-c<=a.w_size-jb&&(a.match_length=l(a,c)),a.match_length>=hb)if(d=D._tr_tally(a,a.strstart-a.match_start,a.match_length-hb),a.lookahead-=a.match_length,a.match_length<=a.max_lazy_match&&a.lookahead>=hb){a.match_length--;do a.strstart++,a.ins_h=(a.ins_h<<a.hash_shift^a.window[a.strstart+hb-1])&a.hash_mask,c=a.prev[a.strstart&a.w_mask]=a.head[a.ins_h],a.head[a.ins_h]=a.strstart;while(0!==--a.match_length);a.strstart++}else a.strstart+=a.match_length,a.match_length=0,a.ins_h=a.window[a.strstart],a.ins_h=(a.ins_h<<a.hash_shift^a.window[a.strstart+1])&a.hash_mask;else d=D._tr_tally(a,0,a.window[a.strstart]),a.lookahead--,a.strstart++;if(d&&(h(a,!1),0===a.strm.avail_out))return sb}return a.insert=a.strstart<hb-1?a.strstart:hb-1,b===K?(h(a,!0),0===a.strm.avail_out?ub:vb):a.last_lit&&(h(a,!1),0===a.strm.avail_out)?sb:tb}function p(a,b){for(var c,d,e;;){if(a.lookahead<jb){if(m(a),a.lookahead<jb&&b===H)return 
sb;if(0===a.lookahead)break}if(c=0,a.lookahead>=hb&&(a.ins_h=(a.ins_h<<a.hash_shift^a.window[a.strstart+hb-1])&a.hash_mask,c=a.prev[a.strstart&a.w_mask]=a.head[a.ins_h],a.head[a.ins_h]=a.strstart),a.prev_length=a.match_length,a.prev_match=a.match_start,a.match_length=hb-1,0!==c&&a.prev_length<a.max_lazy_match&&a.strstart-c<=a.w_size-jb&&(a.match_length=l(a,c),a.match_length<=5&&(a.strategy===S||a.match_length===hb&&a.strstart-a.match_start>4096)&&(a.match_length=hb-1)),a.prev_length>=hb&&a.match_length<=a.prev_length){e=a.strstart+a.lookahead-hb,d=D._tr_tally(a,a.strstart-1-a.prev_match,a.prev_length-hb),a.lookahead-=a.prev_length-1,a.prev_length-=2;do++a.strstart<=e&&(a.ins_h=(a.ins_h<<a.hash_shift^a.window[a.strstart+hb-1])&a.hash_mask,c=a.prev[a.strstart&a.w_mask]=a.head[a.ins_h],a.head[a.ins_h]=a.strstart);while(0!==--a.prev_length);if(a.match_available=0,a.match_length=hb-1,a.strstart++,d&&(h(a,!1),0===a.strm.avail_out))return sb}else if(a.match_available){if(d=D._tr_tally(a,0,a.window[a.strstart-1]),d&&h(a,!1),a.strstart++,a.lookahead--,0===a.strm.avail_out)return sb}else a.match_available=1,a.strstart++,a.lookahead--}return a.match_available&&(d=D._tr_tally(a,0,a.window[a.strstart-1]),a.match_available=0),a.insert=a.strstart<hb-1?a.strstart:hb-1,b===K?(h(a,!0),0===a.strm.avail_out?ub:vb):a.last_lit&&(h(a,!1),0===a.strm.avail_out)?sb:tb}function q(a,b){for(var c,d,e,f,g=a.window;;){if(a.lookahead<=ib){if(m(a),a.lookahead<=ib&&b===H)return 
sb;if(0===a.lookahead)break}if(a.match_length=0,a.lookahead>=hb&&a.strstart>0&&(e=a.strstart-1,d=g[e],d===g[++e]&&d===g[++e]&&d===g[++e])){f=a.strstart+ib;do;while(d===g[++e]&&d===g[++e]&&d===g[++e]&&d===g[++e]&&d===g[++e]&&d===g[++e]&&d===g[++e]&&d===g[++e]&&f>e);a.match_length=ib-(f-e),a.match_length>a.lookahead&&(a.match_length=a.lookahead)}if(a.match_length>=hb?(c=D._tr_tally(a,1,a.match_length-hb),a.lookahead-=a.match_length,a.strstart+=a.match_length,a.match_length=0):(c=D._tr_tally(a,0,a.window[a.strstart]),a.lookahead--,a.strstart++),c&&(h(a,!1),0===a.strm.avail_out))return sb}return a.insert=0,b===K?(h(a,!0),0===a.strm.avail_out?ub:vb):a.last_lit&&(h(a,!1),0===a.strm.avail_out)?sb:tb}function r(a,b){for(var c;;){if(0===a.lookahead&&(m(a),0===a.lookahead)){if(b===H)return sb;break}if(a.match_length=0,c=D._tr_tally(a,0,a.window[a.strstart]),a.lookahead--,a.strstart++,c&&(h(a,!1),0===a.strm.avail_out))return sb}return a.insert=0,b===K?(h(a,!0),0===a.strm.avail_out?ub:vb):a.last_lit&&(h(a,!1),0===a.strm.avail_out)?sb:tb}function s(a){a.window_size=2*a.w_size,f(a.head),a.max_lazy_match=B[a.level].max_lazy,a.good_match=B[a.level].good_length,a.nice_match=B[a.level].nice_length,a.max_chain_length=B[a.level].max_chain,a.strstart=0,a.block_start=0,a.lookahead=0,a.insert=0,a.match_length=a.prev_length=hb-1,a.match_available=0,a.ins_h=0}function 
t(){this.strm=null,this.status=0,this.pending_buf=null,this.pending_buf_size=0,this.pending_out=0,this.pending=0,this.wrap=0,this.gzhead=null,this.gzindex=0,this.method=Y,this.last_flush=-1,this.w_size=0,this.w_bits=0,this.w_mask=0,this.window=null,this.window_size=0,this.prev=null,this.head=null,this.ins_h=0,this.hash_size=0,this.hash_bits=0,this.hash_mask=0,this.hash_shift=0,this.block_start=0,this.match_length=0,this.prev_match=0,this.match_available=0,this.strstart=0,this.match_start=0,this.lookahead=0,this.prev_length=0,this.max_chain_length=0,this.max_lazy_match=0,this.level=0,this.strategy=0,this.good_match=0,this.nice_match=0,this.dyn_ltree=new C.Buf16(2*fb),this.dyn_dtree=new C.Buf16(2*(2*db+1)),this.bl_tree=new C.Buf16(2*(2*eb+1)),f(this.dyn_ltree),f(this.dyn_dtree),f(this.bl_tree),this.l_desc=null,this.d_desc=null,this.bl_desc=null,this.bl_count=new C.Buf16(gb+1),this.heap=new C.Buf16(2*cb+1),f(this.heap),this.heap_len=0,this.heap_max=0,this.depth=new C.Buf16(2*cb+1),f(this.depth),this.l_buf=0,this.lit_bufsize=0,this.last_lit=0,this.d_buf=0,this.opt_len=0,this.static_len=0,this.matches=0,this.insert=0,this.bi_buf=0,this.bi_valid=0}function u(a){var b;return a&&a.state?(a.total_in=a.total_out=0,a.data_type=X,b=a.state,b.pending=0,b.pending_out=0,b.wrap<0&&(b.wrap=-b.wrap),b.status=b.wrap?lb:qb,a.adler=2===b.wrap?0:1,b.last_flush=H,D._tr_init(b),M):d(a,O)}function v(a){var b=u(a);return b===M&&s(a.state),b}function w(a,b){return a&&a.state?2!==a.state.wrap?O:(a.state.gzhead=b,M):O}function x(a,b,c,e,f,g){if(!a)return O;var h=1;if(b===R&&(b=6),0>e?(h=0,e=-e):e>15&&(h=2,e-=16),1>f||f>Z||c!==Y||8>e||e>15||0>b||b>9||0>g||g>V)return d(a,O);8===e&&(e=9);var i=new t;return a.state=i,i.strm=a,i.wrap=h,i.gzhead=null,i.w_bits=e,i.w_size=1<<i.w_bits,i.w_mask=i.w_size-1,i.hash_bits=f+7,i.hash_size=1<<i.hash_bits,i.hash_mask=i.hash_size-1,i.hash_shift=~~((i.hash_bits+hb-1)/hb),i.window=new C.Buf8(2*i.w_size),i.head=new C.Buf16(i.hash_size),i.prev=new 
C.Buf16(i.w_size),i.lit_bufsize=1<<f+6,i.pending_buf_size=4*i.lit_bufsize,i.pending_buf=new C.Buf8(i.pending_buf_size),i.d_buf=i.lit_bufsize>>1,i.l_buf=3*i.lit_bufsize,i.level=b,i.strategy=g,i.method=c,v(a)}function y(a,b){return x(a,b,Y,$,_,W)}function z(a,b){var c,h,k,l;if(!a||!a.state||b>L||0>b)return a?d(a,O):O;if(h=a.state,!a.output||!a.input&&0!==a.avail_in||h.status===rb&&b!==K)return d(a,0===a.avail_out?Q:O);if(h.strm=a,c=h.last_flush,h.last_flush=b,h.status===lb)if(2===h.wrap)a.adler=0,i(h,31),i(h,139),i(h,8),h.gzhead?(i(h,(h.gzhead.text?1:0)+(h.gzhead.hcrc?2:0)+(h.gzhead.extra?4:0)+(h.gzhead.name?8:0)+(h.gzhead.comment?16:0)),i(h,255&h.gzhead.time),i(h,h.gzhead.time>>8&255),i(h,h.gzhead.time>>16&255),i(h,h.gzhead.time>>24&255),i(h,9===h.level?2:h.strategy>=T||h.level<2?4:0),i(h,255&h.gzhead.os),h.gzhead.extra&&h.gzhead.extra.length&&(i(h,255&h.gzhead.extra.length),i(h,h.gzhead.extra.length>>8&255)),h.gzhead.hcrc&&(a.adler=F(a.adler,h.pending_buf,h.pending,0)),h.gzindex=0,h.status=mb):(i(h,0),i(h,0),i(h,0),i(h,0),i(h,0),i(h,9===h.level?2:h.strategy>=T||h.level<2?4:0),i(h,wb),h.status=qb);else{var m=Y+(h.w_bits-8<<4)<<8,n=-1;n=h.strategy>=T||h.level<2?0:h.level<6?1:6===h.level?2:3,m|=n<<6,0!==h.strstart&&(m|=kb),m+=31-m%31,h.status=qb,j(h,m),0!==h.strstart&&(j(h,a.adler>>>16),j(h,65535&a.adler)),a.adler=1}if(h.status===mb)if(h.gzhead.extra){for(k=h.pending;h.gzindex<(65535&h.gzhead.extra.length)&&(h.pending!==h.pending_buf_size||(h.gzhead.hcrc&&h.pending>k&&(a.adler=F(a.adler,h.pending_buf,h.pending-k,k)),g(a),k=h.pending,h.pending!==h.pending_buf_size));)i(h,255&h.gzhead.extra[h.gzindex]),h.gzindex++;h.gzhead.hcrc&&h.pending>k&&(a.adler=F(a.adler,h.pending_buf,h.pending-k,k)),h.gzindex===h.gzhead.extra.length&&(h.gzindex=0,h.status=nb)}else 
h.status=nb;if(h.status===nb)if(h.gzhead.name){k=h.pending;do{if(h.pending===h.pending_buf_size&&(h.gzhead.hcrc&&h.pending>k&&(a.adler=F(a.adler,h.pending_buf,h.pending-k,k)),g(a),k=h.pending,h.pending===h.pending_buf_size)){l=1;break}l=h.gzindex<h.gzhead.name.length?255&h.gzhead.name.charCodeAt(h.gzindex++):0,i(h,l)}while(0!==l);h.gzhead.hcrc&&h.pending>k&&(a.adler=F(a.adler,h.pending_buf,h.pending-k,k)),0===l&&(h.gzindex=0,h.status=ob)}else h.status=ob;if(h.status===ob)if(h.gzhead.comment){k=h.pending;do{if(h.pending===h.pending_buf_size&&(h.gzhead.hcrc&&h.pending>k&&(a.adler=F(a.adler,h.pending_buf,h.pending-k,k)),g(a),k=h.pending,h.pending===h.pending_buf_size)){l=1;break}l=h.gzindex<h.gzhead.comment.length?255&h.gzhead.comment.charCodeAt(h.gzindex++):0,i(h,l)}while(0!==l);h.gzhead.hcrc&&h.pending>k&&(a.adler=F(a.adler,h.pending_buf,h.pending-k,k)),0===l&&(h.status=pb)}else h.status=pb;if(h.status===pb&&(h.gzhead.hcrc?(h.pending+2>h.pending_buf_size&&g(a),h.pending+2<=h.pending_buf_size&&(i(h,255&a.adler),i(h,a.adler>>8&255),a.adler=0,h.status=qb)):h.status=qb),0!==h.pending){if(g(a),0===a.avail_out)return h.last_flush=-1,M}else if(0===a.avail_in&&e(b)<=e(c)&&b!==K)return d(a,Q);if(h.status===rb&&0!==a.avail_in)return d(a,Q);if(0!==a.avail_in||0!==h.lookahead||b!==H&&h.status!==rb){var o=h.strategy===T?r(h,b):h.strategy===U?q(h,b):B[h.level].func(h,b);if((o===ub||o===vb)&&(h.status=rb),o===sb||o===ub)return 0===a.avail_out&&(h.last_flush=-1),M;if(o===tb&&(b===I?D._tr_align(h):b!==L&&(D._tr_stored_block(h,0,0,!1),b===J&&(f(h.head),0===h.lookahead&&(h.strstart=0,h.block_start=0,h.insert=0))),g(a),0===a.avail_out))return h.last_flush=-1,M}return b!==K?M:h.wrap<=0?N:(2===h.wrap?(i(h,255&a.adler),i(h,a.adler>>8&255),i(h,a.adler>>16&255),i(h,a.adler>>24&255),i(h,255&a.total_in),i(h,a.total_in>>8&255),i(h,a.total_in>>16&255),i(h,a.total_in>>24&255)):(j(h,a.adler>>>16),j(h,65535&a.adler)),g(a),h.wrap>0&&(h.wrap=-h.wrap),0!==h.pending?M:N)}function A(a){var b;return 
a&&a.state?(b=a.state.status,b!==lb&&b!==mb&&b!==nb&&b!==ob&&b!==pb&&b!==qb&&b!==rb?d(a,O):(a.state=null,b===qb?d(a,P):M)):O}var B,C=a("../utils/common"),D=a("./trees"),E=a("./adler32"),F=a("./crc32"),G=a("./messages"),H=0,I=1,J=3,K=4,L=5,M=0,N=1,O=-2,P=-3,Q=-5,R=-1,S=1,T=2,U=3,V=4,W=0,X=2,Y=8,Z=9,$=15,_=8,ab=29,bb=256,cb=bb+1+ab,db=30,eb=19,fb=2*cb+1,gb=15,hb=3,ib=258,jb=ib+hb+1,kb=32,lb=42,mb=69,nb=73,ob=91,pb=103,qb=113,rb=666,sb=1,tb=2,ub=3,vb=4,wb=3,xb=function(a,b,c,d,e){this.good_length=a,this.max_lazy=b,this.nice_length=c,this.max_chain=d,this.func=e};B=[new xb(0,0,0,0,n),new xb(4,4,8,4,o),new xb(4,5,16,8,o),new xb(4,6,32,32,o),new xb(4,4,16,16,p),new xb(8,16,32,32,p),new xb(8,16,128,128,p),new xb(8,32,128,256,p),new xb(32,128,258,1024,p),new xb(32,258,258,4096,p)],c.deflateInit=y,c.deflateInit2=x,c.deflateReset=v,c.deflateResetKeep=u,c.deflateSetHeader=w,c.deflate=z,c.deflateEnd=A,c.deflateInfo="pako deflate (from Nodeca project)"},{"../utils/common":27,"./adler32":29,"./crc32":31,"./messages":37,"./trees":38}],33:[function(a,b){"use strict";function c(){this.text=0,this.time=0,this.xflags=0,this.os=0,this.extra=null,this.extra_len=0,this.name="",this.comment="",this.hcrc=0,this.done=!1}b.exports=c},{}],34:[function(a,b){"use strict";var c=30,d=12;b.exports=function(a,b){var e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u,v,w,x,y,z,A,B,C;e=a.state,f=a.next_in,B=a.input,g=f+(a.avail_in-5),h=a.next_out,C=a.output,i=h-(b-a.avail_out),j=h+(a.avail_out-257),k=e.dmax,l=e.wsize,m=e.whave,n=e.wnext,o=e.window,p=e.hold,q=e.bits,r=e.lencode,s=e.distcode,t=(1<<e.lenbits)-1,u=(1<<e.distbits)-1;a:do{15>q&&(p+=B[f++]<<q,q+=8,p+=B[f++]<<q,q+=8),v=r[p&t];b:for(;;){if(w=v>>>24,p>>>=w,q-=w,w=v>>>16&255,0===w)C[h++]=65535&v;else{if(!(16&w)){if(0===(64&w)){v=r[(65535&v)+(p&(1<<w)-1)];continue b}if(32&w){e.mode=d;break a}a.msg="invalid literal/length code",e.mode=c;break 
a}x=65535&v,w&=15,w&&(w>q&&(p+=B[f++]<<q,q+=8),x+=p&(1<<w)-1,p>>>=w,q-=w),15>q&&(p+=B[f++]<<q,q+=8,p+=B[f++]<<q,q+=8),v=s[p&u];c:for(;;){if(w=v>>>24,p>>>=w,q-=w,w=v>>>16&255,!(16&w)){if(0===(64&w)){v=s[(65535&v)+(p&(1<<w)-1)];continue c}a.msg="invalid distance code",e.mode=c;break a}if(y=65535&v,w&=15,w>q&&(p+=B[f++]<<q,q+=8,w>q&&(p+=B[f++]<<q,q+=8)),y+=p&(1<<w)-1,y>k){a.msg="invalid distance too far back",e.mode=c;break a}if(p>>>=w,q-=w,w=h-i,y>w){if(w=y-w,w>m&&e.correct){a.msg="invalid distance too far back",e.mode=c;break a}if(z=0,A=o,0===n){if(z+=l-w,x>w){x-=w;do C[h++]=o[z++];while(--w);z=h-y,A=C}}else if(w>n){if(z+=l+n-w,w-=n,x>w){x-=w;do C[h++]=o[z++];while(--w);if(z=0,x>n){w=n,x-=w;do C[h++]=o[z++];while(--w);z=h-y,A=C}}}else if(z+=n-w,x>w){x-=w;do C[h++]=o[z++];while(--w);z=h-y,A=C}for(;x>2;)C[h++]=A[z++],C[h++]=A[z++],C[h++]=A[z++],x-=3;x&&(C[h++]=A[z++],x>1&&(C[h++]=A[z++]))}else{z=h-y;do C[h++]=C[z++],C[h++]=C[z++],C[h++]=C[z++],x-=3;while(x>2);x&&(C[h++]=C[z++],x>1&&(C[h++]=C[z++]))}break}}break}}while(g>f&&j>h);x=q>>3,f-=x,q-=x<<3,p&=(1<<q)-1,a.next_in=f,a.next_out=h,a.avail_in=g>f?5+(g-f):5-(f-g),a.avail_out=j>h?257+(j-h):257-(h-j),e.hold=p,e.bits=q}},{}],35:[function(a,b,c){"use strict";function d(a){return(a>>>24&255)+(a>>>8&65280)+((65280&a)<<8)+((255&a)<<24)}function e(){this.mode=0,this.last=!1,this.wrap=0,this.havedict=!1,this.flags=0,this.dmax=0,this.check=0,this.total=0,this.head=null,this.wbits=0,this.wsize=0,this.whave=0,this.wnext=0,this.window=null,this.hold=0,this.bits=0,this.length=0,this.offset=0,this.extra=0,this.lencode=null,this.distcode=null,this.lenbits=0,this.distbits=0,this.ncode=0,this.nlen=0,this.ndist=0,this.have=0,this.next=null,this.lens=new r.Buf16(320),this.work=new r.Buf16(288),this.lendyn=null,this.distdyn=null,this.correct=0,this.back=0,this.was=0}function f(a){var b;return 
a&&a.state?(b=a.state,a.total_in=a.total_out=b.total=0,a.msg="",b.wrap&&(a.adler=1&b.wrap),b.mode=K,b.last=0,b.havedict=0,b.dmax=32768,b.head=null,b.hold=0,b.bits=0,b.lencode=b.lendyn=new r.Buf32(ob),b.distcode=b.distdyn=new r.Buf32(pb),b.correct=1,b.back=-1,C):F}function g(a){var b;return a&&a.state?(b=a.state,b.wsize=0,b.whave=0,b.wnext=0,f(a)):F}function h(a,b){var c,d;return a&&a.state?(d=a.state,0>b?(c=0,b=-b):(c=(b>>4)+1,48>b&&(b&=15)),b&&(8>b||b>15)?F:(null!==d.window&&d.wbits!==b&&(d.window=null),d.wrap=c,d.wbits=b,g(a))):F}function i(a,b){var c,d;return a?(d=new e,a.state=d,d.window=null,c=h(a,b),c!==C&&(a.state=null),c):F}function j(a){return i(a,rb)}function k(a){if(sb){var b;for(p=new r.Buf32(512),q=new r.Buf32(32),b=0;144>b;)a.lens[b++]=8;for(;256>b;)a.lens[b++]=9;for(;280>b;)a.lens[b++]=7;for(;288>b;)a.lens[b++]=8;for(v(x,a.lens,0,288,p,0,a.work,{bits:9}),b=0;32>b;)a.lens[b++]=5;v(y,a.lens,0,32,q,0,a.work,{bits:5}),sb=!1}a.lencode=p,a.lenbits=9,a.distcode=q,a.distbits=5}function l(a,b,c,d){var e,f=a.state;return null===f.window&&(f.wsize=1<<f.wbits,f.wnext=0,f.whave=0,f.window=new r.Buf8(f.wsize)),d>=f.wsize?(r.arraySet(f.window,b,c-f.wsize,f.wsize,0),f.wnext=0,f.whave=f.wsize):(e=f.wsize-f.wnext,e>d&&(e=d),r.arraySet(f.window,b,c-d,e,f.wnext),d-=e,d?(r.arraySet(f.window,b,c-d,d,0),f.wnext=d,f.whave=f.wsize):(f.wnext+=e,f.wnext===f.wsize&&(f.wnext=0),f.whave<f.wsize&&(f.whave+=e))),0}function m(a,b){var c,e,f,g,h,i,j,m,n,o,p,q,ob,pb,qb,rb,sb,tb,ub,vb,wb,xb,yb,zb,Ab=0,Bb=new r.Buf8(4),Cb=[16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15];if(!a||!a.state||!a.output||!a.input&&0!==a.avail_in)return F;c=a.state,c.mode===V&&(c.mode=W),h=a.next_out,f=a.output,j=a.avail_out,g=a.next_in,e=a.input,i=a.avail_in,m=c.hold,n=c.bits,o=i,p=j,xb=C;a:for(;;)switch(c.mode){case K:if(0===c.wrap){c.mode=W;break}for(;16>n;){if(0===i)break 
a;i--,m+=e[g++]<<n,n+=8}if(2&c.wrap&&35615===m){c.check=0,Bb[0]=255&m,Bb[1]=m>>>8&255,c.check=t(c.check,Bb,2,0),m=0,n=0,c.mode=L;break}if(c.flags=0,c.head&&(c.head.done=!1),!(1&c.wrap)||(((255&m)<<8)+(m>>8))%31){a.msg="incorrect header check",c.mode=lb;break}if((15&m)!==J){a.msg="unknown compression method",c.mode=lb;break}if(m>>>=4,n-=4,wb=(15&m)+8,0===c.wbits)c.wbits=wb;else if(wb>c.wbits){a.msg="invalid window size",c.mode=lb;break}c.dmax=1<<wb,a.adler=c.check=1,c.mode=512&m?T:V,m=0,n=0;break;case L:for(;16>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(c.flags=m,(255&c.flags)!==J){a.msg="unknown compression method",c.mode=lb;break}if(57344&c.flags){a.msg="unknown header flags set",c.mode=lb;break}c.head&&(c.head.text=m>>8&1),512&c.flags&&(Bb[0]=255&m,Bb[1]=m>>>8&255,c.check=t(c.check,Bb,2,0)),m=0,n=0,c.mode=M;case M:for(;32>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}c.head&&(c.head.time=m),512&c.flags&&(Bb[0]=255&m,Bb[1]=m>>>8&255,Bb[2]=m>>>16&255,Bb[3]=m>>>24&255,c.check=t(c.check,Bb,4,0)),m=0,n=0,c.mode=N;case N:for(;16>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}c.head&&(c.head.xflags=255&m,c.head.os=m>>8),512&c.flags&&(Bb[0]=255&m,Bb[1]=m>>>8&255,c.check=t(c.check,Bb,2,0)),m=0,n=0,c.mode=O;case O:if(1024&c.flags){for(;16>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}c.length=m,c.head&&(c.head.extra_len=m),512&c.flags&&(Bb[0]=255&m,Bb[1]=m>>>8&255,c.check=t(c.check,Bb,2,0)),m=0,n=0}else c.head&&(c.head.extra=null);c.mode=P;case P:if(1024&c.flags&&(q=c.length,q>i&&(q=i),q&&(c.head&&(wb=c.head.extra_len-c.length,c.head.extra||(c.head.extra=new Array(c.head.extra_len)),r.arraySet(c.head.extra,e,g,q,wb)),512&c.flags&&(c.check=t(c.check,e,q,g)),i-=q,g+=q,c.length-=q),c.length))break a;c.length=0,c.mode=Q;case Q:if(2048&c.flags){if(0===i)break a;q=0;do wb=e[g+q++],c.head&&wb&&c.length<65536&&(c.head.name+=String.fromCharCode(wb));while(wb&&i>q);if(512&c.flags&&(c.check=t(c.check,e,q,g)),i-=q,g+=q,wb)break a}else c.head&&(c.head.name=null);c.length=0,c.mode=R;case 
R:if(4096&c.flags){if(0===i)break a;q=0;do wb=e[g+q++],c.head&&wb&&c.length<65536&&(c.head.comment+=String.fromCharCode(wb));while(wb&&i>q);if(512&c.flags&&(c.check=t(c.check,e,q,g)),i-=q,g+=q,wb)break a}else c.head&&(c.head.comment=null);c.mode=S;case S:if(512&c.flags){for(;16>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(m!==(65535&c.check)){a.msg="header crc mismatch",c.mode=lb;break}m=0,n=0}c.head&&(c.head.hcrc=c.flags>>9&1,c.head.done=!0),a.adler=c.check=0,c.mode=V;break;case T:for(;32>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}a.adler=c.check=d(m),m=0,n=0,c.mode=U;case U:if(0===c.havedict)return a.next_out=h,a.avail_out=j,a.next_in=g,a.avail_in=i,c.hold=m,c.bits=n,E;a.adler=c.check=1,c.mode=V;case V:if(b===A||b===B)break a;case W:if(c.last){m>>>=7&n,n-=7&n,c.mode=ib;break}for(;3>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}switch(c.last=1&m,m>>>=1,n-=1,3&m){case 0:c.mode=X;break;case 1:if(k(c),c.mode=bb,b===B){m>>>=2,n-=2;break a}break;case 2:c.mode=$;break;case 3:a.msg="invalid block type",c.mode=lb}m>>>=2,n-=2;break;case X:for(m>>>=7&n,n-=7&n;32>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if((65535&m)!==(m>>>16^65535)){a.msg="invalid stored block lengths",c.mode=lb;break}if(c.length=65535&m,m=0,n=0,c.mode=Y,b===B)break a;case Y:c.mode=Z;case Z:if(q=c.length){if(q>i&&(q=i),q>j&&(q=j),0===q)break a;r.arraySet(f,e,g,q,h),i-=q,g+=q,j-=q,h+=q,c.length-=q;break}c.mode=V;break;case $:for(;14>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(c.nlen=(31&m)+257,m>>>=5,n-=5,c.ndist=(31&m)+1,m>>>=5,n-=5,c.ncode=(15&m)+4,m>>>=4,n-=4,c.nlen>286||c.ndist>30){a.msg="too many length or distance symbols",c.mode=lb;break}c.have=0,c.mode=_;case _:for(;c.have<c.ncode;){for(;3>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}c.lens[Cb[c.have++]]=7&m,m>>>=3,n-=3}for(;c.have<19;)c.lens[Cb[c.have++]]=0;if(c.lencode=c.lendyn,c.lenbits=7,yb={bits:c.lenbits},xb=v(w,c.lens,0,19,c.lencode,0,c.work,yb),c.lenbits=yb.bits,xb){a.msg="invalid code lengths set",c.mode=lb;break}c.have=0,c.mode=ab;case 
ab:for(;c.have<c.nlen+c.ndist;){for(;Ab=c.lencode[m&(1<<c.lenbits)-1],qb=Ab>>>24,rb=Ab>>>16&255,sb=65535&Ab,!(n>=qb);){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(16>sb)m>>>=qb,n-=qb,c.lens[c.have++]=sb;else{if(16===sb){for(zb=qb+2;zb>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(m>>>=qb,n-=qb,0===c.have){a.msg="invalid bit length repeat",c.mode=lb;break}wb=c.lens[c.have-1],q=3+(3&m),m>>>=2,n-=2}else if(17===sb){for(zb=qb+3;zb>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}m>>>=qb,n-=qb,wb=0,q=3+(7&m),m>>>=3,n-=3}else{for(zb=qb+7;zb>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}m>>>=qb,n-=qb,wb=0,q=11+(127&m),m>>>=7,n-=7}if(c.have+q>c.nlen+c.ndist){a.msg="invalid bit length repeat",c.mode=lb;break}for(;q--;)c.lens[c.have++]=wb}}if(c.mode===lb)break;if(0===c.lens[256]){a.msg="invalid code -- missing end-of-block",c.mode=lb;break}if(c.lenbits=9,yb={bits:c.lenbits},xb=v(x,c.lens,0,c.nlen,c.lencode,0,c.work,yb),c.lenbits=yb.bits,xb){a.msg="invalid literal/lengths set",c.mode=lb;break}if(c.distbits=6,c.distcode=c.distdyn,yb={bits:c.distbits},xb=v(y,c.lens,c.nlen,c.ndist,c.distcode,0,c.work,yb),c.distbits=yb.bits,xb){a.msg="invalid distances set",c.mode=lb;break}if(c.mode=bb,b===B)break a;case bb:c.mode=cb;case cb:if(i>=6&&j>=258){a.next_out=h,a.avail_out=j,a.next_in=g,a.avail_in=i,c.hold=m,c.bits=n,u(a,p),h=a.next_out,f=a.output,j=a.avail_out,g=a.next_in,e=a.input,i=a.avail_in,m=c.hold,n=c.bits,c.mode===V&&(c.back=-1);break}for(c.back=0;Ab=c.lencode[m&(1<<c.lenbits)-1],qb=Ab>>>24,rb=Ab>>>16&255,sb=65535&Ab,!(n>=qb);){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(rb&&0===(240&rb)){for(tb=qb,ub=rb,vb=sb;Ab=c.lencode[vb+((m&(1<<tb+ub)-1)>>tb)],qb=Ab>>>24,rb=Ab>>>16&255,sb=65535&Ab,!(n>=tb+qb);){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}m>>>=tb,n-=tb,c.back+=tb}if(m>>>=qb,n-=qb,c.back+=qb,c.length=sb,0===rb){c.mode=hb;break}if(32&rb){c.back=-1,c.mode=V;break}if(64&rb){a.msg="invalid literal/length code",c.mode=lb;break}c.extra=15&rb,c.mode=db;case 
db:if(c.extra){for(zb=c.extra;zb>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}c.length+=m&(1<<c.extra)-1,m>>>=c.extra,n-=c.extra,c.back+=c.extra}c.was=c.length,c.mode=eb;case eb:for(;Ab=c.distcode[m&(1<<c.distbits)-1],qb=Ab>>>24,rb=Ab>>>16&255,sb=65535&Ab,!(n>=qb);){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(0===(240&rb)){for(tb=qb,ub=rb,vb=sb;Ab=c.distcode[vb+((m&(1<<tb+ub)-1)>>tb)],qb=Ab>>>24,rb=Ab>>>16&255,sb=65535&Ab,!(n>=tb+qb);){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}m>>>=tb,n-=tb,c.back+=tb}if(m>>>=qb,n-=qb,c.back+=qb,64&rb){a.msg="invalid distance code",c.mode=lb;break}c.offset=sb,c.extra=15&rb,c.mode=fb;case fb:if(c.extra){for(zb=c.extra;zb>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}c.offset+=m&(1<<c.extra)-1,m>>>=c.extra,n-=c.extra,c.back+=c.extra}if(c.offset>c.dmax){a.msg="invalid distance too far back",c.mode=lb;break}c.mode=gb;case gb:if(0===j)break a;if(q=p-j,c.offset>q){if(q=c.offset-q,q>c.whave&&c.correct){a.msg="invalid distance too far back",c.mode=lb;break}q>c.wnext?(q-=c.wnext,ob=c.wsize-q):ob=c.wnext-q,q>c.length&&(q=c.length),pb=c.window}else pb=f,ob=h-c.offset,q=c.length;q>j&&(q=j),j-=q,c.length-=q;do f[h++]=pb[ob++];while(--q);0===c.length&&(c.mode=cb);break;case hb:if(0===j)break a;f[h++]=c.length,j--,c.mode=cb;break;case ib:if(c.wrap){for(;32>n;){if(0===i)break a;i--,m|=e[g++]<<n,n+=8}if(p-=j,a.total_out+=p,c.total+=p,p&&(a.adler=c.check=c.flags?t(c.check,f,p,h-p):s(c.check,f,p,h-p)),p=j,(c.flags?m:d(m))!==c.check){a.msg="incorrect data check",c.mode=lb;break}m=0,n=0}c.mode=jb;case jb:if(c.wrap&&c.flags){for(;32>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(m!==(4294967295&c.total)){a.msg="incorrect length check",c.mode=lb;break}m=0,n=0}c.mode=kb;case kb:xb=D;break a;case lb:xb=G;break a;case mb:return H;case nb:default:return F}return 
a.next_out=h,a.avail_out=j,a.next_in=g,a.avail_in=i,c.hold=m,c.bits=n,(c.wsize||p!==a.avail_out&&c.mode<lb&&(c.mode<ib||b!==z))&&l(a,a.output,a.next_out,p-a.avail_out)?(c.mode=mb,H):(o-=a.avail_in,p-=a.avail_out,a.total_in+=o,a.total_out+=p,c.total+=p,c.wrap&&p&&(a.adler=c.check=c.flags?t(c.check,f,p,a.next_out-p):s(c.check,f,p,a.next_out-p)),a.data_type=c.bits+(c.last?64:0)+(c.mode===V?128:0)+(c.mode===bb||c.mode===Y?256:0),(0===o&&0===p||b===z)&&xb===C&&(xb=I),xb)}function n(a){if(!a||!a.state)return F;var b=a.state;return b.window&&(b.window=null),a.state=null,C}function o(a,b){var c;return a&&a.state?(c=a.state,0===(2&c.wrap)?F:(c.head=b,b.done=!1,C)):F}var p,q,r=a("../utils/common"),s=a("./adler32"),t=a("./crc32"),u=a("./inffast"),v=a("./inftrees"),w=0,x=1,y=2,z=4,A=5,B=6,C=0,D=1,E=2,F=-2,G=-3,H=-4,I=-5,J=8,K=1,L=2,M=3,N=4,O=5,P=6,Q=7,R=8,S=9,T=10,U=11,V=12,W=13,X=14,Y=15,Z=16,$=17,_=18,ab=19,bb=20,cb=21,db=22,eb=23,fb=24,gb=25,hb=26,ib=27,jb=28,kb=29,lb=30,mb=31,nb=32,ob=852,pb=592,qb=15,rb=qb,sb=!0;c.inflateReset=g,c.inflateReset2=h,c.inflateResetKeep=f,c.inflateInit=j,c.inflateInit2=i,c.inflate=m,c.inflateEnd=n,c.inflateGetHeader=o,c.inflateInfo="pako inflate (from Nodeca project)"},{"../utils/common":27,"./adler32":29,"./crc32":31,"./inffast":34,"./inftrees":36}],36:[function(a,b){"use strict";var c=a("../utils/common"),d=15,e=852,f=592,g=0,h=1,i=2,j=[3,4,5,6,7,8,9,10,11,13,15,17,19,23,27,31,35,43,51,59,67,83,99,115,131,163,195,227,258,0,0],k=[16,16,16,16,16,16,16,16,17,17,17,17,18,18,18,18,19,19,19,19,20,20,20,20,21,21,21,21,16,72,78],l=[1,2,3,4,5,7,9,13,17,25,33,49,65,97,129,193,257,385,513,769,1025,1537,2049,3073,4097,6145,8193,12289,16385,24577,0,0],m=[16,16,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,24,24,25,25,26,26,27,27,28,28,29,29,64,64];b.exports=function(a,b,n,o,p,q,r,s){var t,u,v,w,x,y,z,A,B,C=s.bits,D=0,E=0,F=0,G=0,H=0,I=0,J=0,K=0,L=0,M=0,N=null,O=0,P=new c.Buf16(d+1),Q=new 
c.Buf16(d+1),R=null,S=0;for(D=0;d>=D;D++)P[D]=0;for(E=0;o>E;E++)P[b[n+E]]++;for(H=C,G=d;G>=1&&0===P[G];G--);if(H>G&&(H=G),0===G)return p[q++]=20971520,p[q++]=20971520,s.bits=1,0;for(F=1;G>F&&0===P[F];F++);for(F>H&&(H=F),K=1,D=1;d>=D;D++)if(K<<=1,K-=P[D],0>K)return-1;if(K>0&&(a===g||1!==G))return-1;for(Q[1]=0,D=1;d>D;D++)Q[D+1]=Q[D]+P[D];for(E=0;o>E;E++)0!==b[n+E]&&(r[Q[b[n+E]]++]=E);if(a===g?(N=R=r,y=19):a===h?(N=j,O-=257,R=k,S-=257,y=256):(N=l,R=m,y=-1),M=0,E=0,D=F,x=q,I=H,J=0,v=-1,L=1<<H,w=L-1,a===h&&L>e||a===i&&L>f)return 1;for(var T=0;;){T++,z=D-J,r[E]<y?(A=0,B=r[E]):r[E]>y?(A=R[S+r[E]],B=N[O+r[E]]):(A=96,B=0),t=1<<D-J,u=1<<I,F=u;do u-=t,p[x+(M>>J)+u]=z<<24|A<<16|B|0;while(0!==u);for(t=1<<D-1;M&t;)t>>=1;if(0!==t?(M&=t-1,M+=t):M=0,E++,0===--P[D]){if(D===G)break;D=b[n+r[E]]}if(D>H&&(M&w)!==v){for(0===J&&(J=H),x+=F,I=D-J,K=1<<I;G>I+J&&(K-=P[I+J],!(0>=K));)I++,K<<=1;if(L+=1<<I,a===h&&L>e||a===i&&L>f)return 1;v=M&w,p[v]=H<<24|I<<16|x-q|0}}return 0!==M&&(p[x+M]=D-J<<24|64<<16|0),s.bits=H,0}},{"../utils/common":27}],37:[function(a,b){"use strict";b.exports={2:"need dictionary",1:"stream end",0:"","-1":"file error","-2":"stream error","-3":"data error","-4":"insufficient memory","-5":"buffer error","-6":"incompatible version"}},{}],38:[function(a,b,c){"use strict";function d(a){for(var b=a.length;--b>=0;)a[b]=0}function e(a){return 256>a?gb[a]:gb[256+(a>>>7)]}function f(a,b){a.pending_buf[a.pending++]=255&b,a.pending_buf[a.pending++]=b>>>8&255}function g(a,b,c){a.bi_valid>V-c?(a.bi_buf|=b<<a.bi_valid&65535,f(a,a.bi_buf),a.bi_buf=b>>V-a.bi_valid,a.bi_valid+=c-V):(a.bi_buf|=b<<a.bi_valid&65535,a.bi_valid+=c)}function h(a,b,c){g(a,c[2*b],c[2*b+1])}function i(a,b){var c=0;do c|=1&a,a>>>=1,c<<=1;while(--b>0);return c>>>1}function j(a){16===a.bi_valid?(f(a,a.bi_buf),a.bi_buf=0,a.bi_valid=0):a.bi_valid>=8&&(a.pending_buf[a.pending++]=255&a.bi_buf,a.bi_buf>>=8,a.bi_valid-=8)}function k(a,b){var 
c,d,e,f,g,h,i=b.dyn_tree,j=b.max_code,k=b.stat_desc.static_tree,l=b.stat_desc.has_stree,m=b.stat_desc.extra_bits,n=b.stat_desc.extra_base,o=b.stat_desc.max_length,p=0;for(f=0;U>=f;f++)a.bl_count[f]=0;for(i[2*a.heap[a.heap_max]+1]=0,c=a.heap_max+1;T>c;c++)d=a.heap[c],f=i[2*i[2*d+1]+1]+1,f>o&&(f=o,p++),i[2*d+1]=f,d>j||(a.bl_count[f]++,g=0,d>=n&&(g=m[d-n]),h=i[2*d],a.opt_len+=h*(f+g),l&&(a.static_len+=h*(k[2*d+1]+g)));if(0!==p){do{for(f=o-1;0===a.bl_count[f];)f--;a.bl_count[f]--,a.bl_count[f+1]+=2,a.bl_count[o]--,p-=2}while(p>0);for(f=o;0!==f;f--)for(d=a.bl_count[f];0!==d;)e=a.heap[--c],e>j||(i[2*e+1]!==f&&(a.opt_len+=(f-i[2*e+1])*i[2*e],i[2*e+1]=f),d--)}}function l(a,b,c){var d,e,f=new Array(U+1),g=0;for(d=1;U>=d;d++)f[d]=g=g+c[d-1]<<1;for(e=0;b>=e;e++){var h=a[2*e+1];0!==h&&(a[2*e]=i(f[h]++,h))}}function m(){var a,b,c,d,e,f=new Array(U+1);for(c=0,d=0;O-1>d;d++)for(ib[d]=c,a=0;a<1<<_[d];a++)hb[c++]=d;for(hb[c-1]=d,e=0,d=0;16>d;d++)for(jb[d]=e,a=0;a<1<<ab[d];a++)gb[e++]=d;for(e>>=7;R>d;d++)for(jb[d]=e<<7,a=0;a<1<<ab[d]-7;a++)gb[256+e++]=d;for(b=0;U>=b;b++)f[b]=0;for(a=0;143>=a;)eb[2*a+1]=8,a++,f[8]++;for(;255>=a;)eb[2*a+1]=9,a++,f[9]++;for(;279>=a;)eb[2*a+1]=7,a++,f[7]++;for(;287>=a;)eb[2*a+1]=8,a++,f[8]++;for(l(eb,Q+1,f),a=0;R>a;a++)fb[2*a+1]=5,fb[2*a]=i(a,5);kb=new nb(eb,_,P+1,Q,U),lb=new nb(fb,ab,0,R,U),mb=new nb(new Array(0),bb,0,S,W)}function n(a){var b;for(b=0;Q>b;b++)a.dyn_ltree[2*b]=0;for(b=0;R>b;b++)a.dyn_dtree[2*b]=0;for(b=0;S>b;b++)a.bl_tree[2*b]=0;a.dyn_ltree[2*X]=1,a.opt_len=a.static_len=0,a.last_lit=a.matches=0}function o(a){a.bi_valid>8?f(a,a.bi_buf):a.bi_valid>0&&(a.pending_buf[a.pending++]=a.bi_buf),a.bi_buf=0,a.bi_valid=0}function p(a,b,c,d){o(a),d&&(f(a,c),f(a,~c)),E.arraySet(a.pending_buf,a.window,b,c,a.pending),a.pending+=c}function q(a,b,c,d){var e=2*b,f=2*c;return a[e]<a[f]||a[e]===a[f]&&d[b]<=d[c]}function r(a,b,c){for(var 
d=a.heap[c],e=c<<1;e<=a.heap_len&&(e<a.heap_len&&q(b,a.heap[e+1],a.heap[e],a.depth)&&e++,!q(b,d,a.heap[e],a.depth));)a.heap[c]=a.heap[e],c=e,e<<=1;a.heap[c]=d}function s(a,b,c){var d,f,i,j,k=0;if(0!==a.last_lit)do d=a.pending_buf[a.d_buf+2*k]<<8|a.pending_buf[a.d_buf+2*k+1],f=a.pending_buf[a.l_buf+k],k++,0===d?h(a,f,b):(i=hb[f],h(a,i+P+1,b),j=_[i],0!==j&&(f-=ib[i],g(a,f,j)),d--,i=e(d),h(a,i,c),j=ab[i],0!==j&&(d-=jb[i],g(a,d,j)));while(k<a.last_lit);h(a,X,b)}function t(a,b){var c,d,e,f=b.dyn_tree,g=b.stat_desc.static_tree,h=b.stat_desc.has_stree,i=b.stat_desc.elems,j=-1;for(a.heap_len=0,a.heap_max=T,c=0;i>c;c++)0!==f[2*c]?(a.heap[++a.heap_len]=j=c,a.depth[c]=0):f[2*c+1]=0;for(;a.heap_len<2;)e=a.heap[++a.heap_len]=2>j?++j:0,f[2*e]=1,a.depth[e]=0,a.opt_len--,h&&(a.static_len-=g[2*e+1]);for(b.max_code=j,c=a.heap_len>>1;c>=1;c--)r(a,f,c);e=i;do c=a.heap[1],a.heap[1]=a.heap[a.heap_len--],r(a,f,1),d=a.heap[1],a.heap[--a.heap_max]=c,a.heap[--a.heap_max]=d,f[2*e]=f[2*c]+f[2*d],a.depth[e]=(a.depth[c]>=a.depth[d]?a.depth[c]:a.depth[d])+1,f[2*c+1]=f[2*d+1]=e,a.heap[1]=e++,r(a,f,1);while(a.heap_len>=2);a.heap[--a.heap_max]=a.heap[1],k(a,b),l(f,j,a.bl_count)}function u(a,b,c){var d,e,f=-1,g=b[1],h=0,i=7,j=4;for(0===g&&(i=138,j=3),b[2*(c+1)+1]=65535,d=0;c>=d;d++)e=g,g=b[2*(d+1)+1],++h<i&&e===g||(j>h?a.bl_tree[2*e]+=h:0!==e?(e!==f&&a.bl_tree[2*e]++,a.bl_tree[2*Y]++):10>=h?a.bl_tree[2*Z]++:a.bl_tree[2*$]++,h=0,f=e,0===g?(i=138,j=3):e===g?(i=6,j=3):(i=7,j=4))}function v(a,b,c){var d,e,f=-1,i=b[1],j=0,k=7,l=4;for(0===i&&(k=138,l=3),d=0;c>=d;d++)if(e=i,i=b[2*(d+1)+1],!(++j<k&&e===i)){if(l>j){do h(a,e,a.bl_tree);while(0!==--j)}else 0!==e?(e!==f&&(h(a,e,a.bl_tree),j--),h(a,Y,a.bl_tree),g(a,j-3,2)):10>=j?(h(a,Z,a.bl_tree),g(a,j-3,3)):(h(a,$,a.bl_tree),g(a,j-11,7));j=0,f=e,0===i?(k=138,l=3):e===i?(k=6,l=3):(k=7,l=4)}}function w(a){var b;for(u(a,a.dyn_ltree,a.l_desc.max_code),u(a,a.dyn_dtree,a.d_desc.max_code),t(a,a.bl_desc),b=S-1;b>=3&&0===a.bl_tree[2*cb[b]+1];b--);return 
a.opt_len+=3*(b+1)+5+5+4,b}function x(a,b,c,d){var e;for(g(a,b-257,5),g(a,c-1,5),g(a,d-4,4),e=0;d>e;e++)g(a,a.bl_tree[2*cb[e]+1],3);v(a,a.dyn_ltree,b-1),v(a,a.dyn_dtree,c-1)}function y(a){var b,c=4093624447;for(b=0;31>=b;b++,c>>>=1)if(1&c&&0!==a.dyn_ltree[2*b])return G;if(0!==a.dyn_ltree[18]||0!==a.dyn_ltree[20]||0!==a.dyn_ltree[26])return H;for(b=32;P>b;b++)if(0!==a.dyn_ltree[2*b])return H;return G}function z(a){pb||(m(),pb=!0),a.l_desc=new ob(a.dyn_ltree,kb),a.d_desc=new ob(a.dyn_dtree,lb),a.bl_desc=new ob(a.bl_tree,mb),a.bi_buf=0,a.bi_valid=0,n(a)}function A(a,b,c,d){g(a,(J<<1)+(d?1:0),3),p(a,b,c,!0)}function B(a){g(a,K<<1,3),h(a,X,eb),j(a)}function C(a,b,c,d){var e,f,h=0;a.level>0?(a.strm.data_type===I&&(a.strm.data_type=y(a)),t(a,a.l_desc),t(a,a.d_desc),h=w(a),e=a.opt_len+3+7>>>3,f=a.static_len+3+7>>>3,e>=f&&(e=f)):e=f=c+5,e>=c+4&&-1!==b?A(a,b,c,d):a.strategy===F||f===e?(g(a,(K<<1)+(d?1:0),3),s(a,eb,fb)):(g(a,(L<<1)+(d?1:0),3),x(a,a.l_desc.max_code+1,a.d_desc.max_code+1,h+1),s(a,a.dyn_ltree,a.dyn_dtree)),n(a),d&&o(a)}function D(a,b,c){return a.pending_buf[a.d_buf+2*a.last_lit]=b>>>8&255,a.pending_buf[a.d_buf+2*a.last_lit+1]=255&b,a.pending_buf[a.l_buf+a.last_lit]=255&c,a.last_lit++,0===b?a.dyn_ltree[2*c]++:(a.matches++,b--,a.dyn_ltree[2*(hb[c]+P+1)]++,a.dyn_dtree[2*e(b)]++),a.last_lit===a.lit_bufsize-1}var E=a("../utils/common"),F=4,G=0,H=1,I=2,J=0,K=1,L=2,M=3,N=258,O=29,P=256,Q=P+1+O,R=30,S=19,T=2*Q+1,U=15,V=16,W=7,X=256,Y=16,Z=17,$=18,_=[0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,0],ab=[0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13],bb=[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,3,7],cb=[16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15],db=512,eb=new Array(2*(Q+2));d(eb);var fb=new Array(2*R);d(fb);var gb=new Array(db);d(gb);var hb=new Array(N-M+1);d(hb);var ib=new Array(O);d(ib);var jb=new Array(R);d(jb);var 
kb,lb,mb,nb=function(a,b,c,d,e){this.static_tree=a,this.extra_bits=b,this.extra_base=c,this.elems=d,this.max_length=e,this.has_stree=a&&a.length},ob=function(a,b){this.dyn_tree=a,this.max_code=0,this.stat_desc=b},pb=!1;c._tr_init=z,c._tr_stored_block=A,c._tr_flush_block=C,c._tr_tally=D,c._tr_align=B},{"../utils/common":27}],39:[function(a,b){"use strict";function c(){this.input=null,this.next_in=0,this.avail_in=0,this.total_in=0,this.output=null,this.next_out=0,this.avail_out=0,this.total_out=0,this.msg="",this.state=null,this.data_type=2,this.adler=0}b.exports=c},{}]},{},[9])(9)});'use strict';if(tr.isVinn){global.window={};}'use strict';if(tr.isVinn){global.JSZip=global.window.JSZip;global.window=undefined;}else if(tr.isNode){const jsZipAbsPath=HTMLImportsLoader.hrefToAbsolutePath('/jszip.min.js');const jsZipModule=require(jsZipAbsPath);global.JSZip=jsZipModule;}'use strict';tr.exportTo('tr.e.importer',function(){const GZIP_MEMBER_HEADER_ID_SIZE=3;const GZIP_HEADER_ID1=0x1f;const GZIP_HEADER_ID2=0x8b;const GZIP_DEFLATE_COMPRESSION=8;function GzipImporter(model,eventData){this.inflateAsTraceStream=false;if(typeof(eventData)==='string'||eventData instanceof String){eventData=JSZip.utils.transformTo('uint8array',eventData);}else if(eventData instanceof ArrayBuffer){eventData=new Uint8Array(eventData);}else if(eventData instanceof tr.b.InMemoryTraceStream){eventData=eventData.data;this.inflateAsTraceStream_=true;}else{throw new Error('Unknown gzip data format');} +return{InMemoryTraceStream,};});!function(a){if("object"==typeof exports&&"undefined"!=typeof module)module.exports=a();else if("function"==typeof define&&define.amd)define([],a);else{var b;"undefined"!=typeof window?b=window:"undefined"!=typeof global?b=global:"undefined"!=typeof self&&(b=self),b.JSZip=a()}}(function(){return function a(b,c,d){function e(g,h){if(!c[g]){if(!b[g]){var i="function"==typeof require&&require;if(!h&&i)return i(g,!0);if(f)return f(g,!0);throw new Error("Cannot find module 
'"+g+"'")}var j=c[g]={exports:{}};b[g][0].call(j.exports,function(a){var c=b[g][1][a];return e(c?c:a)},j,j.exports,a,b,c,d)}return c[g].exports}for(var f="function"==typeof require&&require,g=0;g<d.length;g++)e(d[g]);return e}({1:[function(a,b,c){"use strict";var d="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/=";c.encode=function(a){for(var b,c,e,f,g,h,i,j="",k=0;k<a.length;)b=a.charCodeAt(k++),c=a.charCodeAt(k++),e=a.charCodeAt(k++),f=b>>2,g=(3&b)<<4|c>>4,h=(15&c)<<2|e>>6,i=63&e,isNaN(c)?h=i=64:isNaN(e)&&(i=64),j=j+d.charAt(f)+d.charAt(g)+d.charAt(h)+d.charAt(i);return j},c.decode=function(a){var b,c,e,f,g,h,i,j="",k=0;for(a=a.replace(/[^A-Za-z0-9\+\/\=]/g,"");k<a.length;)f=d.indexOf(a.charAt(k++)),g=d.indexOf(a.charAt(k++)),h=d.indexOf(a.charAt(k++)),i=d.indexOf(a.charAt(k++)),b=f<<2|g>>4,c=(15&g)<<4|h>>2,e=(3&h)<<6|i,j+=String.fromCharCode(b),64!=h&&(j+=String.fromCharCode(c)),64!=i&&(j+=String.fromCharCode(e));return j}},{}],2:[function(a,b){"use strict";function c(){this.compressedSize=0,this.uncompressedSize=0,this.crc32=0,this.compressionMethod=null,this.compressedContent=null}c.prototype={getContent:function(){return null},getCompressedContent:function(){return null}},b.exports=c},{}],3:[function(a,b,c){"use strict";c.STORE={magic:"\x00\x00",compress:function(a){return a},uncompress:function(a){return a},compressInputType:null,uncompressInputType:null},c.DEFLATE=a("./flate")},{"./flate":8}],4:[function(a,b){"use strict";var 
c=a("./utils"),d=[0,1996959894,3993919788,2567524794,124634137,1886057615,3915621685,2657392035,249268274,2044508324,3772115230,2547177864,162941995,2125561021,3887607047,2428444049,498536548,1789927666,4089016648,2227061214,450548861,1843258603,4107580753,2211677639,325883990,1684777152,4251122042,2321926636,335633487,1661365465,4195302755,2366115317,997073096,1281953886,3579855332,2724688242,1006888145,1258607687,3524101629,2768942443,901097722,1119000684,3686517206,2898065728,853044451,1172266101,3705015759,2882616665,651767980,1373503546,3369554304,3218104598,565507253,1454621731,3485111705,3099436303,671266974,1594198024,3322730930,2970347812,795835527,1483230225,3244367275,3060149565,1994146192,31158534,2563907772,4023717930,1907459465,112637215,2680153253,3904427059,2013776290,251722036,2517215374,3775830040,2137656763,141376813,2439277719,3865271297,1802195444,476864866,2238001368,4066508878,1812370925,453092731,2181625025,4111451223,1706088902,314042704,2344532202,4240017532,1658658271,366619977,2362670323,4224994405,1303535960,984961486,2747007092,3569037538,1256170817,1037604311,2765210733,3554079995,1131014506,879679996,2909243462,3663771856,1141124467,855842277,2852801631,3708648649,1342533948,654459306,3188396048,3373015174,1466479909,544179635,3110523913,3462522015,1591671054,702138776,2966460450,3352799412,1504918807,783551873,3082640443,3233442989,3988292384,2596254646,62317068,1957810842,3939845945,2647816111,81470997,1943803523,3814918930,2489596804,225274430,2053790376,3826175755,2466906013,167816743,2097651377,4027552580,2265490386,503444072,1762050814,4150417245,2154129355,426522225,1852507879,4275313526,2312317920,282753626,1742555852,4189708143,2394877945,397917763,1622183637,3604390888,2714866558,953729732,1340076626,3518719985,2797360999,1068828381,1219638859,3624741850,2936675148,906185462,1090812512,3747672003,2825379669,829329135,1181335161,3412177804,3160834842,628085408,1382605366,3423369109,3138078467,570562233,1426400815,3317316542,2
998733608,733239954,1555261956,3268935591,3050360625,752459403,1541320221,2607071920,3965973030,1969922972,40735498,2617837225,3943577151,1913087877,83908371,2512341634,3803740692,2075208622,213261112,2463272603,3855990285,2094854071,198958881,2262029012,4057260610,1759359992,534414190,2176718541,4139329115,1873836001,414664567,2282248934,4279200368,1711684554,285281116,2405801727,4167216745,1634467795,376229701,2685067896,3608007406,1308918612,956543938,2808555105,3495958263,1231636301,1047427035,2932959818,3654703836,1088359270,936918e3,2847714899,3736837829,1202900863,817233897,3183342108,3401237130,1404277552,615818150,3134207493,3453421203,1423857449,601450431,3009837614,3294710456,1567103746,711928724,3020668471,3272380065,1510334235,755167117];b.exports=function(a,b){if("undefined"==typeof a||!a.length)return 0;var e="string"!==c.getTypeOf(a);"undefined"==typeof b&&(b=0);var f=0,g=0,h=0;b=-1^b;for(var i=0,j=a.length;j>i;i++)h=e?a[i]:a.charCodeAt(i),g=255&(b^h),f=d[g],b=b>>>8^f;return-1^b}},{"./utils":21}],5:[function(a,b){"use strict";function c(){this.data=null,this.length=0,this.index=0}var d=a("./utils");c.prototype={checkOffset:function(a){this.checkIndex(this.index+a)},checkIndex:function(a){if(this.length<a||0>a)throw new Error("End of data reached (data length = "+this.length+", asked index = "+a+"). 
Corrupted zip ?")},setIndex:function(a){this.checkIndex(a),this.index=a},skip:function(a){this.setIndex(this.index+a)},byteAt:function(){},readInt:function(a){var b,c=0;for(this.checkOffset(a),b=this.index+a-1;b>=this.index;b--)c=(c<<8)+this.byteAt(b);return this.index+=a,c},readString:function(a){return d.transformTo("string",this.readData(a))},readData:function(){},lastIndexOfSignature:function(){},readDate:function(){var a=this.readInt(4);return new Date((a>>25&127)+1980,(a>>21&15)-1,a>>16&31,a>>11&31,a>>5&63,(31&a)<<1)}},b.exports=c},{"./utils":21}],6:[function(a,b,c){"use strict";c.base64=!1,c.binary=!1,c.dir=!1,c.createFolders=!1,c.date=null,c.compression=null,c.comment=null},{}],7:[function(a,b,c){"use strict";var d=a("./utils");c.string2binary=function(a){return d.string2binary(a)},c.string2Uint8Array=function(a){return d.transformTo("uint8array",a)},c.uint8Array2String=function(a){return d.transformTo("string",a)},c.string2Blob=function(a){var b=d.transformTo("arraybuffer",a);return d.arrayBuffer2Blob(b)},c.arrayBuffer2Blob=function(a){return d.arrayBuffer2Blob(a)},c.transformTo=function(a,b){return d.transformTo(a,b)},c.getTypeOf=function(a){return d.getTypeOf(a)},c.checkSupport=function(a){return d.checkSupport(a)},c.MAX_VALUE_16BITS=d.MAX_VALUE_16BITS,c.MAX_VALUE_32BITS=d.MAX_VALUE_32BITS,c.pretty=function(a){return d.pretty(a)},c.findCompression=function(a){return d.findCompression(a)},c.isRegExp=function(a){return d.isRegExp(a)}},{"./utils":21}],8:[function(a,b,c){"use strict";var d="undefined"!=typeof Uint8Array&&"undefined"!=typeof Uint16Array&&"undefined"!=typeof Uint32Array,e=a("pako");c.uncompressInputType=d?"uint8array":"array",c.compressInputType=d?"uint8array":"array",c.magic="\b\x00",c.compress=function(a){return e.deflateRaw(a)},c.uncompress=function(a){return e.inflateRaw(a)}},{pako:24}],9:[function(a,b){"use strict";function c(a,b){return this instanceof 
c?(this.files={},this.comment=null,this.root="",a&&this.load(a,b),void(this.clone=function(){var a=new c;for(var b in this)"function"!=typeof this[b]&&(a[b]=this[b]);return a})):new c(a,b)}var d=a("./base64");c.prototype=a("./object"),c.prototype.load=a("./load"),c.support=a("./support"),c.defaults=a("./defaults"),c.utils=a("./deprecatedPublicUtils"),c.base64={encode:function(a){return d.encode(a)},decode:function(a){return d.decode(a)}},c.compressions=a("./compressions"),b.exports=c},{"./base64":1,"./compressions":3,"./defaults":6,"./deprecatedPublicUtils":7,"./load":10,"./object":13,"./support":17}],10:[function(a,b){"use strict";var c=a("./base64"),d=a("./zipEntries");b.exports=function(a,b){var e,f,g,h;for(b=b||{},b.base64&&(a=c.decode(a)),f=new d(a,b),e=f.files,g=0;g<e.length;g++)h=e[g],this.file(h.fileName,h.decompressed,{binary:!0,optimizedBinaryString:!0,date:h.date,dir:h.dir,comment:h.fileComment.length?h.fileComment:null,createFolders:b.createFolders});return f.zipComment.length&&(this.comment=f.zipComment),this}},{"./base64":1,"./zipEntries":22}],11:[function(a,b){(function(a){"use strict";b.exports=function(b,c){return new a(b,c)},b.exports.test=function(b){return a.isBuffer(b)}}).call(this,"undefined"!=typeof Buffer?Buffer:void 0)},{}],12:[function(a,b){"use strict";function c(a){this.data=a,this.length=this.data.length,this.index=0}var d=a("./uint8ArrayReader");c.prototype=new d,c.prototype.readData=function(a){this.checkOffset(a);var b=this.data.slice(this.index,this.index+a);return this.index+=a,b},b.exports=c},{"./uint8ArrayReader":18}],13:[function(a,b){"use strict";var c=a("./support"),d=a("./utils"),e=a("./crc32"),f=a("./signature"),g=a("./defaults"),h=a("./base64"),i=a("./compressions"),j=a("./compressedObject"),k=a("./nodeBuffer"),l=a("./utf8"),m=a("./stringWriter"),n=a("./uint8ArrayWriter"),o=function(a){if(a._data instanceof j&&(a._data=a._data.getContent(),a.options.binary=!0,a.options.base64=!1,"uint8array"===d.getTypeOf(a._data))){var 
b=a._data;a._data=new Uint8Array(b.length),0!==b.length&&a._data.set(b,0)}return a._data},p=function(a){var b=o(a),e=d.getTypeOf(b);return"string"===e?!a.options.binary&&c.nodebuffer?k(b,"utf-8"):a.asBinary():b},q=function(a){var b=o(this);return null===b||"undefined"==typeof b?"":(this.options.base64&&(b=h.decode(b)),b=a&&this.options.binary?A.utf8decode(b):d.transformTo("string",b),a||this.options.binary||(b=d.transformTo("string",A.utf8encode(b))),b)},r=function(a,b,c){this.name=a,this.dir=c.dir,this.date=c.date,this.comment=c.comment,this._data=b,this.options=c,this._initialMetadata={dir:c.dir,date:c.date}};r.prototype={asText:function(){return q.call(this,!0)},asBinary:function(){return q.call(this,!1)},asNodeBuffer:function(){var a=p(this);return d.transformTo("nodebuffer",a)},asUint8Array:function(){var a=p(this);return d.transformTo("uint8array",a)},asArrayBuffer:function(){return this.asUint8Array().buffer}};var s=function(a,b){var c,d="";for(c=0;b>c;c++)d+=String.fromCharCode(255&a),a>>>=8;return d},t=function(){var a,b,c={};for(a=0;a<arguments.length;a++)for(b in arguments[a])arguments[a].hasOwnProperty(b)&&"undefined"==typeof c[b]&&(c[b]=arguments[a][b]);return c},u=function(a){return a=a||{},a.base64!==!0||null!==a.binary&&void 0!==a.binary||(a.binary=!0),a=t(a,g),a.date=a.date||new Date,null!==a.compression&&(a.compression=a.compression.toUpperCase()),a},v=function(a,b,c){var e,f=d.getTypeOf(b);if(c=u(c),c.createFolders&&(e=w(a))&&x.call(this,e,!0),c.dir||null===b||"undefined"==typeof b)c.base64=!1,c.binary=!1,b=null;else if("string"===f)c.binary&&!c.base64&&c.optimizedBinaryString!==!0&&(b=d.string2binary(b));else{if(c.base64=!1,c.binary=!0,!(f||b instanceof j))throw new Error("The data of '"+a+"' is in an unsupported format !");"arraybuffer"===f&&(b=d.transformTo("uint8array",b))}var g=new r(a,b,c);return this.files[a]=g,g},w=function(a){"/"==a.slice(-1)&&(a=a.substring(0,a.length-1));var b=a.lastIndexOf("/");return 
b>0?a.substring(0,b):""},x=function(a,b){return"/"!=a.slice(-1)&&(a+="/"),b="undefined"!=typeof b?b:!1,this.files[a]||v.call(this,a,null,{dir:!0,createFolders:b}),this.files[a]},y=function(a,b){var c,f=new j;return a._data instanceof j?(f.uncompressedSize=a._data.uncompressedSize,f.crc32=a._data.crc32,0===f.uncompressedSize||a.dir?(b=i.STORE,f.compressedContent="",f.crc32=0):a._data.compressionMethod===b.magic?f.compressedContent=a._data.getCompressedContent():(c=a._data.getContent(),f.compressedContent=b.compress(d.transformTo(b.compressInputType,c)))):(c=p(a),(!c||0===c.length||a.dir)&&(b=i.STORE,c=""),f.uncompressedSize=c.length,f.crc32=e(c),f.compressedContent=b.compress(d.transformTo(b.compressInputType,c))),f.compressedSize=f.compressedContent.length,f.compressionMethod=b.magic,f},z=function(a,b,c,g){var h,i,j,k,m=(c.compressedContent,d.transformTo("string",l.utf8encode(b.name))),n=b.comment||"",o=d.transformTo("string",l.utf8encode(n)),p=m.length!==b.name.length,q=o.length!==n.length,r=b.options,t="",u="",v="";j=b._initialMetadata.dir!==b.dir?b.dir:r.dir,k=b._initialMetadata.date!==b.date?b.date:r.date,h=k.getHours(),h<<=6,h|=k.getMinutes(),h<<=5,h|=k.getSeconds()/2,i=k.getFullYear()-1980,i<<=4,i|=k.getMonth()+1,i<<=5,i|=k.getDate(),p&&(u=s(1,1)+s(e(m),4)+m,t+="up"+s(u.length,2)+u),q&&(v=s(1,1)+s(this.crc32(o),4)+o,t+="uc"+s(v.length,2)+v);var w="";w+="\n\x00",w+=p||q?"\x00\b":"\x00\x00",w+=c.compressionMethod,w+=s(h,2),w+=s(i,2),w+=s(c.crc32,4),w+=s(c.compressedSize,4),w+=s(c.uncompressedSize,4),w+=s(m.length,2),w+=s(t.length,2);var x=f.LOCAL_FILE_HEADER+w+m+t,y=f.CENTRAL_FILE_HEADER+"\x00"+w+s(o.length,2)+"\x00\x00\x00\x00"+(j===!0?"\x00\x00\x00":"\x00\x00\x00\x00")+s(g,4)+m+t+o;return{fileRecord:x,dirRecord:y,compressedObject:c}},A={load:function(){throw new Error("Load method is not defined. 
Is the file jszip-load.js included ?")},filter:function(a){var b,c,d,e,f=[];for(b in this.files)this.files.hasOwnProperty(b)&&(d=this.files[b],e=new r(d.name,d._data,t(d.options)),c=b.slice(this.root.length,b.length),b.slice(0,this.root.length)===this.root&&a(c,e)&&f.push(e));return f},file:function(a,b,c){if(1===arguments.length){if(d.isRegExp(a)){var e=a;return this.filter(function(a,b){return!b.dir&&e.test(a)})}return this.filter(function(b,c){return!c.dir&&b===a})[0]||null}return a=this.root+a,v.call(this,a,b,c),this},folder:function(a){if(!a)return this;if(d.isRegExp(a))return this.filter(function(b,c){return c.dir&&a.test(b)});var b=this.root+a,c=x.call(this,b),e=this.clone();return e.root=c.name,e},remove:function(a){a=this.root+a;var b=this.files[a];if(b||("/"!=a.slice(-1)&&(a+="/"),b=this.files[a]),b&&!b.dir)delete this.files[a];else for(var c=this.filter(function(b,c){return c.name.slice(0,a.length)===a}),d=0;d<c.length;d++)delete this.files[c[d].name];return this},generate:function(a){a=t(a||{},{base64:!0,compression:"STORE",type:"base64",comment:null}),d.checkSupport(a.type);var b,c,e=[],g=0,j=0,k=d.transformTo("string",this.utf8encode(a.comment||this.comment||""));for(var l in this.files)if(this.files.hasOwnProperty(l)){var o=this.files[l],p=o.options.compression||a.compression.toUpperCase(),q=i[p];if(!q)throw new Error(p+" is not a valid compression method !");var r=y.call(this,o,q),u=z.call(this,l,o,r,g);g+=u.fileRecord.length+r.compressedSize,j+=u.dirRecord.length,e.push(u)}var v="";v=f.CENTRAL_DIRECTORY_END+"\x00\x00\x00\x00"+s(e.length,2)+s(e.length,2)+s(j,4)+s(g,4)+s(k.length,2)+k;var w=a.type.toLowerCase();for(b="uint8array"===w||"arraybuffer"===w||"blob"===w||"nodebuffer"===w?new n(g+j+v.length):new m(g+j+v.length),c=0;c<e.length;c++)b.append(e[c].fileRecord),b.append(e[c].compressedObject.compressedContent);for(c=0;c<e.length;c++)b.append(e[c].dirRecord);b.append(v);var 
x=b.finalize();switch(a.type.toLowerCase()){case"uint8array":case"arraybuffer":case"nodebuffer":return d.transformTo(a.type.toLowerCase(),x);case"blob":return d.arrayBuffer2Blob(d.transformTo("arraybuffer",x));case"base64":return a.base64?h.encode(x):x;default:return x}},crc32:function(a,b){return e(a,b)},utf8encode:function(a){return d.transformTo("string",l.utf8encode(a))},utf8decode:function(a){return l.utf8decode(a)}};b.exports=A},{"./base64":1,"./compressedObject":2,"./compressions":3,"./crc32":4,"./defaults":6,"./nodeBuffer":11,"./signature":14,"./stringWriter":16,"./support":17,"./uint8ArrayWriter":19,"./utf8":20,"./utils":21}],14:[function(a,b,c){"use strict";c.LOCAL_FILE_HEADER="PK",c.CENTRAL_FILE_HEADER="PK",c.CENTRAL_DIRECTORY_END="PK",c.ZIP64_CENTRAL_DIRECTORY_LOCATOR="PK",c.ZIP64_CENTRAL_DIRECTORY_END="PK",c.DATA_DESCRIPTOR="PK\b"},{}],15:[function(a,b){"use strict";function c(a,b){this.data=a,b||(this.data=e.string2binary(this.data)),this.length=this.data.length,this.index=0}var d=a("./dataReader"),e=a("./utils");c.prototype=new d,c.prototype.byteAt=function(a){return this.data.charCodeAt(a)},c.prototype.lastIndexOfSignature=function(a){return this.data.lastIndexOf(a)},c.prototype.readData=function(a){this.checkOffset(a);var b=this.data.slice(this.index,this.index+a);return this.index+=a,b},b.exports=c},{"./dataReader":5,"./utils":21}],16:[function(a,b){"use strict";var c=a("./utils"),d=function(){this.data=[]};d.prototype={append:function(a){a=c.transformTo("string",a),this.data.push(a)},finalize:function(){return this.data.join("")}},b.exports=d},{"./utils":21}],17:[function(a,b,c){(function(a){"use strict";if(c.base64=!0,c.array=!0,c.string=!0,c.arraybuffer="undefined"!=typeof ArrayBuffer&&"undefined"!=typeof Uint8Array,c.nodebuffer="undefined"!=typeof a,c.uint8array="undefined"!=typeof Uint8Array,"undefined"==typeof ArrayBuffer)c.blob=!1;else{var b=new ArrayBuffer(0);try{c.blob=0===new Blob([b],{type:"application/zip"}).size}catch(d){try{var 
e=window.BlobBuilder||window.WebKitBlobBuilder||window.MozBlobBuilder||window.MSBlobBuilder,f=new e;f.append(b),c.blob=0===f.getBlob("application/zip").size}catch(d){c.blob=!1}}}}).call(this,"undefined"!=typeof Buffer?Buffer:void 0)},{}],18:[function(a,b){"use strict";function c(a){a&&(this.data=a,this.length=this.data.length,this.index=0)}var d=a("./dataReader");c.prototype=new d,c.prototype.byteAt=function(a){return this.data[a]},c.prototype.lastIndexOfSignature=function(a){for(var b=a.charCodeAt(0),c=a.charCodeAt(1),d=a.charCodeAt(2),e=a.charCodeAt(3),f=this.length-4;f>=0;--f)if(this.data[f]===b&&this.data[f+1]===c&&this.data[f+2]===d&&this.data[f+3]===e)return f;return-1},c.prototype.readData=function(a){if(this.checkOffset(a),0===a)return new Uint8Array(0);var b=this.data.subarray(this.index,this.index+a);return this.index+=a,b},b.exports=c},{"./dataReader":5}],19:[function(a,b){"use strict";var c=a("./utils"),d=function(a){this.data=new Uint8Array(a),this.index=0};d.prototype={append:function(a){0!==a.length&&(a=c.transformTo("uint8array",a),this.data.set(a,this.index),this.index+=a.length)},finalize:function(){return this.data}},b.exports=d},{"./utils":21}],20:[function(a,b,c){"use strict";for(var d=a("./utils"),e=a("./support"),f=a("./nodeBuffer"),g=new Array(256),h=0;256>h;h++)g[h]=h>=252?6:h>=248?5:h>=240?4:h>=224?3:h>=192?2:1;g[254]=g[254]=1;var i=function(a){var b,c,d,f,g,h=a.length,i=0;for(f=0;h>f;f++)c=a.charCodeAt(f),55296===(64512&c)&&h>f+1&&(d=a.charCodeAt(f+1),56320===(64512&d)&&(c=65536+(c-55296<<10)+(d-56320),f++)),i+=128>c?1:2048>c?2:65536>c?3:4;for(b=e.uint8array?new Uint8Array(i):new Array(i),g=0,f=0;i>g;f++)c=a.charCodeAt(f),55296===(64512&c)&&h>f+1&&(d=a.charCodeAt(f+1),56320===(64512&d)&&(c=65536+(c-55296<<10)+(d-56320),f++)),128>c?b[g++]=c:2048>c?(b[g++]=192|c>>>6,b[g++]=128|63&c):65536>c?(b[g++]=224|c>>>12,b[g++]=128|c>>>6&63,b[g++]=128|63&c):(b[g++]=240|c>>>18,b[g++]=128|c>>>12&63,b[g++]=128|c>>>6&63,b[g++]=128|63&c);return 
// Module 20 (utf8) continued: j() = utf8border — backs up from position b to the last complete
// UTF-8 sequence boundary; k() = buf2string — decodes UTF-8 bytes to a UTF-16 char-code array,
// emitting 65533 (U+FFFD) for invalid sequences and rebuilding surrogate pairs for >=0x10000.
// utf8encode/utf8decode prefer the node Buffer path when available, else chunk 64KiB at a time
// through utf8border so multi-byte sequences are never split.
// Module 21 (./utils) begins: d=identity, e=string->byte-array copy, f=applyFromCharCode with a
// try/shrink fallback for engines that cap Function.apply argument counts, string2binary,
// and arrayBuffer2Blob (with legacy BlobBuilder fallback continuing on the next line).
b},j=function(a,b){var c;for(b=b||a.length,b>a.length&&(b=a.length),c=b-1;c>=0&&128===(192&a[c]);)c--;return 0>c?b:0===c?b:c+g[a[c]]>b?c:b},k=function(a){var b,c,e,f,h=a.length,i=new Array(2*h);for(c=0,b=0;h>b;)if(e=a[b++],128>e)i[c++]=e;else if(f=g[e],f>4)i[c++]=65533,b+=f-1;else{for(e&=2===f?31:3===f?15:7;f>1&&h>b;)e=e<<6|63&a[b++],f--;f>1?i[c++]=65533:65536>e?i[c++]=e:(e-=65536,i[c++]=55296|e>>10&1023,i[c++]=56320|1023&e)}return i.length!==c&&(i.subarray?i=i.subarray(0,c):i.length=c),d.applyFromCharCode(i)};c.utf8encode=function(a){return e.nodebuffer?f(a,"utf-8"):i(a)},c.utf8decode=function(a){if(e.nodebuffer)return d.transformTo("nodebuffer",a).toString("utf-8");a=d.transformTo(e.uint8array?"uint8array":"array",a);for(var b=[],c=0,f=a.length,g=65536;f>c;){var h=j(a,Math.min(c+g,f));b.push(e.uint8array?k(a.subarray(c,h)):k(a.slice(c,h))),c=h}return b.join("")}},{"./nodeBuffer":11,"./support":17,"./utils":21}],21:[function(a,b,c){"use strict";function d(a){return a}function e(a,b){for(var c=0;c<a.length;++c)b[c]=255&a.charCodeAt(c);return b}function f(a){var b=65536,d=[],e=a.length,f=c.getTypeOf(a),g=0,h=!0;try{switch(f){case"uint8array":String.fromCharCode.apply(null,new Uint8Array(0));break;case"nodebuffer":String.fromCharCode.apply(null,j(0))}}catch(i){h=!1}if(!h){for(var k="",l=0;l<a.length;l++)k+=String.fromCharCode(a[l]);return k}for(;e>g&&b>1;)try{d.push("array"===f||"nodebuffer"===f?String.fromCharCode.apply(null,a.slice(g,Math.min(g+b,e))):String.fromCharCode.apply(null,a.subarray(g,Math.min(g+b,e)))),g+=b}catch(i){b=Math.floor(b/2)}return d.join("")}function g(a,b){for(var c=0;c<a.length;c++)b[c]=a[c];return b}var h=a("./support"),i=a("./compressions"),j=a("./nodeBuffer");c.string2binary=function(a){for(var b="",c=0;c<a.length;c++)b+=String.fromCharCode(255&a.charCodeAt(c));return b},c.arrayBuffer2Blob=function(a){c.checkSupport("blob");try{return new Blob([a],{type:"application/zip"})}catch(b){try{var 
// utils continued: k is a 5x5 conversion table keyed [sourceType][targetType] over
// string/array/arraybuffer/uint8array/nodebuffer; transformTo dispatches through it after
// getTypeOf (duck-typing + support flags). MAX_VALUE_16BITS=0xFFFF and MAX_VALUE_32BITS=-1
// (0xFFFFFFFF as a signed 32-bit read) are the ZIP "value saturated, use Zip64" sentinels.
// pretty() hex-dumps a signature string for error messages.
d=window.BlobBuilder||window.WebKitBlobBuilder||window.MozBlobBuilder||window.MSBlobBuilder,e=new d;return e.append(a),e.getBlob("application/zip")}catch(b){throw new Error("Bug : can't construct the Blob.")}}},c.applyFromCharCode=f;var k={};k.string={string:d,array:function(a){return e(a,new Array(a.length))},arraybuffer:function(a){return k.string.uint8array(a).buffer},uint8array:function(a){return e(a,new Uint8Array(a.length))},nodebuffer:function(a){return e(a,j(a.length))}},k.array={string:f,array:d,arraybuffer:function(a){return new Uint8Array(a).buffer},uint8array:function(a){return new Uint8Array(a)},nodebuffer:function(a){return j(a)}},k.arraybuffer={string:function(a){return f(new Uint8Array(a))},array:function(a){return g(new Uint8Array(a),new Array(a.byteLength))},arraybuffer:d,uint8array:function(a){return new Uint8Array(a)},nodebuffer:function(a){return j(new Uint8Array(a))}},k.uint8array={string:f,array:function(a){return g(a,new Array(a.length))},arraybuffer:function(a){return a.buffer},uint8array:d,nodebuffer:function(a){return j(a)}},k.nodebuffer={string:f,array:function(a){return g(a,new Array(a.length))},arraybuffer:function(a){return k.nodebuffer.uint8array(a).buffer},uint8array:function(a){return g(a,new Uint8Array(a.length))},nodebuffer:d},c.transformTo=function(a,b){if(b||(b=""),!a)return b;c.checkSupport(a);var d=c.getTypeOf(b),e=k[d][a](b);return e},c.getTypeOf=function(a){return"string"==typeof a?"string":"[object Array]"===Object.prototype.toString.call(a)?"array":h.nodebuffer&&j.test(a)?"nodebuffer":h.uint8array&&a instanceof Uint8Array?"uint8array":h.arraybuffer&&a instanceof ArrayBuffer?"arraybuffer":void 0},c.checkSupport=function(a){var b=h[a.toLowerCase()];if(!b)throw new Error(a+" is not supported by this browser")},c.MAX_VALUE_16BITS=65535,c.MAX_VALUE_32BITS=-1,c.pretty=function(a){var b,c,d="";for(c=0;c<(a||"").length;c++)b=a.charCodeAt(c),d+="\\x"+(16>b?"0":"")+b.toString(16).toUpperCase();return 
// utils tail: findCompression matches a 2-byte magic against the compressions registry;
// isRegExp via Object.prototype.toString.
// Module 22 (./zipEntries): c(data, loadOptions) parses a whole archive. checkSignature reads
// 4 bytes and throws with pretty()-hexed actual/expected on mismatch. readBlockEndOfCentral
// reads the End Of Central Directory record fields (disk numbers, record counts, size, offset,
// comment) and utf8-decodes the archive comment. readBlockZip64EndOfCentral reads the Zip64
// EOCD (8-byte counts/offsets) and collects its extensible data sector into zip64ExtensibleData
// (loop body continues on the next line).
d},c.findCompression=function(a){for(var b in i)if(i.hasOwnProperty(b)&&i[b].magic===a)return i[b];return null},c.isRegExp=function(a){return"[object RegExp]"===Object.prototype.toString.call(a)}},{"./compressions":3,"./nodeBuffer":11,"./support":17}],22:[function(a,b){"use strict";function c(a,b){this.files=[],this.loadOptions=b,a&&this.load(a)}var d=a("./stringReader"),e=a("./nodeBufferReader"),f=a("./uint8ArrayReader"),g=a("./utils"),h=a("./signature"),i=a("./zipEntry"),j=a("./support"),k=a("./object");c.prototype={checkSignature:function(a){var b=this.reader.readString(4);if(b!==a)throw new Error("Corrupted zip or bug : unexpected signature ("+g.pretty(b)+", expected "+g.pretty(a)+")")},readBlockEndOfCentral:function(){this.diskNumber=this.reader.readInt(2),this.diskWithCentralDirStart=this.reader.readInt(2),this.centralDirRecordsOnThisDisk=this.reader.readInt(2),this.centralDirRecords=this.reader.readInt(2),this.centralDirSize=this.reader.readInt(4),this.centralDirOffset=this.reader.readInt(4),this.zipCommentLength=this.reader.readInt(2),this.zipComment=this.reader.readString(this.zipCommentLength),this.zipComment=k.utf8decode(this.zipComment)},readBlockZip64EndOfCentral:function(){this.zip64EndOfCentralSize=this.reader.readInt(8),this.versionMadeBy=this.reader.readString(2),this.versionNeeded=this.reader.readInt(2),this.diskNumber=this.reader.readInt(4),this.diskWithCentralDirStart=this.reader.readInt(4),this.centralDirRecordsOnThisDisk=this.reader.readInt(8),this.centralDirRecords=this.reader.readInt(8),this.centralDirSize=this.reader.readInt(8),this.centralDirOffset=this.reader.readInt(8),this.zip64ExtensibleData={};for(var 
// Zip64 extensible-data loop (id/length/value triples, 44 = fixed Zip64 EOCD size after the
// size field). readBlockZip64EndOfCentralLocator rejects multi-volume archives (disksCount>1).
// readLocalFiles re-seeks each entry's localHeaderOffset, verifies the local-file-header
// signature, then readLocalPart + handleUTF8. readCentralDir walks central-file-header records
// from centralDirOffset. readEndOfCentral scans backwards for the EOCD signature, parses it,
// and if ANY field is saturated (0xFFFF / 0xFFFFFFFF sentinels) switches to the Zip64 path:
// locator -> Zip64 EOCD. prepareReader (body on next line) picks a reader implementation.
a,b,c,d=this.zip64EndOfCentralSize-44,e=0;d>e;)a=this.reader.readInt(2),b=this.reader.readInt(4),c=this.reader.readString(b),this.zip64ExtensibleData[a]={id:a,length:b,value:c}},readBlockZip64EndOfCentralLocator:function(){if(this.diskWithZip64CentralDirStart=this.reader.readInt(4),this.relativeOffsetEndOfZip64CentralDir=this.reader.readInt(8),this.disksCount=this.reader.readInt(4),this.disksCount>1)throw new Error("Multi-volumes zip are not supported")},readLocalFiles:function(){var a,b;for(a=0;a<this.files.length;a++)b=this.files[a],this.reader.setIndex(b.localHeaderOffset),this.checkSignature(h.LOCAL_FILE_HEADER),b.readLocalPart(this.reader),b.handleUTF8()},readCentralDir:function(){var a;for(this.reader.setIndex(this.centralDirOffset);this.reader.readString(4)===h.CENTRAL_FILE_HEADER;)a=new i({zip64:this.zip64},this.loadOptions),a.readCentralPart(this.reader),this.files.push(a)},readEndOfCentral:function(){var a=this.reader.lastIndexOfSignature(h.CENTRAL_DIRECTORY_END);if(-1===a)throw new Error("Corrupted zip : can't find end of central directory");if(this.reader.setIndex(a),this.checkSignature(h.CENTRAL_DIRECTORY_END),this.readBlockEndOfCentral(),this.diskNumber===g.MAX_VALUE_16BITS||this.diskWithCentralDirStart===g.MAX_VALUE_16BITS||this.centralDirRecordsOnThisDisk===g.MAX_VALUE_16BITS||this.centralDirRecords===g.MAX_VALUE_16BITS||this.centralDirSize===g.MAX_VALUE_32BITS||this.centralDirOffset===g.MAX_VALUE_32BITS){if(this.zip64=!0,a=this.reader.lastIndexOfSignature(h.ZIP64_CENTRAL_DIRECTORY_LOCATOR),-1===a)throw new Error("Corrupted zip : can't find the ZIP64 end of central directory locator");this.reader.setIndex(a),this.checkSignature(h.ZIP64_CENTRAL_DIRECTORY_LOCATOR),this.readBlockZip64EndOfCentralLocator(),this.reader.setIndex(this.relativeOffsetEndOfZip64CentralDir),this.checkSignature(h.ZIP64_CENTRAL_DIRECTORY_END),this.readBlockZip64EndOfCentral()}},prepareReader:function(a){var 
// prepareReader: node Buffer -> nodeBufferReader; anything else with uint8array support is
// converted to a Uint8Array reader; plain strings (no typed-array support) use stringReader.
// load() = prepareReader -> readEndOfCentral -> readCentralDir -> readLocalFiles.
// Module 23 (./zipEntry) begins: bitFlag bit 0 = encrypted, bit 11 = UTF-8 names/comments.
// prepareCompressedContent/prepareContent return lazy thunks (seek, read, restore index;
// decompress and length-check on demand). readLocalPart skips the 22 bytes re-stated from the
// central record, reads name + extra-field length, and requires sizes already known from the
// central directory (rejects -1 sentinels); findCompression must know the method.
b=g.getTypeOf(a);this.reader="string"!==b||j.uint8array?"nodebuffer"===b?new e(a):new f(g.transformTo("uint8array",a)):new d(a,this.loadOptions.optimizedBinaryString)},load:function(a){this.prepareReader(a),this.readEndOfCentral(),this.readCentralDir(),this.readLocalFiles()}},b.exports=c},{"./nodeBufferReader":12,"./object":13,"./signature":14,"./stringReader":15,"./support":17,"./uint8ArrayReader":18,"./utils":21,"./zipEntry":23}],23:[function(a,b){"use strict";function c(a,b){this.options=a,this.loadOptions=b}var d=a("./stringReader"),e=a("./utils"),f=a("./compressedObject"),g=a("./object");c.prototype={isEncrypted:function(){return 1===(1&this.bitFlag)},useUTF8:function(){return 2048===(2048&this.bitFlag)},prepareCompressedContent:function(a,b,c){return function(){var d=a.index;a.setIndex(b);var e=a.readData(c);return a.setIndex(d),e}},prepareContent:function(a,b,c,d,f){return function(){var a=e.transformTo(d.uncompressInputType,this.getCompressedContent()),b=d.uncompress(a);if(b.length!==f)throw new Error("Bug : uncompressed data size mismatch");return b}},readLocalPart:function(a){var b,c;if(a.skip(22),this.fileNameLength=a.readInt(2),c=a.readInt(2),this.fileName=a.readString(this.fileNameLength),a.skip(c),-1==this.compressedSize||-1==this.uncompressedSize)throw new Error("Bug or corrupted zip : didn't get enough informations from the central directory (compressedSize == -1 || uncompressedSize == -1)");if(b=e.findCompression(this.compressionMethod),null===b)throw new Error("Corrupted zip : compression "+e.pretty(this.compressionMethod)+" unknown (inner file : "+this.fileName+")");if(this.decompressed=new 
// ZipEntry continued: wraps the entry in a CompressedObject (sizes, crc32, method, lazy
// getCompressedContent/getContent) and, when loadOptions.checkCRC32 is set, eagerly decodes and
// verifies the CRC32. readCentralPart reads every central-file-header field in record order and
// rejects encrypted entries; dir flag = bit 4 (0x10) of externalFileAttributes.
// parseZIP64ExtraField: extra field id 1 holds 8-byte replacements, consumed in spec order
// (uncompressedSize, compressedSize, localHeaderOffset, 4-byte diskNumberStart) ONLY for fields
// whose 32-bit value was the 0xFFFFFFFF sentinel. readExtraFields (next line) collects
// id/length/value triples.
f,this.decompressed.compressedSize=this.compressedSize,this.decompressed.uncompressedSize=this.uncompressedSize,this.decompressed.crc32=this.crc32,this.decompressed.compressionMethod=this.compressionMethod,this.decompressed.getCompressedContent=this.prepareCompressedContent(a,a.index,this.compressedSize,b),this.decompressed.getContent=this.prepareContent(a,a.index,this.compressedSize,b,this.uncompressedSize),this.loadOptions.checkCRC32&&(this.decompressed=e.transformTo("string",this.decompressed.getContent()),g.crc32(this.decompressed)!==this.crc32))throw new Error("Corrupted zip : CRC32 mismatch")},readCentralPart:function(a){if(this.versionMadeBy=a.readString(2),this.versionNeeded=a.readInt(2),this.bitFlag=a.readInt(2),this.compressionMethod=a.readString(2),this.date=a.readDate(),this.crc32=a.readInt(4),this.compressedSize=a.readInt(4),this.uncompressedSize=a.readInt(4),this.fileNameLength=a.readInt(2),this.extraFieldsLength=a.readInt(2),this.fileCommentLength=a.readInt(2),this.diskNumberStart=a.readInt(2),this.internalFileAttributes=a.readInt(2),this.externalFileAttributes=a.readInt(4),this.localHeaderOffset=a.readInt(4),this.isEncrypted())throw new Error("Encrypted zip are not supported");this.fileName=a.readString(this.fileNameLength),this.readExtraFields(a),this.parseZIP64ExtraField(a),this.fileComment=a.readString(this.fileCommentLength),this.dir=16&this.externalFileAttributes?!0:!1},parseZIP64ExtraField:function(){if(this.extraFields[1]){var a=new d(this.extraFields[1].value);this.uncompressedSize===e.MAX_VALUE_32BITS&&(this.uncompressedSize=a.readInt(8)),this.compressedSize===e.MAX_VALUE_32BITS&&(this.compressedSize=a.readInt(8)),this.localHeaderOffset===e.MAX_VALUE_32BITS&&(this.localHeaderOffset=a.readInt(8)),this.diskNumberStart===e.MAX_VALUE_32BITS&&(this.diskNumberStart=a.readInt(4))}},readExtraFields:function(a){var 
// handleUTF8: bitFlag bit 11 -> utf8decode name/comment; otherwise prefer the Info-ZIP Unicode
// Path (id 0x7075=28789) / Comment (id 0x6375=25461) extra fields, each gated on version byte
// ==1 and a CRC32 match against the raw header value.
// Module 24 (pako index): merges deflate, inflate and zlib constants into one export via assign.
// Module 25 (./lib/deflate) begins: d/e/f are the one-shot deflate/deflateRaw/gzip helpers;
// s = Deflate(options) normalizes windowBits (negative = raw, +16 = gzip wrapper) and creates
// the zstream; deflateInit2 call continues on the next line.
b,c,d,e=a.index;for(this.extraFields=this.extraFields||{};a.index<e+this.extraFieldsLength;)b=a.readInt(2),c=a.readInt(2),d=a.readString(c),this.extraFields[b]={id:b,length:c,value:d}},handleUTF8:function(){if(this.useUTF8())this.fileName=g.utf8decode(this.fileName),this.fileComment=g.utf8decode(this.fileComment);else{var a=this.findExtraFieldUnicodePath();null!==a&&(this.fileName=a);var b=this.findExtraFieldUnicodeComment();null!==b&&(this.fileComment=b)}},findExtraFieldUnicodePath:function(){var a=this.extraFields[28789];if(a){var b=new d(a.value);return 1!==b.readInt(1)?null:g.crc32(this.fileName)!==b.readInt(4)?null:g.utf8decode(b.readString(a.length-5))}return null},findExtraFieldUnicodeComment:function(){var a=this.extraFields[25461];if(a){var b=new d(a.value);return 1!==b.readInt(1)?null:g.crc32(this.fileComment)!==b.readInt(4)?null:g.utf8decode(b.readString(a.length-5))}return null}},b.exports=c},{"./compressedObject":2,"./object":13,"./stringReader":15,"./utils":21}],24:[function(a,b){"use strict";var c=a("./lib/utils/common").assign,d=a("./lib/deflate"),e=a("./lib/inflate"),f=a("./lib/zlib/constants"),g={};c(g,d,e,f),b.exports=g},{"./lib/deflate":25,"./lib/inflate":26,"./lib/utils/common":27,"./lib/zlib/constants":30}],25:[function(a,b,c){"use strict";function d(a,b){var c=new s(b);if(c.push(a,!0),c.err)throw c.msg;return c.result}function e(a,b){return b=b||{},b.raw=!0,d(a,b)}function f(a,b){return b=b||{},b.gzip=!0,d(a,b)}var g=a("./zlib/deflate.js"),h=a("./utils/common"),i=a("./utils/strings"),j=a("./zlib/messages"),k=a("./zlib/zstream"),l=0,m=4,n=0,o=1,p=-1,q=0,r=8,s=function(a){this.options=h.assign({level:p,method:r,chunkSize:16384,windowBits:15,memLevel:8,strategy:q,to:""},a||{});var b=this.options;b.raw&&b.windowBits>0?b.windowBits=-b.windowBits:b.gzip&&b.windowBits>0&&b.windowBits<16&&(b.windowBits+=16),this.err=0,this.msg="",this.ended=!1,this.chunks=[],this.strm=new k,this.strm.avail_out=0;var 
// Deflate wrapper continued: deflateInit2 + optional deflateSetHeader; push(data, mode) feeds
// the stream in chunkSize output slices (mode true => Z_FINISH=4, numbers passed through,
// default Z_NO_FLUSH=0), emitting via onData (binary string when options.to==="string", else
// shrunk Buf8); on finish it calls deflateEnd and onEnd collects chunks into result.
// Exports: Deflate, deflate, deflateRaw, gzip.
// Module 26 (./lib/inflate) begins: m = Inflate(options); windowBits 0 + raw => -15 (raw,
// max window); +32 enables zlib/gzip auto-detect when the caller didn't pin windowBits;
// the |15 normalization applies for 15<wb<48 with zero low bits. inflateInit2 continues
// on the next line.
c=g.deflateInit2(this.strm,b.level,b.method,b.windowBits,b.memLevel,b.strategy);if(c!==n)throw new Error(j[c]);b.header&&g.deflateSetHeader(this.strm,b.header)};s.prototype.push=function(a,b){var c,d,e=this.strm,f=this.options.chunkSize;if(this.ended)return!1;d=b===~~b?b:b===!0?m:l,e.input="string"==typeof a?i.string2buf(a):a,e.next_in=0,e.avail_in=e.input.length;do{if(0===e.avail_out&&(e.output=new h.Buf8(f),e.next_out=0,e.avail_out=f),c=g.deflate(e,d),c!==o&&c!==n)return this.onEnd(c),this.ended=!0,!1;(0===e.avail_out||0===e.avail_in&&d===m)&&this.onData("string"===this.options.to?i.buf2binstring(h.shrinkBuf(e.output,e.next_out)):h.shrinkBuf(e.output,e.next_out))}while((e.avail_in>0||0===e.avail_out)&&c!==o);return d===m?(c=g.deflateEnd(this.strm),this.onEnd(c),this.ended=!0,c===n):!0},s.prototype.onData=function(a){this.chunks.push(a)},s.prototype.onEnd=function(a){a===n&&(this.result="string"===this.options.to?this.chunks.join(""):h.flattenChunks(this.chunks)),this.chunks=[],this.err=a,this.msg=this.strm.msg},c.Deflate=s,c.deflate=d,c.deflateRaw=e,c.gzip=f},{"./utils/common":27,"./utils/strings":28,"./zlib/deflate.js":32,"./zlib/messages":37,"./zlib/zstream":39}],26:[function(a,b,c){"use strict";function d(a,b){var c=new m(b);if(c.push(a,!0),c.err)throw c.msg;return c.result}function e(a,b){return b=b||{},b.raw=!0,d(a,b)}var f=a("./zlib/inflate.js"),g=a("./utils/common"),h=a("./utils/strings"),i=a("./zlib/constants"),j=a("./zlib/messages"),k=a("./zlib/zstream"),l=a("./zlib/gzheader"),m=function(a){this.options=g.assign({chunkSize:16384,windowBits:0,to:""},a||{});var b=this.options;b.raw&&b.windowBits>=0&&b.windowBits<16&&(b.windowBits=-b.windowBits,0===b.windowBits&&(b.windowBits=-15)),!(b.windowBits>=0&&b.windowBits<16)||a&&a.windowBits||(b.windowBits+=32),b.windowBits>15&&b.windowBits<48&&0===(15&b.windowBits)&&(b.windowBits|=15),this.err=0,this.msg="",this.ended=!1,this.chunks=[],this.strm=new k,this.strm.avail_out=0;var 
// Inflate wrapper continued: inflateInit2 + inflateGetHeader. push() inflates in chunkSize
// slices; for string output it cuts at utf8border so UTF-8 sequences never split across chunks,
// carrying the partial tail to the front of the output buffer via arraySet. Z_STREAM_END forces
// finish handling; exports Inflate, inflate, inflateRaw, and ungzip (alias of inflate — the
// auto-detect windowBits handles the gzip wrapper).
// Module 27 (./utils/common) begins: d = typed-array support flag; assign = Object.assign-like
// merge; shrinkBuf truncates via subarray or length; e (typed arraySet/flattenChunks) starts
// here and its arraySet fast path continues on the next line.
c=f.inflateInit2(this.strm,b.windowBits);if(c!==i.Z_OK)throw new Error(j[c]);this.header=new l,f.inflateGetHeader(this.strm,this.header)};m.prototype.push=function(a,b){var c,d,e,j,k,l=this.strm,m=this.options.chunkSize;if(this.ended)return!1;d=b===~~b?b:b===!0?i.Z_FINISH:i.Z_NO_FLUSH,l.input="string"==typeof a?h.binstring2buf(a):a,l.next_in=0,l.avail_in=l.input.length;do{if(0===l.avail_out&&(l.output=new g.Buf8(m),l.next_out=0,l.avail_out=m),c=f.inflate(l,i.Z_NO_FLUSH),c!==i.Z_STREAM_END&&c!==i.Z_OK)return this.onEnd(c),this.ended=!0,!1;l.next_out&&(0===l.avail_out||c===i.Z_STREAM_END||0===l.avail_in&&d===i.Z_FINISH)&&("string"===this.options.to?(e=h.utf8border(l.output,l.next_out),j=l.next_out-e,k=h.buf2string(l.output,e),l.next_out=j,l.avail_out=m-j,j&&g.arraySet(l.output,l.output,e,j,0),this.onData(k)):this.onData(g.shrinkBuf(l.output,l.next_out)))}while(l.avail_in>0&&c!==i.Z_STREAM_END);return c===i.Z_STREAM_END&&(d=i.Z_FINISH),d===i.Z_FINISH?(c=f.inflateEnd(this.strm),this.onEnd(c),this.ended=!0,c===i.Z_OK):!0},m.prototype.onData=function(a){this.chunks.push(a)},m.prototype.onEnd=function(a){a===i.Z_OK&&(this.result="string"===this.options.to?this.chunks.join(""):g.flattenChunks(this.chunks)),this.chunks=[],this.err=a,this.msg=this.strm.msg},c.Inflate=m,c.inflate=d,c.inflateRaw=e,c.ungzip=d},{"./utils/common":27,"./utils/strings":28,"./zlib/constants":30,"./zlib/gzheader":33,"./zlib/inflate.js":35,"./zlib/messages":37,"./zlib/zstream":39}],27:[function(a,b,c){"use strict";var d="undefined"!=typeof Uint8Array&&"undefined"!=typeof Uint16Array&&"undefined"!=typeof Int32Array;c.assign=function(a){for(var b=Array.prototype.slice.call(arguments,1);b.length;){var c=b.shift();if(c){if("object"!=typeof c)throw new TypeError(c+"must be non-object");for(var d in c)c.hasOwnProperty(d)&&(a[d]=c[d])}}return a},c.shrinkBuf=function(a,b){return a.length===b?a:a.subarray?a.subarray(0,b):(a.length=b,a)};var e={arraySet:function(a,b,c,d,e){if(b.subarray&&a.subarray)return void 
// utils/common continued: e = typed-array arraySet (set+subarray fast path, element loop
// fallback) and flattenChunks (size pass then copy pass into one Uint8Array); f = plain-array
// equivalents ([].concat.apply flatten). setTyped picks Buf8/Buf16/Buf32 = typed arrays or
// plain Arrays and installs the matching helper set; called once with the support flag d.
// Module 28 (./utils/strings): d = buf2binstring core with fromCharCode.apply capability probes
// (f = plain array, g = typed array), falling back to a char loop above 65536 elements; i = the
// same 256-entry UTF-8 length table as module 20; string2buf is the same two-pass UTF-8 encoder.
// buf2string (decoder, continues next line) mirrors module 20's k().
a.set(b.subarray(c,c+d),e);for(var f=0;d>f;f++)a[e+f]=b[c+f]},flattenChunks:function(a){var b,c,d,e,f,g;for(d=0,b=0,c=a.length;c>b;b++)d+=a[b].length;for(g=new Uint8Array(d),e=0,b=0,c=a.length;c>b;b++)f=a[b],g.set(f,e),e+=f.length;return g}},f={arraySet:function(a,b,c,d,e){for(var f=0;d>f;f++)a[e+f]=b[c+f]},flattenChunks:function(a){return[].concat.apply([],a)}};c.setTyped=function(a){a?(c.Buf8=Uint8Array,c.Buf16=Uint16Array,c.Buf32=Int32Array,c.assign(c,e)):(c.Buf8=Array,c.Buf16=Array,c.Buf32=Array,c.assign(c,f))},c.setTyped(d)},{}],28:[function(a,b,c){"use strict";function d(a,b){if(65537>b&&(a.subarray&&g||!a.subarray&&f))return String.fromCharCode.apply(null,e.shrinkBuf(a,b));for(var c="",d=0;b>d;d++)c+=String.fromCharCode(a[d]);return c}var e=a("./common"),f=!0,g=!0;try{String.fromCharCode.apply(null,[0])}catch(h){f=!1}try{String.fromCharCode.apply(null,new Uint8Array(1))}catch(h){g=!1}for(var i=new e.Buf8(256),j=0;256>j;j++)i[j]=j>=252?6:j>=248?5:j>=240?4:j>=224?3:j>=192?2:1;i[254]=i[254]=1,c.string2buf=function(a){var b,c,d,f,g,h=a.length,i=0;for(f=0;h>f;f++)c=a.charCodeAt(f),55296===(64512&c)&&h>f+1&&(d=a.charCodeAt(f+1),56320===(64512&d)&&(c=65536+(c-55296<<10)+(d-56320),f++)),i+=128>c?1:2048>c?2:65536>c?3:4;for(b=new e.Buf8(i),g=0,f=0;i>g;f++)c=a.charCodeAt(f),55296===(64512&c)&&h>f+1&&(d=a.charCodeAt(f+1),56320===(64512&d)&&(c=65536+(c-55296<<10)+(d-56320),f++)),128>c?b[g++]=c:2048>c?(b[g++]=192|c>>>6,b[g++]=128|63&c):65536>c?(b[g++]=224|c>>>12,b[g++]=128|c>>>6&63,b[g++]=128|63&c):(b[g++]=240|c>>>18,b[g++]=128|c>>>12&63,b[g++]=128|c>>>6&63,b[g++]=128|63&c);return b},c.buf2binstring=function(a){return d(a,a.length)},c.binstring2buf=function(a){for(var b=new e.Buf8(a.length),c=0,d=b.length;d>c;c++)b[c]=a.charCodeAt(c);return b},c.buf2string=function(a,b){var c,e,f,g,h=b||a.length,j=new Array(2*h);for(e=0,c=0;h>c;)if(f=a[c++],128>f)j[e++]=f;else 
// strings tail: buf2string decode loop (U+FFFD for invalid sequences, surrogate re-encoding)
// and utf8border. Module 29: adler32 — 2000-byte inner blocks before the mod-65521 reductions.
// Module 30: the zlib constants table (flush modes, return codes, strategies, Z_DEFLATED).
// Module 31: crc32 — c() builds the 256-entry table with polynomial 0xEDB88320 (3988292384),
// d() is the standard table-driven CRC with pre/post inversion.
// Module 32 (./zlib/deflate) begins: d=err(msg from table G), e=rank(flush), f=zero-fill,
// g=flush_pending (drain pending_buf into strm.output), h=flush_block_only, i/j=put byte /
// put 16-bit big-endian pair, k=read_buf (continues next line: copies input, updating
// adler32 or crc32 per wrap mode).
if(g=i[f],g>4)j[e++]=65533,c+=g-1;else{for(f&=2===g?31:3===g?15:7;g>1&&h>c;)f=f<<6|63&a[c++],g--;g>1?j[e++]=65533:65536>f?j[e++]=f:(f-=65536,j[e++]=55296|f>>10&1023,j[e++]=56320|1023&f)}return d(j,e)},c.utf8border=function(a,b){var c;for(b=b||a.length,b>a.length&&(b=a.length),c=b-1;c>=0&&128===(192&a[c]);)c--;return 0>c?b:0===c?b:c+i[a[c]]>b?c:b}},{"./common":27}],29:[function(a,b){"use strict";function c(a,b,c,d){for(var e=65535&a|0,f=a>>>16&65535|0,g=0;0!==c;){g=c>2e3?2e3:c,c-=g;do e=e+b[d++]|0,f=f+e|0;while(--g);e%=65521,f%=65521}return e|f<<16|0}b.exports=c},{}],30:[function(a,b){b.exports={Z_NO_FLUSH:0,Z_PARTIAL_FLUSH:1,Z_SYNC_FLUSH:2,Z_FULL_FLUSH:3,Z_FINISH:4,Z_BLOCK:5,Z_TREES:6,Z_OK:0,Z_STREAM_END:1,Z_NEED_DICT:2,Z_ERRNO:-1,Z_STREAM_ERROR:-2,Z_DATA_ERROR:-3,Z_BUF_ERROR:-5,Z_NO_COMPRESSION:0,Z_BEST_SPEED:1,Z_BEST_COMPRESSION:9,Z_DEFAULT_COMPRESSION:-1,Z_FILTERED:1,Z_HUFFMAN_ONLY:2,Z_RLE:3,Z_FIXED:4,Z_DEFAULT_STRATEGY:0,Z_BINARY:0,Z_TEXT:1,Z_UNKNOWN:2,Z_DEFLATED:8}},{}],31:[function(a,b){"use strict";function c(){for(var a,b=[],c=0;256>c;c++){a=c;for(var d=0;8>d;d++)a=1&a?3988292384^a>>>1:a>>>1;b[c]=a}return b}function d(a,b,c,d){var f=e,g=d+c;a=-1^a;for(var h=d;g>h;h++)a=a>>>8^f[255&(a^b[h])];return-1^a}var e=c();b.exports=d},{}],32:[function(a,b,c){"use strict";function d(a,b){return a.msg=G[b],b}function e(a){return(a<<1)-(a>4?9:0)}function f(a){for(var b=a.length;--b>=0;)a[b]=0}function g(a){var b=a.state,c=b.pending;c>a.avail_out&&(c=a.avail_out),0!==c&&(C.arraySet(a.output,b.pending_buf,b.pending_out,c,a.next_out),a.next_out+=c,b.pending_out+=c,a.total_out+=c,a.avail_out-=c,b.pending-=c,0===b.pending&&(b.pending_out=0))}function h(a,b){D._tr_flush_block(a,a.block_start>=0?a.block_start:-1,a.strstart-a.block_start,b),a.block_start=a.strstart,g(a.strm)}function i(a,b){a.pending_buf[a.pending++]=b}function j(a,b){a.pending_buf[a.pending++]=b>>>8&255,a.pending_buf[a.pending++]=255&b}function k(a,b,c,d){var e=a.avail_in;return 
// zlib/deflate continued — the match engine and block strategies (a direct port of zlib's
// deflate.c; statement order is load-bearing, kept byte-identical):
// k = read_buf tail (updates strm.adler via adler32 for wrap==1 or crc32 for wrap==2);
// l = longest_match (chain-limited backwards match search, 8-way unrolled compare loop,
//     early-out heuristics on good_match/nice_match); m = fill_window (slides the window by
//     w_size when strstart crosses the upper half, rebases head/prev hash chains, refills via
//     read_buf, and re-primes the rolling ins_h hash for deferred inserts).
// n = deflate_stored starts at the end of this line.
e>d&&(e=d),0===e?0:(a.avail_in-=e,C.arraySet(b,a.input,a.next_in,e,c),1===a.state.wrap?a.adler=E(a.adler,b,e,c):2===a.state.wrap&&(a.adler=F(a.adler,b,e,c)),a.next_in+=e,a.total_in+=e,e)}function l(a,b){var c,d,e=a.max_chain_length,f=a.strstart,g=a.prev_length,h=a.nice_match,i=a.strstart>a.w_size-jb?a.strstart-(a.w_size-jb):0,j=a.window,k=a.w_mask,l=a.prev,m=a.strstart+ib,n=j[f+g-1],o=j[f+g];a.prev_length>=a.good_match&&(e>>=2),h>a.lookahead&&(h=a.lookahead);do if(c=b,j[c+g]===o&&j[c+g-1]===n&&j[c]===j[f]&&j[++c]===j[f+1]){f+=2,c++;do;while(j[++f]===j[++c]&&j[++f]===j[++c]&&j[++f]===j[++c]&&j[++f]===j[++c]&&j[++f]===j[++c]&&j[++f]===j[++c]&&j[++f]===j[++c]&&j[++f]===j[++c]&&m>f);if(d=ib-(m-f),f=m-ib,d>g){if(a.match_start=b,g=d,d>=h)break;n=j[f+g-1],o=j[f+g]}}while((b=l[b&k])>i&&0!==--e);return g<=a.lookahead?g:a.lookahead}function m(a){var b,c,d,e,f,g=a.w_size;do{if(e=a.window_size-a.lookahead-a.strstart,a.strstart>=g+(g-jb)){C.arraySet(a.window,a.window,g,g,0),a.match_start-=g,a.strstart-=g,a.block_start-=g,c=a.hash_size,b=c;do d=a.head[--b],a.head[b]=d>=g?d-g:0;while(--c);c=g,b=c;do d=a.prev[--b],a.prev[b]=d>=g?d-g:0;while(--c);e+=g}if(0===a.strm.avail_in)break;if(c=k(a.strm,a.window,a.strstart+a.lookahead,e),a.lookahead+=c,a.lookahead+a.insert>=hb)for(f=a.strstart-a.insert,a.ins_h=a.window[f],a.ins_h=(a.ins_h<<a.hash_shift^a.window[f+1])&a.hash_mask;a.insert&&(a.ins_h=(a.ins_h<<a.hash_shift^a.window[f+hb-1])&a.hash_mask,a.prev[f&a.w_mask]=a.head[a.ins_h],a.head[a.ins_h]=f,f++,a.insert--,!(a.lookahead+a.insert<hb)););}while(a.lookahead<jb&&0!==a.strm.avail_in)}function n(a,b){var c=65535;for(c>a.pending_buf_size-5&&(c=a.pending_buf_size-5);;){if(a.lookahead<=1){if(m(a),0===a.lookahead&&b===H)return sb;if(0===a.lookahead)break}a.strstart+=a.lookahead,a.lookahead=0;var d=a.block_start+c;if((0===a.strstart||a.strstart>=d)&&(a.lookahead=a.strstart-d,a.strstart=d,h(a,!1),0===a.strm.avail_out))return 
// n = deflate_stored tail (level 0: copy input through in max-65535-byte stored blocks);
// o = deflate_fast (level 1-3: greedy — emit a match or literal, insert hash entries while
//     shortening the match, no lazy evaluation).
// p = deflate_slow starts at the end of this line (lazy matching).
sb;if(a.strstart-a.block_start>=a.w_size-jb&&(h(a,!1),0===a.strm.avail_out))return sb}return a.insert=0,b===K?(h(a,!0),0===a.strm.avail_out?ub:vb):a.strstart>a.block_start&&(h(a,!1),0===a.strm.avail_out)?sb:sb}function o(a,b){for(var c,d;;){if(a.lookahead<jb){if(m(a),a.lookahead<jb&&b===H)return sb;if(0===a.lookahead)break}if(c=0,a.lookahead>=hb&&(a.ins_h=(a.ins_h<<a.hash_shift^a.window[a.strstart+hb-1])&a.hash_mask,c=a.prev[a.strstart&a.w_mask]=a.head[a.ins_h],a.head[a.ins_h]=a.strstart),0!==c&&a.strstart-c<=a.w_size-jb&&(a.match_length=l(a,c)),a.match_length>=hb)if(d=D._tr_tally(a,a.strstart-a.match_start,a.match_length-hb),a.lookahead-=a.match_length,a.match_length<=a.max_lazy_match&&a.lookahead>=hb){a.match_length--;do a.strstart++,a.ins_h=(a.ins_h<<a.hash_shift^a.window[a.strstart+hb-1])&a.hash_mask,c=a.prev[a.strstart&a.w_mask]=a.head[a.ins_h],a.head[a.ins_h]=a.strstart;while(0!==--a.match_length);a.strstart++}else a.strstart+=a.match_length,a.match_length=0,a.ins_h=a.window[a.strstart],a.ins_h=(a.ins_h<<a.hash_shift^a.window[a.strstart+1])&a.hash_mask;else d=D._tr_tally(a,0,a.window[a.strstart]),a.lookahead--,a.strstart++;if(d&&(h(a,!1),0===a.strm.avail_out))return sb}return a.insert=a.strstart<hb-1?a.strstart:hb-1,b===K?(h(a,!0),0===a.strm.avail_out?ub:vb):a.last_lit&&(h(a,!1),0===a.strm.avail_out)?sb:tb}function p(a,b){for(var c,d,e;;){if(a.lookahead<jb){if(m(a),a.lookahead<jb&&b===H)return 
// p = deflate_slow (level 4-9: lazy matching — defer emitting a match one byte to see if the
//     next position matches longer; TOO_FAR=4096 filter for level/strategy S).
// q = deflate_rle starts at the end of this line (Z_RLE: run-length only, distance 1).
sb;if(0===a.lookahead)break}if(c=0,a.lookahead>=hb&&(a.ins_h=(a.ins_h<<a.hash_shift^a.window[a.strstart+hb-1])&a.hash_mask,c=a.prev[a.strstart&a.w_mask]=a.head[a.ins_h],a.head[a.ins_h]=a.strstart),a.prev_length=a.match_length,a.prev_match=a.match_start,a.match_length=hb-1,0!==c&&a.prev_length<a.max_lazy_match&&a.strstart-c<=a.w_size-jb&&(a.match_length=l(a,c),a.match_length<=5&&(a.strategy===S||a.match_length===hb&&a.strstart-a.match_start>4096)&&(a.match_length=hb-1)),a.prev_length>=hb&&a.match_length<=a.prev_length){e=a.strstart+a.lookahead-hb,d=D._tr_tally(a,a.strstart-1-a.prev_match,a.prev_length-hb),a.lookahead-=a.prev_length-1,a.prev_length-=2;do++a.strstart<=e&&(a.ins_h=(a.ins_h<<a.hash_shift^a.window[a.strstart+hb-1])&a.hash_mask,c=a.prev[a.strstart&a.w_mask]=a.head[a.ins_h],a.head[a.ins_h]=a.strstart);while(0!==--a.prev_length);if(a.match_available=0,a.match_length=hb-1,a.strstart++,d&&(h(a,!1),0===a.strm.avail_out))return sb}else if(a.match_available){if(d=D._tr_tally(a,0,a.window[a.strstart-1]),d&&h(a,!1),a.strstart++,a.lookahead--,0===a.strm.avail_out)return sb}else a.match_available=1,a.strstart++,a.lookahead--}return a.match_available&&(d=D._tr_tally(a,0,a.window[a.strstart-1]),a.match_available=0),a.insert=a.strstart<hb-1?a.strstart:hb-1,b===K?(h(a,!0),0===a.strm.avail_out?ub:vb):a.last_lit&&(h(a,!1),0===a.strm.avail_out)?sb:tb}function q(a,b){for(var c,d,e,f,g=a.window;;){if(a.lookahead<=ib){if(m(a),a.lookahead<=ib&&b===H)return 
// q = deflate_rle tail (unrolled distance-1 run scan, emits <len, dist=1> matches);
// r = deflate_huff (Z_HUFFMAN_ONLY: literals only, no matches);
// s = lm_init (resets window/hash state and loads the per-level tuning from config table B).
sb;if(0===a.lookahead)break}if(a.match_length=0,a.lookahead>=hb&&a.strstart>0&&(e=a.strstart-1,d=g[e],d===g[++e]&&d===g[++e]&&d===g[++e])){f=a.strstart+ib;do;while(d===g[++e]&&d===g[++e]&&d===g[++e]&&d===g[++e]&&d===g[++e]&&d===g[++e]&&d===g[++e]&&d===g[++e]&&f>e);a.match_length=ib-(f-e),a.match_length>a.lookahead&&(a.match_length=a.lookahead)}if(a.match_length>=hb?(c=D._tr_tally(a,1,a.match_length-hb),a.lookahead-=a.match_length,a.strstart+=a.match_length,a.match_length=0):(c=D._tr_tally(a,0,a.window[a.strstart]),a.lookahead--,a.strstart++),c&&(h(a,!1),0===a.strm.avail_out))return sb}return a.insert=0,b===K?(h(a,!0),0===a.strm.avail_out?ub:vb):a.last_lit&&(h(a,!1),0===a.strm.avail_out)?sb:tb}function r(a,b){for(var c;;){if(0===a.lookahead&&(m(a),0===a.lookahead)){if(b===H)return sb;break}if(a.match_length=0,c=D._tr_tally(a,0,a.window[a.strstart]),a.lookahead--,a.strstart++,c&&(h(a,!1),0===a.strm.avail_out))return sb}return a.insert=0,b===K?(h(a,!0),0===a.strm.avail_out?ub:vb):a.last_lit&&(h(a,!1),0===a.strm.avail_out)?sb:tb}function s(a){a.window_size=2*a.w_size,f(a.head),a.max_lazy_match=B[a.level].max_lazy,a.good_match=B[a.level].good_length,a.nice_match=B[a.level].nice_length,a.max_chain_length=B[a.level].max_chain,a.strstart=0,a.block_start=0,a.lookahead=0,a.insert=0,a.match_length=a.prev_length=hb-1,a.match_available=0,a.ins_h=0}function 
// t = DeflateState: the full per-stream compressor state (window/hash-chain fields, the three
//     Huffman tree buffers dyn_ltree/dyn_dtree/bl_tree, heap/depth scratch for tree building,
//     pending output buffer bookkeeping). u = deflateResetKeep, v = deflateReset (+lm_init),
//     w = deflateSetHeader (gzip wrap only). x = deflateInit2: validates level/method/
//     windowBits/memLevel/strategy, derives wrap from the windowBits sign/offset convention
//     (negative = raw, >15 = gzip), quirk: windowBits 8 is bumped to 9; allocates window,
//     head, prev (allocation continues on the next line).
t(){this.strm=null,this.status=0,this.pending_buf=null,this.pending_buf_size=0,this.pending_out=0,this.pending=0,this.wrap=0,this.gzhead=null,this.gzindex=0,this.method=Y,this.last_flush=-1,this.w_size=0,this.w_bits=0,this.w_mask=0,this.window=null,this.window_size=0,this.prev=null,this.head=null,this.ins_h=0,this.hash_size=0,this.hash_bits=0,this.hash_mask=0,this.hash_shift=0,this.block_start=0,this.match_length=0,this.prev_match=0,this.match_available=0,this.strstart=0,this.match_start=0,this.lookahead=0,this.prev_length=0,this.max_chain_length=0,this.max_lazy_match=0,this.level=0,this.strategy=0,this.good_match=0,this.nice_match=0,this.dyn_ltree=new C.Buf16(2*fb),this.dyn_dtree=new C.Buf16(2*(2*db+1)),this.bl_tree=new C.Buf16(2*(2*eb+1)),f(this.dyn_ltree),f(this.dyn_dtree),f(this.bl_tree),this.l_desc=null,this.d_desc=null,this.bl_desc=null,this.bl_count=new C.Buf16(gb+1),this.heap=new C.Buf16(2*cb+1),f(this.heap),this.heap_len=0,this.heap_max=0,this.depth=new C.Buf16(2*cb+1),f(this.depth),this.l_buf=0,this.lit_bufsize=0,this.last_lit=0,this.d_buf=0,this.opt_len=0,this.static_len=0,this.matches=0,this.insert=0,this.bi_buf=0,this.bi_valid=0}function u(a){var b;return a&&a.state?(a.total_in=a.total_out=0,a.data_type=X,b=a.state,b.pending=0,b.pending_out=0,b.wrap<0&&(b.wrap=-b.wrap),b.status=b.wrap?lb:qb,a.adler=2===b.wrap?0:1,b.last_flush=H,D._tr_init(b),M):d(a,O)}function v(a){var b=u(a);return b===M&&s(a.state),b}function w(a,b){return a&&a.state?2!==a.state.wrap?O:(a.state.gzhead=b,M):O}function x(a,b,c,e,f,g){if(!a)return O;var h=1;if(b===R&&(b=6),0>e?(h=0,e=-e):e>15&&(h=2,e-=16),1>f||f>Z||c!==Y||8>e||e>15||0>b||b>9||0>g||g>V)return d(a,O);8===e&&(e=9);var i=new t;return a.state=i,i.strm=a,i.wrap=h,i.gzhead=null,i.w_bits=e,i.w_size=1<<i.w_bits,i.w_mask=i.w_size-1,i.hash_bits=f+7,i.hash_size=1<<i.hash_bits,i.hash_mask=i.hash_size-1,i.hash_shift=~~((i.hash_bits+hb-1)/hb),i.window=new C.Buf8(2*i.w_size),i.head=new C.Buf16(i.hash_size),i.prev=new 
// x tail: pending_buf sized 4*lit_bufsize, d_buf/l_buf offsets into it; y = deflateInit with
// defaults (method Z_DEFLATED, windowBits 15, memLevel 8). z = deflate(strm, flush): validates
// state, then on first call writes the gzip header bytes (magic 31/139, method 8, flag byte from
// gzhead fields, mtime, XFL, OS) or the 2-byte zlib header (CMF/FLG with level bits, FDICT-free,
// checksum-adjusted to a multiple of 31, plus adler32 when a dictionary was set via strstart).
// Gzip extra-field emission loop at the end of this line (keeps hcrc over pending_buf).
C.Buf16(i.w_size),i.lit_bufsize=1<<f+6,i.pending_buf_size=4*i.lit_bufsize,i.pending_buf=new C.Buf8(i.pending_buf_size),i.d_buf=i.lit_bufsize>>1,i.l_buf=3*i.lit_bufsize,i.level=b,i.strategy=g,i.method=c,v(a)}function y(a,b){return x(a,b,Y,$,_,W)}function z(a,b){var c,h,k,l;if(!a||!a.state||b>L||0>b)return a?d(a,O):O;if(h=a.state,!a.output||!a.input&&0!==a.avail_in||h.status===rb&&b!==K)return d(a,0===a.avail_out?Q:O);if(h.strm=a,c=h.last_flush,h.last_flush=b,h.status===lb)if(2===h.wrap)a.adler=0,i(h,31),i(h,139),i(h,8),h.gzhead?(i(h,(h.gzhead.text?1:0)+(h.gzhead.hcrc?2:0)+(h.gzhead.extra?4:0)+(h.gzhead.name?8:0)+(h.gzhead.comment?16:0)),i(h,255&h.gzhead.time),i(h,h.gzhead.time>>8&255),i(h,h.gzhead.time>>16&255),i(h,h.gzhead.time>>24&255),i(h,9===h.level?2:h.strategy>=T||h.level<2?4:0),i(h,255&h.gzhead.os),h.gzhead.extra&&h.gzhead.extra.length&&(i(h,255&h.gzhead.extra.length),i(h,h.gzhead.extra.length>>8&255)),h.gzhead.hcrc&&(a.adler=F(a.adler,h.pending_buf,h.pending,0)),h.gzindex=0,h.status=mb):(i(h,0),i(h,0),i(h,0),i(h,0),i(h,0),i(h,9===h.level?2:h.strategy>=T||h.level<2?4:0),i(h,wb),h.status=qb);else{var m=Y+(h.w_bits-8<<4)<<8,n=-1;n=h.strategy>=T||h.level<2?0:h.level<6?1:6===h.level?2:3,m|=n<<6,0!==h.strstart&&(m|=kb),m+=31-m%31,h.status=qb,j(h,m),0!==h.strstart&&(j(h,a.adler>>>16),j(h,65535&a.adler)),a.adler=1}if(h.status===mb)if(h.gzhead.extra){for(k=h.pending;h.gzindex<(65535&h.gzhead.extra.length)&&(h.pending!==h.pending_buf_size||(h.gzhead.hcrc&&h.pending>k&&(a.adler=F(a.adler,h.pending_buf,h.pending-k,k)),g(a),k=h.pending,h.pending!==h.pending_buf_size));)i(h,255&h.gzhead.extra[h.gzindex]),h.gzindex++;h.gzhead.hcrc&&h.pending>k&&(a.adler=F(a.adler,h.pending_buf,h.pending-k,k)),h.gzindex===h.gzhead.extra.length&&(h.gzindex=0,h.status=nb)}else 
// z continued: the gzip NAME / COMMENT zero-terminated string states and the optional 2-byte
// header CRC (hcrc) state, each draining through flush_pending when pending_buf fills. Then the
// main dispatch: huff (r) for Z_HUFFMAN_ONLY, rle (q) for Z_RLE, else the level's configured
// strategy from table B; handles finish/flush block results, Z_FULL/PARTIAL_FLUSH tree resets,
// and finally writes the trailer (gzip: crc32 + total_in LE; zlib: adler32 BE), flipping wrap
// negative so the trailer is only written once.
h.status=nb;if(h.status===nb)if(h.gzhead.name){k=h.pending;do{if(h.pending===h.pending_buf_size&&(h.gzhead.hcrc&&h.pending>k&&(a.adler=F(a.adler,h.pending_buf,h.pending-k,k)),g(a),k=h.pending,h.pending===h.pending_buf_size)){l=1;break}l=h.gzindex<h.gzhead.name.length?255&h.gzhead.name.charCodeAt(h.gzindex++):0,i(h,l)}while(0!==l);h.gzhead.hcrc&&h.pending>k&&(a.adler=F(a.adler,h.pending_buf,h.pending-k,k)),0===l&&(h.gzindex=0,h.status=ob)}else h.status=ob;if(h.status===ob)if(h.gzhead.comment){k=h.pending;do{if(h.pending===h.pending_buf_size&&(h.gzhead.hcrc&&h.pending>k&&(a.adler=F(a.adler,h.pending_buf,h.pending-k,k)),g(a),k=h.pending,h.pending===h.pending_buf_size)){l=1;break}l=h.gzindex<h.gzhead.comment.length?255&h.gzhead.comment.charCodeAt(h.gzindex++):0,i(h,l)}while(0!==l);h.gzhead.hcrc&&h.pending>k&&(a.adler=F(a.adler,h.pending_buf,h.pending-k,k)),0===l&&(h.status=pb)}else h.status=pb;if(h.status===pb&&(h.gzhead.hcrc?(h.pending+2>h.pending_buf_size&&g(a),h.pending+2<=h.pending_buf_size&&(i(h,255&a.adler),i(h,a.adler>>8&255),a.adler=0,h.status=qb)):h.status=qb),0!==h.pending){if(g(a),0===a.avail_out)return h.last_flush=-1,M}else if(0===a.avail_in&&e(b)<=e(c)&&b!==K)return d(a,Q);if(h.status===rb&&0!==a.avail_in)return d(a,Q);if(0!==a.avail_in||0!==h.lookahead||b!==H&&h.status!==rb){var o=h.strategy===T?r(h,b):h.strategy===U?q(h,b):B[h.level].func(h,b);if((o===ub||o===vb)&&(h.status=rb),o===sb||o===ub)return 0===a.avail_out&&(h.last_flush=-1),M;if(o===tb&&(b===I?D._tr_align(h):b!==L&&(D._tr_stored_block(h,0,0,!1),b===J&&(f(h.head),0===h.lookahead&&(h.strstart=0,h.block_start=0,h.insert=0))),g(a),0===a.avail_out))return h.last_flush=-1,M}return b!==K?M:h.wrap<=0?N:(2===h.wrap?(i(h,255&a.adler),i(h,a.adler>>8&255),i(h,a.adler>>16&255),i(h,a.adler>>24&255),i(h,255&a.total_in),i(h,a.total_in>>8&255),i(h,a.total_in>>16&255),i(h,a.total_in>>24&255)):(j(h,a.adler>>>16),j(h,65535&a.adler)),g(a),h.wrap>0&&(h.wrap=-h.wrap),0!==h.pending?M:N)}function A(a){var b;return 
// A = deflateEnd (valid-status check, frees state, BUSY_STATE -> Z_DATA_ERROR). Then the
// module's constant pool (flush modes H..L, return codes M..Q, strategies S..V, status codes
// lb..rb, block-result codes sb..vb) and B = the per-level Config table
// (good_length/max_lazy/nice_length/max_chain/strategy-function), plus the deflate exports.
// Module 33 (gzheader): plain gzip header holder. Module 34 (inffast) begins: inflate_fast —
// the hot decode loop operating on locals hoisted from the state; labeled top loop `a:` with
// length/distance sub-loops (continues next line; note the `break`+label split across the
// line break is preserved exactly as found).
a&&a.state?(b=a.state.status,b!==lb&&b!==mb&&b!==nb&&b!==ob&&b!==pb&&b!==qb&&b!==rb?d(a,O):(a.state=null,b===qb?d(a,P):M)):O}var B,C=a("../utils/common"),D=a("./trees"),E=a("./adler32"),F=a("./crc32"),G=a("./messages"),H=0,I=1,J=3,K=4,L=5,M=0,N=1,O=-2,P=-3,Q=-5,R=-1,S=1,T=2,U=3,V=4,W=0,X=2,Y=8,Z=9,$=15,_=8,ab=29,bb=256,cb=bb+1+ab,db=30,eb=19,fb=2*cb+1,gb=15,hb=3,ib=258,jb=ib+hb+1,kb=32,lb=42,mb=69,nb=73,ob=91,pb=103,qb=113,rb=666,sb=1,tb=2,ub=3,vb=4,wb=3,xb=function(a,b,c,d,e){this.good_length=a,this.max_lazy=b,this.nice_length=c,this.max_chain=d,this.func=e};B=[new xb(0,0,0,0,n),new xb(4,4,8,4,o),new xb(4,5,16,8,o),new xb(4,6,32,32,o),new xb(4,4,16,16,p),new xb(8,16,32,32,p),new xb(8,16,128,128,p),new xb(8,32,128,256,p),new xb(32,128,258,1024,p),new xb(32,258,258,4096,p)],c.deflateInit=y,c.deflateInit2=x,c.deflateReset=v,c.deflateResetKeep=u,c.deflateSetHeader=w,c.deflate=z,c.deflateEnd=A,c.deflateInfo="pako deflate (from Nodeca project)"},{"../utils/common":27,"./adler32":29,"./crc32":31,"./messages":37,"./trees":38}],33:[function(a,b){"use strict";function c(){this.text=0,this.time=0,this.xflags=0,this.os=0,this.extra=null,this.extra_len=0,this.name="",this.comment="",this.hcrc=0,this.done=!1}b.exports=c},{}],34:[function(a,b){"use strict";var c=30,d=12;b.exports=function(a,b){var e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u,v,w,x,y,z,A,B,C;e=a.state,f=a.next_in,B=a.input,g=f+(a.avail_in-5),h=a.next_out,C=a.output,i=h-(b-a.avail_out),j=h+(a.avail_out-257),k=e.dmax,l=e.wsize,m=e.whave,n=e.wnext,o=e.window,p=e.hold,q=e.bits,r=e.lencode,s=e.distcode,t=(1<<e.lenbits)-1,u=(1<<e.distbits)-1;a:do{15>q&&(p+=B[f++]<<q,q+=8,p+=B[f++]<<q,q+=8),v=r[p&t];b:for(;;){if(w=v>>>24,p>>>=w,q-=w,w=v>>>16&255,0===w)C[h++]=65535&v;else{if(!(16&w)){if(0===(64&w)){v=r[(65535&v)+(p&(1<<w)-1)];continue b}if(32&w){e.mode=d;break a}a.msg="invalid literal/length code",e.mode=c;break 
a}x=65535&v,w&=15,w&&(w>q&&(p+=B[f++]<<q,q+=8),x+=p&(1<<w)-1,p>>>=w,q-=w),15>q&&(p+=B[f++]<<q,q+=8,p+=B[f++]<<q,q+=8),v=s[p&u];c:for(;;){if(w=v>>>24,p>>>=w,q-=w,w=v>>>16&255,!(16&w)){if(0===(64&w)){v=s[(65535&v)+(p&(1<<w)-1)];continue c}a.msg="invalid distance code",e.mode=c;break a}if(y=65535&v,w&=15,w>q&&(p+=B[f++]<<q,q+=8,w>q&&(p+=B[f++]<<q,q+=8)),y+=p&(1<<w)-1,y>k){a.msg="invalid distance too far back",e.mode=c;break a}if(p>>>=w,q-=w,w=h-i,y>w){if(w=y-w,w>m&&e.sane){a.msg="invalid distance too far back",e.mode=c;break a}if(z=0,A=o,0===n){if(z+=l-w,x>w){x-=w;do C[h++]=o[z++];while(--w);z=h-y,A=C}}else if(w>n){if(z+=l+n-w,w-=n,x>w){x-=w;do C[h++]=o[z++];while(--w);if(z=0,x>n){w=n,x-=w;do C[h++]=o[z++];while(--w);z=h-y,A=C}}}else if(z+=n-w,x>w){x-=w;do C[h++]=o[z++];while(--w);z=h-y,A=C}for(;x>2;)C[h++]=A[z++],C[h++]=A[z++],C[h++]=A[z++],x-=3;x&&(C[h++]=A[z++],x>1&&(C[h++]=A[z++]))}else{z=h-y;do C[h++]=C[z++],C[h++]=C[z++],C[h++]=C[z++],x-=3;while(x>2);x&&(C[h++]=C[z++],x>1&&(C[h++]=C[z++]))}break}}break}}while(g>f&&j>h);x=q>>3,f-=x,q-=x<<3,p&=(1<<q)-1,a.next_in=f,a.next_out=h,a.avail_in=g>f?5+(g-f):5-(f-g),a.avail_out=j>h?257+(j-h):257-(h-j),e.hold=p,e.bits=q}},{}],35:[function(a,b,c){"use strict";function d(a){return(a>>>24&255)+(a>>>8&65280)+((65280&a)<<8)+((255&a)<<24)}function e(){this.mode=0,this.last=!1,this.wrap=0,this.havedict=!1,this.flags=0,this.dmax=0,this.check=0,this.total=0,this.head=null,this.wbits=0,this.wsize=0,this.whave=0,this.wnext=0,this.window=null,this.hold=0,this.bits=0,this.length=0,this.offset=0,this.extra=0,this.lencode=null,this.distcode=null,this.lenbits=0,this.distbits=0,this.ncode=0,this.nlen=0,this.ndist=0,this.have=0,this.next=null,this.lens=new r.Buf16(320),this.work=new r.Buf16(288),this.lendyn=null,this.distdyn=null,this.sane=0,this.back=0,this.was=0}function f(a){var b;return 
a&&a.state?(b=a.state,a.total_in=a.total_out=b.total=0,a.msg="",b.wrap&&(a.adler=1&b.wrap),b.mode=K,b.last=0,b.havedict=0,b.dmax=32768,b.head=null,b.hold=0,b.bits=0,b.lencode=b.lendyn=new r.Buf32(ob),b.distcode=b.distdyn=new r.Buf32(pb),b.sane=1,b.back=-1,C):F}function g(a){var b;return a&&a.state?(b=a.state,b.wsize=0,b.whave=0,b.wnext=0,f(a)):F}function h(a,b){var c,d;return a&&a.state?(d=a.state,0>b?(c=0,b=-b):(c=(b>>4)+1,48>b&&(b&=15)),b&&(8>b||b>15)?F:(null!==d.window&&d.wbits!==b&&(d.window=null),d.wrap=c,d.wbits=b,g(a))):F}function i(a,b){var c,d;return a?(d=new e,a.state=d,d.window=null,c=h(a,b),c!==C&&(a.state=null),c):F}function j(a){return i(a,rb)}function k(a){if(sb){var b;for(p=new r.Buf32(512),q=new r.Buf32(32),b=0;144>b;)a.lens[b++]=8;for(;256>b;)a.lens[b++]=9;for(;280>b;)a.lens[b++]=7;for(;288>b;)a.lens[b++]=8;for(v(x,a.lens,0,288,p,0,a.work,{bits:9}),b=0;32>b;)a.lens[b++]=5;v(y,a.lens,0,32,q,0,a.work,{bits:5}),sb=!1}a.lencode=p,a.lenbits=9,a.distcode=q,a.distbits=5}function l(a,b,c,d){var e,f=a.state;return null===f.window&&(f.wsize=1<<f.wbits,f.wnext=0,f.whave=0,f.window=new r.Buf8(f.wsize)),d>=f.wsize?(r.arraySet(f.window,b,c-f.wsize,f.wsize,0),f.wnext=0,f.whave=f.wsize):(e=f.wsize-f.wnext,e>d&&(e=d),r.arraySet(f.window,b,c-d,e,f.wnext),d-=e,d?(r.arraySet(f.window,b,c-d,d,0),f.wnext=d,f.whave=f.wsize):(f.wnext+=e,f.wnext===f.wsize&&(f.wnext=0),f.whave<f.wsize&&(f.whave+=e))),0}function m(a,b){var c,e,f,g,h,i,j,m,n,o,p,q,ob,pb,qb,rb,sb,tb,ub,vb,wb,xb,yb,zb,Ab=0,Bb=new r.Buf8(4),Cb=[16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15];if(!a||!a.state||!a.output||!a.input&&0!==a.avail_in)return F;c=a.state,c.mode===V&&(c.mode=W),h=a.next_out,f=a.output,j=a.avail_out,g=a.next_in,e=a.input,i=a.avail_in,m=c.hold,n=c.bits,o=i,p=j,xb=C;a:for(;;)switch(c.mode){case K:if(0===c.wrap){c.mode=W;break}for(;16>n;){if(0===i)break 
a;i--,m+=e[g++]<<n,n+=8}if(2&c.wrap&&35615===m){c.check=0,Bb[0]=255&m,Bb[1]=m>>>8&255,c.check=t(c.check,Bb,2,0),m=0,n=0,c.mode=L;break}if(c.flags=0,c.head&&(c.head.done=!1),!(1&c.wrap)||(((255&m)<<8)+(m>>8))%31){a.msg="incorrect header check",c.mode=lb;break}if((15&m)!==J){a.msg="unknown compression method",c.mode=lb;break}if(m>>>=4,n-=4,wb=(15&m)+8,0===c.wbits)c.wbits=wb;else if(wb>c.wbits){a.msg="invalid window size",c.mode=lb;break}c.dmax=1<<wb,a.adler=c.check=1,c.mode=512&m?T:V,m=0,n=0;break;case L:for(;16>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(c.flags=m,(255&c.flags)!==J){a.msg="unknown compression method",c.mode=lb;break}if(57344&c.flags){a.msg="unknown header flags set",c.mode=lb;break}c.head&&(c.head.text=m>>8&1),512&c.flags&&(Bb[0]=255&m,Bb[1]=m>>>8&255,c.check=t(c.check,Bb,2,0)),m=0,n=0,c.mode=M;case M:for(;32>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}c.head&&(c.head.time=m),512&c.flags&&(Bb[0]=255&m,Bb[1]=m>>>8&255,Bb[2]=m>>>16&255,Bb[3]=m>>>24&255,c.check=t(c.check,Bb,4,0)),m=0,n=0,c.mode=N;case N:for(;16>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}c.head&&(c.head.xflags=255&m,c.head.os=m>>8),512&c.flags&&(Bb[0]=255&m,Bb[1]=m>>>8&255,c.check=t(c.check,Bb,2,0)),m=0,n=0,c.mode=O;case O:if(1024&c.flags){for(;16>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}c.length=m,c.head&&(c.head.extra_len=m),512&c.flags&&(Bb[0]=255&m,Bb[1]=m>>>8&255,c.check=t(c.check,Bb,2,0)),m=0,n=0}else c.head&&(c.head.extra=null);c.mode=P;case P:if(1024&c.flags&&(q=c.length,q>i&&(q=i),q&&(c.head&&(wb=c.head.extra_len-c.length,c.head.extra||(c.head.extra=new Array(c.head.extra_len)),r.arraySet(c.head.extra,e,g,q,wb)),512&c.flags&&(c.check=t(c.check,e,q,g)),i-=q,g+=q,c.length-=q),c.length))break a;c.length=0,c.mode=Q;case Q:if(2048&c.flags){if(0===i)break a;q=0;do wb=e[g+q++],c.head&&wb&&c.length<65536&&(c.head.name+=String.fromCharCode(wb));while(wb&&i>q);if(512&c.flags&&(c.check=t(c.check,e,q,g)),i-=q,g+=q,wb)break a}else c.head&&(c.head.name=null);c.length=0,c.mode=R;case 
R:if(4096&c.flags){if(0===i)break a;q=0;do wb=e[g+q++],c.head&&wb&&c.length<65536&&(c.head.comment+=String.fromCharCode(wb));while(wb&&i>q);if(512&c.flags&&(c.check=t(c.check,e,q,g)),i-=q,g+=q,wb)break a}else c.head&&(c.head.comment=null);c.mode=S;case S:if(512&c.flags){for(;16>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(m!==(65535&c.check)){a.msg="header crc mismatch",c.mode=lb;break}m=0,n=0}c.head&&(c.head.hcrc=c.flags>>9&1,c.head.done=!0),a.adler=c.check=0,c.mode=V;break;case T:for(;32>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}a.adler=c.check=d(m),m=0,n=0,c.mode=U;case U:if(0===c.havedict)return a.next_out=h,a.avail_out=j,a.next_in=g,a.avail_in=i,c.hold=m,c.bits=n,E;a.adler=c.check=1,c.mode=V;case V:if(b===A||b===B)break a;case W:if(c.last){m>>>=7&n,n-=7&n,c.mode=ib;break}for(;3>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}switch(c.last=1&m,m>>>=1,n-=1,3&m){case 0:c.mode=X;break;case 1:if(k(c),c.mode=bb,b===B){m>>>=2,n-=2;break a}break;case 2:c.mode=$;break;case 3:a.msg="invalid block type",c.mode=lb}m>>>=2,n-=2;break;case X:for(m>>>=7&n,n-=7&n;32>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if((65535&m)!==(m>>>16^65535)){a.msg="invalid stored block lengths",c.mode=lb;break}if(c.length=65535&m,m=0,n=0,c.mode=Y,b===B)break a;case Y:c.mode=Z;case Z:if(q=c.length){if(q>i&&(q=i),q>j&&(q=j),0===q)break a;r.arraySet(f,e,g,q,h),i-=q,g+=q,j-=q,h+=q,c.length-=q;break}c.mode=V;break;case $:for(;14>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(c.nlen=(31&m)+257,m>>>=5,n-=5,c.ndist=(31&m)+1,m>>>=5,n-=5,c.ncode=(15&m)+4,m>>>=4,n-=4,c.nlen>286||c.ndist>30){a.msg="too many length or distance symbols",c.mode=lb;break}c.have=0,c.mode=_;case _:for(;c.have<c.ncode;){for(;3>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}c.lens[Cb[c.have++]]=7&m,m>>>=3,n-=3}for(;c.have<19;)c.lens[Cb[c.have++]]=0;if(c.lencode=c.lendyn,c.lenbits=7,yb={bits:c.lenbits},xb=v(w,c.lens,0,19,c.lencode,0,c.work,yb),c.lenbits=yb.bits,xb){a.msg="invalid code lengths set",c.mode=lb;break}c.have=0,c.mode=ab;case 
ab:for(;c.have<c.nlen+c.ndist;){for(;Ab=c.lencode[m&(1<<c.lenbits)-1],qb=Ab>>>24,rb=Ab>>>16&255,sb=65535&Ab,!(n>=qb);){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(16>sb)m>>>=qb,n-=qb,c.lens[c.have++]=sb;else{if(16===sb){for(zb=qb+2;zb>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(m>>>=qb,n-=qb,0===c.have){a.msg="invalid bit length repeat",c.mode=lb;break}wb=c.lens[c.have-1],q=3+(3&m),m>>>=2,n-=2}else if(17===sb){for(zb=qb+3;zb>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}m>>>=qb,n-=qb,wb=0,q=3+(7&m),m>>>=3,n-=3}else{for(zb=qb+7;zb>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}m>>>=qb,n-=qb,wb=0,q=11+(127&m),m>>>=7,n-=7}if(c.have+q>c.nlen+c.ndist){a.msg="invalid bit length repeat",c.mode=lb;break}for(;q--;)c.lens[c.have++]=wb}}if(c.mode===lb)break;if(0===c.lens[256]){a.msg="invalid code -- missing end-of-block",c.mode=lb;break}if(c.lenbits=9,yb={bits:c.lenbits},xb=v(x,c.lens,0,c.nlen,c.lencode,0,c.work,yb),c.lenbits=yb.bits,xb){a.msg="invalid literal/lengths set",c.mode=lb;break}if(c.distbits=6,c.distcode=c.distdyn,yb={bits:c.distbits},xb=v(y,c.lens,c.nlen,c.ndist,c.distcode,0,c.work,yb),c.distbits=yb.bits,xb){a.msg="invalid distances set",c.mode=lb;break}if(c.mode=bb,b===B)break a;case bb:c.mode=cb;case cb:if(i>=6&&j>=258){a.next_out=h,a.avail_out=j,a.next_in=g,a.avail_in=i,c.hold=m,c.bits=n,u(a,p),h=a.next_out,f=a.output,j=a.avail_out,g=a.next_in,e=a.input,i=a.avail_in,m=c.hold,n=c.bits,c.mode===V&&(c.back=-1);break}for(c.back=0;Ab=c.lencode[m&(1<<c.lenbits)-1],qb=Ab>>>24,rb=Ab>>>16&255,sb=65535&Ab,!(n>=qb);){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(rb&&0===(240&rb)){for(tb=qb,ub=rb,vb=sb;Ab=c.lencode[vb+((m&(1<<tb+ub)-1)>>tb)],qb=Ab>>>24,rb=Ab>>>16&255,sb=65535&Ab,!(n>=tb+qb);){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}m>>>=tb,n-=tb,c.back+=tb}if(m>>>=qb,n-=qb,c.back+=qb,c.length=sb,0===rb){c.mode=hb;break}if(32&rb){c.back=-1,c.mode=V;break}if(64&rb){a.msg="invalid literal/length code",c.mode=lb;break}c.extra=15&rb,c.mode=db;case 
db:if(c.extra){for(zb=c.extra;zb>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}c.length+=m&(1<<c.extra)-1,m>>>=c.extra,n-=c.extra,c.back+=c.extra}c.was=c.length,c.mode=eb;case eb:for(;Ab=c.distcode[m&(1<<c.distbits)-1],qb=Ab>>>24,rb=Ab>>>16&255,sb=65535&Ab,!(n>=qb);){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(0===(240&rb)){for(tb=qb,ub=rb,vb=sb;Ab=c.distcode[vb+((m&(1<<tb+ub)-1)>>tb)],qb=Ab>>>24,rb=Ab>>>16&255,sb=65535&Ab,!(n>=tb+qb);){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}m>>>=tb,n-=tb,c.back+=tb}if(m>>>=qb,n-=qb,c.back+=qb,64&rb){a.msg="invalid distance code",c.mode=lb;break}c.offset=sb,c.extra=15&rb,c.mode=fb;case fb:if(c.extra){for(zb=c.extra;zb>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}c.offset+=m&(1<<c.extra)-1,m>>>=c.extra,n-=c.extra,c.back+=c.extra}if(c.offset>c.dmax){a.msg="invalid distance too far back",c.mode=lb;break}c.mode=gb;case gb:if(0===j)break a;if(q=p-j,c.offset>q){if(q=c.offset-q,q>c.whave&&c.sane){a.msg="invalid distance too far back",c.mode=lb;break}q>c.wnext?(q-=c.wnext,ob=c.wsize-q):ob=c.wnext-q,q>c.length&&(q=c.length),pb=c.window}else pb=f,ob=h-c.offset,q=c.length;q>j&&(q=j),j-=q,c.length-=q;do f[h++]=pb[ob++];while(--q);0===c.length&&(c.mode=cb);break;case hb:if(0===j)break a;f[h++]=c.length,j--,c.mode=cb;break;case ib:if(c.wrap){for(;32>n;){if(0===i)break a;i--,m|=e[g++]<<n,n+=8}if(p-=j,a.total_out+=p,c.total+=p,p&&(a.adler=c.check=c.flags?t(c.check,f,p,h-p):s(c.check,f,p,h-p)),p=j,(c.flags?m:d(m))!==c.check){a.msg="incorrect data check",c.mode=lb;break}m=0,n=0}c.mode=jb;case jb:if(c.wrap&&c.flags){for(;32>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(m!==(4294967295&c.total)){a.msg="incorrect length check",c.mode=lb;break}m=0,n=0}c.mode=kb;case kb:xb=D;break a;case lb:xb=G;break a;case mb:return H;case nb:default:return F}return 
a.next_out=h,a.avail_out=j,a.next_in=g,a.avail_in=i,c.hold=m,c.bits=n,(c.wsize||p!==a.avail_out&&c.mode<lb&&(c.mode<ib||b!==z))&&l(a,a.output,a.next_out,p-a.avail_out)?(c.mode=mb,H):(o-=a.avail_in,p-=a.avail_out,a.total_in+=o,a.total_out+=p,c.total+=p,c.wrap&&p&&(a.adler=c.check=c.flags?t(c.check,f,p,a.next_out-p):s(c.check,f,p,a.next_out-p)),a.data_type=c.bits+(c.last?64:0)+(c.mode===V?128:0)+(c.mode===bb||c.mode===Y?256:0),(0===o&&0===p||b===z)&&xb===C&&(xb=I),xb)}function n(a){if(!a||!a.state)return F;var b=a.state;return b.window&&(b.window=null),a.state=null,C}function o(a,b){var c;return a&&a.state?(c=a.state,0===(2&c.wrap)?F:(c.head=b,b.done=!1,C)):F}var p,q,r=a("../utils/common"),s=a("./adler32"),t=a("./crc32"),u=a("./inffast"),v=a("./inftrees"),w=0,x=1,y=2,z=4,A=5,B=6,C=0,D=1,E=2,F=-2,G=-3,H=-4,I=-5,J=8,K=1,L=2,M=3,N=4,O=5,P=6,Q=7,R=8,S=9,T=10,U=11,V=12,W=13,X=14,Y=15,Z=16,$=17,_=18,ab=19,bb=20,cb=21,db=22,eb=23,fb=24,gb=25,hb=26,ib=27,jb=28,kb=29,lb=30,mb=31,nb=32,ob=852,pb=592,qb=15,rb=qb,sb=!0;c.inflateReset=g,c.inflateReset2=h,c.inflateResetKeep=f,c.inflateInit=j,c.inflateInit2=i,c.inflate=m,c.inflateEnd=n,c.inflateGetHeader=o,c.inflateInfo="pako inflate (from Nodeca project)"},{"../utils/common":27,"./adler32":29,"./crc32":31,"./inffast":34,"./inftrees":36}],36:[function(a,b){"use strict";var c=a("../utils/common"),d=15,e=852,f=592,g=0,h=1,i=2,j=[3,4,5,6,7,8,9,10,11,13,15,17,19,23,27,31,35,43,51,59,67,83,99,115,131,163,195,227,258,0,0],k=[16,16,16,16,16,16,16,16,17,17,17,17,18,18,18,18,19,19,19,19,20,20,20,20,21,21,21,21,16,72,78],l=[1,2,3,4,5,7,9,13,17,25,33,49,65,97,129,193,257,385,513,769,1025,1537,2049,3073,4097,6145,8193,12289,16385,24577,0,0],m=[16,16,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,24,24,25,25,26,26,27,27,28,28,29,29,64,64];b.exports=function(a,b,n,o,p,q,r,s){var t,u,v,w,x,y,z,A,B,C=s.bits,D=0,E=0,F=0,G=0,H=0,I=0,J=0,K=0,L=0,M=0,N=null,O=0,P=new c.Buf16(d+1),Q=new 
c.Buf16(d+1),R=null,S=0;for(D=0;d>=D;D++)P[D]=0;for(E=0;o>E;E++)P[b[n+E]]++;for(H=C,G=d;G>=1&&0===P[G];G--);if(H>G&&(H=G),0===G)return p[q++]=20971520,p[q++]=20971520,s.bits=1,0;for(F=1;G>F&&0===P[F];F++);for(F>H&&(H=F),K=1,D=1;d>=D;D++)if(K<<=1,K-=P[D],0>K)return-1;if(K>0&&(a===g||1!==G))return-1;for(Q[1]=0,D=1;d>D;D++)Q[D+1]=Q[D]+P[D];for(E=0;o>E;E++)0!==b[n+E]&&(r[Q[b[n+E]]++]=E);if(a===g?(N=R=r,y=19):a===h?(N=j,O-=257,R=k,S-=257,y=256):(N=l,R=m,y=-1),M=0,E=0,D=F,x=q,I=H,J=0,v=-1,L=1<<H,w=L-1,a===h&&L>e||a===i&&L>f)return 1;for(var T=0;;){T++,z=D-J,r[E]<y?(A=0,B=r[E]):r[E]>y?(A=R[S+r[E]],B=N[O+r[E]]):(A=96,B=0),t=1<<D-J,u=1<<I,F=u;do u-=t,p[x+(M>>J)+u]=z<<24|A<<16|B|0;while(0!==u);for(t=1<<D-1;M&t;)t>>=1;if(0!==t?(M&=t-1,M+=t):M=0,E++,0===--P[D]){if(D===G)break;D=b[n+r[E]]}if(D>H&&(M&w)!==v){for(0===J&&(J=H),x+=F,I=D-J,K=1<<I;G>I+J&&(K-=P[I+J],!(0>=K));)I++,K<<=1;if(L+=1<<I,a===h&&L>e||a===i&&L>f)return 1;v=M&w,p[v]=H<<24|I<<16|x-q|0}}return 0!==M&&(p[x+M]=D-J<<24|64<<16|0),s.bits=H,0}},{"../utils/common":27}],37:[function(a,b){"use strict";b.exports={2:"need dictionary",1:"stream end",0:"","-1":"file error","-2":"stream error","-3":"data error","-4":"insufficient memory","-5":"buffer error","-6":"incompatible version"}},{}],38:[function(a,b,c){"use strict";function d(a){for(var b=a.length;--b>=0;)a[b]=0}function e(a){return 256>a?gb[a]:gb[256+(a>>>7)]}function f(a,b){a.pending_buf[a.pending++]=255&b,a.pending_buf[a.pending++]=b>>>8&255}function g(a,b,c){a.bi_valid>V-c?(a.bi_buf|=b<<a.bi_valid&65535,f(a,a.bi_buf),a.bi_buf=b>>V-a.bi_valid,a.bi_valid+=c-V):(a.bi_buf|=b<<a.bi_valid&65535,a.bi_valid+=c)}function h(a,b,c){g(a,c[2*b],c[2*b+1])}function i(a,b){var c=0;do c|=1&a,a>>>=1,c<<=1;while(--b>0);return c>>>1}function j(a){16===a.bi_valid?(f(a,a.bi_buf),a.bi_buf=0,a.bi_valid=0):a.bi_valid>=8&&(a.pending_buf[a.pending++]=255&a.bi_buf,a.bi_buf>>=8,a.bi_valid-=8)}function k(a,b){var 
c,d,e,f,g,h,i=b.dyn_tree,j=b.max_code,k=b.stat_desc.static_tree,l=b.stat_desc.has_stree,m=b.stat_desc.extra_bits,n=b.stat_desc.extra_base,o=b.stat_desc.max_length,p=0;for(f=0;U>=f;f++)a.bl_count[f]=0;for(i[2*a.heap[a.heap_max]+1]=0,c=a.heap_max+1;T>c;c++)d=a.heap[c],f=i[2*i[2*d+1]+1]+1,f>o&&(f=o,p++),i[2*d+1]=f,d>j||(a.bl_count[f]++,g=0,d>=n&&(g=m[d-n]),h=i[2*d],a.opt_len+=h*(f+g),l&&(a.static_len+=h*(k[2*d+1]+g)));if(0!==p){do{for(f=o-1;0===a.bl_count[f];)f--;a.bl_count[f]--,a.bl_count[f+1]+=2,a.bl_count[o]--,p-=2}while(p>0);for(f=o;0!==f;f--)for(d=a.bl_count[f];0!==d;)e=a.heap[--c],e>j||(i[2*e+1]!==f&&(a.opt_len+=(f-i[2*e+1])*i[2*e],i[2*e+1]=f),d--)}}function l(a,b,c){var d,e,f=new Array(U+1),g=0;for(d=1;U>=d;d++)f[d]=g=g+c[d-1]<<1;for(e=0;b>=e;e++){var h=a[2*e+1];0!==h&&(a[2*e]=i(f[h]++,h))}}function m(){var a,b,c,d,e,f=new Array(U+1);for(c=0,d=0;O-1>d;d++)for(ib[d]=c,a=0;a<1<<_[d];a++)hb[c++]=d;for(hb[c-1]=d,e=0,d=0;16>d;d++)for(jb[d]=e,a=0;a<1<<ab[d];a++)gb[e++]=d;for(e>>=7;R>d;d++)for(jb[d]=e<<7,a=0;a<1<<ab[d]-7;a++)gb[256+e++]=d;for(b=0;U>=b;b++)f[b]=0;for(a=0;143>=a;)eb[2*a+1]=8,a++,f[8]++;for(;255>=a;)eb[2*a+1]=9,a++,f[9]++;for(;279>=a;)eb[2*a+1]=7,a++,f[7]++;for(;287>=a;)eb[2*a+1]=8,a++,f[8]++;for(l(eb,Q+1,f),a=0;R>a;a++)fb[2*a+1]=5,fb[2*a]=i(a,5);kb=new nb(eb,_,P+1,Q,U),lb=new nb(fb,ab,0,R,U),mb=new nb(new Array(0),bb,0,S,W)}function n(a){var b;for(b=0;Q>b;b++)a.dyn_ltree[2*b]=0;for(b=0;R>b;b++)a.dyn_dtree[2*b]=0;for(b=0;S>b;b++)a.bl_tree[2*b]=0;a.dyn_ltree[2*X]=1,a.opt_len=a.static_len=0,a.last_lit=a.matches=0}function o(a){a.bi_valid>8?f(a,a.bi_buf):a.bi_valid>0&&(a.pending_buf[a.pending++]=a.bi_buf),a.bi_buf=0,a.bi_valid=0}function p(a,b,c,d){o(a),d&&(f(a,c),f(a,~c)),E.arraySet(a.pending_buf,a.window,b,c,a.pending),a.pending+=c}function q(a,b,c,d){var e=2*b,f=2*c;return a[e]<a[f]||a[e]===a[f]&&d[b]<=d[c]}function r(a,b,c){for(var 
d=a.heap[c],e=c<<1;e<=a.heap_len&&(e<a.heap_len&&q(b,a.heap[e+1],a.heap[e],a.depth)&&e++,!q(b,d,a.heap[e],a.depth));)a.heap[c]=a.heap[e],c=e,e<<=1;a.heap[c]=d}function s(a,b,c){var d,f,i,j,k=0;if(0!==a.last_lit)do d=a.pending_buf[a.d_buf+2*k]<<8|a.pending_buf[a.d_buf+2*k+1],f=a.pending_buf[a.l_buf+k],k++,0===d?h(a,f,b):(i=hb[f],h(a,i+P+1,b),j=_[i],0!==j&&(f-=ib[i],g(a,f,j)),d--,i=e(d),h(a,i,c),j=ab[i],0!==j&&(d-=jb[i],g(a,d,j)));while(k<a.last_lit);h(a,X,b)}function t(a,b){var c,d,e,f=b.dyn_tree,g=b.stat_desc.static_tree,h=b.stat_desc.has_stree,i=b.stat_desc.elems,j=-1;for(a.heap_len=0,a.heap_max=T,c=0;i>c;c++)0!==f[2*c]?(a.heap[++a.heap_len]=j=c,a.depth[c]=0):f[2*c+1]=0;for(;a.heap_len<2;)e=a.heap[++a.heap_len]=2>j?++j:0,f[2*e]=1,a.depth[e]=0,a.opt_len--,h&&(a.static_len-=g[2*e+1]);for(b.max_code=j,c=a.heap_len>>1;c>=1;c--)r(a,f,c);e=i;do c=a.heap[1],a.heap[1]=a.heap[a.heap_len--],r(a,f,1),d=a.heap[1],a.heap[--a.heap_max]=c,a.heap[--a.heap_max]=d,f[2*e]=f[2*c]+f[2*d],a.depth[e]=(a.depth[c]>=a.depth[d]?a.depth[c]:a.depth[d])+1,f[2*c+1]=f[2*d+1]=e,a.heap[1]=e++,r(a,f,1);while(a.heap_len>=2);a.heap[--a.heap_max]=a.heap[1],k(a,b),l(f,j,a.bl_count)}function u(a,b,c){var d,e,f=-1,g=b[1],h=0,i=7,j=4;for(0===g&&(i=138,j=3),b[2*(c+1)+1]=65535,d=0;c>=d;d++)e=g,g=b[2*(d+1)+1],++h<i&&e===g||(j>h?a.bl_tree[2*e]+=h:0!==e?(e!==f&&a.bl_tree[2*e]++,a.bl_tree[2*Y]++):10>=h?a.bl_tree[2*Z]++:a.bl_tree[2*$]++,h=0,f=e,0===g?(i=138,j=3):e===g?(i=6,j=3):(i=7,j=4))}function v(a,b,c){var d,e,f=-1,i=b[1],j=0,k=7,l=4;for(0===i&&(k=138,l=3),d=0;c>=d;d++)if(e=i,i=b[2*(d+1)+1],!(++j<k&&e===i)){if(l>j){do h(a,e,a.bl_tree);while(0!==--j)}else 0!==e?(e!==f&&(h(a,e,a.bl_tree),j--),h(a,Y,a.bl_tree),g(a,j-3,2)):10>=j?(h(a,Z,a.bl_tree),g(a,j-3,3)):(h(a,$,a.bl_tree),g(a,j-11,7));j=0,f=e,0===i?(k=138,l=3):e===i?(k=6,l=3):(k=7,l=4)}}function w(a){var b;for(u(a,a.dyn_ltree,a.l_desc.max_code),u(a,a.dyn_dtree,a.d_desc.max_code),t(a,a.bl_desc),b=S-1;b>=3&&0===a.bl_tree[2*cb[b]+1];b--);return 
a.opt_len+=3*(b+1)+5+5+4,b}function x(a,b,c,d){var e;for(g(a,b-257,5),g(a,c-1,5),g(a,d-4,4),e=0;d>e;e++)g(a,a.bl_tree[2*cb[e]+1],3);v(a,a.dyn_ltree,b-1),v(a,a.dyn_dtree,c-1)}function y(a){var b,c=4093624447;for(b=0;31>=b;b++,c>>>=1)if(1&c&&0!==a.dyn_ltree[2*b])return G;if(0!==a.dyn_ltree[18]||0!==a.dyn_ltree[20]||0!==a.dyn_ltree[26])return H;for(b=32;P>b;b++)if(0!==a.dyn_ltree[2*b])return H;return G}function z(a){pb||(m(),pb=!0),a.l_desc=new ob(a.dyn_ltree,kb),a.d_desc=new ob(a.dyn_dtree,lb),a.bl_desc=new ob(a.bl_tree,mb),a.bi_buf=0,a.bi_valid=0,n(a)}function A(a,b,c,d){g(a,(J<<1)+(d?1:0),3),p(a,b,c,!0)}function B(a){g(a,K<<1,3),h(a,X,eb),j(a)}function C(a,b,c,d){var e,f,h=0;a.level>0?(a.strm.data_type===I&&(a.strm.data_type=y(a)),t(a,a.l_desc),t(a,a.d_desc),h=w(a),e=a.opt_len+3+7>>>3,f=a.static_len+3+7>>>3,e>=f&&(e=f)):e=f=c+5,e>=c+4&&-1!==b?A(a,b,c,d):a.strategy===F||f===e?(g(a,(K<<1)+(d?1:0),3),s(a,eb,fb)):(g(a,(L<<1)+(d?1:0),3),x(a,a.l_desc.max_code+1,a.d_desc.max_code+1,h+1),s(a,a.dyn_ltree,a.dyn_dtree)),n(a),d&&o(a)}function D(a,b,c){return a.pending_buf[a.d_buf+2*a.last_lit]=b>>>8&255,a.pending_buf[a.d_buf+2*a.last_lit+1]=255&b,a.pending_buf[a.l_buf+a.last_lit]=255&c,a.last_lit++,0===b?a.dyn_ltree[2*c]++:(a.matches++,b--,a.dyn_ltree[2*(hb[c]+P+1)]++,a.dyn_dtree[2*e(b)]++),a.last_lit===a.lit_bufsize-1}var E=a("../utils/common"),F=4,G=0,H=1,I=2,J=0,K=1,L=2,M=3,N=258,O=29,P=256,Q=P+1+O,R=30,S=19,T=2*Q+1,U=15,V=16,W=7,X=256,Y=16,Z=17,$=18,_=[0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,0],ab=[0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13],bb=[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,3,7],cb=[16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15],db=512,eb=new Array(2*(Q+2));d(eb);var fb=new Array(2*R);d(fb);var gb=new Array(db);d(gb);var hb=new Array(N-M+1);d(hb);var ib=new Array(O);d(ib);var jb=new Array(R);d(jb);var 
kb,lb,mb,nb=function(a,b,c,d,e){this.static_tree=a,this.extra_bits=b,this.extra_base=c,this.elems=d,this.max_length=e,this.has_stree=a&&a.length},ob=function(a,b){this.dyn_tree=a,this.max_code=0,this.stat_desc=b},pb=!1;c._tr_init=z,c._tr_stored_block=A,c._tr_flush_block=C,c._tr_tally=D,c._tr_align=B},{"../utils/common":27}],39:[function(a,b){"use strict";function c(){this.input=null,this.next_in=0,this.avail_in=0,this.total_in=0,this.output=null,this.next_out=0,this.avail_out=0,this.total_out=0,this.msg="",this.state=null,this.data_type=2,this.adler=0}b.exports=c},{}]},{},[9])(9)});'use strict';if(tr.isVinn){global.window={};}'use strict';if(tr.isVinn){global.JSZip=global.window.JSZip;global.window=undefined;}else if(tr.isNode){const jsZipAbsPath=HTMLImportsLoader.hrefToAbsolutePath('/jszip.min.js');const jsZipModule=require(jsZipAbsPath);global.JSZip=jsZipModule;}'use strict';tr.exportTo('tr.e.importer',function(){const GZIP_MEMBER_HEADER_ID_SIZE=3;const GZIP_HEADER_ID1=0x1f;const GZIP_HEADER_ID2=0x8b;const GZIP_DEFLATE_COMPRESSION=8;function GzipImporter(model,eventData){this.inflateAsTraceStream=false;if(typeof(eventData)==='string'||eventData instanceof String){eventData=JSZip.utils.transformTo('uint8array',eventData);}else if(eventData instanceof ArrayBuffer){eventData=new Uint8Array(eventData);}else if(eventData instanceof tr.b.InMemoryTraceStream){eventData=eventData.data;this.inflateAsTraceStream_=true;}else{throw new Error('Unknown gzip data format');} this.model_=model;this.gzipData_=eventData;} GzipImporter.canImport=function(eventData){if(eventData instanceof tr.b.InMemoryTraceStream){eventData=eventData.header;} let header;if(eventData instanceof ArrayBuffer){header=new Uint8Array(eventData.slice(0,GZIP_MEMBER_HEADER_ID_SIZE));}else if(typeof(eventData)==='string'||eventData instanceof String){header=eventData.substring(0,GZIP_MEMBER_HEADER_ID_SIZE);header=JSZip.utils.transformTo('uint8array',header);}else{return false;} @@ -5700,7 +5700,7 @@ 
XMarkerAnnotationView.prototype={__proto__:tr.ui.annotations.AnnotationView.prototype,draw(ctx){const dt=this.viewport_.currentDisplayTransform;const viewX=dt.xWorldToView(this.annotation_.timestamp);ctx.beginPath();tr.ui.b.drawLine(ctx,viewX,0,viewX,ctx.canvas.height);ctx.strokeStyle=this.annotation_.strokeStyle;ctx.stroke();}};return{XMarkerAnnotationView,};});'use strict';tr.exportTo('tr.model',function(){function XMarkerAnnotation(timestamp){tr.model.Annotation.apply(this,arguments);this.timestamp=timestamp;this.strokeStyle='rgba(0, 0, 255, 0.5)';} XMarkerAnnotation.fromDict=function(dict){return new XMarkerAnnotation(dict.args.timestamp);};XMarkerAnnotation.prototype={__proto__:tr.model.Annotation.prototype,toDict(){return{typeName:'xmarker',args:{timestamp:this.timestamp}};},createView_(viewport){return new tr.ui.annotations.XMarkerAnnotationView(viewport,this);}};tr.model.Annotation.register(XMarkerAnnotation,{typeName:'xmarker'});return{XMarkerAnnotation,};});'use strict';tr.exportTo('tr.e.importer',function(){const Base64=tr.b.Base64;const deepCopy=tr.b.deepCopy;const ColorScheme=tr.b.ColorScheme;const HeapDumpTraceEventImporter=tr.e.importer.HeapDumpTraceEventImporter;const LegacyHeapDumpTraceEventImporter=tr.e.importer.LegacyHeapDumpTraceEventImporter;const StreamingEventExpander=tr.e.importer.StreamingEventExpander;const ProfilingDictionaryReader=tr.e.importer.ProfilingDictionaryReader;function getEventColor(event,opt_customName){if(event.cname){return ColorScheme.getColorIdForReservedName(event.cname);}else if(opt_customName||event.name){return ColorScheme.getColorIdForGeneralPurposeString(opt_customName||event.name);}} function isLegacyChromeClockSyncEvent(event){return event.name!==undefined&&event.name.startsWith(LEGACY_CHROME_CLOCK_SYNC_EVENT_NAME_PREFIX)&&((event.ph==='S')||(event.ph==='F'));} -const PRODUCER='producer';const CONSUMER='consumer';const STEP='step';const BACKGROUND=tr.model.ContainerMemoryDump.LevelOfDetail.BACKGROUND;const 
LIGHT=tr.model.ContainerMemoryDump.LevelOfDetail.LIGHT;const DETAILED=tr.model.ContainerMemoryDump.LevelOfDetail.DETAILED;const MEMORY_DUMP_LEVEL_OF_DETAIL_ORDER=[undefined,BACKGROUND,LIGHT,DETAILED];const GLOBAL_MEMORY_ALLOCATOR_DUMP_PREFIX='global/';const LEGACY_CHROME_CLOCK_SYNC_EVENT_NAME_PREFIX='ClockSyncEvent.';const BYTE_STAT_NAME_MAP={'pc':'privateCleanResident','pd':'privateDirtyResident','sc':'sharedCleanResident','sd':'sharedDirtyResident','pss':'proportionalResident','sw':'swapped'};const WEAK_MEMORY_ALLOCATOR_DUMP_FLAG=1<<0;const OBJECT_TYPE_NAME_PATTERNS=[{prefix:'const char *WOW::getStringWithTypeName() [T = ',suffix:']'},{prefix:'const char* WOW::getStringWithTypeName() [with T = ',suffix:']'},{prefix:'const char *__cdecl WOW::getStringWithTypeName<',suffix:'>(void)'}];const SUBTRACE_FIELDS=new Set(['powerTraceAsString','systemTraceEvents',]);const NON_METADATA_FIELDS=new Set(['displayTimeUnit','samples','stackFrames','traceAnnotations','traceEvents',...SUBTRACE_FIELDS]);function TraceEventImporter(model,eventData){this.hasEvents_=undefined;this.importPriority=1;this.model_=model;this.events_=undefined;this.sampleEvents_=undefined;this.stackFrameEvents_=undefined;this.stackFrameTree_=new tr.model.ProfileTree();this.subtraces_=[];this.eventsWereFromString_=false;this.softwareMeasuredCpuCount_=undefined;this.allAsyncEvents_=[];this.allFlowEvents_=[];this.allObjectEvents_=[];this.contextProcessorPerThread={};this.traceEventSampleStackFramesByName_={};this.v8ProcessCodeMaps_={};this.v8ProcessRootStackFrame_={};this.v8SamplingData_=[];this.profileTrees_=new Map();this.profileInfo_=new Map();this.legacyChromeClockSyncStartEvent_=undefined;this.legacyChromeClockSyncFinishEvent_=undefined;this.allMemoryDumpEvents_={};this.heapProfileExpander=new ProfilingDictionaryReader();this.objectTypeNameMap_={};this.clockDomainId_=tr.model.ClockDomainId.UNKNOWN_CHROME_LEGACY;this.toModelTime_=undefined;if(typeof(eventData)==='string'||eventData instanceof 
String){eventData=eventData.trim();if(eventData[0]==='['){eventData=eventData.replace(/\s*,\s*$/,'');if(eventData[eventData.length-1]!==']'){eventData=eventData+']';}} +const PRODUCER='producer';const CONSUMER='consumer';const STEP='step';const BACKGROUND=tr.model.ContainerMemoryDump.LevelOfDetail.BACKGROUND;const LIGHT=tr.model.ContainerMemoryDump.LevelOfDetail.LIGHT;const DETAILED=tr.model.ContainerMemoryDump.LevelOfDetail.DETAILED;const MEMORY_DUMP_LEVEL_OF_DETAIL_ORDER=[undefined,BACKGROUND,LIGHT,DETAILED];const GLOBAL_MEMORY_ALLOCATOR_DUMP_PREFIX='global/';const LEGACY_CHROME_CLOCK_SYNC_EVENT_NAME_PREFIX='ClockSyncEvent.';const BYTE_STAT_NAME_MAP={'pc':'privateCleanResident','pd':'privateDirtyResident','sc':'sharedCleanResident','sd':'sharedDirtyResident','pss':'proportionalResident','sw':'swapped'};const WEAK_MEMORY_ALLOCATOR_DUMP_FLAG=1<<0;const OBJECT_TYPE_NAME_PATTERNS=[{prefix:'const char *WTF::getStringWithTypeName() [T = ',suffix:']'},{prefix:'const char* WTF::getStringWithTypeName() [with T = ',suffix:']'},{prefix:'const char *__cdecl WTF::getStringWithTypeName<',suffix:'>(void)'}];const SUBTRACE_FIELDS=new Set(['powerTraceAsString','systemTraceEvents',]);const NON_METADATA_FIELDS=new Set(['displayTimeUnit','samples','stackFrames','traceAnnotations','traceEvents',...SUBTRACE_FIELDS]);function TraceEventImporter(model,eventData){this.hasEvents_=undefined;this.importPriority=1;this.model_=model;this.events_=undefined;this.sampleEvents_=undefined;this.stackFrameEvents_=undefined;this.stackFrameTree_=new tr.model.ProfileTree();this.subtraces_=[];this.eventsWereFromString_=false;this.softwareMeasuredCpuCount_=undefined;this.allAsyncEvents_=[];this.allFlowEvents_=[];this.allObjectEvents_=[];this.contextProcessorPerThread={};this.traceEventSampleStackFramesByName_={};this.v8ProcessCodeMaps_={};this.v8ProcessRootStackFrame_={};this.v8SamplingData_=[];this.profileTrees_=new Map();this.profileInfo_=new 
Map();this.legacyChromeClockSyncStartEvent_=undefined;this.legacyChromeClockSyncFinishEvent_=undefined;this.allMemoryDumpEvents_={};this.heapProfileExpander=new ProfilingDictionaryReader();this.objectTypeNameMap_={};this.clockDomainId_=tr.model.ClockDomainId.UNKNOWN_CHROME_LEGACY;this.toModelTime_=undefined;if(typeof(eventData)==='string'||eventData instanceof String){eventData=eventData.trim();if(eventData[0]==='['){eventData=eventData.replace(/\s*,\s*$/,'');if(eventData[eventData.length-1]!==']'){eventData=eventData+']';}} this.events_=JSON.parse(eventData);this.eventsWereFromString_=true;}else{this.events_=eventData;} if(this.events_.traceEvents){const container=this.events_;this.events_=this.events_.traceEvents;for(const subtraceField of SUBTRACE_FIELDS){if(container[subtraceField]){this.storeSubtrace_(container[subtraceField]);}} this.storeSamples_(container.samples);this.storeStackFrames_(container.stackFrames);this.storeDisplayTimeUnit_(container.displayTimeUnit);this.storeTraceAnnotations_(container.traceAnnotations);this.storeMetadata_(container);}else if(this.events_ instanceof tr.b.TraceStream){const parser=oboe().node('{cat ph}',function(e){return oboe.drop;}).node('!.powerTraceAsString',this.storeSubtrace_.bind(this)).node('!.systemTraceEvents',this.storeSubtrace_.bind(this)).node('!.samples',this.storeSamples_.bind(this)).node('!.stackFrames',this.storeStackFrames_.bind(this)).node('!.displayTimeUnit',this.storeDisplayTimeUnit_.bind(this)).node('!.traceAnnotations',this.storeTraceAnnotations_.bind(this)).done(this.storeMetadata_.bind(this));this.events_.rewind();while(this.events_.hasData){parser.write(this.events_.readNumBytes());} @@ -7461,10 +7461,10 @@ if(dict.callback===undefined){throw new Error('callback must be given');} this.eventType_=dict.eventType;this.keyCodes_=[];if(dict.keyCode){this.pushKeyCode_(dict.keyCode);}else if(dict.keyCodes){dict.keyCodes.forEach(this.pushKeyCode_,this);} 
this.useCapture_=!!dict.useCapture;this.callback_=dict.callback;this.thisArg_=dict.thisArg!==undefined?dict.thisArg:undefined;this.helpText_=dict.helpText!==undefined?dict.helpText:undefined;} -HotKey.prototype={get eventType(){return this.eventType_;},get keyCodes(){return this.keyCodes_;},get helpText(){return this.helpText_;},call(e){this.callback_.call(this.thisArg_,e);},pushKeyCode_(keyCode){this.keyCodes_.push(keyCode);}};return{HotKey,};});'use strict';Polymer({is:'tv-ui-b-hotkey-controller',created(){this.isAttached_=false;this.globalMode_=false;this.coupledToParentController_=undefined;this.curHost_=undefined;this.childControllers_=[];this.bubblingKeyDownHotKeys_={};this.capturingKeyDownHotKeys_={};this.bubblingKeyPressHotKeys_={};this.capturingKeyPressHotKeys_={};this.onBubblingKeyDown_=this.onKey_.bind(this,false);this.onCapturingKeyDown_=this.onKey_.bind(this,true);this.onBubblingKeyPress_=this.onKey_.bind(this,false);this.onCapturingKeyPress_=this.onKey_.bind(this,true);},attached(){this.isAttached_=true;const host=this.findHost_();if(host.__hotkeyController){throw new Error('Multiple hotkey controllers attached to this host');} +HotKey.prototype={get eventType(){return this.eventType_;},get keyCodes(){return this.keyCodes_;},get helpText(){return this.helpText_;},call(e){this.callback_.call(this.thisArg_,e);},pushKeyCode_(keyCode){this.keyCodes_.push(keyCode);}};return{HotKey,};});'use 
strict';Polymer({is:'tv-ui-b-hotkey-controller',created(){this.isAttached_=false;this.globalMode_=false;this.slavedToParentController_=undefined;this.curHost_=undefined;this.childControllers_=[];this.bubblingKeyDownHotKeys_={};this.capturingKeyDownHotKeys_={};this.bubblingKeyPressHotKeys_={};this.capturingKeyPressHotKeys_={};this.onBubblingKeyDown_=this.onKey_.bind(this,false);this.onCapturingKeyDown_=this.onKey_.bind(this,true);this.onBubblingKeyPress_=this.onKey_.bind(this,false);this.onCapturingKeyPress_=this.onKey_.bind(this,true);},attached(){this.isAttached_=true;const host=this.findHost_();if(host.__hotkeyController){throw new Error('Multiple hotkey controllers attached to this host');} host.__hotkeyController=this;this.curHost_=host;let parentElement;if(host.parentElement){parentElement=host.parentElement;}else{parentElement=Polymer.dom(host).parentNode.host;} -const parentController=tr.b.getHotkeyControllerForElement(parentElement);if(parentController){this.coupledToParentController_=parentController;parentController.addChildController_(this);return;} -host.addEventListener('keydown',this.onBubblingKeyDown_,false);host.addEventListener('keydown',this.onCapturingKeyDown_,true);host.addEventListener('keypress',this.onBubblingKeyPress_,false);host.addEventListener('keypress',this.onCapturingKeyPress_,true);},detached(){this.isAttached_=false;const host=this.curHost_;if(!host)return;delete host.__hotkeyController;this.curHost_=undefined;if(this.coupledToParentController_){this.coupledToParentController_.removeChildController_(this);this.coupledToParentController_=undefined;return;} +const parentController=tr.b.getHotkeyControllerForElement(parentElement);if(parentController){this.slavedToParentController_=parentController;parentController.addChildController_(this);return;} 
+host.addEventListener('keydown',this.onBubblingKeyDown_,false);host.addEventListener('keydown',this.onCapturingKeyDown_,true);host.addEventListener('keypress',this.onBubblingKeyPress_,false);host.addEventListener('keypress',this.onCapturingKeyPress_,true);},detached(){this.isAttached_=false;const host=this.curHost_;if(!host)return;delete host.__hotkeyController;this.curHost_=undefined;if(this.slavedToParentController_){this.slavedToParentController_.removeChildController_(this);this.slavedToParentController_=undefined;return;} host.removeEventListener('keydown',this.onBubblingKeyDown_,false);host.removeEventListener('keydown',this.onCapturingKeyDown_,true);host.removeEventListener('keypress',this.onBubblingKeyPress_,false);host.removeEventListener('keypress',this.onCapturingKeyPress_,true);},addChildController_(controller){const i=this.childControllers_.indexOf(controller);if(i!==-1){throw new Error('Controller already registered');} this.childControllers_.push(controller);},removeChildController_(controller){const i=this.childControllers_.indexOf(controller);if(i===-1){throw new Error('Controller not registered');} this.childControllers_.splice(i,1);return controller;},getKeyMapForEventType_(eventType,useCapture){if(eventType==='keydown'){if(!useCapture){return this.bubblingKeyDownHotKeys_;} @@ -7479,7 +7479,7 @@ keyMap[keyCode]=hotKey;} for(let i=0;i<hotKey.keyCodes.length;i++){const keyCode=hotKey.keyCodes[i];delete keyMap[keyCode];} return hotKey;},get globalMode(){return this.globalMode_;},set globalMode(globalMode){const wasAttached=this.isAttached_;if(wasAttached){this.detached();} -this.globalMode_=!!globalMode;if(wasAttached){this.attached();}},get topmostConroller_(){if(this.coupledToParentController_){return this.coupledToParentController_.topmostConroller_;} +this.globalMode_=!!globalMode;if(wasAttached){this.attached();}},get topmostConroller_(){if(this.slavedToParentController_){return this.slavedToParentController_.topmostConroller_;} return 
this;},childRequestsGeneralFocus(child){const topmost=this.topmostConroller_;if(topmost.curHost_){if(topmost.curHost_.hasAttribute('tabIndex')){topmost.curHost_.focus();}else{if(document.activeElement){document.activeElement.blur();}}}else{if(document.activeElement){document.activeElement.blur();}}},childRequestsBlur(child){child.blur();const topmost=this.topmostConroller_;if(topmost.curHost_){topmost.curHost_.focus();}},findHost_(){if(this.globalMode_)return document.body;if(this.parentElement)return this.parentElement;if(!Polymer.dom(this).parentNode)return this.host;let node=this.parentNode;while(Polymer.dom(node).parentNode)node=Polymer.dom(node).parentNode;return node.host;},appendMatchingHotKeysTo_(matchedHotKeys,useCapture,e){const localKeyMap=this.getKeyMapForEventType_(e.type,useCapture);const localHotKey=localKeyMap[e.keyCode];if(localHotKey){matchedHotKeys.push(localHotKey);} for(let i=0;i<this.childControllers_.length;i++){const controller=this.childControllers_[i];controller.appendMatchingHotKeysTo_(matchedHotKeys,useCapture,e);}},onKey_(useCapture,e){if(!useCapture&&e.path[0].tagName==='INPUT')return;let sortedControllers;const matchedHotKeys=[];this.appendMatchingHotKeysTo_(matchedHotKeys,useCapture,e);if(matchedHotKeys.length===0)return false;if(matchedHotKeys.length>1){throw new Error('More than one hotKey is currently unsupported');} const hotKey=matchedHotKeys[0];let prevented=0;prevented|=hotKey.call(e);return!prevented&&e.defaultPrevented;}});'use strict';tr.exportTo('tr.b',function(){function getHotkeyControllerForElement(refElement){let curElement=refElement;while(curElement){if(curElement.tagName==='tv-ui-b-hotkey-controller'){return curElement;} @@ -7829,7 +7829,7 @@ const ans={supported:false};for(const proc of Object.values(m.processes)){proc.objects.iterObjectInstances(function(instance){if(instance instanceof BlameContextInstance){ans.supported=true;}});} if(!ans.supported){ans.reason='No frame data available';} return ans;},get 
currentRangeOfInterest(){if(this.rangeOfInterest_.isEmpty){return this.model_.bounds;} -return this.rangeOfInterest_;},get rangeOfInterest(){return this.rangeOfInterest_;},set rangeOfInterest(rangeOfInterest){this.rangeOfInterest_=rangeOfInterest;this.updateContents_();},get selection(){},set selection(_){},get textLabel(){return'Frame Data';},get model(){return this.model_;},set model(model){this.model_=model;this.updateContents_();}});tr.ui.side_panel.SidePanelRegistry.register(function(){return document.createElement('tr-ui-e-s-frame-data-side-panel');});});'use strict';Polymer({is:'tr-ui-b-chart-legend-key',ready(){this.$.checkbox.addEventListener('change',this.onCheckboxChange_.bind(this));},onCheckboxChange_(){tr.b.dispatchSimpleEvent(this,tr.ui.b.DataSeriesEnableChangeEventType,true,false,{key:Polymer.dom(this).textContent,enabled:this.enabled});},set textContent(t){Polymer.dom(this.$.label).textContent=t;Polymer.dom(this.$.link).textContent=t;this.updateContents_();},set width(w){w-=20;this.$.link.style.width=w+'px';this.$.label.style.width=w+'px';},get textContent(){return Polymer.dom(this.$.label).textContent;},set optional(optional){this.$.checkbox.style.visibility=optional?'visible':'hidden';},get optional(){return this.$.checkbox.style.visibility==='visible';},set enabled(enabled){this.$.checkbox.checked=enabled?'checked':'';},get enabled(){return this.$.checkbox.checked;},set color(c){this.$.label.style.color=c;this.$.link.color=c;},set target(target){this.$.link.setSelectionAndContent(target,Polymer.dom(this.$.label).textContent);this.updateContents_();},get target(){return this.$.link.selection;},set title(title){this.$.link.title=title;},updateContents_(){this.$.link.style.display=this.target?'':'none';this.$.label.style.display=this.target?'none':'';this.$.label.htmlFor=this.optional?'checkbox':'';}});'use strict';(function(window){window.define=function(x){window.d3=x;};window.define.amd=true;})(this);!function(){function n(n){return 
null!=n&&!isNaN(n)}function t(n){return n.length}function e(n){for(var t=1;n*t%1;)t*=10;return t}function r(n,t){try{for(var e in t)Object.defineProperty(n.prototype,e,{value:t[e],enumerable:!1})}catch(r){n.prototype=t}}function u(){}function i(n){return aa+n in this}function o(n){return n=aa+n,n in this&&delete this[n]}function a(){var n=[];return this.forEach(function(t){n.push(t)}),n}function c(){var n=0;for(var t in this)t.charCodeAt(0)===ca&&++n;return n}function s(){for(var n in this)if(n.charCodeAt(0)===ca)return!1;return!0}function l(){}function f(n,t,e){return function(){var r=e.apply(t,arguments);return r===t?n:r}}function h(n,t){if(t in n)return t;t=t.charAt(0).toUpperCase()+t.substring(1);for(var e=0,r=sa.length;r>e;++e){var u=sa[e]+t;if(u in n)return u}}function g(){}function p(){}function v(n){function t(){for(var t,r=e,u=-1,i=r.length;++u<i;)(t=r[u].on)&&t.apply(this,arguments);return n}var e=[],r=new u;return t.on=function(t,u){var i,o=r.get(t);return arguments.length<2?o&&o.on:(o&&(o.on=null,e=e.slice(0,i=e.indexOf(o)).concat(e.slice(i+1)),r.remove(t)),u&&e.push(r.set(t,{on:u})),n)},t}function d(){Xo.event.preventDefault()}function m(){for(var n,t=Xo.event;n=t.sourceEvent;)t=n;return t}function y(n){for(var t=new p,e=0,r=arguments.length;++e<r;)t[arguments[e]]=v(t);return t.of=function(e,r){return function(u){try{var i=u.sourceEvent=Xo.event;u.target=n,Xo.event=u,t[u.type].apply(e,r)}finally{Xo.event=i}}},t}function x(n){return fa(n,da),n}function M(n){return"function"==typeof n?n:function(){return ha(n,this)}}function _(n){return"function"==typeof n?n:function(){return ga(n,this)}}function b(n,t){function e(){this.removeAttribute(n)}function r(){this.removeAttributeNS(n.space,n.local)}function u(){this.setAttribute(n,t)}function i(){this.setAttributeNS(n.space,n.local,t)}function o(){var e=t.apply(this,arguments);null==e?this.removeAttribute(n):this.setAttribute(n,e)}function a(){var 
e=t.apply(this,arguments);null==e?this.removeAttributeNS(n.space,n.local):this.setAttributeNS(n.space,n.local,e)}return n=Xo.ns.qualify(n),null==t?n.local?r:e:"function"==typeof t?n.local?a:o:n.local?i:u}function w(n){return n.trim().replace(/\s+/g," ")}function S(n){return new RegExp("(?:^|\\s+)"+Xo.requote(n)+"(?:\\s+|$)","g")}function k(n){return n.trim().split(/^|\s+/)}function E(n,t){function e(){for(var e=-1;++e<u;)n[e](this,t)}function r(){for(var e=-1,r=t.apply(this,arguments);++e<u;)n[e](this,r)}n=k(n).map(A);var u=n.length;return"function"==typeof t?r:e}function A(n){var t=S(n);return function(e,r){if(u=e.classList)return r?u.add(n):u.remove(n);var u=e.getAttribute("class")||"";r?(t.lastIndex=0,t.test(u)||e.setAttribute("class",w(u+" "+n))):e.setAttribute("class",w(u.replace(t," ")))}}function C(n,t,e){function r(){this.style.removeProperty(n)}function u(){this.style.setProperty(n,t,e)}function i(){var r=t.apply(this,arguments);null==r?this.style.removeProperty(n):this.style.setProperty(n,r,e)}return null==t?r:"function"==typeof t?i:u}function N(n,t){function e(){delete this[n]}function r(){this[n]=t}function u(){var e=t.apply(this,arguments);null==e?delete this[n]:this[n]=e}return null==t?e:"function"==typeof t?u:r}function L(n){return"function"==typeof n?n:(n=Xo.ns.qualify(n)).local?function(){return this.ownerDocument.createElementNS(n.space,n.local)}:function(){return this.ownerDocument.createElementNS(this.namespaceURI,n)}}function T(n){return{__data__:n}}function q(n){return function(){return va(this,n)}}function z(n){return arguments.length||(n=Xo.ascending),function(t,e){return t&&e?n(t.__data__,e.__data__):!t-!e}}function R(n,t){for(var e=0,r=n.length;r>e;e++)for(var u,i=n[e],o=0,a=i.length;a>o;o++)(u=i[o])&&t(u,o,e);return n}function D(n){return fa(n,ya),n}function P(n){var t,e;return function(r,u,i){var o,a=n[i].update,c=a.length;for(i!=e&&(e=i,t=0),u>=t&&(t=u+1);!(o=a[t])&&++t<c;);return o}}function U(){var 
n=this.__transition__;n&&++n.active}function j(n,t,e){function r(){var t=this[o];t&&(this.removeEventListener(n,t,t.$),delete this[o])}function u(){var u=c(t,Bo(arguments));r.call(this),this.addEventListener(n,this[o]=u,u.$=e),u._=t}function i(){var t,e=new RegExp("^__on([^.]+)"+Xo.requote(n)+"$");for(var r in this)if(t=r.match(e)){var u=this[r];this.removeEventListener(t[1],u,u.$),delete this[r]}}var o="__on"+n,a=n.indexOf("."),c=H;a>0&&(n=n.substring(0,a));var s=Ma.get(n);return s&&(n=s,c=F),a?t?u:r:t?g:i}function H(n,t){return function(e){var r=Xo.event;Xo.event=e,t[0]=this.__data__;try{n.apply(this,t)}finally{Xo.event=r}}}function F(n,t){var e=H(n,t);return function(n){var t=this,r=n.relatedTarget;r&&(r===t||8&r.compareDocumentPosition(t))||e.call(t,n)}}function O(){var n=".dragsuppress-"+ ++ba,t="click"+n,e=Xo.select(Go).on("touchmove"+n,d).on("dragstart"+n,d).on("selectstart"+n,d);if(_a){var r=Jo.style,u=r[_a];r[_a]="none"}return function(i){function o(){e.on(t,null)}e.on(n,null),_a&&(r[_a]=u),i&&(e.on(t,function(){d(),o()},!0),setTimeout(o,0))}}function Y(n,t){t.changedTouches&&(t=t.changedTouches[0]);var e=n.ownerSVGElement||n;if(e.createSVGPoint){var r=e.createSVGPoint();if(0>wa&&(Go.scrollX||Go.scrollY)){e=Xo.select("body").append("svg").style({position:"absolute",top:0,left:0,margin:0,padding:0,border:"none"},"important");var u=e[0][0].getScreenCTM();wa=!(u.f||u.e),e.remove()}return wa?(r.x=t.pageX,r.y=t.pageY):(r.x=t.clientX,r.y=t.clientY),r=r.matrixTransform(n.getScreenCTM().inverse()),[r.x,r.y]}var i=n.getBoundingClientRect();return[t.clientX-i.left-n.clientLeft,t.clientY-i.top-n.clientTop]}function I(n){return n>0?1:0>n?-1:0}function Z(n,t,e){return(t[0]-n[0])*(e[1]-n[1])-(t[1]-n[1])*(e[0]-n[0])}function V(n){return n>1?0:-1>n?Sa:Math.acos(n)}function X(n){return n>1?Ea:-1>n?-Ea:Math.asin(n)}function $(n){return((n=Math.exp(n))-1/n)/2}function B(n){return((n=Math.exp(n))+1/n)/2}function W(n){return((n=Math.exp(2*n))-1)/(n+1)}function 
J(n){return(n=Math.sin(n/2))*n}function G(){}function K(n,t,e){return new Q(n,t,e)}function Q(n,t,e){this.h=n,this.s=t,this.l=e}function nt(n,t,e){function r(n){return n>360?n-=360:0>n&&(n+=360),60>n?i+(o-i)*n/60:180>n?o:240>n?i+(o-i)*(240-n)/60:i}function u(n){return Math.round(255*r(n))}var i,o;return n=isNaN(n)?0:(n%=360)<0?n+360:n,t=isNaN(t)?0:0>t?0:t>1?1:t,e=0>e?0:e>1?1:e,o=.5>=e?e*(1+t):e+t-e*t,i=2*e-o,gt(u(n+120),u(n),u(n-120))}function tt(n,t,e){return new et(n,t,e)}function et(n,t,e){this.h=n,this.c=t,this.l=e}function rt(n,t,e){return isNaN(n)&&(n=0),isNaN(t)&&(t=0),ut(e,Math.cos(n*=Na)*t,Math.sin(n)*t)}function ut(n,t,e){return new it(n,t,e)}function it(n,t,e){this.l=n,this.a=t,this.b=e}function ot(n,t,e){var r=(n+16)/116,u=r+t/500,i=r-e/200;return u=ct(u)*Fa,r=ct(r)*Oa,i=ct(i)*Ya,gt(lt(3.2404542*u-1.5371385*r-.4985314*i),lt(-.969266*u+1.8760108*r+.041556*i),lt(.0556434*u-.2040259*r+1.0572252*i))}function at(n,t,e){return n>0?tt(Math.atan2(e,t)*La,Math.sqrt(t*t+e*e),n):tt(0/0,0/0,n)}function ct(n){return n>.206893034?n*n*n:(n-4/29)/7.787037}function st(n){return n>.008856?Math.pow(n,1/3):7.787037*n+4/29}function lt(n){return Math.round(255*(.00304>=n?12.92*n:1.055*Math.pow(n,1/2.4)-.055))}function ft(n){return gt(n>>16,255&n>>8,255&n)}function ht(n){return ft(n)+""}function gt(n,t,e){return new pt(n,t,e)}function pt(n,t,e){this.r=n,this.g=t,this.b=e}function vt(n){return 16>n?"0"+Math.max(0,n).toString(16):Math.min(255,n).toString(16)}function dt(n,t,e){var r,u,i,o,a=0,c=0,s=0;if(u=/([a-z]+)\((.*)\)/i.exec(n))switch(i=u[2].split(","),u[1]){case"hsl":return e(parseFloat(i[0]),parseFloat(i[1])/100,parseFloat(i[2])/100);case"rgb":return t(Mt(i[0]),Mt(i[1]),Mt(i[2]))}return(o=Va.get(n))?t(o.r,o.g,o.b):(null!=n&&"#"===n.charAt(0)&&(r=parseInt(n.substring(1),16),isNaN(r)||(4===n.length?(a=(3840&r)>>4,a=a>>4|a,c=240&r,c=c>>4|c,s=15&r,s=s<<4|s):7===n.length&&(a=(16711680&r)>>16,c=(65280&r)>>8,s=255&r))),t(a,c,s))}function mt(n,t,e){var 
r,u,i=Math.min(n/=255,t/=255,e/=255),o=Math.max(n,t,e),a=o-i,c=(o+i)/2;return a?(u=.5>c?a/(o+i):a/(2-o-i),r=n==o?(t-e)/a+(e>t?6:0):t==o?(e-n)/a+2:(n-t)/a+4,r*=60):(r=0/0,u=c>0&&1>c?0:r),K(r,u,c)}function yt(n,t,e){n=xt(n),t=xt(t),e=xt(e);var r=st((.4124564*n+.3575761*t+.1804375*e)/Fa),u=st((.2126729*n+.7151522*t+.072175*e)/Oa),i=st((.0193339*n+.119192*t+.9503041*e)/Ya);return ut(116*u-16,500*(r-u),200*(u-i))}function xt(n){return(n/=255)<=.04045?n/12.92:Math.pow((n+.055)/1.055,2.4)}function Mt(n){var t=parseFloat(n);return"%"===n.charAt(n.length-1)?Math.round(2.55*t):t}function _t(n){return"function"==typeof n?n:function(){return n}}function bt(n){return n}function wt(n){return function(t,e,r){return 2===arguments.length&&"function"==typeof e&&(r=e,e=null),St(t,e,n,r)}}function St(n,t,e,r){function u(){var n,t=c.status;if(!t&&c.responseText||t>=200&&300>t||304===t){try{n=e.call(i,c)}catch(r){return o.error.call(i,r),void 0}o.load.call(i,n)}else o.error.call(i,c)}var i={},o=Xo.dispatch("beforesend","progress","load","error"),a={},c=new XMLHttpRequest,s=null;return!Go.XDomainRequest||"withCredentials"in c||!/^(http(s)?:)?\/\//.test(n)||(c=new XDomainRequest),"onload"in c?c.onload=c.onerror=u:c.onreadystatechange=function(){c.readyState>3&&u()},c.onprogress=function(n){var t=Xo.event;Xo.event=n;try{o.progress.call(i,c)}finally{Xo.event=t}},i.header=function(n,t){return n=(n+"").toLowerCase(),arguments.length<2?a[n]:(null==t?delete a[n]:a[n]=t+"",i)},i.mimeType=function(n){return arguments.length?(t=null==n?null:n+"",i):t},i.responseType=function(n){return arguments.length?(s=n,i):s},i.response=function(n){return e=n,i},["get","post"].forEach(function(n){i[n]=function(){return i.send.apply(i,[n].concat(Bo(arguments)))}}),i.send=function(e,r,u){if(2===arguments.length&&"function"==typeof r&&(u=r,r=null),c.open(e,n,!0),null==t||"accept"in a||(a.accept=t+",*/*"),c.setRequestHeader)for(var l in a)c.setRequestHeader(l,a[l]);return 
null!=t&&c.overrideMimeType&&c.overrideMimeType(t),null!=s&&(c.responseType=s),null!=u&&i.on("error",u).on("load",function(n){u(null,n)}),o.beforesend.call(i,c),c.send(null==r?null:r),i},i.abort=function(){return c.abort(),i},Xo.rebind(i,o,"on"),null==r?i:i.get(kt(r))}function kt(n){return 1===n.length?function(t,e){n(null==t?e:null)}:n}function Et(){var n=At(),t=Ct()-n;t>24?(isFinite(t)&&(clearTimeout(Wa),Wa=setTimeout(Et,t)),Ba=0):(Ba=1,Ga(Et))}function At(){var n=Date.now();for(Ja=Xa;Ja;)n>=Ja.t&&(Ja.f=Ja.c(n-Ja.t)),Ja=Ja.n;return n}function Ct(){for(var n,t=Xa,e=1/0;t;)t.f?t=n?n.n=t.n:Xa=t.n:(t.t<e&&(e=t.t),t=(n=t).n);return $a=n,e}function Nt(n,t){return t-(n?Math.ceil(Math.log(n)/Math.LN10):1)}function Lt(n,t){var e=Math.pow(10,3*oa(8-t));return{scale:t>8?function(n){return n/e}:function(n){return n*e},symbol:n}}function Tt(n){var t=n.decimal,e=n.thousands,r=n.grouping,u=n.currency,i=r?function(n){for(var t=n.length,u=[],i=0,o=r[0];t>0&&o>0;)u.push(n.substring(t-=o,t+o)),o=r[i=(i+1)%r.length];return u.reverse().join(e)}:bt;return function(n){var e=Qa.exec(n),r=e[1]||" ",o=e[2]||">",a=e[3]||"",c=e[4]||"",s=e[5],l=+e[6],f=e[7],h=e[8],g=e[9],p=1,v="",d="",m=!1;switch(h&&(h=+h.substring(1)),(s||"0"===r&&"="===o)&&(s=r="0",o="=",f&&(l-=Math.floor((l-1)/4))),g){case"n":f=!0,g="g";break;case"%":p=100,d="%",g="f";break;case"p":p=100,d="%",g="r";break;case"b":case"o":case"x":case"X":"#"===c&&(v="0"+g.toLowerCase());case"c":case"d":m=!0,h=0;break;case"s":p=-1,g="r"}"$"===c&&(v=u[0],d=u[1]),"r"!=g||h||(g="g"),null!=h&&("g"==g?h=Math.max(1,Math.min(21,h)):("e"==g||"f"==g)&&(h=Math.max(0,Math.min(20,h)))),g=nc.get(g)||qt;var y=s&&f;return function(n){var e=d;if(m&&n%1)return"";var u=0>n||0===n&&0>1/n?(n=-n,"-"):a;if(0>p){var c=Xo.formatPrefix(n,h);n=c.scale(n),e=c.symbol+d}else n*=p;n=g(n,h);var x=n.lastIndexOf("."),M=0>x?n:n.substring(0,x),_=0>x?"":t+n.substring(x+1);!s&&f&&(M=i(M));var b=v.length+M.length+_.length+(y?0:u.length),w=l>b?new 
Array(b=l-b+1).join(r):"";return y&&(M=i(w+M)),u+=v,n=M+_,("<"===o?u+n+w:">"===o?w+u+n:"^"===o?w.substring(0,b>>=1)+u+n+w.substring(b):u+(y?n:w+n))+e}}}function qt(n){return n+""}function zt(){this._=new Date(arguments.length>1?Date.UTC.apply(this,arguments):arguments[0])}function Rt(n,t,e){function r(t){var e=n(t),r=i(e,1);return r-t>t-e?e:r}function u(e){return t(e=n(new ec(e-1)),1),e}function i(n,e){return t(n=new ec(+n),e),n}function o(n,r,i){var o=u(n),a=[];if(i>1)for(;r>o;)e(o)%i||a.push(new Date(+o)),t(o,1);else for(;r>o;)a.push(new Date(+o)),t(o,1);return a}function a(n,t,e){try{ec=zt;var r=new zt;return r._=n,o(r,t,e)}finally{ec=Date}}n.floor=n,n.round=r,n.ceil=u,n.offset=i,n.range=o;var c=n.utc=Dt(n);return c.floor=c,c.round=Dt(r),c.ceil=Dt(u),c.offset=Dt(i),c.range=a,n}function Dt(n){return function(t,e){try{ec=zt;var r=new zt;return r._=t,n(r,e)._}finally{ec=Date}}}function Pt(n){function t(n){function t(t){for(var e,u,i,o=[],a=-1,c=0;++a<r;)37===n.charCodeAt(a)&&(o.push(n.substring(c,a)),null!=(u=uc[e=n.charAt(++a)])&&(e=n.charAt(++a)),(i=C[e])&&(e=i(t,null==u?"e"===e?" 
":"0":u)),o.push(e),c=a+1);return o.push(n.substring(c,a)),o.join("")}var r=n.length;return t.parse=function(t){var r={y:1900,m:0,d:1,H:0,M:0,S:0,L:0,Z:null},u=e(r,n,t,0);if(u!=t.length)return null;"p"in r&&(r.H=r.H%12+12*r.p);var i=null!=r.Z&&ec!==zt,o=new(i?zt:ec);return"j"in r?o.setFullYear(r.y,0,r.j):"w"in r&&("W"in r||"U"in r)?(o.setFullYear(r.y,0,1),o.setFullYear(r.y,0,"W"in r?(r.w+6)%7+7*r.W-(o.getDay()+5)%7:r.w+7*r.U-(o.getDay()+6)%7)):o.setFullYear(r.y,r.m,r.d),o.setHours(r.H+Math.floor(r.Z/100),r.M+r.Z%100,r.S,r.L),i?o._:o},t.toString=function(){return n},t}function e(n,t,e,r){for(var u,i,o,a=0,c=t.length,s=e.length;c>a;){if(r>=s)return-1;if(u=t.charCodeAt(a++),37===u){if(o=t.charAt(a++),i=N[o in uc?t.charAt(a++):o],!i||(r=i(n,e,r))<0)return-1}else if(u!=e.charCodeAt(r++))return-1}return r}function r(n,t,e){b.lastIndex=0;var r=b.exec(t.substring(e));return r?(n.w=w.get(r[0].toLowerCase()),e+r[0].length):-1}function u(n,t,e){M.lastIndex=0;var r=M.exec(t.substring(e));return r?(n.w=_.get(r[0].toLowerCase()),e+r[0].length):-1}function i(n,t,e){E.lastIndex=0;var r=E.exec(t.substring(e));return r?(n.m=A.get(r[0].toLowerCase()),e+r[0].length):-1}function o(n,t,e){S.lastIndex=0;var r=S.exec(t.substring(e));return r?(n.m=k.get(r[0].toLowerCase()),e+r[0].length):-1}function a(n,t,r){return e(n,C.c.toString(),t,r)}function c(n,t,r){return e(n,C.x.toString(),t,r)}function s(n,t,r){return e(n,C.X.toString(),t,r)}function l(n,t,e){var r=x.get(t.substring(e,e+=2).toLowerCase());return null==r?-1:(n.p=r,e)}var f=n.dateTime,h=n.date,g=n.time,p=n.periods,v=n.days,d=n.shortDays,m=n.months,y=n.shortMonths;t.utc=function(n){function e(n){try{ec=zt;var t=new ec;return t._=n,r(t)}finally{ec=Date}}var r=t(n);return e.parse=function(n){try{ec=zt;var t=r.parse(n);return t&&t._}finally{ec=Date}},e.toString=r.toString,e},t.multi=t.utc.multi=ee;var x=Xo.map(),M=jt(v),_=Ht(v),b=jt(d),w=Ht(d),S=jt(m),k=Ht(m),E=jt(y),A=Ht(y);p.forEach(function(n,t){x.set(n.toLowerCase(),t)});var 
C={a:function(n){return d[n.getDay()]},A:function(n){return v[n.getDay()]},b:function(n){return y[n.getMonth()]},B:function(n){return m[n.getMonth()]},c:t(f),d:function(n,t){return Ut(n.getDate(),t,2)},e:function(n,t){return Ut(n.getDate(),t,2)},H:function(n,t){return Ut(n.getHours(),t,2)},I:function(n,t){return Ut(n.getHours()%12||12,t,2)},j:function(n,t){return Ut(1+tc.dayOfYear(n),t,3)},L:function(n,t){return Ut(n.getMilliseconds(),t,3)},m:function(n,t){return Ut(n.getMonth()+1,t,2)},M:function(n,t){return Ut(n.getMinutes(),t,2)},p:function(n){return p[+(n.getHours()>=12)]},S:function(n,t){return Ut(n.getSeconds(),t,2)},U:function(n,t){return Ut(tc.sundayOfYear(n),t,2)},w:function(n){return n.getDay()},W:function(n,t){return Ut(tc.mondayOfYear(n),t,2)},x:t(h),X:t(g),y:function(n,t){return Ut(n.getFullYear()%100,t,2)},Y:function(n,t){return Ut(n.getFullYear()%1e4,t,4)},Z:ne,"%":function(){return"%"}},N={a:r,A:u,b:i,B:o,c:a,d:Bt,e:Bt,H:Jt,I:Jt,j:Wt,L:Qt,m:$t,M:Gt,p:l,S:Kt,U:Ot,w:Ft,W:Yt,x:c,X:s,y:Zt,Y:It,Z:Vt,"%":te};return t}function Ut(n,t,e){var r=0>n?"-":"",u=(r?-n:n)+"",i=u.length;return r+(e>i?new Array(e-i+1).join(t)+u:u)}function jt(n){return new RegExp("^(?:"+n.map(Xo.requote).join("|")+")","i")}function Ht(n){for(var t=new u,e=-1,r=n.length;++e<r;)t.set(n[e].toLowerCase(),e);return t}function Ft(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+1));return r?(n.w=+r[0],e+r[0].length):-1}function Ot(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e));return r?(n.U=+r[0],e+r[0].length):-1}function Yt(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e));return r?(n.W=+r[0],e+r[0].length):-1}function It(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+4));return r?(n.y=+r[0],e+r[0].length):-1}function Zt(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+2));return r?(n.y=Xt(+r[0]),e+r[0].length):-1}function Vt(n,t,e){return/^[+-]\d{4}$/.test(t=t.substring(e,e+5))?(n.Z=+t,e+5):-1}function Xt(n){return n+(n>68?1900:2e3)}function 
$t(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+2));return r?(n.m=r[0]-1,e+r[0].length):-1}function Bt(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+2));return r?(n.d=+r[0],e+r[0].length):-1}function Wt(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+3));return r?(n.j=+r[0],e+r[0].length):-1}function Jt(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+2));return r?(n.H=+r[0],e+r[0].length):-1}function Gt(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+2));return r?(n.M=+r[0],e+r[0].length):-1}function Kt(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+2));return r?(n.S=+r[0],e+r[0].length):-1}function Qt(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+3));return r?(n.L=+r[0],e+r[0].length):-1}function ne(n){var t=n.getTimezoneOffset(),e=t>0?"-":"+",r=~~(oa(t)/60),u=oa(t)%60;return e+Ut(r,"0",2)+Ut(u,"0",2)}function te(n,t,e){oc.lastIndex=0;var r=oc.exec(t.substring(e,e+1));return r?e+r[0].length:-1}function ee(n){for(var t=n.length,e=-1;++e<t;)n[e][0]=this(n[e][0]);return function(t){for(var e=0,r=n[e];!r[1](t);)r=n[++e];return r[0](t)}}function re(){}function ue(n,t,e){var r=e.s=n+t,u=r-n,i=r-u;e.t=n-i+(t-u)}function ie(n,t){n&&lc.hasOwnProperty(n.type)&&lc[n.type](n,t)}function oe(n,t,e){var r,u=-1,i=n.length-e;for(t.lineStart();++u<i;)r=n[u],t.point(r[0],r[1],r[2]);t.lineEnd()}function ae(n,t){var e=-1,r=n.length;for(t.polygonStart();++e<r;)oe(n[e],t,1);t.polygonEnd()}function ce(){function n(n,t){n*=Na,t=t*Na/2+Sa/4;var e=n-r,o=e>=0?1:-1,a=o*e,c=Math.cos(t),s=Math.sin(t),l=i*s,f=u*c+l*Math.cos(a),h=l*o*Math.sin(a);hc.add(Math.atan2(h,f)),r=n,u=c,i=s}var t,e,r,u,i;gc.point=function(o,a){gc.point=n,r=(t=o)*Na,u=Math.cos(a=(e=a)*Na/2+Sa/4),i=Math.sin(a)},gc.lineEnd=function(){n(t,e)}}function se(n){var t=n[0],e=n[1],r=Math.cos(e);return[r*Math.cos(t),r*Math.sin(t),Math.sin(e)]}function le(n,t){return n[0]*t[0]+n[1]*t[1]+n[2]*t[2]}function fe(n,t){return[n[1]*t[2]-n[2]*t[1],n[2]*t[0]-n[0]*t[2],n[0]*t[1]-n[1]*t[0]]}function 
they(n,t){n[0]+=t[0],n[1]+=t[1],n[2]+=t[2]}function ge(n,t){return[n[0]*t,n[1]*t,n[2]*t]}function pe(n){var t=Math.sqrt(n[0]*n[0]+n[1]*n[1]+n[2]*n[2]);n[0]/=t,n[1]/=t,n[2]/=t}function ve(n){return[Math.atan2(n[1],n[0]),X(n[2])]}function de(n,t){return oa(n[0]-t[0])<Aa&&oa(n[1]-t[1])<Aa}function me(n,t){n*=Na;var e=Math.cos(t*=Na);ye(e*Math.cos(n),e*Math.sin(n),Math.sin(t))}function ye(n,t,e){++pc,dc+=(n-dc)/pc,mc+=(t-mc)/pc,yc+=(e-yc)/pc}function xe(){function n(n,u){n*=Na;var i=Math.cos(u*=Na),o=i*Math.cos(n),a=i*Math.sin(n),c=Math.sin(u),s=Math.atan2(Math.sqrt((s=e*c-r*a)*s+(s=r*o-t*c)*s+(s=t*a-e*o)*s),t*o+e*a+r*c);vc+=s,xc+=s*(t+(t=o)),Mc+=s*(e+(e=a)),_c+=s*(r+(r=c)),ye(t,e,r)}var t,e,r;kc.point=function(u,i){u*=Na;var o=Math.cos(i*=Na);t=o*Math.cos(u),e=o*Math.sin(u),r=Math.sin(i),kc.point=n,ye(t,e,r)}}function Me(){kc.point=me}function _e(){function n(n,t){n*=Na;var e=Math.cos(t*=Na),o=e*Math.cos(n),a=e*Math.sin(n),c=Math.sin(t),s=u*c-i*a,l=i*o-r*c,f=r*a-u*o,h=Math.sqrt(s*s+l*l+f*f),g=r*o+u*a+i*c,p=h&&-V(g)/h,v=Math.atan2(h,g);bc+=p*s,wc+=p*l,Sc+=p*f,vc+=v,xc+=v*(r+(r=o)),Mc+=v*(u+(u=a)),_c+=v*(i+(i=c)),ye(r,u,i)}var t,e,r,u,i;kc.point=function(o,a){t=o,e=a,kc.point=n,o*=Na;var c=Math.cos(a*=Na);r=c*Math.cos(o),u=c*Math.sin(o),i=Math.sin(a),ye(r,u,i)},kc.lineEnd=function(){n(t,e),kc.lineEnd=Me,kc.point=me}}function be(){return!0}function we(n,t,e,r,u){var i=[],o=[];if(n.forEach(function(n){if(!((t=n.length-1)<=0)){var t,e=n[0],r=n[t];if(de(e,r)){u.lineStart();for(var a=0;t>a;++a)u.point((e=n[a])[0],e[1]);return u.lineEnd(),void 0}var c=new ke(e,n,null,!0),s=new ke(e,null,c,!1);c.o=s,i.push(c),o.push(s),c=new ke(r,n,null,!1),s=new ke(r,null,c,!0),c.o=s,i.push(c),o.push(s)}}),o.sort(t),Se(i),Se(o),i.length){for(var a=0,c=e,s=o.length;s>a;++a)o[a].e=c=!c;for(var l,f,h=i[0];;){for(var g=h,p=!0;g.v;)if((g=g.n)===h)return;l=g.z,u.lineStart();do{if(g.v=g.o.v=!0,g.e){if(p)for(var a=0,s=l.length;s>a;++a)u.point((f=l[a])[0],f[1]);else 
r(g.x,g.n.x,1,u);g=g.n}else{if(p){l=g.p.z;for(var a=l.length-1;a>=0;--a)u.point((f=l[a])[0],f[1])}else r(g.x,g.p.x,-1,u);g=g.p}g=g.o,l=g.z,p=!p}while(!g.v);u.lineEnd()}}}function Se(n){if(t=n.length){for(var t,e,r=0,u=n[0];++r<t;)u.n=e=n[r],e.p=u,u=e;u.n=e=n[0],e.p=u}}function ke(n,t,e,r){this.x=n,this.z=t,this.o=e,this.e=r,this.v=!1,this.n=this.p=null}function Ee(n,t,e,r){return function(u,i){function o(t,e){var r=u(t,e);n(t=r[0],e=r[1])&&i.point(t,e)}function a(n,t){var e=u(n,t);d.point(e[0],e[1])}function c(){y.point=a,d.lineStart()}function s(){y.point=o,d.lineEnd()}function l(n,t){v.push([n,t]);var e=u(n,t);M.point(e[0],e[1])}function f(){M.lineStart(),v=[]}function h(){l(v[0][0],v[0][1]),M.lineEnd();var n,t=M.clean(),e=x.buffer(),r=e.length;if(v.pop(),p.push(v),v=null,r){if(1&t){n=e[0];var u,r=n.length-1,o=-1;for(i.lineStart();++o<r;)i.point((u=n[o])[0],u[1]);return i.lineEnd(),void 0}r>1&&2&t&&e.push(e.pop().concat(e.shift())),g.push(e.filter(Ae))}}var g,p,v,d=t(i),m=u.invert(r[0],r[1]),y={point:o,lineStart:c,lineEnd:s,polygonStart:function(){y.point=l,y.lineStart=f,y.lineEnd=h,g=[],p=[],i.polygonStart()},polygonEnd:function(){y.point=o,y.lineStart=c,y.lineEnd=s,g=Xo.merge(g);var n=Le(m,p);g.length?we(g,Ne,n,e,i):n&&(i.lineStart(),e(null,null,1,i),i.lineEnd()),i.polygonEnd(),g=p=null},sphere:function(){i.polygonStart(),i.lineStart(),e(null,null,1,i),i.lineEnd(),i.polygonEnd()}},x=Ce(),M=t(x);return y}}function Ae(n){return n.length>1}function Ce(){var n,t=[];return{lineStart:function(){t.push(n=[])},point:function(t,e){n.push([t,e])},lineEnd:g,buffer:function(){var e=t;return t=[],n=null,e},rejoin:function(){t.length>1&&t.push(t.pop().concat(t.shift()))}}}function Ne(n,t){return((n=n.x)[0]<0?n[1]-Ea-Aa:Ea-n[1])-((t=t.x)[0]<0?t[1]-Ea-Aa:Ea-t[1])}function Le(n,t){var e=n[0],r=n[1],u=[Math.sin(e),-Math.cos(e),0],i=0,o=0;hc.reset();for(var a=0,c=t.length;c>a;++a){var s=t[a],l=s.length;if(l)for(var 
f=s[0],h=f[0],g=f[1]/2+Sa/4,p=Math.sin(g),v=Math.cos(g),d=1;;){d===l&&(d=0),n=s[d];var m=n[0],y=n[1]/2+Sa/4,x=Math.sin(y),M=Math.cos(y),_=m-h,b=_>=0?1:-1,w=b*_,S=w>Sa,k=p*x;if(hc.add(Math.atan2(k*b*Math.sin(w),v*M+k*Math.cos(w))),i+=S?_+b*ka:_,S^h>=e^m>=e){var E=fe(se(f),se(n));pe(E);var A=fe(u,E);pe(A);var C=(S^_>=0?-1:1)*X(A[2]);(r>C||r===C&&(E[0]||E[1]))&&(o+=S^_>=0?1:-1)}if(!d++)break;h=m,p=x,v=M,f=n}}return(-Aa>i||Aa>i&&0>hc)^1&o}function Te(n){var t,e=0/0,r=0/0,u=0/0;return{lineStart:function(){n.lineStart(),t=1},point:function(i,o){var a=i>0?Sa:-Sa,c=oa(i-e);oa(c-Sa)<Aa?(n.point(e,r=(r+o)/2>0?Ea:-Ea),n.point(u,r),n.lineEnd(),n.lineStart(),n.point(a,r),n.point(i,r),t=0):u!==a&&c>=Sa&&(oa(e-u)<Aa&&(e-=u*Aa),oa(i-a)<Aa&&(i-=a*Aa),r=qe(e,r,i,o),n.point(u,r),n.lineEnd(),n.lineStart(),n.point(a,r),t=0),n.point(e=i,r=o),u=a},lineEnd:function(){n.lineEnd(),e=r=0/0},clean:function(){return 2-t}}}function qe(n,t,e,r){var u,i,o=Math.sin(n-e);return oa(o)>Aa?Math.atan((Math.sin(t)*(i=Math.cos(r))*Math.sin(e)-Math.sin(r)*(u=Math.cos(t))*Math.sin(n))/(u*i*o)):(t+r)/2}function ze(n,t,e,r){var u;if(null==n)u=e*Ea,r.point(-Sa,u),r.point(0,u),r.point(Sa,u),r.point(Sa,0),r.point(Sa,-u),r.point(0,-u),r.point(-Sa,-u),r.point(-Sa,0),r.point(-Sa,u);else if(oa(n[0]-t[0])>Aa){var i=n[0]<t[0]?Sa:-Sa;u=e*i/2,r.point(-i,u),r.point(0,u),r.point(i,u)}else r.point(t[0],t[1])}function Re(n){function t(n,t){return Math.cos(n)*Math.cos(t)>i}function e(n){var e,i,c,s,l;return{lineStart:function(){s=c=!1,l=1},point:function(f,h){var g,p=[f,h],v=t(f,h),d=o?v?0:u(f,h):v?u(f+(0>f?Sa:-Sa),h):0;if(!e&&(s=c=v)&&n.lineStart(),v!==c&&(g=r(e,p),(de(e,g)||de(p,g))&&(p[0]+=Aa,p[1]+=Aa,v=t(p[0],p[1]))),v!==c)l=0,v?(n.lineStart(),g=r(p,e),n.point(g[0],g[1])):(g=r(e,p),n.point(g[0],g[1]),n.lineEnd()),e=g;else if(a&&e&&o^v){var 
/* --- minified d3 v3 geo-clipping internals; review comments added, code tokens unchanged --- */
m;d&i||!(m=r(p,e,!0))||(l=0,o?(n.lineStart(),n.point(m[0][0],m[0][1]),n.point(m[1][0],m[1][1]),n.lineEnd()):(n.point(m[1][0],m[1][1]),n.lineEnd(),n.lineStart(),n.point(m[0][0],m[0][1])))}!v||e&&de(e,p)||n.point(p[0],p[1]),e=p,c=v,i=d},lineEnd:function(){c&&n.lineEnd(),e=null},clean:function(){return l|(s&&c)<<1}}}
/* r(a,b,two): intersection of the great-circle segment a-b with the small clip circle;
   returns one point, or a pair [p0,p1] when `two` is truthy and the segment crosses twice.
   NOTE(review): `they(...)` below looks like a mechanical whole-word rename of the minified
   helper `he` (in-place 3-vector add), presumably applied consistently file-wide — do NOT
   revert locally; verify against the helper's definition elsewhere in this file. */
function r(n,t,e){var r=se(n),u=se(t),o=[1,0,0],a=fe(r,u),c=le(a,a),s=a[0],l=c-s*s;if(!l)return!e&&n;var f=i*c/l,h=-i*s/l,g=fe(o,a),p=ge(o,f),v=ge(a,h);they(p,v);var d=g,m=le(p,d),y=le(d,d),x=m*m-y*(le(p,p)-1);if(!(0>x)){var M=Math.sqrt(x),_=ge(d,(-m-M)/y);if(they(_,p),_=ve(_),!e)return _;var b,w=n[0],S=t[0],k=n[1],E=t[1];w>S&&(b=w,w=S,S=b);var A=S-w,C=oa(A-Sa)<Aa,N=C||Aa>A;if(!C&&k>E&&(b=k,k=E,E=b),N?C?k+E>0^_[1]<(oa(_[0]-w)<Aa?k:E):k<=_[1]&&_[1]<=E:A>Sa^(w<=_[0]&&_[0]<=S)){var L=ge(d,(-m+M)/y);return they(L,p),[_,ve(L)]}}}
/* u(lon,lat): 4-bit region code relative to the clip circle's angular bounds (Cohen-Sutherland style). */
function u(t,e){var r=o?n:Sa-n,u=0;return-r>t?u|=1:t>r&&(u|=2),-r>e?u|=4:e>r&&(u|=8),u}var i=Math.cos(n),o=i>0,a=oa(i)>Aa,c=cr(n,6*Na);return Ee(t,e,c,o?[0,-n]:[-Sa,n-Sa])}
/* De(x0,y0,x1,y1): Liang-Barsky clipping of the segment u.a-u.b against the axis-aligned
   rectangle; mutates u.a/u.b to the clipped endpoints and returns u, or returns undefined
   (falls through) when the segment lies fully outside. */
function De(n,t,e,r){return function(u){var i,o=u.a,a=u.b,c=o.x,s=o.y,l=a.x,f=a.y,h=0,g=1,p=l-c,v=f-s;if(i=n-c,p||!(i>0)){if(i/=p,0>p){if(h>i)return;g>i&&(g=i)}else if(p>0){if(i>g)return;i>h&&(h=i)}if(i=e-c,p||!(0>i)){if(i/=p,0>p){if(i>g)return;i>h&&(h=i)}else if(p>0){if(h>i)return;g>i&&(g=i)}if(i=t-s,v||!(i>0)){if(i/=v,0>v){if(h>i)return;g>i&&(g=i)}else if(v>0){if(i>g)return;i>h&&(h=i)}if(i=r-s,v||!(0>i)){if(i/=v,0>v){if(i>g)return;i>h&&(h=i)}else if(v>0){if(h>i)return;g>i&&(g=i)}return h>0&&(u.a={x:c+h*p,y:s+h*v}),1>g&&(u.b={x:c+g*p,y:s+g*v}),u}}}}}}
/* Pe(x0,y0,x1,y1): stream transform clipping geometry to the rectangle (d3.geo.clipExtent
   internals); u = edge/corner code for a point on the clip boundary, i/o = comparators
   ordering intersection points along the boundary. */
function Pe(n,t,e,r){function u(r,u){return oa(r[0]-n)<Aa?u>0?0:3:oa(r[0]-e)<Aa?u>0?2:1:oa(r[1]-t)<Aa?u>0?1:0:u>0?3:2}function i(n,t){return o(n.x,t.x)}function o(n,t){var e=u(n,1),r=u(t,1);return e!==r?e-r:0===e?t[1]-n[1]:1===e?n[0]-t[0]:2===e?n[1]-t[1]:t[0]-n[0]}return function(a){function c(n){for(var t=0,e=d.length,r=n[1],u=0;e>u;++u)for(var 
i,o=1,a=d[u],c=a.length,s=a[0];c>o;++o)i=a[o],s[1]<=r?i[1]>r&&Z(s,i,n)>0&&++t:i[1]<=r&&Z(s,i,n)<0&&--t,s=i;return 0!==t}
/* s(from,to,dir,stream): walk the rectangle's corners from edge-code l to f in direction
   dir, emitting corner points so clipped rings stay closed. */
function s(i,a,c,s){var l=0,f=0;if(null==i||(l=u(i,c))!==(f=u(a,c))||o(i,a)<0^c>0){do s.point(0===l||3===l?n:e,l>1?r:t);while((l=(l+c+4)%4)!==f)}else s.point(a[0],a[1])}
/* l(x,y): point-in-clip-rectangle test. f(x,y): forward a point only when visible. */
function l(u,i){return u>=n&&e>=u&&i>=t&&r>=i}function f(n,t){l(n,t)&&a.point(n,t)}
/* h/g: lineStart/lineEnd handlers; p(x,y): clamps coordinates to +-Ac and emits clipped
   segments through C (the Liang-Barsky clipper built below). */
function h(){N.point=p,d&&d.push(m=[]),S=!0,w=!1,_=b=0/0}function g(){v&&(p(y,x),M&&w&&A.rejoin(),v.push(A.buffer())),N.point=f,w&&a.lineEnd()}function p(n,t){n=Math.max(-Ac,Math.min(Ac,n)),t=Math.max(-Ac,Math.min(Ac,t));var e=l(n,t);if(d&&m.push([n,t]),S)y=n,x=t,M=e,S=!1,e&&(a.lineStart(),a.point(n,t));else if(e&&w)a.point(n,t);else{var r={a:{x:_,y:b},b:{x:n,y:t}};C(r)?(w||(a.lineStart(),a.point(r.a.x,r.a.y)),a.point(r.b.x,r.b.y),e||a.lineEnd(),k=!1):e&&(a.lineStart(),a.point(n,t),k=!1)}_=n,b=t,w=e}var v,d,m,y,x,M,_,b,w,S,k,E=a,A=Ce(),C=De(n,t,e,r),N={point:f,lineStart:h,lineEnd:g,polygonStart:function(){a=A,v=[],d=[],k=!0},polygonEnd:function(){a=E,v=Xo.merge(v);var t=c([n,r]),e=k&&t,u=v.length;(e||u)&&(a.polygonStart(),e&&(a.lineStart(),s(null,null,1,a),a.lineEnd()),u&&we(v,i,t,s,a),a.polygonEnd()),v=d=m=null}};return N}}
/* Ue(a,b): compose two point transforms; the composite inverts only when both parts invert. */
function Ue(n,t){function e(e,r){return e=n(e,r),t(e[0],e[1])}return n.invert&&t.invert&&(e.invert=function(e,r){return e=t.invert(e,r),e&&n.invert(e[0],e[1])}),e}
/* je(raw): conic projection factory adding a .parallels([p0,p1]) accessor (degrees <-> radians). */
function je(n){var t=0,e=Sa/3,r=nr(n),u=r(t,e);return u.parallels=function(n){return arguments.length?r(t=n[0]*Sa/180,e=n[1]*Sa/180):[180*(t/Sa),180*(e/Sa)]},u}
/* They(phi0,phi1): Albers-style conic equal-area raw projection, with inverse.
   NOTE(review): the name looks like a mechanical whole-word rename of minified `He`
   ("he"->"they" applied file-wide); its caller (je/conicEqualArea registration) lies
   outside this chunk, so leave the name as-is unless the whole file is repaired together. */
function They(n,t){function e(n,t){var e=Math.sqrt(i-2*u*Math.sin(t))/u;return[e*Math.sin(n*=u),o-e*Math.cos(n)]}var r=Math.sin(n),u=(r+Math.sin(t))/2,i=1+r*(2*u-r),o=Math.sqrt(i)/u;return e.invert=function(n,t){var e=o-t;return[Math.atan2(n,e)/u,X((i-(n*n+e*e)*u*u)/(2*u))]},e}
/* Fe(): planar signed-area accumulator stream — folds the cross-product sum into Nc,
   closing the ring via the recorded first point on lineEnd. */
function Fe(){function n(n,t){Nc+=u*n-r*t,r=n,u=t}var t,e,r,u;Rc.point=function(i,o){Rc.point=n,t=r=i,e=u=o},Rc.lineEnd=function(){n(t,e)}}function 
Oe(n,t){Lc>n&&(Lc=n),n>qc&&(qc=n),Tc>t&&(Tc=t),t>zc&&(zc=t)}function Ye(){function n(n,t){o.push("M",n,",",t,i)}function t(n,t){o.push("M",n,",",t),a.point=e}function e(n,t){o.push("L",n,",",t)}function r(){a.point=n}function u(){o.push("Z")}var i=Ie(4.5),o=[],a={point:n,lineStart:function(){a.point=t},lineEnd:r,polygonStart:function(){a.lineEnd=u},polygonEnd:function(){a.lineEnd=r,a.point=n},pointRadius:function(n){return i=Ie(n),a},result:function(){if(o.length){var n=o.join("");return o=[],n}}};return a}function Ie(n){return"m0,"+n+"a"+n+","+n+" 0 1,1 0,"+-2*n+"a"+n+","+n+" 0 1,1 0,"+2*n+"z"}function Ze(n,t){dc+=n,mc+=t,++yc}function Ve(){function n(n,r){var u=n-t,i=r-e,o=Math.sqrt(u*u+i*i);xc+=o*(t+n)/2,Mc+=o*(e+r)/2,_c+=o,Ze(t=n,e=r)}var t,e;Pc.point=function(r,u){Pc.point=n,Ze(t=r,e=u)}}function Xe(){Pc.point=Ze}function $e(){function n(n,t){var e=n-r,i=t-u,o=Math.sqrt(e*e+i*i);xc+=o*(r+n)/2,Mc+=o*(u+t)/2,_c+=o,o=u*n-r*t,bc+=o*(r+n),wc+=o*(u+t),Sc+=3*o,Ze(r=n,u=t)}var t,e,r,u;Pc.point=function(i,o){Pc.point=n,Ze(t=r=i,e=u=o)},Pc.lineEnd=function(){n(t,e)}}function Be(n){function t(t,e){n.moveTo(t,e),n.arc(t,e,o,0,ka)}function e(t,e){n.moveTo(t,e),a.point=r}function r(t,e){n.lineTo(t,e)}function u(){a.point=t}function i(){n.closePath()}var o=4.5,a={point:t,lineStart:function(){a.point=e},lineEnd:u,polygonStart:function(){a.lineEnd=i},polygonEnd:function(){a.lineEnd=u,a.point=t},pointRadius:function(n){return o=n,a},result:g};return a}function We(n){function t(n){return(a?r:e)(n)}function e(t){return Ke(t,function(e,r){e=n(e,r),t.point(e[0],e[1])})}function r(t){function e(e,r){e=n(e,r),t.point(e[0],e[1])}function r(){x=0/0,S.point=i,t.lineStart()}function i(e,r){var i=se([e,r]),o=n(e,r);u(x,M,y,_,b,w,x=o[0],M=o[1],y=e,_=i[0],b=i[1],w=i[2],a,t),t.point(x,M)}function o(){S.point=e,t.lineEnd()}function c(){r(),S.point=s,S.lineEnd=l}function s(n,t){i(f=n,h=t),g=x,p=M,v=_,d=b,m=w,S.point=i}function l(){u(x,M,y,_,b,w,g,p,f,v,d,m,a,t),S.lineEnd=o,o()}var 
f,h,g,p,v,d,m,y,x,M,_,b,w,S={point:e,lineStart:r,lineEnd:o,polygonStart:function(){t.polygonStart(),S.lineStart=c},polygonEnd:function(){t.polygonEnd(),S.lineStart=r}};return S}function u(t,e,r,a,c,s,l,f,h,g,p,v,d,m){var y=l-t,x=f-e,M=y*y+x*x;if(M>4*i&&d--){var _=a+g,b=c+p,w=s+v,S=Math.sqrt(_*_+b*b+w*w),k=Math.asin(w/=S),E=oa(oa(w)-1)<Aa||oa(r-h)<Aa?(r+h)/2:Math.atan2(b,_),A=n(E,k),C=A[0],N=A[1],L=C-t,T=N-e,q=x*L-y*T;(q*q/M>i||oa((y*L+x*T)/M-.5)>.3||o>a*g+c*p+s*v)&&(u(t,e,r,a,c,s,C,N,E,_/=S,b/=S,w,d,m),m.point(C,N),u(C,N,E,_,b,w,l,f,h,g,p,v,d,m))}}var i=.5,o=Math.cos(30*Na),a=16;return t.precision=function(n){return arguments.length?(a=(i=n*n)>0&&16,t):Math.sqrt(i)},t}function Je(n){var t=We(function(t,e){return n([t*La,e*La])});return function(n){return tr(t(n))}}function Ge(n){this.stream=n}function Ke(n,t){return{point:t,sphere:function(){n.sphere()},lineStart:function(){n.lineStart()},lineEnd:function(){n.lineEnd()},polygonStart:function(){n.polygonStart()},polygonEnd:function(){n.polygonEnd()}}}function Qe(n){return nr(function(){return n})()}function nr(n){function t(n){return n=a(n[0]*Na,n[1]*Na),[n[0]*h+c,s-n[1]*h]}function e(n){return n=a.invert((n[0]-c)/h,(s-n[1])/h),n&&[n[0]*La,n[1]*La]}function r(){a=Ue(o=ur(m,y,x),i);var n=i(v,d);return c=g-n[0]*h,s=p+n[1]*h,u()}function u(){return l&&(l.valid=!1,l=null),t}var i,o,a,c,s,l,f=We(function(n,t){return n=i(n,t),[n[0]*h+c,s-n[1]*h]}),h=150,g=480,p=250,v=0,d=0,m=0,y=0,x=0,M=Ec,_=bt,b=null,w=null;return t.stream=function(n){return l&&(l.valid=!1),l=tr(M(o,f(_(n)))),l.valid=!0,l},t.clipAngle=function(n){return arguments.length?(M=null==n?(b=n,Ec):Re((b=+n)*Na),u()):b},t.clipExtent=function(n){return arguments.length?(w=n,_=n?Pe(n[0][0],n[0][1],n[1][0],n[1][1]):bt,u()):w},t.scale=function(n){return arguments.length?(h=+n,r()):h},t.translate=function(n){return arguments.length?(g=+n[0],p=+n[1],r()):[g,p]},t.center=function(n){return 
arguments.length?(v=n[0]%360*Na,d=n[1]%360*Na,r()):[v*La,d*La]},t.rotate=function(n){return arguments.length?(m=n[0]%360*Na,y=n[1]%360*Na,x=n.length>2?n[2]%360*Na:0,r()):[m*La,y*La,x*La]},Xo.rebind(t,f,"precision"),function(){return i=n.apply(this,arguments),t.invert=i.invert&&e,r()}}function tr(n){return Ke(n,function(t,e){n.point(t*Na,e*Na)})}function er(n,t){return[n,t]}function rr(n,t){return[n>Sa?n-ka:-Sa>n?n+ka:n,t]}function ur(n,t,e){return n?t||e?Ue(or(n),ar(t,e)):or(n):t||e?ar(t,e):rr}function ir(n){return function(t,e){return t+=n,[t>Sa?t-ka:-Sa>t?t+ka:t,e]}}function or(n){var t=ir(n);return t.invert=ir(-n),t}function ar(n,t){function e(n,t){var e=Math.cos(t),a=Math.cos(n)*e,c=Math.sin(n)*e,s=Math.sin(t),l=s*r+a*u;return[Math.atan2(c*i-l*o,a*r-s*u),X(l*i+c*o)]}var r=Math.cos(n),u=Math.sin(n),i=Math.cos(t),o=Math.sin(t);return e.invert=function(n,t){var e=Math.cos(t),a=Math.cos(n)*e,c=Math.sin(n)*e,s=Math.sin(t),l=s*i-c*o;return[Math.atan2(c*i+s*o,a*r+l*u),X(l*r-a*u)]},e}function cr(n,t){var e=Math.cos(n),r=Math.sin(n);return function(u,i,o,a){var c=o*t;null!=u?(u=sr(e,u),i=sr(e,i),(o>0?i>u:u>i)&&(u+=o*ka)):(u=n+o*ka,i=n-.5*c);for(var s,l=u;o>0?l>i:i>l;l-=c)a.point((s=ve([e,-r*Math.cos(l),-r*Math.sin(l)]))[0],s[1])}}function sr(n,t){var e=se(t);e[0]-=n,pe(e);var r=V(-e[1]);return((-e[2]<0?-r:r)+2*Math.PI-Aa)%(2*Math.PI)}function lr(n,t,e){var r=Xo.range(n,t-Aa,e).concat(t);return function(n){return r.map(function(t){return[n,t]})}}function fr(n,t,e){var r=Xo.range(n,t-Aa,e).concat(t);return function(n){return r.map(function(t){return[t,n]})}}function hr(n){return n.source}function gr(n){return n.target}function pr(n,t,e,r){var u=Math.cos(t),i=Math.sin(t),o=Math.cos(r),a=Math.sin(r),c=u*Math.cos(n),s=u*Math.sin(n),l=o*Math.cos(e),f=o*Math.sin(e),h=2*Math.asin(Math.sqrt(J(r-t)+u*o*J(e-n))),g=1/Math.sin(h),p=h?function(n){var 
t=Math.sin(n*=h)*g,e=Math.sin(h-n)*g,r=e*c+t*l,u=e*s+t*f,o=e*i+t*a;return[Math.atan2(u,r)*La,Math.atan2(o,Math.sqrt(r*r+u*u))*La]}:function(){return[n*La,t*La]};return p.distance=h,p}function vr(){function n(n,u){var i=Math.sin(u*=Na),o=Math.cos(u),a=oa((n*=Na)-t),c=Math.cos(a);Uc+=Math.atan2(Math.sqrt((a=o*Math.sin(a))*a+(a=r*i-e*o*c)*a),e*i+r*o*c),t=n,e=i,r=o}var t,e,r;jc.point=function(u,i){t=u*Na,e=Math.sin(i*=Na),r=Math.cos(i),jc.point=n},jc.lineEnd=function(){jc.point=jc.lineEnd=g}}function dr(n,t){function e(t,e){var r=Math.cos(t),u=Math.cos(e),i=n(r*u);return[i*u*Math.sin(t),i*Math.sin(e)]}return e.invert=function(n,e){var r=Math.sqrt(n*n+e*e),u=t(r),i=Math.sin(u),o=Math.cos(u);return[Math.atan2(n*i,r*o),Math.asin(r&&e*i/r)]},e}function mr(n,t){function e(n,t){var e=oa(oa(t)-Ea)<Aa?0:o/Math.pow(u(t),i);return[e*Math.sin(i*n),o-e*Math.cos(i*n)]}var r=Math.cos(n),u=function(n){return Math.tan(Sa/4+n/2)},i=n===t?Math.sin(n):Math.log(r/Math.cos(t))/Math.log(u(t)/u(n)),o=r*Math.pow(u(n),i)/i;return i?(e.invert=function(n,t){var e=o-t,r=I(i)*Math.sqrt(n*n+e*e);return[Math.atan2(n,e)/i,2*Math.atan(Math.pow(o/r,1/i))-Ea]},e):xr}function yr(n,t){function e(n,t){var e=i-t;return[e*Math.sin(u*n),i-e*Math.cos(u*n)]}var r=Math.cos(n),u=n===t?Math.sin(n):(r-Math.cos(t))/(t-n),i=r/u+n;return oa(u)<Aa?er:(e.invert=function(n,t){var e=i-t;return[Math.atan2(n,e)/u,i-I(u)*Math.sqrt(n*n+e*e)]},e)}function xr(n,t){return[n,Math.log(Math.tan(Sa/4+t/2))]}function Mr(n){var t,e=Qe(n),r=e.scale,u=e.translate,i=e.clipExtent;return e.scale=function(){var n=r.apply(e,arguments);return n===e?t?e.clipExtent(null):e:n},e.translate=function(){var n=u.apply(e,arguments);return n===e?t?e.clipExtent(null):e:n},e.clipExtent=function(n){var o=i.apply(e,arguments);if(o===e){if(t=null==n){var a=Sa*r(),c=u();i([[c[0]-a,c[1]-a],[c[0]+a,c[1]+a]])}}else t&&(o=null);return o},e.clipExtent(null)}function _r(n,t){return[Math.log(Math.tan(Sa/4+t/2)),-n]}function br(n){return n[0]}function wr(n){return 
n[1]}
/* Sr(points): indices of one convex-hull chain via Andrew's monotone scan (input assumed
   pre-sorted); Z is the cross-product orientation test defined elsewhere in this file. */
function Sr(n){for(var t=n.length,e=[0,1],r=2,u=2;t>u;u++){for(;r>1&&Z(n[e[r-2]],n[e[r-1]],n[u])<=0;)--r;e[r++]=u}return e.slice(0,r)}
/* kr: lexicographic point comparator (x, then y). */
function kr(n,t){return n[0]-t[0]||n[1]-t[1]}
/* Er(a,b,c): orientation test — true when a lies to one side of the directed line b->c. */
function Er(n,t,e){return(e[0]-t[0])*(n[1]-t[1])<(e[1]-t[1])*(n[0]-t[0])}
/* Ar(a,b,c,d): intersection point of the infinite lines a-b and c-d. */
function Ar(n,t,e,r){var u=n[0],i=e[0],o=t[0]-u,a=r[0]-i,c=n[1],s=e[1],l=t[1]-c,f=r[1]-s,h=(a*(c-s)-f*(u-i))/(f*o-a*l);return[u+h*o,c+h*l]}
/* Cr(ring): true when the first and last points coincide (closed ring). */
function Cr(n){var t=n[0],e=n[n.length-1];return!(t[0]-e[0]||t[1]-e[1])}
/* Voronoi (Fortune's sweep-line) beach-line arc helpers: Nr = arc node constructor,
   Lr = pooled allocation from Jc, Tr = detach an arc from the beach tree and recycle it. */
function Nr(){Jr(this),this.edge=this.site=this.circle=null}function Lr(n){var t=Jc.pop()||new Nr;return t.site=n,t}function Tr(n){Or(n),$c.remove(n),Jc.push(n),Jr(n)}
/* qr(arc): process a circle event — remove the arc plus any neighbors converging on the
   same circle center, then join their Voronoi edges at that center. */
function qr(n){var t=n.circle,e=t.x,r=t.cy,u={x:e,y:r},i=n.P,o=n.N,a=[n];Tr(n);for(var c=i;c.circle&&oa(e-c.circle.x)<Aa&&oa(r-c.circle.cy)<Aa;)i=c.P,a.unshift(c),Tr(c),c=i;a.unshift(c),Or(c);for(var s=o;s.circle&&oa(e-s.circle.x)<Aa&&oa(r-s.circle.cy)<Aa;)o=s.N,a.push(s),Tr(s),s=o;a.push(s),Or(s);var l,f=a.length;for(l=1;f>l;++l)s=a[l],c=a[l-1],$r(s.edge,c.site,s.site,u);c=a[0],s=a[f-1],s.edge=Vr(c.site,s.site,null,u),Fr(c),Fr(s)}
/* zr(site): process a site event — locate the arc above the site in the beach-line tree
   ($c) via breakpoint search, split it, and create the new Voronoi edge. */
function zr(n){for(var t,e,r,u,i=n.x,o=n.y,a=$c._;a;)if(r=Rr(a,o)-i,r>Aa)a=a.L;else{if(u=i-Dr(a,o),!(u>Aa)){r>-Aa?(t=a.P,e=a):u>-Aa?(t=a,e=a.N):t=e=a;break}if(!a.R){t=a;break}a=a.R}var c=Lr(n);if($c.insert(t,c),t||e){if(t===e)return Or(t),e=Lr(t.site),$c.insert(c,e),c.edge=e.edge=Vr(t.site,c.site),Fr(t),Fr(e),void 0;if(!e)return c.edge=Vr(t.site,c.site),void 0;Or(t),Or(e);var s=t.site,l=s.x,f=s.y,h=n.x-l,g=n.y-f,p=e.site,v=p.x-l,d=p.y-f,m=2*(h*d-g*v),y=h*h+g*g,x=v*v+d*d,M={x:(d*y-g*x)/m+l,y:(h*x-v*y)/m+f};$r(e.edge,s,p,M),c.edge=Vr(s,n,null,M),e.edge=Vr(n,p,null,M),Fr(t),Fr(e)}}
/* Rr(arc,sweepY): x-coordinate of the left breakpoint of `arc` at the sweep line
   (intersection of the parabolas of this arc's site and its predecessor's). */
function Rr(n,t){var e=n.site,r=e.x,u=e.y,i=u-t;if(!i)return r;var o=n.P;if(!o)return-1/0;e=o.site;var a=e.x,c=e.y,s=c-t;if(!s)return a;var l=a-r,f=1/i-1/s,h=l/s;return f?(-h+Math.sqrt(h*h-2*f*(l*l/(-2*s)-c+s/2+u-i/2)))/f+r:(r+a)/2}
/* Dr(arc,sweepY): x of the right breakpoint (i.e. the left breakpoint of the next arc). */
function Dr(n,t){var e=n.N;if(e)return Rr(e,t);var r=n.site;return r.y===t?r.x:1/0}function 
Pr(n){this.site=n,this.edges=[]}function Ur(n){for(var t,e,r,u,i,o,a,c,s,l,f=n[0][0],h=n[1][0],g=n[0][1],p=n[1][1],v=Xc,d=v.length;d--;)if(i=v[d],i&&i.prepare())for(a=i.edges,c=a.length,o=0;c>o;)l=a[o].end(),r=l.x,u=l.y,s=a[++o%c].start(),t=s.x,e=s.y,(oa(r-t)>Aa||oa(u-e)>Aa)&&(a.splice(o,0,new Br(Xr(i.site,l,oa(r-f)<Aa&&p-u>Aa?{x:f,y:oa(t-f)<Aa?e:p}:oa(u-p)<Aa&&h-r>Aa?{x:oa(e-p)<Aa?t:h,y:p}:oa(r-h)<Aa&&u-g>Aa?{x:h,y:oa(t-h)<Aa?e:g}:oa(u-g)<Aa&&r-f>Aa?{x:oa(e-g)<Aa?t:f,y:g}:null),i.site,null)),++c)}function jr(n,t){return t.angle-n.angle}function Hr(){Jr(this),this.x=this.y=this.arc=this.site=this.cy=null}function Fr(n){var t=n.P,e=n.N;if(t&&e){var r=t.site,u=n.site,i=e.site;if(r!==i){var o=u.x,a=u.y,c=r.x-o,s=r.y-a,l=i.x-o,f=i.y-a,h=2*(c*f-s*l);if(!(h>=-Ca)){var g=c*c+s*s,p=l*l+f*f,v=(f*g-s*p)/h,d=(c*p-l*g)/h,f=d+a,m=Gc.pop()||new Hr;m.arc=n,m.site=u,m.x=v+o,m.y=f+Math.sqrt(v*v+d*d),m.cy=f,n.circle=m;for(var y=null,x=Wc._;x;)if(m.y<x.y||m.y===x.y&&m.x<=x.x){if(!x.L){y=x.P;break}x=x.L}else{if(!x.R){y=x;break}x=x.R}Wc.insert(y,m),y||(Bc=m)}}}}function Or(n){var t=n.circle;t&&(t.P||(Bc=t.N),Wc.remove(t),Gc.push(t),Jr(t),n.circle=null)}function Yr(n){for(var t,e=Vc,r=De(n[0][0],n[0][1],n[1][0],n[1][1]),u=e.length;u--;)t=e[u],(!Ir(t,n)||!r(t)||oa(t.a.x-t.b.x)<Aa&&oa(t.a.y-t.b.y)<Aa)&&(t.a=t.b=null,e.splice(u,1))}function Ir(n,t){var e=n.b;if(e)return!0;var r,u,i=n.a,o=t[0][0],a=t[1][0],c=t[0][1],s=t[1][1],l=n.l,f=n.r,h=l.x,g=l.y,p=f.x,v=f.y,d=(h+p)/2,m=(g+v)/2;if(v===g){if(o>d||d>=a)return;if(h>p){if(i){if(i.y>=s)return}else i={x:d,y:c};e={x:d,y:s}}else{if(i){if(i.y<c)return}else i={x:d,y:s};e={x:d,y:c}}}else if(r=(h-p)/(v-g),u=m-r*d,-1>r||r>1)if(h>p){if(i){if(i.y>=s)return}else i={x:(c-u)/r,y:c};e={x:(s-u)/r,y:s}}else{if(i){if(i.y<c)return}else i={x:(s-u)/r,y:s};e={x:(c-u)/r,y:c}}else if(v>g){if(i){if(i.x>=a)return}else i={x:o,y:r*o+u};e={x:a,y:r*a+u}}else{if(i){if(i.x<o)return}else i={x:a,y:r*a+u};e={x:o,y:r*o+u}}return n.a=i,n.b=e,!0}function 
Zr(n,t){this.l=n,this.r=t,this.a=this.b=null}function Vr(n,t,e,r){var u=new Zr(n,t);return Vc.push(u),e&&$r(u,n,t,e),r&&$r(u,t,n,r),Xc[n.i].edges.push(new Br(u,n,t)),Xc[t.i].edges.push(new Br(u,t,n)),u}function Xr(n,t,e){var r=new Zr(n,null);return r.a=t,r.b=e,Vc.push(r),r}function $r(n,t,e,r){n.a||n.b?n.l===e?n.b=r:n.a=r:(n.a=r,n.l=t,n.r=e)}function Br(n,t,e){var r=n.a,u=n.b;this.edge=n,this.site=t,this.angle=e?Math.atan2(e.y-t.y,e.x-t.x):n.l===t?Math.atan2(u.x-r.x,r.y-u.y):Math.atan2(r.x-u.x,u.y-r.y)}function Wr(){this._=null}function Jr(n){n.U=n.C=n.L=n.R=n.P=n.N=null}function Gr(n,t){var e=t,r=t.R,u=e.U;u?u.L===e?u.L=r:u.R=r:n._=r,r.U=u,e.U=r,e.R=r.L,e.R&&(e.R.U=e),r.L=e}function Kr(n,t){var e=t,r=t.L,u=e.U;u?u.L===e?u.L=r:u.R=r:n._=r,r.U=u,e.U=r,e.L=r.R,e.L&&(e.L.U=e),r.R=e}function Qr(n){for(;n.L;)n=n.L;return n}function nu(n,t){var e,r,u,i=n.sort(tu).pop();for(Vc=[],Xc=new Array(n.length),$c=new Wr,Wc=new Wr;;)if(u=Bc,i&&(!u||i.y<u.y||i.y===u.y&&i.x<u.x))(i.x!==e||i.y!==r)&&(Xc[i.i]=new Pr(i),zr(i),e=i.x,r=i.y),i=n.pop();else{if(!u)break;qr(u.arc)}t&&(Yr(t),Ur(t));var o={cells:Xc,edges:Vc};return $c=Wc=Vc=Xc=null,o}function tu(n,t){return t.y-n.y||t.x-n.x}function eu(n,t,e){return(n.x-e.x)*(t.y-n.y)-(n.x-t.x)*(e.y-n.y)}function ru(n){return n.x}function uu(n){return n.y}function iu(){return{leaf:!0,nodes:[],point:null,x:null,y:null}}function ou(n,t,e,r,u,i){if(!n(t,e,r,u,i)){var o=.5*(e+u),a=.5*(r+i),c=t.nodes;c[0]&&ou(n,c[0],e,r,o,a),c[1]&&ou(n,c[1],o,r,u,a),c[2]&&ou(n,c[2],e,a,o,i),c[3]&&ou(n,c[3],o,a,u,i)}}function au(n,t){n=Xo.rgb(n),t=Xo.rgb(t);var e=n.r,r=n.g,u=n.b,i=t.r-e,o=t.g-r,a=t.b-u;return function(n){return"#"+vt(Math.round(e+i*n))+vt(Math.round(r+o*n))+vt(Math.round(u+a*n))}}function cu(n,t){var e,r={},u={};for(e in n)e in t?r[e]=fu(n[e],t[e]):u[e]=n[e];for(e in t)e in n||(u[e]=t[e]);return function(n){for(e in r)u[e]=r[e](n);return u}}function su(n,t){return t-=n=+n,function(e){return n+t*e}}function lu(n,t){var 
e,r,u,i,o,a=0,c=0,s=[],l=[];for(n+="",t+="",Qc.lastIndex=0,r=0;e=Qc.exec(t);++r)e.index&&s.push(t.substring(a,c=e.index)),l.push({i:s.length,x:e[0]}),s.push(null),a=Qc.lastIndex;for(a<t.length&&s.push(t.substring(a)),r=0,i=l.length;(e=Qc.exec(n))&&i>r;++r)if(o=l[r],o.x==e[0]){if(o.i)if(null==s[o.i+1])for(s[o.i-1]+=o.x,s.splice(o.i,1),u=r+1;i>u;++u)l[u].i--;else for(s[o.i-1]+=o.x+s[o.i+1],s.splice(o.i,2),u=r+1;i>u;++u)l[u].i-=2;else if(null==s[o.i+1])s[o.i]=o.x;else for(s[o.i]=o.x+s[o.i+1],s.splice(o.i+1,1),u=r+1;i>u;++u)l[u].i--;l.splice(r,1),i--,r--}else o.x=su(parseFloat(e[0]),parseFloat(o.x));for(;i>r;)o=l.pop(),null==s[o.i+1]?s[o.i]=o.x:(s[o.i]=o.x+s[o.i+1],s.splice(o.i+1,1)),i--;return 1===s.length?null==s[0]?(o=l[0].x,function(n){return o(n)+""}):function(){return t}:function(n){for(r=0;i>r;++r)s[(o=l[r]).i]=o.x(n);return s.join("")}}function fu(n,t){for(var e,r=Xo.interpolators.length;--r>=0&&!(e=Xo.interpolators[r](n,t)););return e}function hu(n,t){var e,r=[],u=[],i=n.length,o=t.length,a=Math.min(n.length,t.length);for(e=0;a>e;++e)r.push(fu(n[e],t[e]));for(;i>e;++e)u[e]=n[e];for(;o>e;++e)u[e]=t[e];return function(n){for(e=0;a>e;++e)u[e]=r[e](n);return u}}function gu(n){return function(t){return 0>=t?0:t>=1?1:n(t)}}function pu(n){return function(t){return 1-n(1-t)}}function vu(n){return function(t){return.5*(.5>t?n(2*t):2-n(2-2*t))}}function du(n){return n*n}function mu(n){return n*n*n}function yu(n){if(0>=n)return 0;if(n>=1)return 1;var t=n*n,e=t*n;return 4*(.5>n?e:3*(n-t)+e-.75)}function xu(n){return function(t){return Math.pow(t,n)}}function Mu(n){return 1-Math.cos(n*Ea)}function _u(n){return Math.pow(2,10*(n-1))}function bu(n){return 1-Math.sqrt(1-n*n)}function wu(n,t){var e;return arguments.length<2&&(t=.45),arguments.length?e=t/ka*Math.asin(1/n):(n=1,e=t/4),function(r){return 1+n*Math.pow(2,-10*r)*Math.sin((r-e)*ka/t)}}function Su(n){return n||(n=1.70158),function(t){return t*t*((n+1)*t-n)}}function ku(n){return 
1/2.75>n?7.5625*n*n:2/2.75>n?7.5625*(n-=1.5/2.75)*n+.75:2.5/2.75>n?7.5625*(n-=2.25/2.75)*n+.9375:7.5625*(n-=2.625/2.75)*n+.984375}function Eu(n,t){n=Xo.hcl(n),t=Xo.hcl(t);var e=n.h,r=n.c,u=n.l,i=t.h-e,o=t.c-r,a=t.l-u;return isNaN(o)&&(o=0,r=isNaN(r)?t.c:r),isNaN(i)?(i=0,e=isNaN(e)?t.h:e):i>180?i-=360:-180>i&&(i+=360),function(n){return rt(e+i*n,r+o*n,u+a*n)+""}}function Au(n,t){n=Xo.hsl(n),t=Xo.hsl(t);var e=n.h,r=n.s,u=n.l,i=t.h-e,o=t.s-r,a=t.l-u;return isNaN(o)&&(o=0,r=isNaN(r)?t.s:r),isNaN(i)?(i=0,e=isNaN(e)?t.h:e):i>180?i-=360:-180>i&&(i+=360),function(n){return nt(e+i*n,r+o*n,u+a*n)+""}}function Cu(n,t){n=Xo.lab(n),t=Xo.lab(t);var e=n.l,r=n.a,u=n.b,i=t.l-e,o=t.a-r,a=t.b-u;return function(n){return ot(e+i*n,r+o*n,u+a*n)+""}}function Nu(n,t){return t-=n,function(e){return Math.round(n+t*e)}}function Lu(n){var t=[n.a,n.b],e=[n.c,n.d],r=qu(t),u=Tu(t,e),i=qu(zu(e,t,-u))||0;t[0]*e[1]<e[0]*t[1]&&(t[0]*=-1,t[1]*=-1,r*=-1,u*=-1),this.rotate=(r?Math.atan2(t[1],t[0]):Math.atan2(-e[0],e[1]))*La,this.translate=[n.e,n.f],this.scale=[r,i],this.skew=i?Math.atan2(u,i)*La:0}function Tu(n,t){return n[0]*t[0]+n[1]*t[1]}function qu(n){var t=Math.sqrt(Tu(n,n));return t&&(n[0]/=t,n[1]/=t),t}function zu(n,t,e){return n[0]+=e*t[0],n[1]+=e*t[1],n}function Ru(n,t){var e,r=[],u=[],i=Xo.transform(n),o=Xo.transform(t),a=i.translate,c=o.translate,s=i.rotate,l=o.rotate,f=i.skew,h=o.skew,g=i.scale,p=o.scale;return 
a[0]!=c[0]||a[1]!=c[1]?(r.push("translate(",null,",",null,")"),u.push({i:1,x:su(a[0],c[0])},{i:3,x:su(a[1],c[1])})):c[0]||c[1]?r.push("translate("+c+")"):r.push(""),s!=l?(s-l>180?l+=360:l-s>180&&(s+=360),u.push({i:r.push(r.pop()+"rotate(",null,")")-2,x:su(s,l)})):l&&r.push(r.pop()+"rotate("+l+")"),f!=h?u.push({i:r.push(r.pop()+"skewX(",null,")")-2,x:su(f,h)}):h&&r.push(r.pop()+"skewX("+h+")"),g[0]!=p[0]||g[1]!=p[1]?(e=r.push(r.pop()+"scale(",null,",",null,")"),u.push({i:e-4,x:su(g[0],p[0])},{i:e-2,x:su(g[1],p[1])})):(1!=p[0]||1!=p[1])&&r.push(r.pop()+"scale("+p+")"),e=u.length,function(n){for(var t,i=-1;++i<e;)r[(t=u[i]).i]=t.x(n);return r.join("")}}function Du(n,t){return t=t-(n=+n)?1/(t-n):0,function(e){return(e-n)*t}}function Pu(n,t){return t=t-(n=+n)?1/(t-n):0,function(e){return Math.max(0,Math.min(1,(e-n)*t))}}function Uu(n){for(var t=n.source,e=n.target,r=Hu(t,e),u=[t];t!==r;)t=t.parent,u.push(t);for(var i=u.length;e!==r;)u.splice(i,0,e),e=e.parent;return u}function ju(n){for(var t=[],e=n.parent;null!=e;)t.push(n),n=e,e=e.parent;return t.push(n),t}function Hu(n,t){if(n===t)return n;for(var e=ju(n),r=ju(t),u=e.pop(),i=r.pop(),o=null;u===i;)o=u,u=e.pop(),i=r.pop();return o}function Fu(n){n.fixed|=2}function Ou(n){n.fixed&=-7}function Yu(n){n.fixed|=4,n.px=n.x,n.py=n.y}function Iu(n){n.fixed&=-5}function Zu(n,t,e){var r=0,u=0;if(n.charge=0,!n.leaf)for(var i,o=n.nodes,a=o.length,c=-1;++c<a;)i=o[c],null!=i&&(Zu(i,t,e),n.charge+=i.charge,r+=i.charge*i.cx,u+=i.charge*i.cy);if(n.point){n.leaf||(n.point.x+=Math.random()-.5,n.point.y+=Math.random()-.5);var s=t*e[n.point.index];n.charge+=n.pointCharge=s,r+=s*n.point.x,u+=s*n.point.y}n.cx=r/n.charge,n.cy=u/n.charge}function Vu(n,t){return Xo.rebind(n,t,"sort","children","value"),n.nodes=n,n.links=Wu,n}function Xu(n){return n.children}function $u(n){return n.value}function Bu(n,t){return t.value-n.value}function Wu(n){return 
Xo.merge(n.map(function(n){return(n.children||[]).map(function(t){return{source:n,target:t}})}))}function Ju(n){return n.x}function Gu(n){return n.y}function Ku(n,t,e){n.y0=t,n.y=e}function Qu(n){return Xo.range(n.length)}function ni(n){for(var t=-1,e=n[0].length,r=[];++t<e;)r[t]=0;return r}function ti(n){for(var t,e=1,r=0,u=n[0][1],i=n.length;i>e;++e)(t=n[e][1])>u&&(r=e,u=t);return r}function ei(n){return n.reduce(ri,0)}function ri(n,t){return n+t[1]}function ui(n,t){return ii(n,Math.ceil(Math.log(t.length)/Math.LN2+1))}function ii(n,t){for(var e=-1,r=+n[0],u=(n[1]-r)/t,i=[];++e<=t;)i[e]=u*e+r;return i}function oi(n){return[Xo.min(n),Xo.max(n)]}function ai(n,t){return n.parent==t.parent?1:2}function ci(n){var t=n.children;return t&&t.length?t[0]:n._tree.thread}function si(n){var t,e=n.children;return e&&(t=e.length)?e[t-1]:n._tree.thread}function li(n,t){var e=n.children;if(e&&(u=e.length))for(var r,u,i=-1;++i<u;)t(r=li(e[i],t),n)>0&&(n=r);return n}function fi(n,t){return n.x-t.x}function hi(n,t){return t.x-n.x}function gi(n,t){return n.depth-t.depth}function pi(n,t){function e(n,r){var u=n.children;if(u&&(o=u.length))for(var i,o,a=null,c=-1;++c<o;)i=u[c],e(i,a),a=i;t(n,r)}e(n,null)}function vi(n){for(var t,e=0,r=0,u=n.children,i=u.length;--i>=0;)t=u[i]._tree,t.prelim+=e,t.mod+=e,e+=t.shift+(r+=t.change)}function di(n,t,e){n=n._tree,t=t._tree;var r=e/(t.number-n.number);n.change+=r,t.change-=r,t.shift+=e,t.prelim+=e,t.mod+=e}function mi(n,t,e){return n._tree.ancestor.parent==t.parent?n._tree.ancestor:e}function yi(n,t){return n.value-t.value}function xi(n,t){var e=n._pack_next;n._pack_next=t,t._pack_prev=n,t._pack_next=e,e._pack_prev=t}function Mi(n,t){n._pack_next=t,t._pack_prev=n}function _i(n,t){var e=t.x-n.x,r=t.y-n.y,u=n.r+t.r;return.999*u*u>e*e+r*r}function bi(n){function t(n){l=Math.min(n.x-n.r,l),f=Math.max(n.x+n.r,f),h=Math.min(n.y-n.r,h),g=Math.max(n.y+n.r,g)}if((e=n.children)&&(s=e.length)){var 
e,r,u,i,o,a,c,s,l=1/0,f=-1/0,h=1/0,g=-1/0;if(e.forEach(wi),r=e[0],r.x=-r.r,r.y=0,t(r),s>1&&(u=e[1],u.x=u.r,u.y=0,t(u),s>2))for(i=e[2],Ei(r,u,i),t(i),xi(r,i),r._pack_prev=i,xi(i,u),u=r._pack_next,o=3;s>o;o++){Ei(r,u,i=e[o]);var p=0,v=1,d=1;for(a=u._pack_next;a!==u;a=a._pack_next,v++)if(_i(a,i)){p=1;break}if(1==p)for(c=r._pack_prev;c!==a._pack_prev&&!_i(c,i);c=c._pack_prev,d++);p?(d>v||v==d&&u.r<r.r?Mi(r,u=a):Mi(r=c,u),o--):(xi(r,i),u=i,t(i))}var m=(l+f)/2,y=(h+g)/2,x=0;for(o=0;s>o;o++)i=e[o],i.x-=m,i.y-=y,x=Math.max(x,i.r+Math.sqrt(i.x*i.x+i.y*i.y));n.r=x,e.forEach(Si)}}function wi(n){n._pack_next=n._pack_prev=n}function Si(n){delete n._pack_next,delete n._pack_prev}function ki(n,t,e,r){var u=n.children;if(n.x=t+=r*n.x,n.y=e+=r*n.y,n.r*=r,u)for(var i=-1,o=u.length;++i<o;)ki(u[i],t,e,r)}function Ei(n,t,e){var r=n.r+e.r,u=t.x-n.x,i=t.y-n.y;if(r&&(u||i)){var o=t.r+e.r,a=u*u+i*i;o*=o,r*=r;var c=.5+(r-o)/(2*a),s=Math.sqrt(Math.max(0,2*o*(r+a)-(r-=a)*r-o*o))/(2*a);e.x=n.x+c*u+s*i,e.y=n.y+c*i-s*u}else e.x=n.x+r,e.y=n.y}function Ai(n){return 1+Xo.max(n,function(n){return n.y})}function Ci(n){return n.reduce(function(n,t){return n+t.x},0)/n.length}function Ni(n){var t=n.children;return t&&t.length?Ni(t[0]):n}function Li(n){var t,e=n.children;return e&&(t=e.length)?Li(e[t-1]):n}function Ti(n){return{x:n.x,y:n.y,dx:n.dx,dy:n.dy}}function qi(n,t){var e=n.x+t[3],r=n.y+t[0],u=n.dx-t[1]-t[3],i=n.dy-t[0]-t[2];return 0>u&&(e+=u/2,u=0),0>i&&(r+=i/2,i=0),{x:e,y:r,dx:u,dy:i}}function zi(n){var t=n[0],e=n[n.length-1];return e>t?[t,e]:[e,t]}function Ri(n){return n.rangeExtent?n.rangeExtent():zi(n.range())}function Di(n,t,e,r){var u=e(n[0],n[1]),i=r(t[0],t[1]);return function(n){return i(u(n))}}function Pi(n,t){var e,r=0,u=n.length-1,i=n[r],o=n[u];return i>o&&(e=r,r=u,u=e,e=i,i=o,o=e),n[r]=t.floor(i),n[u]=t.ceil(o),n}function Ui(n){return n?{floor:function(t){return Math.floor(t/n)*n},ceil:function(t){return Math.ceil(t/n)*n}}:ls}function ji(n,t,e,r){var 
u=[],i=[],o=0,a=Math.min(n.length,t.length)-1;for(n[a]<n[0]&&(n=n.slice().reverse(),t=t.slice().reverse());++o<=a;)u.push(e(n[o-1],n[o])),i.push(r(t[o-1],t[o]));return function(t){var e=Xo.bisect(n,t,1,a)-1;return i[e](u[e](t))}}function Hi(n,t,e,r){function u(){var u=Math.min(n.length,t.length)>2?ji:Di,c=r?Pu:Du;return o=u(n,t,c,e),a=u(t,n,c,fu),i}function i(n){return o(n)}var o,a;return i.invert=function(n){return a(n)},i.domain=function(t){return arguments.length?(n=t.map(Number),u()):n},i.range=function(n){return arguments.length?(t=n,u()):t},i.rangeRound=function(n){return i.range(n).interpolate(Nu)},i.clamp=function(n){return arguments.length?(r=n,u()):r},i.interpolate=function(n){return arguments.length?(e=n,u()):e},i.ticks=function(t){return Ii(n,t)},i.tickFormat=function(t,e){return Zi(n,t,e)},i.nice=function(t){return Oi(n,t),u()},i.copy=function(){return Hi(n,t,e,r)},u()}function Fi(n,t){return Xo.rebind(n,t,"range","rangeRound","interpolate","clamp")}function Oi(n,t){return Pi(n,Ui(Yi(n,t)[2]))}function Yi(n,t){null==t&&(t=10);var e=zi(n),r=e[1]-e[0],u=Math.pow(10,Math.floor(Math.log(r/t)/Math.LN10)),i=t/r*u;return.15>=i?u*=10:.35>=i?u*=5:.75>=i&&(u*=2),e[0]=Math.ceil(e[0]/u)*u,e[1]=Math.floor(e[1]/u)*u+.5*u,e[2]=u,e}function Ii(n,t){return Xo.range.apply(Xo,Yi(n,t))}function Zi(n,t,e){var r=Yi(n,t);return Xo.format(e?e.replace(Qa,function(n,t,e,u,i,o,a,c,s,l){return[t,e,u,i,o,a,c,s||"."+Xi(l,r),l].join("")}):",."+Vi(r[2])+"f")}function Vi(n){return-Math.floor(Math.log(n)/Math.LN10+.01)}function Xi(n,t){var e=Vi(t[2]);return n in fs?Math.abs(e-Vi(Math.max(Math.abs(t[0]),Math.abs(t[1]))))+ +("e"!==n):e-2*("%"===n)}function $i(n,t,e,r){function u(n){return(e?Math.log(0>n?0:n):-Math.log(n>0?0:-n))/Math.log(t)}function i(n){return e?Math.pow(t,n):-Math.pow(t,-n)}function o(t){return n(u(t))}return o.invert=function(t){return i(n.invert(t))},o.domain=function(t){return 
arguments.length?(e=t[0]>=0,n.domain((r=t.map(Number)).map(u)),o):r},o.base=function(e){return arguments.length?(t=+e,n.domain(r.map(u)),o):t},o.nice=function(){var t=Pi(r.map(u),e?Math:gs);return n.domain(t),r=t.map(i),o},o.ticks=function(){var n=zi(r),o=[],a=n[0],c=n[1],s=Math.floor(u(a)),l=Math.ceil(u(c)),f=t%1?2:t;if(isFinite(l-s)){if(e){for(;l>s;s++)for(var h=1;f>h;h++)o.push(i(s)*h);o.push(i(s))}else for(o.push(i(s));s++<l;)for(var h=f-1;h>0;h--)o.push(i(s)*h);for(s=0;o[s]<a;s++);for(l=o.length;o[l-1]>c;l--);o=o.slice(s,l)}return o},o.tickFormat=function(n,t){if(!arguments.length)return hs;arguments.length<2?t=hs:"function"!=typeof t&&(t=Xo.format(t));var r,a=Math.max(.1,n/o.ticks().length),c=e?(r=1e-12,Math.ceil):(r=-1e-12,Math.floor);return function(n){return n/i(c(u(n)+r))<=a?t(n):""}},o.copy=function(){return $i(n.copy(),t,e,r)},Fi(o,n)}function Bi(n,t,e){function r(t){return n(u(t))}var u=Wi(t),i=Wi(1/t);return r.invert=function(t){return i(n.invert(t))},r.domain=function(t){return arguments.length?(n.domain((e=t.map(Number)).map(u)),r):e},r.ticks=function(n){return Ii(e,n)},r.tickFormat=function(n,t){return Zi(e,n,t)},r.nice=function(n){return r.domain(Oi(e,n))},r.exponent=function(o){return arguments.length?(u=Wi(t=o),i=Wi(1/t),n.domain(e.map(u)),r):t},r.copy=function(){return Bi(n.copy(),t,e)},Fi(r,n)}function Wi(n){return function(t){return 0>t?-Math.pow(-t,n):Math.pow(t,n)}}function Ji(n,t){function e(e){return o[((i.get(e)||"range"===t.t&&i.set(e,n.push(e)))-1)%o.length]}function r(t,e){return Xo.range(n.length).map(function(n){return t+e*n})}var i,o,a;return e.domain=function(r){if(!arguments.length)return n;n=[],i=new u;for(var o,a=-1,c=r.length;++a<c;)i.has(o=r[a])||i.set(o,n.push(o));return e[t.t].apply(e,t.a)},e.range=function(n){return arguments.length?(o=n,a=0,t={t:"range",a:arguments},e):o},e.rangePoints=function(u,i){arguments.length<2&&(i=0);var c=u[0],s=u[1],l=(s-c)/(Math.max(1,n.length-1)+i);return 
o=r(n.length<2?(c+s)/2:c+l*i/2,l),a=0,t={t:"rangePoints",a:arguments},e},e.rangeBands=function(u,i,c){arguments.length<2&&(i=0),arguments.length<3&&(c=i);var s=u[1]<u[0],l=u[s-0],f=u[1-s],h=(f-l)/(n.length-i+2*c);return o=r(l+h*c,h),s&&o.reverse(),a=h*(1-i),t={t:"rangeBands",a:arguments},e},e.rangeRoundBands=function(u,i,c){arguments.length<2&&(i=0),arguments.length<3&&(c=i);var s=u[1]<u[0],l=u[s-0],f=u[1-s],h=Math.floor((f-l)/(n.length-i+2*c)),g=f-l-(n.length-i)*h;return o=r(l+Math.round(g/2),h),s&&o.reverse(),a=Math.round(h*(1-i)),t={t:"rangeRoundBands",a:arguments},e},e.rangeBand=function(){return a},e.rangeExtent=function(){return zi(t.a[0])},e.copy=function(){return Ji(n,t)},e.domain(n)}function Gi(n,t){function e(){var e=0,i=t.length;for(u=[];++e<i;)u[e-1]=Xo.quantile(n,e/i);return r}function r(n){return isNaN(n=+n)?void 0:t[Xo.bisect(u,n)]}var u;return r.domain=function(t){return arguments.length?(n=t.filter(function(n){return!isNaN(n)}).sort(Xo.ascending),e()):n},r.range=function(n){return arguments.length?(t=n,e()):t},r.quantiles=function(){return u},r.invertExtent=function(e){return e=t.indexOf(e),0>e?[0/0,0/0]:[e>0?u[e-1]:n[0],e<u.length?u[e]:n[n.length-1]]},r.copy=function(){return Gi(n,t)},e()}function Ki(n,t,e){function r(t){return e[Math.max(0,Math.min(o,Math.floor(i*(t-n))))]}function u(){return i=e.length/(t-n),o=e.length-1,r}var i,o;return r.domain=function(e){return arguments.length?(n=+e[0],t=+e[e.length-1],u()):[n,t]},r.range=function(n){return arguments.length?(e=n,u()):e},r.invertExtent=function(t){return t=e.indexOf(t),t=0>t?0/0:t/i+n,[t,t+1/i]},r.copy=function(){return Ki(n,t,e)},u()}function Qi(n,t){function e(e){return e>=e?t[Xo.bisect(n,e)]:void 0}return e.domain=function(t){return arguments.length?(n=t,e):n},e.range=function(n){return arguments.length?(t=n,e):t},e.invertExtent=function(e){return e=t.indexOf(e),[n[e-1],n[e]]},e.copy=function(){return Qi(n,t)},e}function no(n){function t(n){return+n}return 
t.invert=t,t.domain=t.range=function(e){return arguments.length?(n=e.map(t),t):n},t.ticks=function(t){return Ii(n,t)},t.tickFormat=function(t,e){return Zi(n,t,e)},t.copy=function(){return no(n)},t}function to(n){return n.innerRadius}function eo(n){return n.outerRadius}function ro(n){return n.startAngle}function uo(n){return n.endAngle}function io(n){function t(t){function o(){s.push("M",i(n(l),a))}for(var c,s=[],l=[],f=-1,h=t.length,g=_t(e),p=_t(r);++f<h;)u.call(this,c=t[f],f)?l.push([+g.call(this,c,f),+p.call(this,c,f)]):l.length&&(o(),l=[]);return l.length&&o(),s.length?s.join(""):null}var e=br,r=wr,u=be,i=oo,o=i.key,a=.7;return t.x=function(n){return arguments.length?(e=n,t):e},t.y=function(n){return arguments.length?(r=n,t):r},t.defined=function(n){return arguments.length?(u=n,t):u},t.interpolate=function(n){return arguments.length?(o="function"==typeof n?i=n:(i=Ms.get(n)||oo).key,t):o},t.tension=function(n){return arguments.length?(a=n,t):a},t}function oo(n){return n.join("L")}function ao(n){return oo(n)+"Z"}function co(n){for(var t=0,e=n.length,r=n[0],u=[r[0],",",r[1]];++t<e;)u.push("H",(r[0]+(r=n[t])[0])/2,"V",r[1]);return e>1&&u.push("H",r[0]),u.join("")}function so(n){for(var t=0,e=n.length,r=n[0],u=[r[0],",",r[1]];++t<e;)u.push("V",(r=n[t])[1],"H",r[0]);return u.join("")}function lo(n){for(var t=0,e=n.length,r=n[0],u=[r[0],",",r[1]];++t<e;)u.push("H",(r=n[t])[0],"V",r[1]);return u.join("")}function fo(n,t){return n.length<4?oo(n):n[1]+po(n.slice(1,n.length-1),vo(n,t))}function ho(n,t){return n.length<3?oo(n):n[0]+po((n.push(n[0]),n),vo([n[n.length-2]].concat(n,[n[1]]),t))}function go(n,t){return n.length<3?oo(n):n[0]+po(n,vo(n,t))}function po(n,t){if(t.length<1||n.length!=t.length&&n.length!=t.length+2)return oo(n);var 
e=n.length!=t.length,r="",u=n[0],i=n[1],o=t[0],a=o,c=1;if(e&&(r+="Q"+(i[0]-2*o[0]/3)+","+(i[1]-2*o[1]/3)+","+i[0]+","+i[1],u=n[1],c=2),t.length>1){a=t[1],i=n[c],c++,r+="C"+(u[0]+o[0])+","+(u[1]+o[1])+","+(i[0]-a[0])+","+(i[1]-a[1])+","+i[0]+","+i[1];for(var s=2;s<t.length;s++,c++)i=n[c],a=t[s],r+="S"+(i[0]-a[0])+","+(i[1]-a[1])+","+i[0]+","+i[1]}if(e){var l=n[c];r+="Q"+(i[0]+2*a[0]/3)+","+(i[1]+2*a[1]/3)+","+l[0]+","+l[1]}return r}function vo(n,t){for(var e,r=[],u=(1-t)/2,i=n[0],o=n[1],a=1,c=n.length;++a<c;)e=i,i=o,o=n[a],r.push([u*(o[0]-e[0]),u*(o[1]-e[1])]);return r}function mo(n){if(n.length<3)return oo(n);var t=1,e=n.length,r=n[0],u=r[0],i=r[1],o=[u,u,u,(r=n[1])[0]],a=[i,i,i,r[1]],c=[u,",",i,"L",_o(ws,o),",",_o(ws,a)];for(n.push(n[e-1]);++t<=e;)r=n[t],o.shift(),o.push(r[0]),a.shift(),a.push(r[1]),bo(c,o,a);return n.pop(),c.push("L",r),c.join("")}function yo(n){if(n.length<4)return oo(n);for(var t,e=[],r=-1,u=n.length,i=[0],o=[0];++r<3;)t=n[r],i.push(t[0]),o.push(t[1]);for(e.push(_o(ws,i)+","+_o(ws,o)),--r;++r<u;)t=n[r],i.shift(),i.push(t[0]),o.shift(),o.push(t[1]),bo(e,i,o);return e.join("")}function xo(n){for(var t,e,r=-1,u=n.length,i=u+4,o=[],a=[];++r<4;)e=n[r%u],o.push(e[0]),a.push(e[1]);for(t=[_o(ws,o),",",_o(ws,a)],--r;++r<i;)e=n[r%u],o.shift(),o.push(e[0]),a.shift(),a.push(e[1]),bo(t,o,a);return t.join("")}function Mo(n,t){var e=n.length-1;if(e)for(var r,u,i=n[0][0],o=n[0][1],a=n[e][0]-i,c=n[e][1]-o,s=-1;++s<=e;)r=n[s],u=s/e,r[0]=t*r[0]+(1-t)*(i+u*a),r[1]=t*r[1]+(1-t)*(o+u*c);return mo(n)}function _o(n,t){return n[0]*t[0]+n[1]*t[1]+n[2]*t[2]+n[3]*t[3]}function bo(n,t,e){n.push("C",_o(_s,t),",",_o(_s,e),",",_o(bs,t),",",_o(bs,e),",",_o(ws,t),",",_o(ws,e))}function wo(n,t){return(t[1]-n[1])/(t[0]-n[0])}function So(n){for(var t=0,e=n.length-1,r=[],u=n[0],i=n[1],o=r[0]=wo(u,i);++t<e;)r[t]=(o+(o=wo(u=i,i=n[t+1])))/2;return r[t]=o,r}function ko(n){for(var 
t,e,r,u,i=[],o=So(n),a=-1,c=n.length-1;++a<c;)t=wo(n[a],n[a+1]),oa(t)<Aa?o[a]=o[a+1]=0:(e=o[a]/t,r=o[a+1]/t,u=e*e+r*r,u>9&&(u=3*t/Math.sqrt(u),o[a]=u*e,o[a+1]=u*r));for(a=-1;++a<=c;)u=(n[Math.min(c,a+1)][0]-n[Math.max(0,a-1)][0])/(6*(1+o[a]*o[a])),i.push([u||0,o[a]*u||0]);return i}function Eo(n){return n.length<3?oo(n):n[0]+po(n,ko(n))}function Ao(n){for(var t,e,r,u=-1,i=n.length;++u<i;)t=n[u],e=t[0],r=t[1]+ys,t[0]=e*Math.cos(r),t[1]=e*Math.sin(r);return n}function Co(n){function t(t){function c(){v.push("M",a(n(m),f),l,s(n(d.reverse()),f),"Z")}for(var h,g,p,v=[],d=[],m=[],y=-1,x=t.length,M=_t(e),_=_t(u),b=e===r?function(){return g}:_t(r),w=u===i?function(){return p}:_t(i);++y<x;)o.call(this,h=t[y],y)?(d.push([g=+M.call(this,h,y),p=+_.call(this,h,y)]),m.push([+b.call(this,h,y),+w.call(this,h,y)])):d.length&&(c(),d=[],m=[]);return d.length&&c(),v.length?v.join(""):null}var e=br,r=br,u=0,i=wr,o=be,a=oo,c=a.key,s=a,l="L",f=.7;return t.x=function(n){return arguments.length?(e=r=n,t):r},t.x0=function(n){return arguments.length?(e=n,t):e},t.x1=function(n){return arguments.length?(r=n,t):r},t.y=function(n){return arguments.length?(u=i=n,t):i},t.y0=function(n){return arguments.length?(u=n,t):u},t.y1=function(n){return arguments.length?(i=n,t):i},t.defined=function(n){return arguments.length?(o=n,t):o},t.interpolate=function(n){return arguments.length?(c="function"==typeof n?a=n:(a=Ms.get(n)||oo).key,s=a.reverse||a,l=a.closed?"M":"L",t):c},t.tension=function(n){return arguments.length?(f=n,t):f},t}function No(n){return n.radius}function Lo(n){return[n.x,n.y]}function To(n){return function(){var t=n.apply(this,arguments),e=t[0],r=t[1]+ys;return[e*Math.cos(r),e*Math.sin(r)]}}function qo(){return 64}function zo(){return"circle"}function Ro(n){var t=Math.sqrt(n/Sa);return"M0,"+t+"A"+t+","+t+" 0 1,1 0,"+-t+"A"+t+","+t+" 0 1,1 0,"+t+"Z"}function Do(n,t){return fa(n,Ns),n.id=t,n}function Po(n,t,e,r){var u=n.id;return R(n,"function"==typeof 
e?function(n,i,o){n.__transition__[u].tween.set(t,r(e.call(n,n.__data__,i,o)))}:(e=r(e),function(n){n.__transition__[u].tween.set(t,e)}))}function Uo(n){return null==n&&(n=""),function(){this.textContent=n}}function jo(n,t,e,r){var i=n.__transition__||(n.__transition__={active:0,count:0}),o=i[e];if(!o){var a=r.time;o=i[e]={tween:new u,time:a,ease:r.ease,delay:r.delay,duration:r.duration},++i.count,Xo.timer(function(r){function u(r){return i.active>e?s():(i.active=e,o.event&&o.event.start.call(n,l,t),o.tween.forEach(function(e,r){(r=r.call(n,l,t))&&v.push(r)}),Xo.timer(function(){return p.c=c(r||1)?be:c,1},0,a),void 0)}function c(r){if(i.active!==e)return s();for(var u=r/g,a=f(u),c=v.length;c>0;)v[--c].call(n,a);return u>=1?(o.event&&o.event.end.call(n,l,t),s()):void 0}function s(){return--i.count?delete i[e]:delete n.__transition__,1}var l=n.__data__,f=o.ease,h=o.delay,g=o.duration,p=Ja,v=[];return p.t=h+a,r>=h?u(r-h):(p.c=u,void 0)},0,a)}}function Ho(n,t){n.attr("transform",function(n){return"translate("+t(n)+",0)"})}function Fo(n,t){n.attr("transform",function(n){return"translate(0,"+t(n)+")"})}function Oo(n){return n.toISOString()}function Yo(n,t,e){function r(t){return n(t)}function u(n,e){var r=n[1]-n[0],u=r/e,i=Xo.bisect(js,u);return i==js.length?[t.year,Yi(n.map(function(n){return n/31536e6}),e)[2]]:i?t[u/js[i-1]<js[i]/u?i-1:i]:[Os,Yi(n,e)[2]]}return r.invert=function(t){return Io(n.invert(t))},r.domain=function(t){return arguments.length?(n.domain(t),r):n.domain().map(Io)},r.nice=function(n,t){function e(e){return!isNaN(e)&&!n.range(e,Io(+e+1),t).length}var i=r.domain(),o=zi(i),a=null==n?u(o,10):"number"==typeof n&&u(o,n);return a&&(n=a[0],t=a[1]),r.domain(Pi(i,t>1?{floor:function(t){for(;e(t=n.floor(t));)t=Io(t-1);return t},ceil:function(t){for(;e(t=n.ceil(t));)t=Io(+t+1);return t}}:n))},r.ticks=function(n,t){var e=zi(r.domain()),i=null==n?u(e,10):"number"==typeof n?u(e,n):!n.range&&[{range:n},t];return 
i&&(n=i[0],t=i[1]),n.range(e[0],Io(+e[1]+1),1>t?1:t)},r.tickFormat=function(){return e},r.copy=function(){return Yo(n.copy(),t,e)},Fi(r,n)}function Io(n){return new Date(n)}function Zo(n){return JSON.parse(n.responseText)}function Vo(n){var t=Wo.createRange();return t.selectNode(Wo.body),t.createContextualFragment(n.responseText)}var Xo={version:"3.4.3"};Date.now||(Date.now=function(){return+new Date});var $o=[].slice,Bo=function(n){return $o.call(n)},Wo=document,Jo=Wo.documentElement,Go=window;try{Bo(Jo.childNodes)[0].nodeType}catch(Ko){Bo=function(n){for(var t=n.length,e=new Array(t);t--;)e[t]=n[t];return e}}try{Wo.createElement("div").style.setProperty("opacity",0,"")}catch(Qo){var na=Go.Element.prototype,ta=na.setAttribute,ea=na.setAttributeNS,ra=Go.CSSStyleDeclaration.prototype,ua=ra.setProperty;na.setAttribute=function(n,t){ta.call(this,n,t+"")},na.setAttributeNS=function(n,t,e){ea.call(this,n,t,e+"")},ra.setProperty=function(n,t,e){ua.call(this,n,t+"",e)}}Xo.ascending=function(n,t){return t>n?-1:n>t?1:n>=t?0:0/0},Xo.descending=function(n,t){return n>t?-1:t>n?1:t>=n?0:0/0},Xo.min=function(n,t){var e,r,u=-1,i=n.length;if(1===arguments.length){for(;++u<i&&!(null!=(e=n[u])&&e>=e);)e=void 0;for(;++u<i;)null!=(r=n[u])&&e>r&&(e=r)}else{for(;++u<i&&!(null!=(e=t.call(n,n[u],u))&&e>=e);)e=void 0;for(;++u<i;)null!=(r=t.call(n,n[u],u))&&e>r&&(e=r)}return e},Xo.max=function(n,t){var e,r,u=-1,i=n.length;if(1===arguments.length){for(;++u<i&&!(null!=(e=n[u])&&e>=e);)e=void 0;for(;++u<i;)null!=(r=n[u])&&r>e&&(e=r)}else{for(;++u<i&&!(null!=(e=t.call(n,n[u],u))&&e>=e);)e=void 0;for(;++u<i;)null!=(r=t.call(n,n[u],u))&&r>e&&(e=r)}return e},Xo.extent=function(n,t){var e,r,u,i=-1,o=n.length;if(1===arguments.length){for(;++i<o&&!(null!=(e=u=n[i])&&e>=e);)e=u=void 0;for(;++i<o;)null!=(r=n[i])&&(e>r&&(e=r),r>u&&(u=r))}else{for(;++i<o&&!(null!=(e=u=t.call(n,n[i],i))&&e>=e);)e=void 0;for(;++i<o;)null!=(r=t.call(n,n[i],i))&&(e>r&&(e=r),r>u&&(u=r))}return[e,u]},Xo.sum=function(n,t){var 
e,r=0,u=n.length,i=-1;if(1===arguments.length)for(;++i<u;)isNaN(e=+n[i])||(r+=e);else for(;++i<u;)isNaN(e=+t.call(n,n[i],i))||(r+=e);return r},Xo.mean=function(t,e){var r,u=t.length,i=0,o=-1,a=0;if(1===arguments.length)for(;++o<u;)n(r=t[o])&&(i+=(r-i)/++a);else for(;++o<u;)n(r=e.call(t,t[o],o))&&(i+=(r-i)/++a);return a?i:void 0},Xo.quantile=function(n,t){var e=(n.length-1)*t+1,r=Math.floor(e),u=+n[r-1],i=e-r;return i?u+i*(n[r]-u):u},Xo.median=function(t,e){return arguments.length>1&&(t=t.map(e)),t=t.filter(n),t.length?Xo.quantile(t.sort(Xo.ascending),.5):void 0},Xo.bisector=function(n){return{left:function(t,e,r,u){for(arguments.length<3&&(r=0),arguments.length<4&&(u=t.length);u>r;){var i=r+u>>>1;n.call(t,t[i],i)<e?r=i+1:u=i}return r},right:function(t,e,r,u){for(arguments.length<3&&(r=0),arguments.length<4&&(u=t.length);u>r;){var i=r+u>>>1;e<n.call(t,t[i],i)?u=i:r=i+1}return r}}};var ia=Xo.bisector(function(n){return n});Xo.bisectLeft=ia.left,Xo.bisect=Xo.bisectRight=ia.right,Xo.shuffle=function(n){for(var t,e,r=n.length;r;)e=0|Math.random()*r--,t=n[r],n[r]=n[e],n[e]=t;return n},Xo.permute=function(n,t){for(var e=t.length,r=new Array(e);e--;)r[e]=n[t[e]];return r},Xo.pairs=function(n){for(var t,e=0,r=n.length-1,u=n[0],i=new Array(0>r?0:r);r>e;)i[e]=[t=u,u=n[++e]];return i},Xo.zip=function(){if(!(u=arguments.length))return[];for(var n=-1,e=Xo.min(arguments,t),r=new Array(e);++n<e;)for(var u,i=-1,o=r[n]=new Array(u);++i<u;)o[i]=arguments[i][n];return r},Xo.transpose=function(n){return Xo.zip.apply(Xo,n)},Xo.keys=function(n){var t=[];for(var e in n)t.push(e);return t},Xo.values=function(n){var t=[];for(var e in n)t.push(n[e]);return t},Xo.entries=function(n){var t=[];for(var e in n)t.push({key:e,value:n[e]});return t},Xo.merge=function(n){for(var t,e,r,u=n.length,i=-1,o=0;++i<u;)o+=n[i].length;for(e=new Array(o);--u>=0;)for(r=n[u],t=r.length;--t>=0;)e[--o]=r[t];return e};var 
oa=Math.abs;Xo.range=function(n,t,r){if(arguments.length<3&&(r=1,arguments.length<2&&(t=n,n=0)),1/0===(t-n)/r)throw new Error("infinite range");var u,i=[],o=e(oa(r)),a=-1;if(n*=o,t*=o,r*=o,0>r)for(;(u=n+r*++a)>t;)i.push(u/o);else for(;(u=n+r*++a)<t;)i.push(u/o);return i},Xo.map=function(n){var t=new u;if(n instanceof u)n.forEach(function(n,e){t.set(n,e)});else for(var e in n)t.set(e,n[e]);return t},r(u,{has:i,get:function(n){return this[aa+n]},set:function(n,t){return this[aa+n]=t},remove:o,keys:a,values:function(){var n=[];return this.forEach(function(t,e){n.push(e)}),n},entries:function(){var n=[];return this.forEach(function(t,e){n.push({key:t,value:e})}),n},size:c,empty:s,forEach:function(n){for(var t in this)t.charCodeAt(0)===ca&&n.call(this,t.substring(1),this[t])}});var aa="\x00",ca=aa.charCodeAt(0);Xo.nest=function(){function n(t,a,c){if(c>=o.length)return r?r.call(i,a):e?a.sort(e):a;for(var s,l,f,h,g=-1,p=a.length,v=o[c++],d=new u;++g<p;)(h=d.get(s=v(l=a[g])))?h.push(l):d.set(s,[l]);return t?(l=t(),f=function(e,r){l.set(e,n(t,r,c))}):(l={},f=function(e,r){l[e]=n(t,r,c)}),d.forEach(f),l}function t(n,e){if(e>=o.length)return n;var r=[],u=a[e++];return n.forEach(function(n,u){r.push({key:n,values:t(u,e)})}),u?r.sort(function(n,t){return u(n.key,t.key)}):r}var e,r,i={},o=[],a=[];return i.map=function(t,e){return n(e,t,0)},i.entries=function(e){return t(n(Xo.map,e,0),0)},i.key=function(n){return o.push(n),i},i.sortKeys=function(n){return a[o.length-1]=n,i},i.sortValues=function(n){return e=n,i},i.rollup=function(n){return r=n,i},i},Xo.set=function(n){var t=new l;if(n)for(var e=0,r=n.length;r>e;++e)t.add(n[e]);return t},r(l,{has:i,add:function(n){return this[aa+n]=!0,n},remove:function(n){return n=aa+n,n in this&&delete this[n]},values:a,size:c,empty:s,forEach:function(n){for(var t in this)t.charCodeAt(0)===ca&&n.call(this,t.substring(1))}}),Xo.behavior={},Xo.rebind=function(n,t){for(var e,r=1,u=arguments.length;++r<u;)n[e=arguments[r]]=f(n,t,t[e]);return n};var 
sa=["webkit","ms","moz","Moz","o","O"];Xo.dispatch=function(){for(var n=new p,t=-1,e=arguments.length;++t<e;)n[arguments[t]]=v(n);return n},p.prototype.on=function(n,t){var e=n.indexOf("."),r="";if(e>=0&&(r=n.substring(e+1),n=n.substring(0,e)),n)return arguments.length<2?this[n].on(r):this[n].on(r,t);if(2===arguments.length){if(null==t)for(n in this)this.hasOwnProperty(n)&&this[n].on(r,null);return this}},Xo.event=null,Xo.requote=function(n){return n.replace(la,"\\$&")};var la=/[\\\^\$\*\+\?\|\[\]\(\)\.\{\}]/g,fa={}.__proto__?function(n,t){n.__proto__=t}:function(n,t){for(var e in t)n[e]=t[e]},ha=function(n,t){return t.querySelector(n)},ga=function(n,t){return t.querySelectorAll(n)},pa=Jo[h(Jo,"matchesSelector")],va=function(n,t){return pa.call(n,t)};"function"==typeof Sizzle&&(ha=function(n,t){return Sizzle(n,t)[0]||null},ga=Sizzle,va=Sizzle.matchesSelector),Xo.selection=function(){return xa};var da=Xo.selection.prototype=[];da.select=function(n){var t,e,r,u,i=[];n=M(n);for(var o=-1,a=this.length;++o<a;){i.push(t=[]),t.parentNode=(r=this[o]).parentNode;for(var c=-1,s=r.length;++c<s;)(u=r[c])?(t.push(e=n.call(u,u.__data__,c,o)),e&&"__data__"in u&&(e.__data__=u.__data__)):t.push(null)}return x(i)},da.selectAll=function(n){var t,e,r=[];n=_(n);for(var u=-1,i=this.length;++u<i;)for(var o=this[u],a=-1,c=o.length;++a<c;)(e=o[a])&&(r.push(t=Bo(n.call(e,e.__data__,a,u))),t.parentNode=e);return x(r)};var ma={svg:"http://www.w3.org/2000/svg",xhtml:"http://www.w3.org/1999/xhtml",xlink:"http://www.w3.org/1999/xlink",xml:"http://www.w3.org/XML/1998/namespace",xmlns:"http://www.w3.org/2000/xmlns/"};Xo.ns={prefix:ma,qualify:function(n){var t=n.indexOf(":"),e=n;return t>=0&&(e=n.substring(0,t),n=n.substring(t+1)),ma.hasOwnProperty(e)?{space:ma[e],local:n}:n}},da.attr=function(n,t){if(arguments.length<2){if("string"==typeof n){var e=this.node();return n=Xo.ns.qualify(n),n.local?e.getAttributeNS(n.space,n.local):e.getAttribute(n)}for(t in n)this.each(b(t,n[t]));return this}return 
this.each(b(n,t))},da.classed=function(n,t){if(arguments.length<2){if("string"==typeof n){var e=this.node(),r=(n=k(n)).length,u=-1;if(t=e.classList){for(;++u<r;)if(!t.contains(n[u]))return!1}else for(t=e.getAttribute("class");++u<r;)if(!S(n[u]).test(t))return!1;return!0}for(t in n)this.each(E(t,n[t]));return this}return this.each(E(n,t))},da.style=function(n,t,e){var r=arguments.length;if(3>r){if("string"!=typeof n){2>r&&(t="");for(e in n)this.each(C(e,n[e],t));return this}if(2>r)return Go.getComputedStyle(this.node(),null).getPropertyValue(n);e=""}return this.each(C(n,t,e))},da.property=function(n,t){if(arguments.length<2){if("string"==typeof n)return this.node()[n];for(t in n)this.each(N(t,n[t]));return this}return this.each(N(n,t))},da.text=function(n){return arguments.length?this.each("function"==typeof n?function(){var t=n.apply(this,arguments);this.textContent=null==t?"":t}:null==n?function(){this.textContent=""}:function(){this.textContent=n}):this.node().textContent},da.html=function(n){return arguments.length?this.each("function"==typeof n?function(){var t=n.apply(this,arguments);this.innerHTML=null==t?"":t}:null==n?function(){this.innerHTML=""}:function(){this.innerHTML=n}):this.node().innerHTML},da.append=function(n){return n=L(n),this.select(function(){return this.appendChild(n.apply(this,arguments))})},da.insert=function(n,t){return n=L(n),t=M(t),this.select(function(){return this.insertBefore(n.apply(this,arguments),t.apply(this,arguments)||null)})},da.remove=function(){return this.each(function(){var n=this.parentNode;n&&n.removeChild(this)})},da.data=function(n,t){function e(n,e){var r,i,o,a=n.length,f=e.length,h=Math.min(a,f),g=new Array(f),p=new Array(f),v=new Array(a);if(t){var d,m=new u,y=new 
u,x=[];for(r=-1;++r<a;)d=t.call(i=n[r],i.__data__,r),m.has(d)?v[r]=i:m.set(d,i),x.push(d);for(r=-1;++r<f;)d=t.call(e,o=e[r],r),(i=m.get(d))?(g[r]=i,i.__data__=o):y.has(d)||(p[r]=T(o)),y.set(d,o),m.remove(d);for(r=-1;++r<a;)m.has(x[r])&&(v[r]=n[r])}else{for(r=-1;++r<h;)i=n[r],o=e[r],i?(i.__data__=o,g[r]=i):p[r]=T(o);for(;f>r;++r)p[r]=T(e[r]);for(;a>r;++r)v[r]=n[r]}p.update=g,p.parentNode=g.parentNode=v.parentNode=n.parentNode,c.push(p),s.push(g),l.push(v)}var r,i,o=-1,a=this.length;if(!arguments.length){for(n=new Array(a=(r=this[0]).length);++o<a;)(i=r[o])&&(n[o]=i.__data__);return n}var c=D([]),s=x([]),l=x([]);if("function"==typeof n)for(;++o<a;)e(r=this[o],n.call(r,r.parentNode.__data__,o));else for(;++o<a;)e(r=this[o],n);return s.enter=function(){return c},s.exit=function(){return l},s},da.datum=function(n){return arguments.length?this.property("__data__",n):this.property("__data__")},da.filter=function(n){var t,e,r,u=[];"function"!=typeof n&&(n=q(n));for(var i=0,o=this.length;o>i;i++){u.push(t=[]),t.parentNode=(e=this[i]).parentNode;for(var a=0,c=e.length;c>a;a++)(r=e[a])&&n.call(r,r.__data__,a,i)&&t.push(r)}return x(u)},da.order=function(){for(var n=-1,t=this.length;++n<t;)for(var e,r=this[n],u=r.length-1,i=r[u];--u>=0;)(e=r[u])&&(i&&i!==e.nextSibling&&i.parentNode.insertBefore(e,i),i=e);return this},da.sort=function(n){n=z.apply(this,arguments);for(var t=-1,e=this.length;++t<e;)this[t].sort(n);return this.order()},da.each=function(n){return R(this,function(t,e,r){n.call(t,t.__data__,e,r)})},da.call=function(n){var t=Bo(arguments);return n.apply(t[0]=this,t),this},da.empty=function(){return!this.node()},da.node=function(){for(var n=0,t=this.length;t>n;n++)for(var e=this[n],r=0,u=e.length;u>r;r++){var i=e[r];if(i)return i}return null},da.size=function(){var n=0;return this.each(function(){++n}),n};var 
ya=[];Xo.selection.enter=D,Xo.selection.enter.prototype=ya,ya.append=da.append,ya.empty=da.empty,ya.node=da.node,ya.call=da.call,ya.size=da.size,ya.select=function(n){for(var t,e,r,u,i,o=[],a=-1,c=this.length;++a<c;){r=(u=this[a]).update,o.push(t=[]),t.parentNode=u.parentNode;for(var s=-1,l=u.length;++s<l;)(i=u[s])?(t.push(r[s]=e=n.call(u.parentNode,i.__data__,s,a)),e.__data__=i.__data__):t.push(null)}return x(o)},ya.insert=function(n,t){return arguments.length<2&&(t=P(this)),da.insert.call(this,n,t)},da.transition=function(){for(var n,t,e=ks||++Ls,r=[],u=Es||{time:Date.now(),ease:yu,delay:0,duration:250},i=-1,o=this.length;++i<o;){r.push(n=[]);for(var a=this[i],c=-1,s=a.length;++c<s;)(t=a[c])&&jo(t,c,e,u),n.push(t)}return Do(r,e)},da.interrupt=function(){return this.each(U)},Xo.select=function(n){var t=["string"==typeof n?ha(n,Wo):n];return t.parentNode=Jo,x([t])},Xo.selectAll=function(n){var t=Bo("string"==typeof n?ga(n,Wo):n);return t.parentNode=Jo,x([t])};var xa=Xo.select(Jo);da.on=function(n,t,e){var r=arguments.length;if(3>r){if("string"!=typeof n){2>r&&(t=!1);for(e in n)this.each(j(e,n[e],t));return this}if(2>r)return(r=this.node()["__on"+n])&&r._;e=!1}return this.each(j(n,t,e))};var Ma=Xo.map({mouseenter:"mouseover",mouseleave:"mouseout"});Ma.forEach(function(n){"on"+n in Wo&&Ma.remove(n)});var _a="onselectstart"in Wo?null:h(Jo.style,"userSelect"),ba=0;Xo.mouse=function(n){return Y(n,m())};var wa=/WebKit/.test(Go.navigator.userAgent)?-1:0;Xo.touches=function(n,t){return arguments.length<2&&(t=m().touches),t?Bo(t).map(function(t){var e=Y(n,t);return e.identifier=t.identifier,e}):[]},Xo.behavior.drag=function(){function n(){this.on("mousedown.drag",o).on("touchstart.drag",a)}function t(){return Xo.event.changedTouches[0].identifier}function e(n,t){return Xo.touches(n).filter(function(n){return n.identifier===t})[0]}function r(n,t,e,r){return function(){function o(){var 
n=t(l,g),e=n[0]-v[0],r=n[1]-v[1];d|=e|r,v=n,f({type:"drag",x:n[0]+c[0],y:n[1]+c[1],dx:e,dy:r})}function a(){m.on(e+"."+p,null).on(r+"."+p,null),y(d&&Xo.event.target===h),f({type:"dragend"})}var c,s=this,l=s.parentNode,f=u.of(s,arguments),h=Xo.event.target,g=n(),p=null==g?"drag":"drag-"+g,v=t(l,g),d=0,m=Xo.select(Go).on(e+"."+p,o).on(r+"."+p,a),y=O();i?(c=i.apply(s,arguments),c=[c.x-v[0],c.y-v[1]]):c=[0,0],f({type:"dragstart"})}}var u=y(n,"drag","dragstart","dragend"),i=null,o=r(g,Xo.mouse,"mousemove","mouseup"),a=r(t,e,"touchmove","touchend");return n.origin=function(t){return arguments.length?(i=t,n):i},Xo.rebind(n,u,"on")};var Sa=Math.PI,ka=2*Sa,Ea=Sa/2,Aa=1e-6,Ca=Aa*Aa,Na=Sa/180,La=180/Sa,Ta=Math.SQRT2,qa=2,za=4;Xo.interpolateZoom=function(n,t){function e(n){var t=n*y;if(m){var e=B(v),o=i/(qa*h)*(e*W(Ta*t+v)-$(v));return[r+o*s,u+o*l,i*e/B(Ta*t+v)]}return[r+n*s,u+n*l,i*Math.exp(Ta*t)]}var r=n[0],u=n[1],i=n[2],o=t[0],a=t[1],c=t[2],s=o-r,l=a-u,f=s*s+l*l,h=Math.sqrt(f),g=(c*c-i*i+za*f)/(2*i*qa*h),p=(c*c-i*i-za*f)/(2*c*qa*h),v=Math.log(Math.sqrt(g*g+1)-g),d=Math.log(Math.sqrt(p*p+1)-p),m=d-v,y=(m||Math.log(c/i))/Ta;return e.duration=1e3*y,e},Xo.behavior.zoom=function(){function n(n){n.on(A,s).on(Pa+".zoom",f).on(C,h).on("dblclick.zoom",g).on(L,l)}function t(n){return[(n[0]-S.x)/S.k,(n[1]-S.y)/S.k]}function e(n){return[n[0]*S.k+S.x,n[1]*S.k+S.y]}function r(n){S.k=Math.max(E[0],Math.min(E[1],n))}function u(n,t){t=e(t),S.x+=n[0]-t[0],S.y+=n[1]-t[1]}function i(){_&&_.domain(M.range().map(function(n){return(n-S.x)/S.k}).map(M.invert)),w&&w.domain(b.range().map(function(n){return(n-S.y)/S.k}).map(b.invert))}function o(n){n({type:"zoomstart"})}function a(n){i(),n({type:"zoom",scale:S.k,translate:[S.x,S.y]})}function c(n){n({type:"zoomend"})}function s(){function n(){l=1,u(Xo.mouse(r),g),a(i)}function e(){f.on(C,Go===r?h:null).on(N,null),p(l&&Xo.event.target===s),c(i)}var 
r=this,i=T.of(r,arguments),s=Xo.event.target,l=0,f=Xo.select(Go).on(C,n).on(N,e),g=t(Xo.mouse(r)),p=O();U.call(r),o(i)}function l(){function n(){var n=Xo.touches(g);return h=S.k,n.forEach(function(n){n.identifier in v&&(v[n.identifier]=t(n))}),n}function e(){for(var t=Xo.event.changedTouches,e=0,i=t.length;i>e;++e)v[t[e].identifier]=null;var o=n(),c=Date.now();if(1===o.length){if(500>c-x){var s=o[0],l=v[s.identifier];r(2*S.k),u(s,l),d(),a(p)}x=c}else if(o.length>1){var s=o[0],f=o[1],h=s[0]-f[0],g=s[1]-f[1];m=h*h+g*g}}function i(){for(var n,t,e,i,o=Xo.touches(g),c=0,s=o.length;s>c;++c,i=null)if(e=o[c],i=v[e.identifier]){if(t)break;n=e,t=i}if(i){var l=(l=e[0]-n[0])*l+(l=e[1]-n[1])*l,f=m&&Math.sqrt(l/m);n=[(n[0]+e[0])/2,(n[1]+e[1])/2],t=[(t[0]+i[0])/2,(t[1]+i[1])/2],r(f*h)}x=null,u(n,t),a(p)}function f(){if(Xo.event.touches.length){for(var t=Xo.event.changedTouches,e=0,r=t.length;r>e;++e)delete v[t[e].identifier];for(var u in v)return void n()}b.on(M,null).on(_,null),w.on(A,s).on(L,l),k(),c(p)}var h,g=this,p=T.of(g,arguments),v={},m=0,y=Xo.event.changedTouches[0].identifier,M="touchmove.zoom-"+y,_="touchend.zoom-"+y,b=Xo.select(Go).on(M,i).on(_,f),w=Xo.select(g).on(A,null).on(L,e),k=O();U.call(g),e(),o(p)}function f(){var n=T.of(this,arguments);m?clearTimeout(m):(U.call(this),o(n)),m=setTimeout(function(){m=null,c(n)},50),d();var e=v||Xo.mouse(this);p||(p=t(e)),r(Math.pow(2,.002*Ra())*S.k),u(e,p),a(n)}function h(){p=null}function g(){var n=T.of(this,arguments),e=Xo.mouse(this),i=t(e),s=Math.log(S.k)/Math.LN2;o(n),r(Math.pow(2,Xo.event.shiftKey?Math.ceil(s)-1:Math.floor(s)+1)),u(e,i),a(n),c(n)}var p,v,m,x,M,_,b,w,S={x:0,y:0,k:1},k=[960,500],E=Da,A="mousedown.zoom",C="mousemove.zoom",N="mouseup.zoom",L="touchstart.zoom",T=y(n,"zoomstart","zoom","zoomend");return n.event=function(n){n.each(function(){var n=T.of(this,arguments),t=S;ks?Xo.select(this).transition().each("start.zoom",function(){S=this.__chart__||{x:0,y:0,k:1},o(n)}).tween("zoom:zoom",function(){var 
e=k[0],r=k[1],u=e/2,i=r/2,o=Xo.interpolateZoom([(u-S.x)/S.k,(i-S.y)/S.k,e/S.k],[(u-t.x)/t.k,(i-t.y)/t.k,e/t.k]);return function(t){var r=o(t),c=e/r[2];this.__chart__=S={x:u-r[0]*c,y:i-r[1]*c,k:c},a(n)}}).each("end.zoom",function(){c(n)}):(this.__chart__=S,o(n),a(n),c(n))})},n.translate=function(t){return arguments.length?(S={x:+t[0],y:+t[1],k:S.k},i(),n):[S.x,S.y]},n.scale=function(t){return arguments.length?(S={x:S.x,y:S.y,k:+t},i(),n):S.k},n.scaleExtent=function(t){return arguments.length?(E=null==t?Da:[+t[0],+t[1]],n):E},n.center=function(t){return arguments.length?(v=t&&[+t[0],+t[1]],n):v},n.size=function(t){return arguments.length?(k=t&&[+t[0],+t[1]],n):k},n.x=function(t){return arguments.length?(_=t,M=t.copy(),S={x:0,y:0,k:1},n):_},n.y=function(t){return arguments.length?(w=t,b=t.copy(),S={x:0,y:0,k:1},n):w},Xo.rebind(n,T,"on")};var Ra,Da=[0,1/0],Pa="onwheel"in Wo?(Ra=function(){return-Xo.event.deltaY*(Xo.event.deltaMode?120:1)},"wheel"):"onmousewheel"in Wo?(Ra=function(){return Xo.event.wheelDelta},"mousewheel"):(Ra=function(){return-Xo.event.detail},"MozMousePixelScroll");G.prototype.toString=function(){return this.rgb()+""},Xo.hsl=function(n,t,e){return 1===arguments.length?n instanceof Q?K(n.h,n.s,n.l):dt(""+n,mt,K):K(+n,+t,+e)};var Ua=Q.prototype=new G;Ua.brighter=function(n){return n=Math.pow(.7,arguments.length?n:1),K(this.h,this.s,this.l/n)},Ua.darker=function(n){return n=Math.pow(.7,arguments.length?n:1),K(this.h,this.s,n*this.l)},Ua.rgb=function(){return nt(this.h,this.s,this.l)},Xo.hcl=function(n,t,e){return 1===arguments.length?n instanceof et?tt(n.h,n.c,n.l):n instanceof it?at(n.l,n.a,n.b):at((n=yt((n=Xo.rgb(n)).r,n.g,n.b)).l,n.a,n.b):tt(+n,+t,+e)};var ja=et.prototype=new G;ja.brighter=function(n){return tt(this.h,this.c,Math.min(100,this.l+Ha*(arguments.length?n:1)))},ja.darker=function(n){return tt(this.h,this.c,Math.max(0,this.l-Ha*(arguments.length?n:1)))},ja.rgb=function(){return rt(this.h,this.c,this.l).rgb()},Xo.lab=function(n,t,e){return 
1===arguments.length?n instanceof it?ut(n.l,n.a,n.b):n instanceof et?rt(n.l,n.c,n.h):yt((n=Xo.rgb(n)).r,n.g,n.b):ut(+n,+t,+e)};var Ha=18,Fa=.95047,Oa=1,Ya=1.08883,Ia=it.prototype=new G;Ia.brighter=function(n){return ut(Math.min(100,this.l+Ha*(arguments.length?n:1)),this.a,this.b)},Ia.darker=function(n){return ut(Math.max(0,this.l-Ha*(arguments.length?n:1)),this.a,this.b)},Ia.rgb=function(){return ot(this.l,this.a,this.b)},Xo.rgb=function(n,t,e){return 1===arguments.length?n instanceof pt?gt(n.r,n.g,n.b):dt(""+n,gt,nt):gt(~~n,~~t,~~e)};var Za=pt.prototype=new G;Za.brighter=function(n){n=Math.pow(.7,arguments.length?n:1);var t=this.r,e=this.g,r=this.b,u=30;return t||e||r?(t&&u>t&&(t=u),e&&u>e&&(e=u),r&&u>r&&(r=u),gt(Math.min(255,~~(t/n)),Math.min(255,~~(e/n)),Math.min(255,~~(r/n)))):gt(u,u,u)},Za.darker=function(n){return n=Math.pow(.7,arguments.length?n:1),gt(~~(n*this.r),~~(n*this.g),~~(n*this.b))},Za.hsl=function(){return mt(this.r,this.g,this.b)},Za.toString=function(){return"#"+vt(this.r)+vt(this.g)+vt(this.b)};var 
Va=Xo.map({aliceblue:15792383,antiquewhite:16444375,aqua:65535,aquamarine:8388564,azure:15794175,beige:16119260,bisque:16770244,black:0,blanchedalmond:16772045,blue:255,blueviolet:9055202,brown:10824234,burlywood:14596231,cadetblue:6266528,chartreuse:8388352,chocolate:13789470,coral:16744272,cornflowerblue:6591981,cornsilk:16775388,crimson:14423100,cyan:65535,darkblue:139,darkcyan:35723,darkgoldenrod:12092939,darkgray:11119017,darkgreen:25600,darkgrey:11119017,darkkhaki:12433259,darkmagenta:9109643,darkolivegreen:5597999,darkorange:16747520,darkorchid:10040012,darkred:9109504,darksalmon:15308410,darkseagreen:9419919,darkslateblue:4734347,darkslategray:3100495,darkslategrey:3100495,darkturquoise:52945,darkviolet:9699539,deeppink:16716947,deepskyblue:49151,dimgray:6908265,dimgrey:6908265,dodgerblue:2003199,firebrick:11674146,floralwhite:16775920,forestgreen:2263842,fuchsia:16711935,gainsboro:14474460,ghostwhite:16316671,gold:16766720,goldenrod:14329120,gray:8421504,green:32768,greenyellow:11403055,grey:8421504,honeydew:15794160,hotpink:16738740,indianred:13458524,indigo:4915330,ivory:16777200,khaki:15787660,lavender:15132410,lavenderblush:16773365,lawngreen:8190976,lemonchiffon:16775885,lightblue:11393254,lightcoral:15761536,lightcyan:14745599,lightgoldenrodyellow:16448210,lightgray:13882323,lightgreen:9498256,lightgrey:13882323,lightpink:16758465,lightsalmon:16752762,lightseagreen:2142890,lightskyblue:8900346,lightslategray:7833753,lightslategrey:7833753,lightsteelblue:11584734,lightyellow:16777184,lime:65280,limegreen:3329330,linen:16445670,magenta:16711935,maroon:8388608,mediumaquamarine:6737322,mediumblue:205,mediumorchid:12211667,mediumpurple:9662683,mediumseagreen:3978097,mediumslateblue:8087790,mediumspringgreen:64154,mediumturquoise:4772300,mediumvioletred:13047173,midnightblue:1644912,mintcream:16121850,mistyrose:16770273,moccasin:16770229,navajowhite:16768685,navy:128,oldlace:16643558,olive:8421376,olivedrab:7048739,orange:16753920,orangered:16729344,orchid:
14315734,palegoldenrod:15657130,palegreen:10025880,paleturquoise:11529966,palevioletred:14381203,papayawhip:16773077,peachpuff:16767673,peru:13468991,pink:16761035,plum:14524637,powderblue:11591910,purple:8388736,red:16711680,rosybrown:12357519,royalblue:4286945,saddlebrown:9127187,salmon:16416882,sandybrown:16032864,seagreen:3050327,seashell:16774638,sienna:10506797,silver:12632256,skyblue:8900331,slateblue:6970061,slategray:7372944,slategrey:7372944,snow:16775930,springgreen:65407,steelblue:4620980,tan:13808780,teal:32896,thistle:14204888,tomato:16737095,turquoise:4251856,violet:15631086,wheat:16113331,white:16777215,whitesmoke:16119285,yellow:16776960,yellowgreen:10145074});Va.forEach(function(n,t){Va.set(n,ft(t))}),Xo.functor=_t,Xo.xhr=wt(bt),Xo.dsv=function(n,t){function e(n,e,i){arguments.length<3&&(i=e,e=null);var o=St(n,t,null==e?r:u(e),i);return o.row=function(n){return arguments.length?o.response(null==(e=n)?r:u(n)):e},o}function r(n){return e.parse(n.responseText)}function u(n){return function(t){return e.parse(t.responseText,n)}}function i(t){return t.map(o).join(n)}function o(n){return a.test(n)?'"'+n.replace(/\"/g,'""')+'"':n}var a=new RegExp('["'+n+"\n]"),c=n.charCodeAt(0);return e.parse=function(n,t){var r;return e.parseRows(n,function(n,e){if(r)return r(n,e-1);var u=new Function("d","return {"+n.map(function(n,t){return JSON.stringify(n)+": d["+t+"]"}).join(",")+"}");r=t?function(n,e){return t(u(n),e)}:u})},e.parseRows=function(n,t){function e(){if(l>=s)return o;if(u)return u=!1,i;var t=l;if(34===n.charCodeAt(t)){for(var e=t;e++<s;)if(34===n.charCodeAt(e)){if(34!==n.charCodeAt(e+1))break;++e}l=e+2;var r=n.charCodeAt(e+1);return 13===r?(u=!0,10===n.charCodeAt(e+2)&&++l):10===r&&(u=!0),n.substring(t+1,e).replace(/""/g,'"')}for(;s>l;){var r=n.charCodeAt(l++),a=1;if(10===r)u=!0;else if(13===r)u=!0,10===n.charCodeAt(l)&&(++l,++a);else if(r!==c)continue;return n.substring(t,l-a)}return n.substring(t)}for(var 
r,u,i={},o={},a=[],s=n.length,l=0,f=0;(r=e())!==o;){for(var h=[];r!==i&&r!==o;)h.push(r),r=e();(!t||(h=t(h,f++)))&&a.push(h)}return a},e.format=function(t){if(Array.isArray(t[0]))return e.formatRows(t);var r=new l,u=[];return t.forEach(function(n){for(var t in n)r.has(t)||u.push(r.add(t))}),[u.map(o).join(n)].concat(t.map(function(t){return u.map(function(n){return o(t[n])}).join(n)})).join("\n")},e.formatRows=function(n){return n.map(i).join("\n")},e},Xo.csv=Xo.dsv(",","text/csv"),Xo.tsv=Xo.dsv(" ","text/tab-separated-values");var Xa,$a,Ba,Wa,Ja,Ga=Go[h(Go,"requestAnimationFrame")]||function(n){setTimeout(n,17)};Xo.timer=function(n,t,e){var r=arguments.length;2>r&&(t=0),3>r&&(e=Date.now());var u=e+t,i={c:n,t:u,f:!1,n:null};$a?$a.n=i:Xa=i,$a=i,Ba||(Wa=clearTimeout(Wa),Ba=1,Ga(Et))},Xo.timer.flush=function(){At(),Ct()},Xo.round=function(n,t){return t?Math.round(n*(t=Math.pow(10,t)))/t:Math.round(n)};var Ka=["y","z","a","f","p","n","\xb5","m","","k","M","G","T","P","E","Z","Y"].map(Lt);Xo.formatPrefix=function(n,t){var e=0;return n&&(0>n&&(n*=-1),t&&(n=Xo.round(n,Nt(n,t))),e=1+Math.floor(1e-12+Math.log(n)/Math.LN10),e=Math.max(-24,Math.min(24,3*Math.floor((0>=e?e+1:e-1)/3)))),Ka[8+e/3]};var Qa=/(?:([^{])?([<>=^]))?([+\- ])?([$#])?(0)?(\d+)?(,)?(\.-?\d+)?([a-z%])?/i,nc=Xo.map({b:function(n){return n.toString(2)},c:function(n){return String.fromCharCode(n)},o:function(n){return n.toString(8)},x:function(n){return n.toString(16)},X:function(n){return n.toString(16).toUpperCase()},g:function(n,t){return n.toPrecision(t)},e:function(n,t){return n.toExponential(t)},f:function(n,t){return n.toFixed(t)},r:function(n,t){return(n=Xo.round(n,Nt(n,t))).toFixed(Math.max(0,Math.min(20,Nt(n*(1+1e-15),t))))}}),tc=Xo.time={},ec=Date;zt.prototype={getDate:function(){return this._.getUTCDate()},getDay:function(){return this._.getUTCDay()},getFullYear:function(){return this._.getUTCFullYear()},getHours:function(){return this._.getUTCHours()},getMilliseconds:function(){return 
this._.getUTCMilliseconds()},getMinutes:function(){return this._.getUTCMinutes()},getMonth:function(){return this._.getUTCMonth()},getSeconds:function(){return this._.getUTCSeconds()},getTime:function(){return this._.getTime()},getTimezoneOffset:function(){return 0},valueOf:function(){return this._.valueOf()},setDate:function(){rc.setUTCDate.apply(this._,arguments)},setDay:function(){rc.setUTCDay.apply(this._,arguments)},setFullYear:function(){rc.setUTCFullYear.apply(this._,arguments)},setHours:function(){rc.setUTCHours.apply(this._,arguments)},setMilliseconds:function(){rc.setUTCMilliseconds.apply(this._,arguments)},setMinutes:function(){rc.setUTCMinutes.apply(this._,arguments)},setMonth:function(){rc.setUTCMonth.apply(this._,arguments)},setSeconds:function(){rc.setUTCSeconds.apply(this._,arguments)},setTime:function(){rc.setTime.apply(this._,arguments)}};var rc=Date.prototype;tc.year=Rt(function(n){return n=tc.day(n),n.setMonth(0,1),n},function(n,t){n.setFullYear(n.getFullYear()+t)},function(n){return n.getFullYear()}),tc.years=tc.year.range,tc.years.utc=tc.year.utc.range,tc.day=Rt(function(n){var t=new ec(2e3,0);return t.setFullYear(n.getFullYear(),n.getMonth(),n.getDate()),t},function(n,t){n.setDate(n.getDate()+t)},function(n){return n.getDate()-1}),tc.days=tc.day.range,tc.days.utc=tc.day.utc.range,tc.dayOfYear=function(n){var t=tc.year(n);return Math.floor((n-t-6e4*(n.getTimezoneOffset()-t.getTimezoneOffset()))/864e5)},["sunday","monday","tuesday","wednesday","thursday","friday","saturday"].forEach(function(n,t){t=7-t;var e=tc[n]=Rt(function(n){return(n=tc.day(n)).setDate(n.getDate()-(n.getDay()+t)%7),n},function(n,t){n.setDate(n.getDate()+7*Math.floor(t))},function(n){var e=tc.year(n).getDay();return Math.floor((tc.dayOfYear(n)+(e+t)%7)/7)-(e!==t)});tc[n+"s"]=e.range,tc[n+"s"].utc=e.utc.range,tc[n+"OfYear"]=function(n){var e=tc.year(n).getDay();return 
Math.floor((tc.dayOfYear(n)+(e+t)%7)/7)}}),tc.week=tc.sunday,tc.weeks=tc.sunday.range,tc.weeks.utc=tc.sunday.utc.range,tc.weekOfYear=tc.sundayOfYear;var uc={"-":"",_:" ",0:"0"},ic=/^\s*\d+/,oc=/^%/;Xo.locale=function(n){return{numberFormat:Tt(n),timeFormat:Pt(n)}};var ac=Xo.locale({decimal:".",thousands:",",grouping:[3],currency:["$",""],dateTime:"%a %b %e %X %Y",date:"%m/%d/%Y",time:"%H:%M:%S",periods:["AM","PM"],days:["Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"],shortDays:["Sun","Mon","Tue","Wed","Thu","Fri","Sat"],months:["January","February","March","April","May","June","July","August","September","October","November","December"],shortMonths:["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"]});Xo.format=ac.numberFormat,Xo.geo={},re.prototype={s:0,t:0,add:function(n){ue(n,this.t,cc),ue(cc.s,this.s,this),this.s?this.t+=cc.t:this.s=cc.t},reset:function(){this.s=this.t=0},valueOf:function(){return this.s}};var cc=new re;Xo.geo.stream=function(n,t){n&&sc.hasOwnProperty(n.type)?sc[n.type](n,t):ie(n,t)};var sc={Feature:function(n,t){ie(n.geometry,t)},FeatureCollection:function(n,t){for(var e=n.features,r=-1,u=e.length;++r<u;)ie(e[r].geometry,t)}},lc={Sphere:function(n,t){t.sphere()},Point:function(n,t){n=n.coordinates,t.point(n[0],n[1],n[2])},MultiPoint:function(n,t){for(var e=n.coordinates,r=-1,u=e.length;++r<u;)n=e[r],t.point(n[0],n[1],n[2])},LineString:function(n,t){oe(n.coordinates,t,0)},MultiLineString:function(n,t){for(var e=n.coordinates,r=-1,u=e.length;++r<u;)oe(e[r],t,0)},Polygon:function(n,t){ae(n.coordinates,t)},MultiPolygon:function(n,t){for(var e=n.coordinates,r=-1,u=e.length;++r<u;)ae(e[r],t)},GeometryCollection:function(n,t){for(var e=n.geometries,r=-1,u=e.length;++r<u;)ie(e[r],t)}};Xo.geo.area=function(n){return fc=0,Xo.geo.stream(n,gc),fc};var fc,hc=new re,gc={sphere:function(){fc+=4*Sa},point:g,lineStart:g,lineEnd:g,polygonStart:function(){hc.reset(),gc.lineStart=ce},polygonEnd:function(){var 
n=2*hc;fc+=0>n?4*Sa+n:n,gc.lineStart=gc.lineEnd=gc.point=g}};Xo.geo.bounds=function(){function n(n,t){x.push(M=[l=n,h=n]),f>t&&(f=t),t>g&&(g=t)}function t(t,e){var r=se([t*Na,e*Na]);if(m){var u=fe(m,r),i=[u[1],-u[0],0],o=fe(i,u);pe(o),o=ve(o);var c=t-p,s=c>0?1:-1,v=o[0]*La*s,d=oa(c)>180;if(d^(v>s*p&&s*t>v)){var y=o[1]*La;y>g&&(g=y)}else if(v=(v+360)%360-180,d^(v>s*p&&s*t>v)){var y=-o[1]*La;f>y&&(f=y)}else f>e&&(f=e),e>g&&(g=e);d?p>t?a(l,t)>a(l,h)&&(h=t):a(t,h)>a(l,h)&&(l=t):h>=l?(l>t&&(l=t),t>h&&(h=t)):t>p?a(l,t)>a(l,h)&&(h=t):a(t,h)>a(l,h)&&(l=t)}else n(t,e);m=r,p=t}function e(){_.point=t}function r(){M[0]=l,M[1]=h,_.point=n,m=null}function u(n,e){if(m){var r=n-p;y+=oa(r)>180?r+(r>0?360:-360):r}else v=n,d=e;gc.point(n,e),t(n,e)}function i(){gc.lineStart()}function o(){u(v,d),gc.lineEnd(),oa(y)>Aa&&(l=-(h=180)),M[0]=l,M[1]=h,m=null}function a(n,t){return(t-=n)<0?t+360:t}function c(n,t){return n[0]-t[0]}function s(n,t){return t[0]<=t[1]?t[0]<=n&&n<=t[1]:n<t[0]||t[1]<n}var l,f,h,g,p,v,d,m,y,x,M,_={point:n,lineStart:e,lineEnd:r,polygonStart:function(){_.point=u,_.lineStart=i,_.lineEnd=o,y=0,gc.polygonStart()},polygonEnd:function(){gc.polygonEnd(),_.point=n,_.lineStart=e,_.lineEnd=r,0>hc?(l=-(h=180),f=-(g=90)):y>Aa?g=90:-Aa>y&&(f=-90),M[0]=l,M[1]=h}};return function(n){g=h=-(l=f=1/0),x=[],Xo.geo.stream(n,_);var t=x.length;if(t){x.sort(c);for(var e,r=1,u=x[0],i=[u];t>r;++r)e=x[r],s(e[0],u)||s(e[1],u)?(a(u[0],e[1])>a(u[0],u[1])&&(u[1]=e[1]),a(e[0],u[1])>a(u[0],u[1])&&(u[0]=e[0])):i.push(u=e);for(var o,e,p=-1/0,t=i.length-1,r=0,u=i[t];t>=r;u=e,++r)e=i[r],(o=a(u[1],e[0]))>p&&(p=o,l=e[0],h=u[1])}return x=M=null,1/0===l||1/0===f?[[0/0,0/0],[0/0,0/0]]:[[l,f],[h,g]]}}(),Xo.geo.centroid=function(n){pc=vc=dc=mc=yc=xc=Mc=_c=bc=wc=Sc=0,Xo.geo.stream(n,kc);var t=bc,e=wc,r=Sc,u=t*t+e*e+r*r;return Ca>u&&(t=xc,e=Mc,r=_c,Aa>vc&&(t=dc,e=mc,r=yc),u=t*t+e*e+r*r,Ca>u)?[0/0,0/0]:[Math.atan2(e,t)*La,X(r/Math.sqrt(u))*La]};var 
pc,vc,dc,mc,yc,xc,Mc,_c,bc,wc,Sc,kc={sphere:g,point:me,lineStart:xe,lineEnd:Me,polygonStart:function(){kc.lineStart=_e},polygonEnd:function(){kc.lineStart=xe}},Ec=Ee(be,Te,ze,[-Sa,-Sa/2]),Ac=1e9;Xo.geo.clipExtent=function(){var n,t,e,r,u,i,o={stream:function(n){return u&&(u.valid=!1),u=i(n),u.valid=!0,u},extent:function(a){return arguments.length?(i=Pe(n=+a[0][0],t=+a[0][1],e=+a[1][0],r=+a[1][1]),u&&(u.valid=!1,u=null),o):[[n,t],[e,r]]}};return o.extent([[0,0],[960,500]])},(Xo.geo.conicEqualArea=function(){return je(They)}).raw=They,Xo.geo.albers=function(){return Xo.geo.conicEqualArea().rotate([96,0]).center([-.6,38.7]).parallels([29.5,45.5]).scale(1070)},Xo.geo.albersUsa=function(){function n(n){var i=n[0],o=n[1];return t=null,e(i,o),t||(r(i,o),t)||u(i,o),t}var t,e,r,u,i=Xo.geo.albers(),o=Xo.geo.conicEqualArea().rotate([154,0]).center([-2,58.5]).parallels([55,65]),a=Xo.geo.conicEqualArea().rotate([157,0]).center([-3,19.9]).parallels([8,18]),c={point:function(n,e){t=[n,e]}};return n.invert=function(n){var t=i.scale(),e=i.translate(),r=(n[0]-e[0])/t,u=(n[1]-e[1])/t;return(u>=.12&&.234>u&&r>=-.425&&-.214>r?o:u>=.166&&.234>u&&r>=-.214&&-.115>r?a:i).invert(n)},n.stream=function(n){var t=i.stream(n),e=o.stream(n),r=a.stream(n);return{point:function(n,u){t.point(n,u),e.point(n,u),r.point(n,u)},sphere:function(){t.sphere(),e.sphere(),r.sphere()},lineStart:function(){t.lineStart(),e.lineStart(),r.lineStart()},lineEnd:function(){t.lineEnd(),e.lineEnd(),r.lineEnd()},polygonStart:function(){t.polygonStart(),e.polygonStart(),r.polygonStart()},polygonEnd:function(){t.polygonEnd(),e.polygonEnd(),r.polygonEnd()}}},n.precision=function(t){return arguments.length?(i.precision(t),o.precision(t),a.precision(t),n):i.precision()},n.scale=function(t){return arguments.length?(i.scale(t),o.scale(.35*t),a.scale(t),n.translate(i.translate())):i.scale()},n.translate=function(t){if(!arguments.length)return i.translate();var s=i.scale(),l=+t[0],f=+t[1];return 
e=i.translate(t).clipExtent([[l-.455*s,f-.238*s],[l+.455*s,f+.238*s]]).stream(c).point,r=o.translate([l-.307*s,f+.201*s]).clipExtent([[l-.425*s+Aa,f+.12*s+Aa],[l-.214*s-Aa,f+.234*s-Aa]]).stream(c).point,u=a.translate([l-.205*s,f+.212*s]).clipExtent([[l-.214*s+Aa,f+.166*s+Aa],[l-.115*s-Aa,f+.234*s-Aa]]).stream(c).point,n},n.scale(1070)};var Cc,Nc,Lc,Tc,qc,zc,Rc={point:g,lineStart:g,lineEnd:g,polygonStart:function(){Nc=0,Rc.lineStart=Fe},polygonEnd:function(){Rc.lineStart=Rc.lineEnd=Rc.point=g,Cc+=oa(Nc/2)}},Dc={point:Oe,lineStart:g,lineEnd:g,polygonStart:g,polygonEnd:g},Pc={point:Ze,lineStart:Ve,lineEnd:Xe,polygonStart:function(){Pc.lineStart=$e},polygonEnd:function(){Pc.point=Ze,Pc.lineStart=Ve,Pc.lineEnd=Xe}};Xo.geo.path=function(){function n(n){return n&&("function"==typeof a&&i.pointRadius(+a.apply(this,arguments)),o&&o.valid||(o=u(i)),Xo.geo.stream(n,o)),i.result()}function t(){return o=null,n}var e,r,u,i,o,a=4.5;return n.area=function(n){return Cc=0,Xo.geo.stream(n,u(Rc)),Cc},n.centroid=function(n){return dc=mc=yc=xc=Mc=_c=bc=wc=Sc=0,Xo.geo.stream(n,u(Pc)),Sc?[bc/Sc,wc/Sc]:_c?[xc/_c,Mc/_c]:yc?[dc/yc,mc/yc]:[0/0,0/0]},n.bounds=function(n){return qc=zc=-(Lc=Tc=1/0),Xo.geo.stream(n,u(Dc)),[[Lc,Tc],[qc,zc]]},n.projection=function(n){return arguments.length?(u=(e=n)?n.stream||Je(n):bt,t()):e},n.context=function(n){return arguments.length?(i=null==(r=n)?new Ye:new Be(n),"function"!=typeof a&&i.pointRadius(a),t()):r},n.pointRadius=function(t){return arguments.length?(a="function"==typeof t?t:(i.pointRadius(+t),+t),n):a},n.projection(Xo.geo.albersUsa()).context(null)},Xo.geo.transform=function(n){return{stream:function(t){var e=new Ge(t);for(var r in n)e[r]=n[r];return 
e}}},Ge.prototype={point:function(n,t){this.stream.point(n,t)},sphere:function(){this.stream.sphere()},lineStart:function(){this.stream.lineStart()},lineEnd:function(){this.stream.lineEnd()},polygonStart:function(){this.stream.polygonStart()},polygonEnd:function(){this.stream.polygonEnd()}},Xo.geo.projection=Qe,Xo.geo.projectionMutator=nr,(Xo.geo.equirectangular=function(){return Qe(er)}).raw=er.invert=er,Xo.geo.rotation=function(n){function t(t){return t=n(t[0]*Na,t[1]*Na),t[0]*=La,t[1]*=La,t}return n=ur(n[0]%360*Na,n[1]*Na,n.length>2?n[2]*Na:0),t.invert=function(t){return t=n.invert(t[0]*Na,t[1]*Na),t[0]*=La,t[1]*=La,t},t},rr.invert=er,Xo.geo.circle=function(){function n(){var n="function"==typeof r?r.apply(this,arguments):r,t=ur(-n[0]*Na,-n[1]*Na,0).invert,u=[];return e(null,null,1,{point:function(n,e){u.push(n=t(n,e)),n[0]*=La,n[1]*=La}}),{type:"Polygon",coordinates:[u]}}var t,e,r=[0,0],u=6;return n.origin=function(t){return arguments.length?(r=t,n):r},n.angle=function(r){return arguments.length?(e=cr((t=+r)*Na,u*Na),n):t},n.precision=function(r){return arguments.length?(e=cr(t*Na,(u=+r)*Na),n):u},n.angle(90)},Xo.geo.distance=function(n,t){var e,r=(t[0]-n[0])*Na,u=n[1]*Na,i=t[1]*Na,o=Math.sin(r),a=Math.cos(r),c=Math.sin(u),s=Math.cos(u),l=Math.sin(i),f=Math.cos(i);return Math.atan2(Math.sqrt((e=f*o)*e+(e=s*l-c*f*a)*e),c*l+s*f*a)},Xo.geo.graticule=function(){function n(){return{type:"MultiLineString",coordinates:t()}}function t(){return Xo.range(Math.ceil(i/d)*d,u,d).map(h).concat(Xo.range(Math.ceil(s/m)*m,c,m).map(g)).concat(Xo.range(Math.ceil(r/p)*p,e,p).filter(function(n){return oa(n%d)>Aa}).map(l)).concat(Xo.range(Math.ceil(a/v)*v,o,v).filter(function(n){return oa(n%m)>Aa}).map(f))}var e,r,u,i,o,a,c,s,l,f,h,g,p=10,v=p,d=90,m=360,y=2.5;return n.lines=function(){return 
t().map(function(n){return{type:"LineString",coordinates:n}})},n.outline=function(){return{type:"Polygon",coordinates:[h(i).concat(g(c).slice(1),h(u).reverse().slice(1),g(s).reverse().slice(1))]}},n.extent=function(t){return arguments.length?n.majorExtent(t).minorExtent(t):n.minorExtent()},n.majorExtent=function(t){return arguments.length?(i=+t[0][0],u=+t[1][0],s=+t[0][1],c=+t[1][1],i>u&&(t=i,i=u,u=t),s>c&&(t=s,s=c,c=t),n.precision(y)):[[i,s],[u,c]]},n.minorExtent=function(t){return arguments.length?(r=+t[0][0],e=+t[1][0],a=+t[0][1],o=+t[1][1],r>e&&(t=r,r=e,e=t),a>o&&(t=a,a=o,o=t),n.precision(y)):[[r,a],[e,o]]},n.step=function(t){return arguments.length?n.majorStep(t).minorStep(t):n.minorStep()},n.majorStep=function(t){return arguments.length?(d=+t[0],m=+t[1],n):[d,m]},n.minorStep=function(t){return arguments.length?(p=+t[0],v=+t[1],n):[p,v]},n.precision=function(t){return arguments.length?(y=+t,l=lr(a,o,90),f=fr(r,e,y),h=lr(s,c,90),g=fr(i,u,y),n):y},n.majorExtent([[-180,-90+Aa],[180,90-Aa]]).minorExtent([[-180,-80-Aa],[180,80+Aa]])},Xo.geo.greatArc=function(){function n(){return{type:"LineString",coordinates:[t||r.apply(this,arguments),e||u.apply(this,arguments)]}}var t,e,r=hr,u=gr;return n.distance=function(){return Xo.geo.distance(t||r.apply(this,arguments),e||u.apply(this,arguments))},n.source=function(e){return arguments.length?(r=e,t="function"==typeof e?null:e,n):r},n.target=function(t){return arguments.length?(u=t,e="function"==typeof t?null:t,n):u},n.precision=function(){return arguments.length?n:0},n},Xo.geo.interpolate=function(n,t){return pr(n[0]*Na,n[1]*Na,t[0]*Na,t[1]*Na)},Xo.geo.length=function(n){return Uc=0,Xo.geo.stream(n,jc),Uc};var Uc,jc={sphere:g,point:g,lineStart:vr,lineEnd:g,polygonStart:g,polygonEnd:g},Hc=dr(function(n){return Math.sqrt(2/(1+n))},function(n){return 2*Math.asin(n/2)});(Xo.geo.azimuthalEqualArea=function(){return Qe(Hc)}).raw=Hc;var Fc=dr(function(n){var t=Math.acos(n);return 
t&&t/Math.sin(t)},bt);(Xo.geo.azimuthalEquidistant=function(){return Qe(Fc)}).raw=Fc,(Xo.geo.conicConformal=function(){return je(mr)}).raw=mr,(Xo.geo.conicEquidistant=function(){return je(yr)}).raw=yr;var Oc=dr(function(n){return 1/n},Math.atan);(Xo.geo.gnomonic=function(){return Qe(Oc)}).raw=Oc,xr.invert=function(n,t){return[n,2*Math.atan(Math.exp(t))-Ea]},(Xo.geo.mercator=function(){return Mr(xr)}).raw=xr;var Yc=dr(function(){return 1},Math.asin);(Xo.geo.orthographic=function(){return Qe(Yc)}).raw=Yc;var Ic=dr(function(n){return 1/(1+n)},function(n){return 2*Math.atan(n)});(Xo.geo.stereographic=function(){return Qe(Ic)}).raw=Ic,_r.invert=function(n,t){return[-t,2*Math.atan(Math.exp(n))-Ea]},(Xo.geo.transverseMercator=function(){var n=Mr(_r),t=n.center,e=n.rotate;return n.center=function(n){return n?t([-n[1],n[0]]):(n=t(),[-n[1],n[0]])},n.rotate=function(n){return n?e([n[0],n[1],n.length>2?n[2]+90:90]):(n=e(),[n[0],n[1],n[2]-90])},n.rotate([0,0])}).raw=_r,Xo.geom={},Xo.geom.hull=function(n){function t(n){if(n.length<3)return[];var t,u=_t(e),i=_t(r),o=n.length,a=[],c=[];for(t=0;o>t;t++)a.push([+u.call(this,n[t],t),+i.call(this,n[t],t),t]);for(a.sort(kr),t=0;o>t;t++)c.push([a[t][0],-a[t][1]]);var s=Sr(a),l=Sr(c),f=l[0]===s[0],h=l[l.length-1]===s[s.length-1],g=[];for(t=s.length-1;t>=0;--t)g.push(n[a[s[t]][2]]);for(t=+f;t<l.length-h;++t)g.push(n[a[l[t]][2]]);return g}var e=br,r=wr;return arguments.length?t(n):(t.x=function(n){return arguments.length?(e=n,t):e},t.y=function(n){return arguments.length?(r=n,t):r},t)},Xo.geom.polygon=function(n){return fa(n,Zc),n};var Zc=Xo.geom.polygon.prototype=[];Zc.area=function(){for(var n,t=-1,e=this.length,r=this[e-1],u=0;++t<e;)n=r,r=this[t],u+=n[1]*r[0]-n[0]*r[1];return.5*u},Zc.centroid=function(n){var t,e,r=-1,u=this.length,i=0,o=0,a=this[u-1];for(arguments.length||(n=-1/(6*this.area()));++r<u;)t=a,a=this[r],e=t[0]*a[1]-a[0]*t[1],i+=(t[0]+a[0])*e,o+=(t[1]+a[1])*e;return[i*n,o*n]},Zc.clip=function(n){for(var 
t,e,r,u,i,o,a=Cr(n),c=-1,s=this.length-Cr(this),l=this[s-1];++c<s;){for(t=n.slice(),n.length=0,u=this[c],i=t[(r=t.length-a)-1],e=-1;++e<r;)o=t[e],Er(o,l,u)?(Er(i,l,u)||n.push(Ar(i,o,l,u)),n.push(o)):Er(i,l,u)&&n.push(Ar(i,o,l,u)),i=o;a&&n.push(n[0]),l=u}return n};var Vc,Xc,$c,Bc,Wc,Jc=[],Gc=[];Pr.prototype.prepare=function(){for(var n,t=this.edges,e=t.length;e--;)n=t[e].edge,n.b&&n.a||t.splice(e,1);return t.sort(jr),t.length},Br.prototype={start:function(){return this.edge.l===this.site?this.edge.a:this.edge.b},end:function(){return this.edge.l===this.site?this.edge.b:this.edge.a}},Wr.prototype={insert:function(n,t){var e,r,u;if(n){if(t.P=n,t.N=n.N,n.N&&(n.N.P=t),n.N=t,n.R){for(n=n.R;n.L;)n=n.L;n.L=t}else n.R=t;e=n}else this._?(n=Qr(this._),t.P=null,t.N=n,n.P=n.L=t,e=n):(t.P=t.N=null,this._=t,e=null);for(t.L=t.R=null,t.U=e,t.C=!0,n=t;e&&e.C;)r=e.U,e===r.L?(u=r.R,u&&u.C?(e.C=u.C=!1,r.C=!0,n=r):(n===e.R&&(Gr(this,e),n=e,e=n.U),e.C=!1,r.C=!0,Kr(this,r))):(u=r.L,u&&u.C?(e.C=u.C=!1,r.C=!0,n=r):(n===e.L&&(Kr(this,e),n=e,e=n.U),e.C=!1,r.C=!0,Gr(this,r))),e=n.U;this._.C=!1},remove:function(n){n.N&&(n.N.P=n.P),n.P&&(n.P.N=n.N),n.N=n.P=null;var t,e,r,u=n.U,i=n.L,o=n.R;if(e=i?o?Qr(o):i:o,u?u.L===n?u.L=e:u.R=e:this._=e,i&&o?(r=e.C,e.C=n.C,e.L=i,i.U=e,e!==o?(u=e.U,e.U=n.U,n=e.R,u.L=n,e.R=o,o.U=e):(e.U=u,u=e,n=e.R)):(r=n.C,n=e),n&&(n.U=u),!r){if(n&&n.C)return n.C=!1,void 0;do{if(n===this._)break;if(n===u.L){if(t=u.R,t.C&&(t.C=!1,u.C=!0,Gr(this,u),t=u.R),t.L&&t.L.C||t.R&&t.R.C){t.R&&t.R.C||(t.L.C=!1,t.C=!0,Kr(this,t),t=u.R),t.C=u.C,u.C=t.R.C=!1,Gr(this,u),n=this._;break}}else if(t=u.L,t.C&&(t.C=!1,u.C=!0,Kr(this,u),t=u.L),t.L&&t.L.C||t.R&&t.R.C){t.L&&t.L.C||(t.R.C=!1,t.C=!0,Gr(this,t),t=u.L),t.C=u.C,u.C=t.L.C=!1,Kr(this,u),n=this._;break}t.C=!0,n=u,u=u.U}while(!n.C);n&&(n.C=!1)}}},Xo.geom.voronoi=function(n){function t(n){var t=new Array(n.length),r=a[0][0],u=a[0][1],i=a[1][0],o=a[1][1];return nu(e(n),a).cells.forEach(function(e,a){var 
c=e.edges,s=e.site,l=t[a]=c.length?c.map(function(n){var t=n.start();return[t.x,t.y]}):s.x>=r&&s.x<=i&&s.y>=u&&s.y<=o?[[r,o],[i,o],[i,u],[r,u]]:[];l.point=n[a]}),t}function e(n){return n.map(function(n,t){return{x:Math.round(i(n,t)/Aa)*Aa,y:Math.round(o(n,t)/Aa)*Aa,i:t}})}var r=br,u=wr,i=r,o=u,a=Kc;return n?t(n):(t.links=function(n){return nu(e(n)).edges.filter(function(n){return n.l&&n.r}).map(function(t){return{source:n[t.l.i],target:n[t.r.i]}})},t.triangles=function(n){var t=[];return nu(e(n)).cells.forEach(function(e,r){for(var u,i,o=e.site,a=e.edges.sort(jr),c=-1,s=a.length,l=a[s-1].edge,f=l.l===o?l.r:l.l;++c<s;)u=l,i=f,l=a[c].edge,f=l.l===o?l.r:l.l,r<i.i&&r<f.i&&eu(o,i,f)<0&&t.push([n[r],n[i.i],n[f.i]])}),t},t.x=function(n){return arguments.length?(i=_t(r=n),t):r},t.y=function(n){return arguments.length?(o=_t(u=n),t):u},t.clipExtent=function(n){return arguments.length?(a=null==n?Kc:n,t):a===Kc?null:a},t.size=function(n){return arguments.length?t.clipExtent(n&&[[0,0],n]):a===Kc?null:a&&a[1]},t)};var Kc=[[-1e6,-1e6],[1e6,1e6]];Xo.geom.delaunay=function(n){return Xo.geom.voronoi().triangles(n)},Xo.geom.quadtree=function(n,t,e,r,u){function i(n){function i(n,t,e,r,u,i,o,a){if(!isNaN(e)&&!isNaN(r))if(n.leaf){var c=n.x,l=n.y;if(null!=c)if(oa(c-e)+oa(l-r)<.01)s(n,t,e,r,u,i,o,a);else{var f=n.point;n.x=n.y=n.point=null,s(n,f,c,l,u,i,o,a),s(n,t,e,r,u,i,o,a)}else n.x=e,n.y=r,n.point=t}else s(n,t,e,r,u,i,o,a)}function s(n,t,e,r,u,o,a,c){var s=.5*(u+a),l=.5*(o+c),f=e>=s,h=r>=l,g=(h<<1)+f;n.leaf=!1,n=n.nodes[g]||(n.nodes[g]=iu()),f?u=s:a=s,h?o=l:c=l,i(n,t,e,r,u,o,a,c)}var l,f,h,g,p,v,d,m,y,x=_t(a),M=_t(c);if(null!=t)v=t,d=e,m=r,y=u;else if(m=y=-(v=d=1/0),f=[],h=[],p=n.length,o)for(g=0;p>g;++g)l=n[g],l.x<v&&(v=l.x),l.y<d&&(d=l.y),l.x>m&&(m=l.x),l.y>y&&(y=l.y),f.push(l.x),h.push(l.y);else for(g=0;p>g;++g){var _=+x(l=n[g],g),b=+M(l,g);v>_&&(v=_),d>b&&(d=b),_>m&&(m=_),b>y&&(y=b),f.push(_),h.push(b)}var w=m-v,S=y-d;w>S?y=d+w:m=v+S;var 
k=iu();if(k.add=function(n){i(k,n,+x(n,++g),+M(n,g),v,d,m,y)},k.visit=function(n){ou(n,k,v,d,m,y)},g=-1,null==t){for(;++g<p;)i(k,n[g],f[g],h[g],v,d,m,y);--g}else n.forEach(k.add);return f=h=n=l=null,k}var o,a=br,c=wr;return(o=arguments.length)?(a=ru,c=uu,3===o&&(u=e,r=t,e=t=0),i(n)):(i.x=function(n){return arguments.length?(a=n,i):a},i.y=function(n){return arguments.length?(c=n,i):c},i.extent=function(n){return arguments.length?(null==n?t=e=r=u=null:(t=+n[0][0],e=+n[0][1],r=+n[1][0],u=+n[1][1]),i):null==t?null:[[t,e],[r,u]]},i.size=function(n){return arguments.length?(null==n?t=e=r=u=null:(t=e=0,r=+n[0],u=+n[1]),i):null==t?null:[r-t,u-e]},i)},Xo.interpolateRgb=au,Xo.interpolateObject=cu,Xo.interpolateNumber=su,Xo.interpolateString=lu;var Qc=/[-+]?(?:\d+\.?\d*|\.?\d+)(?:[eE][-+]?\d+)?/g;Xo.interpolate=fu,Xo.interpolators=[function(n,t){var e=typeof t;return("string"===e?Va.has(t)||/^(#|rgb\(|hsl\()/.test(t)?au:lu:t instanceof G?au:"object"===e?Array.isArray(t)?hu:cu:su)(n,t)}],Xo.interpolateArray=hu;var ns=function(){return bt},ts=Xo.map({linear:ns,poly:xu,quad:function(){return du},cubic:function(){return mu},sin:function(){return Mu},exp:function(){return _u},circle:function(){return bu},elastic:wu,back:Su,bounce:function(){return ku}}),es=Xo.map({"in":bt,out:pu,"in-out":vu,"out-in":function(n){return vu(pu(n))}});Xo.ease=function(n){var t=n.indexOf("-"),e=t>=0?n.substring(0,t):n,r=t>=0?n.substring(t+1):"in";return e=ts.get(e)||ns,r=es.get(r)||bt,gu(r(e.apply(null,$o.call(arguments,1))))},Xo.interpolateHcl=Eu,Xo.interpolateHsl=Au,Xo.interpolateLab=Cu,Xo.interpolateRound=Nu,Xo.transform=function(n){var t=Wo.createElementNS(Xo.ns.prefix.svg,"g");return(Xo.transform=function(n){if(null!=n){t.setAttribute("transform",n);var e=t.transform.baseVal.consolidate()}return new Lu(e?e.matrix:rs)})(n)},Lu.prototype.toString=function(){return"translate("+this.translate+")rotate("+this.rotate+")skewX("+this.skew+")scale("+this.scale+")"};var 
rs={a:1,b:0,c:0,d:1,e:0,f:0};Xo.interpolateTransform=Ru,Xo.layout={},Xo.layout.bundle=function(){return function(n){for(var t=[],e=-1,r=n.length;++e<r;)t.push(Uu(n[e]));return t}},Xo.layout.chord=function(){function n(){var n,s,f,h,g,p={},v=[],d=Xo.range(i),m=[];for(e=[],r=[],n=0,h=-1;++h<i;){for(s=0,g=-1;++g<i;)s+=u[h][g];v.push(s),m.push(Xo.range(i)),n+=s}for(o&&d.sort(function(n,t){return o(v[n],v[t])}),a&&m.forEach(function(n,t){n.sort(function(n,e){return a(u[t][n],u[t][e])})}),n=(ka-l*i)/n,s=0,h=-1;++h<i;){for(f=s,g=-1;++g<i;){var y=d[h],x=m[y][g],M=u[y][x],_=s,b=s+=M*n;p[y+"-"+x]={index:y,subindex:x,startAngle:_,endAngle:b,value:M}}r[y]={index:y,startAngle:f,endAngle:s,value:(s-f)/n},s+=l}for(h=-1;++h<i;)for(g=h-1;++g<i;){var w=p[h+"-"+g],S=p[g+"-"+h];(w.value||S.value)&&e.push(w.value<S.value?{source:S,target:w}:{source:w,target:S})}c&&t()}function t(){e.sort(function(n,t){return c((n.source.value+n.target.value)/2,(t.source.value+t.target.value)/2)})}var e,r,u,i,o,a,c,s={},l=0;return s.matrix=function(n){return arguments.length?(i=(u=n)&&u.length,e=r=null,s):u},s.padding=function(n){return arguments.length?(l=n,e=r=null,s):l},s.sortGroups=function(n){return arguments.length?(o=n,e=r=null,s):o},s.sortSubgroups=function(n){return arguments.length?(a=n,e=null,s):a},s.sortChords=function(n){return arguments.length?(c=n,e&&t(),s):c},s.chords=function(){return e||n(),e},s.groups=function(){return r||n(),r},s},Xo.layout.force=function(){function n(n){return function(t,e,r,u){if(t.point!==n){var i=t.cx-n.x,o=t.cy-n.y,a=u-e,c=i*i+o*o;if(c>a*a/d){if(p>c){var s=t.charge/c;n.px-=i*s,n.py-=o*s}return!0}if(t.point&&c&&p>c){var s=t.pointCharge/c;n.px-=i*s,n.py-=o*s}}return!t.charge}}function t(n){n.px=Xo.event.x,n.py=Xo.event.y,a.resume()}var e,r,u,i,o,a={},c=Xo.dispatch("start","tick","end"),s=[1,1],l=.9,f=us,h=is,g=-30,p=os,v=.1,d=.64,m=[],y=[];return a.tick=function(){if((r*=.99)<.005)return c.end({type:"end",alpha:r=0}),!0;var 
t,e,a,f,h,p,d,x,M,_=m.length,b=y.length;for(e=0;b>e;++e)a=y[e],f=a.source,h=a.target,x=h.x-f.x,M=h.y-f.y,(p=x*x+M*M)&&(p=r*i[e]*((p=Math.sqrt(p))-u[e])/p,x*=p,M*=p,h.x-=x*(d=f.weight/(h.weight+f.weight)),h.y-=M*d,f.x+=x*(d=1-d),f.y+=M*d);if((d=r*v)&&(x=s[0]/2,M=s[1]/2,e=-1,d))for(;++e<_;)a=m[e],a.x+=(x-a.x)*d,a.y+=(M-a.y)*d;if(g)for(Zu(t=Xo.geom.quadtree(m),r,o),e=-1;++e<_;)(a=m[e]).fixed||t.visit(n(a));for(e=-1;++e<_;)a=m[e],a.fixed?(a.x=a.px,a.y=a.py):(a.x-=(a.px-(a.px=a.x))*l,a.y-=(a.py-(a.py=a.y))*l);c.tick({type:"tick",alpha:r})},a.nodes=function(n){return arguments.length?(m=n,a):m},a.links=function(n){return arguments.length?(y=n,a):y},a.size=function(n){return arguments.length?(s=n,a):s},a.linkDistance=function(n){return arguments.length?(f="function"==typeof n?n:+n,a):f},a.distance=a.linkDistance,a.linkStrength=function(n){return arguments.length?(h="function"==typeof n?n:+n,a):h},a.friction=function(n){return arguments.length?(l=+n,a):l},a.charge=function(n){return arguments.length?(g="function"==typeof n?n:+n,a):g},a.chargeDistance=function(n){return arguments.length?(p=n*n,a):Math.sqrt(p)},a.gravity=function(n){return arguments.length?(v=+n,a):v},a.theta=function(n){return arguments.length?(d=n*n,a):Math.sqrt(d)},a.alpha=function(n){return arguments.length?(n=+n,r?r=n>0?n:0:n>0&&(c.start({type:"start",alpha:r=n}),Xo.timer(a.tick)),a):r},a.start=function(){function n(n,r){if(!e){for(e=new Array(c),a=0;c>a;++a)e[a]=[];for(a=0;s>a;++a){var u=y[a];e[u.source.index].push(u.target),e[u.target.index].push(u.source)}}for(var i,o=e[t],a=-1,s=o.length;++a<s;)if(!isNaN(i=o[a][n]))return i;return Math.random()*r}var t,e,r,c=m.length,l=y.length,p=s[0],v=s[1];for(t=0;c>t;++t)(r=m[t]).index=t,r.weight=0;for(t=0;l>t;++t)r=y[t],"number"==typeof r.source&&(r.source=m[r.source]),"number"==typeof 
r.target&&(r.target=m[r.target]),++r.source.weight,++r.target.weight;for(t=0;c>t;++t)r=m[t],isNaN(r.x)&&(r.x=n("x",p)),isNaN(r.y)&&(r.y=n("y",v)),isNaN(r.px)&&(r.px=r.x),isNaN(r.py)&&(r.py=r.y);if(u=[],"function"==typeof f)for(t=0;l>t;++t)u[t]=+f.call(this,y[t],t);else for(t=0;l>t;++t)u[t]=f;if(i=[],"function"==typeof h)for(t=0;l>t;++t)i[t]=+h.call(this,y[t],t);else for(t=0;l>t;++t)i[t]=h;if(o=[],"function"==typeof g)for(t=0;c>t;++t)o[t]=+g.call(this,m[t],t);else for(t=0;c>t;++t)o[t]=g;return a.resume()},a.resume=function(){return a.alpha(.1)},a.stop=function(){return a.alpha(0)},a.drag=function(){return e||(e=Xo.behavior.drag().origin(bt).on("dragstart.force",Fu).on("drag.force",t).on("dragend.force",Ou)),arguments.length?(this.on("mouseover.force",Yu).on("mouseout.force",Iu).call(e),void 0):e},Xo.rebind(a,c,"on")};var us=20,is=1,os=1/0;Xo.layout.hierarchy=function(){function n(t,o,a){var c=u.call(e,t,o);if(t.depth=o,a.push(t),c&&(s=c.length)){for(var s,l,f=-1,h=t.children=new Array(s),g=0,p=o+1;++f<s;)l=h[f]=n(c[f],p,a),l.parent=t,g+=l.value;r&&h.sort(r),i&&(t.value=g)}else delete t.children,i&&(t.value=+i.call(e,t,o)||0);return t}function t(n,r){var u=n.children,o=0;if(u&&(a=u.length))for(var a,c=-1,s=r+1;++c<a;)o+=t(u[c],s);else i&&(o=+i.call(e,n,r)||0);return i&&(n.value=o),o}function e(t){var e=[];return n(t,0,e),e}var r=Bu,u=Xu,i=$u;return e.sort=function(n){return arguments.length?(r=n,e):r},e.children=function(n){return arguments.length?(u=n,e):u},e.value=function(n){return arguments.length?(i=n,e):i},e.revalue=function(n){return t(n,0),n},e},Xo.layout.partition=function(){function n(t,e,r,u){var i=t.children;if(t.x=e,t.y=t.depth*u,t.dx=r,t.dy=u,i&&(o=i.length)){var o,a,c,s=-1;for(r=t.value?r/t.value:0;++s<o;)n(a=i[s],e,c=a.value*r,u),e+=c}}function t(n){var e=n.children,r=0;if(e&&(u=e.length))for(var u,i=-1;++i<u;)r=Math.max(r,t(e[i]));return 1+r}function e(e,i){var o=r.call(this,e,i);return n(o[0],0,u[0],u[1]/t(o[0])),o}var 
r=Xo.layout.hierarchy(),u=[1,1];return e.size=function(n){return arguments.length?(u=n,e):u},Vu(e,r)},Xo.layout.pie=function(){function n(i){var o=i.map(function(e,r){return+t.call(n,e,r)}),a=+("function"==typeof r?r.apply(this,arguments):r),c=(("function"==typeof u?u.apply(this,arguments):u)-a)/Xo.sum(o),s=Xo.range(i.length);null!=e&&s.sort(e===as?function(n,t){return o[t]-o[n]}:function(n,t){return e(i[n],i[t])});var l=[];return s.forEach(function(n){var t;l[n]={data:i[n],value:t=o[n],startAngle:a,endAngle:a+=t*c}}),l}var t=Number,e=as,r=0,u=ka;return n.value=function(e){return arguments.length?(t=e,n):t},n.sort=function(t){return arguments.length?(e=t,n):e},n.startAngle=function(t){return arguments.length?(r=t,n):r},n.endAngle=function(t){return arguments.length?(u=t,n):u},n};var as={};Xo.layout.stack=function(){function n(a,c){var s=a.map(function(e,r){return t.call(n,e,r)}),l=s.map(function(t){return t.map(function(t,e){return[i.call(n,t,e),o.call(n,t,e)]})}),f=e.call(n,l,c);s=Xo.permute(s,f),l=Xo.permute(l,f);var h,g,p,v=r.call(n,l,c),d=s.length,m=s[0].length;for(g=0;m>g;++g)for(u.call(n,s[0][g],p=v[g],l[0][g][1]),h=1;d>h;++h)u.call(n,s[h][g],p+=l[h-1][g][1],l[h][g][1]);return a}var t=bt,e=Qu,r=ni,u=Ku,i=Ju,o=Gu;return n.values=function(e){return arguments.length?(t=e,n):t},n.order=function(t){return arguments.length?(e="function"==typeof t?t:cs.get(t)||Qu,n):e},n.offset=function(t){return arguments.length?(r="function"==typeof t?t:ss.get(t)||ni,n):r},n.x=function(t){return arguments.length?(i=t,n):i},n.y=function(t){return arguments.length?(o=t,n):o},n.out=function(t){return arguments.length?(u=t,n):u},n};var cs=Xo.map({"inside-out":function(n){var t,e,r=n.length,u=n.map(ti),i=n.map(ei),o=Xo.range(r).sort(function(n,t){return u[n]-u[t]}),a=0,c=0,s=[],l=[];for(t=0;r>t;++t)e=o[t],c>a?(a+=i[e],s.push(e)):(c+=i[e],l.push(e));return l.reverse().concat(s)},reverse:function(n){return Xo.range(n.length).reverse()},"default":Qu}),ss=Xo.map({silhouette:function(n){var 
t,e,r,u=n.length,i=n[0].length,o=[],a=0,c=[];for(e=0;i>e;++e){for(t=0,r=0;u>t;t++)r+=n[t][e][1];r>a&&(a=r),o.push(r)}for(e=0;i>e;++e)c[e]=(a-o[e])/2;return c},wiggle:function(n){var t,e,r,u,i,o,a,c,s,l=n.length,f=n[0],h=f.length,g=[];for(g[0]=c=s=0,e=1;h>e;++e){for(t=0,u=0;l>t;++t)u+=n[t][e][1];for(t=0,i=0,a=f[e][0]-f[e-1][0];l>t;++t){for(r=0,o=(n[t][e][1]-n[t][e-1][1])/(2*a);t>r;++r)o+=(n[r][e][1]-n[r][e-1][1])/a;i+=o*n[t][e][1]}g[e]=c-=u?i/u*a:0,s>c&&(s=c)}for(e=0;h>e;++e)g[e]-=s;return g},expand:function(n){var t,e,r,u=n.length,i=n[0].length,o=1/u,a=[];for(e=0;i>e;++e){for(t=0,r=0;u>t;t++)r+=n[t][e][1];if(r)for(t=0;u>t;t++)n[t][e][1]/=r;else for(t=0;u>t;t++)n[t][e][1]=o}for(e=0;i>e;++e)a[e]=0;return a},zero:ni});Xo.layout.histogram=function(){function n(n,i){for(var o,a,c=[],s=n.map(e,this),l=r.call(this,s,i),f=u.call(this,l,s,i),i=-1,h=s.length,g=f.length-1,p=t?1:1/h;++i<g;)o=c[i]=[],o.dx=f[i+1]-(o.x=f[i]),o.y=0;if(g>0)for(i=-1;++i<h;)a=s[i],a>=l[0]&&a<=l[1]&&(o=c[Xo.bisect(f,a,1,g)-1],o.y+=p,o.push(n[i]));return c}var t=!0,e=Number,r=oi,u=ui;return n.value=function(t){return arguments.length?(e=t,n):e},n.range=function(t){return arguments.length?(r=_t(t),n):r},n.bins=function(t){return arguments.length?(u="number"==typeof t?function(n){return ii(n,t)}:_t(t),n):u},n.frequency=function(e){return arguments.length?(t=!!e,n):t},n},Xo.layout.tree=function(){function n(n,i){function o(n,t){var r=n.children,u=n._tree;if(r&&(i=r.length)){for(var i,a,s,l=r[0],f=l,h=-1;++h<i;)s=r[h],o(s,a),f=c(s,a,f),a=s;vi(n);var g=.5*(l._tree.prelim+s._tree.prelim);t?(u.prelim=t._tree.prelim+e(n,t),u.mod=u.prelim-g):u.prelim=g}else t&&(u.prelim=t._tree.prelim+e(n,t))}function a(n,t){n.x=n._tree.prelim+t;var e=n.children;if(e&&(r=e.length)){var r,u=-1;for(t+=n._tree.mod;++u<r;)a(e[u],t)}}function c(n,t,r){if(t){for(var 
u,i=n,o=n,a=t,c=n.parent.children[0],s=i._tree.mod,l=o._tree.mod,f=a._tree.mod,h=c._tree.mod;a=si(a),i=ci(i),a&&i;)c=ci(c),o=si(o),o._tree.ancestor=n,u=a._tree.prelim+f-i._tree.prelim-s+e(a,i),u>0&&(di(mi(a,n,r),n,u),s+=u,l+=u),f+=a._tree.mod,s+=i._tree.mod,h+=c._tree.mod,l+=o._tree.mod;a&&!si(o)&&(o._tree.thread=a,o._tree.mod+=f-l),i&&!ci(c)&&(c._tree.thread=i,c._tree.mod+=s-h,r=n)}return r}var s=t.call(this,n,i),l=s[0];pi(l,function(n,t){n._tree={ancestor:n,prelim:0,mod:0,change:0,shift:0,number:t?t._tree.number+1:0}}),o(l),a(l,-l._tree.prelim);var f=li(l,hi),h=li(l,fi),g=li(l,gi),p=f.x-e(f,h)/2,v=h.x+e(h,f)/2,d=g.depth||1;return pi(l,u?function(n){n.x*=r[0],n.y=n.depth*r[1],delete n._tree}:function(n){n.x=(n.x-p)/(v-p)*r[0],n.y=n.depth/d*r[1],delete n._tree}),s}var t=Xo.layout.hierarchy().sort(null).value(null),e=ai,r=[1,1],u=!1;return n.separation=function(t){return arguments.length?(e=t,n):e},n.size=function(t){return arguments.length?(u=null==(r=t),n):u?null:r},n.nodeSize=function(t){return arguments.length?(u=null!=(r=t),n):u?r:null},Vu(n,t)},Xo.layout.pack=function(){function n(n,i){var o=e.call(this,n,i),a=o[0],c=u[0],s=u[1],l=null==t?Math.sqrt:"function"==typeof t?t:function(){return t};if(a.x=a.y=0,pi(a,function(n){n.r=+l(n.value)}),pi(a,bi),r){var f=r*(t?1:Math.max(2*a.r/c,2*a.r/s))/2;pi(a,function(n){n.r+=f}),pi(a,bi),pi(a,function(n){n.r-=f})}return ki(a,c/2,s/2,t?1:1/Math.max(2*a.r/c,2*a.r/s)),o}var t,e=Xo.layout.hierarchy().sort(yi),r=0,u=[1,1];return n.size=function(t){return arguments.length?(u=t,n):u},n.radius=function(e){return arguments.length?(t=null==e||"function"==typeof e?e:+e,n):t},n.padding=function(t){return arguments.length?(r=+t,n):r},Vu(n,e)},Xo.layout.cluster=function(){function n(n,i){var o,a=t.call(this,n,i),c=a[0],s=0;pi(c,function(n){var t=n.children;t&&t.length?(n.x=Ci(t),n.y=Ai(t)):(n.x=o?s+=e(n,o):0,n.y=0,o=n)});var l=Ni(c),f=Li(c),h=l.x-e(l,f)/2,g=f.x+e(f,l)/2;return 
pi(c,u?function(n){n.x=(n.x-c.x)*r[0],n.y=(c.y-n.y)*r[1]}:function(n){n.x=(n.x-h)/(g-h)*r[0],n.y=(1-(c.y?n.y/c.y:1))*r[1]}),a}var t=Xo.layout.hierarchy().sort(null).value(null),e=ai,r=[1,1],u=!1;return n.separation=function(t){return arguments.length?(e=t,n):e},n.size=function(t){return arguments.length?(u=null==(r=t),n):u?null:r},n.nodeSize=function(t){return arguments.length?(u=null!=(r=t),n):u?r:null},Vu(n,t)},Xo.layout.treemap=function(){function n(n,t){for(var e,r,u=-1,i=n.length;++u<i;)r=(e=n[u]).value*(0>t?0:t),e.area=isNaN(r)||0>=r?0:r}function t(e){var i=e.children;if(i&&i.length){var o,a,c,s=f(e),l=[],h=i.slice(),p=1/0,v="slice"===g?s.dx:"dice"===g?s.dy:"slice-dice"===g?1&e.depth?s.dy:s.dx:Math.min(s.dx,s.dy);for(n(h,s.dx*s.dy/e.value),l.area=0;(c=h.length)>0;)l.push(o=h[c-1]),l.area+=o.area,"squarify"!==g||(a=r(l,v))<=p?(h.pop(),p=a):(l.area-=l.pop().area,u(l,v,s,!1),v=Math.min(s.dx,s.dy),l.length=l.area=0,p=1/0);l.length&&(u(l,v,s,!0),l.length=l.area=0),i.forEach(t)}}function e(t){var r=t.children;if(r&&r.length){var i,o=f(t),a=r.slice(),c=[];for(n(a,o.dx*o.dy/t.value),c.area=0;i=a.pop();)c.push(i),c.area+=i.area,null!=i.z&&(u(c,i.z?o.dx:o.dy,o,!a.length),c.length=c.area=0);r.forEach(e)}}function r(n,t){for(var e,r=n.area,u=0,i=1/0,o=-1,a=n.length;++o<a;)(e=n[o].area)&&(i>e&&(i=e),e>u&&(u=e));return r*=r,t*=t,r?Math.max(t*u*p/r,r/(t*i*p)):1/0}function u(n,t,e,r){var u,i=-1,o=n.length,a=e.x,s=e.y,l=t?c(n.area/t):0;if(t==e.dx){for((r||l>e.dy)&&(l=e.dy);++i<o;)u=n[i],u.x=a,u.y=s,u.dy=l,a+=u.dx=Math.min(e.x+e.dx-a,l?c(u.area/l):0);u.z=!0,u.dx+=e.x+e.dx-a,e.y+=l,e.dy-=l}else{for((r||l>e.dx)&&(l=e.dx);++i<o;)u=n[i],u.x=a,u.y=s,u.dx=l,s+=u.dy=Math.min(e.y+e.dy-s,l?c(u.area/l):0);u.z=!1,u.dy+=e.y+e.dy-s,e.x+=l,e.dx-=l}}function i(r){var u=o||a(r),i=u[0];return i.x=0,i.y=0,i.dx=s[0],i.dy=s[1],o&&a.revalue(i),n([i],i.dx*i.dy/i.value),(o?e:t)(i),h&&(o=u),u}var o,a=Xo.layout.hierarchy(),c=Math.round,s=[1,1],l=null,f=Ti,h=!1,g="squarify",p=.5*(1+Math.sqrt(5));return 
i.size=function(n){return arguments.length?(s=n,i):s},i.padding=function(n){function t(t){var e=n.call(i,t,t.depth);return null==e?Ti(t):qi(t,"number"==typeof e?[e,e,e,e]:e)}function e(t){return qi(t,n)}if(!arguments.length)return l;var r;return f=null==(l=n)?Ti:"function"==(r=typeof n)?t:"number"===r?(n=[n,n,n,n],e):e,i},i.round=function(n){return arguments.length?(c=n?Math.round:Number,i):c!=Number},i.sticky=function(n){return arguments.length?(h=n,o=null,i):h},i.ratio=function(n){return arguments.length?(p=n,i):p},i.mode=function(n){return arguments.length?(g=n+"",i):g},Vu(i,a)},Xo.random={normal:function(n,t){var e=arguments.length;return 2>e&&(t=1),1>e&&(n=0),function(){var e,r,u;do e=2*Math.random()-1,r=2*Math.random()-1,u=e*e+r*r;while(!u||u>1);return n+t*e*Math.sqrt(-2*Math.log(u)/u)}},logNormal:function(){var n=Xo.random.normal.apply(Xo,arguments);return function(){return Math.exp(n())}},bates:function(n){var t=Xo.random.irwinHall(n);return function(){return t()/n}},irwinHall:function(n){return function(){for(var t=0,e=0;n>e;e++)t+=Math.random();return t}}},Xo.scale={};var ls={floor:bt,ceil:bt};Xo.scale.linear=function(){return Hi([0,1],[0,1],fu,!1)};var fs={s:1,g:1,p:1,r:1,e:1};Xo.scale.log=function(){return $i(Xo.scale.linear().domain([0,1]),10,!0,[1,10])};var hs=Xo.format(".0e"),gs={floor:function(n){return-Math.ceil(-n)},ceil:function(n){return-Math.floor(-n)}};Xo.scale.pow=function(){return Bi(Xo.scale.linear(),1,[0,1])},Xo.scale.sqrt=function(){return Xo.scale.pow().exponent(.5)},Xo.scale.ordinal=function(){return Ji([],{t:"range",a:[[]]})},Xo.scale.category10=function(){return Xo.scale.ordinal().range(ps)},Xo.scale.category20=function(){return Xo.scale.ordinal().range(vs)},Xo.scale.category20b=function(){return Xo.scale.ordinal().range(ds)},Xo.scale.category20c=function(){return Xo.scale.ordinal().range(ms)};var 
ps=[2062260,16744206,2924588,14034728,9725885,9197131,14907330,8355711,12369186,1556175].map(ht),vs=[2062260,11454440,16744206,16759672,2924588,10018698,14034728,16750742,9725885,12955861,9197131,12885140,14907330,16234194,8355711,13092807,12369186,14408589,1556175,10410725].map(ht),ds=[3750777,5395619,7040719,10264286,6519097,9216594,11915115,13556636,9202993,12426809,15186514,15190932,8666169,11356490,14049643,15177372,8077683,10834324,13528509,14589654].map(ht),ms=[3244733,7057110,10406625,13032431,15095053,16616764,16625259,16634018,3253076,7652470,10607003,13101504,7695281,10394312,12369372,14342891,6513507,9868950,12434877,14277081].map(ht);Xo.scale.quantile=function(){return Gi([],[])},Xo.scale.quantize=function(){return Ki(0,1,[0,1])},Xo.scale.threshold=function(){return Qi([.5],[0,1])},Xo.scale.identity=function(){return no([0,1])},Xo.svg={},Xo.svg.arc=function(){function n(){var n=t.apply(this,arguments),i=e.apply(this,arguments),o=r.apply(this,arguments)+ys,a=u.apply(this,arguments)+ys,c=(o>a&&(c=o,o=a,a=c),a-o),s=Sa>c?"0":"1",l=Math.cos(o),f=Math.sin(o),h=Math.cos(a),g=Math.sin(a);return c>=xs?n?"M0,"+i+"A"+i+","+i+" 0 1,1 0,"+-i+"A"+i+","+i+" 0 1,1 0,"+i+"M0,"+n+"A"+n+","+n+" 0 1,0 0,"+-n+"A"+n+","+n+" 0 1,0 0,"+n+"Z":"M0,"+i+"A"+i+","+i+" 0 1,1 0,"+-i+"A"+i+","+i+" 0 1,1 0,"+i+"Z":n?"M"+i*l+","+i*f+"A"+i+","+i+" 0 "+s+",1 "+i*h+","+i*g+"L"+n*h+","+n*g+"A"+n+","+n+" 0 "+s+",0 "+n*l+","+n*f+"Z":"M"+i*l+","+i*f+"A"+i+","+i+" 0 "+s+",1 "+i*h+","+i*g+"L0,0"+"Z"}var t=to,e=eo,r=ro,u=uo;return n.innerRadius=function(e){return arguments.length?(t=_t(e),n):t},n.outerRadius=function(t){return arguments.length?(e=_t(t),n):e},n.startAngle=function(t){return arguments.length?(r=_t(t),n):r},n.endAngle=function(t){return arguments.length?(u=_t(t),n):u},n.centroid=function(){var n=(t.apply(this,arguments)+e.apply(this,arguments))/2,i=(r.apply(this,arguments)+u.apply(this,arguments))/2+ys;return[Math.cos(i)*n,Math.sin(i)*n]},n};var 
ys=-Ea,xs=ka-Aa;Xo.svg.line=function(){return io(bt)};var Ms=Xo.map({linear:oo,"linear-closed":ao,step:co,"step-before":so,"step-after":lo,basis:mo,"basis-open":yo,"basis-closed":xo,bundle:Mo,cardinal:go,"cardinal-open":fo,"cardinal-closed":ho,monotone:Eo});Ms.forEach(function(n,t){t.key=n,t.closed=/-closed$/.test(n)});var _s=[0,2/3,1/3,0],bs=[0,1/3,2/3,0],ws=[0,1/6,2/3,1/6];Xo.svg.line.radial=function(){var n=io(Ao);return n.radius=n.x,delete n.x,n.angle=n.y,delete n.y,n},so.reverse=lo,lo.reverse=so,Xo.svg.area=function(){return Co(bt)},Xo.svg.area.radial=function(){var n=Co(Ao);return n.radius=n.x,delete n.x,n.innerRadius=n.x0,delete n.x0,n.outerRadius=n.x1,delete n.x1,n.angle=n.y,delete n.y,n.startAngle=n.y0,delete n.y0,n.endAngle=n.y1,delete n.y1,n},Xo.svg.chord=function(){function n(n,a){var c=t(this,i,n,a),s=t(this,o,n,a);return"M"+c.p0+r(c.r,c.p1,c.a1-c.a0)+(e(c,s)?u(c.r,c.p1,c.r,c.p0):u(c.r,c.p1,s.r,s.p0)+r(s.r,s.p1,s.a1-s.a0)+u(s.r,s.p1,c.r,c.p0))+"Z"}function t(n,t,e,r){var u=t.call(n,e,r),i=a.call(n,u,r),o=c.call(n,u,r)+ys,l=s.call(n,u,r)+ys;return{r:i,a0:o,a1:l,p0:[i*Math.cos(o),i*Math.sin(o)],p1:[i*Math.cos(l),i*Math.sin(l)]}}function e(n,t){return n.a0==t.a0&&n.a1==t.a1}function r(n,t,e){return"A"+n+","+n+" 0 "+ +(e>Sa)+",1 "+t}function u(n,t,e,r){return"Q 0,0 "+r}var i=hr,o=gr,a=No,c=ro,s=uo;return n.radius=function(t){return arguments.length?(a=_t(t),n):a},n.source=function(t){return arguments.length?(i=_t(t),n):i},n.target=function(t){return arguments.length?(o=_t(t),n):o},n.startAngle=function(t){return arguments.length?(c=_t(t),n):c},n.endAngle=function(t){return arguments.length?(s=_t(t),n):s},n},Xo.svg.diagonal=function(){function n(n,u){var i=t.call(this,n,u),o=e.call(this,n,u),a=(i.y+o.y)/2,c=[i,{x:i.x,y:a},{x:o.x,y:a},o];return c=c.map(r),"M"+c[0]+"C"+c[1]+" "+c[2]+" "+c[3]}var t=hr,e=gr,r=Lo;return n.source=function(e){return arguments.length?(t=_t(e),n):t},n.target=function(t){return 
arguments.length?(e=_t(t),n):e},n.projection=function(t){return arguments.length?(r=t,n):r},n},Xo.svg.diagonal.radial=function(){var n=Xo.svg.diagonal(),t=Lo,e=n.projection;return n.projection=function(n){return arguments.length?e(To(t=n)):t},n},Xo.svg.symbol=function(){function n(n,r){return(Ss.get(t.call(this,n,r))||Ro)(e.call(this,n,r))}var t=zo,e=qo;return n.type=function(e){return arguments.length?(t=_t(e),n):t},n.size=function(t){return arguments.length?(e=_t(t),n):e},n};var Ss=Xo.map({circle:Ro,cross:function(n){var t=Math.sqrt(n/5)/2;return"M"+-3*t+","+-t+"H"+-t+"V"+-3*t+"H"+t+"V"+-t+"H"+3*t+"V"+t+"H"+t+"V"+3*t+"H"+-t+"V"+t+"H"+-3*t+"Z"},diamond:function(n){var t=Math.sqrt(n/(2*Cs)),e=t*Cs;return"M0,"+-t+"L"+e+",0"+" 0,"+t+" "+-e+",0"+"Z"},square:function(n){var t=Math.sqrt(n)/2;return"M"+-t+","+-t+"L"+t+","+-t+" "+t+","+t+" "+-t+","+t+"Z"},"triangle-down":function(n){var t=Math.sqrt(n/As),e=t*As/2;return"M0,"+e+"L"+t+","+-e+" "+-t+","+-e+"Z"},"triangle-up":function(n){var t=Math.sqrt(n/As),e=t*As/2;return"M0,"+-e+"L"+t+","+e+" "+-t+","+e+"Z"}});Xo.svg.symbolTypes=Ss.keys();var ks,Es,As=Math.sqrt(3),Cs=Math.tan(30*Na),Ns=[],Ls=0;Ns.call=da.call,Ns.empty=da.empty,Ns.node=da.node,Ns.size=da.size,Xo.transition=function(n){return arguments.length?ks?n.transition():n:xa.transition()},Xo.transition.prototype=Ns,Ns.select=function(n){var t,e,r,u=this.id,i=[];n=M(n);for(var o=-1,a=this.length;++o<a;){i.push(t=[]);for(var c=this[o],s=-1,l=c.length;++s<l;)(r=c[s])&&(e=n.call(r,r.__data__,s,o))?("__data__"in r&&(e.__data__=r.__data__),jo(e,s,u,r.__transition__[u]),t.push(e)):t.push(null)}return Do(i,u)},Ns.selectAll=function(n){var t,e,r,u,i,o=this.id,a=[];n=_(n);for(var c=-1,s=this.length;++c<s;)for(var l=this[c],f=-1,h=l.length;++f<h;)if(r=l[f]){i=r.__transition__[o],e=n.call(r,r.__data__,f,c),a.push(t=[]);for(var g=-1,p=e.length;++g<p;)(u=e[g])&&jo(u,g,o,i),t.push(u)}return Do(a,o)},Ns.filter=function(n){var t,e,r,u=[];"function"!=typeof n&&(n=q(n));for(var 
i=0,o=this.length;o>i;i++){u.push(t=[]);for(var e=this[i],a=0,c=e.length;c>a;a++)(r=e[a])&&n.call(r,r.__data__,a,i)&&t.push(r)}return Do(u,this.id)},Ns.tween=function(n,t){var e=this.id;return arguments.length<2?this.node().__transition__[e].tween.get(n):R(this,null==t?function(t){t.__transition__[e].tween.remove(n)}:function(r){r.__transition__[e].tween.set(n,t)})},Ns.attr=function(n,t){function e(){this.removeAttribute(a)}function r(){this.removeAttributeNS(a.space,a.local)}function u(n){return null==n?e:(n+="",function(){var t,e=this.getAttribute(a);return e!==n&&(t=o(e,n),function(n){this.setAttribute(a,t(n))})})}function i(n){return null==n?r:(n+="",function(){var t,e=this.getAttributeNS(a.space,a.local);return e!==n&&(t=o(e,n),function(n){this.setAttributeNS(a.space,a.local,t(n))})})}if(arguments.length<2){for(t in n)this.attr(t,n[t]);return this}var o="transform"==n?Ru:fu,a=Xo.ns.qualify(n);return Po(this,"attr."+n,t,a.local?i:u)},Ns.attrTween=function(n,t){function e(n,e){var r=t.call(this,n,e,this.getAttribute(u));return r&&function(n){this.setAttribute(u,r(n))}}function r(n,e){var r=t.call(this,n,e,this.getAttributeNS(u.space,u.local));return r&&function(n){this.setAttributeNS(u.space,u.local,r(n))}}var u=Xo.ns.qualify(n);return this.tween("attr."+n,u.local?r:e)},Ns.style=function(n,t,e){function r(){this.style.removeProperty(n)}function u(t){return null==t?r:(t+="",function(){var r,u=Go.getComputedStyle(this,null).getPropertyValue(n);return u!==t&&(r=fu(u,t),function(t){this.style.setProperty(n,r(t),e)})})}var i=arguments.length;if(3>i){if("string"!=typeof n){2>i&&(t="");for(e in n)this.style(e,n[e],t);return this}e=""}return Po(this,"style."+n,t,u)},Ns.styleTween=function(n,t,e){function r(r,u){var i=t.call(this,r,u,Go.getComputedStyle(this,null).getPropertyValue(n));return i&&function(t){this.style.setProperty(n,i(t),e)}}return arguments.length<3&&(e=""),this.tween("style."+n,r)},Ns.text=function(n){return 
Po(this,"text",n,Uo)},Ns.remove=function(){return this.each("end.transition",function(){var n;this.__transition__.count<2&&(n=this.parentNode)&&n.removeChild(this)})},Ns.ease=function(n){var t=this.id;return arguments.length<1?this.node().__transition__[t].ease:("function"!=typeof n&&(n=Xo.ease.apply(Xo,arguments)),R(this,function(e){e.__transition__[t].ease=n}))},Ns.delay=function(n){var t=this.id;return R(this,"function"==typeof n?function(e,r,u){e.__transition__[t].delay=+n.call(e,e.__data__,r,u)}:(n=+n,function(e){e.__transition__[t].delay=n}))},Ns.duration=function(n){var t=this.id;return R(this,"function"==typeof n?function(e,r,u){e.__transition__[t].duration=Math.max(1,n.call(e,e.__data__,r,u))}:(n=Math.max(1,n),function(e){e.__transition__[t].duration=n}))},Ns.each=function(n,t){var e=this.id;if(arguments.length<2){var r=Es,u=ks;ks=e,R(this,function(t,r,u){Es=t.__transition__[e],n.call(t,t.__data__,r,u)}),Es=r,ks=u}else R(this,function(r){var u=r.__transition__[e];(u.event||(u.event=Xo.dispatch("start","end"))).on(n,t)});return this},Ns.transition=function(){for(var n,t,e,r,u=this.id,i=++Ls,o=[],a=0,c=this.length;c>a;a++){o.push(n=[]);for(var t=this[a],s=0,l=t.length;l>s;s++)(e=t[s])&&(r=Object.create(e.__transition__[u]),r.delay+=r.duration,jo(e,s,i,r)),n.push(e)}return Do(o,i)},Xo.svg.axis=function(){function n(n){n.each(function(){var n,s=Xo.select(this),l=this.__chart__||e,f=this.__chart__=e.copy(),h=null==c?f.ticks?f.ticks.apply(f,a):f.domain():c,g=null==t?f.tickFormat?f.tickFormat.apply(f,a):bt:t,p=s.selectAll(".tick").data(h,f),v=p.enter().insert("g",".domain").attr("class","tick").style("opacity",Aa),d=Xo.transition(p.exit()).style("opacity",Aa).remove(),m=Xo.transition(p).style("opacity",1),y=Ri(f),x=s.selectAll(".domain").data([0]),M=(x.enter().append("path").attr("class","domain"),Xo.transition(x));v.append("line"),v.append("text");var 
_=v.select("line"),b=m.select("line"),w=p.select("text").text(g),S=v.select("text"),k=m.select("text");switch(r){case"bottom":n=Ho,_.attr("y2",u),S.attr("y",Math.max(u,0)+o),b.attr("x2",0).attr("y2",u),k.attr("x",0).attr("y",Math.max(u,0)+o),w.attr("dy",".71em").style("text-anchor","middle"),M.attr("d","M"+y[0]+","+i+"V0H"+y[1]+"V"+i);break;case"top":n=Ho,_.attr("y2",-u),S.attr("y",-(Math.max(u,0)+o)),b.attr("x2",0).attr("y2",-u),k.attr("x",0).attr("y",-(Math.max(u,0)+o)),w.attr("dy","0em").style("text-anchor","middle"),M.attr("d","M"+y[0]+","+-i+"V0H"+y[1]+"V"+-i);break;case"left":n=Fo,_.attr("x2",-u),S.attr("x",-(Math.max(u,0)+o)),b.attr("x2",-u).attr("y2",0),k.attr("x",-(Math.max(u,0)+o)).attr("y",0),w.attr("dy",".32em").style("text-anchor","end"),M.attr("d","M"+-i+","+y[0]+"H0V"+y[1]+"H"+-i);break;case"right":n=Fo,_.attr("x2",u),S.attr("x",Math.max(u,0)+o),b.attr("x2",u).attr("y2",0),k.attr("x",Math.max(u,0)+o).attr("y",0),w.attr("dy",".32em").style("text-anchor","start"),M.attr("d","M"+i+","+y[0]+"H0V"+y[1]+"H"+i)}if(f.rangeBand){var E=f,A=E.rangeBand()/2;l=f=function(n){return E(n)+A}}else l.rangeBand?l=f:d.call(n,f);v.call(n,l),m.call(n,f)})}var t,e=Xo.scale.linear(),r=Ts,u=6,i=6,o=3,a=[10],c=null;return n.scale=function(t){return arguments.length?(e=t,n):e},n.orient=function(t){return arguments.length?(r=t in qs?t+"":Ts,n):r},n.ticks=function(){return arguments.length?(a=arguments,n):a},n.tickValues=function(t){return arguments.length?(c=t,n):c},n.tickFormat=function(e){return arguments.length?(t=e,n):t},n.tickSize=function(t){var e=arguments.length;return e?(u=+t,i=+arguments[e-1],n):u},n.innerTickSize=function(t){return arguments.length?(u=+t,n):u},n.outerTickSize=function(t){return arguments.length?(i=+t,n):i},n.tickPadding=function(t){return arguments.length?(o=+t,n):o},n.tickSubdivide=function(){return arguments.length&&n},n};var Ts="bottom",qs={top:1,right:1,bottom:1,left:1};Xo.svg.brush=function(){function n(i){i.each(function(){var 
i=Xo.select(this).style("pointer-events","all").style("-webkit-tap-highlight-color","rgba(0,0,0,0)").on("mousedown.brush",u).on("touchstart.brush",u),o=i.selectAll(".background").data([0]);o.enter().append("rect").attr("class","background").style("visibility","hidden").style("cursor","crosshair"),i.selectAll(".extent").data([0]).enter().append("rect").attr("class","extent").style("cursor","move");var a=i.selectAll(".resize").data(p,bt);a.exit().remove(),a.enter().append("g").attr("class",function(n){return"resize "+n}).style("cursor",function(n){return zs[n]}).append("rect").attr("x",function(n){return/[ew]$/.test(n)?-3:null}).attr("y",function(n){return/^[ns]/.test(n)?-3:null}).attr("width",6).attr("height",6).style("visibility","hidden"),a.style("display",n.empty()?"none":null);var l,f=Xo.transition(i),h=Xo.transition(o);c&&(l=Ri(c),h.attr("x",l[0]).attr("width",l[1]-l[0]),e(f)),s&&(l=Ri(s),h.attr("y",l[0]).attr("height",l[1]-l[0]),r(f)),t(f)})}function t(n){n.selectAll(".resize").attr("transform",function(n){return"translate("+l[+/e$/.test(n)]+","+f[+/^s/.test(n)]+")"})}function e(n){n.select(".extent").attr("x",l[0]),n.selectAll(".extent,.n>rect,.s>rect").attr("width",l[1]-l[0])}function r(n){n.select(".extent").attr("y",f[0]),n.selectAll(".extent,.e>rect,.w>rect").attr("height",f[1]-f[0])}function u(){function u(){32==Xo.event.keyCode&&(C||(x=null,L[0]-=l[1],L[1]-=f[1],C=2),d())}function p(){32==Xo.event.keyCode&&2==C&&(L[0]+=l[1],L[1]+=f[1],C=0,d())}function v(){var n=Xo.mouse(_),u=!1;M&&(n[0]+=M[0],n[1]+=M[1]),C||(Xo.event.altKey?(x||(x=[(l[0]+l[1])/2,(f[0]+f[1])/2]),L[0]=l[+(n[0]<x[0])],L[1]=f[+(n[1]<x[1])]):x=null),E&&m(n,c,0)&&(e(S),u=!0),A&&m(n,s,1)&&(r(S),u=!0),u&&(t(S),w({type:"brush",mode:C?"move":"resize"}))}function m(n,t,e){var r,u,a=Ri(t),c=a[0],s=a[1],p=L[e],v=e?f:l,d=v[1]-v[0];return 
C&&(c-=p,s-=d+p),r=(e?g:h)?Math.max(c,Math.min(s,n[e])):n[e],C?u=(r+=p)+d:(x&&(p=Math.max(c,Math.min(s,2*x[e]-r))),r>p?(u=r,r=p):u=p),v[0]!=r||v[1]!=u?(e?o=null:i=null,v[0]=r,v[1]=u,!0):void 0}function y(){v(),S.style("pointer-events","all").selectAll(".resize").style("display",n.empty()?"none":null),Xo.select("body").style("cursor",null),T.on("mousemove.brush",null).on("mouseup.brush",null).on("touchmove.brush",null).on("touchend.brush",null).on("keydown.brush",null).on("keyup.brush",null),N(),w({type:"brushend"})}var x,M,_=this,b=Xo.select(Xo.event.target),w=a.of(_,arguments),S=Xo.select(_),k=b.datum(),E=!/^(n|s)$/.test(k)&&c,A=!/^(e|w)$/.test(k)&&s,C=b.classed("extent"),N=O(),L=Xo.mouse(_),T=Xo.select(Go).on("keydown.brush",u).on("keyup.brush",p);if(Xo.event.changedTouches?T.on("touchmove.brush",v).on("touchend.brush",y):T.on("mousemove.brush",v).on("mouseup.brush",y),S.interrupt().selectAll("*").interrupt(),C)L[0]=l[0]-L[0],L[1]=f[0]-L[1];else if(k){var q=+/w$/.test(k),z=+/^n/.test(k);M=[l[1-q]-L[0],f[1-z]-L[1]],L[0]=l[q],L[1]=f[z]}else Xo.event.altKey&&(x=L.slice());S.style("pointer-events","none").selectAll(".resize").style("display",null),Xo.select("body").style("cursor",b.style("cursor")),w({type:"brushstart"}),v()}var i,o,a=y(n,"brushstart","brush","brushend"),c=null,s=null,l=[0,0],f=[0,0],h=!0,g=!0,p=Rs[0];return n.event=function(n){n.each(function(){var n=a.of(this,arguments),t={x:l,y:f,i:i,j:o},e=this.__chart__||t;this.__chart__=t,ks?Xo.select(this).transition().each("start.brush",function(){i=e.i,o=e.j,l=e.x,f=e.y,n({type:"brushstart"})}).tween("brush:brush",function(){var e=hu(l,t.x),r=hu(f,t.y);return i=o=null,function(u){l=t.x=e(u),f=t.y=r(u),n({type:"brush",mode:"resize"})}}).each("end.brush",function(){i=t.i,o=t.j,n({type:"brush",mode:"resize"}),n({type:"brushend"})}):(n({type:"brushstart"}),n({type:"brush",mode:"resize"}),n({type:"brushend"}))})},n.x=function(t){return arguments.length?(c=t,p=Rs[!c<<1|!s],n):c},n.y=function(t){return 
arguments.length?(s=t,p=Rs[!c<<1|!s],n):s},n.clamp=function(t){return arguments.length?(c&&s?(h=!!t[0],g=!!t[1]):c?h=!!t:s&&(g=!!t),n):c&&s?[h,g]:c?h:s?g:null},n.extent=function(t){var e,r,u,a,h;return arguments.length?(c&&(e=t[0],r=t[1],s&&(e=e[0],r=r[0]),i=[e,r],c.invert&&(e=c(e),r=c(r)),e>r&&(h=e,e=r,r=h),(e!=l[0]||r!=l[1])&&(l=[e,r])),s&&(u=t[0],a=t[1],c&&(u=u[1],a=a[1]),o=[u,a],s.invert&&(u=s(u),a=s(a)),u>a&&(h=u,u=a,a=h),(u!=f[0]||a!=f[1])&&(f=[u,a])),n):(c&&(i?(e=i[0],r=i[1]):(e=l[0],r=l[1],c.invert&&(e=c.invert(e),r=c.invert(r)),e>r&&(h=e,e=r,r=h))),s&&(o?(u=o[0],a=o[1]):(u=f[0],a=f[1],s.invert&&(u=s.invert(u),a=s.invert(a)),u>a&&(h=u,u=a,a=h))),c&&s?[[e,u],[r,a]]:c?[e,r]:s&&[u,a])},n.clear=function(){return n.empty()||(l=[0,0],f=[0,0],i=o=null),n},n.empty=function(){return!!c&&l[0]==l[1]||!!s&&f[0]==f[1]},Xo.rebind(n,a,"on")};var zs={n:"ns-resize",e:"ew-resize",s:"ns-resize",w:"ew-resize",nw:"nwse-resize",ne:"nesw-resize",se:"nwse-resize",sw:"nesw-resize"},Rs=[["n","e","s","w","nw","ne","se","sw"],["e","w"],["n","s"],[]],Ds=tc.format=ac.timeFormat,Ps=Ds.utc,Us=Ps("%Y-%m-%dT%H:%M:%S.%LZ");Ds.iso=Date.prototype.toISOString&&+new Date("2000-01-01T00:00:00.000Z")?Oo:Us,Oo.parse=function(n){var t=new Date(n);return isNaN(t)?null:t},Oo.toString=Us.toString,tc.second=Rt(function(n){return new ec(1e3*Math.floor(n/1e3))},function(n,t){n.setTime(n.getTime()+1e3*Math.floor(t))},function(n){return n.getSeconds()}),tc.seconds=tc.second.range,tc.seconds.utc=tc.second.utc.range,tc.minute=Rt(function(n){return new ec(6e4*Math.floor(n/6e4))},function(n,t){n.setTime(n.getTime()+6e4*Math.floor(t))},function(n){return n.getMinutes()}),tc.minutes=tc.minute.range,tc.minutes.utc=tc.minute.utc.range,tc.hour=Rt(function(n){var t=n.getTimezoneOffset()/60;return new ec(36e5*(Math.floor(n/36e5-t)+t))},function(n,t){n.setTime(n.getTime()+36e5*Math.floor(t))},function(n){return n.getHours()}),tc.hours=tc.hour.range,tc.hours.utc=tc.hour.utc.range,tc.month=Rt(function(n){return 
n=tc.day(n),n.setDate(1),n},function(n,t){n.setMonth(n.getMonth()+t)},function(n){return n.getMonth()}),tc.months=tc.month.range,tc.months.utc=tc.month.utc.range;var js=[1e3,5e3,15e3,3e4,6e4,3e5,9e5,18e5,36e5,108e5,216e5,432e5,864e5,1728e5,6048e5,2592e6,7776e6,31536e6],Hs=[[tc.second,1],[tc.second,5],[tc.second,15],[tc.second,30],[tc.minute,1],[tc.minute,5],[tc.minute,15],[tc.minute,30],[tc.hour,1],[tc.hour,3],[tc.hour,6],[tc.hour,12],[tc.day,1],[tc.day,2],[tc.week,1],[tc.month,1],[tc.month,3],[tc.year,1]],Fs=Ds.multi([[".%L",function(n){return n.getMilliseconds()}],[":%S",function(n){return n.getSeconds()}],["%I:%M",function(n){return n.getMinutes()}],["%I %p",function(n){return n.getHours()}],["%a %d",function(n){return n.getDay()&&1!=n.getDate()}],["%b %d",function(n){return 1!=n.getDate()}],["%B",function(n){return n.getMonth()}],["%Y",be]]),Os={range:function(n,t,e){return Xo.range(Math.ceil(n/e)*e,+t,e).map(Io)},floor:bt,ceil:bt};Hs.year=tc.year,tc.scale=function(){return Yo(Xo.scale.linear(),Hs,Fs)};var Ys=Hs.map(function(n){return[n[0].utc,n[1]]}),Is=Ps.multi([[".%L",function(n){return n.getUTCMilliseconds()}],[":%S",function(n){return n.getUTCSeconds()}],["%I:%M",function(n){return n.getUTCMinutes()}],["%I %p",function(n){return n.getUTCHours()}],["%a %d",function(n){return n.getUTCDay()&&1!=n.getUTCDate()}],["%b %d",function(n){return 1!=n.getUTCDate()}],["%B",function(n){return n.getUTCMonth()}],["%Y",be]]);Ys.year=tc.year.utc,tc.scale.utc=function(){return Yo(Xo.scale.linear(),Ys,Is)},Xo.text=wt(function(n){return n.responseText}),Xo.json=function(n,t){return St(n,"application/json",Zo,t)},Xo.html=function(n,t){return St(n,"text/html",Vo,t)},Xo.xml=wt(function(n){return n.responseXML}),"function"==typeof define&&define.amd?define(Xo):"object"==typeof module&&module.exports?module.exports=Xo:this.d3=Xo}();'use strict';(function(window){window.define=undefined;}).call(this,this);'use strict';tr.exportTo('tr.ui.b',function(){const 
DataSeriesEnableChangeEventType='data-series-enabled-change';const THIS_DOC=document.currentScript.ownerDocument;const svgNS='http://www.w3.org/2000/svg';const ColorScheme=tr.b.ColorScheme;function getColorOfKey(key,selected){let id=ColorScheme.getColorIdForGeneralPurposeString(key);if(selected){id+=ColorScheme.properties.brightenedOffsets[0];} +return this.rangeOfInterest_;},get rangeOfInterest(){return this.rangeOfInterest_;},set rangeOfInterest(rangeOfInterest){this.rangeOfInterest_=rangeOfInterest;this.updateContents_();},get selection(){},set selection(_){},get textLabel(){return'Frame Data';},get model(){return this.model_;},set model(model){this.model_=model;this.updateContents_();}});tr.ui.side_panel.SidePanelRegistry.register(function(){return document.createElement('tr-ui-e-s-frame-data-side-panel');});});'use strict';Polymer({is:'tr-ui-b-chart-legend-key',ready(){this.$.checkbox.addEventListener('change',this.onCheckboxChange_.bind(this));},onCheckboxChange_(){tr.b.dispatchSimpleEvent(this,tr.ui.b.DataSeriesEnableChangeEventType,true,false,{key:Polymer.dom(this).textContent,enabled:this.enabled});},set textContent(t){Polymer.dom(this.$.label).textContent=t;Polymer.dom(this.$.link).textContent=t;this.updateContents_();},set width(w){w-=20;this.$.link.style.width=w+'px';this.$.label.style.width=w+'px';},get textContent(){return Polymer.dom(this.$.label).textContent;},set optional(optional){this.$.checkbox.style.visibility=optional?'visible':'hidden';},get optional(){return this.$.checkbox.style.visibility==='visible';},set enabled(enabled){this.$.checkbox.checked=enabled?'checked':'';},get enabled(){return this.$.checkbox.checked;},set color(c){this.$.label.style.color=c;this.$.link.color=c;},set target(target){this.$.link.setSelectionAndContent(target,Polymer.dom(this.$.label).textContent);this.updateContents_();},get target(){return this.$.link.selection;},set 
title(title){this.$.link.title=title;},updateContents_(){this.$.link.style.display=this.target?'':'none';this.$.label.style.display=this.target?'none':'';this.$.label.htmlFor=this.optional?'checkbox':'';}});'use strict';(function(window){window.define=function(x){window.d3=x;};window.define.amd=true;})(this);!function(){function n(n){return null!=n&&!isNaN(n)}function t(n){return n.length}function e(n){for(var t=1;n*t%1;)t*=10;return t}function r(n,t){try{for(var e in t)Object.defineProperty(n.prototype,e,{value:t[e],enumerable:!1})}catch(r){n.prototype=t}}function u(){}function i(n){return aa+n in this}function o(n){return n=aa+n,n in this&&delete this[n]}function a(){var n=[];return this.forEach(function(t){n.push(t)}),n}function c(){var n=0;for(var t in this)t.charCodeAt(0)===ca&&++n;return n}function s(){for(var n in this)if(n.charCodeAt(0)===ca)return!1;return!0}function l(){}function f(n,t,e){return function(){var r=e.apply(t,arguments);return r===t?n:r}}function h(n,t){if(t in n)return t;t=t.charAt(0).toUpperCase()+t.substring(1);for(var e=0,r=sa.length;r>e;++e){var u=sa[e]+t;if(u in n)return u}}function g(){}function p(){}function v(n){function t(){for(var t,r=e,u=-1,i=r.length;++u<i;)(t=r[u].on)&&t.apply(this,arguments);return n}var e=[],r=new u;return t.on=function(t,u){var i,o=r.get(t);return arguments.length<2?o&&o.on:(o&&(o.on=null,e=e.slice(0,i=e.indexOf(o)).concat(e.slice(i+1)),r.remove(t)),u&&e.push(r.set(t,{on:u})),n)},t}function d(){Xo.event.preventDefault()}function m(){for(var n,t=Xo.event;n=t.sourceEvent;)t=n;return t}function y(n){for(var t=new p,e=0,r=arguments.length;++e<r;)t[arguments[e]]=v(t);return t.of=function(e,r){return function(u){try{var i=u.sourceEvent=Xo.event;u.target=n,Xo.event=u,t[u.type].apply(e,r)}finally{Xo.event=i}}},t}function x(n){return fa(n,da),n}function M(n){return"function"==typeof n?n:function(){return ha(n,this)}}function _(n){return"function"==typeof n?n:function(){return ga(n,this)}}function b(n,t){function 
e(){this.removeAttribute(n)}function r(){this.removeAttributeNS(n.space,n.local)}function u(){this.setAttribute(n,t)}function i(){this.setAttributeNS(n.space,n.local,t)}function o(){var e=t.apply(this,arguments);null==e?this.removeAttribute(n):this.setAttribute(n,e)}function a(){var e=t.apply(this,arguments);null==e?this.removeAttributeNS(n.space,n.local):this.setAttributeNS(n.space,n.local,e)}return n=Xo.ns.qualify(n),null==t?n.local?r:e:"function"==typeof t?n.local?a:o:n.local?i:u}function w(n){return n.trim().replace(/\s+/g," ")}function S(n){return new RegExp("(?:^|\\s+)"+Xo.requote(n)+"(?:\\s+|$)","g")}function k(n){return n.trim().split(/^|\s+/)}function E(n,t){function e(){for(var e=-1;++e<u;)n[e](this,t)}function r(){for(var e=-1,r=t.apply(this,arguments);++e<u;)n[e](this,r)}n=k(n).map(A);var u=n.length;return"function"==typeof t?r:e}function A(n){var t=S(n);return function(e,r){if(u=e.classList)return r?u.add(n):u.remove(n);var u=e.getAttribute("class")||"";r?(t.lastIndex=0,t.test(u)||e.setAttribute("class",w(u+" "+n))):e.setAttribute("class",w(u.replace(t," ")))}}function C(n,t,e){function r(){this.style.removeProperty(n)}function u(){this.style.setProperty(n,t,e)}function i(){var r=t.apply(this,arguments);null==r?this.style.removeProperty(n):this.style.setProperty(n,r,e)}return null==t?r:"function"==typeof t?i:u}function N(n,t){function e(){delete this[n]}function r(){this[n]=t}function u(){var e=t.apply(this,arguments);null==e?delete this[n]:this[n]=e}return null==t?e:"function"==typeof t?u:r}function L(n){return"function"==typeof n?n:(n=Xo.ns.qualify(n)).local?function(){return this.ownerDocument.createElementNS(n.space,n.local)}:function(){return this.ownerDocument.createElementNS(this.namespaceURI,n)}}function T(n){return{__data__:n}}function q(n){return function(){return va(this,n)}}function z(n){return arguments.length||(n=Xo.ascending),function(t,e){return t&&e?n(t.__data__,e.__data__):!t-!e}}function R(n,t){for(var e=0,r=n.length;r>e;e++)for(var 
u,i=n[e],o=0,a=i.length;a>o;o++)(u=i[o])&&t(u,o,e);return n}function D(n){return fa(n,ya),n}function P(n){var t,e;return function(r,u,i){var o,a=n[i].update,c=a.length;for(i!=e&&(e=i,t=0),u>=t&&(t=u+1);!(o=a[t])&&++t<c;);return o}}function U(){var n=this.__transition__;n&&++n.active}function j(n,t,e){function r(){var t=this[o];t&&(this.removeEventListener(n,t,t.$),delete this[o])}function u(){var u=c(t,Bo(arguments));r.call(this),this.addEventListener(n,this[o]=u,u.$=e),u._=t}function i(){var t,e=new RegExp("^__on([^.]+)"+Xo.requote(n)+"$");for(var r in this)if(t=r.match(e)){var u=this[r];this.removeEventListener(t[1],u,u.$),delete this[r]}}var o="__on"+n,a=n.indexOf("."),c=H;a>0&&(n=n.substring(0,a));var s=Ma.get(n);return s&&(n=s,c=F),a?t?u:r:t?g:i}function H(n,t){return function(e){var r=Xo.event;Xo.event=e,t[0]=this.__data__;try{n.apply(this,t)}finally{Xo.event=r}}}function F(n,t){var e=H(n,t);return function(n){var t=this,r=n.relatedTarget;r&&(r===t||8&r.compareDocumentPosition(t))||e.call(t,n)}}function O(){var n=".dragsuppress-"+ ++ba,t="click"+n,e=Xo.select(Go).on("touchmove"+n,d).on("dragstart"+n,d).on("selectstart"+n,d);if(_a){var r=Jo.style,u=r[_a];r[_a]="none"}return function(i){function o(){e.on(t,null)}e.on(n,null),_a&&(r[_a]=u),i&&(e.on(t,function(){d(),o()},!0),setTimeout(o,0))}}function Y(n,t){t.changedTouches&&(t=t.changedTouches[0]);var e=n.ownerSVGElement||n;if(e.createSVGPoint){var r=e.createSVGPoint();if(0>wa&&(Go.scrollX||Go.scrollY)){e=Xo.select("body").append("svg").style({position:"absolute",top:0,left:0,margin:0,padding:0,border:"none"},"important");var u=e[0][0].getScreenCTM();wa=!(u.f||u.e),e.remove()}return wa?(r.x=t.pageX,r.y=t.pageY):(r.x=t.clientX,r.y=t.clientY),r=r.matrixTransform(n.getScreenCTM().inverse()),[r.x,r.y]}var i=n.getBoundingClientRect();return[t.clientX-i.left-n.clientLeft,t.clientY-i.top-n.clientTop]}function I(n){return n>0?1:0>n?-1:0}function Z(n,t,e){return(t[0]-n[0])*(e[1]-n[1])-(t[1]-n[1])*(e[0]-n[0])}function 
V(n){return n>1?0:-1>n?Sa:Math.acos(n)}function X(n){return n>1?Ea:-1>n?-Ea:Math.asin(n)}function $(n){return((n=Math.exp(n))-1/n)/2}function B(n){return((n=Math.exp(n))+1/n)/2}function W(n){return((n=Math.exp(2*n))-1)/(n+1)}function J(n){return(n=Math.sin(n/2))*n}function G(){}function K(n,t,e){return new Q(n,t,e)}function Q(n,t,e){this.h=n,this.s=t,this.l=e}function nt(n,t,e){function r(n){return n>360?n-=360:0>n&&(n+=360),60>n?i+(o-i)*n/60:180>n?o:240>n?i+(o-i)*(240-n)/60:i}function u(n){return Math.round(255*r(n))}var i,o;return n=isNaN(n)?0:(n%=360)<0?n+360:n,t=isNaN(t)?0:0>t?0:t>1?1:t,e=0>e?0:e>1?1:e,o=.5>=e?e*(1+t):e+t-e*t,i=2*e-o,gt(u(n+120),u(n),u(n-120))}function tt(n,t,e){return new et(n,t,e)}function et(n,t,e){this.h=n,this.c=t,this.l=e}function rt(n,t,e){return isNaN(n)&&(n=0),isNaN(t)&&(t=0),ut(e,Math.cos(n*=Na)*t,Math.sin(n)*t)}function ut(n,t,e){return new it(n,t,e)}function it(n,t,e){this.l=n,this.a=t,this.b=e}function ot(n,t,e){var r=(n+16)/116,u=r+t/500,i=r-e/200;return u=ct(u)*Fa,r=ct(r)*Oa,i=ct(i)*Ya,gt(lt(3.2404542*u-1.5371385*r-.4985314*i),lt(-.969266*u+1.8760108*r+.041556*i),lt(.0556434*u-.2040259*r+1.0572252*i))}function at(n,t,e){return n>0?tt(Math.atan2(e,t)*La,Math.sqrt(t*t+e*e),n):tt(0/0,0/0,n)}function ct(n){return n>.206893034?n*n*n:(n-4/29)/7.787037}function st(n){return n>.008856?Math.pow(n,1/3):7.787037*n+4/29}function lt(n){return Math.round(255*(.00304>=n?12.92*n:1.055*Math.pow(n,1/2.4)-.055))}function ft(n){return gt(n>>16,255&n>>8,255&n)}function ht(n){return ft(n)+""}function gt(n,t,e){return new pt(n,t,e)}function pt(n,t,e){this.r=n,this.g=t,this.b=e}function vt(n){return 16>n?"0"+Math.max(0,n).toString(16):Math.min(255,n).toString(16)}function dt(n,t,e){var r,u,i,o,a=0,c=0,s=0;if(u=/([a-z]+)\((.*)\)/i.exec(n))switch(i=u[2].split(","),u[1]){case"hsl":return e(parseFloat(i[0]),parseFloat(i[1])/100,parseFloat(i[2])/100);case"rgb":return 
t(Mt(i[0]),Mt(i[1]),Mt(i[2]))}return(o=Va.get(n))?t(o.r,o.g,o.b):(null!=n&&"#"===n.charAt(0)&&(r=parseInt(n.substring(1),16),isNaN(r)||(4===n.length?(a=(3840&r)>>4,a=a>>4|a,c=240&r,c=c>>4|c,s=15&r,s=s<<4|s):7===n.length&&(a=(16711680&r)>>16,c=(65280&r)>>8,s=255&r))),t(a,c,s))}function mt(n,t,e){var r,u,i=Math.min(n/=255,t/=255,e/=255),o=Math.max(n,t,e),a=o-i,c=(o+i)/2;return a?(u=.5>c?a/(o+i):a/(2-o-i),r=n==o?(t-e)/a+(e>t?6:0):t==o?(e-n)/a+2:(n-t)/a+4,r*=60):(r=0/0,u=c>0&&1>c?0:r),K(r,u,c)}function yt(n,t,e){n=xt(n),t=xt(t),e=xt(e);var r=st((.4124564*n+.3575761*t+.1804375*e)/Fa),u=st((.2126729*n+.7151522*t+.072175*e)/Oa),i=st((.0193339*n+.119192*t+.9503041*e)/Ya);return ut(116*u-16,500*(r-u),200*(u-i))}function xt(n){return(n/=255)<=.04045?n/12.92:Math.pow((n+.055)/1.055,2.4)}function Mt(n){var t=parseFloat(n);return"%"===n.charAt(n.length-1)?Math.round(2.55*t):t}function _t(n){return"function"==typeof n?n:function(){return n}}function bt(n){return n}function wt(n){return function(t,e,r){return 2===arguments.length&&"function"==typeof e&&(r=e,e=null),St(t,e,n,r)}}function St(n,t,e,r){function u(){var n,t=c.status;if(!t&&c.responseText||t>=200&&300>t||304===t){try{n=e.call(i,c)}catch(r){return o.error.call(i,r),void 0}o.load.call(i,n)}else o.error.call(i,c)}var i={},o=Xo.dispatch("beforesend","progress","load","error"),a={},c=new XMLHttpRequest,s=null;return!Go.XDomainRequest||"withCredentials"in c||!/^(http(s)?:)?\/\//.test(n)||(c=new XDomainRequest),"onload"in c?c.onload=c.onerror=u:c.onreadystatechange=function(){c.readyState>3&&u()},c.onprogress=function(n){var t=Xo.event;Xo.event=n;try{o.progress.call(i,c)}finally{Xo.event=t}},i.header=function(n,t){return n=(n+"").toLowerCase(),arguments.length<2?a[n]:(null==t?delete a[n]:a[n]=t+"",i)},i.mimeType=function(n){return arguments.length?(t=null==n?null:n+"",i):t},i.responseType=function(n){return arguments.length?(s=n,i):s},i.response=function(n){return 
e=n,i},["get","post"].forEach(function(n){i[n]=function(){return i.send.apply(i,[n].concat(Bo(arguments)))}}),i.send=function(e,r,u){if(2===arguments.length&&"function"==typeof r&&(u=r,r=null),c.open(e,n,!0),null==t||"accept"in a||(a.accept=t+",*/*"),c.setRequestHeader)for(var l in a)c.setRequestHeader(l,a[l]);return null!=t&&c.overrideMimeType&&c.overrideMimeType(t),null!=s&&(c.responseType=s),null!=u&&i.on("error",u).on("load",function(n){u(null,n)}),o.beforesend.call(i,c),c.send(null==r?null:r),i},i.abort=function(){return c.abort(),i},Xo.rebind(i,o,"on"),null==r?i:i.get(kt(r))}function kt(n){return 1===n.length?function(t,e){n(null==t?e:null)}:n}function Et(){var n=At(),t=Ct()-n;t>24?(isFinite(t)&&(clearTimeout(Wa),Wa=setTimeout(Et,t)),Ba=0):(Ba=1,Ga(Et))}function At(){var n=Date.now();for(Ja=Xa;Ja;)n>=Ja.t&&(Ja.f=Ja.c(n-Ja.t)),Ja=Ja.n;return n}function Ct(){for(var n,t=Xa,e=1/0;t;)t.f?t=n?n.n=t.n:Xa=t.n:(t.t<e&&(e=t.t),t=(n=t).n);return $a=n,e}function Nt(n,t){return t-(n?Math.ceil(Math.log(n)/Math.LN10):1)}function Lt(n,t){var e=Math.pow(10,3*oa(8-t));return{scale:t>8?function(n){return n/e}:function(n){return n*e},symbol:n}}function Tt(n){var t=n.decimal,e=n.thousands,r=n.grouping,u=n.currency,i=r?function(n){for(var t=n.length,u=[],i=0,o=r[0];t>0&&o>0;)u.push(n.substring(t-=o,t+o)),o=r[i=(i+1)%r.length];return u.reverse().join(e)}:bt;return function(n){var e=Qa.exec(n),r=e[1]||" ",o=e[2]||">",a=e[3]||"",c=e[4]||"",s=e[5],l=+e[6],f=e[7],h=e[8],g=e[9],p=1,v="",d="",m=!1;switch(h&&(h=+h.substring(1)),(s||"0"===r&&"="===o)&&(s=r="0",o="=",f&&(l-=Math.floor((l-1)/4))),g){case"n":f=!0,g="g";break;case"%":p=100,d="%",g="f";break;case"p":p=100,d="%",g="r";break;case"b":case"o":case"x":case"X":"#"===c&&(v="0"+g.toLowerCase());case"c":case"d":m=!0,h=0;break;case"s":p=-1,g="r"}"$"===c&&(v=u[0],d=u[1]),"r"!=g||h||(g="g"),null!=h&&("g"==g?h=Math.max(1,Math.min(21,h)):("e"==g||"f"==g)&&(h=Math.max(0,Math.min(20,h)))),g=nc.get(g)||qt;var y=s&&f;return function(n){var 
e=d;if(m&&n%1)return"";var u=0>n||0===n&&0>1/n?(n=-n,"-"):a;if(0>p){var c=Xo.formatPrefix(n,h);n=c.scale(n),e=c.symbol+d}else n*=p;n=g(n,h);var x=n.lastIndexOf("."),M=0>x?n:n.substring(0,x),_=0>x?"":t+n.substring(x+1);!s&&f&&(M=i(M));var b=v.length+M.length+_.length+(y?0:u.length),w=l>b?new Array(b=l-b+1).join(r):"";return y&&(M=i(w+M)),u+=v,n=M+_,("<"===o?u+n+w:">"===o?w+u+n:"^"===o?w.substring(0,b>>=1)+u+n+w.substring(b):u+(y?n:w+n))+e}}}function qt(n){return n+""}function zt(){this._=new Date(arguments.length>1?Date.UTC.apply(this,arguments):arguments[0])}function Rt(n,t,e){function r(t){var e=n(t),r=i(e,1);return r-t>t-e?e:r}function u(e){return t(e=n(new ec(e-1)),1),e}function i(n,e){return t(n=new ec(+n),e),n}function o(n,r,i){var o=u(n),a=[];if(i>1)for(;r>o;)e(o)%i||a.push(new Date(+o)),t(o,1);else for(;r>o;)a.push(new Date(+o)),t(o,1);return a}function a(n,t,e){try{ec=zt;var r=new zt;return r._=n,o(r,t,e)}finally{ec=Date}}n.floor=n,n.round=r,n.ceil=u,n.offset=i,n.range=o;var c=n.utc=Dt(n);return c.floor=c,c.round=Dt(r),c.ceil=Dt(u),c.offset=Dt(i),c.range=a,n}function Dt(n){return function(t,e){try{ec=zt;var r=new zt;return r._=t,n(r,e)._}finally{ec=Date}}}function Pt(n){function t(n){function t(t){for(var e,u,i,o=[],a=-1,c=0;++a<r;)37===n.charCodeAt(a)&&(o.push(n.substring(c,a)),null!=(u=uc[e=n.charAt(++a)])&&(e=n.charAt(++a)),(i=C[e])&&(e=i(t,null==u?"e"===e?" 
":"0":u)),o.push(e),c=a+1);return o.push(n.substring(c,a)),o.join("")}var r=n.length;return t.parse=function(t){var r={y:1900,m:0,d:1,H:0,M:0,S:0,L:0,Z:null},u=e(r,n,t,0);if(u!=t.length)return null;"p"in r&&(r.H=r.H%12+12*r.p);var i=null!=r.Z&&ec!==zt,o=new(i?zt:ec);return"j"in r?o.setFullYear(r.y,0,r.j):"w"in r&&("W"in r||"U"in r)?(o.setFullYear(r.y,0,1),o.setFullYear(r.y,0,"W"in r?(r.w+6)%7+7*r.W-(o.getDay()+5)%7:r.w+7*r.U-(o.getDay()+6)%7)):o.setFullYear(r.y,r.m,r.d),o.setHours(r.H+Math.floor(r.Z/100),r.M+r.Z%100,r.S,r.L),i?o._:o},t.toString=function(){return n},t}function e(n,t,e,r){for(var u,i,o,a=0,c=t.length,s=e.length;c>a;){if(r>=s)return-1;if(u=t.charCodeAt(a++),37===u){if(o=t.charAt(a++),i=N[o in uc?t.charAt(a++):o],!i||(r=i(n,e,r))<0)return-1}else if(u!=e.charCodeAt(r++))return-1}return r}function r(n,t,e){b.lastIndex=0;var r=b.exec(t.substring(e));return r?(n.w=w.get(r[0].toLowerCase()),e+r[0].length):-1}function u(n,t,e){M.lastIndex=0;var r=M.exec(t.substring(e));return r?(n.w=_.get(r[0].toLowerCase()),e+r[0].length):-1}function i(n,t,e){E.lastIndex=0;var r=E.exec(t.substring(e));return r?(n.m=A.get(r[0].toLowerCase()),e+r[0].length):-1}function o(n,t,e){S.lastIndex=0;var r=S.exec(t.substring(e));return r?(n.m=k.get(r[0].toLowerCase()),e+r[0].length):-1}function a(n,t,r){return e(n,C.c.toString(),t,r)}function c(n,t,r){return e(n,C.x.toString(),t,r)}function s(n,t,r){return e(n,C.X.toString(),t,r)}function l(n,t,e){var r=x.get(t.substring(e,e+=2).toLowerCase());return null==r?-1:(n.p=r,e)}var f=n.dateTime,h=n.date,g=n.time,p=n.periods,v=n.days,d=n.shortDays,m=n.months,y=n.shortMonths;t.utc=function(n){function e(n){try{ec=zt;var t=new ec;return t._=n,r(t)}finally{ec=Date}}var r=t(n);return e.parse=function(n){try{ec=zt;var t=r.parse(n);return t&&t._}finally{ec=Date}},e.toString=r.toString,e},t.multi=t.utc.multi=ee;var x=Xo.map(),M=jt(v),_=Ht(v),b=jt(d),w=Ht(d),S=jt(m),k=Ht(m),E=jt(y),A=Ht(y);p.forEach(function(n,t){x.set(n.toLowerCase(),t)});var 
C={a:function(n){return d[n.getDay()]},A:function(n){return v[n.getDay()]},b:function(n){return y[n.getMonth()]},B:function(n){return m[n.getMonth()]},c:t(f),d:function(n,t){return Ut(n.getDate(),t,2)},e:function(n,t){return Ut(n.getDate(),t,2)},H:function(n,t){return Ut(n.getHours(),t,2)},I:function(n,t){return Ut(n.getHours()%12||12,t,2)},j:function(n,t){return Ut(1+tc.dayOfYear(n),t,3)},L:function(n,t){return Ut(n.getMilliseconds(),t,3)},m:function(n,t){return Ut(n.getMonth()+1,t,2)},M:function(n,t){return Ut(n.getMinutes(),t,2)},p:function(n){return p[+(n.getHours()>=12)]},S:function(n,t){return Ut(n.getSeconds(),t,2)},U:function(n,t){return Ut(tc.sundayOfYear(n),t,2)},w:function(n){return n.getDay()},W:function(n,t){return Ut(tc.mondayOfYear(n),t,2)},x:t(h),X:t(g),y:function(n,t){return Ut(n.getFullYear()%100,t,2)},Y:function(n,t){return Ut(n.getFullYear()%1e4,t,4)},Z:ne,"%":function(){return"%"}},N={a:r,A:u,b:i,B:o,c:a,d:Bt,e:Bt,H:Jt,I:Jt,j:Wt,L:Qt,m:$t,M:Gt,p:l,S:Kt,U:Ot,w:Ft,W:Yt,x:c,X:s,y:Zt,Y:It,Z:Vt,"%":te};return t}function Ut(n,t,e){var r=0>n?"-":"",u=(r?-n:n)+"",i=u.length;return r+(e>i?new Array(e-i+1).join(t)+u:u)}function jt(n){return new RegExp("^(?:"+n.map(Xo.requote).join("|")+")","i")}function Ht(n){for(var t=new u,e=-1,r=n.length;++e<r;)t.set(n[e].toLowerCase(),e);return t}function Ft(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+1));return r?(n.w=+r[0],e+r[0].length):-1}function Ot(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e));return r?(n.U=+r[0],e+r[0].length):-1}function Yt(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e));return r?(n.W=+r[0],e+r[0].length):-1}function It(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+4));return r?(n.y=+r[0],e+r[0].length):-1}function Zt(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+2));return r?(n.y=Xt(+r[0]),e+r[0].length):-1}function Vt(n,t,e){return/^[+-]\d{4}$/.test(t=t.substring(e,e+5))?(n.Z=+t,e+5):-1}function Xt(n){return n+(n>68?1900:2e3)}function 
$t(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+2));return r?(n.m=r[0]-1,e+r[0].length):-1}function Bt(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+2));return r?(n.d=+r[0],e+r[0].length):-1}function Wt(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+3));return r?(n.j=+r[0],e+r[0].length):-1}function Jt(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+2));return r?(n.H=+r[0],e+r[0].length):-1}function Gt(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+2));return r?(n.M=+r[0],e+r[0].length):-1}function Kt(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+2));return r?(n.S=+r[0],e+r[0].length):-1}function Qt(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+3));return r?(n.L=+r[0],e+r[0].length):-1}function ne(n){var t=n.getTimezoneOffset(),e=t>0?"-":"+",r=~~(oa(t)/60),u=oa(t)%60;return e+Ut(r,"0",2)+Ut(u,"0",2)}function te(n,t,e){oc.lastIndex=0;var r=oc.exec(t.substring(e,e+1));return r?e+r[0].length:-1}function ee(n){for(var t=n.length,e=-1;++e<t;)n[e][0]=this(n[e][0]);return function(t){for(var e=0,r=n[e];!r[1](t);)r=n[++e];return r[0](t)}}function re(){}function ue(n,t,e){var r=e.s=n+t,u=r-n,i=r-u;e.t=n-i+(t-u)}function ie(n,t){n&&lc.hasOwnProperty(n.type)&&lc[n.type](n,t)}function oe(n,t,e){var r,u=-1,i=n.length-e;for(t.lineStart();++u<i;)r=n[u],t.point(r[0],r[1],r[2]);t.lineEnd()}function ae(n,t){var e=-1,r=n.length;for(t.polygonStart();++e<r;)oe(n[e],t,1);t.polygonEnd()}function ce(){function n(n,t){n*=Na,t=t*Na/2+Sa/4;var e=n-r,o=e>=0?1:-1,a=o*e,c=Math.cos(t),s=Math.sin(t),l=i*s,f=u*c+l*Math.cos(a),h=l*o*Math.sin(a);hc.add(Math.atan2(h,f)),r=n,u=c,i=s}var t,e,r,u,i;gc.point=function(o,a){gc.point=n,r=(t=o)*Na,u=Math.cos(a=(e=a)*Na/2+Sa/4),i=Math.sin(a)},gc.lineEnd=function(){n(t,e)}}function se(n){var t=n[0],e=n[1],r=Math.cos(e);return[r*Math.cos(t),r*Math.sin(t),Math.sin(e)]}function le(n,t){return n[0]*t[0]+n[1]*t[1]+n[2]*t[2]}function fe(n,t){return[n[1]*t[2]-n[2]*t[1],n[2]*t[0]-n[0]*t[2],n[0]*t[1]-n[1]*t[0]]}function 
he(n,t){n[0]+=t[0],n[1]+=t[1],n[2]+=t[2]}function ge(n,t){return[n[0]*t,n[1]*t,n[2]*t]}function pe(n){var t=Math.sqrt(n[0]*n[0]+n[1]*n[1]+n[2]*n[2]);n[0]/=t,n[1]/=t,n[2]/=t}function ve(n){return[Math.atan2(n[1],n[0]),X(n[2])]}function de(n,t){return oa(n[0]-t[0])<Aa&&oa(n[1]-t[1])<Aa}function me(n,t){n*=Na;var e=Math.cos(t*=Na);ye(e*Math.cos(n),e*Math.sin(n),Math.sin(t))}function ye(n,t,e){++pc,dc+=(n-dc)/pc,mc+=(t-mc)/pc,yc+=(e-yc)/pc}function xe(){function n(n,u){n*=Na;var i=Math.cos(u*=Na),o=i*Math.cos(n),a=i*Math.sin(n),c=Math.sin(u),s=Math.atan2(Math.sqrt((s=e*c-r*a)*s+(s=r*o-t*c)*s+(s=t*a-e*o)*s),t*o+e*a+r*c);vc+=s,xc+=s*(t+(t=o)),Mc+=s*(e+(e=a)),_c+=s*(r+(r=c)),ye(t,e,r)}var t,e,r;kc.point=function(u,i){u*=Na;var o=Math.cos(i*=Na);t=o*Math.cos(u),e=o*Math.sin(u),r=Math.sin(i),kc.point=n,ye(t,e,r)}}function Me(){kc.point=me}function _e(){function n(n,t){n*=Na;var e=Math.cos(t*=Na),o=e*Math.cos(n),a=e*Math.sin(n),c=Math.sin(t),s=u*c-i*a,l=i*o-r*c,f=r*a-u*o,h=Math.sqrt(s*s+l*l+f*f),g=r*o+u*a+i*c,p=h&&-V(g)/h,v=Math.atan2(h,g);bc+=p*s,wc+=p*l,Sc+=p*f,vc+=v,xc+=v*(r+(r=o)),Mc+=v*(u+(u=a)),_c+=v*(i+(i=c)),ye(r,u,i)}var t,e,r,u,i;kc.point=function(o,a){t=o,e=a,kc.point=n,o*=Na;var c=Math.cos(a*=Na);r=c*Math.cos(o),u=c*Math.sin(o),i=Math.sin(a),ye(r,u,i)},kc.lineEnd=function(){n(t,e),kc.lineEnd=Me,kc.point=me}}function be(){return!0}function we(n,t,e,r,u){var i=[],o=[];if(n.forEach(function(n){if(!((t=n.length-1)<=0)){var t,e=n[0],r=n[t];if(de(e,r)){u.lineStart();for(var a=0;t>a;++a)u.point((e=n[a])[0],e[1]);return u.lineEnd(),void 0}var c=new ke(e,n,null,!0),s=new ke(e,null,c,!1);c.o=s,i.push(c),o.push(s),c=new ke(r,n,null,!1),s=new ke(r,null,c,!0),c.o=s,i.push(c),o.push(s)}}),o.sort(t),Se(i),Se(o),i.length){for(var a=0,c=e,s=o.length;s>a;++a)o[a].e=c=!c;for(var l,f,h=i[0];;){for(var g=h,p=!0;g.v;)if((g=g.n)===h)return;l=g.z,u.lineStart();do{if(g.v=g.o.v=!0,g.e){if(p)for(var a=0,s=l.length;s>a;++a)u.point((f=l[a])[0],f[1]);else 
r(g.x,g.n.x,1,u);g=g.n}else{if(p){l=g.p.z;for(var a=l.length-1;a>=0;--a)u.point((f=l[a])[0],f[1])}else r(g.x,g.p.x,-1,u);g=g.p}g=g.o,l=g.z,p=!p}while(!g.v);u.lineEnd()}}}function Se(n){if(t=n.length){for(var t,e,r=0,u=n[0];++r<t;)u.n=e=n[r],e.p=u,u=e;u.n=e=n[0],e.p=u}}function ke(n,t,e,r){this.x=n,this.z=t,this.o=e,this.e=r,this.v=!1,this.n=this.p=null}function Ee(n,t,e,r){return function(u,i){function o(t,e){var r=u(t,e);n(t=r[0],e=r[1])&&i.point(t,e)}function a(n,t){var e=u(n,t);d.point(e[0],e[1])}function c(){y.point=a,d.lineStart()}function s(){y.point=o,d.lineEnd()}function l(n,t){v.push([n,t]);var e=u(n,t);M.point(e[0],e[1])}function f(){M.lineStart(),v=[]}function h(){l(v[0][0],v[0][1]),M.lineEnd();var n,t=M.clean(),e=x.buffer(),r=e.length;if(v.pop(),p.push(v),v=null,r){if(1&t){n=e[0];var u,r=n.length-1,o=-1;for(i.lineStart();++o<r;)i.point((u=n[o])[0],u[1]);return i.lineEnd(),void 0}r>1&&2&t&&e.push(e.pop().concat(e.shift())),g.push(e.filter(Ae))}}var g,p,v,d=t(i),m=u.invert(r[0],r[1]),y={point:o,lineStart:c,lineEnd:s,polygonStart:function(){y.point=l,y.lineStart=f,y.lineEnd=h,g=[],p=[],i.polygonStart()},polygonEnd:function(){y.point=o,y.lineStart=c,y.lineEnd=s,g=Xo.merge(g);var n=Le(m,p);g.length?we(g,Ne,n,e,i):n&&(i.lineStart(),e(null,null,1,i),i.lineEnd()),i.polygonEnd(),g=p=null},sphere:function(){i.polygonStart(),i.lineStart(),e(null,null,1,i),i.lineEnd(),i.polygonEnd()}},x=Ce(),M=t(x);return y}}function Ae(n){return n.length>1}function Ce(){var n,t=[];return{lineStart:function(){t.push(n=[])},point:function(t,e){n.push([t,e])},lineEnd:g,buffer:function(){var e=t;return t=[],n=null,e},rejoin:function(){t.length>1&&t.push(t.pop().concat(t.shift()))}}}function Ne(n,t){return((n=n.x)[0]<0?n[1]-Ea-Aa:Ea-n[1])-((t=t.x)[0]<0?t[1]-Ea-Aa:Ea-t[1])}function Le(n,t){var e=n[0],r=n[1],u=[Math.sin(e),-Math.cos(e),0],i=0,o=0;hc.reset();for(var a=0,c=t.length;c>a;++a){var s=t[a],l=s.length;if(l)for(var 
f=s[0],h=f[0],g=f[1]/2+Sa/4,p=Math.sin(g),v=Math.cos(g),d=1;;){d===l&&(d=0),n=s[d];var m=n[0],y=n[1]/2+Sa/4,x=Math.sin(y),M=Math.cos(y),_=m-h,b=_>=0?1:-1,w=b*_,S=w>Sa,k=p*x;if(hc.add(Math.atan2(k*b*Math.sin(w),v*M+k*Math.cos(w))),i+=S?_+b*ka:_,S^h>=e^m>=e){var E=fe(se(f),se(n));pe(E);var A=fe(u,E);pe(A);var C=(S^_>=0?-1:1)*X(A[2]);(r>C||r===C&&(E[0]||E[1]))&&(o+=S^_>=0?1:-1)}if(!d++)break;h=m,p=x,v=M,f=n}}return(-Aa>i||Aa>i&&0>hc)^1&o}function Te(n){var t,e=0/0,r=0/0,u=0/0;return{lineStart:function(){n.lineStart(),t=1},point:function(i,o){var a=i>0?Sa:-Sa,c=oa(i-e);oa(c-Sa)<Aa?(n.point(e,r=(r+o)/2>0?Ea:-Ea),n.point(u,r),n.lineEnd(),n.lineStart(),n.point(a,r),n.point(i,r),t=0):u!==a&&c>=Sa&&(oa(e-u)<Aa&&(e-=u*Aa),oa(i-a)<Aa&&(i-=a*Aa),r=qe(e,r,i,o),n.point(u,r),n.lineEnd(),n.lineStart(),n.point(a,r),t=0),n.point(e=i,r=o),u=a},lineEnd:function(){n.lineEnd(),e=r=0/0},clean:function(){return 2-t}}}function qe(n,t,e,r){var u,i,o=Math.sin(n-e);return oa(o)>Aa?Math.atan((Math.sin(t)*(i=Math.cos(r))*Math.sin(e)-Math.sin(r)*(u=Math.cos(t))*Math.sin(n))/(u*i*o)):(t+r)/2}function ze(n,t,e,r){var u;if(null==n)u=e*Ea,r.point(-Sa,u),r.point(0,u),r.point(Sa,u),r.point(Sa,0),r.point(Sa,-u),r.point(0,-u),r.point(-Sa,-u),r.point(-Sa,0),r.point(-Sa,u);else if(oa(n[0]-t[0])>Aa){var i=n[0]<t[0]?Sa:-Sa;u=e*i/2,r.point(-i,u),r.point(0,u),r.point(i,u)}else r.point(t[0],t[1])}function Re(n){function t(n,t){return Math.cos(n)*Math.cos(t)>i}function e(n){var e,i,c,s,l;return{lineStart:function(){s=c=!1,l=1},point:function(f,h){var g,p=[f,h],v=t(f,h),d=o?v?0:u(f,h):v?u(f+(0>f?Sa:-Sa),h):0;if(!e&&(s=c=v)&&n.lineStart(),v!==c&&(g=r(e,p),(de(e,g)||de(p,g))&&(p[0]+=Aa,p[1]+=Aa,v=t(p[0],p[1]))),v!==c)l=0,v?(n.lineStart(),g=r(p,e),n.point(g[0],g[1])):(g=r(e,p),n.point(g[0],g[1]),n.lineEnd()),e=g;else if(a&&e&&o^v){var 
m;d&i||!(m=r(p,e,!0))||(l=0,o?(n.lineStart(),n.point(m[0][0],m[0][1]),n.point(m[1][0],m[1][1]),n.lineEnd()):(n.point(m[1][0],m[1][1]),n.lineEnd(),n.lineStart(),n.point(m[0][0],m[0][1])))}!v||e&&de(e,p)||n.point(p[0],p[1]),e=p,c=v,i=d},lineEnd:function(){c&&n.lineEnd(),e=null},clean:function(){return l|(s&&c)<<1}}}function r(n,t,e){var r=se(n),u=se(t),o=[1,0,0],a=fe(r,u),c=le(a,a),s=a[0],l=c-s*s;if(!l)return!e&&n;var f=i*c/l,h=-i*s/l,g=fe(o,a),p=ge(o,f),v=ge(a,h);he(p,v);var d=g,m=le(p,d),y=le(d,d),x=m*m-y*(le(p,p)-1);if(!(0>x)){var M=Math.sqrt(x),_=ge(d,(-m-M)/y);if(he(_,p),_=ve(_),!e)return _;var b,w=n[0],S=t[0],k=n[1],E=t[1];w>S&&(b=w,w=S,S=b);var A=S-w,C=oa(A-Sa)<Aa,N=C||Aa>A;if(!C&&k>E&&(b=k,k=E,E=b),N?C?k+E>0^_[1]<(oa(_[0]-w)<Aa?k:E):k<=_[1]&&_[1]<=E:A>Sa^(w<=_[0]&&_[0]<=S)){var L=ge(d,(-m+M)/y);return he(L,p),[_,ve(L)]}}}function u(t,e){var r=o?n:Sa-n,u=0;return-r>t?u|=1:t>r&&(u|=2),-r>e?u|=4:e>r&&(u|=8),u}var i=Math.cos(n),o=i>0,a=oa(i)>Aa,c=cr(n,6*Na);return Ee(t,e,c,o?[0,-n]:[-Sa,n-Sa])}function De(n,t,e,r){return function(u){var i,o=u.a,a=u.b,c=o.x,s=o.y,l=a.x,f=a.y,h=0,g=1,p=l-c,v=f-s;if(i=n-c,p||!(i>0)){if(i/=p,0>p){if(h>i)return;g>i&&(g=i)}else if(p>0){if(i>g)return;i>h&&(h=i)}if(i=e-c,p||!(0>i)){if(i/=p,0>p){if(i>g)return;i>h&&(h=i)}else if(p>0){if(h>i)return;g>i&&(g=i)}if(i=t-s,v||!(i>0)){if(i/=v,0>v){if(h>i)return;g>i&&(g=i)}else if(v>0){if(i>g)return;i>h&&(h=i)}if(i=r-s,v||!(0>i)){if(i/=v,0>v){if(i>g)return;i>h&&(h=i)}else if(v>0){if(h>i)return;g>i&&(g=i)}return h>0&&(u.a={x:c+h*p,y:s+h*v}),1>g&&(u.b={x:c+g*p,y:s+g*v}),u}}}}}}function Pe(n,t,e,r){function u(r,u){return oa(r[0]-n)<Aa?u>0?0:3:oa(r[0]-e)<Aa?u>0?2:1:oa(r[1]-t)<Aa?u>0?1:0:u>0?3:2}function i(n,t){return o(n.x,t.x)}function o(n,t){var e=u(n,1),r=u(t,1);return e!==r?e-r:0===e?t[1]-n[1]:1===e?n[0]-t[0]:2===e?n[1]-t[1]:t[0]-n[0]}return function(a){function c(n){for(var t=0,e=d.length,r=n[1],u=0;e>u;++u)for(var 
i,o=1,a=d[u],c=a.length,s=a[0];c>o;++o)i=a[o],s[1]<=r?i[1]>r&&Z(s,i,n)>0&&++t:i[1]<=r&&Z(s,i,n)<0&&--t,s=i;return 0!==t}function s(i,a,c,s){var l=0,f=0;if(null==i||(l=u(i,c))!==(f=u(a,c))||o(i,a)<0^c>0){do s.point(0===l||3===l?n:e,l>1?r:t);while((l=(l+c+4)%4)!==f)}else s.point(a[0],a[1])}function l(u,i){return u>=n&&e>=u&&i>=t&&r>=i}function f(n,t){l(n,t)&&a.point(n,t)}function h(){N.point=p,d&&d.push(m=[]),S=!0,w=!1,_=b=0/0}function g(){v&&(p(y,x),M&&w&&A.rejoin(),v.push(A.buffer())),N.point=f,w&&a.lineEnd()}function p(n,t){n=Math.max(-Ac,Math.min(Ac,n)),t=Math.max(-Ac,Math.min(Ac,t));var e=l(n,t);if(d&&m.push([n,t]),S)y=n,x=t,M=e,S=!1,e&&(a.lineStart(),a.point(n,t));else if(e&&w)a.point(n,t);else{var r={a:{x:_,y:b},b:{x:n,y:t}};C(r)?(w||(a.lineStart(),a.point(r.a.x,r.a.y)),a.point(r.b.x,r.b.y),e||a.lineEnd(),k=!1):e&&(a.lineStart(),a.point(n,t),k=!1)}_=n,b=t,w=e}var v,d,m,y,x,M,_,b,w,S,k,E=a,A=Ce(),C=De(n,t,e,r),N={point:f,lineStart:h,lineEnd:g,polygonStart:function(){a=A,v=[],d=[],k=!0},polygonEnd:function(){a=E,v=Xo.merge(v);var t=c([n,r]),e=k&&t,u=v.length;(e||u)&&(a.polygonStart(),e&&(a.lineStart(),s(null,null,1,a),a.lineEnd()),u&&we(v,i,t,s,a),a.polygonEnd()),v=d=m=null}};return N}}function Ue(n,t){function e(e,r){return e=n(e,r),t(e[0],e[1])}return n.invert&&t.invert&&(e.invert=function(e,r){return e=t.invert(e,r),e&&n.invert(e[0],e[1])}),e}function je(n){var t=0,e=Sa/3,r=nr(n),u=r(t,e);return u.parallels=function(n){return arguments.length?r(t=n[0]*Sa/180,e=n[1]*Sa/180):[180*(t/Sa),180*(e/Sa)]},u}function He(n,t){function e(n,t){var e=Math.sqrt(i-2*u*Math.sin(t))/u;return[e*Math.sin(n*=u),o-e*Math.cos(n)]}var r=Math.sin(n),u=(r+Math.sin(t))/2,i=1+r*(2*u-r),o=Math.sqrt(i)/u;return e.invert=function(n,t){var e=o-t;return[Math.atan2(n,e)/u,X((i-(n*n+e*e)*u*u)/(2*u))]},e}function Fe(){function n(n,t){Nc+=u*n-r*t,r=n,u=t}var t,e,r,u;Rc.point=function(i,o){Rc.point=n,t=r=i,e=u=o},Rc.lineEnd=function(){n(t,e)}}function 
Oe(n,t){Lc>n&&(Lc=n),n>qc&&(qc=n),Tc>t&&(Tc=t),t>zc&&(zc=t)}function Ye(){function n(n,t){o.push("M",n,",",t,i)}function t(n,t){o.push("M",n,",",t),a.point=e}function e(n,t){o.push("L",n,",",t)}function r(){a.point=n}function u(){o.push("Z")}var i=Ie(4.5),o=[],a={point:n,lineStart:function(){a.point=t},lineEnd:r,polygonStart:function(){a.lineEnd=u},polygonEnd:function(){a.lineEnd=r,a.point=n},pointRadius:function(n){return i=Ie(n),a},result:function(){if(o.length){var n=o.join("");return o=[],n}}};return a}function Ie(n){return"m0,"+n+"a"+n+","+n+" 0 1,1 0,"+-2*n+"a"+n+","+n+" 0 1,1 0,"+2*n+"z"}function Ze(n,t){dc+=n,mc+=t,++yc}function Ve(){function n(n,r){var u=n-t,i=r-e,o=Math.sqrt(u*u+i*i);xc+=o*(t+n)/2,Mc+=o*(e+r)/2,_c+=o,Ze(t=n,e=r)}var t,e;Pc.point=function(r,u){Pc.point=n,Ze(t=r,e=u)}}function Xe(){Pc.point=Ze}function $e(){function n(n,t){var e=n-r,i=t-u,o=Math.sqrt(e*e+i*i);xc+=o*(r+n)/2,Mc+=o*(u+t)/2,_c+=o,o=u*n-r*t,bc+=o*(r+n),wc+=o*(u+t),Sc+=3*o,Ze(r=n,u=t)}var t,e,r,u;Pc.point=function(i,o){Pc.point=n,Ze(t=r=i,e=u=o)},Pc.lineEnd=function(){n(t,e)}}function Be(n){function t(t,e){n.moveTo(t,e),n.arc(t,e,o,0,ka)}function e(t,e){n.moveTo(t,e),a.point=r}function r(t,e){n.lineTo(t,e)}function u(){a.point=t}function i(){n.closePath()}var o=4.5,a={point:t,lineStart:function(){a.point=e},lineEnd:u,polygonStart:function(){a.lineEnd=i},polygonEnd:function(){a.lineEnd=u,a.point=t},pointRadius:function(n){return o=n,a},result:g};return a}function We(n){function t(n){return(a?r:e)(n)}function e(t){return Ke(t,function(e,r){e=n(e,r),t.point(e[0],e[1])})}function r(t){function e(e,r){e=n(e,r),t.point(e[0],e[1])}function r(){x=0/0,S.point=i,t.lineStart()}function i(e,r){var i=se([e,r]),o=n(e,r);u(x,M,y,_,b,w,x=o[0],M=o[1],y=e,_=i[0],b=i[1],w=i[2],a,t),t.point(x,M)}function o(){S.point=e,t.lineEnd()}function c(){r(),S.point=s,S.lineEnd=l}function s(n,t){i(f=n,h=t),g=x,p=M,v=_,d=b,m=w,S.point=i}function l(){u(x,M,y,_,b,w,g,p,f,v,d,m,a,t),S.lineEnd=o,o()}var 
f,h,g,p,v,d,m,y,x,M,_,b,w,S={point:e,lineStart:r,lineEnd:o,polygonStart:function(){t.polygonStart(),S.lineStart=c},polygonEnd:function(){t.polygonEnd(),S.lineStart=r}};return S}function u(t,e,r,a,c,s,l,f,h,g,p,v,d,m){var y=l-t,x=f-e,M=y*y+x*x;if(M>4*i&&d--){var _=a+g,b=c+p,w=s+v,S=Math.sqrt(_*_+b*b+w*w),k=Math.asin(w/=S),E=oa(oa(w)-1)<Aa||oa(r-h)<Aa?(r+h)/2:Math.atan2(b,_),A=n(E,k),C=A[0],N=A[1],L=C-t,T=N-e,q=x*L-y*T;(q*q/M>i||oa((y*L+x*T)/M-.5)>.3||o>a*g+c*p+s*v)&&(u(t,e,r,a,c,s,C,N,E,_/=S,b/=S,w,d,m),m.point(C,N),u(C,N,E,_,b,w,l,f,h,g,p,v,d,m))}}var i=.5,o=Math.cos(30*Na),a=16;return t.precision=function(n){return arguments.length?(a=(i=n*n)>0&&16,t):Math.sqrt(i)},t}function Je(n){var t=We(function(t,e){return n([t*La,e*La])});return function(n){return tr(t(n))}}function Ge(n){this.stream=n}function Ke(n,t){return{point:t,sphere:function(){n.sphere()},lineStart:function(){n.lineStart()},lineEnd:function(){n.lineEnd()},polygonStart:function(){n.polygonStart()},polygonEnd:function(){n.polygonEnd()}}}function Qe(n){return nr(function(){return n})()}function nr(n){function t(n){return n=a(n[0]*Na,n[1]*Na),[n[0]*h+c,s-n[1]*h]}function e(n){return n=a.invert((n[0]-c)/h,(s-n[1])/h),n&&[n[0]*La,n[1]*La]}function r(){a=Ue(o=ur(m,y,x),i);var n=i(v,d);return c=g-n[0]*h,s=p+n[1]*h,u()}function u(){return l&&(l.valid=!1,l=null),t}var i,o,a,c,s,l,f=We(function(n,t){return n=i(n,t),[n[0]*h+c,s-n[1]*h]}),h=150,g=480,p=250,v=0,d=0,m=0,y=0,x=0,M=Ec,_=bt,b=null,w=null;return t.stream=function(n){return l&&(l.valid=!1),l=tr(M(o,f(_(n)))),l.valid=!0,l},t.clipAngle=function(n){return arguments.length?(M=null==n?(b=n,Ec):Re((b=+n)*Na),u()):b},t.clipExtent=function(n){return arguments.length?(w=n,_=n?Pe(n[0][0],n[0][1],n[1][0],n[1][1]):bt,u()):w},t.scale=function(n){return arguments.length?(h=+n,r()):h},t.translate=function(n){return arguments.length?(g=+n[0],p=+n[1],r()):[g,p]},t.center=function(n){return 
arguments.length?(v=n[0]%360*Na,d=n[1]%360*Na,r()):[v*La,d*La]},t.rotate=function(n){return arguments.length?(m=n[0]%360*Na,y=n[1]%360*Na,x=n.length>2?n[2]%360*Na:0,r()):[m*La,y*La,x*La]},Xo.rebind(t,f,"precision"),function(){return i=n.apply(this,arguments),t.invert=i.invert&&e,r()}}function tr(n){return Ke(n,function(t,e){n.point(t*Na,e*Na)})}function er(n,t){return[n,t]}function rr(n,t){return[n>Sa?n-ka:-Sa>n?n+ka:n,t]}function ur(n,t,e){return n?t||e?Ue(or(n),ar(t,e)):or(n):t||e?ar(t,e):rr}function ir(n){return function(t,e){return t+=n,[t>Sa?t-ka:-Sa>t?t+ka:t,e]}}function or(n){var t=ir(n);return t.invert=ir(-n),t}function ar(n,t){function e(n,t){var e=Math.cos(t),a=Math.cos(n)*e,c=Math.sin(n)*e,s=Math.sin(t),l=s*r+a*u;return[Math.atan2(c*i-l*o,a*r-s*u),X(l*i+c*o)]}var r=Math.cos(n),u=Math.sin(n),i=Math.cos(t),o=Math.sin(t);return e.invert=function(n,t){var e=Math.cos(t),a=Math.cos(n)*e,c=Math.sin(n)*e,s=Math.sin(t),l=s*i-c*o;return[Math.atan2(c*i+s*o,a*r+l*u),X(l*r-a*u)]},e}function cr(n,t){var e=Math.cos(n),r=Math.sin(n);return function(u,i,o,a){var c=o*t;null!=u?(u=sr(e,u),i=sr(e,i),(o>0?i>u:u>i)&&(u+=o*ka)):(u=n+o*ka,i=n-.5*c);for(var s,l=u;o>0?l>i:i>l;l-=c)a.point((s=ve([e,-r*Math.cos(l),-r*Math.sin(l)]))[0],s[1])}}function sr(n,t){var e=se(t);e[0]-=n,pe(e);var r=V(-e[1]);return((-e[2]<0?-r:r)+2*Math.PI-Aa)%(2*Math.PI)}function lr(n,t,e){var r=Xo.range(n,t-Aa,e).concat(t);return function(n){return r.map(function(t){return[n,t]})}}function fr(n,t,e){var r=Xo.range(n,t-Aa,e).concat(t);return function(n){return r.map(function(t){return[t,n]})}}function hr(n){return n.source}function gr(n){return n.target}function pr(n,t,e,r){var u=Math.cos(t),i=Math.sin(t),o=Math.cos(r),a=Math.sin(r),c=u*Math.cos(n),s=u*Math.sin(n),l=o*Math.cos(e),f=o*Math.sin(e),h=2*Math.asin(Math.sqrt(J(r-t)+u*o*J(e-n))),g=1/Math.sin(h),p=h?function(n){var 
t=Math.sin(n*=h)*g,e=Math.sin(h-n)*g,r=e*c+t*l,u=e*s+t*f,o=e*i+t*a;return[Math.atan2(u,r)*La,Math.atan2(o,Math.sqrt(r*r+u*u))*La]}:function(){return[n*La,t*La]};return p.distance=h,p}function vr(){function n(n,u){var i=Math.sin(u*=Na),o=Math.cos(u),a=oa((n*=Na)-t),c=Math.cos(a);Uc+=Math.atan2(Math.sqrt((a=o*Math.sin(a))*a+(a=r*i-e*o*c)*a),e*i+r*o*c),t=n,e=i,r=o}var t,e,r;jc.point=function(u,i){t=u*Na,e=Math.sin(i*=Na),r=Math.cos(i),jc.point=n},jc.lineEnd=function(){jc.point=jc.lineEnd=g}}function dr(n,t){function e(t,e){var r=Math.cos(t),u=Math.cos(e),i=n(r*u);return[i*u*Math.sin(t),i*Math.sin(e)]}return e.invert=function(n,e){var r=Math.sqrt(n*n+e*e),u=t(r),i=Math.sin(u),o=Math.cos(u);return[Math.atan2(n*i,r*o),Math.asin(r&&e*i/r)]},e}function mr(n,t){function e(n,t){var e=oa(oa(t)-Ea)<Aa?0:o/Math.pow(u(t),i);return[e*Math.sin(i*n),o-e*Math.cos(i*n)]}var r=Math.cos(n),u=function(n){return Math.tan(Sa/4+n/2)},i=n===t?Math.sin(n):Math.log(r/Math.cos(t))/Math.log(u(t)/u(n)),o=r*Math.pow(u(n),i)/i;return i?(e.invert=function(n,t){var e=o-t,r=I(i)*Math.sqrt(n*n+e*e);return[Math.atan2(n,e)/i,2*Math.atan(Math.pow(o/r,1/i))-Ea]},e):xr}function yr(n,t){function e(n,t){var e=i-t;return[e*Math.sin(u*n),i-e*Math.cos(u*n)]}var r=Math.cos(n),u=n===t?Math.sin(n):(r-Math.cos(t))/(t-n),i=r/u+n;return oa(u)<Aa?er:(e.invert=function(n,t){var e=i-t;return[Math.atan2(n,e)/u,i-I(u)*Math.sqrt(n*n+e*e)]},e)}function xr(n,t){return[n,Math.log(Math.tan(Sa/4+t/2))]}function Mr(n){var t,e=Qe(n),r=e.scale,u=e.translate,i=e.clipExtent;return e.scale=function(){var n=r.apply(e,arguments);return n===e?t?e.clipExtent(null):e:n},e.translate=function(){var n=u.apply(e,arguments);return n===e?t?e.clipExtent(null):e:n},e.clipExtent=function(n){var o=i.apply(e,arguments);if(o===e){if(t=null==n){var a=Sa*r(),c=u();i([[c[0]-a,c[1]-a],[c[0]+a,c[1]+a]])}}else t&&(o=null);return o},e.clipExtent(null)}function _r(n,t){return[Math.log(Math.tan(Sa/4+t/2)),-n]}function br(n){return n[0]}function wr(n){return 
n[1]}function Sr(n){for(var t=n.length,e=[0,1],r=2,u=2;t>u;u++){for(;r>1&&Z(n[e[r-2]],n[e[r-1]],n[u])<=0;)--r;e[r++]=u}return e.slice(0,r)}function kr(n,t){return n[0]-t[0]||n[1]-t[1]}function Er(n,t,e){return(e[0]-t[0])*(n[1]-t[1])<(e[1]-t[1])*(n[0]-t[0])}function Ar(n,t,e,r){var u=n[0],i=e[0],o=t[0]-u,a=r[0]-i,c=n[1],s=e[1],l=t[1]-c,f=r[1]-s,h=(a*(c-s)-f*(u-i))/(f*o-a*l);return[u+h*o,c+h*l]}function Cr(n){var t=n[0],e=n[n.length-1];return!(t[0]-e[0]||t[1]-e[1])}function Nr(){Jr(this),this.edge=this.site=this.circle=null}function Lr(n){var t=Jc.pop()||new Nr;return t.site=n,t}function Tr(n){Or(n),$c.remove(n),Jc.push(n),Jr(n)}function qr(n){var t=n.circle,e=t.x,r=t.cy,u={x:e,y:r},i=n.P,o=n.N,a=[n];Tr(n);for(var c=i;c.circle&&oa(e-c.circle.x)<Aa&&oa(r-c.circle.cy)<Aa;)i=c.P,a.unshift(c),Tr(c),c=i;a.unshift(c),Or(c);for(var s=o;s.circle&&oa(e-s.circle.x)<Aa&&oa(r-s.circle.cy)<Aa;)o=s.N,a.push(s),Tr(s),s=o;a.push(s),Or(s);var l,f=a.length;for(l=1;f>l;++l)s=a[l],c=a[l-1],$r(s.edge,c.site,s.site,u);c=a[0],s=a[f-1],s.edge=Vr(c.site,s.site,null,u),Fr(c),Fr(s)}function zr(n){for(var t,e,r,u,i=n.x,o=n.y,a=$c._;a;)if(r=Rr(a,o)-i,r>Aa)a=a.L;else{if(u=i-Dr(a,o),!(u>Aa)){r>-Aa?(t=a.P,e=a):u>-Aa?(t=a,e=a.N):t=e=a;break}if(!a.R){t=a;break}a=a.R}var c=Lr(n);if($c.insert(t,c),t||e){if(t===e)return Or(t),e=Lr(t.site),$c.insert(c,e),c.edge=e.edge=Vr(t.site,c.site),Fr(t),Fr(e),void 0;if(!e)return c.edge=Vr(t.site,c.site),void 0;Or(t),Or(e);var s=t.site,l=s.x,f=s.y,h=n.x-l,g=n.y-f,p=e.site,v=p.x-l,d=p.y-f,m=2*(h*d-g*v),y=h*h+g*g,x=v*v+d*d,M={x:(d*y-g*x)/m+l,y:(h*x-v*y)/m+f};$r(e.edge,s,p,M),c.edge=Vr(s,n,null,M),e.edge=Vr(n,p,null,M),Fr(t),Fr(e)}}function Rr(n,t){var e=n.site,r=e.x,u=e.y,i=u-t;if(!i)return r;var o=n.P;if(!o)return-1/0;e=o.site;var a=e.x,c=e.y,s=c-t;if(!s)return a;var l=a-r,f=1/i-1/s,h=l/s;return f?(-h+Math.sqrt(h*h-2*f*(l*l/(-2*s)-c+s/2+u-i/2)))/f+r:(r+a)/2}function Dr(n,t){var e=n.N;if(e)return Rr(e,t);var r=n.site;return r.y===t?r.x:1/0}function 
Pr(n){this.site=n,this.edges=[]}function Ur(n){for(var t,e,r,u,i,o,a,c,s,l,f=n[0][0],h=n[1][0],g=n[0][1],p=n[1][1],v=Xc,d=v.length;d--;)if(i=v[d],i&&i.prepare())for(a=i.edges,c=a.length,o=0;c>o;)l=a[o].end(),r=l.x,u=l.y,s=a[++o%c].start(),t=s.x,e=s.y,(oa(r-t)>Aa||oa(u-e)>Aa)&&(a.splice(o,0,new Br(Xr(i.site,l,oa(r-f)<Aa&&p-u>Aa?{x:f,y:oa(t-f)<Aa?e:p}:oa(u-p)<Aa&&h-r>Aa?{x:oa(e-p)<Aa?t:h,y:p}:oa(r-h)<Aa&&u-g>Aa?{x:h,y:oa(t-h)<Aa?e:g}:oa(u-g)<Aa&&r-f>Aa?{x:oa(e-g)<Aa?t:f,y:g}:null),i.site,null)),++c)}function jr(n,t){return t.angle-n.angle}function Hr(){Jr(this),this.x=this.y=this.arc=this.site=this.cy=null}function Fr(n){var t=n.P,e=n.N;if(t&&e){var r=t.site,u=n.site,i=e.site;if(r!==i){var o=u.x,a=u.y,c=r.x-o,s=r.y-a,l=i.x-o,f=i.y-a,h=2*(c*f-s*l);if(!(h>=-Ca)){var g=c*c+s*s,p=l*l+f*f,v=(f*g-s*p)/h,d=(c*p-l*g)/h,f=d+a,m=Gc.pop()||new Hr;m.arc=n,m.site=u,m.x=v+o,m.y=f+Math.sqrt(v*v+d*d),m.cy=f,n.circle=m;for(var y=null,x=Wc._;x;)if(m.y<x.y||m.y===x.y&&m.x<=x.x){if(!x.L){y=x.P;break}x=x.L}else{if(!x.R){y=x;break}x=x.R}Wc.insert(y,m),y||(Bc=m)}}}}function Or(n){var t=n.circle;t&&(t.P||(Bc=t.N),Wc.remove(t),Gc.push(t),Jr(t),n.circle=null)}function Yr(n){for(var t,e=Vc,r=De(n[0][0],n[0][1],n[1][0],n[1][1]),u=e.length;u--;)t=e[u],(!Ir(t,n)||!r(t)||oa(t.a.x-t.b.x)<Aa&&oa(t.a.y-t.b.y)<Aa)&&(t.a=t.b=null,e.splice(u,1))}function Ir(n,t){var e=n.b;if(e)return!0;var r,u,i=n.a,o=t[0][0],a=t[1][0],c=t[0][1],s=t[1][1],l=n.l,f=n.r,h=l.x,g=l.y,p=f.x,v=f.y,d=(h+p)/2,m=(g+v)/2;if(v===g){if(o>d||d>=a)return;if(h>p){if(i){if(i.y>=s)return}else i={x:d,y:c};e={x:d,y:s}}else{if(i){if(i.y<c)return}else i={x:d,y:s};e={x:d,y:c}}}else if(r=(h-p)/(v-g),u=m-r*d,-1>r||r>1)if(h>p){if(i){if(i.y>=s)return}else i={x:(c-u)/r,y:c};e={x:(s-u)/r,y:s}}else{if(i){if(i.y<c)return}else i={x:(s-u)/r,y:s};e={x:(c-u)/r,y:c}}else if(v>g){if(i){if(i.x>=a)return}else i={x:o,y:r*o+u};e={x:a,y:r*a+u}}else{if(i){if(i.x<o)return}else i={x:a,y:r*a+u};e={x:o,y:r*o+u}}return n.a=i,n.b=e,!0}function 
Zr(n,t){this.l=n,this.r=t,this.a=this.b=null}function Vr(n,t,e,r){var u=new Zr(n,t);return Vc.push(u),e&&$r(u,n,t,e),r&&$r(u,t,n,r),Xc[n.i].edges.push(new Br(u,n,t)),Xc[t.i].edges.push(new Br(u,t,n)),u}function Xr(n,t,e){var r=new Zr(n,null);return r.a=t,r.b=e,Vc.push(r),r}function $r(n,t,e,r){n.a||n.b?n.l===e?n.b=r:n.a=r:(n.a=r,n.l=t,n.r=e)}function Br(n,t,e){var r=n.a,u=n.b;this.edge=n,this.site=t,this.angle=e?Math.atan2(e.y-t.y,e.x-t.x):n.l===t?Math.atan2(u.x-r.x,r.y-u.y):Math.atan2(r.x-u.x,u.y-r.y)}function Wr(){this._=null}function Jr(n){n.U=n.C=n.L=n.R=n.P=n.N=null}function Gr(n,t){var e=t,r=t.R,u=e.U;u?u.L===e?u.L=r:u.R=r:n._=r,r.U=u,e.U=r,e.R=r.L,e.R&&(e.R.U=e),r.L=e}function Kr(n,t){var e=t,r=t.L,u=e.U;u?u.L===e?u.L=r:u.R=r:n._=r,r.U=u,e.U=r,e.L=r.R,e.L&&(e.L.U=e),r.R=e}function Qr(n){for(;n.L;)n=n.L;return n}function nu(n,t){var e,r,u,i=n.sort(tu).pop();for(Vc=[],Xc=new Array(n.length),$c=new Wr,Wc=new Wr;;)if(u=Bc,i&&(!u||i.y<u.y||i.y===u.y&&i.x<u.x))(i.x!==e||i.y!==r)&&(Xc[i.i]=new Pr(i),zr(i),e=i.x,r=i.y),i=n.pop();else{if(!u)break;qr(u.arc)}t&&(Yr(t),Ur(t));var o={cells:Xc,edges:Vc};return $c=Wc=Vc=Xc=null,o}function tu(n,t){return t.y-n.y||t.x-n.x}function eu(n,t,e){return(n.x-e.x)*(t.y-n.y)-(n.x-t.x)*(e.y-n.y)}function ru(n){return n.x}function uu(n){return n.y}function iu(){return{leaf:!0,nodes:[],point:null,x:null,y:null}}function ou(n,t,e,r,u,i){if(!n(t,e,r,u,i)){var o=.5*(e+u),a=.5*(r+i),c=t.nodes;c[0]&&ou(n,c[0],e,r,o,a),c[1]&&ou(n,c[1],o,r,u,a),c[2]&&ou(n,c[2],e,a,o,i),c[3]&&ou(n,c[3],o,a,u,i)}}function au(n,t){n=Xo.rgb(n),t=Xo.rgb(t);var e=n.r,r=n.g,u=n.b,i=t.r-e,o=t.g-r,a=t.b-u;return function(n){return"#"+vt(Math.round(e+i*n))+vt(Math.round(r+o*n))+vt(Math.round(u+a*n))}}function cu(n,t){var e,r={},u={};for(e in n)e in t?r[e]=fu(n[e],t[e]):u[e]=n[e];for(e in t)e in n||(u[e]=t[e]);return function(n){for(e in r)u[e]=r[e](n);return u}}function su(n,t){return t-=n=+n,function(e){return n+t*e}}function lu(n,t){var 
e,r,u,i,o,a=0,c=0,s=[],l=[];for(n+="",t+="",Qc.lastIndex=0,r=0;e=Qc.exec(t);++r)e.index&&s.push(t.substring(a,c=e.index)),l.push({i:s.length,x:e[0]}),s.push(null),a=Qc.lastIndex;for(a<t.length&&s.push(t.substring(a)),r=0,i=l.length;(e=Qc.exec(n))&&i>r;++r)if(o=l[r],o.x==e[0]){if(o.i)if(null==s[o.i+1])for(s[o.i-1]+=o.x,s.splice(o.i,1),u=r+1;i>u;++u)l[u].i--;else for(s[o.i-1]+=o.x+s[o.i+1],s.splice(o.i,2),u=r+1;i>u;++u)l[u].i-=2;else if(null==s[o.i+1])s[o.i]=o.x;else for(s[o.i]=o.x+s[o.i+1],s.splice(o.i+1,1),u=r+1;i>u;++u)l[u].i--;l.splice(r,1),i--,r--}else o.x=su(parseFloat(e[0]),parseFloat(o.x));for(;i>r;)o=l.pop(),null==s[o.i+1]?s[o.i]=o.x:(s[o.i]=o.x+s[o.i+1],s.splice(o.i+1,1)),i--;return 1===s.length?null==s[0]?(o=l[0].x,function(n){return o(n)+""}):function(){return t}:function(n){for(r=0;i>r;++r)s[(o=l[r]).i]=o.x(n);return s.join("")}}function fu(n,t){for(var e,r=Xo.interpolators.length;--r>=0&&!(e=Xo.interpolators[r](n,t)););return e}function hu(n,t){var e,r=[],u=[],i=n.length,o=t.length,a=Math.min(n.length,t.length);for(e=0;a>e;++e)r.push(fu(n[e],t[e]));for(;i>e;++e)u[e]=n[e];for(;o>e;++e)u[e]=t[e];return function(n){for(e=0;a>e;++e)u[e]=r[e](n);return u}}function gu(n){return function(t){return 0>=t?0:t>=1?1:n(t)}}function pu(n){return function(t){return 1-n(1-t)}}function vu(n){return function(t){return.5*(.5>t?n(2*t):2-n(2-2*t))}}function du(n){return n*n}function mu(n){return n*n*n}function yu(n){if(0>=n)return 0;if(n>=1)return 1;var t=n*n,e=t*n;return 4*(.5>n?e:3*(n-t)+e-.75)}function xu(n){return function(t){return Math.pow(t,n)}}function Mu(n){return 1-Math.cos(n*Ea)}function _u(n){return Math.pow(2,10*(n-1))}function bu(n){return 1-Math.sqrt(1-n*n)}function wu(n,t){var e;return arguments.length<2&&(t=.45),arguments.length?e=t/ka*Math.asin(1/n):(n=1,e=t/4),function(r){return 1+n*Math.pow(2,-10*r)*Math.sin((r-e)*ka/t)}}function Su(n){return n||(n=1.70158),function(t){return t*t*((n+1)*t-n)}}function ku(n){return 
1/2.75>n?7.5625*n*n:2/2.75>n?7.5625*(n-=1.5/2.75)*n+.75:2.5/2.75>n?7.5625*(n-=2.25/2.75)*n+.9375:7.5625*(n-=2.625/2.75)*n+.984375}function Eu(n,t){n=Xo.hcl(n),t=Xo.hcl(t);var e=n.h,r=n.c,u=n.l,i=t.h-e,o=t.c-r,a=t.l-u;return isNaN(o)&&(o=0,r=isNaN(r)?t.c:r),isNaN(i)?(i=0,e=isNaN(e)?t.h:e):i>180?i-=360:-180>i&&(i+=360),function(n){return rt(e+i*n,r+o*n,u+a*n)+""}}function Au(n,t){n=Xo.hsl(n),t=Xo.hsl(t);var e=n.h,r=n.s,u=n.l,i=t.h-e,o=t.s-r,a=t.l-u;return isNaN(o)&&(o=0,r=isNaN(r)?t.s:r),isNaN(i)?(i=0,e=isNaN(e)?t.h:e):i>180?i-=360:-180>i&&(i+=360),function(n){return nt(e+i*n,r+o*n,u+a*n)+""}}function Cu(n,t){n=Xo.lab(n),t=Xo.lab(t);var e=n.l,r=n.a,u=n.b,i=t.l-e,o=t.a-r,a=t.b-u;return function(n){return ot(e+i*n,r+o*n,u+a*n)+""}}function Nu(n,t){return t-=n,function(e){return Math.round(n+t*e)}}function Lu(n){var t=[n.a,n.b],e=[n.c,n.d],r=qu(t),u=Tu(t,e),i=qu(zu(e,t,-u))||0;t[0]*e[1]<e[0]*t[1]&&(t[0]*=-1,t[1]*=-1,r*=-1,u*=-1),this.rotate=(r?Math.atan2(t[1],t[0]):Math.atan2(-e[0],e[1]))*La,this.translate=[n.e,n.f],this.scale=[r,i],this.skew=i?Math.atan2(u,i)*La:0}function Tu(n,t){return n[0]*t[0]+n[1]*t[1]}function qu(n){var t=Math.sqrt(Tu(n,n));return t&&(n[0]/=t,n[1]/=t),t}function zu(n,t,e){return n[0]+=e*t[0],n[1]+=e*t[1],n}function Ru(n,t){var e,r=[],u=[],i=Xo.transform(n),o=Xo.transform(t),a=i.translate,c=o.translate,s=i.rotate,l=o.rotate,f=i.skew,h=o.skew,g=i.scale,p=o.scale;return 
a[0]!=c[0]||a[1]!=c[1]?(r.push("translate(",null,",",null,")"),u.push({i:1,x:su(a[0],c[0])},{i:3,x:su(a[1],c[1])})):c[0]||c[1]?r.push("translate("+c+")"):r.push(""),s!=l?(s-l>180?l+=360:l-s>180&&(s+=360),u.push({i:r.push(r.pop()+"rotate(",null,")")-2,x:su(s,l)})):l&&r.push(r.pop()+"rotate("+l+")"),f!=h?u.push({i:r.push(r.pop()+"skewX(",null,")")-2,x:su(f,h)}):h&&r.push(r.pop()+"skewX("+h+")"),g[0]!=p[0]||g[1]!=p[1]?(e=r.push(r.pop()+"scale(",null,",",null,")"),u.push({i:e-4,x:su(g[0],p[0])},{i:e-2,x:su(g[1],p[1])})):(1!=p[0]||1!=p[1])&&r.push(r.pop()+"scale("+p+")"),e=u.length,function(n){for(var t,i=-1;++i<e;)r[(t=u[i]).i]=t.x(n);return r.join("")}}function Du(n,t){return t=t-(n=+n)?1/(t-n):0,function(e){return(e-n)*t}}function Pu(n,t){return t=t-(n=+n)?1/(t-n):0,function(e){return Math.max(0,Math.min(1,(e-n)*t))}}function Uu(n){for(var t=n.source,e=n.target,r=Hu(t,e),u=[t];t!==r;)t=t.parent,u.push(t);for(var i=u.length;e!==r;)u.splice(i,0,e),e=e.parent;return u}function ju(n){for(var t=[],e=n.parent;null!=e;)t.push(n),n=e,e=e.parent;return t.push(n),t}function Hu(n,t){if(n===t)return n;for(var e=ju(n),r=ju(t),u=e.pop(),i=r.pop(),o=null;u===i;)o=u,u=e.pop(),i=r.pop();return o}function Fu(n){n.fixed|=2}function Ou(n){n.fixed&=-7}function Yu(n){n.fixed|=4,n.px=n.x,n.py=n.y}function Iu(n){n.fixed&=-5}function Zu(n,t,e){var r=0,u=0;if(n.charge=0,!n.leaf)for(var i,o=n.nodes,a=o.length,c=-1;++c<a;)i=o[c],null!=i&&(Zu(i,t,e),n.charge+=i.charge,r+=i.charge*i.cx,u+=i.charge*i.cy);if(n.point){n.leaf||(n.point.x+=Math.random()-.5,n.point.y+=Math.random()-.5);var s=t*e[n.point.index];n.charge+=n.pointCharge=s,r+=s*n.point.x,u+=s*n.point.y}n.cx=r/n.charge,n.cy=u/n.charge}function Vu(n,t){return Xo.rebind(n,t,"sort","children","value"),n.nodes=n,n.links=Wu,n}function Xu(n){return n.children}function $u(n){return n.value}function Bu(n,t){return t.value-n.value}function Wu(n){return 
Xo.merge(n.map(function(n){return(n.children||[]).map(function(t){return{source:n,target:t}})}))}function Ju(n){return n.x}function Gu(n){return n.y}function Ku(n,t,e){n.y0=t,n.y=e}function Qu(n){return Xo.range(n.length)}function ni(n){for(var t=-1,e=n[0].length,r=[];++t<e;)r[t]=0;return r}function ti(n){for(var t,e=1,r=0,u=n[0][1],i=n.length;i>e;++e)(t=n[e][1])>u&&(r=e,u=t);return r}function ei(n){return n.reduce(ri,0)}function ri(n,t){return n+t[1]}function ui(n,t){return ii(n,Math.ceil(Math.log(t.length)/Math.LN2+1))}function ii(n,t){for(var e=-1,r=+n[0],u=(n[1]-r)/t,i=[];++e<=t;)i[e]=u*e+r;return i}function oi(n){return[Xo.min(n),Xo.max(n)]}function ai(n,t){return n.parent==t.parent?1:2}function ci(n){var t=n.children;return t&&t.length?t[0]:n._tree.thread}function si(n){var t,e=n.children;return e&&(t=e.length)?e[t-1]:n._tree.thread}function li(n,t){var e=n.children;if(e&&(u=e.length))for(var r,u,i=-1;++i<u;)t(r=li(e[i],t),n)>0&&(n=r);return n}function fi(n,t){return n.x-t.x}function hi(n,t){return t.x-n.x}function gi(n,t){return n.depth-t.depth}function pi(n,t){function e(n,r){var u=n.children;if(u&&(o=u.length))for(var i,o,a=null,c=-1;++c<o;)i=u[c],e(i,a),a=i;t(n,r)}e(n,null)}function vi(n){for(var t,e=0,r=0,u=n.children,i=u.length;--i>=0;)t=u[i]._tree,t.prelim+=e,t.mod+=e,e+=t.shift+(r+=t.change)}function di(n,t,e){n=n._tree,t=t._tree;var r=e/(t.number-n.number);n.change+=r,t.change-=r,t.shift+=e,t.prelim+=e,t.mod+=e}function mi(n,t,e){return n._tree.ancestor.parent==t.parent?n._tree.ancestor:e}function yi(n,t){return n.value-t.value}function xi(n,t){var e=n._pack_next;n._pack_next=t,t._pack_prev=n,t._pack_next=e,e._pack_prev=t}function Mi(n,t){n._pack_next=t,t._pack_prev=n}function _i(n,t){var e=t.x-n.x,r=t.y-n.y,u=n.r+t.r;return.999*u*u>e*e+r*r}function bi(n){function t(n){l=Math.min(n.x-n.r,l),f=Math.max(n.x+n.r,f),h=Math.min(n.y-n.r,h),g=Math.max(n.y+n.r,g)}if((e=n.children)&&(s=e.length)){var 
e,r,u,i,o,a,c,s,l=1/0,f=-1/0,h=1/0,g=-1/0;if(e.forEach(wi),r=e[0],r.x=-r.r,r.y=0,t(r),s>1&&(u=e[1],u.x=u.r,u.y=0,t(u),s>2))for(i=e[2],Ei(r,u,i),t(i),xi(r,i),r._pack_prev=i,xi(i,u),u=r._pack_next,o=3;s>o;o++){Ei(r,u,i=e[o]);var p=0,v=1,d=1;for(a=u._pack_next;a!==u;a=a._pack_next,v++)if(_i(a,i)){p=1;break}if(1==p)for(c=r._pack_prev;c!==a._pack_prev&&!_i(c,i);c=c._pack_prev,d++);p?(d>v||v==d&&u.r<r.r?Mi(r,u=a):Mi(r=c,u),o--):(xi(r,i),u=i,t(i))}var m=(l+f)/2,y=(h+g)/2,x=0;for(o=0;s>o;o++)i=e[o],i.x-=m,i.y-=y,x=Math.max(x,i.r+Math.sqrt(i.x*i.x+i.y*i.y));n.r=x,e.forEach(Si)}}function wi(n){n._pack_next=n._pack_prev=n}function Si(n){delete n._pack_next,delete n._pack_prev}function ki(n,t,e,r){var u=n.children;if(n.x=t+=r*n.x,n.y=e+=r*n.y,n.r*=r,u)for(var i=-1,o=u.length;++i<o;)ki(u[i],t,e,r)}function Ei(n,t,e){var r=n.r+e.r,u=t.x-n.x,i=t.y-n.y;if(r&&(u||i)){var o=t.r+e.r,a=u*u+i*i;o*=o,r*=r;var c=.5+(r-o)/(2*a),s=Math.sqrt(Math.max(0,2*o*(r+a)-(r-=a)*r-o*o))/(2*a);e.x=n.x+c*u+s*i,e.y=n.y+c*i-s*u}else e.x=n.x+r,e.y=n.y}function Ai(n){return 1+Xo.max(n,function(n){return n.y})}function Ci(n){return n.reduce(function(n,t){return n+t.x},0)/n.length}function Ni(n){var t=n.children;return t&&t.length?Ni(t[0]):n}function Li(n){var t,e=n.children;return e&&(t=e.length)?Li(e[t-1]):n}function Ti(n){return{x:n.x,y:n.y,dx:n.dx,dy:n.dy}}function qi(n,t){var e=n.x+t[3],r=n.y+t[0],u=n.dx-t[1]-t[3],i=n.dy-t[0]-t[2];return 0>u&&(e+=u/2,u=0),0>i&&(r+=i/2,i=0),{x:e,y:r,dx:u,dy:i}}function zi(n){var t=n[0],e=n[n.length-1];return e>t?[t,e]:[e,t]}function Ri(n){return n.rangeExtent?n.rangeExtent():zi(n.range())}function Di(n,t,e,r){var u=e(n[0],n[1]),i=r(t[0],t[1]);return function(n){return i(u(n))}}function Pi(n,t){var e,r=0,u=n.length-1,i=n[r],o=n[u];return i>o&&(e=r,r=u,u=e,e=i,i=o,o=e),n[r]=t.floor(i),n[u]=t.ceil(o),n}function Ui(n){return n?{floor:function(t){return Math.floor(t/n)*n},ceil:function(t){return Math.ceil(t/n)*n}}:ls}function ji(n,t,e,r){var 
u=[],i=[],o=0,a=Math.min(n.length,t.length)-1;for(n[a]<n[0]&&(n=n.slice().reverse(),t=t.slice().reverse());++o<=a;)u.push(e(n[o-1],n[o])),i.push(r(t[o-1],t[o]));return function(t){var e=Xo.bisect(n,t,1,a)-1;return i[e](u[e](t))}}function Hi(n,t,e,r){function u(){var u=Math.min(n.length,t.length)>2?ji:Di,c=r?Pu:Du;return o=u(n,t,c,e),a=u(t,n,c,fu),i}function i(n){return o(n)}var o,a;return i.invert=function(n){return a(n)},i.domain=function(t){return arguments.length?(n=t.map(Number),u()):n},i.range=function(n){return arguments.length?(t=n,u()):t},i.rangeRound=function(n){return i.range(n).interpolate(Nu)},i.clamp=function(n){return arguments.length?(r=n,u()):r},i.interpolate=function(n){return arguments.length?(e=n,u()):e},i.ticks=function(t){return Ii(n,t)},i.tickFormat=function(t,e){return Zi(n,t,e)},i.nice=function(t){return Oi(n,t),u()},i.copy=function(){return Hi(n,t,e,r)},u()}function Fi(n,t){return Xo.rebind(n,t,"range","rangeRound","interpolate","clamp")}function Oi(n,t){return Pi(n,Ui(Yi(n,t)[2]))}function Yi(n,t){null==t&&(t=10);var e=zi(n),r=e[1]-e[0],u=Math.pow(10,Math.floor(Math.log(r/t)/Math.LN10)),i=t/r*u;return.15>=i?u*=10:.35>=i?u*=5:.75>=i&&(u*=2),e[0]=Math.ceil(e[0]/u)*u,e[1]=Math.floor(e[1]/u)*u+.5*u,e[2]=u,e}function Ii(n,t){return Xo.range.apply(Xo,Yi(n,t))}function Zi(n,t,e){var r=Yi(n,t);return Xo.format(e?e.replace(Qa,function(n,t,e,u,i,o,a,c,s,l){return[t,e,u,i,o,a,c,s||"."+Xi(l,r),l].join("")}):",."+Vi(r[2])+"f")}function Vi(n){return-Math.floor(Math.log(n)/Math.LN10+.01)}function Xi(n,t){var e=Vi(t[2]);return n in fs?Math.abs(e-Vi(Math.max(Math.abs(t[0]),Math.abs(t[1]))))+ +("e"!==n):e-2*("%"===n)}function $i(n,t,e,r){function u(n){return(e?Math.log(0>n?0:n):-Math.log(n>0?0:-n))/Math.log(t)}function i(n){return e?Math.pow(t,n):-Math.pow(t,-n)}function o(t){return n(u(t))}return o.invert=function(t){return i(n.invert(t))},o.domain=function(t){return 
arguments.length?(e=t[0]>=0,n.domain((r=t.map(Number)).map(u)),o):r},o.base=function(e){return arguments.length?(t=+e,n.domain(r.map(u)),o):t},o.nice=function(){var t=Pi(r.map(u),e?Math:gs);return n.domain(t),r=t.map(i),o},o.ticks=function(){var n=zi(r),o=[],a=n[0],c=n[1],s=Math.floor(u(a)),l=Math.ceil(u(c)),f=t%1?2:t;if(isFinite(l-s)){if(e){for(;l>s;s++)for(var h=1;f>h;h++)o.push(i(s)*h);o.push(i(s))}else for(o.push(i(s));s++<l;)for(var h=f-1;h>0;h--)o.push(i(s)*h);for(s=0;o[s]<a;s++);for(l=o.length;o[l-1]>c;l--);o=o.slice(s,l)}return o},o.tickFormat=function(n,t){if(!arguments.length)return hs;arguments.length<2?t=hs:"function"!=typeof t&&(t=Xo.format(t));var r,a=Math.max(.1,n/o.ticks().length),c=e?(r=1e-12,Math.ceil):(r=-1e-12,Math.floor);return function(n){return n/i(c(u(n)+r))<=a?t(n):""}},o.copy=function(){return $i(n.copy(),t,e,r)},Fi(o,n)}function Bi(n,t,e){function r(t){return n(u(t))}var u=Wi(t),i=Wi(1/t);return r.invert=function(t){return i(n.invert(t))},r.domain=function(t){return arguments.length?(n.domain((e=t.map(Number)).map(u)),r):e},r.ticks=function(n){return Ii(e,n)},r.tickFormat=function(n,t){return Zi(e,n,t)},r.nice=function(n){return r.domain(Oi(e,n))},r.exponent=function(o){return arguments.length?(u=Wi(t=o),i=Wi(1/t),n.domain(e.map(u)),r):t},r.copy=function(){return Bi(n.copy(),t,e)},Fi(r,n)}function Wi(n){return function(t){return 0>t?-Math.pow(-t,n):Math.pow(t,n)}}function Ji(n,t){function e(e){return o[((i.get(e)||"range"===t.t&&i.set(e,n.push(e)))-1)%o.length]}function r(t,e){return Xo.range(n.length).map(function(n){return t+e*n})}var i,o,a;return e.domain=function(r){if(!arguments.length)return n;n=[],i=new u;for(var o,a=-1,c=r.length;++a<c;)i.has(o=r[a])||i.set(o,n.push(o));return e[t.t].apply(e,t.a)},e.range=function(n){return arguments.length?(o=n,a=0,t={t:"range",a:arguments},e):o},e.rangePoints=function(u,i){arguments.length<2&&(i=0);var c=u[0],s=u[1],l=(s-c)/(Math.max(1,n.length-1)+i);return 
o=r(n.length<2?(c+s)/2:c+l*i/2,l),a=0,t={t:"rangePoints",a:arguments},e},e.rangeBands=function(u,i,c){arguments.length<2&&(i=0),arguments.length<3&&(c=i);var s=u[1]<u[0],l=u[s-0],f=u[1-s],h=(f-l)/(n.length-i+2*c);return o=r(l+h*c,h),s&&o.reverse(),a=h*(1-i),t={t:"rangeBands",a:arguments},e},e.rangeRoundBands=function(u,i,c){arguments.length<2&&(i=0),arguments.length<3&&(c=i);var s=u[1]<u[0],l=u[s-0],f=u[1-s],h=Math.floor((f-l)/(n.length-i+2*c)),g=f-l-(n.length-i)*h;return o=r(l+Math.round(g/2),h),s&&o.reverse(),a=Math.round(h*(1-i)),t={t:"rangeRoundBands",a:arguments},e},e.rangeBand=function(){return a},e.rangeExtent=function(){return zi(t.a[0])},e.copy=function(){return Ji(n,t)},e.domain(n)}function Gi(n,t){function e(){var e=0,i=t.length;for(u=[];++e<i;)u[e-1]=Xo.quantile(n,e/i);return r}function r(n){return isNaN(n=+n)?void 0:t[Xo.bisect(u,n)]}var u;return r.domain=function(t){return arguments.length?(n=t.filter(function(n){return!isNaN(n)}).sort(Xo.ascending),e()):n},r.range=function(n){return arguments.length?(t=n,e()):t},r.quantiles=function(){return u},r.invertExtent=function(e){return e=t.indexOf(e),0>e?[0/0,0/0]:[e>0?u[e-1]:n[0],e<u.length?u[e]:n[n.length-1]]},r.copy=function(){return Gi(n,t)},e()}function Ki(n,t,e){function r(t){return e[Math.max(0,Math.min(o,Math.floor(i*(t-n))))]}function u(){return i=e.length/(t-n),o=e.length-1,r}var i,o;return r.domain=function(e){return arguments.length?(n=+e[0],t=+e[e.length-1],u()):[n,t]},r.range=function(n){return arguments.length?(e=n,u()):e},r.invertExtent=function(t){return t=e.indexOf(t),t=0>t?0/0:t/i+n,[t,t+1/i]},r.copy=function(){return Ki(n,t,e)},u()}function Qi(n,t){function e(e){return e>=e?t[Xo.bisect(n,e)]:void 0}return e.domain=function(t){return arguments.length?(n=t,e):n},e.range=function(n){return arguments.length?(t=n,e):t},e.invertExtent=function(e){return e=t.indexOf(e),[n[e-1],n[e]]},e.copy=function(){return Qi(n,t)},e}function no(n){function t(n){return+n}return 
t.invert=t,t.domain=t.range=function(e){return arguments.length?(n=e.map(t),t):n},t.ticks=function(t){return Ii(n,t)},t.tickFormat=function(t,e){return Zi(n,t,e)},t.copy=function(){return no(n)},t}function to(n){return n.innerRadius}function eo(n){return n.outerRadius}function ro(n){return n.startAngle}function uo(n){return n.endAngle}function io(n){function t(t){function o(){s.push("M",i(n(l),a))}for(var c,s=[],l=[],f=-1,h=t.length,g=_t(e),p=_t(r);++f<h;)u.call(this,c=t[f],f)?l.push([+g.call(this,c,f),+p.call(this,c,f)]):l.length&&(o(),l=[]);return l.length&&o(),s.length?s.join(""):null}var e=br,r=wr,u=be,i=oo,o=i.key,a=.7;return t.x=function(n){return arguments.length?(e=n,t):e},t.y=function(n){return arguments.length?(r=n,t):r},t.defined=function(n){return arguments.length?(u=n,t):u},t.interpolate=function(n){return arguments.length?(o="function"==typeof n?i=n:(i=Ms.get(n)||oo).key,t):o},t.tension=function(n){return arguments.length?(a=n,t):a},t}function oo(n){return n.join("L")}function ao(n){return oo(n)+"Z"}function co(n){for(var t=0,e=n.length,r=n[0],u=[r[0],",",r[1]];++t<e;)u.push("H",(r[0]+(r=n[t])[0])/2,"V",r[1]);return e>1&&u.push("H",r[0]),u.join("")}function so(n){for(var t=0,e=n.length,r=n[0],u=[r[0],",",r[1]];++t<e;)u.push("V",(r=n[t])[1],"H",r[0]);return u.join("")}function lo(n){for(var t=0,e=n.length,r=n[0],u=[r[0],",",r[1]];++t<e;)u.push("H",(r=n[t])[0],"V",r[1]);return u.join("")}function fo(n,t){return n.length<4?oo(n):n[1]+po(n.slice(1,n.length-1),vo(n,t))}function ho(n,t){return n.length<3?oo(n):n[0]+po((n.push(n[0]),n),vo([n[n.length-2]].concat(n,[n[1]]),t))}function go(n,t){return n.length<3?oo(n):n[0]+po(n,vo(n,t))}function po(n,t){if(t.length<1||n.length!=t.length&&n.length!=t.length+2)return oo(n);var 
e=n.length!=t.length,r="",u=n[0],i=n[1],o=t[0],a=o,c=1;if(e&&(r+="Q"+(i[0]-2*o[0]/3)+","+(i[1]-2*o[1]/3)+","+i[0]+","+i[1],u=n[1],c=2),t.length>1){a=t[1],i=n[c],c++,r+="C"+(u[0]+o[0])+","+(u[1]+o[1])+","+(i[0]-a[0])+","+(i[1]-a[1])+","+i[0]+","+i[1];for(var s=2;s<t.length;s++,c++)i=n[c],a=t[s],r+="S"+(i[0]-a[0])+","+(i[1]-a[1])+","+i[0]+","+i[1]}if(e){var l=n[c];r+="Q"+(i[0]+2*a[0]/3)+","+(i[1]+2*a[1]/3)+","+l[0]+","+l[1]}return r}function vo(n,t){for(var e,r=[],u=(1-t)/2,i=n[0],o=n[1],a=1,c=n.length;++a<c;)e=i,i=o,o=n[a],r.push([u*(o[0]-e[0]),u*(o[1]-e[1])]);return r}function mo(n){if(n.length<3)return oo(n);var t=1,e=n.length,r=n[0],u=r[0],i=r[1],o=[u,u,u,(r=n[1])[0]],a=[i,i,i,r[1]],c=[u,",",i,"L",_o(ws,o),",",_o(ws,a)];for(n.push(n[e-1]);++t<=e;)r=n[t],o.shift(),o.push(r[0]),a.shift(),a.push(r[1]),bo(c,o,a);return n.pop(),c.push("L",r),c.join("")}function yo(n){if(n.length<4)return oo(n);for(var t,e=[],r=-1,u=n.length,i=[0],o=[0];++r<3;)t=n[r],i.push(t[0]),o.push(t[1]);for(e.push(_o(ws,i)+","+_o(ws,o)),--r;++r<u;)t=n[r],i.shift(),i.push(t[0]),o.shift(),o.push(t[1]),bo(e,i,o);return e.join("")}function xo(n){for(var t,e,r=-1,u=n.length,i=u+4,o=[],a=[];++r<4;)e=n[r%u],o.push(e[0]),a.push(e[1]);for(t=[_o(ws,o),",",_o(ws,a)],--r;++r<i;)e=n[r%u],o.shift(),o.push(e[0]),a.shift(),a.push(e[1]),bo(t,o,a);return t.join("")}function Mo(n,t){var e=n.length-1;if(e)for(var r,u,i=n[0][0],o=n[0][1],a=n[e][0]-i,c=n[e][1]-o,s=-1;++s<=e;)r=n[s],u=s/e,r[0]=t*r[0]+(1-t)*(i+u*a),r[1]=t*r[1]+(1-t)*(o+u*c);return mo(n)}function _o(n,t){return n[0]*t[0]+n[1]*t[1]+n[2]*t[2]+n[3]*t[3]}function bo(n,t,e){n.push("C",_o(_s,t),",",_o(_s,e),",",_o(bs,t),",",_o(bs,e),",",_o(ws,t),",",_o(ws,e))}function wo(n,t){return(t[1]-n[1])/(t[0]-n[0])}function So(n){for(var t=0,e=n.length-1,r=[],u=n[0],i=n[1],o=r[0]=wo(u,i);++t<e;)r[t]=(o+(o=wo(u=i,i=n[t+1])))/2;return r[t]=o,r}function ko(n){for(var 
t,e,r,u,i=[],o=So(n),a=-1,c=n.length-1;++a<c;)t=wo(n[a],n[a+1]),oa(t)<Aa?o[a]=o[a+1]=0:(e=o[a]/t,r=o[a+1]/t,u=e*e+r*r,u>9&&(u=3*t/Math.sqrt(u),o[a]=u*e,o[a+1]=u*r));for(a=-1;++a<=c;)u=(n[Math.min(c,a+1)][0]-n[Math.max(0,a-1)][0])/(6*(1+o[a]*o[a])),i.push([u||0,o[a]*u||0]);return i}function Eo(n){return n.length<3?oo(n):n[0]+po(n,ko(n))}function Ao(n){for(var t,e,r,u=-1,i=n.length;++u<i;)t=n[u],e=t[0],r=t[1]+ys,t[0]=e*Math.cos(r),t[1]=e*Math.sin(r);return n}function Co(n){function t(t){function c(){v.push("M",a(n(m),f),l,s(n(d.reverse()),f),"Z")}for(var h,g,p,v=[],d=[],m=[],y=-1,x=t.length,M=_t(e),_=_t(u),b=e===r?function(){return g}:_t(r),w=u===i?function(){return p}:_t(i);++y<x;)o.call(this,h=t[y],y)?(d.push([g=+M.call(this,h,y),p=+_.call(this,h,y)]),m.push([+b.call(this,h,y),+w.call(this,h,y)])):d.length&&(c(),d=[],m=[]);return d.length&&c(),v.length?v.join(""):null}var e=br,r=br,u=0,i=wr,o=be,a=oo,c=a.key,s=a,l="L",f=.7;return t.x=function(n){return arguments.length?(e=r=n,t):r},t.x0=function(n){return arguments.length?(e=n,t):e},t.x1=function(n){return arguments.length?(r=n,t):r},t.y=function(n){return arguments.length?(u=i=n,t):i},t.y0=function(n){return arguments.length?(u=n,t):u},t.y1=function(n){return arguments.length?(i=n,t):i},t.defined=function(n){return arguments.length?(o=n,t):o},t.interpolate=function(n){return arguments.length?(c="function"==typeof n?a=n:(a=Ms.get(n)||oo).key,s=a.reverse||a,l=a.closed?"M":"L",t):c},t.tension=function(n){return arguments.length?(f=n,t):f},t}function No(n){return n.radius}function Lo(n){return[n.x,n.y]}function To(n){return function(){var t=n.apply(this,arguments),e=t[0],r=t[1]+ys;return[e*Math.cos(r),e*Math.sin(r)]}}function qo(){return 64}function zo(){return"circle"}function Ro(n){var t=Math.sqrt(n/Sa);return"M0,"+t+"A"+t+","+t+" 0 1,1 0,"+-t+"A"+t+","+t+" 0 1,1 0,"+t+"Z"}function Do(n,t){return fa(n,Ns),n.id=t,n}function Po(n,t,e,r){var u=n.id;return R(n,"function"==typeof 
e?function(n,i,o){n.__transition__[u].tween.set(t,r(e.call(n,n.__data__,i,o)))}:(e=r(e),function(n){n.__transition__[u].tween.set(t,e)}))}function Uo(n){return null==n&&(n=""),function(){this.textContent=n}}function jo(n,t,e,r){var i=n.__transition__||(n.__transition__={active:0,count:0}),o=i[e];if(!o){var a=r.time;o=i[e]={tween:new u,time:a,ease:r.ease,delay:r.delay,duration:r.duration},++i.count,Xo.timer(function(r){function u(r){return i.active>e?s():(i.active=e,o.event&&o.event.start.call(n,l,t),o.tween.forEach(function(e,r){(r=r.call(n,l,t))&&v.push(r)}),Xo.timer(function(){return p.c=c(r||1)?be:c,1},0,a),void 0)}function c(r){if(i.active!==e)return s();for(var u=r/g,a=f(u),c=v.length;c>0;)v[--c].call(n,a);return u>=1?(o.event&&o.event.end.call(n,l,t),s()):void 0}function s(){return--i.count?delete i[e]:delete n.__transition__,1}var l=n.__data__,f=o.ease,h=o.delay,g=o.duration,p=Ja,v=[];return p.t=h+a,r>=h?u(r-h):(p.c=u,void 0)},0,a)}}function Ho(n,t){n.attr("transform",function(n){return"translate("+t(n)+",0)"})}function Fo(n,t){n.attr("transform",function(n){return"translate(0,"+t(n)+")"})}function Oo(n){return n.toISOString()}function Yo(n,t,e){function r(t){return n(t)}function u(n,e){var r=n[1]-n[0],u=r/e,i=Xo.bisect(js,u);return i==js.length?[t.year,Yi(n.map(function(n){return n/31536e6}),e)[2]]:i?t[u/js[i-1]<js[i]/u?i-1:i]:[Os,Yi(n,e)[2]]}return r.invert=function(t){return Io(n.invert(t))},r.domain=function(t){return arguments.length?(n.domain(t),r):n.domain().map(Io)},r.nice=function(n,t){function e(e){return!isNaN(e)&&!n.range(e,Io(+e+1),t).length}var i=r.domain(),o=zi(i),a=null==n?u(o,10):"number"==typeof n&&u(o,n);return a&&(n=a[0],t=a[1]),r.domain(Pi(i,t>1?{floor:function(t){for(;e(t=n.floor(t));)t=Io(t-1);return t},ceil:function(t){for(;e(t=n.ceil(t));)t=Io(+t+1);return t}}:n))},r.ticks=function(n,t){var e=zi(r.domain()),i=null==n?u(e,10):"number"==typeof n?u(e,n):!n.range&&[{range:n},t];return 
i&&(n=i[0],t=i[1]),n.range(e[0],Io(+e[1]+1),1>t?1:t)},r.tickFormat=function(){return e},r.copy=function(){return Yo(n.copy(),t,e)},Fi(r,n)}function Io(n){return new Date(n)}function Zo(n){return JSON.parse(n.responseText)}function Vo(n){var t=Wo.createRange();return t.selectNode(Wo.body),t.createContextualFragment(n.responseText)}var Xo={version:"3.4.3"};Date.now||(Date.now=function(){return+new Date});var $o=[].slice,Bo=function(n){return $o.call(n)},Wo=document,Jo=Wo.documentElement,Go=window;try{Bo(Jo.childNodes)[0].nodeType}catch(Ko){Bo=function(n){for(var t=n.length,e=new Array(t);t--;)e[t]=n[t];return e}}try{Wo.createElement("div").style.setProperty("opacity",0,"")}catch(Qo){var na=Go.Element.prototype,ta=na.setAttribute,ea=na.setAttributeNS,ra=Go.CSSStyleDeclaration.prototype,ua=ra.setProperty;na.setAttribute=function(n,t){ta.call(this,n,t+"")},na.setAttributeNS=function(n,t,e){ea.call(this,n,t,e+"")},ra.setProperty=function(n,t,e){ua.call(this,n,t+"",e)}}Xo.ascending=function(n,t){return t>n?-1:n>t?1:n>=t?0:0/0},Xo.descending=function(n,t){return n>t?-1:t>n?1:t>=n?0:0/0},Xo.min=function(n,t){var e,r,u=-1,i=n.length;if(1===arguments.length){for(;++u<i&&!(null!=(e=n[u])&&e>=e);)e=void 0;for(;++u<i;)null!=(r=n[u])&&e>r&&(e=r)}else{for(;++u<i&&!(null!=(e=t.call(n,n[u],u))&&e>=e);)e=void 0;for(;++u<i;)null!=(r=t.call(n,n[u],u))&&e>r&&(e=r)}return e},Xo.max=function(n,t){var e,r,u=-1,i=n.length;if(1===arguments.length){for(;++u<i&&!(null!=(e=n[u])&&e>=e);)e=void 0;for(;++u<i;)null!=(r=n[u])&&r>e&&(e=r)}else{for(;++u<i&&!(null!=(e=t.call(n,n[u],u))&&e>=e);)e=void 0;for(;++u<i;)null!=(r=t.call(n,n[u],u))&&r>e&&(e=r)}return e},Xo.extent=function(n,t){var e,r,u,i=-1,o=n.length;if(1===arguments.length){for(;++i<o&&!(null!=(e=u=n[i])&&e>=e);)e=u=void 0;for(;++i<o;)null!=(r=n[i])&&(e>r&&(e=r),r>u&&(u=r))}else{for(;++i<o&&!(null!=(e=u=t.call(n,n[i],i))&&e>=e);)e=void 0;for(;++i<o;)null!=(r=t.call(n,n[i],i))&&(e>r&&(e=r),r>u&&(u=r))}return[e,u]},Xo.sum=function(n,t){var 
e,r=0,u=n.length,i=-1;if(1===arguments.length)for(;++i<u;)isNaN(e=+n[i])||(r+=e);else for(;++i<u;)isNaN(e=+t.call(n,n[i],i))||(r+=e);return r},Xo.mean=function(t,e){var r,u=t.length,i=0,o=-1,a=0;if(1===arguments.length)for(;++o<u;)n(r=t[o])&&(i+=(r-i)/++a);else for(;++o<u;)n(r=e.call(t,t[o],o))&&(i+=(r-i)/++a);return a?i:void 0},Xo.quantile=function(n,t){var e=(n.length-1)*t+1,r=Math.floor(e),u=+n[r-1],i=e-r;return i?u+i*(n[r]-u):u},Xo.median=function(t,e){return arguments.length>1&&(t=t.map(e)),t=t.filter(n),t.length?Xo.quantile(t.sort(Xo.ascending),.5):void 0},Xo.bisector=function(n){return{left:function(t,e,r,u){for(arguments.length<3&&(r=0),arguments.length<4&&(u=t.length);u>r;){var i=r+u>>>1;n.call(t,t[i],i)<e?r=i+1:u=i}return r},right:function(t,e,r,u){for(arguments.length<3&&(r=0),arguments.length<4&&(u=t.length);u>r;){var i=r+u>>>1;e<n.call(t,t[i],i)?u=i:r=i+1}return r}}};var ia=Xo.bisector(function(n){return n});Xo.bisectLeft=ia.left,Xo.bisect=Xo.bisectRight=ia.right,Xo.shuffle=function(n){for(var t,e,r=n.length;r;)e=0|Math.random()*r--,t=n[r],n[r]=n[e],n[e]=t;return n},Xo.permute=function(n,t){for(var e=t.length,r=new Array(e);e--;)r[e]=n[t[e]];return r},Xo.pairs=function(n){for(var t,e=0,r=n.length-1,u=n[0],i=new Array(0>r?0:r);r>e;)i[e]=[t=u,u=n[++e]];return i},Xo.zip=function(){if(!(u=arguments.length))return[];for(var n=-1,e=Xo.min(arguments,t),r=new Array(e);++n<e;)for(var u,i=-1,o=r[n]=new Array(u);++i<u;)o[i]=arguments[i][n];return r},Xo.transpose=function(n){return Xo.zip.apply(Xo,n)},Xo.keys=function(n){var t=[];for(var e in n)t.push(e);return t},Xo.values=function(n){var t=[];for(var e in n)t.push(n[e]);return t},Xo.entries=function(n){var t=[];for(var e in n)t.push({key:e,value:n[e]});return t},Xo.merge=function(n){for(var t,e,r,u=n.length,i=-1,o=0;++i<u;)o+=n[i].length;for(e=new Array(o);--u>=0;)for(r=n[u],t=r.length;--t>=0;)e[--o]=r[t];return e};var 
oa=Math.abs;Xo.range=function(n,t,r){if(arguments.length<3&&(r=1,arguments.length<2&&(t=n,n=0)),1/0===(t-n)/r)throw new Error("infinite range");var u,i=[],o=e(oa(r)),a=-1;if(n*=o,t*=o,r*=o,0>r)for(;(u=n+r*++a)>t;)i.push(u/o);else for(;(u=n+r*++a)<t;)i.push(u/o);return i},Xo.map=function(n){var t=new u;if(n instanceof u)n.forEach(function(n,e){t.set(n,e)});else for(var e in n)t.set(e,n[e]);return t},r(u,{has:i,get:function(n){return this[aa+n]},set:function(n,t){return this[aa+n]=t},remove:o,keys:a,values:function(){var n=[];return this.forEach(function(t,e){n.push(e)}),n},entries:function(){var n=[];return this.forEach(function(t,e){n.push({key:t,value:e})}),n},size:c,empty:s,forEach:function(n){for(var t in this)t.charCodeAt(0)===ca&&n.call(this,t.substring(1),this[t])}});var aa="\x00",ca=aa.charCodeAt(0);Xo.nest=function(){function n(t,a,c){if(c>=o.length)return r?r.call(i,a):e?a.sort(e):a;for(var s,l,f,h,g=-1,p=a.length,v=o[c++],d=new u;++g<p;)(h=d.get(s=v(l=a[g])))?h.push(l):d.set(s,[l]);return t?(l=t(),f=function(e,r){l.set(e,n(t,r,c))}):(l={},f=function(e,r){l[e]=n(t,r,c)}),d.forEach(f),l}function t(n,e){if(e>=o.length)return n;var r=[],u=a[e++];return n.forEach(function(n,u){r.push({key:n,values:t(u,e)})}),u?r.sort(function(n,t){return u(n.key,t.key)}):r}var e,r,i={},o=[],a=[];return i.map=function(t,e){return n(e,t,0)},i.entries=function(e){return t(n(Xo.map,e,0),0)},i.key=function(n){return o.push(n),i},i.sortKeys=function(n){return a[o.length-1]=n,i},i.sortValues=function(n){return e=n,i},i.rollup=function(n){return r=n,i},i},Xo.set=function(n){var t=new l;if(n)for(var e=0,r=n.length;r>e;++e)t.add(n[e]);return t},r(l,{has:i,add:function(n){return this[aa+n]=!0,n},remove:function(n){return n=aa+n,n in this&&delete this[n]},values:a,size:c,empty:s,forEach:function(n){for(var t in this)t.charCodeAt(0)===ca&&n.call(this,t.substring(1))}}),Xo.behavior={},Xo.rebind=function(n,t){for(var e,r=1,u=arguments.length;++r<u;)n[e=arguments[r]]=f(n,t,t[e]);return n};var 
// NOTE(review): minified D3 v3 selection/event module — code kept byte-identical, comments
// only; comment lines are inserted only at pre-existing line breaks.
// Line below: `sa` vendor-prefix list; Xo.dispatch (custom event dispatcher whose .on
// supports "type.namespace" listeners); Xo.requote (regex-escape via `la`); `fa` is a
// __proto__ setter with a copy-properties fallback; selector shims `ha`/`ga`/`va` use
// querySelector(All)/matchesSelector, replaced by Sizzle when present; selection prototype
// `da` with select/selectAll (propagating __data__ to selected children); XML namespace
// table `ma` and Xo.ns.qualify; start of da.attr (get one attr, or set via each()).
sa=["webkit","ms","moz","Moz","o","O"];Xo.dispatch=function(){for(var n=new p,t=-1,e=arguments.length;++t<e;)n[arguments[t]]=v(n);return n},p.prototype.on=function(n,t){var e=n.indexOf("."),r="";if(e>=0&&(r=n.substring(e+1),n=n.substring(0,e)),n)return arguments.length<2?this[n].on(r):this[n].on(r,t);if(2===arguments.length){if(null==t)for(n in this)this.hasOwnProperty(n)&&this[n].on(r,null);return this}},Xo.event=null,Xo.requote=function(n){return n.replace(la,"\\$&")};var la=/[\\\^\$\*\+\?\|\[\]\(\)\.\{\}]/g,fa={}.__proto__?function(n,t){n.__proto__=t}:function(n,t){for(var e in t)n[e]=t[e]},ha=function(n,t){return t.querySelector(n)},ga=function(n,t){return t.querySelectorAll(n)},pa=Jo[h(Jo,"matchesSelector")],va=function(n,t){return pa.call(n,t)};"function"==typeof Sizzle&&(ha=function(n,t){return Sizzle(n,t)[0]||null},ga=Sizzle,va=Sizzle.matchesSelector),Xo.selection=function(){return xa};var da=Xo.selection.prototype=[];da.select=function(n){var t,e,r,u,i=[];n=M(n);for(var o=-1,a=this.length;++o<a;){i.push(t=[]),t.parentNode=(r=this[o]).parentNode;for(var c=-1,s=r.length;++c<s;)(u=r[c])?(t.push(e=n.call(u,u.__data__,c,o)),e&&"__data__"in u&&(e.__data__=u.__data__)):t.push(null)}return x(i)},da.selectAll=function(n){var t,e,r=[];n=_(n);for(var u=-1,i=this.length;++u<i;)for(var o=this[u],a=-1,c=o.length;++a<c;)(e=o[a])&&(r.push(t=Bo(n.call(e,e.__data__,a,u))),t.parentNode=e);return x(r)};var ma={svg:"http://www.w3.org/2000/svg",xhtml:"http://www.w3.org/1999/xhtml",xlink:"http://www.w3.org/1999/xlink",xml:"http://www.w3.org/XML/1998/namespace",xmlns:"http://www.w3.org/2000/xmlns/"};Xo.ns={prefix:ma,qualify:function(n){var t=n.indexOf(":"),e=n;return t>=0&&(e=n.substring(0,t),n=n.substring(t+1)),ma.hasOwnProperty(e)?{space:ma[e],local:n}:n}},da.attr=function(n,t){if(arguments.length<2){if("string"==typeof n){var e=this.node();return n=Xo.ns.qualify(n),n.local?e.getAttributeNS(n.space,n.local):e.getAttribute(n)}for(t in n)this.each(b(t,n[t]));return this}return 
// Line below: da.classed (classList fast path with getAttribute fallback), da.style
// (get computed style / set with optional priority), da.property, da.text, da.html
// (each accepts a constant, a function, or null-to-clear), da.append / da.insert /
// da.remove, and the start of da.data — the data join; the key-function branch builds
// maps `m`/`y` to pair existing nodes with new data by key.
this.each(b(n,t))},da.classed=function(n,t){if(arguments.length<2){if("string"==typeof n){var e=this.node(),r=(n=k(n)).length,u=-1;if(t=e.classList){for(;++u<r;)if(!t.contains(n[u]))return!1}else for(t=e.getAttribute("class");++u<r;)if(!S(n[u]).test(t))return!1;return!0}for(t in n)this.each(E(t,n[t]));return this}return this.each(E(n,t))},da.style=function(n,t,e){var r=arguments.length;if(3>r){if("string"!=typeof n){2>r&&(t="");for(e in n)this.each(C(e,n[e],t));return this}if(2>r)return Go.getComputedStyle(this.node(),null).getPropertyValue(n);e=""}return this.each(C(n,t,e))},da.property=function(n,t){if(arguments.length<2){if("string"==typeof n)return this.node()[n];for(t in n)this.each(N(t,n[t]));return this}return this.each(N(n,t))},da.text=function(n){return arguments.length?this.each("function"==typeof n?function(){var t=n.apply(this,arguments);this.textContent=null==t?"":t}:null==n?function(){this.textContent=""}:function(){this.textContent=n}):this.node().textContent},da.html=function(n){return arguments.length?this.each("function"==typeof n?function(){var t=n.apply(this,arguments);this.innerHTML=null==t?"":t}:null==n?function(){this.innerHTML=""}:function(){this.innerHTML=n}):this.node().innerHTML},da.append=function(n){return n=L(n),this.select(function(){return this.appendChild(n.apply(this,arguments))})},da.insert=function(n,t){return n=L(n),t=M(t),this.select(function(){return this.insertBefore(n.apply(this,arguments),t.apply(this,arguments)||null)})},da.remove=function(){return this.each(function(){var n=this.parentNode;n&&n.removeChild(this)})},da.data=function(n,t){function e(n,e){var r,i,o,a=n.length,f=e.length,h=Math.min(a,f),g=new Array(f),p=new Array(f),v=new Array(a);if(t){var d,m=new u,y=new 
// Line below: data-join continuation — `g`/`p`/`v` are the update/enter/exit node arrays
// (by key when a key function is given, else by index); da.datum, da.filter, da.order
// (re-inserts out-of-order siblings), da.sort, da.each, da.call, da.empty, da.node,
// da.size.
u,x=[];for(r=-1;++r<a;)d=t.call(i=n[r],i.__data__,r),m.has(d)?v[r]=i:m.set(d,i),x.push(d);for(r=-1;++r<f;)d=t.call(e,o=e[r],r),(i=m.get(d))?(g[r]=i,i.__data__=o):y.has(d)||(p[r]=T(o)),y.set(d,o),m.remove(d);for(r=-1;++r<a;)m.has(x[r])&&(v[r]=n[r])}else{for(r=-1;++r<h;)i=n[r],o=e[r],i?(i.__data__=o,g[r]=i):p[r]=T(o);for(;f>r;++r)p[r]=T(e[r]);for(;a>r;++r)v[r]=n[r]}p.update=g,p.parentNode=g.parentNode=v.parentNode=n.parentNode,c.push(p),s.push(g),l.push(v)}var r,i,o=-1,a=this.length;if(!arguments.length){for(n=new Array(a=(r=this[0]).length);++o<a;)(i=r[o])&&(n[o]=i.__data__);return n}var c=D([]),s=x([]),l=x([]);if("function"==typeof n)for(;++o<a;)e(r=this[o],n.call(r,r.parentNode.__data__,o));else for(;++o<a;)e(r=this[o],n);return s.enter=function(){return c},s.exit=function(){return l},s},da.datum=function(n){return arguments.length?this.property("__data__",n):this.property("__data__")},da.filter=function(n){var t,e,r,u=[];"function"!=typeof n&&(n=q(n));for(var i=0,o=this.length;o>i;i++){u.push(t=[]),t.parentNode=(e=this[i]).parentNode;for(var a=0,c=e.length;c>a;a++)(r=e[a])&&n.call(r,r.__data__,a,i)&&t.push(r)}return x(u)},da.order=function(){for(var n=-1,t=this.length;++n<t;)for(var e,r=this[n],u=r.length-1,i=r[u];--u>=0;)(e=r[u])&&(i&&i!==e.nextSibling&&i.parentNode.insertBefore(e,i),i=e);return this},da.sort=function(n){n=z.apply(this,arguments);for(var t=-1,e=this.length;++t<e;)this[t].sort(n);return this.order()},da.each=function(n){return R(this,function(t,e,r){n.call(t,t.__data__,e,r)})},da.call=function(n){var t=Bo(arguments);return n.apply(t[0]=this,t),this},da.empty=function(){return!this.node()},da.node=function(){for(var n=0,t=this.length;t>n;n++)for(var e=this[n],r=0,u=e.length;u>r;r++){var i=e[r];if(i)return i}return null},da.size=function(){var n=0;return this.each(function(){++n}),n};var 
// Line below: enter-selection prototype `ya` (borrows append/empty/node/call/size from
// `da`; its select writes created nodes back into the update array), da.transition /
// da.interrupt, top-level Xo.select / Xo.selectAll (accept a CSS string or a node),
// root selection `xa`, da.on (namespaced DOM listeners, with mouseenter/mouseleave
// emulated via the `Ma` map where unsupported), Xo.mouse, Xo.touches, and the start of
// Xo.behavior.drag (mousedown/touchstart wiring).
ya=[];Xo.selection.enter=D,Xo.selection.enter.prototype=ya,ya.append=da.append,ya.empty=da.empty,ya.node=da.node,ya.call=da.call,ya.size=da.size,ya.select=function(n){for(var t,e,r,u,i,o=[],a=-1,c=this.length;++a<c;){r=(u=this[a]).update,o.push(t=[]),t.parentNode=u.parentNode;for(var s=-1,l=u.length;++s<l;)(i=u[s])?(t.push(r[s]=e=n.call(u.parentNode,i.__data__,s,a)),e.__data__=i.__data__):t.push(null)}return x(o)},ya.insert=function(n,t){return arguments.length<2&&(t=P(this)),da.insert.call(this,n,t)},da.transition=function(){for(var n,t,e=ks||++Ls,r=[],u=Es||{time:Date.now(),ease:yu,delay:0,duration:250},i=-1,o=this.length;++i<o;){r.push(n=[]);for(var a=this[i],c=-1,s=a.length;++c<s;)(t=a[c])&&jo(t,c,e,u),n.push(t)}return Do(r,e)},da.interrupt=function(){return this.each(U)},Xo.select=function(n){var t=["string"==typeof n?ha(n,Wo):n];return t.parentNode=Jo,x([t])},Xo.selectAll=function(n){var t=Bo("string"==typeof n?ga(n,Wo):n);return t.parentNode=Jo,x([t])};var xa=Xo.select(Jo);da.on=function(n,t,e){var r=arguments.length;if(3>r){if("string"!=typeof n){2>r&&(t=!1);for(e in n)this.each(j(e,n[e],t));return this}if(2>r)return(r=this.node()["__on"+n])&&r._;e=!1}return this.each(j(n,t,e))};var Ma=Xo.map({mouseenter:"mouseover",mouseleave:"mouseout"});Ma.forEach(function(n){"on"+n in Wo&&Ma.remove(n)});var _a="onselectstart"in Wo?null:h(Jo.style,"userSelect"),ba=0;Xo.mouse=function(n){return Y(n,m())};var wa=/WebKit/.test(Go.navigator.userAgent)?-1:0;Xo.touches=function(n,t){return arguments.length<2&&(t=m().touches),t?Bo(t).map(function(t){var e=Y(n,t);return e.identifier=t.identifier,e}):[]},Xo.behavior.drag=function(){function n(){this.on("mousedown.drag",o).on("touchstart.drag",a)}function t(){return Xo.event.changedTouches[0].identifier}function e(n,t){return Xo.touches(n).filter(function(n){return n.identifier===t})[0]}function r(n,t,e,r){return function(){function o(){var 
// NOTE(review): minified D3 v3 drag/zoom behaviors and start of color support — code
// kept byte-identical, comments only, inserted at pre-existing line breaks.
// Line below: drag gesture internals — `o` emits {type:"drag",x,y,dx,dy} events, `a`
// tears down the move/end listeners and emits "dragend"; drag origin accessor; math
// constants (Sa = PI, Na/La = degree<->radian factors, Ta = SQRT2, Aa = 1e-6 epsilon);
// Xo.interpolateZoom — smooth pan-and-zoom interpolator between [x,y,width] views
// (degenerates to linear when start/end are coincident, `m` falsy); start of
// Xo.behavior.zoom: `S` is the {x,y,k} transform state, `r`/`u` clamp scale and adjust
// translate so a fixed point stays under the pointer, `i` rescales attached x/y scales.
n=t(l,g),e=n[0]-v[0],r=n[1]-v[1];d|=e|r,v=n,f({type:"drag",x:n[0]+c[0],y:n[1]+c[1],dx:e,dy:r})}function a(){m.on(e+"."+p,null).on(r+"."+p,null),y(d&&Xo.event.target===h),f({type:"dragend"})}var c,s=this,l=s.parentNode,f=u.of(s,arguments),h=Xo.event.target,g=n(),p=null==g?"drag":"drag-"+g,v=t(l,g),d=0,m=Xo.select(Go).on(e+"."+p,o).on(r+"."+p,a),y=O();i?(c=i.apply(s,arguments),c=[c.x-v[0],c.y-v[1]]):c=[0,0],f({type:"dragstart"})}}var u=y(n,"drag","dragstart","dragend"),i=null,o=r(g,Xo.mouse,"mousemove","mouseup"),a=r(t,e,"touchmove","touchend");return n.origin=function(t){return arguments.length?(i=t,n):i},Xo.rebind(n,u,"on")};var Sa=Math.PI,ka=2*Sa,Ea=Sa/2,Aa=1e-6,Ca=Aa*Aa,Na=Sa/180,La=180/Sa,Ta=Math.SQRT2,qa=2,za=4;Xo.interpolateZoom=function(n,t){function e(n){var t=n*y;if(m){var e=B(v),o=i/(qa*h)*(e*W(Ta*t+v)-$(v));return[r+o*s,u+o*l,i*e/B(Ta*t+v)]}return[r+n*s,u+n*l,i*Math.exp(Ta*t)]}var r=n[0],u=n[1],i=n[2],o=t[0],a=t[1],c=t[2],s=o-r,l=a-u,f=s*s+l*l,h=Math.sqrt(f),g=(c*c-i*i+za*f)/(2*i*qa*h),p=(c*c-i*i-za*f)/(2*c*qa*h),v=Math.log(Math.sqrt(g*g+1)-g),d=Math.log(Math.sqrt(p*p+1)-p),m=d-v,y=(m||Math.log(c/i))/Ta;return e.duration=1e3*y,e},Xo.behavior.zoom=function(){function n(n){n.on(A,s).on(Pa+".zoom",f).on(C,h).on("dblclick.zoom",g).on(L,l)}function t(n){return[(n[0]-S.x)/S.k,(n[1]-S.y)/S.k]}function e(n){return[n[0]*S.k+S.x,n[1]*S.k+S.y]}function r(n){S.k=Math.max(E[0],Math.min(E[1],n))}function u(n,t){t=e(t),S.x+=n[0]-t[0],S.y+=n[1]-t[1]}function i(){_&&_.domain(M.range().map(function(n){return(n-S.x)/S.k}).map(M.invert)),w&&w.domain(b.range().map(function(n){return(n-S.y)/S.k}).map(b.invert))}function o(n){n({type:"zoomstart"})}function a(n){i(),n({type:"zoom",scale:S.k,translate:[S.x,S.y]})}function c(n){n({type:"zoomend"})}function s(){function n(){l=1,u(Xo.mouse(r),g),a(i)}function e(){f.on(C,Go===r?h:null).on(N,null),p(l&&Xo.event.target===s),c(i)}var 
// Line below: zoom touch handling — double-tap within 500ms doubles the scale; with two
// touches, `m` caches the squared pinch distance and `i` rescales by sqrt(dist/m);
// wheel handler `f` (debounced zoomstart/zoomend via a 50ms timeout), dblclick handler
// `g` (shift reverses direction), the zoom state vars (S transform, E scale extent,
// event-name constants), and the start of n.event, which replays a zoom via a
// transition tween when one is active.
r=this,i=T.of(r,arguments),s=Xo.event.target,l=0,f=Xo.select(Go).on(C,n).on(N,e),g=t(Xo.mouse(r)),p=O();U.call(r),o(i)}function l(){function n(){var n=Xo.touches(g);return h=S.k,n.forEach(function(n){n.identifier in v&&(v[n.identifier]=t(n))}),n}function e(){for(var t=Xo.event.changedTouches,e=0,i=t.length;i>e;++e)v[t[e].identifier]=null;var o=n(),c=Date.now();if(1===o.length){if(500>c-x){var s=o[0],l=v[s.identifier];r(2*S.k),u(s,l),d(),a(p)}x=c}else if(o.length>1){var s=o[0],f=o[1],h=s[0]-f[0],g=s[1]-f[1];m=h*h+g*g}}function i(){for(var n,t,e,i,o=Xo.touches(g),c=0,s=o.length;s>c;++c,i=null)if(e=o[c],i=v[e.identifier]){if(t)break;n=e,t=i}if(i){var l=(l=e[0]-n[0])*l+(l=e[1]-n[1])*l,f=m&&Math.sqrt(l/m);n=[(n[0]+e[0])/2,(n[1]+e[1])/2],t=[(t[0]+i[0])/2,(t[1]+i[1])/2],r(f*h)}x=null,u(n,t),a(p)}function f(){if(Xo.event.touches.length){for(var t=Xo.event.changedTouches,e=0,r=t.length;r>e;++e)delete v[t[e].identifier];for(var u in v)return void n()}b.on(M,null).on(_,null),w.on(A,s).on(L,l),k(),c(p)}var h,g=this,p=T.of(g,arguments),v={},m=0,y=Xo.event.changedTouches[0].identifier,M="touchmove.zoom-"+y,_="touchend.zoom-"+y,b=Xo.select(Go).on(M,i).on(_,f),w=Xo.select(g).on(A,null).on(L,e),k=O();U.call(g),e(),o(p)}function f(){var n=T.of(this,arguments);m?clearTimeout(m):(U.call(this),o(n)),m=setTimeout(function(){m=null,c(n)},50),d();var e=v||Xo.mouse(this);p||(p=t(e)),r(Math.pow(2,.002*Ra())*S.k),u(e,p),a(n)}function h(){p=null}function g(){var n=T.of(this,arguments),e=Xo.mouse(this),i=t(e),s=Math.log(S.k)/Math.LN2;o(n),r(Math.pow(2,Xo.event.shiftKey?Math.ceil(s)-1:Math.floor(s)+1)),u(e,i),a(n),c(n)}var p,v,m,x,M,_,b,w,S={x:0,y:0,k:1},k=[960,500],E=Da,A="mousedown.zoom",C="mousemove.zoom",N="mouseup.zoom",L="touchstart.zoom",T=y(n,"zoomstart","zoom","zoomend");return n.event=function(n){n.each(function(){var n=T.of(this,arguments),t=S;ks?Xo.select(this).transition().each("start.zoom",function(){S=this.__chart__||{x:0,y:0,k:1},o(n)}).tween("zoom:zoom",function(){var 
// Line below: zoom event tween end (interpolates the view via Xo.interpolateZoom and
// stores __chart__ on the element) plus the public accessors translate/scale/
// scaleExtent/center/size/x/y; `Pa`/`Ra` feature-detect the wheel event name and delta
// sign ("wheel" / "mousewheel" / "MozMousePixelScroll"); then the color module begins:
// base class G.toString, Xo.hsl with HSL prototype `Ua` (brighter/darker scale
// lightness by 0.7^k), Xo.hcl with HCL prototype `ja`, and the start of Xo.lab.
e=k[0],r=k[1],u=e/2,i=r/2,o=Xo.interpolateZoom([(u-S.x)/S.k,(i-S.y)/S.k,e/S.k],[(u-t.x)/t.k,(i-t.y)/t.k,e/t.k]);return function(t){var r=o(t),c=e/r[2];this.__chart__=S={x:u-r[0]*c,y:i-r[1]*c,k:c},a(n)}}).each("end.zoom",function(){c(n)}):(this.__chart__=S,o(n),a(n),c(n))})},n.translate=function(t){return arguments.length?(S={x:+t[0],y:+t[1],k:S.k},i(),n):[S.x,S.y]},n.scale=function(t){return arguments.length?(S={x:S.x,y:S.y,k:+t},i(),n):S.k},n.scaleExtent=function(t){return arguments.length?(E=null==t?Da:[+t[0],+t[1]],n):E},n.center=function(t){return arguments.length?(v=t&&[+t[0],+t[1]],n):v},n.size=function(t){return arguments.length?(k=t&&[+t[0],+t[1]],n):k},n.x=function(t){return arguments.length?(_=t,M=t.copy(),S={x:0,y:0,k:1},n):_},n.y=function(t){return arguments.length?(w=t,b=t.copy(),S={x:0,y:0,k:1},n):w},Xo.rebind(n,T,"on")};var Ra,Da=[0,1/0],Pa="onwheel"in Wo?(Ra=function(){return-Xo.event.deltaY*(Xo.event.deltaMode?120:1)},"wheel"):"onmousewheel"in Wo?(Ra=function(){return Xo.event.wheelDelta},"mousewheel"):(Ra=function(){return-Xo.event.detail},"MozMousePixelScroll");G.prototype.toString=function(){return this.rgb()+""},Xo.hsl=function(n,t,e){return 1===arguments.length?n instanceof Q?K(n.h,n.s,n.l):dt(""+n,mt,K):K(+n,+t,+e)};var Ua=Q.prototype=new G;Ua.brighter=function(n){return n=Math.pow(.7,arguments.length?n:1),K(this.h,this.s,this.l/n)},Ua.darker=function(n){return n=Math.pow(.7,arguments.length?n:1),K(this.h,this.s,n*this.l)},Ua.rgb=function(){return nt(this.h,this.s,this.l)},Xo.hcl=function(n,t,e){return 1===arguments.length?n instanceof et?tt(n.h,n.c,n.l):n instanceof it?at(n.l,n.a,n.b):at((n=yt((n=Xo.rgb(n)).r,n.g,n.b)).l,n.a,n.b):tt(+n,+t,+e)};var ja=et.prototype=new G;ja.brighter=function(n){return tt(this.h,this.c,Math.min(100,this.l+Ha*(arguments.length?n:1)))},ja.darker=function(n){return tt(this.h,this.c,Math.max(0,this.l-Ha*(arguments.length?n:1)))},ja.rgb=function(){return rt(this.h,this.c,this.l).rgb()},Xo.lab=function(n,t,e){return 
// NOTE(review): minified D3 v3 color spaces + dsv parser — code kept byte-identical,
// comments only, inserted at pre-existing line breaks.
// Line below: Xo.lab constructor body; Lab constants (Ha=18 lightness step; Fa/Oa/Ya
// look like a white-point triple — TODO confirm) and Lab prototype `Ia`; Xo.rgb with
// RGB prototype `Za` — brighter floors channels at 30 before dividing by 0.7^k so pure
// black still brightens, darker multiplies channels, plus hsl() and "#rrggbb"
// toString().
1===arguments.length?n instanceof it?ut(n.l,n.a,n.b):n instanceof et?rt(n.l,n.c,n.h):yt((n=Xo.rgb(n)).r,n.g,n.b):ut(+n,+t,+e)};var Ha=18,Fa=.95047,Oa=1,Ya=1.08883,Ia=it.prototype=new G;Ia.brighter=function(n){return ut(Math.min(100,this.l+Ha*(arguments.length?n:1)),this.a,this.b)},Ia.darker=function(n){return ut(Math.max(0,this.l-Ha*(arguments.length?n:1)),this.a,this.b)},Ia.rgb=function(){return ot(this.l,this.a,this.b)},Xo.rgb=function(n,t,e){return 1===arguments.length?n instanceof pt?gt(n.r,n.g,n.b):dt(""+n,gt,nt):gt(~~n,~~t,~~e)};var Za=pt.prototype=new G;Za.brighter=function(n){n=Math.pow(.7,arguments.length?n:1);var t=this.r,e=this.g,r=this.b,u=30;return t||e||r?(t&&u>t&&(t=u),e&&u>e&&(e=u),r&&u>r&&(r=u),gt(Math.min(255,~~(t/n)),Math.min(255,~~(e/n)),Math.min(255,~~(r/n)))):gt(u,u,u)},Za.darker=function(n){return n=Math.pow(.7,arguments.length?n:1),gt(~~(n*this.r),~~(n*this.g),~~(n*this.b))},Za.hsl=function(){return mt(this.r,this.g,this.b)},Za.toString=function(){return"#"+vt(this.r)+vt(this.g)+vt(this.b)};var 
// Line below: CSS named-color table `Va` (color name -> packed 24-bit integer), first
// half, aliceblue..orchid.
Va=Xo.map({aliceblue:15792383,antiquewhite:16444375,aqua:65535,aquamarine:8388564,azure:15794175,beige:16119260,bisque:16770244,black:0,blanchedalmond:16772045,blue:255,blueviolet:9055202,brown:10824234,burlywood:14596231,cadetblue:6266528,chartreuse:8388352,chocolate:13789470,coral:16744272,cornflowerblue:6591981,cornsilk:16775388,crimson:14423100,cyan:65535,darkblue:139,darkcyan:35723,darkgoldenrod:12092939,darkgray:11119017,darkgreen:25600,darkgrey:11119017,darkkhaki:12433259,darkmagenta:9109643,darkolivegreen:5597999,darkorange:16747520,darkorchid:10040012,darkred:9109504,darksalmon:15308410,darkseagreen:9419919,darkslateblue:4734347,darkslategray:3100495,darkslategrey:3100495,darkturquoise:52945,darkviolet:9699539,deeppink:16716947,deepskyblue:49151,dimgray:6908265,dimgrey:6908265,dodgerblue:2003199,firebrick:11674146,floralwhite:16775920,forestgreen:2263842,fuchsia:16711935,gainsboro:14474460,ghostwhite:16316671,gold:16766720,goldenrod:14329120,gray:8421504,green:32768,greenyellow:11403055,grey:8421504,honeydew:15794160,hotpink:16738740,indianred:13458524,indigo:4915330,ivory:16777200,khaki:15787660,lavender:15132410,lavenderblush:16773365,lawngreen:8190976,lemonchiffon:16775885,lightblue:11393254,lightcoral:15761536,lightcyan:14745599,lightgoldenrodyellow:16448210,lightgray:13882323,lightgreen:9498256,lightgrey:13882323,lightpink:16758465,lightsalmon:16752762,lightseagreen:2142890,lightskyblue:8900346,lightslategray:7833753,lightslategrey:7833753,lightsteelblue:11584734,lightyellow:16777184,lime:65280,limegreen:3329330,linen:16445670,magenta:16711935,maroon:8388608,mediumaquamarine:6737322,mediumblue:205,mediumorchid:12211667,mediumpurple:9662683,mediumseagreen:3978097,mediumslateblue:8087790,mediumspringgreen:64154,mediumturquoise:4772300,mediumvioletred:13047173,midnightblue:1644912,mintcream:16121850,mistyrose:16770273,moccasin:16770229,navajowhite:16768685,navy:128,oldlace:16643558,olive:8421376,olivedrab:7048739,orange:16753920,orangered:16729344,orchid:
// Line below: named-color table second half (values re-wrapped into rgb objects via
// `ft` right after); Xo.functor, Xo.xhr; Xo.dsv(delimiter, mimeType) factory — `o`
// quotes fields containing the delimiter/quote/newline by doubling embedded quotes;
// parse() compiles a row object from the header line with new Function; parseRows'
// inner `e` is a tokenizer that walks the text handling quoted fields ("" escapes) and
// CR/LF/CRLF row boundaries, returning sentinel `i` at end-of-row and `o` at
// end-of-file.
14315734,palegoldenrod:15657130,palegreen:10025880,paleturquoise:11529966,palevioletred:14381203,papayawhip:16773077,peachpuff:16767673,peru:13468991,pink:16761035,plum:14524637,powderblue:11591910,purple:8388736,red:16711680,rosybrown:12357519,royalblue:4286945,saddlebrown:9127187,salmon:16416882,sandybrown:16032864,seagreen:3050327,seashell:16774638,sienna:10506797,silver:12632256,skyblue:8900331,slateblue:6970061,slategray:7372944,slategrey:7372944,snow:16775930,springgreen:65407,steelblue:4620980,tan:13808780,teal:32896,thistle:14204888,tomato:16737095,turquoise:4251856,violet:15631086,wheat:16113331,white:16777215,whitesmoke:16119285,yellow:16776960,yellowgreen:10145074});Va.forEach(function(n,t){Va.set(n,ft(t))}),Xo.functor=_t,Xo.xhr=wt(bt),Xo.dsv=function(n,t){function e(n,e,i){arguments.length<3&&(i=e,e=null);var o=St(n,t,null==e?r:u(e),i);return o.row=function(n){return arguments.length?o.response(null==(e=n)?r:u(n)):e},o}function r(n){return e.parse(n.responseText)}function u(n){return function(t){return e.parse(t.responseText,n)}}function i(t){return t.map(o).join(n)}function o(n){return a.test(n)?'"'+n.replace(/\"/g,'""')+'"':n}var a=new RegExp('["'+n+"\n]"),c=n.charCodeAt(0);return e.parse=function(n,t){var r;return e.parseRows(n,function(n,e){if(r)return r(n,e-1);var u=new Function("d","return {"+n.map(function(n,t){return JSON.stringify(n)+": d["+t+"]"}).join(",")+"}");r=t?function(n,e){return t(u(n),e)}:u})},e.parseRows=function(n,t){function e(){if(l>=s)return o;if(u)return u=!1,i;var t=l;if(34===n.charCodeAt(t)){for(var e=t;e++<s;)if(34===n.charCodeAt(e)){if(34!==n.charCodeAt(e+1))break;++e}l=e+2;var r=n.charCodeAt(e+1);return 13===r?(u=!0,10===n.charCodeAt(e+2)&&++l):10===r&&(u=!0),n.substring(t+1,e).replace(/""/g,'"')}for(;s>l;){var r=n.charCodeAt(l++),a=1;if(10===r)u=!0;else if(13===r)u=!0,10===n.charCodeAt(l)&&(++l,++a);else if(r!==c)continue;return n.substring(t,l-a)}return n.substring(t)}for(var 
// NOTE(review): minified D3 v3 dsv writers, timer, number/time formatting and geo-stream
// entry — code kept byte-identical, comments only, inserted at pre-existing line breaks.
// Line below: parseRows driver loop (collects tokens into rows, optional row-mapper
// filters rows); format/formatRows writers (format collects the union of keys across
// rows as the header); Xo.csv / Xo.tsv instantiations; requestAnimationFrame shim `Ga`
// (setTimeout 17ms fallback); Xo.timer — a linked-list queue of {callback,time} entries
// drained by Et/At/Ct (defined outside this chunk); Xo.round; SI-prefix table `Ka`
// (yocto..yotta) and Xo.formatPrefix; number-format spec regex `Qa` and conversion-type
// table `nc` (b/c/o/x/X/g/e/f/r); Xo.time namespace and start of `zt.prototype`, a
// Date wrapper that maps local-time getters onto UTC methods of the wrapped `_` date.
r,u,i={},o={},a=[],s=n.length,l=0,f=0;(r=e())!==o;){for(var h=[];r!==i&&r!==o;)h.push(r),r=e();(!t||(h=t(h,f++)))&&a.push(h)}return a},e.format=function(t){if(Array.isArray(t[0]))return e.formatRows(t);var r=new l,u=[];return t.forEach(function(n){for(var t in n)r.has(t)||u.push(r.add(t))}),[u.map(o).join(n)].concat(t.map(function(t){return u.map(function(n){return o(t[n])}).join(n)})).join("\n")},e.formatRows=function(n){return n.map(i).join("\n")},e},Xo.csv=Xo.dsv(",","text/csv"),Xo.tsv=Xo.dsv("	","text/tab-separated-values");var Xa,$a,Ba,Wa,Ja,Ga=Go[h(Go,"requestAnimationFrame")]||function(n){setTimeout(n,17)};Xo.timer=function(n,t,e){var r=arguments.length;2>r&&(t=0),3>r&&(e=Date.now());var u=e+t,i={c:n,t:u,f:!1,n:null};$a?$a.n=i:Xa=i,$a=i,Ba||(Wa=clearTimeout(Wa),Ba=1,Ga(Et))},Xo.timer.flush=function(){At(),Ct()},Xo.round=function(n,t){return t?Math.round(n*(t=Math.pow(10,t)))/t:Math.round(n)};var Ka=["y","z","a","f","p","n","\xb5","m","","k","M","G","T","P","E","Z","Y"].map(Lt);Xo.formatPrefix=function(n,t){var e=0;return n&&(0>n&&(n*=-1),t&&(n=Xo.round(n,Nt(n,t))),e=1+Math.floor(1e-12+Math.log(n)/Math.LN10),e=Math.max(-24,Math.min(24,3*Math.floor((0>=e?e+1:e-1)/3)))),Ka[8+e/3]};var Qa=/(?:([^{])?([<>=^]))?([+\- ])?([$#])?(0)?(\d+)?(,)?(\.-?\d+)?([a-z%])?/i,nc=Xo.map({b:function(n){return n.toString(2)},c:function(n){return String.fromCharCode(n)},o:function(n){return n.toString(8)},x:function(n){return n.toString(16)},X:function(n){return n.toString(16).toUpperCase()},g:function(n,t){return n.toPrecision(t)},e:function(n,t){return n.toExponential(t)},f:function(n,t){return n.toFixed(t)},r:function(n,t){return(n=Xo.round(n,Nt(n,t))).toFixed(Math.max(0,Math.min(20,Nt(n*(1+1e-15),t))))}}),tc=Xo.time={},ec=Date;zt.prototype={getDate:function(){return this._.getUTCDate()},getDay:function(){return this._.getUTCDay()},getFullYear:function(){return this._.getUTCFullYear()},getHours:function(){return this._.getUTCHours()},getMilliseconds:function(){return 
// Line below: rest of the UTC-backed Date wrapper (setters delegate to Date.prototype
// setUTC* via `rc`; getTimezoneOffset is pinned to 0); time intervals built with Rt:
// tc.year, tc.day (constructed via a year-2000 template date so years < 100 work),
// tc.dayOfYear (corrects for DST via getTimezoneOffset deltas), and the seven weekday
// intervals sunday..saturday with their <day>s ranges and <day>OfYear counters.
this._.getUTCMilliseconds()},getMinutes:function(){return this._.getUTCMinutes()},getMonth:function(){return this._.getUTCMonth()},getSeconds:function(){return this._.getUTCSeconds()},getTime:function(){return this._.getTime()},getTimezoneOffset:function(){return 0},valueOf:function(){return this._.valueOf()},setDate:function(){rc.setUTCDate.apply(this._,arguments)},setDay:function(){rc.setUTCDay.apply(this._,arguments)},setFullYear:function(){rc.setUTCFullYear.apply(this._,arguments)},setHours:function(){rc.setUTCHours.apply(this._,arguments)},setMilliseconds:function(){rc.setUTCMilliseconds.apply(this._,arguments)},setMinutes:function(){rc.setUTCMinutes.apply(this._,arguments)},setMonth:function(){rc.setUTCMonth.apply(this._,arguments)},setSeconds:function(){rc.setUTCSeconds.apply(this._,arguments)},setTime:function(){rc.setTime.apply(this._,arguments)}};var rc=Date.prototype;tc.year=Rt(function(n){return n=tc.day(n),n.setMonth(0,1),n},function(n,t){n.setFullYear(n.getFullYear()+t)},function(n){return n.getFullYear()}),tc.years=tc.year.range,tc.years.utc=tc.year.utc.range,tc.day=Rt(function(n){var t=new ec(2e3,0);return t.setFullYear(n.getFullYear(),n.getMonth(),n.getDate()),t},function(n,t){n.setDate(n.getDate()+t)},function(n){return n.getDate()-1}),tc.days=tc.day.range,tc.days.utc=tc.day.utc.range,tc.dayOfYear=function(n){var t=tc.year(n);return Math.floor((n-t-6e4*(n.getTimezoneOffset()-t.getTimezoneOffset()))/864e5)},["sunday","monday","tuesday","wednesday","thursday","friday","saturday"].forEach(function(n,t){t=7-t;var e=tc[n]=Rt(function(n){return(n=tc.day(n)).setDate(n.getDate()-(n.getDay()+t)%7),n},function(n,t){n.setDate(n.getDate()+7*Math.floor(t))},function(n){var e=tc.year(n).getDay();return Math.floor((tc.dayOfYear(n)+(e+t)%7)/7)-(e!==t)});tc[n+"s"]=e.range,tc[n+"s"].utc=e.utc.range,tc[n+"OfYear"]=function(n){var e=tc.year(n).getDay();return 
// Line below: week aliases (week = sunday); time-format padding table `uc`; Xo.locale
// (numberFormat + timeFormat factory) and the default en-US locale `ac` feeding
// Xo.format; Xo.geo namespace; `re.prototype` is a compensated (error-tracking) adder
// used by the geo code (`cc` is its scratch instance); Xo.geo.stream dispatches
// GeoJSON objects through the `sc` (Feature/FeatureCollection) and `lc` (geometry
// types) tables to a stream sink; Xo.geo.area begins — spherical area via the `gc`
// stream, accumulating into `hc` and converting negative totals by adding 4*PI.
Math.floor((tc.dayOfYear(n)+(e+t)%7)/7)}}),tc.week=tc.sunday,tc.weeks=tc.sunday.range,tc.weeks.utc=tc.sunday.utc.range,tc.weekOfYear=tc.sundayOfYear;var uc={"-":"",_:" ",0:"0"},ic=/^\s*\d+/,oc=/^%/;Xo.locale=function(n){return{numberFormat:Tt(n),timeFormat:Pt(n)}};var ac=Xo.locale({decimal:".",thousands:",",grouping:[3],currency:["$",""],dateTime:"%a %b %e %X %Y",date:"%m/%d/%Y",time:"%H:%M:%S",periods:["AM","PM"],days:["Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"],shortDays:["Sun","Mon","Tue","Wed","Thu","Fri","Sat"],months:["January","February","March","April","May","June","July","August","September","October","November","December"],shortMonths:["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"]});Xo.format=ac.numberFormat,Xo.geo={},re.prototype={s:0,t:0,add:function(n){ue(n,this.t,cc),ue(cc.s,this.s,this),this.s?this.t+=cc.t:this.s=cc.t},reset:function(){this.s=this.t=0},valueOf:function(){return this.s}};var cc=new re;Xo.geo.stream=function(n,t){n&&sc.hasOwnProperty(n.type)?sc[n.type](n,t):ie(n,t)};var sc={Feature:function(n,t){ie(n.geometry,t)},FeatureCollection:function(n,t){for(var e=n.features,r=-1,u=e.length;++r<u;)ie(e[r].geometry,t)}},lc={Sphere:function(n,t){t.sphere()},Point:function(n,t){n=n.coordinates,t.point(n[0],n[1],n[2])},MultiPoint:function(n,t){for(var e=n.coordinates,r=-1,u=e.length;++r<u;)n=e[r],t.point(n[0],n[1],n[2])},LineString:function(n,t){oe(n.coordinates,t,0)},MultiLineString:function(n,t){for(var e=n.coordinates,r=-1,u=e.length;++r<u;)oe(e[r],t,0)},Polygon:function(n,t){ae(n.coordinates,t)},MultiPolygon:function(n,t){for(var e=n.coordinates,r=-1,u=e.length;++r<u;)ae(e[r],t)},GeometryCollection:function(n,t){for(var e=n.geometries,r=-1,u=e.length;++r<u;)ie(e[r],t)}};Xo.geo.area=function(n){return fc=0,Xo.geo.stream(n,gc),fc};var fc,hc=new re,gc={sphere:function(){fc+=4*Sa},point:g,lineStart:g,lineEnd:g,polygonStart:function(){hc.reset(),gc.lineStart=ce},polygonEnd:function(){var 
// NOTE(review): minified D3 v3 geo measurement and projection plumbing — code kept
// byte-identical, comments only, inserted at pre-existing line breaks.
// Line below: geo.area polygonEnd; Xo.geo.bounds — streaming bounding box that tracks
// the antimeridian: for polygons it accumulates signed longitude deltas `y` and, when
// the winding crosses the pole (|y| > epsilon Aa), expands longitude to [-180,180];
// candidate longitude ranges are merged by `a` (circular gap measure) and the widest
// gap determines [l,h]; returns [[NaN,NaN],[NaN,NaN]] for empty input; Xo.geo.centroid
// begins — 3D vector accumulation with dimension fallback (area terms bc/wc/Sc, then
// line terms xc/Mc/_c, then point terms dc/mc/yc) converted back to lon/lat.
n=2*hc;fc+=0>n?4*Sa+n:n,gc.lineStart=gc.lineEnd=gc.point=g}};Xo.geo.bounds=function(){function n(n,t){x.push(M=[l=n,h=n]),f>t&&(f=t),t>g&&(g=t)}function t(t,e){var r=se([t*Na,e*Na]);if(m){var u=fe(m,r),i=[u[1],-u[0],0],o=fe(i,u);pe(o),o=ve(o);var c=t-p,s=c>0?1:-1,v=o[0]*La*s,d=oa(c)>180;if(d^(v>s*p&&s*t>v)){var y=o[1]*La;y>g&&(g=y)}else if(v=(v+360)%360-180,d^(v>s*p&&s*t>v)){var y=-o[1]*La;f>y&&(f=y)}else f>e&&(f=e),e>g&&(g=e);d?p>t?a(l,t)>a(l,h)&&(h=t):a(t,h)>a(l,h)&&(l=t):h>=l?(l>t&&(l=t),t>h&&(h=t)):t>p?a(l,t)>a(l,h)&&(h=t):a(t,h)>a(l,h)&&(l=t)}else n(t,e);m=r,p=t}function e(){_.point=t}function r(){M[0]=l,M[1]=h,_.point=n,m=null}function u(n,e){if(m){var r=n-p;y+=oa(r)>180?r+(r>0?360:-360):r}else v=n,d=e;gc.point(n,e),t(n,e)}function i(){gc.lineStart()}function o(){u(v,d),gc.lineEnd(),oa(y)>Aa&&(l=-(h=180)),M[0]=l,M[1]=h,m=null}function a(n,t){return(t-=n)<0?t+360:t}function c(n,t){return n[0]-t[0]}function s(n,t){return t[0]<=t[1]?t[0]<=n&&n<=t[1]:n<t[0]||t[1]<n}var l,f,h,g,p,v,d,m,y,x,M,_={point:n,lineStart:e,lineEnd:r,polygonStart:function(){_.point=u,_.lineStart=i,_.lineEnd=o,y=0,gc.polygonStart()},polygonEnd:function(){gc.polygonEnd(),_.point=n,_.lineStart=e,_.lineEnd=r,0>hc?(l=-(h=180),f=-(g=90)):y>Aa?g=90:-Aa>y&&(f=-90),M[0]=l,M[1]=h}};return function(n){g=h=-(l=f=1/0),x=[],Xo.geo.stream(n,_);var t=x.length;if(t){x.sort(c);for(var e,r=1,u=x[0],i=[u];t>r;++r)e=x[r],s(e[0],u)||s(e[1],u)?(a(u[0],e[1])>a(u[0],u[1])&&(u[1]=e[1]),a(e[0],u[1])>a(u[0],u[1])&&(u[0]=e[0])):i.push(u=e);for(var o,e,p=-1/0,t=i.length-1,r=0,u=i[t];t>=r;u=e,++r)e=i[r],(o=a(u[1],e[0]))>p&&(p=o,l=e[0],h=u[1])}return x=M=null,1/0===l||1/0===f?[[0/0,0/0],[0/0,0/0]]:[[l,f],[h,g]]}}(),Xo.geo.centroid=function(n){pc=vc=dc=mc=yc=xc=Mc=_c=bc=wc=Sc=0,Xo.geo.stream(n,kc);var t=bc,e=wc,r=Sc,u=t*t+e*e+r*r;return Ca>u&&(t=xc,e=Mc,r=_c,Aa>vc&&(t=dc,e=mc,r=yc),u=t*t+e*e+r*r,Ca>u)?[0/0,0/0],[Math.atan2(e,t)*La,X(r/Math.sqrt(u))*La]};var 
// Line below: centroid accumulators and stream table `kc`; clip helper `Ec`;
// Xo.geo.clipExtent — rectangular clipping with a `valid` flag so replaced streams are
// invalidated; conicEqualArea/albers presets; Xo.geo.albersUsa — composite projection
// (main conic + two secondary conics rotated/centered for the insets) whose point sink
// `c` records which sub-projection hit, with invert choosing a sub-projection by
// normalized viewport region and a multiplexing stream that fans every sink call out
// to all three sub-streams.
pc,vc,dc,mc,yc,xc,Mc,_c,bc,wc,Sc,kc={sphere:g,point:me,lineStart:xe,lineEnd:Me,polygonStart:function(){kc.lineStart=_e},polygonEnd:function(){kc.lineStart=xe}},Ec=Ee(be,Te,ze,[-Sa,-Sa/2]),Ac=1e9;Xo.geo.clipExtent=function(){var n,t,e,r,u,i,o={stream:function(n){return u&&(u.valid=!1),u=i(n),u.valid=!0,u},extent:function(a){return arguments.length?(i=Pe(n=+a[0][0],t=+a[0][1],e=+a[1][0],r=+a[1][1]),u&&(u.valid=!1,u=null),o):[[n,t],[e,r]]}};return o.extent([[0,0],[960,500]])},(Xo.geo.conicEqualArea=function(){return je(He)}).raw=He,Xo.geo.albers=function(){return Xo.geo.conicEqualArea().rotate([96,0]).center([-.6,38.7]).parallels([29.5,45.5]).scale(1070)},Xo.geo.albersUsa=function(){function n(n){var i=n[0],o=n[1];return t=null,e(i,o),t||(r(i,o),t)||u(i,o),t}var t,e,r,u,i=Xo.geo.albers(),o=Xo.geo.conicEqualArea().rotate([154,0]).center([-2,58.5]).parallels([55,65]),a=Xo.geo.conicEqualArea().rotate([157,0]).center([-3,19.9]).parallels([8,18]),c={point:function(n,e){t=[n,e]}};return n.invert=function(n){var t=i.scale(),e=i.translate(),r=(n[0]-e[0])/t,u=(n[1]-e[1])/t;return(u>=.12&&.234>u&&r>=-.425&&-.214>r?o:u>=.166&&.234>u&&r>=-.214&&-.115>r?a:i).invert(n)},n.stream=function(n){var t=i.stream(n),e=o.stream(n),r=a.stream(n);return{point:function(n,u){t.point(n,u),e.point(n,u),r.point(n,u)},sphere:function(){t.sphere(),e.sphere(),r.sphere()},lineStart:function(){t.lineStart(),e.lineStart(),r.lineStart()},lineEnd:function(){t.lineEnd(),e.lineEnd(),r.lineEnd()},polygonStart:function(){t.polygonStart(),e.polygonStart(),r.polygonStart()},polygonEnd:function(){t.polygonEnd(),e.polygonEnd(),r.polygonEnd()}}},n.precision=function(t){return arguments.length?(i.precision(t),o.precision(t),a.precision(t),n):i.precision()},n.scale=function(t){return arguments.length?(i.scale(t),o.scale(.35*t),a.scale(t),n.translate(i.translate())):i.scale()},n.translate=function(t){if(!arguments.length)return i.translate();var s=i.scale(),l=+t[0],f=+t[1];return 
// Line below: albersUsa translate — positions and clips each sub-projection's viewport
// box (fractions of the scale, padded by epsilon Aa); planar measurement streams `Rc`
// (area), `Dc` (bounds) and `Pc` (centroid, with the same dimensional fallback);
// Xo.geo.path — the path generator: resolves pointRadius per-call when it is a
// function, rebuilds the projected stream lazily (o.valid guard), with area/centroid/
// bounds measures and projection()/context() (SVG string builder Ye vs canvas
// renderer Be) accessors, defaulting to albersUsa + null context; Xo.geo.transform
// begins — wraps a sink in a `Ge` pass-through overridden by the supplied methods.
e=i.translate(t).clipExtent([[l-.455*s,f-.238*s],[l+.455*s,f+.238*s]]).stream(c).point,r=o.translate([l-.307*s,f+.201*s]).clipExtent([[l-.425*s+Aa,f+.12*s+Aa],[l-.214*s-Aa,f+.234*s-Aa]]).stream(c).point,u=a.translate([l-.205*s,f+.212*s]).clipExtent([[l-.214*s+Aa,f+.166*s+Aa],[l-.115*s-Aa,f+.234*s-Aa]]).stream(c).point,n},n.scale(1070)};var Cc,Nc,Lc,Tc,qc,zc,Rc={point:g,lineStart:g,lineEnd:g,polygonStart:function(){Nc=0,Rc.lineStart=Fe},polygonEnd:function(){Rc.lineStart=Rc.lineEnd=Rc.point=g,Cc+=oa(Nc/2)}},Dc={point:Oe,lineStart:g,lineEnd:g,polygonStart:g,polygonEnd:g},Pc={point:Ze,lineStart:Ve,lineEnd:Xe,polygonStart:function(){Pc.lineStart=$e},polygonEnd:function(){Pc.point=Ze,Pc.lineStart=Ve,Pc.lineEnd=Xe}};Xo.geo.path=function(){function n(n){return n&&("function"==typeof a&&i.pointRadius(+a.apply(this,arguments)),o&&o.valid||(o=u(i)),Xo.geo.stream(n,o)),i.result()}function t(){return o=null,n}var e,r,u,i,o,a=4.5;return n.area=function(n){return Cc=0,Xo.geo.stream(n,u(Rc)),Cc},n.centroid=function(n){return dc=mc=yc=xc=Mc=_c=bc=wc=Sc=0,Xo.geo.stream(n,u(Pc)),Sc?[bc/Sc,wc/Sc]:_c?[xc/_c,Mc/_c]:yc?[dc/yc,mc/yc]:[0/0,0/0]},n.bounds=function(n){return qc=zc=-(Lc=Tc=1/0),Xo.geo.stream(n,u(Dc)),[[Lc,Tc],[qc,zc]]},n.projection=function(n){return arguments.length?(u=(e=n)?n.stream||Je(n):bt,t()):e},n.context=function(n){return arguments.length?(i=null==(r=n)?new Ye:new Be(n),"function"!=typeof a&&i.pointRadius(a),t()):r},n.pointRadius=function(t){return arguments.length?(a="function"==typeof t?t:(i.pointRadius(+t),+t),n):a},n.projection(Xo.geo.albersUsa()).context(null)},Xo.geo.transform=function(n){return{stream:function(t){var e=new Ge(t);for(var r in n)e[r]=n[r];return 
// NOTE(review): minified D3 v3 projection factories and planar geometry — code kept
// byte-identical, comments only, inserted at pre-existing line breaks.
// Line below: `Ge.prototype` delegates every stream method to the wrapped sink;
// Xo.geo.projection/projectionMutator; equirectangular (its own inverse);
// Xo.geo.rotation (degree<->radian wrapping around `ur`, with invert); Xo.geo.circle
// (generates a small-circle Polygon by streaming `cr` through the inverse rotation);
// Xo.geo.distance — great-circle distance via the atan2 form (stable for small
// angles); Xo.geo.graticule begins — builds a MultiLineString of meridians/parallels
// at major step d/m and minor step p/v, skipping minor lines that coincide with major
// ones (the oa(n%d)>Aa filters).
e}}},Ge.prototype={point:function(n,t){this.stream.point(n,t)},sphere:function(){this.stream.sphere()},lineStart:function(){this.stream.lineStart()},lineEnd:function(){this.stream.lineEnd()},polygonStart:function(){this.stream.polygonStart()},polygonEnd:function(){this.stream.polygonEnd()}},Xo.geo.projection=Qe,Xo.geo.projectionMutator=nr,(Xo.geo.equirectangular=function(){return Qe(er)}).raw=er.invert=er,Xo.geo.rotation=function(n){function t(t){return t=n(t[0]*Na,t[1]*Na),t[0]*=La,t[1]*=La,t}return n=ur(n[0]%360*Na,n[1]*Na,n.length>2?n[2]*Na:0),t.invert=function(t){return t=n.invert(t[0]*Na,t[1]*Na),t[0]*=La,t[1]*=La,t},t},rr.invert=er,Xo.geo.circle=function(){function n(){var n="function"==typeof r?r.apply(this,arguments):r,t=ur(-n[0]*Na,-n[1]*Na,0).invert,u=[];return e(null,null,1,{point:function(n,e){u.push(n=t(n,e)),n[0]*=La,n[1]*=La}}),{type:"Polygon",coordinates:[u]}}var t,e,r=[0,0],u=6;return n.origin=function(t){return arguments.length?(r=t,n):r},n.angle=function(r){return arguments.length?(e=cr((t=+r)*Na,u*Na),n):t},n.precision=function(r){return arguments.length?(e=cr(t*Na,(u=+r)*Na),n):u},n.angle(90)},Xo.geo.distance=function(n,t){var e,r=(t[0]-n[0])*Na,u=n[1]*Na,i=t[1]*Na,o=Math.sin(r),a=Math.cos(r),c=Math.sin(u),s=Math.cos(u),l=Math.sin(i),f=Math.cos(i);return Math.atan2(Math.sqrt((e=f*o)*e+(e=s*l-c*f*a)*e),c*l+s*f*a)},Xo.geo.graticule=function(){function n(){return{type:"MultiLineString",coordinates:t()}}function t(){return Xo.range(Math.ceil(i/d)*d,u,d).map(h).concat(Xo.range(Math.ceil(s/m)*m,c,m).map(g)).concat(Xo.range(Math.ceil(r/p)*p,e,p).filter(function(n){return oa(n%d)>Aa}).map(l)).concat(Xo.range(Math.ceil(a/v)*v,o,v).filter(function(n){return oa(n%m)>Aa}).map(f))}var e,r,u,i,o,a,c,s,l,f,h,g,p=10,v=p,d=90,m=360,y=2.5;return n.lines=function(){return 
// Line below: graticule accessors — lines/outline (outline walks the major extent
// boundary), extent/majorExtent/minorExtent (normalizing swapped corners), step
// accessors, precision (rebuilds the meridian/parallel samplers lr/fr); default
// extents pad by epsilon Aa; Xo.geo.greatArc (LineString between source and target,
// distance() helper, precision is a no-op kept for API compatibility);
// Xo.geo.interpolate (spherical interpolator via pr); Xo.geo.length streaming through
// `jc`; azimuthal-equal-area raw projection `Hc` built with dr.
t().map(function(n){return{type:"LineString",coordinates:n}})},n.outline=function(){return{type:"Polygon",coordinates:[h(i).concat(g(c).slice(1),h(u).reverse().slice(1),g(s).reverse().slice(1))]}},n.extent=function(t){return arguments.length?n.majorExtent(t).minorExtent(t):n.minorExtent()},n.majorExtent=function(t){return arguments.length?(i=+t[0][0],u=+t[1][0],s=+t[0][1],c=+t[1][1],i>u&&(t=i,i=u,u=t),s>c&&(t=s,s=c,c=t),n.precision(y)):[[i,s],[u,c]]},n.minorExtent=function(t){return arguments.length?(r=+t[0][0],e=+t[1][0],a=+t[0][1],o=+t[1][1],r>e&&(t=r,r=e,e=t),a>o&&(t=a,a=o,o=t),n.precision(y)):[[r,a],[e,o]]},n.step=function(t){return arguments.length?n.majorStep(t).minorStep(t):n.minorStep()},n.majorStep=function(t){return arguments.length?(d=+t[0],m=+t[1],n):[d,m]},n.minorStep=function(t){return arguments.length?(p=+t[0],v=+t[1],n):[p,v]},n.precision=function(t){return arguments.length?(y=+t,l=lr(a,o,90),f=fr(r,e,y),h=lr(s,c,90),g=fr(i,u,y),n):y},n.majorExtent([[-180,-90+Aa],[180,90-Aa]]).minorExtent([[-180,-80-Aa],[180,80+Aa]])},Xo.geo.greatArc=function(){function n(){return{type:"LineString",coordinates:[t||r.apply(this,arguments),e||u.apply(this,arguments)]}}var t,e,r=hr,u=gr;return n.distance=function(){return Xo.geo.distance(t||r.apply(this,arguments),e||u.apply(this,arguments))},n.source=function(e){return arguments.length?(r=e,t="function"==typeof e?null:e,n):r},n.target=function(t){return arguments.length?(u=t,e="function"==typeof t?null:t,n):u},n.precision=function(){return arguments.length?n:0},n},Xo.geo.interpolate=function(n,t){return pr(n[0]*Na,n[1]*Na,t[0]*Na,t[1]*Na)},Xo.geo.length=function(n){return Uc=0,Xo.geo.stream(n,jc),Uc};var Uc,jc={sphere:g,point:g,lineStart:vr,lineEnd:g,polygonStart:g,polygonEnd:g},Hc=dr(function(n){return Math.sqrt(2/(1+n))},function(n){return 2*Math.asin(n/2)});(Xo.geo.azimuthalEqualArea=function(){return Qe(Hc)}).raw=Hc;var Fc=dr(function(n){var t=Math.acos(n);return 
// Line below: remaining projection factories — azimuthalEquidistant, conicConformal,
// conicEquidistant, gnomonic, mercator (with its invert), orthographic, stereographic,
// and transverseMercator (swaps center/rotate axes and adds 90 deg to the roll);
// Xo.geom.hull — 2D convex hull: sorts points, computes upper chain via Sr on the
// points and lower chain on y-negated copies, then stitches them skipping shared
// endpoints; Xo.geom.polygon wraps an array with the `Zc` prototype: area (signed,
// shoelace sum halved) and centroid; Zc.clip begins on the next line.
t&&t/Math.sin(t)},bt);(Xo.geo.azimuthalEquidistant=function(){return Qe(Fc)}).raw=Fc,(Xo.geo.conicConformal=function(){return je(mr)}).raw=mr,(Xo.geo.conicEquidistant=function(){return je(yr)}).raw=yr;var Oc=dr(function(n){return 1/n},Math.atan);(Xo.geo.gnomonic=function(){return Qe(Oc)}).raw=Oc,xr.invert=function(n,t){return[n,2*Math.atan(Math.exp(t))-Ea]},(Xo.geo.mercator=function(){return Mr(xr)}).raw=xr;var Yc=dr(function(){return 1},Math.asin);(Xo.geo.orthographic=function(){return Qe(Yc)}).raw=Yc;var Ic=dr(function(n){return 1/(1+n)},function(n){return 2*Math.atan(n)});(Xo.geo.stereographic=function(){return Qe(Ic)}).raw=Ic,_r.invert=function(n,t){return[-t,2*Math.atan(Math.exp(n))-Ea]},(Xo.geo.transverseMercator=function(){var n=Mr(_r),t=n.center,e=n.rotate;return n.center=function(n){return n?t([-n[1],n[0]]):(n=t(),[-n[1],n[0]])},n.rotate=function(n){return n?e([n[0],n[1],n.length>2?n[2]+90:90]):(n=e(),[n[0],n[1],n[2]-90])},n.rotate([0,0])}).raw=_r,Xo.geom={},Xo.geom.hull=function(n){function t(n){if(n.length<3)return[];var t,u=_t(e),i=_t(r),o=n.length,a=[],c=[];for(t=0;o>t;t++)a.push([+u.call(this,n[t],t),+i.call(this,n[t],t),t]);for(a.sort(kr),t=0;o>t;t++)c.push([a[t][0],-a[t][1]]);var s=Sr(a),l=Sr(c),f=l[0]===s[0],h=l[l.length-1]===s[s.length-1],g=[];for(t=s.length-1;t>=0;--t)g.push(n[a[s[t]][2]]);for(t=+f;t<l.length-h;++t)g.push(n[a[l[t]][2]]);return g}var e=br,r=wr;return arguments.length?t(n):(t.x=function(n){return arguments.length?(e=n,t):e},t.y=function(n){return arguments.length?(r=n,t):r},t)},Xo.geom.polygon=function(n){return fa(n,Zc),n};var Zc=Xo.geom.polygon.prototype=[];Zc.area=function(){for(var n,t=-1,e=this.length,r=this[e-1],u=0;++t<e;)n=r,r=this[t],u+=n[1]*r[0]-n[0]*r[1];return.5*u},Zc.centroid=function(n){var t,e,r=-1,u=this.length,i=0,o=0,a=this[u-1];for(arguments.length||(n=-1/(6*this.area()));++r<u;)t=a,a=this[r],e=t[0]*a[1]-a[0]*t[1],i+=(t[0]+a[0])*e,o+=(t[1]+a[1])*e;return[i*n,o*n]},Zc.clip=function(n){for(var 
t,e,r,u,i,o,a=Cr(n),c=-1,s=this.length-Cr(this),l=this[s-1];++c<s;){for(t=n.slice(),n.length=0,u=this[c],i=t[(r=t.length-a)-1],e=-1;++e<r;)o=t[e],Er(o,l,u)?(Er(i,l,u)||n.push(Ar(i,o,l,u)),n.push(o)):Er(i,l,u)&&n.push(Ar(i,o,l,u)),i=o;a&&n.push(n[0]),l=u}return n};var Vc,Xc,$c,Bc,Wc,Jc=[],Gc=[];Pr.prototype.prepare=function(){for(var n,t=this.edges,e=t.length;e--;)n=t[e].edge,n.b&&n.a||t.splice(e,1);return t.sort(jr),t.length},Br.prototype={start:function(){return this.edge.l===this.site?this.edge.a:this.edge.b},end:function(){return this.edge.l===this.site?this.edge.b:this.edge.a}},Wr.prototype={insert:function(n,t){var e,r,u;if(n){if(t.P=n,t.N=n.N,n.N&&(n.N.P=t),n.N=t,n.R){for(n=n.R;n.L;)n=n.L;n.L=t}else n.R=t;e=n}else this._?(n=Qr(this._),t.P=null,t.N=n,n.P=n.L=t,e=n):(t.P=t.N=null,this._=t,e=null);for(t.L=t.R=null,t.U=e,t.C=!0,n=t;e&&e.C;)r=e.U,e===r.L?(u=r.R,u&&u.C?(e.C=u.C=!1,r.C=!0,n=r):(n===e.R&&(Gr(this,e),n=e,e=n.U),e.C=!1,r.C=!0,Kr(this,r))):(u=r.L,u&&u.C?(e.C=u.C=!1,r.C=!0,n=r):(n===e.L&&(Kr(this,e),n=e,e=n.U),e.C=!1,r.C=!0,Gr(this,r))),e=n.U;this._.C=!1},remove:function(n){n.N&&(n.N.P=n.P),n.P&&(n.P.N=n.N),n.N=n.P=null;var t,e,r,u=n.U,i=n.L,o=n.R;if(e=i?o?Qr(o):i:o,u?u.L===n?u.L=e:u.R=e:this._=e,i&&o?(r=e.C,e.C=n.C,e.L=i,i.U=e,e!==o?(u=e.U,e.U=n.U,n=e.R,u.L=n,e.R=o,o.U=e):(e.U=u,u=e,n=e.R)):(r=n.C,n=e),n&&(n.U=u),!r){if(n&&n.C)return n.C=!1,void 0;do{if(n===this._)break;if(n===u.L){if(t=u.R,t.C&&(t.C=!1,u.C=!0,Gr(this,u),t=u.R),t.L&&t.L.C||t.R&&t.R.C){t.R&&t.R.C||(t.L.C=!1,t.C=!0,Kr(this,t),t=u.R),t.C=u.C,u.C=t.R.C=!1,Gr(this,u),n=this._;break}}else if(t=u.L,t.C&&(t.C=!1,u.C=!0,Kr(this,u),t=u.L),t.L&&t.L.C||t.R&&t.R.C){t.L&&t.L.C||(t.R.C=!1,t.C=!0,Gr(this,t),t=u.L),t.C=u.C,u.C=t.L.C=!1,Kr(this,u),n=this._;break}t.C=!0,n=u,u=u.U}while(!n.C);n&&(n.C=!1)}}},Xo.geom.voronoi=function(n){function t(n){var t=new Array(n.length),r=a[0][0],u=a[0][1],i=a[1][0],o=a[1][1];return nu(e(n),a).cells.forEach(function(e,a){var 
c=e.edges,s=e.site,l=t[a]=c.length?c.map(function(n){var t=n.start();return[t.x,t.y]}):s.x>=r&&s.x<=i&&s.y>=u&&s.y<=o?[[r,o],[i,o],[i,u],[r,u]]:[];l.point=n[a]}),t}function e(n){return n.map(function(n,t){return{x:Math.round(i(n,t)/Aa)*Aa,y:Math.round(o(n,t)/Aa)*Aa,i:t}})}var r=br,u=wr,i=r,o=u,a=Kc;return n?t(n):(t.links=function(n){return nu(e(n)).edges.filter(function(n){return n.l&&n.r}).map(function(t){return{source:n[t.l.i],target:n[t.r.i]}})},t.triangles=function(n){var t=[];return nu(e(n)).cells.forEach(function(e,r){for(var u,i,o=e.site,a=e.edges.sort(jr),c=-1,s=a.length,l=a[s-1].edge,f=l.l===o?l.r:l.l;++c<s;)u=l,i=f,l=a[c].edge,f=l.l===o?l.r:l.l,r<i.i&&r<f.i&&eu(o,i,f)<0&&t.push([n[r],n[i.i],n[f.i]])}),t},t.x=function(n){return arguments.length?(i=_t(r=n),t):r},t.y=function(n){return arguments.length?(o=_t(u=n),t):u},t.clipExtent=function(n){return arguments.length?(a=null==n?Kc:n,t):a===Kc?null:a},t.size=function(n){return arguments.length?t.clipExtent(n&&[[0,0],n]):a===Kc?null:a&&a[1]},t)};var Kc=[[-1e6,-1e6],[1e6,1e6]];Xo.geom.delaunay=function(n){return Xo.geom.voronoi().triangles(n)},Xo.geom.quadtree=function(n,t,e,r,u){function i(n){function i(n,t,e,r,u,i,o,a){if(!isNaN(e)&&!isNaN(r))if(n.leaf){var c=n.x,l=n.y;if(null!=c)if(oa(c-e)+oa(l-r)<.01)s(n,t,e,r,u,i,o,a);else{var f=n.point;n.x=n.y=n.point=null,s(n,f,c,l,u,i,o,a),s(n,t,e,r,u,i,o,a)}else n.x=e,n.y=r,n.point=t}else s(n,t,e,r,u,i,o,a)}function s(n,t,e,r,u,o,a,c){var s=.5*(u+a),l=.5*(o+c),f=e>=s,h=r>=l,g=(h<<1)+f;n.leaf=!1,n=n.nodes[g]||(n.nodes[g]=iu()),f?u=s:a=s,h?o=l:c=l,i(n,t,e,r,u,o,a,c)}var l,f,h,g,p,v,d,m,y,x=_t(a),M=_t(c);if(null!=t)v=t,d=e,m=r,y=u;else if(m=y=-(v=d=1/0),f=[],h=[],p=n.length,o)for(g=0;p>g;++g)l=n[g],l.x<v&&(v=l.x),l.y<d&&(d=l.y),l.x>m&&(m=l.x),l.y>y&&(y=l.y),f.push(l.x),h.push(l.y);else for(g=0;p>g;++g){var _=+x(l=n[g],g),b=+M(l,g);v>_&&(v=_),d>b&&(d=b),_>m&&(m=_),b>y&&(y=b),f.push(_),h.push(b)}var w=m-v,S=y-d;w>S?y=d+w:m=v+S;var 
k=iu();if(k.add=function(n){i(k,n,+x(n,++g),+M(n,g),v,d,m,y)},k.visit=function(n){ou(n,k,v,d,m,y)},g=-1,null==t){for(;++g<p;)i(k,n[g],f[g],h[g],v,d,m,y);--g}else n.forEach(k.add);return f=h=n=l=null,k}var o,a=br,c=wr;return(o=arguments.length)?(a=ru,c=uu,3===o&&(u=e,r=t,e=t=0),i(n)):(i.x=function(n){return arguments.length?(a=n,i):a},i.y=function(n){return arguments.length?(c=n,i):c},i.extent=function(n){return arguments.length?(null==n?t=e=r=u=null:(t=+n[0][0],e=+n[0][1],r=+n[1][0],u=+n[1][1]),i):null==t?null:[[t,e],[r,u]]},i.size=function(n){return arguments.length?(null==n?t=e=r=u=null:(t=e=0,r=+n[0],u=+n[1]),i):null==t?null:[r-t,u-e]},i)},Xo.interpolateRgb=au,Xo.interpolateObject=cu,Xo.interpolateNumber=su,Xo.interpolateString=lu;var Qc=/[-+]?(?:\d+\.?\d*|\.?\d+)(?:[eE][-+]?\d+)?/g;Xo.interpolate=fu,Xo.interpolators=[function(n,t){var e=typeof t;return("string"===e?Va.has(t)||/^(#|rgb\(|hsl\()/.test(t)?au:lu:t instanceof G?au:"object"===e?Array.isArray(t)?hu:cu:su)(n,t)}],Xo.interpolateArray=hu;var ns=function(){return bt},ts=Xo.map({linear:ns,poly:xu,quad:function(){return du},cubic:function(){return mu},sin:function(){return Mu},exp:function(){return _u},circle:function(){return bu},elastic:wu,back:Su,bounce:function(){return ku}}),es=Xo.map({"in":bt,out:pu,"in-out":vu,"out-in":function(n){return vu(pu(n))}});Xo.ease=function(n){var t=n.indexOf("-"),e=t>=0?n.substring(0,t):n,r=t>=0?n.substring(t+1):"in";return e=ts.get(e)||ns,r=es.get(r)||bt,gu(r(e.apply(null,$o.call(arguments,1))))},Xo.interpolateHcl=Eu,Xo.interpolateHsl=Au,Xo.interpolateLab=Cu,Xo.interpolateRound=Nu,Xo.transform=function(n){var t=Wo.createElementNS(Xo.ns.prefix.svg,"g");return(Xo.transform=function(n){if(null!=n){t.setAttribute("transform",n);var e=t.transform.baseVal.consolidate()}return new Lu(e?e.matrix:rs)})(n)},Lu.prototype.toString=function(){return"translate("+this.translate+")rotate("+this.rotate+")skewX("+this.skew+")scale("+this.scale+")"};var 
rs={a:1,b:0,c:0,d:1,e:0,f:0};Xo.interpolateTransform=Ru,Xo.layout={},Xo.layout.bundle=function(){return function(n){for(var t=[],e=-1,r=n.length;++e<r;)t.push(Uu(n[e]));return t}},Xo.layout.chord=function(){function n(){var n,s,f,h,g,p={},v=[],d=Xo.range(i),m=[];for(e=[],r=[],n=0,h=-1;++h<i;){for(s=0,g=-1;++g<i;)s+=u[h][g];v.push(s),m.push(Xo.range(i)),n+=s}for(o&&d.sort(function(n,t){return o(v[n],v[t])}),a&&m.forEach(function(n,t){n.sort(function(n,e){return a(u[t][n],u[t][e])})}),n=(ka-l*i)/n,s=0,h=-1;++h<i;){for(f=s,g=-1;++g<i;){var y=d[h],x=m[y][g],M=u[y][x],_=s,b=s+=M*n;p[y+"-"+x]={index:y,subindex:x,startAngle:_,endAngle:b,value:M}}r[y]={index:y,startAngle:f,endAngle:s,value:(s-f)/n},s+=l}for(h=-1;++h<i;)for(g=h-1;++g<i;){var w=p[h+"-"+g],S=p[g+"-"+h];(w.value||S.value)&&e.push(w.value<S.value?{source:S,target:w}:{source:w,target:S})}c&&t()}function t(){e.sort(function(n,t){return c((n.source.value+n.target.value)/2,(t.source.value+t.target.value)/2)})}var e,r,u,i,o,a,c,s={},l=0;return s.matrix=function(n){return arguments.length?(i=(u=n)&&u.length,e=r=null,s):u},s.padding=function(n){return arguments.length?(l=n,e=r=null,s):l},s.sortGroups=function(n){return arguments.length?(o=n,e=r=null,s):o},s.sortSubgroups=function(n){return arguments.length?(a=n,e=null,s):a},s.sortChords=function(n){return arguments.length?(c=n,e&&t(),s):c},s.chords=function(){return e||n(),e},s.groups=function(){return r||n(),r},s},Xo.layout.force=function(){function n(n){return function(t,e,r,u){if(t.point!==n){var i=t.cx-n.x,o=t.cy-n.y,a=u-e,c=i*i+o*o;if(c>a*a/d){if(p>c){var s=t.charge/c;n.px-=i*s,n.py-=o*s}return!0}if(t.point&&c&&p>c){var s=t.pointCharge/c;n.px-=i*s,n.py-=o*s}}return!t.charge}}function t(n){n.px=Xo.event.x,n.py=Xo.event.y,a.resume()}var e,r,u,i,o,a={},c=Xo.dispatch("start","tick","end"),s=[1,1],l=.9,f=us,h=is,g=-30,p=os,v=.1,d=.64,m=[],y=[];return a.tick=function(){if((r*=.99)<.005)return c.end({type:"end",alpha:r=0}),!0;var 
t,e,a,f,h,p,d,x,M,_=m.length,b=y.length;for(e=0;b>e;++e)a=y[e],f=a.source,h=a.target,x=h.x-f.x,M=h.y-f.y,(p=x*x+M*M)&&(p=r*i[e]*((p=Math.sqrt(p))-u[e])/p,x*=p,M*=p,h.x-=x*(d=f.weight/(h.weight+f.weight)),h.y-=M*d,f.x+=x*(d=1-d),f.y+=M*d);if((d=r*v)&&(x=s[0]/2,M=s[1]/2,e=-1,d))for(;++e<_;)a=m[e],a.x+=(x-a.x)*d,a.y+=(M-a.y)*d;if(g)for(Zu(t=Xo.geom.quadtree(m),r,o),e=-1;++e<_;)(a=m[e]).fixed||t.visit(n(a));for(e=-1;++e<_;)a=m[e],a.fixed?(a.x=a.px,a.y=a.py):(a.x-=(a.px-(a.px=a.x))*l,a.y-=(a.py-(a.py=a.y))*l);c.tick({type:"tick",alpha:r})},a.nodes=function(n){return arguments.length?(m=n,a):m},a.links=function(n){return arguments.length?(y=n,a):y},a.size=function(n){return arguments.length?(s=n,a):s},a.linkDistance=function(n){return arguments.length?(f="function"==typeof n?n:+n,a):f},a.distance=a.linkDistance,a.linkStrength=function(n){return arguments.length?(h="function"==typeof n?n:+n,a):h},a.friction=function(n){return arguments.length?(l=+n,a):l},a.charge=function(n){return arguments.length?(g="function"==typeof n?n:+n,a):g},a.chargeDistance=function(n){return arguments.length?(p=n*n,a):Math.sqrt(p)},a.gravity=function(n){return arguments.length?(v=+n,a):v},a.theta=function(n){return arguments.length?(d=n*n,a):Math.sqrt(d)},a.alpha=function(n){return arguments.length?(n=+n,r?r=n>0?n:0:n>0&&(c.start({type:"start",alpha:r=n}),Xo.timer(a.tick)),a):r},a.start=function(){function n(n,r){if(!e){for(e=new Array(c),a=0;c>a;++a)e[a]=[];for(a=0;s>a;++a){var u=y[a];e[u.source.index].push(u.target),e[u.target.index].push(u.source)}}for(var i,o=e[t],a=-1,s=o.length;++a<s;)if(!isNaN(i=o[a][n]))return i;return Math.random()*r}var t,e,r,c=m.length,l=y.length,p=s[0],v=s[1];for(t=0;c>t;++t)(r=m[t]).index=t,r.weight=0;for(t=0;l>t;++t)r=y[t],"number"==typeof r.source&&(r.source=m[r.source]),"number"==typeof 
r.target&&(r.target=m[r.target]),++r.source.weight,++r.target.weight;for(t=0;c>t;++t)r=m[t],isNaN(r.x)&&(r.x=n("x",p)),isNaN(r.y)&&(r.y=n("y",v)),isNaN(r.px)&&(r.px=r.x),isNaN(r.py)&&(r.py=r.y);if(u=[],"function"==typeof f)for(t=0;l>t;++t)u[t]=+f.call(this,y[t],t);else for(t=0;l>t;++t)u[t]=f;if(i=[],"function"==typeof h)for(t=0;l>t;++t)i[t]=+h.call(this,y[t],t);else for(t=0;l>t;++t)i[t]=h;if(o=[],"function"==typeof g)for(t=0;c>t;++t)o[t]=+g.call(this,m[t],t);else for(t=0;c>t;++t)o[t]=g;return a.resume()},a.resume=function(){return a.alpha(.1)},a.stop=function(){return a.alpha(0)},a.drag=function(){return e||(e=Xo.behavior.drag().origin(bt).on("dragstart.force",Fu).on("drag.force",t).on("dragend.force",Ou)),arguments.length?(this.on("mouseover.force",Yu).on("mouseout.force",Iu).call(e),void 0):e},Xo.rebind(a,c,"on")};var us=20,is=1,os=1/0;Xo.layout.hierarchy=function(){function n(t,o,a){var c=u.call(e,t,o);if(t.depth=o,a.push(t),c&&(s=c.length)){for(var s,l,f=-1,h=t.children=new Array(s),g=0,p=o+1;++f<s;)l=h[f]=n(c[f],p,a),l.parent=t,g+=l.value;r&&h.sort(r),i&&(t.value=g)}else delete t.children,i&&(t.value=+i.call(e,t,o)||0);return t}function t(n,r){var u=n.children,o=0;if(u&&(a=u.length))for(var a,c=-1,s=r+1;++c<a;)o+=t(u[c],s);else i&&(o=+i.call(e,n,r)||0);return i&&(n.value=o),o}function e(t){var e=[];return n(t,0,e),e}var r=Bu,u=Xu,i=$u;return e.sort=function(n){return arguments.length?(r=n,e):r},e.children=function(n){return arguments.length?(u=n,e):u},e.value=function(n){return arguments.length?(i=n,e):i},e.revalue=function(n){return t(n,0),n},e},Xo.layout.partition=function(){function n(t,e,r,u){var i=t.children;if(t.x=e,t.y=t.depth*u,t.dx=r,t.dy=u,i&&(o=i.length)){var o,a,c,s=-1;for(r=t.value?r/t.value:0;++s<o;)n(a=i[s],e,c=a.value*r,u),e+=c}}function t(n){var e=n.children,r=0;if(e&&(u=e.length))for(var u,i=-1;++i<u;)r=Math.max(r,t(e[i]));return 1+r}function e(e,i){var o=r.call(this,e,i);return n(o[0],0,u[0],u[1]/t(o[0])),o}var 
r=Xo.layout.hierarchy(),u=[1,1];return e.size=function(n){return arguments.length?(u=n,e):u},Vu(e,r)},Xo.layout.pie=function(){function n(i){var o=i.map(function(e,r){return+t.call(n,e,r)}),a=+("function"==typeof r?r.apply(this,arguments):r),c=(("function"==typeof u?u.apply(this,arguments):u)-a)/Xo.sum(o),s=Xo.range(i.length);null!=e&&s.sort(e===as?function(n,t){return o[t]-o[n]}:function(n,t){return e(i[n],i[t])});var l=[];return s.forEach(function(n){var t;l[n]={data:i[n],value:t=o[n],startAngle:a,endAngle:a+=t*c}}),l}var t=Number,e=as,r=0,u=ka;return n.value=function(e){return arguments.length?(t=e,n):t},n.sort=function(t){return arguments.length?(e=t,n):e},n.startAngle=function(t){return arguments.length?(r=t,n):r},n.endAngle=function(t){return arguments.length?(u=t,n):u},n};var as={};Xo.layout.stack=function(){function n(a,c){var s=a.map(function(e,r){return t.call(n,e,r)}),l=s.map(function(t){return t.map(function(t,e){return[i.call(n,t,e),o.call(n,t,e)]})}),f=e.call(n,l,c);s=Xo.permute(s,f),l=Xo.permute(l,f);var h,g,p,v=r.call(n,l,c),d=s.length,m=s[0].length;for(g=0;m>g;++g)for(u.call(n,s[0][g],p=v[g],l[0][g][1]),h=1;d>h;++h)u.call(n,s[h][g],p+=l[h-1][g][1],l[h][g][1]);return a}var t=bt,e=Qu,r=ni,u=Ku,i=Ju,o=Gu;return n.values=function(e){return arguments.length?(t=e,n):t},n.order=function(t){return arguments.length?(e="function"==typeof t?t:cs.get(t)||Qu,n):e},n.offset=function(t){return arguments.length?(r="function"==typeof t?t:ss.get(t)||ni,n):r},n.x=function(t){return arguments.length?(i=t,n):i},n.y=function(t){return arguments.length?(o=t,n):o},n.out=function(t){return arguments.length?(u=t,n):u},n};var cs=Xo.map({"inside-out":function(n){var t,e,r=n.length,u=n.map(ti),i=n.map(ei),o=Xo.range(r).sort(function(n,t){return u[n]-u[t]}),a=0,c=0,s=[],l=[];for(t=0;r>t;++t)e=o[t],c>a?(a+=i[e],s.push(e)):(c+=i[e],l.push(e));return l.reverse().concat(s)},reverse:function(n){return Xo.range(n.length).reverse()},"default":Qu}),ss=Xo.map({silhouette:function(n){var 
t,e,r,u=n.length,i=n[0].length,o=[],a=0,c=[];for(e=0;i>e;++e){for(t=0,r=0;u>t;t++)r+=n[t][e][1];r>a&&(a=r),o.push(r)}for(e=0;i>e;++e)c[e]=(a-o[e])/2;return c},wiggle:function(n){var t,e,r,u,i,o,a,c,s,l=n.length,f=n[0],h=f.length,g=[];for(g[0]=c=s=0,e=1;h>e;++e){for(t=0,u=0;l>t;++t)u+=n[t][e][1];for(t=0,i=0,a=f[e][0]-f[e-1][0];l>t;++t){for(r=0,o=(n[t][e][1]-n[t][e-1][1])/(2*a);t>r;++r)o+=(n[r][e][1]-n[r][e-1][1])/a;i+=o*n[t][e][1]}g[e]=c-=u?i/u*a:0,s>c&&(s=c)}for(e=0;h>e;++e)g[e]-=s;return g},expand:function(n){var t,e,r,u=n.length,i=n[0].length,o=1/u,a=[];for(e=0;i>e;++e){for(t=0,r=0;u>t;t++)r+=n[t][e][1];if(r)for(t=0;u>t;t++)n[t][e][1]/=r;else for(t=0;u>t;t++)n[t][e][1]=o}for(e=0;i>e;++e)a[e]=0;return a},zero:ni});Xo.layout.histogram=function(){function n(n,i){for(var o,a,c=[],s=n.map(e,this),l=r.call(this,s,i),f=u.call(this,l,s,i),i=-1,h=s.length,g=f.length-1,p=t?1:1/h;++i<g;)o=c[i]=[],o.dx=f[i+1]-(o.x=f[i]),o.y=0;if(g>0)for(i=-1;++i<h;)a=s[i],a>=l[0]&&a<=l[1]&&(o=c[Xo.bisect(f,a,1,g)-1],o.y+=p,o.push(n[i]));return c}var t=!0,e=Number,r=oi,u=ui;return n.value=function(t){return arguments.length?(e=t,n):e},n.range=function(t){return arguments.length?(r=_t(t),n):r},n.bins=function(t){return arguments.length?(u="number"==typeof t?function(n){return ii(n,t)}:_t(t),n):u},n.frequency=function(e){return arguments.length?(t=!!e,n):t},n},Xo.layout.tree=function(){function n(n,i){function o(n,t){var r=n.children,u=n._tree;if(r&&(i=r.length)){for(var i,a,s,l=r[0],f=l,h=-1;++h<i;)s=r[h],o(s,a),f=c(s,a,f),a=s;vi(n);var g=.5*(l._tree.prelim+s._tree.prelim);t?(u.prelim=t._tree.prelim+e(n,t),u.mod=u.prelim-g):u.prelim=g}else t&&(u.prelim=t._tree.prelim+e(n,t))}function a(n,t){n.x=n._tree.prelim+t;var e=n.children;if(e&&(r=e.length)){var r,u=-1;for(t+=n._tree.mod;++u<r;)a(e[u],t)}}function c(n,t,r){if(t){for(var 
u,i=n,o=n,a=t,c=n.parent.children[0],s=i._tree.mod,l=o._tree.mod,f=a._tree.mod,h=c._tree.mod;a=si(a),i=ci(i),a&&i;)c=ci(c),o=si(o),o._tree.ancestor=n,u=a._tree.prelim+f-i._tree.prelim-s+e(a,i),u>0&&(di(mi(a,n,r),n,u),s+=u,l+=u),f+=a._tree.mod,s+=i._tree.mod,h+=c._tree.mod,l+=o._tree.mod;a&&!si(o)&&(o._tree.thread=a,o._tree.mod+=f-l),i&&!ci(c)&&(c._tree.thread=i,c._tree.mod+=s-h,r=n)}return r}var s=t.call(this,n,i),l=s[0];pi(l,function(n,t){n._tree={ancestor:n,prelim:0,mod:0,change:0,shift:0,number:t?t._tree.number+1:0}}),o(l),a(l,-l._tree.prelim);var f=li(l,hi),h=li(l,fi),g=li(l,gi),p=f.x-e(f,h)/2,v=h.x+e(h,f)/2,d=g.depth||1;return pi(l,u?function(n){n.x*=r[0],n.y=n.depth*r[1],delete n._tree}:function(n){n.x=(n.x-p)/(v-p)*r[0],n.y=n.depth/d*r[1],delete n._tree}),s}var t=Xo.layout.hierarchy().sort(null).value(null),e=ai,r=[1,1],u=!1;return n.separation=function(t){return arguments.length?(e=t,n):e},n.size=function(t){return arguments.length?(u=null==(r=t),n):u?null:r},n.nodeSize=function(t){return arguments.length?(u=null!=(r=t),n):u?r:null},Vu(n,t)},Xo.layout.pack=function(){function n(n,i){var o=e.call(this,n,i),a=o[0],c=u[0],s=u[1],l=null==t?Math.sqrt:"function"==typeof t?t:function(){return t};if(a.x=a.y=0,pi(a,function(n){n.r=+l(n.value)}),pi(a,bi),r){var f=r*(t?1:Math.max(2*a.r/c,2*a.r/s))/2;pi(a,function(n){n.r+=f}),pi(a,bi),pi(a,function(n){n.r-=f})}return ki(a,c/2,s/2,t?1:1/Math.max(2*a.r/c,2*a.r/s)),o}var t,e=Xo.layout.hierarchy().sort(yi),r=0,u=[1,1];return n.size=function(t){return arguments.length?(u=t,n):u},n.radius=function(e){return arguments.length?(t=null==e||"function"==typeof e?e:+e,n):t},n.padding=function(t){return arguments.length?(r=+t,n):r},Vu(n,e)},Xo.layout.cluster=function(){function n(n,i){var o,a=t.call(this,n,i),c=a[0],s=0;pi(c,function(n){var t=n.children;t&&t.length?(n.x=Ci(t),n.y=Ai(t)):(n.x=o?s+=e(n,o):0,n.y=0,o=n)});var l=Ni(c),f=Li(c),h=l.x-e(l,f)/2,g=f.x+e(f,l)/2;return 
pi(c,u?function(n){n.x=(n.x-c.x)*r[0],n.y=(c.y-n.y)*r[1]}:function(n){n.x=(n.x-h)/(g-h)*r[0],n.y=(1-(c.y?n.y/c.y:1))*r[1]}),a}var t=Xo.layout.hierarchy().sort(null).value(null),e=ai,r=[1,1],u=!1;return n.separation=function(t){return arguments.length?(e=t,n):e},n.size=function(t){return arguments.length?(u=null==(r=t),n):u?null:r},n.nodeSize=function(t){return arguments.length?(u=null!=(r=t),n):u?r:null},Vu(n,t)},Xo.layout.treemap=function(){function n(n,t){for(var e,r,u=-1,i=n.length;++u<i;)r=(e=n[u]).value*(0>t?0:t),e.area=isNaN(r)||0>=r?0:r}function t(e){var i=e.children;if(i&&i.length){var o,a,c,s=f(e),l=[],h=i.slice(),p=1/0,v="slice"===g?s.dx:"dice"===g?s.dy:"slice-dice"===g?1&e.depth?s.dy:s.dx:Math.min(s.dx,s.dy);for(n(h,s.dx*s.dy/e.value),l.area=0;(c=h.length)>0;)l.push(o=h[c-1]),l.area+=o.area,"squarify"!==g||(a=r(l,v))<=p?(h.pop(),p=a):(l.area-=l.pop().area,u(l,v,s,!1),v=Math.min(s.dx,s.dy),l.length=l.area=0,p=1/0);l.length&&(u(l,v,s,!0),l.length=l.area=0),i.forEach(t)}}function e(t){var r=t.children;if(r&&r.length){var i,o=f(t),a=r.slice(),c=[];for(n(a,o.dx*o.dy/t.value),c.area=0;i=a.pop();)c.push(i),c.area+=i.area,null!=i.z&&(u(c,i.z?o.dx:o.dy,o,!a.length),c.length=c.area=0);r.forEach(e)}}function r(n,t){for(var e,r=n.area,u=0,i=1/0,o=-1,a=n.length;++o<a;)(e=n[o].area)&&(i>e&&(i=e),e>u&&(u=e));return r*=r,t*=t,r?Math.max(t*u*p/r,r/(t*i*p)):1/0}function u(n,t,e,r){var u,i=-1,o=n.length,a=e.x,s=e.y,l=t?c(n.area/t):0;if(t==e.dx){for((r||l>e.dy)&&(l=e.dy);++i<o;)u=n[i],u.x=a,u.y=s,u.dy=l,a+=u.dx=Math.min(e.x+e.dx-a,l?c(u.area/l):0);u.z=!0,u.dx+=e.x+e.dx-a,e.y+=l,e.dy-=l}else{for((r||l>e.dx)&&(l=e.dx);++i<o;)u=n[i],u.x=a,u.y=s,u.dx=l,s+=u.dy=Math.min(e.y+e.dy-s,l?c(u.area/l):0);u.z=!1,u.dy+=e.y+e.dy-s,e.x+=l,e.dx-=l}}function i(r){var u=o||a(r),i=u[0];return i.x=0,i.y=0,i.dx=s[0],i.dy=s[1],o&&a.revalue(i),n([i],i.dx*i.dy/i.value),(o?e:t)(i),h&&(o=u),u}var o,a=Xo.layout.hierarchy(),c=Math.round,s=[1,1],l=null,f=Ti,h=!1,g="squarify",p=.5*(1+Math.sqrt(5));return 
i.size=function(n){return arguments.length?(s=n,i):s},i.padding=function(n){function t(t){var e=n.call(i,t,t.depth);return null==e?Ti(t):qi(t,"number"==typeof e?[e,e,e,e]:e)}function e(t){return qi(t,n)}if(!arguments.length)return l;var r;return f=null==(l=n)?Ti:"function"==(r=typeof n)?t:"number"===r?(n=[n,n,n,n],e):e,i},i.round=function(n){return arguments.length?(c=n?Math.round:Number,i):c!=Number},i.sticky=function(n){return arguments.length?(h=n,o=null,i):h},i.ratio=function(n){return arguments.length?(p=n,i):p},i.mode=function(n){return arguments.length?(g=n+"",i):g},Vu(i,a)},Xo.random={normal:function(n,t){var e=arguments.length;return 2>e&&(t=1),1>e&&(n=0),function(){var e,r,u;do e=2*Math.random()-1,r=2*Math.random()-1,u=e*e+r*r;while(!u||u>1);return n+t*e*Math.sqrt(-2*Math.log(u)/u)}},logNormal:function(){var n=Xo.random.normal.apply(Xo,arguments);return function(){return Math.exp(n())}},bates:function(n){var t=Xo.random.irwinHall(n);return function(){return t()/n}},irwinHall:function(n){return function(){for(var t=0,e=0;n>e;e++)t+=Math.random();return t}}},Xo.scale={};var ls={floor:bt,ceil:bt};Xo.scale.linear=function(){return Hi([0,1],[0,1],fu,!1)};var fs={s:1,g:1,p:1,r:1,e:1};Xo.scale.log=function(){return $i(Xo.scale.linear().domain([0,1]),10,!0,[1,10])};var hs=Xo.format(".0e"),gs={floor:function(n){return-Math.ceil(-n)},ceil:function(n){return-Math.floor(-n)}};Xo.scale.pow=function(){return Bi(Xo.scale.linear(),1,[0,1])},Xo.scale.sqrt=function(){return Xo.scale.pow().exponent(.5)},Xo.scale.ordinal=function(){return Ji([],{t:"range",a:[[]]})},Xo.scale.category10=function(){return Xo.scale.ordinal().range(ps)},Xo.scale.category20=function(){return Xo.scale.ordinal().range(vs)},Xo.scale.category20b=function(){return Xo.scale.ordinal().range(ds)},Xo.scale.category20c=function(){return Xo.scale.ordinal().range(ms)};var 
ps=[2062260,16744206,2924588,14034728,9725885,9197131,14907330,8355711,12369186,1556175].map(ht),vs=[2062260,11454440,16744206,16759672,2924588,10018698,14034728,16750742,9725885,12955861,9197131,12885140,14907330,16234194,8355711,13092807,12369186,14408589,1556175,10410725].map(ht),ds=[3750777,5395619,7040719,10264286,6519097,9216594,11915115,13556636,9202993,12426809,15186514,15190932,8666169,11356490,14049643,15177372,8077683,10834324,13528509,14589654].map(ht),ms=[3244733,7057110,10406625,13032431,15095053,16616764,16625259,16634018,3253076,7652470,10607003,13101504,7695281,10394312,12369372,14342891,6513507,9868950,12434877,14277081].map(ht);Xo.scale.quantile=function(){return Gi([],[])},Xo.scale.quantize=function(){return Ki(0,1,[0,1])},Xo.scale.threshold=function(){return Qi([.5],[0,1])},Xo.scale.identity=function(){return no([0,1])},Xo.svg={},Xo.svg.arc=function(){function n(){var n=t.apply(this,arguments),i=e.apply(this,arguments),o=r.apply(this,arguments)+ys,a=u.apply(this,arguments)+ys,c=(o>a&&(c=o,o=a,a=c),a-o),s=Sa>c?"0":"1",l=Math.cos(o),f=Math.sin(o),h=Math.cos(a),g=Math.sin(a);return c>=xs?n?"M0,"+i+"A"+i+","+i+" 0 1,1 0,"+-i+"A"+i+","+i+" 0 1,1 0,"+i+"M0,"+n+"A"+n+","+n+" 0 1,0 0,"+-n+"A"+n+","+n+" 0 1,0 0,"+n+"Z":"M0,"+i+"A"+i+","+i+" 0 1,1 0,"+-i+"A"+i+","+i+" 0 1,1 0,"+i+"Z":n?"M"+i*l+","+i*f+"A"+i+","+i+" 0 "+s+",1 "+i*h+","+i*g+"L"+n*h+","+n*g+"A"+n+","+n+" 0 "+s+",0 "+n*l+","+n*f+"Z":"M"+i*l+","+i*f+"A"+i+","+i+" 0 "+s+",1 "+i*h+","+i*g+"L0,0"+"Z"}var t=to,e=eo,r=ro,u=uo;return n.innerRadius=function(e){return arguments.length?(t=_t(e),n):t},n.outerRadius=function(t){return arguments.length?(e=_t(t),n):e},n.startAngle=function(t){return arguments.length?(r=_t(t),n):r},n.endAngle=function(t){return arguments.length?(u=_t(t),n):u},n.centroid=function(){var n=(t.apply(this,arguments)+e.apply(this,arguments))/2,i=(r.apply(this,arguments)+u.apply(this,arguments))/2+ys;return[Math.cos(i)*n,Math.sin(i)*n]},n};var 
ys=-Ea,xs=ka-Aa;Xo.svg.line=function(){return io(bt)};var Ms=Xo.map({linear:oo,"linear-closed":ao,step:co,"step-before":so,"step-after":lo,basis:mo,"basis-open":yo,"basis-closed":xo,bundle:Mo,cardinal:go,"cardinal-open":fo,"cardinal-closed":ho,monotone:Eo});Ms.forEach(function(n,t){t.key=n,t.closed=/-closed$/.test(n)});var _s=[0,2/3,1/3,0],bs=[0,1/3,2/3,0],ws=[0,1/6,2/3,1/6];Xo.svg.line.radial=function(){var n=io(Ao);return n.radius=n.x,delete n.x,n.angle=n.y,delete n.y,n},so.reverse=lo,lo.reverse=so,Xo.svg.area=function(){return Co(bt)},Xo.svg.area.radial=function(){var n=Co(Ao);return n.radius=n.x,delete n.x,n.innerRadius=n.x0,delete n.x0,n.outerRadius=n.x1,delete n.x1,n.angle=n.y,delete n.y,n.startAngle=n.y0,delete n.y0,n.endAngle=n.y1,delete n.y1,n},Xo.svg.chord=function(){function n(n,a){var c=t(this,i,n,a),s=t(this,o,n,a);return"M"+c.p0+r(c.r,c.p1,c.a1-c.a0)+(e(c,s)?u(c.r,c.p1,c.r,c.p0):u(c.r,c.p1,s.r,s.p0)+r(s.r,s.p1,s.a1-s.a0)+u(s.r,s.p1,c.r,c.p0))+"Z"}function t(n,t,e,r){var u=t.call(n,e,r),i=a.call(n,u,r),o=c.call(n,u,r)+ys,l=s.call(n,u,r)+ys;return{r:i,a0:o,a1:l,p0:[i*Math.cos(o),i*Math.sin(o)],p1:[i*Math.cos(l),i*Math.sin(l)]}}function e(n,t){return n.a0==t.a0&&n.a1==t.a1}function r(n,t,e){return"A"+n+","+n+" 0 "+ +(e>Sa)+",1 "+t}function u(n,t,e,r){return"Q 0,0 "+r}var i=hr,o=gr,a=No,c=ro,s=uo;return n.radius=function(t){return arguments.length?(a=_t(t),n):a},n.source=function(t){return arguments.length?(i=_t(t),n):i},n.target=function(t){return arguments.length?(o=_t(t),n):o},n.startAngle=function(t){return arguments.length?(c=_t(t),n):c},n.endAngle=function(t){return arguments.length?(s=_t(t),n):s},n},Xo.svg.diagonal=function(){function n(n,u){var i=t.call(this,n,u),o=e.call(this,n,u),a=(i.y+o.y)/2,c=[i,{x:i.x,y:a},{x:o.x,y:a},o];return c=c.map(r),"M"+c[0]+"C"+c[1]+" "+c[2]+" "+c[3]}var t=hr,e=gr,r=Lo;return n.source=function(e){return arguments.length?(t=_t(e),n):t},n.target=function(t){return 
arguments.length?(e=_t(t),n):e},n.projection=function(t){return arguments.length?(r=t,n):r},n},Xo.svg.diagonal.radial=function(){var n=Xo.svg.diagonal(),t=Lo,e=n.projection;return n.projection=function(n){return arguments.length?e(To(t=n)):t},n},Xo.svg.symbol=function(){function n(n,r){return(Ss.get(t.call(this,n,r))||Ro)(e.call(this,n,r))}var t=zo,e=qo;return n.type=function(e){return arguments.length?(t=_t(e),n):t},n.size=function(t){return arguments.length?(e=_t(t),n):e},n};var Ss=Xo.map({circle:Ro,cross:function(n){var t=Math.sqrt(n/5)/2;return"M"+-3*t+","+-t+"H"+-t+"V"+-3*t+"H"+t+"V"+-t+"H"+3*t+"V"+t+"H"+t+"V"+3*t+"H"+-t+"V"+t+"H"+-3*t+"Z"},diamond:function(n){var t=Math.sqrt(n/(2*Cs)),e=t*Cs;return"M0,"+-t+"L"+e+",0"+" 0,"+t+" "+-e+",0"+"Z"},square:function(n){var t=Math.sqrt(n)/2;return"M"+-t+","+-t+"L"+t+","+-t+" "+t+","+t+" "+-t+","+t+"Z"},"triangle-down":function(n){var t=Math.sqrt(n/As),e=t*As/2;return"M0,"+e+"L"+t+","+-e+" "+-t+","+-e+"Z"},"triangle-up":function(n){var t=Math.sqrt(n/As),e=t*As/2;return"M0,"+-e+"L"+t+","+e+" "+-t+","+e+"Z"}});Xo.svg.symbolTypes=Ss.keys();var ks,Es,As=Math.sqrt(3),Cs=Math.tan(30*Na),Ns=[],Ls=0;Ns.call=da.call,Ns.empty=da.empty,Ns.node=da.node,Ns.size=da.size,Xo.transition=function(n){return arguments.length?ks?n.transition():n:xa.transition()},Xo.transition.prototype=Ns,Ns.select=function(n){var t,e,r,u=this.id,i=[];n=M(n);for(var o=-1,a=this.length;++o<a;){i.push(t=[]);for(var c=this[o],s=-1,l=c.length;++s<l;)(r=c[s])&&(e=n.call(r,r.__data__,s,o))?("__data__"in r&&(e.__data__=r.__data__),jo(e,s,u,r.__transition__[u]),t.push(e)):t.push(null)}return Do(i,u)},Ns.selectAll=function(n){var t,e,r,u,i,o=this.id,a=[];n=_(n);for(var c=-1,s=this.length;++c<s;)for(var l=this[c],f=-1,h=l.length;++f<h;)if(r=l[f]){i=r.__transition__[o],e=n.call(r,r.__data__,f,c),a.push(t=[]);for(var g=-1,p=e.length;++g<p;)(u=e[g])&&jo(u,g,o,i),t.push(u)}return Do(a,o)},Ns.filter=function(n){var t,e,r,u=[];"function"!=typeof n&&(n=q(n));for(var 
i=0,o=this.length;o>i;i++){u.push(t=[]);for(var e=this[i],a=0,c=e.length;c>a;a++)(r=e[a])&&n.call(r,r.__data__,a,i)&&t.push(r)}return Do(u,this.id)},Ns.tween=function(n,t){var e=this.id;return arguments.length<2?this.node().__transition__[e].tween.get(n):R(this,null==t?function(t){t.__transition__[e].tween.remove(n)}:function(r){r.__transition__[e].tween.set(n,t)})},Ns.attr=function(n,t){function e(){this.removeAttribute(a)}function r(){this.removeAttributeNS(a.space,a.local)}function u(n){return null==n?e:(n+="",function(){var t,e=this.getAttribute(a);return e!==n&&(t=o(e,n),function(n){this.setAttribute(a,t(n))})})}function i(n){return null==n?r:(n+="",function(){var t,e=this.getAttributeNS(a.space,a.local);return e!==n&&(t=o(e,n),function(n){this.setAttributeNS(a.space,a.local,t(n))})})}if(arguments.length<2){for(t in n)this.attr(t,n[t]);return this}var o="transform"==n?Ru:fu,a=Xo.ns.qualify(n);return Po(this,"attr."+n,t,a.local?i:u)},Ns.attrTween=function(n,t){function e(n,e){var r=t.call(this,n,e,this.getAttribute(u));return r&&function(n){this.setAttribute(u,r(n))}}function r(n,e){var r=t.call(this,n,e,this.getAttributeNS(u.space,u.local));return r&&function(n){this.setAttributeNS(u.space,u.local,r(n))}}var u=Xo.ns.qualify(n);return this.tween("attr."+n,u.local?r:e)},Ns.style=function(n,t,e){function r(){this.style.removeProperty(n)}function u(t){return null==t?r:(t+="",function(){var r,u=Go.getComputedStyle(this,null).getPropertyValue(n);return u!==t&&(r=fu(u,t),function(t){this.style.setProperty(n,r(t),e)})})}var i=arguments.length;if(3>i){if("string"!=typeof n){2>i&&(t="");for(e in n)this.style(e,n[e],t);return this}e=""}return Po(this,"style."+n,t,u)},Ns.styleTween=function(n,t,e){function r(r,u){var i=t.call(this,r,u,Go.getComputedStyle(this,null).getPropertyValue(n));return i&&function(t){this.style.setProperty(n,i(t),e)}}return arguments.length<3&&(e=""),this.tween("style."+n,r)},Ns.text=function(n){return 
Po(this,"text",n,Uo)},Ns.remove=function(){return this.each("end.transition",function(){var n;this.__transition__.count<2&&(n=this.parentNode)&&n.removeChild(this)})},Ns.ease=function(n){var t=this.id;return arguments.length<1?this.node().__transition__[t].ease:("function"!=typeof n&&(n=Xo.ease.apply(Xo,arguments)),R(this,function(e){e.__transition__[t].ease=n}))},Ns.delay=function(n){var t=this.id;return R(this,"function"==typeof n?function(e,r,u){e.__transition__[t].delay=+n.call(e,e.__data__,r,u)}:(n=+n,function(e){e.__transition__[t].delay=n}))},Ns.duration=function(n){var t=this.id;return R(this,"function"==typeof n?function(e,r,u){e.__transition__[t].duration=Math.max(1,n.call(e,e.__data__,r,u))}:(n=Math.max(1,n),function(e){e.__transition__[t].duration=n}))},Ns.each=function(n,t){var e=this.id;if(arguments.length<2){var r=Es,u=ks;ks=e,R(this,function(t,r,u){Es=t.__transition__[e],n.call(t,t.__data__,r,u)}),Es=r,ks=u}else R(this,function(r){var u=r.__transition__[e];(u.event||(u.event=Xo.dispatch("start","end"))).on(n,t)});return this},Ns.transition=function(){for(var n,t,e,r,u=this.id,i=++Ls,o=[],a=0,c=this.length;c>a;a++){o.push(n=[]);for(var t=this[a],s=0,l=t.length;l>s;s++)(e=t[s])&&(r=Object.create(e.__transition__[u]),r.delay+=r.duration,jo(e,s,i,r)),n.push(e)}return Do(o,i)},Xo.svg.axis=function(){function n(n){n.each(function(){var n,s=Xo.select(this),l=this.__chart__||e,f=this.__chart__=e.copy(),h=null==c?f.ticks?f.ticks.apply(f,a):f.domain():c,g=null==t?f.tickFormat?f.tickFormat.apply(f,a):bt:t,p=s.selectAll(".tick").data(h,f),v=p.enter().insert("g",".domain").attr("class","tick").style("opacity",Aa),d=Xo.transition(p.exit()).style("opacity",Aa).remove(),m=Xo.transition(p).style("opacity",1),y=Ri(f),x=s.selectAll(".domain").data([0]),M=(x.enter().append("path").attr("class","domain"),Xo.transition(x));v.append("line"),v.append("text");var 
_=v.select("line"),b=m.select("line"),w=p.select("text").text(g),S=v.select("text"),k=m.select("text");switch(r){case"bottom":n=Ho,_.attr("y2",u),S.attr("y",Math.max(u,0)+o),b.attr("x2",0).attr("y2",u),k.attr("x",0).attr("y",Math.max(u,0)+o),w.attr("dy",".71em").style("text-anchor","middle"),M.attr("d","M"+y[0]+","+i+"V0H"+y[1]+"V"+i);break;case"top":n=Ho,_.attr("y2",-u),S.attr("y",-(Math.max(u,0)+o)),b.attr("x2",0).attr("y2",-u),k.attr("x",0).attr("y",-(Math.max(u,0)+o)),w.attr("dy","0em").style("text-anchor","middle"),M.attr("d","M"+y[0]+","+-i+"V0H"+y[1]+"V"+-i);break;case"left":n=Fo,_.attr("x2",-u),S.attr("x",-(Math.max(u,0)+o)),b.attr("x2",-u).attr("y2",0),k.attr("x",-(Math.max(u,0)+o)).attr("y",0),w.attr("dy",".32em").style("text-anchor","end"),M.attr("d","M"+-i+","+y[0]+"H0V"+y[1]+"H"+-i);break;case"right":n=Fo,_.attr("x2",u),S.attr("x",Math.max(u,0)+o),b.attr("x2",u).attr("y2",0),k.attr("x",Math.max(u,0)+o).attr("y",0),w.attr("dy",".32em").style("text-anchor","start"),M.attr("d","M"+i+","+y[0]+"H0V"+y[1]+"H"+i)}if(f.rangeBand){var E=f,A=E.rangeBand()/2;l=f=function(n){return E(n)+A}}else l.rangeBand?l=f:d.call(n,f);v.call(n,l),m.call(n,f)})}var t,e=Xo.scale.linear(),r=Ts,u=6,i=6,o=3,a=[10],c=null;return n.scale=function(t){return arguments.length?(e=t,n):e},n.orient=function(t){return arguments.length?(r=t in qs?t+"":Ts,n):r},n.ticks=function(){return arguments.length?(a=arguments,n):a},n.tickValues=function(t){return arguments.length?(c=t,n):c},n.tickFormat=function(e){return arguments.length?(t=e,n):t},n.tickSize=function(t){var e=arguments.length;return e?(u=+t,i=+arguments[e-1],n):u},n.innerTickSize=function(t){return arguments.length?(u=+t,n):u},n.outerTickSize=function(t){return arguments.length?(i=+t,n):i},n.tickPadding=function(t){return arguments.length?(o=+t,n):o},n.tickSubdivide=function(){return arguments.length&&n},n};var Ts="bottom",qs={top:1,right:1,bottom:1,left:1};Xo.svg.brush=function(){function n(i){i.each(function(){var 
i=Xo.select(this).style("pointer-events","all").style("-webkit-tap-highlight-color","rgba(0,0,0,0)").on("mousedown.brush",u).on("touchstart.brush",u),o=i.selectAll(".background").data([0]);o.enter().append("rect").attr("class","background").style("visibility","hidden").style("cursor","crosshair"),i.selectAll(".extent").data([0]).enter().append("rect").attr("class","extent").style("cursor","move");var a=i.selectAll(".resize").data(p,bt);a.exit().remove(),a.enter().append("g").attr("class",function(n){return"resize "+n}).style("cursor",function(n){return zs[n]}).append("rect").attr("x",function(n){return/[ew]$/.test(n)?-3:null}).attr("y",function(n){return/^[ns]/.test(n)?-3:null}).attr("width",6).attr("height",6).style("visibility","hidden"),a.style("display",n.empty()?"none":null);var l,f=Xo.transition(i),h=Xo.transition(o);c&&(l=Ri(c),h.attr("x",l[0]).attr("width",l[1]-l[0]),e(f)),s&&(l=Ri(s),h.attr("y",l[0]).attr("height",l[1]-l[0]),r(f)),t(f)})}function t(n){n.selectAll(".resize").attr("transform",function(n){return"translate("+l[+/e$/.test(n)]+","+f[+/^s/.test(n)]+")"})}function e(n){n.select(".extent").attr("x",l[0]),n.selectAll(".extent,.n>rect,.s>rect").attr("width",l[1]-l[0])}function r(n){n.select(".extent").attr("y",f[0]),n.selectAll(".extent,.e>rect,.w>rect").attr("height",f[1]-f[0])}function u(){function u(){32==Xo.event.keyCode&&(C||(x=null,L[0]-=l[1],L[1]-=f[1],C=2),d())}function p(){32==Xo.event.keyCode&&2==C&&(L[0]+=l[1],L[1]+=f[1],C=0,d())}function v(){var n=Xo.mouse(_),u=!1;M&&(n[0]+=M[0],n[1]+=M[1]),C||(Xo.event.altKey?(x||(x=[(l[0]+l[1])/2,(f[0]+f[1])/2]),L[0]=l[+(n[0]<x[0])],L[1]=f[+(n[1]<x[1])]):x=null),E&&m(n,c,0)&&(e(S),u=!0),A&&m(n,s,1)&&(r(S),u=!0),u&&(t(S),w({type:"brush",mode:C?"move":"resize"}))}function m(n,t,e){var r,u,a=Ri(t),c=a[0],s=a[1],p=L[e],v=e?f:l,d=v[1]-v[0];return 
C&&(c-=p,s-=d+p),r=(e?g:h)?Math.max(c,Math.min(s,n[e])):n[e],C?u=(r+=p)+d:(x&&(p=Math.max(c,Math.min(s,2*x[e]-r))),r>p?(u=r,r=p):u=p),v[0]!=r||v[1]!=u?(e?o=null:i=null,v[0]=r,v[1]=u,!0):void 0}function y(){v(),S.style("pointer-events","all").selectAll(".resize").style("display",n.empty()?"none":null),Xo.select("body").style("cursor",null),T.on("mousemove.brush",null).on("mouseup.brush",null).on("touchmove.brush",null).on("touchend.brush",null).on("keydown.brush",null).on("keyup.brush",null),N(),w({type:"brushend"})}var x,M,_=this,b=Xo.select(Xo.event.target),w=a.of(_,arguments),S=Xo.select(_),k=b.datum(),E=!/^(n|s)$/.test(k)&&c,A=!/^(e|w)$/.test(k)&&s,C=b.classed("extent"),N=O(),L=Xo.mouse(_),T=Xo.select(Go).on("keydown.brush",u).on("keyup.brush",p);if(Xo.event.changedTouches?T.on("touchmove.brush",v).on("touchend.brush",y):T.on("mousemove.brush",v).on("mouseup.brush",y),S.interrupt().selectAll("*").interrupt(),C)L[0]=l[0]-L[0],L[1]=f[0]-L[1];else if(k){var q=+/w$/.test(k),z=+/^n/.test(k);M=[l[1-q]-L[0],f[1-z]-L[1]],L[0]=l[q],L[1]=f[z]}else Xo.event.altKey&&(x=L.slice());S.style("pointer-events","none").selectAll(".resize").style("display",null),Xo.select("body").style("cursor",b.style("cursor")),w({type:"brushstart"}),v()}var i,o,a=y(n,"brushstart","brush","brushend"),c=null,s=null,l=[0,0],f=[0,0],h=!0,g=!0,p=Rs[0];return n.event=function(n){n.each(function(){var n=a.of(this,arguments),t={x:l,y:f,i:i,j:o},e=this.__chart__||t;this.__chart__=t,ks?Xo.select(this).transition().each("start.brush",function(){i=e.i,o=e.j,l=e.x,f=e.y,n({type:"brushstart"})}).tween("brush:brush",function(){var e=hu(l,t.x),r=hu(f,t.y);return i=o=null,function(u){l=t.x=e(u),f=t.y=r(u),n({type:"brush",mode:"resize"})}}).each("end.brush",function(){i=t.i,o=t.j,n({type:"brush",mode:"resize"}),n({type:"brushend"})}):(n({type:"brushstart"}),n({type:"brush",mode:"resize"}),n({type:"brushend"}))})},n.x=function(t){return arguments.length?(c=t,p=Rs[!c<<1|!s],n):c},n.y=function(t){return 
arguments.length?(s=t,p=Rs[!c<<1|!s],n):s},n.clamp=function(t){return arguments.length?(c&&s?(h=!!t[0],g=!!t[1]):c?h=!!t:s&&(g=!!t),n):c&&s?[h,g]:c?h:s?g:null},n.extent=function(t){var e,r,u,a,h;return arguments.length?(c&&(e=t[0],r=t[1],s&&(e=e[0],r=r[0]),i=[e,r],c.invert&&(e=c(e),r=c(r)),e>r&&(h=e,e=r,r=h),(e!=l[0]||r!=l[1])&&(l=[e,r])),s&&(u=t[0],a=t[1],c&&(u=u[1],a=a[1]),o=[u,a],s.invert&&(u=s(u),a=s(a)),u>a&&(h=u,u=a,a=h),(u!=f[0]||a!=f[1])&&(f=[u,a])),n):(c&&(i?(e=i[0],r=i[1]):(e=l[0],r=l[1],c.invert&&(e=c.invert(e),r=c.invert(r)),e>r&&(h=e,e=r,r=h))),s&&(o?(u=o[0],a=o[1]):(u=f[0],a=f[1],s.invert&&(u=s.invert(u),a=s.invert(a)),u>a&&(h=u,u=a,a=h))),c&&s?[[e,u],[r,a]]:c?[e,r]:s&&[u,a])},n.clear=function(){return n.empty()||(l=[0,0],f=[0,0],i=o=null),n},n.empty=function(){return!!c&&l[0]==l[1]||!!s&&f[0]==f[1]},Xo.rebind(n,a,"on")};var zs={n:"ns-resize",e:"ew-resize",s:"ns-resize",w:"ew-resize",nw:"nwse-resize",ne:"nesw-resize",se:"nwse-resize",sw:"nesw-resize"},Rs=[["n","e","s","w","nw","ne","se","sw"],["e","w"],["n","s"],[]],Ds=tc.format=ac.timeFormat,Ps=Ds.utc,Us=Ps("%Y-%m-%dT%H:%M:%S.%LZ");Ds.iso=Date.prototype.toISOString&&+new Date("2000-01-01T00:00:00.000Z")?Oo:Us,Oo.parse=function(n){var t=new Date(n);return isNaN(t)?null:t},Oo.toString=Us.toString,tc.second=Rt(function(n){return new ec(1e3*Math.floor(n/1e3))},function(n,t){n.setTime(n.getTime()+1e3*Math.floor(t))},function(n){return n.getSeconds()}),tc.seconds=tc.second.range,tc.seconds.utc=tc.second.utc.range,tc.minute=Rt(function(n){return new ec(6e4*Math.floor(n/6e4))},function(n,t){n.setTime(n.getTime()+6e4*Math.floor(t))},function(n){return n.getMinutes()}),tc.minutes=tc.minute.range,tc.minutes.utc=tc.minute.utc.range,tc.hour=Rt(function(n){var t=n.getTimezoneOffset()/60;return new ec(36e5*(Math.floor(n/36e5-t)+t))},function(n,t){n.setTime(n.getTime()+36e5*Math.floor(t))},function(n){return n.getHours()}),tc.hours=tc.hour.range,tc.hours.utc=tc.hour.utc.range,tc.month=Rt(function(n){return 
n=tc.day(n),n.setDate(1),n},function(n,t){n.setMonth(n.getMonth()+t)},function(n){return n.getMonth()}),tc.months=tc.month.range,tc.months.utc=tc.month.utc.range;var js=[1e3,5e3,15e3,3e4,6e4,3e5,9e5,18e5,36e5,108e5,216e5,432e5,864e5,1728e5,6048e5,2592e6,7776e6,31536e6],Hs=[[tc.second,1],[tc.second,5],[tc.second,15],[tc.second,30],[tc.minute,1],[tc.minute,5],[tc.minute,15],[tc.minute,30],[tc.hour,1],[tc.hour,3],[tc.hour,6],[tc.hour,12],[tc.day,1],[tc.day,2],[tc.week,1],[tc.month,1],[tc.month,3],[tc.year,1]],Fs=Ds.multi([[".%L",function(n){return n.getMilliseconds()}],[":%S",function(n){return n.getSeconds()}],["%I:%M",function(n){return n.getMinutes()}],["%I %p",function(n){return n.getHours()}],["%a %d",function(n){return n.getDay()&&1!=n.getDate()}],["%b %d",function(n){return 1!=n.getDate()}],["%B",function(n){return n.getMonth()}],["%Y",be]]),Os={range:function(n,t,e){return Xo.range(Math.ceil(n/e)*e,+t,e).map(Io)},floor:bt,ceil:bt};Hs.year=tc.year,tc.scale=function(){return Yo(Xo.scale.linear(),Hs,Fs)};var Ys=Hs.map(function(n){return[n[0].utc,n[1]]}),Is=Ps.multi([[".%L",function(n){return n.getUTCMilliseconds()}],[":%S",function(n){return n.getUTCSeconds()}],["%I:%M",function(n){return n.getUTCMinutes()}],["%I %p",function(n){return n.getUTCHours()}],["%a %d",function(n){return n.getUTCDay()&&1!=n.getUTCDate()}],["%b %d",function(n){return 1!=n.getUTCDate()}],["%B",function(n){return n.getUTCMonth()}],["%Y",be]]);Ys.year=tc.year.utc,tc.scale.utc=function(){return Yo(Xo.scale.linear(),Ys,Is)},Xo.text=wt(function(n){return n.responseText}),Xo.json=function(n,t){return St(n,"application/json",Zo,t)},Xo.html=function(n,t){return St(n,"text/html",Vo,t)},Xo.xml=wt(function(n){return n.responseXML}),"function"==typeof define&&define.amd?define(Xo):"object"==typeof module&&module.exports?module.exports=Xo:this.d3=Xo}();'use strict';(function(window){window.define=undefined;}).call(this,this);'use strict';tr.exportTo('tr.ui.b',function(){const 
DataSeriesEnableChangeEventType='data-series-enabled-change';const THIS_DOC=document.currentScript.ownerDocument;const svgNS='http://www.w3.org/2000/svg';const ColorScheme=tr.b.ColorScheme;function getColorOfKey(key,selected){let id=ColorScheme.getColorIdForGeneralPurposeString(key);if(selected){id+=ColorScheme.properties.brightenedOffsets[0];} return ColorScheme.colorsAsStrings[id];} function getSVGTextSize(parentNode,text,opt_callback,opt_this){const textNode=document.createElementNS('http://www.w3.org/2000/svg','text');textNode.setAttributeNS(null,'x',0);textNode.setAttributeNS(null,'y',0);textNode.setAttributeNS(null,'fill','black');textNode.appendChild(document.createTextNode(text));parentNode.appendChild(textNode);if(opt_callback){opt_callback.call(opt_this||parentNode,textNode);} const width=textNode.getComputedTextLength();const height=textNode.getBBox().height;parentNode.removeChild(textNode);return{width,height};} @@ -8200,7 +8200,7 @@ if(rendererHelper.isTelemetryInternalEvent(ev))continue;const frameIdRef=ev.args.frame;if(frameIdRef===undefined)continue;let list=candidatesForFrameId[frameIdRef];if(list===undefined){candidatesForFrameId[frameIdRef]=list=[];} list.push(ev);} return candidatesForFrameId;} -const URL_EXCLUSION=['about:blank','data:text/html,pluginplaceholderdata','data:text/html,chromewebdata'];function shouldIgnoreURL(url){return URL_EXCLUSION.includes(url);} +const URL_BLACKLIST=['about:blank','data:text/html,pluginplaceholderdata','data:text/html,chromewebdata'];function shouldIgnoreURL(url){return URL_BLACKLIST.includes(url);} function collectTimeToEvent(category,eventName,rendererHelper,navigationStartFinder){const targetEvents=findAllEvents(rendererHelper,category,eventName);const samples=[];for(const ev of targetEvents){if(rendererHelper.isTelemetryInternalEvent(ev))continue;const frameIdRef=ev.args.frame;const 
snapshot=findFrameLoaderSnapshotAt(rendererHelper,frameIdRef,ev.start);if(snapshot===undefined||!snapshot.args.isLoadingMainFrame)continue;const url=snapshot.args.documentLoaderURL;if(shouldIgnoreURL(url))continue;const navigationStartEvent=navigationStartFinder.findNavigationStartEventForFrameBeforeTimestamp(frameIdRef,ev.start);if(navigationStartEvent===undefined)continue;const timeToEvent=ev.start-navigationStartEvent.start;samples.push({value:timeToEvent,diagnostics:{url:new tr.v.d.Generic(url)}});} return samples;} function addFirstMeaningfulPaintSample(samples,rendererHelper,frameIdRef,navigationStart,fmpMarkerEvent){const snapshot=findFrameLoaderSnapshotAt(rendererHelper,frameIdRef,fmpMarkerEvent.start);if(!snapshot||!snapshot.args.isLoadingMainFrame)return;const url=snapshot.args.documentLoaderURL;if(shouldIgnoreURL(url))return;const navStartToFMPRange=tr.b.math.Range.fromExplicitRange(navigationStart.start,fmpMarkerEvent.start);const networkEvents=getNetworkEventsInRange(rendererHelper.process,navStartToFMPRange);const timeToFirstMeaningfulPaint=navStartToFMPRange.duration;const breakdownTree=tr.metrics.sh.generateWallClockTimeBreakdownTree(rendererHelper.mainThread,networkEvents,navStartToFMPRange);const breakdownDiagnostic=createBreakdownDiagnostic(breakdownTree);samples.push({value:timeToFirstMeaningfulPaint,diagnostics:{'Breakdown of [navStart, FMP]':breakdownDiagnostic,'Start':new RelatedEventSet(navigationStart),'End':new RelatedEventSet(fmpMarkerEvent),'Navigation infos':new tr.v.d.Generic({url,pid:rendererHelper.pid,start:navigationStart.start,fmp:fmpMarkerEvent.start}),}});return{firstMeaningfulPaint:fmpMarkerEvent.start,url};} @@ -8218,7 +8218,7 @@ return{firstMeaningfulPaintSamples,firstMeaningfulPaintCpuTimeSamples,firstInteractiveSamples};} function collectLoadingMetricsForRenderer(rendererHelper){const navigationStartFinder=new NavigationStartFinder(rendererHelper);const 
firstContentfulPaintSamples=collectTimeToEvent('loading','firstContentfulPaint',rendererHelper,navigationStartFinder);const onLoadSamples=collectTimeToEvent('blink.user_timing','loadEventStart',rendererHelper,navigationStartFinder);const{firstMeaningfulPaintSamples,firstMeaningfulPaintCpuTimeSamples,firstInteractiveSamples}=collectFirstMeaningfulPaintAndTimeToInteractiveForRenderer(rendererHelper,navigationStartFinder);return{firstContentfulPaintSamples,onLoadSamples,firstMeaningfulPaintSamples,firstMeaningfulPaintCpuTimeSamples,firstInteractiveSamples};} function addSamplesToHistogram(samples,histogram){for(const sample of samples){histogram.addSample(sample.value,sample.diagnostics);}} -function loadingMetric(histograms,model){const firstContentfulPaintHistogram=createHistogram('timeToFirstContentfulPaint');firstContentfulPaintHistogram.description='time to first contentful paint';const onLoadHistogram=createHistogram('timeToOnload');onLoadHistogram.description='time to onload. '+'This is temporary metric used for PCv1/v2 correctness checking';const firstMeaningfulPaintHistogram=createHistogram('timeToFirstMeaningfulPaint');firstMeaningfulPaintHistogram.description='time to first meaningful paint';const firstMeaningfulPaintCpuTimeHistogram=createHistogram('cpuTimeToFirstMeaningfulPaint');firstMeaningfulPaintCpuTimeHistogram.description='CPU time to first meaningful paint';const firstInteractiveHistogram=createHistogram('timeToFirstInteractive');firstInteractiveHistogram.description='time to first interactive';const chromeHelper=model.getOrCreateHelper(tr.model.helpers.ChromeModelHelper);for(const pid in chromeHelper.rendererHelpers){const 
rendererHelper=chromeHelper.rendererHelpers[pid];if(rendererHelper.isChromeTracingUI)continue;const{firstContentfulPaintSamples,onLoadSamples,firstMeaningfulPaintSamples,firstMeaningfulPaintCpuTimeSamples,firstInteractiveSamples}=collectLoadingMetricsForRenderer(rendererHelper);addSamplesToHistogram(firstContentfulPaintSamples,firstContentfulPaintHistogram);addSamplesToHistogram(onLoadSamples,onLoadHistogram);addSamplesToHistogram(firstMeaningfulPaintSamples,firstMeaningfulPaintHistogram);addSamplesToHistogram(firstMeaningfulPaintCpuTimeSamples,firstMeaningfulPaintCpuTimeHistogram);addSamplesToHistogram(firstInteractiveSamples,firstInteractiveHistogram);} +function loadingMetric(histograms,model){const firstContentfulPaintHistogram=createHistogram('timeToFirstContentfulPaint');firstContentfulPaintHistogram.description='time to first contentful paint';const onLoadHistogram=createHistogram('timeToOnload');onLoadHistogram.description='time to onload. '+'This is temporary metric used for PCv1/v2 sanity checking';const firstMeaningfulPaintHistogram=createHistogram('timeToFirstMeaningfulPaint');firstMeaningfulPaintHistogram.description='time to first meaningful paint';const firstMeaningfulPaintCpuTimeHistogram=createHistogram('cpuTimeToFirstMeaningfulPaint');firstMeaningfulPaintCpuTimeHistogram.description='CPU time to first meaningful paint';const firstInteractiveHistogram=createHistogram('timeToFirstInteractive');firstInteractiveHistogram.description='time to first interactive';const chromeHelper=model.getOrCreateHelper(tr.model.helpers.ChromeModelHelper);for(const pid in chromeHelper.rendererHelpers){const 
rendererHelper=chromeHelper.rendererHelpers[pid];if(rendererHelper.isChromeTracingUI)continue;const{firstContentfulPaintSamples,onLoadSamples,firstMeaningfulPaintSamples,firstMeaningfulPaintCpuTimeSamples,firstInteractiveSamples}=collectLoadingMetricsForRenderer(rendererHelper);addSamplesToHistogram(firstContentfulPaintSamples,firstContentfulPaintHistogram);addSamplesToHistogram(onLoadSamples,onLoadHistogram);addSamplesToHistogram(firstMeaningfulPaintSamples,firstMeaningfulPaintHistogram);addSamplesToHistogram(firstMeaningfulPaintCpuTimeSamples,firstMeaningfulPaintCpuTimeHistogram);addSamplesToHistogram(firstInteractiveSamples,firstInteractiveHistogram);} histograms.addHistogram(firstContentfulPaintHistogram);histograms.addHistogram(onLoadHistogram);histograms.addHistogram(firstMeaningfulPaintHistogram);histograms.addHistogram(firstMeaningfulPaintCpuTimeHistogram);histograms.addHistogram(firstInteractiveHistogram);} tr.metrics.MetricRegistry.register(loadingMetric);return{loadingMetric,getNetworkEventsInRange,collectLoadingMetricsForRenderer,RESPONSIVENESS_THRESHOLD_MS,INTERACTIVE_WINDOW_SIZE_MS,};});'use strict';tr.exportTo('tr.metrics',function(){const SPA_NAVIGATION_START_TO_FIRST_PAINT_DURATION_BIN_BOUNDARY=tr.v.HistogramBinBoundaries.createExponential(1,1000,50);function spaNavigationMetric(histograms,model){const histogram=new tr.v.Histogram('spaNavigationStartToFpDuration',tr.b.Unit.byName.timeDurationInMs_smallerIsBetter,SPA_NAVIGATION_START_TO_FIRST_PAINT_DURATION_BIN_BOUNDARY);histogram.description='Latency between the input event causing'+' a SPA navigation and the first paint event after it';histogram.customizeSummaryOptions({count:false,sum:false,});const modelHelper=model.getOrCreateHelper(tr.model.helpers.ChromeModelHelper);if(!modelHelper){return;} const rendererHelpers=modelHelper.rendererHelpers;if(!rendererHelpers){return;} @@ -8270,7 +8270,7 @@ if(!found){diagnosticsToCounts.set(diagnostic,1);}}} for(const[diagnostic,count]of 
diagnosticsToCounts){if(count>1){this.sharedDiagnosticsByGuid_.set(diagnostic.guid,diagnostic);}}} mergeRelationships(){for(const hist of this){hist.diagnostics.mergeRelationships(hist);}}} -HistogramSet.GROUPINGS={HISTOGRAM_NAME:new HistogramGrouping('name',h=>h.name),BENCHMARK_NAME:new HistogramGrouping('benchmark',h=>tr.v.d.TelemetryInfo.getField(h,'benchmarkName','')),BENCHMARK_START:new HistogramGrouping('time',h=>tr.v.d.TelemetryInfo.getField(h,'benchmarkStartString','')),STORYSET_REPEAT:new HistogramGrouping('storyset_repeat',h=>tr.v.d.TelemetryInfo.getField(h,'storysetRepeatCounterLabel',0),'storyset repeat'),STORY_NAME:new HistogramGrouping('story',h=>tr.v.d.TelemetryInfo.getField(h,'storyDisplayName','')),LEGACY_TIR_LABEL:new HistogramGrouping('tir',h=>tr.v.d.TelemetryInfo.getField(h,'legacyTIRLabel','')),PRIMARY_NAME:new HistogramGrouping('primary',h=>tr.v.d.BuildbotInfo.getField(h,'buildbotMasterName','')),PARTNER_NAME:new HistogramGrouping('bot',h=>tr.v.d.BuildbotInfo.getField(h,'buildbotName','')),BUILD_NUMBER:new HistogramGrouping('build',h=>tr.v.d.BuildbotInfo.getField(h,'buildNumber','')),DISPLAY_LABEL:new HistogramGrouping('label',h=>tr.v.d.TelemetryInfo.getField(h,'displayLabel','Value'))};return{HistogramGrouping,HistogramSet,};});'use strict';tr.exportTo('tr.e.chrome',function(){function hasTitleAndCategory(event,title,category){return event.title===title&&event.category&&tr.b.getCategoryParts(event.category).includes(category);} +HistogramSet.GROUPINGS={HISTOGRAM_NAME:new HistogramGrouping('name',h=>h.name),BENCHMARK_NAME:new HistogramGrouping('benchmark',h=>tr.v.d.TelemetryInfo.getField(h,'benchmarkName','')),BENCHMARK_START:new HistogramGrouping('time',h=>tr.v.d.TelemetryInfo.getField(h,'benchmarkStartString','')),STORYSET_REPEAT:new HistogramGrouping('storyset_repeat',h=>tr.v.d.TelemetryInfo.getField(h,'storysetRepeatCounterLabel',0),'storyset repeat'),STORY_NAME:new 
HistogramGrouping('story',h=>tr.v.d.TelemetryInfo.getField(h,'storyDisplayName','')),LEGACY_TIR_LABEL:new HistogramGrouping('tir',h=>tr.v.d.TelemetryInfo.getField(h,'legacyTIRLabel','')),MASTER_NAME:new HistogramGrouping('master',h=>tr.v.d.BuildbotInfo.getField(h,'buildbotMasterName','')),SLAVE_NAME:new HistogramGrouping('bot',h=>tr.v.d.BuildbotInfo.getField(h,'buildbotName','')),BUILD_NUMBER:new HistogramGrouping('build',h=>tr.v.d.BuildbotInfo.getField(h,'buildNumber','')),DISPLAY_LABEL:new HistogramGrouping('label',h=>tr.v.d.TelemetryInfo.getField(h,'displayLabel','Value'))};return{HistogramGrouping,HistogramSet,};});'use strict';tr.exportTo('tr.e.chrome',function(){function hasTitleAndCategory(event,title,category){return event.title===title&&event.category&&tr.b.getCategoryParts(event.category).includes(category);} function getNavStartTimestamps(rendererHelper){const navStartTimestamps=[];for(const e of rendererHelper.mainThread.sliceGroup.childEvents()){if(hasTitleAndCategory(e,'navigationStart','blink.user_timing')){navStartTimestamps.push(e.start);}} return navStartTimestamps;} function getInteractiveTimestamps(model){const interactiveTimestampsMap=new Map();const chromeHelper=model.getOrCreateHelper(tr.model.helpers.ChromeModelHelper);for(const rendererHelper of Object.values(chromeHelper.rendererHelpers)){const timestamps=[];interactiveTimestampsMap.set(rendererHelper.pid,timestamps);const samples=tr.metrics.sh.collectLoadingMetricsForRenderer(rendererHelper).firstInteractiveSamples;for(const sample of samples){timestamps.push(sample.diagnostics['Navigation infos'].value.interactive);}} @@ -8864,8 +8864,8 @@ const chartData={x:0};for(const row of tableRows){if(row.numberValue===undefined)continue;row.tableSum=tableSum;chartData[row.name]=row.numberValue;const dataSeries=this.chart_.getDataSeries(row.name);dataSeries.color=row.color;dataSeries.highlightedColor=row.highlightedColor;} 
if(tableRows.length>0){this.$.table.style.display='block';this.$.empty.style.display='none';this.$.table.tableRows=tableRows;this.$.table.rebuild();} if(Object.keys(chartData).length>1){this.$.container.style.display='block';this.$.empty.style.display='none';this.chart_.data=[chartData];}}});'use strict';Polymer({is:'tr-v-ui-buildbot-info-span',ready(){this.diagnostic_=undefined;this.$.table.showHeader=false;this.$.table.tableColumns=[{value:row=>row[0]},{value:row=>row[1]}];},get diagnostic(){return this.diagnostic_;},set diagnostic(d){this.diagnostic_=d;this.updateContents_();},updateContents_(){if(this.diagnostic===undefined){this.$.table.tableRows=[];return;} -const rows=[];if(this.diagnostic.displayMasterName){rows.push(['primary',this.diagnostic.displayMasterName]);} -if(this.diagnostic.buildbotMasterName){rows.push(['primary',this.diagnostic.buildbotMasterName]);} +const rows=[];if(this.diagnostic.displayMasterName){rows.push(['master',this.diagnostic.displayMasterName]);} +if(this.diagnostic.buildbotMasterName){rows.push(['master',this.diagnostic.buildbotMasterName]);} if(this.diagnostic.displayBotName){rows.push(['bot',this.diagnostic.displayBotName]);} if(this.diagnostic.buildbotName){rows.push(['bot',this.diagnostic.buildbotName]);} if(this.diagnostic.buildNumber){rows.push(['build number',this.diagnostic.buildNumber]);} @@ -8881,7 +8881,7 @@ if(this.diagnostic.ram){rows.push(['ram',tr.b.Unit.byName.sizeInBytes.format(this.diagnostic.ram)]);} this.$.table.tableRows=rows;}});'use strict';Polymer({is:'tr-v-ui-generic-diagnostic-span',ready(){this.diagnostic_=undefined;},get diagnostic(){return this.diagnostic_;},set diagnostic(d){this.diagnostic_=d;this.updateContents_();},updateContents_(){if(this.diagnostic===undefined){this.$.generic.object=undefined;return;} this.$.generic.object=this.diagnostic.value;}});'use 
strict';Polymer({is:'tr-v-ui-merged-buildbot-info-span',ready(){this.diagnostic_=undefined;this.$.table.showHeader=false;this.$.table.tableColumns=[{value:row=>row[0]},{value:row=>row[1]},];},get diagnostic(){return this.diagnostic_;},set diagnostic(d){this.diagnostic_=d;this.updateContents_();},updateContents_(){if(this.diagnostic===undefined){this.$.table.tableRows=[];return;} -const rows=[];if(this.diagnostic.displayMasterNames.size){rows.push(['primarys',Array.from(this.diagnostic.displayMasterNames).join(', ')]);} +const rows=[];if(this.diagnostic.displayMasterNames.size){rows.push(['masters',Array.from(this.diagnostic.displayMasterNames).join(', ')]);} if(this.diagnostic.displayBotNames.size){rows.push(['bots',Array.from(this.diagnostic.displayBotNames).join(', ')]);} if(this.diagnostic.buildNumbers.size){rows.push(['builds',Array.from(this.diagnostic.buildNumbers).join(', ')]);} for(const logUri of this.diagnostic.logUris){const anchor=document.createElement('a');anchor.href=logUri;anchor.innerText=logUri;rows.push(['log',anchor]);} @@ -10126,10 +10126,10 @@ root 517 2 0 0 irq_thread 0 S [irq/128-arm-smm] 5 root 518 2 0 0 rescuer_thread 0 S [sb-1] 5 root 519 2 0 0 ngd_slim_rx_msgq_thread 0 S [ngd_rx_thread1] 5 -root 520 2 0 0 ngd_notify_partners 0 S [ngd_notify_sl1] 5 +root 520 2 0 0 ngd_notify_slaves 0 S [ngd_notify_sl1] 5 root 521 2 0 0 rescuer_thread 0 S [sb-3] 5 root 522 2 0 0 ngd_slim_rx_msgq_thread 0 S [ngd_rx_thread3] 5 -root 523 2 0 0 ngd_notify_partners 0 S [ngd_notify_sl3] 5 +root 523 2 0 0 ngd_notify_slaves 0 S [ngd_notify_sl3] 5 root 525 2 0 0 sensor_sysfs_notify 0 S [therm_core:noti] 5 root 526 2 0 0 sensor_sysfs_notify 0 S [therm_core:noti] 5 root 527 2 0 0 sensor_sysfs_notify 0 S [therm_core:noti] 5 @@ -10163,8 +10163,8 @@ root 584 2 0 0 kauditd_thread 0 S [kauditd] 5 root 586 1 14496 2612 binder_ioctl_write_read 7f49272040 S [email protected] [email protected] system 587 1 15624 2752 binder_ioctl_write_read 7929071040 S [email protected] 
[email protected] -system 588 1 15884 2740 binder_ioctl_write_read 7496432040 S [email protected] [email protected] -system 589 1 2128880 2824 binder_ioctl_write_read 7aa5cf0040 S [email protected] [email protected] +system 588 1 15884 2740 binder_ioctl_write_read 7496432040 S [email protected] [email protected] +system 589 1 2128880 2824 binder_ioctl_write_read 7aa5cf0040 S [email protected] [email protected] system 591 1 2190076 20836 SyS_epoll_wait 7d3a2f0f50 S surfaceflinger surfaceflinger system 593 1 47296 5936 binder_ioctl_write_read 79c60f0040 S [email protected] [email protected] system 594 1 18484 2704 binder_ioctl_write_read 79f41b0040 S [email protected] [email protected] @@ -10856,8 +10856,8 @@ root 584 584 kauditd root 586 586 [email protected] system 587 587 [email protected] -system 588 588 [email protected] -system 589 589 [email protected] +system 588 588 [email protected] +system 589 589 [email protected] system 589 604 HwBinder:589_1 system 591 591 surfaceflinger system 591 620 HwBinder:591_1 @@ -12843,10 +12843,10 @@ root 517 2 0 0 irq_thread 0 S [irq/128-arm-smm] 5 root 518 2 0 0 rescuer_thread 0 S [sb-1] 5 root 519 2 0 0 ngd_slim_rx_msgq_thread 0 S [ngd_rx_thread1] 5 -root 520 2 0 0 ngd_notify_partners 0 S [ngd_notify_sl1] 5 +root 520 2 0 0 ngd_notify_slaves 0 S [ngd_notify_sl1] 5 root 521 2 0 0 rescuer_thread 0 S [sb-3] 5 root 522 2 0 0 ngd_slim_rx_msgq_thread 0 S [ngd_rx_thread3] 5 -root 523 2 0 0 ngd_notify_partners 0 S [ngd_notify_sl3] 5 +root 523 2 0 0 ngd_notify_slaves 0 S [ngd_notify_sl3] 5 root 525 2 0 0 sensor_sysfs_notify 0 S [therm_core:noti] 5 root 526 2 0 0 sensor_sysfs_notify 0 S [therm_core:noti] 5 root 527 2 0 0 sensor_sysfs_notify 0 S [therm_core:noti] 5 @@ -12880,8 +12880,8 @@ root 584 2 0 0 kauditd_thread 0 S [kauditd] 5 root 586 1 14496 2604 binder_ioctl_write_read 7f49272040 S [email protected] [email protected] system 587 1 15624 2592 binder_ioctl_write_read 7929071040 S [email protected] [email protected] -system 588 1 
15884 2624 binder_ioctl_write_read 7496432040 S [email protected] [email protected] -system 589 1 2128880 2628 binder_ioctl_write_read 7aa5cf0040 S [email protected] [email protected] +system 588 1 15884 2624 binder_ioctl_write_read 7496432040 S [email protected] [email protected] +system 589 1 2128880 2628 binder_ioctl_write_read 7aa5cf0040 S [email protected] [email protected] system 591 1 2190084 20996 SyS_epoll_wait 7d3a2f0f50 S surfaceflinger surfaceflinger system 593 1 47308 5784 binder_ioctl_write_read 79c60f0040 S [email protected] [email protected] system 594 1 18484 2708 binder_ioctl_write_read 79f41b0040 S [email protected] [email protected] @@ -13577,8 +13577,8 @@ root 584 584 kauditd root 586 586 [email protected] system 587 587 [email protected] -system 588 588 [email protected] -system 589 589 [email protected] +system 588 588 [email protected] +system 589 589 [email protected] system 589 604 HwBinder:589_1 system 591 591 surfaceflinger system 591 620 HwBinder:591_1 @@ -26875,18 +26875,18 @@ atrace-27096 (27096) [005] ...1 1034723.481597: tracing_mark_write: B|27096|HIDL::IBase::notifySyspropsChanged::client atrace-27096 (27096) [005] ...1 1034723.481637: tracing_mark_write: E|27096 atrace-27096 (27096) [005] ...1 1034723.481648: tracing_mark_write: B|27096|HIDL::IServiceManager::get::client - [email protected] ( 588) [006] ...1 1034723.481686: tracing_mark_write: B|588|HIDL::IBase::notifySyspropsChanged::server + [email protected] ( 588) [006] ...1 1034723.481686: tracing_mark_write: B|588|HIDL::IBase::notifySyspropsChanged::server hwservicemanag-570 ( 570) [002] ...1 1034723.481693: tracing_mark_write: B|570|HIDL::IServiceManager::get::server hwservicemanag-570 ( 570) [002] ...1 1034723.481813: tracing_mark_write: E|570 - [email protected] ( 588) [006] ...1 1034723.481829: tracing_mark_write: E|588 + [email protected] ( 588) [006] ...1 1034723.481829: tracing_mark_write: E|588 atrace-27096 (27096) [005] ...1 1034723.481857: tracing_mark_write: 
E|27096 atrace-27096 (27096) [005] ...1 1034723.481862: tracing_mark_write: B|27096|HIDL::IBase::notifySyspropsChanged::client atrace-27096 (27096) [005] ...1 1034723.481899: tracing_mark_write: E|27096 atrace-27096 (27096) [005] ...1 1034723.481910: tracing_mark_write: B|27096|HIDL::IServiceManager::get::client - [email protected] ( 589) [007] ...1 1034723.481945: tracing_mark_write: B|589|HIDL::IBase::notifySyspropsChanged::server + [email protected] ( 589) [007] ...1 1034723.481945: tracing_mark_write: B|589|HIDL::IBase::notifySyspropsChanged::server hwservicemanag-570 ( 570) [002] ...1 1034723.481953: tracing_mark_write: B|570|HIDL::IServiceManager::get::server hwservicemanag-570 ( 570) [002] ...1 1034723.482041: tracing_mark_write: E|570 - [email protected] ( 589) [007] ...1 1034723.482081: tracing_mark_write: E|589 + [email protected] ( 589) [007] ...1 1034723.482081: tracing_mark_write: E|589 atrace-27096 (27096) [005] ...1 1034723.482083: tracing_mark_write: E|27096 atrace-27096 (27096) [005] ...1 1034723.482087: tracing_mark_write: B|27096|HIDL::IBase::notifySyspropsChanged::client atrace-27096 (27096) [005] ...1 1034723.482128: tracing_mark_write: E|27096
diff --git a/tools/systrace_parser/parser/test/omr1_incomplete.html b/tools/systrace_parser/parser/test/omr1_incomplete.html index 4f87ff1..e8966fe 100755 --- a/tools/systrace_parser/parser/test/omr1_incomplete.html +++ b/tools/systrace_parser/parser/test/omr1_incomplete.html
@@ -4928,7 +4928,7 @@ visitedDomainIds.add(current.domainId);const outgoingTransformers=this.transformerMapByDomainId_[current.domainId];if(!outgoingTransformers)continue;for(const outgoingDomainId in outgoingTransformers){const toNextDomainTransformer=outgoingTransformers[outgoingDomainId];const toCurrentDomainTransformer=current.transformer;queue.push({domainId:outgoingDomainId,transformer:Transformer.compose(toNextDomainTransformer,toCurrentDomainTransformer)});}} return undefined;},selectModelDomainId_(){this.ensureAllDomainsAreConnected_();for(const chromeDomainId of POSSIBLE_CHROME_CLOCK_DOMAINS){if(this.domainsSeen_.has(chromeDomainId)){this.modelDomainId_=chromeDomainId;return;}} const domainsSeenArray=Array.from(this.domainsSeen_);domainsSeenArray.sort();this.modelDomainId_=domainsSeenArray[0];},ensureAllDomainsAreConnected_(){let firstDomainId=undefined;for(const domainId of this.domainsSeen_){if(!firstDomainId){firstDomainId=domainId;continue;} -if(!this.getTransformerBetween_(firstDomainId,domainId)){throw new Error('Unable to select a primary clock domain because no '+'path can be found from "'+firstDomainId+'" to "'+domainId+'".');}} +if(!this.getTransformerBetween_(firstDomainId,domainId)){throw new Error('Unable to select a master clock domain because no '+'path can be found from "'+firstDomainId+'" to "'+domainId+'".');}} return true;},onDomainSeen_(domainId){if(domainId===ClockDomainId.UNKNOWN_CHROME_LEGACY&&!this.domainsSeen_.has(ClockDomainId.UNKNOWN_CHROME_LEGACY)){for(const chromeDomainId of POSSIBLE_CHROME_CLOCK_DOMAINS){if(chromeDomainId===ClockDomainId.UNKNOWN_CHROME_LEGACY){continue;} this.collapseDomains_(ClockDomainId.UNKNOWN_CHROME_LEGACY,chromeDomainId);}} this.domainsSeen_.add(domainId);},onSyncCompleted_(marker1,marker2){const forwardTransformer=Transformer.fromMarkers(marker1,marker2);const backwardTransformer=Transformer.fromMarkers(marker2,marker1);const 
existingTransformer=this.getOrCreateTransformerMap_(marker1.domainId)[marker2.domainId];if(!existingTransformer||forwardTransformer.error<existingTransformer.error){this.getOrCreateTransformerMap_(marker1.domainId)[marker2.domainId]=forwardTransformer;this.getOrCreateTransformerMap_(marker2.domainId)[marker1.domainId]=backwardTransformer;}},collapseDomains_(domain1Id,domain2Id){this.getOrCreateTransformerMap_(domain1Id)[domain2Id]=this.getOrCreateTransformerMap_(domain2Id)[domain1Id]=Transformer.IDENTITY;},getOrCreateTransformerMap_(domainId){if(!this.transformerMapByDomainId_[domainId]){this.transformerMapByDomainId_[domainId]={};} @@ -5311,7 +5311,7 @@ readToOffset_(offset){const out=InMemoryTraceStream.uint8ArrayToString_(this.data_.subarray(this.cursor_,offset));this.cursor_=offset;return out;} static uint8ArrayToString_(arr){const c=[];for(let i=0;i<arr.length;i+=MAX_FUNCTION_ARGS_COUNT){c.push(String.fromCharCode(...arr.subarray(i,i+MAX_FUNCTION_ARGS_COUNT)));} return c.join('');}} -return{InMemoryTraceStream,};});!function(a){if("object"==typeof exports&&"undefined"!=typeof module)module.exports=a();else if("function"==typeof define&&define.amd)define([],a);else{var b;"undefined"!=typeof window?b=window:"undefined"!=typeof global?b=global:"undefined"!=typeof self&&(b=self),b.JSZip=a()}}(function(){return function a(b,c,d){function e(g,h){if(!c[g]){if(!b[g]){var i="function"==typeof require&&require;if(!h&&i)return i(g,!0);if(f)return f(g,!0);throw new Error("Cannot find module '"+g+"'")}var j=c[g]={exports:{}};b[g][0].call(j.exports,function(a){var c=b[g][1][a];return e(c?c:a)},j,j.exports,a,b,c,d)}return c[g].exports}for(var f="function"==typeof require&&require,g=0;g<d.length;g++)e(d[g]);return e}({1:[function(a,b,c){"use strict";var d="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/=";c.encode=function(a){for(var 
b,c,e,f,g,h,i,j="",k=0;k<a.length;)b=a.charCodeAt(k++),c=a.charCodeAt(k++),e=a.charCodeAt(k++),f=b>>2,g=(3&b)<<4|c>>4,h=(15&c)<<2|e>>6,i=63&e,isNaN(c)?h=i=64:isNaN(e)&&(i=64),j=j+d.charAt(f)+d.charAt(g)+d.charAt(h)+d.charAt(i);return j},c.decode=function(a){var b,c,e,f,g,h,i,j="",k=0;for(a=a.replace(/[^A-Za-z0-9\+\/\=]/g,"");k<a.length;)f=d.indexOf(a.charAt(k++)),g=d.indexOf(a.charAt(k++)),h=d.indexOf(a.charAt(k++)),i=d.indexOf(a.charAt(k++)),b=f<<2|g>>4,c=(15&g)<<4|h>>2,e=(3&h)<<6|i,j+=String.fromCharCode(b),64!=h&&(j+=String.fromCharCode(c)),64!=i&&(j+=String.fromCharCode(e));return j}},{}],2:[function(a,b){"use strict";function c(){this.compressedSize=0,this.uncompressedSize=0,this.crc32=0,this.compressionMethod=null,this.compressedContent=null}c.prototype={getContent:function(){return null},getCompressedContent:function(){return null}},b.exports=c},{}],3:[function(a,b,c){"use strict";c.STORE={magic:"\x00\x00",compress:function(a){return a},uncompress:function(a){return a},compressInputType:null,uncompressInputType:null},c.DEFLATE=a("./flate")},{"./flate":8}],4:[function(a,b){"use strict";var 
c=a("./utils"),d=[0,1996959894,3993919788,2567524794,124634137,1886057615,3915621685,2657392035,249268274,2044508324,3772115230,2547177864,162941995,2125561021,3887607047,2428444049,498536548,1789927666,4089016648,2227061214,450548861,1843258603,4107580753,2211677639,325883990,1684777152,4251122042,2321926636,335633487,1661365465,4195302755,2366115317,997073096,1281953886,3579855332,2724688242,1006888145,1258607687,3524101629,2768942443,901097722,1119000684,3686517206,2898065728,853044451,1172266101,3705015759,2882616665,651767980,1373503546,3369554304,3218104598,565507253,1454621731,3485111705,3099436303,671266974,1594198024,3322730930,2970347812,795835527,1483230225,3244367275,3060149565,1994146192,31158534,2563907772,4023717930,1907459465,112637215,2680153253,3904427059,2013776290,251722036,2517215374,3775830040,2137656763,141376813,2439277719,3865271297,1802195444,476864866,2238001368,4066508878,1812370925,453092731,2181625025,4111451223,1706088902,314042704,2344532202,4240017532,1658658271,366619977,2362670323,4224994405,1303535960,984961486,2747007092,3569037538,1256170817,1037604311,2765210733,3554079995,1131014506,879679996,2909243462,3663771856,1141124467,855842277,2852801631,3708648649,1342533948,654459306,3188396048,3373015174,1466479909,544179635,3110523913,3462522015,1591671054,702138776,2966460450,3352799412,1504918807,783551873,3082640443,3233442989,3988292384,2596254646,62317068,1957810842,3939845945,2647816111,81470997,1943803523,3814918930,2489596804,225274430,2053790376,3826175755,2466906013,167816743,2097651377,4027552580,2265490386,503444072,1762050814,4150417245,2154129355,426522225,1852507879,4275313526,2312317920,282753626,1742555852,4189708143,2394877945,397917763,1622183637,3604390888,2714866558,953729732,1340076626,3518719985,2797360999,1068828381,1219638859,3624741850,2936675148,906185462,1090812512,3747672003,2825379669,829329135,1181335161,3412177804,3160834842,628085408,1382605366,3423369109,3138078467,570562233,1426400815,3317316542,2
998733608,733239954,1555261956,3268935591,3050360625,752459403,1541320221,2607071920,3965973030,1969922972,40735498,2617837225,3943577151,1913087877,83908371,2512341634,3803740692,2075208622,213261112,2463272603,3855990285,2094854071,198958881,2262029012,4057260610,1759359992,534414190,2176718541,4139329115,1873836001,414664567,2282248934,4279200368,1711684554,285281116,2405801727,4167216745,1634467795,376229701,2685067896,3608007406,1308918612,956543938,2808555105,3495958263,1231636301,1047427035,2932959818,3654703836,1088359270,936918e3,2847714899,3736837829,1202900863,817233897,3183342108,3401237130,1404277552,615818150,3134207493,3453421203,1423857449,601450431,3009837614,3294710456,1567103746,711928724,3020668471,3272380065,1510334235,755167117];b.exports=function(a,b){if("undefined"==typeof a||!a.length)return 0;var e="string"!==c.getTypeOf(a);"undefined"==typeof b&&(b=0);var f=0,g=0,h=0;b=-1^b;for(var i=0,j=a.length;j>i;i++)h=e?a[i]:a.charCodeAt(i),g=255&(b^h),f=d[g],b=b>>>8^f;return-1^b}},{"./utils":21}],5:[function(a,b){"use strict";function c(){this.data=null,this.length=0,this.index=0}var d=a("./utils");c.prototype={checkOffset:function(a){this.checkIndex(this.index+a)},checkIndex:function(a){if(this.length<a||0>a)throw new Error("End of data reached (data length = "+this.length+", asked index = "+a+"). 
Corrupted zip ?")},setIndex:function(a){this.checkIndex(a),this.index=a},skip:function(a){this.setIndex(this.index+a)},byteAt:function(){},readInt:function(a){var b,c=0;for(this.checkOffset(a),b=this.index+a-1;b>=this.index;b--)c=(c<<8)+this.byteAt(b);return this.index+=a,c},readString:function(a){return d.transformTo("string",this.readData(a))},readData:function(){},lastIndexOfSignature:function(){},readDate:function(){var a=this.readInt(4);return new Date((a>>25&127)+1980,(a>>21&15)-1,a>>16&31,a>>11&31,a>>5&63,(31&a)<<1)}},b.exports=c},{"./utils":21}],6:[function(a,b,c){"use strict";c.base64=!1,c.binary=!1,c.dir=!1,c.createFolders=!1,c.date=null,c.compression=null,c.comment=null},{}],7:[function(a,b,c){"use strict";var d=a("./utils");c.string2binary=function(a){return d.string2binary(a)},c.string2Uint8Array=function(a){return d.transformTo("uint8array",a)},c.uint8Array2String=function(a){return d.transformTo("string",a)},c.string2Blob=function(a){var b=d.transformTo("arraybuffer",a);return d.arrayBuffer2Blob(b)},c.arrayBuffer2Blob=function(a){return d.arrayBuffer2Blob(a)},c.transformTo=function(a,b){return d.transformTo(a,b)},c.getTypeOf=function(a){return d.getTypeOf(a)},c.checkSupport=function(a){return d.checkSupport(a)},c.MAX_VALUE_16BITS=d.MAX_VALUE_16BITS,c.MAX_VALUE_32BITS=d.MAX_VALUE_32BITS,c.pretty=function(a){return d.pretty(a)},c.findCompression=function(a){return d.findCompression(a)},c.isRegExp=function(a){return d.isRegExp(a)}},{"./utils":21}],8:[function(a,b,c){"use strict";var d="undefined"!=typeof Uint8Array&&"undefined"!=typeof Uint16Array&&"undefined"!=typeof Uint32Array,e=a("pako");c.uncompressInputType=d?"uint8array":"array",c.compressInputType=d?"uint8array":"array",c.magic="\b\x00",c.compress=function(a){return e.deflateRaw(a)},c.uncompress=function(a){return e.inflateRaw(a)}},{pako:24}],9:[function(a,b){"use strict";function c(a,b){return this instanceof 
c?(this.files={},this.comment=null,this.root="",a&&this.load(a,b),void(this.clone=function(){var a=new c;for(var b in this)"function"!=typeof this[b]&&(a[b]=this[b]);return a})):new c(a,b)}var d=a("./base64");c.prototype=a("./object"),c.prototype.load=a("./load"),c.support=a("./support"),c.defaults=a("./defaults"),c.utils=a("./deprecatedPublicUtils"),c.base64={encode:function(a){return d.encode(a)},decode:function(a){return d.decode(a)}},c.compressions=a("./compressions"),b.exports=c},{"./base64":1,"./compressions":3,"./defaults":6,"./deprecatedPublicUtils":7,"./load":10,"./object":13,"./support":17}],10:[function(a,b){"use strict";var c=a("./base64"),d=a("./zipEntries");b.exports=function(a,b){var e,f,g,h;for(b=b||{},b.base64&&(a=c.decode(a)),f=new d(a,b),e=f.files,g=0;g<e.length;g++)h=e[g],this.file(h.fileName,h.decompressed,{binary:!0,optimizedBinaryString:!0,date:h.date,dir:h.dir,comment:h.fileComment.length?h.fileComment:null,createFolders:b.createFolders});return f.zipComment.length&&(this.comment=f.zipComment),this}},{"./base64":1,"./zipEntries":22}],11:[function(a,b){(function(a){"use strict";b.exports=function(b,c){return new a(b,c)},b.exports.test=function(b){return a.isBuffer(b)}}).call(this,"undefined"!=typeof Buffer?Buffer:void 0)},{}],12:[function(a,b){"use strict";function c(a){this.data=a,this.length=this.data.length,this.index=0}var d=a("./uint8ArrayReader");c.prototype=new d,c.prototype.readData=function(a){this.checkOffset(a);var b=this.data.slice(this.index,this.index+a);return this.index+=a,b},b.exports=c},{"./uint8ArrayReader":18}],13:[function(a,b){"use strict";var c=a("./support"),d=a("./utils"),e=a("./crc32"),f=a("./signature"),g=a("./defaults"),h=a("./base64"),i=a("./compressions"),j=a("./compressedObject"),k=a("./nodeBuffer"),l=a("./utf8"),m=a("./stringWriter"),n=a("./uint8ArrayWriter"),o=function(a){if(a._data instanceof j&&(a._data=a._data.getContent(),a.options.binary=!0,a.options.base64=!1,"uint8array"===d.getTypeOf(a._data))){var 
b=a._data;a._data=new Uint8Array(b.length),0!==b.length&&a._data.set(b,0)}return a._data},p=function(a){var b=o(a),e=d.getTypeOf(b);return"string"===e?!a.options.binary&&c.nodebuffer?k(b,"utf-8"):a.asBinary():b},q=function(a){var b=o(this);return null===b||"undefined"==typeof b?"":(this.options.base64&&(b=h.decode(b)),b=a&&this.options.binary?A.utf8decode(b):d.transformTo("string",b),a||this.options.binary||(b=d.transformTo("string",A.utf8encode(b))),b)},r=function(a,b,c){this.name=a,this.dir=c.dir,this.date=c.date,this.comment=c.comment,this._data=b,this.options=c,this._initialMetadata={dir:c.dir,date:c.date}};r.prototype={asText:function(){return q.call(this,!0)},asBinary:function(){return q.call(this,!1)},asNodeBuffer:function(){var a=p(this);return d.transformTo("nodebuffer",a)},asUint8Array:function(){var a=p(this);return d.transformTo("uint8array",a)},asArrayBuffer:function(){return this.asUint8Array().buffer}};var s=function(a,b){var c,d="";for(c=0;b>c;c++)d+=String.fromCharCode(255&a),a>>>=8;return d},t=function(){var a,b,c={};for(a=0;a<arguments.length;a++)for(b in arguments[a])arguments[a].hasOwnProperty(b)&&"undefined"==typeof c[b]&&(c[b]=arguments[a][b]);return c},u=function(a){return a=a||{},a.base64!==!0||null!==a.binary&&void 0!==a.binary||(a.binary=!0),a=t(a,g),a.date=a.date||new Date,null!==a.compression&&(a.compression=a.compression.toUpperCase()),a},v=function(a,b,c){var e,f=d.getTypeOf(b);if(c=u(c),c.createFolders&&(e=w(a))&&x.call(this,e,!0),c.dir||null===b||"undefined"==typeof b)c.base64=!1,c.binary=!1,b=null;else if("string"===f)c.binary&&!c.base64&&c.optimizedBinaryString!==!0&&(b=d.string2binary(b));else{if(c.base64=!1,c.binary=!0,!(f||b instanceof j))throw new Error("The data of '"+a+"' is in an unsupported format !");"arraybuffer"===f&&(b=d.transformTo("uint8array",b))}var g=new r(a,b,c);return this.files[a]=g,g},w=function(a){"/"==a.slice(-1)&&(a=a.substring(0,a.length-1));var b=a.lastIndexOf("/");return 
b>0?a.substring(0,b):""},x=function(a,b){return"/"!=a.slice(-1)&&(a+="/"),b="undefined"!=typeof b?b:!1,this.files[a]||v.call(this,a,null,{dir:!0,createFolders:b}),this.files[a]},y=function(a,b){var c,f=new j;return a._data instanceof j?(f.uncompressedSize=a._data.uncompressedSize,f.crc32=a._data.crc32,0===f.uncompressedSize||a.dir?(b=i.STORE,f.compressedContent="",f.crc32=0):a._data.compressionMethod===b.magic?f.compressedContent=a._data.getCompressedContent():(c=a._data.getContent(),f.compressedContent=b.compress(d.transformTo(b.compressInputType,c)))):(c=p(a),(!c||0===c.length||a.dir)&&(b=i.STORE,c=""),f.uncompressedSize=c.length,f.crc32=e(c),f.compressedContent=b.compress(d.transformTo(b.compressInputType,c))),f.compressedSize=f.compressedContent.length,f.compressionMethod=b.magic,f},z=function(a,b,c,g){var h,i,j,k,m=(c.compressedContent,d.transformTo("string",l.utf8encode(b.name))),n=b.comment||"",o=d.transformTo("string",l.utf8encode(n)),p=m.length!==b.name.length,q=o.length!==n.length,r=b.options,t="",u="",v="";j=b._initialMetadata.dir!==b.dir?b.dir:r.dir,k=b._initialMetadata.date!==b.date?b.date:r.date,h=k.getHours(),h<<=6,h|=k.getMinutes(),h<<=5,h|=k.getSeconds()/2,i=k.getFullYear()-1980,i<<=4,i|=k.getMonth()+1,i<<=5,i|=k.getDate(),p&&(u=s(1,1)+s(e(m),4)+m,t+="up"+s(u.length,2)+u),q&&(v=s(1,1)+s(this.crc32(o),4)+o,t+="uc"+s(v.length,2)+v);var w="";w+="\n\x00",w+=p||q?"\x00\b":"\x00\x00",w+=c.compressionMethod,w+=s(h,2),w+=s(i,2),w+=s(c.crc32,4),w+=s(c.compressedSize,4),w+=s(c.uncompressedSize,4),w+=s(m.length,2),w+=s(t.length,2);var x=f.LOCAL_FILE_HEADER+w+m+t,y=f.CENTRAL_FILE_HEADER+"\x00"+w+s(o.length,2)+"\x00\x00\x00\x00"+(j===!0?"\x00\x00\x00":"\x00\x00\x00\x00")+s(g,4)+m+t+o;return{fileRecord:x,dirRecord:y,compressedObject:c}},A={load:function(){throw new Error("Load method is not defined. 
Is the file jszip-load.js included ?")},filter:function(a){var b,c,d,e,f=[];for(b in this.files)this.files.hasOwnProperty(b)&&(d=this.files[b],e=new r(d.name,d._data,t(d.options)),c=b.slice(this.root.length,b.length),b.slice(0,this.root.length)===this.root&&a(c,e)&&f.push(e));return f},file:function(a,b,c){if(1===arguments.length){if(d.isRegExp(a)){var e=a;return this.filter(function(a,b){return!b.dir&&e.test(a)})}return this.filter(function(b,c){return!c.dir&&b===a})[0]||null}return a=this.root+a,v.call(this,a,b,c),this},folder:function(a){if(!a)return this;if(d.isRegExp(a))return this.filter(function(b,c){return c.dir&&a.test(b)});var b=this.root+a,c=x.call(this,b),e=this.clone();return e.root=c.name,e},remove:function(a){a=this.root+a;var b=this.files[a];if(b||("/"!=a.slice(-1)&&(a+="/"),b=this.files[a]),b&&!b.dir)delete this.files[a];else for(var c=this.filter(function(b,c){return c.name.slice(0,a.length)===a}),d=0;d<c.length;d++)delete this.files[c[d].name];return this},generate:function(a){a=t(a||{},{base64:!0,compression:"STORE",type:"base64",comment:null}),d.checkSupport(a.type);var b,c,e=[],g=0,j=0,k=d.transformTo("string",this.utf8encode(a.comment||this.comment||""));for(var l in this.files)if(this.files.hasOwnProperty(l)){var o=this.files[l],p=o.options.compression||a.compression.toUpperCase(),q=i[p];if(!q)throw new Error(p+" is not a valid compression method !");var r=y.call(this,o,q),u=z.call(this,l,o,r,g);g+=u.fileRecord.length+r.compressedSize,j+=u.dirRecord.length,e.push(u)}var v="";v=f.CENTRAL_DIRECTORY_END+"\x00\x00\x00\x00"+s(e.length,2)+s(e.length,2)+s(j,4)+s(g,4)+s(k.length,2)+k;var w=a.type.toLowerCase();for(b="uint8array"===w||"arraybuffer"===w||"blob"===w||"nodebuffer"===w?new n(g+j+v.length):new m(g+j+v.length),c=0;c<e.length;c++)b.append(e[c].fileRecord),b.append(e[c].compressedObject.compressedContent);for(c=0;c<e.length;c++)b.append(e[c].dirRecord);b.append(v);var 
x=b.finalize();switch(a.type.toLowerCase()){case"uint8array":case"arraybuffer":case"nodebuffer":return d.transformTo(a.type.toLowerCase(),x);case"blob":return d.arrayBuffer2Blob(d.transformTo("arraybuffer",x));case"base64":return a.base64?h.encode(x):x;default:return x}},crc32:function(a,b){return e(a,b)},utf8encode:function(a){return d.transformTo("string",l.utf8encode(a))},utf8decode:function(a){return l.utf8decode(a)}};b.exports=A},{"./base64":1,"./compressedObject":2,"./compressions":3,"./crc32":4,"./defaults":6,"./nodeBuffer":11,"./signature":14,"./stringWriter":16,"./support":17,"./uint8ArrayWriter":19,"./utf8":20,"./utils":21}],14:[function(a,b,c){"use strict";c.LOCAL_FILE_HEADER="PK",c.CENTRAL_FILE_HEADER="PK",c.CENTRAL_DIRECTORY_END="PK",c.ZIP64_CENTRAL_DIRECTORY_LOCATOR="PK",c.ZIP64_CENTRAL_DIRECTORY_END="PK",c.DATA_DESCRIPTOR="PK\b"},{}],15:[function(a,b){"use strict";function c(a,b){this.data=a,b||(this.data=e.string2binary(this.data)),this.length=this.data.length,this.index=0}var d=a("./dataReader"),e=a("./utils");c.prototype=new d,c.prototype.byteAt=function(a){return this.data.charCodeAt(a)},c.prototype.lastIndexOfSignature=function(a){return this.data.lastIndexOf(a)},c.prototype.readData=function(a){this.checkOffset(a);var b=this.data.slice(this.index,this.index+a);return this.index+=a,b},b.exports=c},{"./dataReader":5,"./utils":21}],16:[function(a,b){"use strict";var c=a("./utils"),d=function(){this.data=[]};d.prototype={append:function(a){a=c.transformTo("string",a),this.data.push(a)},finalize:function(){return this.data.join("")}},b.exports=d},{"./utils":21}],17:[function(a,b,c){(function(a){"use strict";if(c.base64=!0,c.array=!0,c.string=!0,c.arraybuffer="undefined"!=typeof ArrayBuffer&&"undefined"!=typeof Uint8Array,c.nodebuffer="undefined"!=typeof a,c.uint8array="undefined"!=typeof Uint8Array,"undefined"==typeof ArrayBuffer)c.blob=!1;else{var b=new ArrayBuffer(0);try{c.blob=0===new Blob([b],{type:"application/zip"}).size}catch(d){try{var 
e=window.BlobBuilder||window.WebKitBlobBuilder||window.MozBlobBuilder||window.MSBlobBuilder,f=new e;f.append(b),c.blob=0===f.getBlob("application/zip").size}catch(d){c.blob=!1}}}}).call(this,"undefined"!=typeof Buffer?Buffer:void 0)},{}],18:[function(a,b){"use strict";function c(a){a&&(this.data=a,this.length=this.data.length,this.index=0)}var d=a("./dataReader");c.prototype=new d,c.prototype.byteAt=function(a){return this.data[a]},c.prototype.lastIndexOfSignature=function(a){for(var b=a.charCodeAt(0),c=a.charCodeAt(1),d=a.charCodeAt(2),e=a.charCodeAt(3),f=this.length-4;f>=0;--f)if(this.data[f]===b&&this.data[f+1]===c&&this.data[f+2]===d&&this.data[f+3]===e)return f;return-1},c.prototype.readData=function(a){if(this.checkOffset(a),0===a)return new Uint8Array(0);var b=this.data.subarray(this.index,this.index+a);return this.index+=a,b},b.exports=c},{"./dataReader":5}],19:[function(a,b){"use strict";var c=a("./utils"),d=function(a){this.data=new Uint8Array(a),this.index=0};d.prototype={append:function(a){0!==a.length&&(a=c.transformTo("uint8array",a),this.data.set(a,this.index),this.index+=a.length)},finalize:function(){return this.data}},b.exports=d},{"./utils":21}],20:[function(a,b,c){"use strict";for(var d=a("./utils"),e=a("./support"),f=a("./nodeBuffer"),g=new Array(256),h=0;256>h;h++)g[h]=h>=252?6:h>=248?5:h>=240?4:h>=224?3:h>=192?2:1;g[254]=g[254]=1;var i=function(a){var b,c,d,f,g,h=a.length,i=0;for(f=0;h>f;f++)c=a.charCodeAt(f),55296===(64512&c)&&h>f+1&&(d=a.charCodeAt(f+1),56320===(64512&d)&&(c=65536+(c-55296<<10)+(d-56320),f++)),i+=128>c?1:2048>c?2:65536>c?3:4;for(b=e.uint8array?new Uint8Array(i):new Array(i),g=0,f=0;i>g;f++)c=a.charCodeAt(f),55296===(64512&c)&&h>f+1&&(d=a.charCodeAt(f+1),56320===(64512&d)&&(c=65536+(c-55296<<10)+(d-56320),f++)),128>c?b[g++]=c:2048>c?(b[g++]=192|c>>>6,b[g++]=128|63&c):65536>c?(b[g++]=224|c>>>12,b[g++]=128|c>>>6&63,b[g++]=128|63&c):(b[g++]=240|c>>>18,b[g++]=128|c>>>12&63,b[g++]=128|c>>>6&63,b[g++]=128|63&c);return 
b},j=function(a,b){var c;for(b=b||a.length,b>a.length&&(b=a.length),c=b-1;c>=0&&128===(192&a[c]);)c--;return 0>c?b:0===c?b:c+g[a[c]]>b?c:b},k=function(a){var b,c,e,f,h=a.length,i=new Array(2*h);for(c=0,b=0;h>b;)if(e=a[b++],128>e)i[c++]=e;else if(f=g[e],f>4)i[c++]=65533,b+=f-1;else{for(e&=2===f?31:3===f?15:7;f>1&&h>b;)e=e<<6|63&a[b++],f--;f>1?i[c++]=65533:65536>e?i[c++]=e:(e-=65536,i[c++]=55296|e>>10&1023,i[c++]=56320|1023&e)}return i.length!==c&&(i.subarray?i=i.subarray(0,c):i.length=c),d.applyFromCharCode(i)};c.utf8encode=function(a){return e.nodebuffer?f(a,"utf-8"):i(a)},c.utf8decode=function(a){if(e.nodebuffer)return d.transformTo("nodebuffer",a).toString("utf-8");a=d.transformTo(e.uint8array?"uint8array":"array",a);for(var b=[],c=0,f=a.length,g=65536;f>c;){var h=j(a,Math.min(c+g,f));b.push(e.uint8array?k(a.subarray(c,h)):k(a.slice(c,h))),c=h}return b.join("")}},{"./nodeBuffer":11,"./support":17,"./utils":21}],21:[function(a,b,c){"use strict";function d(a){return a}function e(a,b){for(var c=0;c<a.length;++c)b[c]=255&a.charCodeAt(c);return b}function f(a){var b=65536,d=[],e=a.length,f=c.getTypeOf(a),g=0,h=!0;try{switch(f){case"uint8array":String.fromCharCode.apply(null,new Uint8Array(0));break;case"nodebuffer":String.fromCharCode.apply(null,j(0))}}catch(i){h=!1}if(!h){for(var k="",l=0;l<a.length;l++)k+=String.fromCharCode(a[l]);return k}for(;e>g&&b>1;)try{d.push("array"===f||"nodebuffer"===f?String.fromCharCode.apply(null,a.slice(g,Math.min(g+b,e))):String.fromCharCode.apply(null,a.subarray(g,Math.min(g+b,e)))),g+=b}catch(i){b=Math.floor(b/2)}return d.join("")}function g(a,b){for(var c=0;c<a.length;c++)b[c]=a[c];return b}var h=a("./support"),i=a("./compressions"),j=a("./nodeBuffer");c.string2binary=function(a){for(var b="",c=0;c<a.length;c++)b+=String.fromCharCode(255&a.charCodeAt(c));return b},c.arrayBuffer2Blob=function(a){c.checkSupport("blob");try{return new Blob([a],{type:"application/zip"})}catch(b){try{var 
d=window.BlobBuilder||window.WebKitBlobBuilder||window.MozBlobBuilder||window.MSBlobBuilder,e=new d;return e.append(a),e.getBlob("application/zip")}catch(b){throw new Error("Bug : can't construct the Blob.")}}},c.applyFromCharCode=f;var k={};k.string={string:d,array:function(a){return e(a,new Array(a.length))},arraybuffer:function(a){return k.string.uint8array(a).buffer},uint8array:function(a){return e(a,new Uint8Array(a.length))},nodebuffer:function(a){return e(a,j(a.length))}},k.array={string:f,array:d,arraybuffer:function(a){return new Uint8Array(a).buffer},uint8array:function(a){return new Uint8Array(a)},nodebuffer:function(a){return j(a)}},k.arraybuffer={string:function(a){return f(new Uint8Array(a))},array:function(a){return g(new Uint8Array(a),new Array(a.byteLength))},arraybuffer:d,uint8array:function(a){return new Uint8Array(a)},nodebuffer:function(a){return j(new Uint8Array(a))}},k.uint8array={string:f,array:function(a){return g(a,new Array(a.length))},arraybuffer:function(a){return a.buffer},uint8array:d,nodebuffer:function(a){return j(a)}},k.nodebuffer={string:f,array:function(a){return g(a,new Array(a.length))},arraybuffer:function(a){return k.nodebuffer.uint8array(a).buffer},uint8array:function(a){return g(a,new Uint8Array(a.length))},nodebuffer:d},c.transformTo=function(a,b){if(b||(b=""),!a)return b;c.checkSupport(a);var d=c.getTypeOf(b),e=k[d][a](b);return e},c.getTypeOf=function(a){return"string"==typeof a?"string":"[object Array]"===Object.prototype.toString.call(a)?"array":h.nodebuffer&&j.test(a)?"nodebuffer":h.uint8array&&a instanceof Uint8Array?"uint8array":h.arraybuffer&&a instanceof ArrayBuffer?"arraybuffer":void 0},c.checkSupport=function(a){var b=h[a.toLowerCase()];if(!b)throw new Error(a+" is not supported by this browser")},c.MAX_VALUE_16BITS=65535,c.MAX_VALUE_32BITS=-1,c.pretty=function(a){var b,c,d="";for(c=0;c<(a||"").length;c++)b=a.charCodeAt(c),d+="\\x"+(16>b?"0":"")+b.toString(16).toUpperCase();return 
d},c.findCompression=function(a){for(var b in i)if(i.hasOwnProperty(b)&&i[b].magic===a)return i[b];return null},c.isRegExp=function(a){return"[object RegExp]"===Object.prototype.toString.call(a)}},{"./compressions":3,"./nodeBuffer":11,"./support":17}],22:[function(a,b){"use strict";function c(a,b){this.files=[],this.loadOptions=b,a&&this.load(a)}var d=a("./stringReader"),e=a("./nodeBufferReader"),f=a("./uint8ArrayReader"),g=a("./utils"),h=a("./signature"),i=a("./zipEntry"),j=a("./support"),k=a("./object");c.prototype={checkSignature:function(a){var b=this.reader.readString(4);if(b!==a)throw new Error("Corrupted zip or bug : unexpected signature ("+g.pretty(b)+", expected "+g.pretty(a)+")")},readBlockEndOfCentral:function(){this.diskNumber=this.reader.readInt(2),this.diskWithCentralDirStart=this.reader.readInt(2),this.centralDirRecordsOnThisDisk=this.reader.readInt(2),this.centralDirRecords=this.reader.readInt(2),this.centralDirSize=this.reader.readInt(4),this.centralDirOffset=this.reader.readInt(4),this.zipCommentLength=this.reader.readInt(2),this.zipComment=this.reader.readString(this.zipCommentLength),this.zipComment=k.utf8decode(this.zipComment)},readBlockZip64EndOfCentral:function(){this.zip64EndOfCentralSize=this.reader.readInt(8),this.versionMadeBy=this.reader.readString(2),this.versionNeeded=this.reader.readInt(2),this.diskNumber=this.reader.readInt(4),this.diskWithCentralDirStart=this.reader.readInt(4),this.centralDirRecordsOnThisDisk=this.reader.readInt(8),this.centralDirRecords=this.reader.readInt(8),this.centralDirSize=this.reader.readInt(8),this.centralDirOffset=this.reader.readInt(8),this.zip64ExtensibleData={};for(var 
a,b,c,d=this.zip64EndOfCentralSize-44,e=0;d>e;)a=this.reader.readInt(2),b=this.reader.readInt(4),c=this.reader.readString(b),this.zip64ExtensibleData[a]={id:a,length:b,value:c}},readBlockZip64EndOfCentralLocator:function(){if(this.diskWithZip64CentralDirStart=this.reader.readInt(4),this.relativeOffsetEndOfZip64CentralDir=this.reader.readInt(8),this.disksCount=this.reader.readInt(4),this.disksCount>1)throw new Error("Multi-volumes zip are not supported")},readLocalFiles:function(){var a,b;for(a=0;a<this.files.length;a++)b=this.files[a],this.reader.setIndex(b.localHeaderOffset),this.checkSignature(h.LOCAL_FILE_HEADER),b.readLocalPart(this.reader),b.handleUTF8()},readCentralDir:function(){var a;for(this.reader.setIndex(this.centralDirOffset);this.reader.readString(4)===h.CENTRAL_FILE_HEADER;)a=new i({zip64:this.zip64},this.loadOptions),a.readCentralPart(this.reader),this.files.push(a)},readEndOfCentral:function(){var a=this.reader.lastIndexOfSignature(h.CENTRAL_DIRECTORY_END);if(-1===a)throw new Error("Corrupted zip : can't find end of central directory");if(this.reader.setIndex(a),this.checkSignature(h.CENTRAL_DIRECTORY_END),this.readBlockEndOfCentral(),this.diskNumber===g.MAX_VALUE_16BITS||this.diskWithCentralDirStart===g.MAX_VALUE_16BITS||this.centralDirRecordsOnThisDisk===g.MAX_VALUE_16BITS||this.centralDirRecords===g.MAX_VALUE_16BITS||this.centralDirSize===g.MAX_VALUE_32BITS||this.centralDirOffset===g.MAX_VALUE_32BITS){if(this.zip64=!0,a=this.reader.lastIndexOfSignature(h.ZIP64_CENTRAL_DIRECTORY_LOCATOR),-1===a)throw new Error("Corrupted zip : can't find the ZIP64 end of central directory locator");this.reader.setIndex(a),this.checkSignature(h.ZIP64_CENTRAL_DIRECTORY_LOCATOR),this.readBlockZip64EndOfCentralLocator(),this.reader.setIndex(this.relativeOffsetEndOfZip64CentralDir),this.checkSignature(h.ZIP64_CENTRAL_DIRECTORY_END),this.readBlockZip64EndOfCentral()}},prepareReader:function(a){var 
b=g.getTypeOf(a);this.reader="string"!==b||j.uint8array?"nodebuffer"===b?new e(a):new f(g.transformTo("uint8array",a)):new d(a,this.loadOptions.optimizedBinaryString)},load:function(a){this.prepareReader(a),this.readEndOfCentral(),this.readCentralDir(),this.readLocalFiles()}},b.exports=c},{"./nodeBufferReader":12,"./object":13,"./signature":14,"./stringReader":15,"./support":17,"./uint8ArrayReader":18,"./utils":21,"./zipEntry":23}],23:[function(a,b){"use strict";function c(a,b){this.options=a,this.loadOptions=b}var d=a("./stringReader"),e=a("./utils"),f=a("./compressedObject"),g=a("./object");c.prototype={isEncrypted:function(){return 1===(1&this.bitFlag)},useUTF8:function(){return 2048===(2048&this.bitFlag)},prepareCompressedContent:function(a,b,c){return function(){var d=a.index;a.setIndex(b);var e=a.readData(c);return a.setIndex(d),e}},prepareContent:function(a,b,c,d,f){return function(){var a=e.transformTo(d.uncompressInputType,this.getCompressedContent()),b=d.uncompress(a);if(b.length!==f)throw new Error("Bug : uncompressed data size mismatch");return b}},readLocalPart:function(a){var b,c;if(a.skip(22),this.fileNameLength=a.readInt(2),c=a.readInt(2),this.fileName=a.readString(this.fileNameLength),a.skip(c),-1==this.compressedSize||-1==this.uncompressedSize)throw new Error("Bug or corrupted zip : didn't get enough informations from the central directory (compressedSize == -1 || uncompressedSize == -1)");if(b=e.findCompression(this.compressionMethod),null===b)throw new Error("Corrupted zip : compression "+e.pretty(this.compressionMethod)+" unknown (inner file : "+this.fileName+")");if(this.decompressed=new 
f,this.decompressed.compressedSize=this.compressedSize,this.decompressed.uncompressedSize=this.uncompressedSize,this.decompressed.crc32=this.crc32,this.decompressed.compressionMethod=this.compressionMethod,this.decompressed.getCompressedContent=this.prepareCompressedContent(a,a.index,this.compressedSize,b),this.decompressed.getContent=this.prepareContent(a,a.index,this.compressedSize,b,this.uncompressedSize),this.loadOptions.checkCRC32&&(this.decompressed=e.transformTo("string",this.decompressed.getContent()),g.crc32(this.decompressed)!==this.crc32))throw new Error("Corrupted zip : CRC32 mismatch")},readCentralPart:function(a){if(this.versionMadeBy=a.readString(2),this.versionNeeded=a.readInt(2),this.bitFlag=a.readInt(2),this.compressionMethod=a.readString(2),this.date=a.readDate(),this.crc32=a.readInt(4),this.compressedSize=a.readInt(4),this.uncompressedSize=a.readInt(4),this.fileNameLength=a.readInt(2),this.extraFieldsLength=a.readInt(2),this.fileCommentLength=a.readInt(2),this.diskNumberStart=a.readInt(2),this.internalFileAttributes=a.readInt(2),this.externalFileAttributes=a.readInt(4),this.localHeaderOffset=a.readInt(4),this.isEncrypted())throw new Error("Encrypted zip are not supported");this.fileName=a.readString(this.fileNameLength),this.readExtraFields(a),this.parseZIP64ExtraField(a),this.fileComment=a.readString(this.fileCommentLength),this.dir=16&this.externalFileAttributes?!0:!1},parseZIP64ExtraField:function(){if(this.extraFields[1]){var a=new d(this.extraFields[1].value);this.uncompressedSize===e.MAX_VALUE_32BITS&&(this.uncompressedSize=a.readInt(8)),this.compressedSize===e.MAX_VALUE_32BITS&&(this.compressedSize=a.readInt(8)),this.localHeaderOffset===e.MAX_VALUE_32BITS&&(this.localHeaderOffset=a.readInt(8)),this.diskNumberStart===e.MAX_VALUE_32BITS&&(this.diskNumberStart=a.readInt(4))}},readExtraFields:function(a){var 
b,c,d,e=a.index;for(this.extraFields=this.extraFields||{};a.index<e+this.extraFieldsLength;)b=a.readInt(2),c=a.readInt(2),d=a.readString(c),this.extraFields[b]={id:b,length:c,value:d}},handleUTF8:function(){if(this.useUTF8())this.fileName=g.utf8decode(this.fileName),this.fileComment=g.utf8decode(this.fileComment);else{var a=this.findExtraFieldUnicodePath();null!==a&&(this.fileName=a);var b=this.findExtraFieldUnicodeComment();null!==b&&(this.fileComment=b)}},findExtraFieldUnicodePath:function(){var a=this.extraFields[28789];if(a){var b=new d(a.value);return 1!==b.readInt(1)?null:g.crc32(this.fileName)!==b.readInt(4)?null:g.utf8decode(b.readString(a.length-5))}return null},findExtraFieldUnicodeComment:function(){var a=this.extraFields[25461];if(a){var b=new d(a.value);return 1!==b.readInt(1)?null:g.crc32(this.fileComment)!==b.readInt(4)?null:g.utf8decode(b.readString(a.length-5))}return null}},b.exports=c},{"./compressedObject":2,"./object":13,"./stringReader":15,"./utils":21}],24:[function(a,b){"use strict";var c=a("./lib/utils/common").assign,d=a("./lib/deflate"),e=a("./lib/inflate"),f=a("./lib/zlib/constants"),g={};c(g,d,e,f),b.exports=g},{"./lib/deflate":25,"./lib/inflate":26,"./lib/utils/common":27,"./lib/zlib/constants":30}],25:[function(a,b,c){"use strict";function d(a,b){var c=new s(b);if(c.push(a,!0),c.err)throw c.msg;return c.result}function e(a,b){return b=b||{},b.raw=!0,d(a,b)}function f(a,b){return b=b||{},b.gzip=!0,d(a,b)}var g=a("./zlib/deflate.js"),h=a("./utils/common"),i=a("./utils/strings"),j=a("./zlib/messages"),k=a("./zlib/zstream"),l=0,m=4,n=0,o=1,p=-1,q=0,r=8,s=function(a){this.options=h.assign({level:p,method:r,chunkSize:16384,windowBits:15,memLevel:8,strategy:q,to:""},a||{});var b=this.options;b.raw&&b.windowBits>0?b.windowBits=-b.windowBits:b.gzip&&b.windowBits>0&&b.windowBits<16&&(b.windowBits+=16),this.err=0,this.msg="",this.ended=!1,this.chunks=[],this.strm=new k,this.strm.avail_out=0;var 
c=g.deflateInit2(this.strm,b.level,b.method,b.windowBits,b.memLevel,b.strategy);if(c!==n)throw new Error(j[c]);b.header&&g.deflateSetHeader(this.strm,b.header)};s.prototype.push=function(a,b){var c,d,e=this.strm,f=this.options.chunkSize;if(this.ended)return!1;d=b===~~b?b:b===!0?m:l,e.input="string"==typeof a?i.string2buf(a):a,e.next_in=0,e.avail_in=e.input.length;do{if(0===e.avail_out&&(e.output=new h.Buf8(f),e.next_out=0,e.avail_out=f),c=g.deflate(e,d),c!==o&&c!==n)return this.onEnd(c),this.ended=!0,!1;(0===e.avail_out||0===e.avail_in&&d===m)&&this.onData("string"===this.options.to?i.buf2binstring(h.shrinkBuf(e.output,e.next_out)):h.shrinkBuf(e.output,e.next_out))}while((e.avail_in>0||0===e.avail_out)&&c!==o);return d===m?(c=g.deflateEnd(this.strm),this.onEnd(c),this.ended=!0,c===n):!0},s.prototype.onData=function(a){this.chunks.push(a)},s.prototype.onEnd=function(a){a===n&&(this.result="string"===this.options.to?this.chunks.join(""):h.flattenChunks(this.chunks)),this.chunks=[],this.err=a,this.msg=this.strm.msg},c.Deflate=s,c.deflate=d,c.deflateRaw=e,c.gzip=f},{"./utils/common":27,"./utils/strings":28,"./zlib/deflate.js":32,"./zlib/messages":37,"./zlib/zstream":39}],26:[function(a,b,c){"use strict";function d(a,b){var c=new m(b);if(c.push(a,!0),c.err)throw c.msg;return c.result}function e(a,b){return b=b||{},b.raw=!0,d(a,b)}var f=a("./zlib/inflate.js"),g=a("./utils/common"),h=a("./utils/strings"),i=a("./zlib/constants"),j=a("./zlib/messages"),k=a("./zlib/zstream"),l=a("./zlib/gzheader"),m=function(a){this.options=g.assign({chunkSize:16384,windowBits:0,to:""},a||{});var b=this.options;b.raw&&b.windowBits>=0&&b.windowBits<16&&(b.windowBits=-b.windowBits,0===b.windowBits&&(b.windowBits=-15)),!(b.windowBits>=0&&b.windowBits<16)||a&&a.windowBits||(b.windowBits+=32),b.windowBits>15&&b.windowBits<48&&0===(15&b.windowBits)&&(b.windowBits|=15),this.err=0,this.msg="",this.ended=!1,this.chunks=[],this.strm=new k,this.strm.avail_out=0;var 
c=f.inflateInit2(this.strm,b.windowBits);if(c!==i.Z_OK)throw new Error(j[c]);this.header=new l,f.inflateGetHeader(this.strm,this.header)};m.prototype.push=function(a,b){var c,d,e,j,k,l=this.strm,m=this.options.chunkSize;if(this.ended)return!1;d=b===~~b?b:b===!0?i.Z_FINISH:i.Z_NO_FLUSH,l.input="string"==typeof a?h.binstring2buf(a):a,l.next_in=0,l.avail_in=l.input.length;do{if(0===l.avail_out&&(l.output=new g.Buf8(m),l.next_out=0,l.avail_out=m),c=f.inflate(l,i.Z_NO_FLUSH),c!==i.Z_STREAM_END&&c!==i.Z_OK)return this.onEnd(c),this.ended=!0,!1;l.next_out&&(0===l.avail_out||c===i.Z_STREAM_END||0===l.avail_in&&d===i.Z_FINISH)&&("string"===this.options.to?(e=h.utf8border(l.output,l.next_out),j=l.next_out-e,k=h.buf2string(l.output,e),l.next_out=j,l.avail_out=m-j,j&&g.arraySet(l.output,l.output,e,j,0),this.onData(k)):this.onData(g.shrinkBuf(l.output,l.next_out)))}while(l.avail_in>0&&c!==i.Z_STREAM_END);return c===i.Z_STREAM_END&&(d=i.Z_FINISH),d===i.Z_FINISH?(c=f.inflateEnd(this.strm),this.onEnd(c),this.ended=!0,c===i.Z_OK):!0},m.prototype.onData=function(a){this.chunks.push(a)},m.prototype.onEnd=function(a){a===i.Z_OK&&(this.result="string"===this.options.to?this.chunks.join(""):g.flattenChunks(this.chunks)),this.chunks=[],this.err=a,this.msg=this.strm.msg},c.Inflate=m,c.inflate=d,c.inflateRaw=e,c.ungzip=d},{"./utils/common":27,"./utils/strings":28,"./zlib/constants":30,"./zlib/gzheader":33,"./zlib/inflate.js":35,"./zlib/messages":37,"./zlib/zstream":39}],27:[function(a,b,c){"use strict";var d="undefined"!=typeof Uint8Array&&"undefined"!=typeof Uint16Array&&"undefined"!=typeof Int32Array;c.assign=function(a){for(var b=Array.prototype.slice.call(arguments,1);b.length;){var c=b.shift();if(c){if("object"!=typeof c)throw new TypeError(c+"must be non-object");for(var d in c)c.hasOwnProperty(d)&&(a[d]=c[d])}}return a},c.shrinkBuf=function(a,b){return a.length===b?a:a.subarray?a.subarray(0,b):(a.length=b,a)};var e={arraySet:function(a,b,c,d,e){if(b.subarray&&a.subarray)return void 
a.set(b.subarray(c,c+d),e);for(var f=0;d>f;f++)a[e+f]=b[c+f]},flattenChunks:function(a){var b,c,d,e,f,g;for(d=0,b=0,c=a.length;c>b;b++)d+=a[b].length;for(g=new Uint8Array(d),e=0,b=0,c=a.length;c>b;b++)f=a[b],g.set(f,e),e+=f.length;return g}},f={arraySet:function(a,b,c,d,e){for(var f=0;d>f;f++)a[e+f]=b[c+f]},flattenChunks:function(a){return[].concat.apply([],a)}};c.setTyped=function(a){a?(c.Buf8=Uint8Array,c.Buf16=Uint16Array,c.Buf32=Int32Array,c.assign(c,e)):(c.Buf8=Array,c.Buf16=Array,c.Buf32=Array,c.assign(c,f))},c.setTyped(d)},{}],28:[function(a,b,c){"use strict";function d(a,b){if(65537>b&&(a.subarray&&g||!a.subarray&&f))return String.fromCharCode.apply(null,e.shrinkBuf(a,b));for(var c="",d=0;b>d;d++)c+=String.fromCharCode(a[d]);return c}var e=a("./common"),f=!0,g=!0;try{String.fromCharCode.apply(null,[0])}catch(h){f=!1}try{String.fromCharCode.apply(null,new Uint8Array(1))}catch(h){g=!1}for(var i=new e.Buf8(256),j=0;256>j;j++)i[j]=j>=252?6:j>=248?5:j>=240?4:j>=224?3:j>=192?2:1;i[254]=i[254]=1,c.string2buf=function(a){var b,c,d,f,g,h=a.length,i=0;for(f=0;h>f;f++)c=a.charCodeAt(f),55296===(64512&c)&&h>f+1&&(d=a.charCodeAt(f+1),56320===(64512&d)&&(c=65536+(c-55296<<10)+(d-56320),f++)),i+=128>c?1:2048>c?2:65536>c?3:4;for(b=new e.Buf8(i),g=0,f=0;i>g;f++)c=a.charCodeAt(f),55296===(64512&c)&&h>f+1&&(d=a.charCodeAt(f+1),56320===(64512&d)&&(c=65536+(c-55296<<10)+(d-56320),f++)),128>c?b[g++]=c:2048>c?(b[g++]=192|c>>>6,b[g++]=128|63&c):65536>c?(b[g++]=224|c>>>12,b[g++]=128|c>>>6&63,b[g++]=128|63&c):(b[g++]=240|c>>>18,b[g++]=128|c>>>12&63,b[g++]=128|c>>>6&63,b[g++]=128|63&c);return b},c.buf2binstring=function(a){return d(a,a.length)},c.binstring2buf=function(a){for(var b=new e.Buf8(a.length),c=0,d=b.length;d>c;c++)b[c]=a.charCodeAt(c);return b},c.buf2string=function(a,b){var c,e,f,g,h=b||a.length,j=new Array(2*h);for(e=0,c=0;h>c;)if(f=a[c++],128>f)j[e++]=f;else 
if(g=i[f],g>4)j[e++]=65533,c+=g-1;else{for(f&=2===g?31:3===g?15:7;g>1&&h>c;)f=f<<6|63&a[c++],g--;g>1?j[e++]=65533:65536>f?j[e++]=f:(f-=65536,j[e++]=55296|f>>10&1023,j[e++]=56320|1023&f)}return d(j,e)},c.utf8border=function(a,b){var c;for(b=b||a.length,b>a.length&&(b=a.length),c=b-1;c>=0&&128===(192&a[c]);)c--;return 0>c?b:0===c?b:c+i[a[c]]>b?c:b}},{"./common":27}],29:[function(a,b){"use strict";function c(a,b,c,d){for(var e=65535&a|0,f=a>>>16&65535|0,g=0;0!==c;){g=c>2e3?2e3:c,c-=g;do e=e+b[d++]|0,f=f+e|0;while(--g);e%=65521,f%=65521}return e|f<<16|0}b.exports=c},{}],30:[function(a,b){b.exports={Z_NO_FLUSH:0,Z_PARTIAL_FLUSH:1,Z_SYNC_FLUSH:2,Z_FULL_FLUSH:3,Z_FINISH:4,Z_BLOCK:5,Z_TREES:6,Z_OK:0,Z_STREAM_END:1,Z_NEED_DICT:2,Z_ERRNO:-1,Z_STREAM_ERROR:-2,Z_DATA_ERROR:-3,Z_BUF_ERROR:-5,Z_NO_COMPRESSION:0,Z_BEST_SPEED:1,Z_BEST_COMPRESSION:9,Z_DEFAULT_COMPRESSION:-1,Z_FILTERED:1,Z_HUFFMAN_ONLY:2,Z_RLE:3,Z_FIXED:4,Z_DEFAULT_STRATEGY:0,Z_BINARY:0,Z_TEXT:1,Z_UNKNOWN:2,Z_DEFLATED:8}},{}],31:[function(a,b){"use strict";function c(){for(var a,b=[],c=0;256>c;c++){a=c;for(var d=0;8>d;d++)a=1&a?3988292384^a>>>1:a>>>1;b[c]=a}return b}function d(a,b,c,d){var f=e,g=d+c;a=-1^a;for(var h=d;g>h;h++)a=a>>>8^f[255&(a^b[h])];return-1^a}var e=c();b.exports=d},{}],32:[function(a,b,c){"use strict";function d(a,b){return a.msg=G[b],b}function e(a){return(a<<1)-(a>4?9:0)}function f(a){for(var b=a.length;--b>=0;)a[b]=0}function g(a){var b=a.state,c=b.pending;c>a.avail_out&&(c=a.avail_out),0!==c&&(C.arraySet(a.output,b.pending_buf,b.pending_out,c,a.next_out),a.next_out+=c,b.pending_out+=c,a.total_out+=c,a.avail_out-=c,b.pending-=c,0===b.pending&&(b.pending_out=0))}function h(a,b){D._tr_flush_block(a,a.block_start>=0?a.block_start:-1,a.strstart-a.block_start,b),a.block_start=a.strstart,g(a.strm)}function i(a,b){a.pending_buf[a.pending++]=b}function j(a,b){a.pending_buf[a.pending++]=b>>>8&255,a.pending_buf[a.pending++]=255&b}function k(a,b,c,d){var e=a.avail_in;return 
e>d&&(e=d),0===e?0:(a.avail_in-=e,C.arraySet(b,a.input,a.next_in,e,c),1===a.state.wrap?a.adler=E(a.adler,b,e,c):2===a.state.wrap&&(a.adler=F(a.adler,b,e,c)),a.next_in+=e,a.total_in+=e,e)}function l(a,b){var c,d,e=a.max_chain_length,f=a.strstart,g=a.prev_length,h=a.nice_match,i=a.strstart>a.w_size-jb?a.strstart-(a.w_size-jb):0,j=a.window,k=a.w_mask,l=a.prev,m=a.strstart+ib,n=j[f+g-1],o=j[f+g];a.prev_length>=a.good_match&&(e>>=2),h>a.lookahead&&(h=a.lookahead);do if(c=b,j[c+g]===o&&j[c+g-1]===n&&j[c]===j[f]&&j[++c]===j[f+1]){f+=2,c++;do;while(j[++f]===j[++c]&&j[++f]===j[++c]&&j[++f]===j[++c]&&j[++f]===j[++c]&&j[++f]===j[++c]&&j[++f]===j[++c]&&j[++f]===j[++c]&&j[++f]===j[++c]&&m>f);if(d=ib-(m-f),f=m-ib,d>g){if(a.match_start=b,g=d,d>=h)break;n=j[f+g-1],o=j[f+g]}}while((b=l[b&k])>i&&0!==--e);return g<=a.lookahead?g:a.lookahead}function m(a){var b,c,d,e,f,g=a.w_size;do{if(e=a.window_size-a.lookahead-a.strstart,a.strstart>=g+(g-jb)){C.arraySet(a.window,a.window,g,g,0),a.match_start-=g,a.strstart-=g,a.block_start-=g,c=a.hash_size,b=c;do d=a.head[--b],a.head[b]=d>=g?d-g:0;while(--c);c=g,b=c;do d=a.prev[--b],a.prev[b]=d>=g?d-g:0;while(--c);e+=g}if(0===a.strm.avail_in)break;if(c=k(a.strm,a.window,a.strstart+a.lookahead,e),a.lookahead+=c,a.lookahead+a.insert>=hb)for(f=a.strstart-a.insert,a.ins_h=a.window[f],a.ins_h=(a.ins_h<<a.hash_shift^a.window[f+1])&a.hash_mask;a.insert&&(a.ins_h=(a.ins_h<<a.hash_shift^a.window[f+hb-1])&a.hash_mask,a.prev[f&a.w_mask]=a.head[a.ins_h],a.head[a.ins_h]=f,f++,a.insert--,!(a.lookahead+a.insert<hb)););}while(a.lookahead<jb&&0!==a.strm.avail_in)}function n(a,b){var c=65535;for(c>a.pending_buf_size-5&&(c=a.pending_buf_size-5);;){if(a.lookahead<=1){if(m(a),0===a.lookahead&&b===H)return sb;if(0===a.lookahead)break}a.strstart+=a.lookahead,a.lookahead=0;var d=a.block_start+c;if((0===a.strstart||a.strstart>=d)&&(a.lookahead=a.strstart-d,a.strstart=d,h(a,!1),0===a.strm.avail_out))return 
sb;if(a.strstart-a.block_start>=a.w_size-jb&&(h(a,!1),0===a.strm.avail_out))return sb}return a.insert=0,b===K?(h(a,!0),0===a.strm.avail_out?ub:vb):a.strstart>a.block_start&&(h(a,!1),0===a.strm.avail_out)?sb:sb}function o(a,b){for(var c,d;;){if(a.lookahead<jb){if(m(a),a.lookahead<jb&&b===H)return sb;if(0===a.lookahead)break}if(c=0,a.lookahead>=hb&&(a.ins_h=(a.ins_h<<a.hash_shift^a.window[a.strstart+hb-1])&a.hash_mask,c=a.prev[a.strstart&a.w_mask]=a.head[a.ins_h],a.head[a.ins_h]=a.strstart),0!==c&&a.strstart-c<=a.w_size-jb&&(a.match_length=l(a,c)),a.match_length>=hb)if(d=D._tr_tally(a,a.strstart-a.match_start,a.match_length-hb),a.lookahead-=a.match_length,a.match_length<=a.max_lazy_match&&a.lookahead>=hb){a.match_length--;do a.strstart++,a.ins_h=(a.ins_h<<a.hash_shift^a.window[a.strstart+hb-1])&a.hash_mask,c=a.prev[a.strstart&a.w_mask]=a.head[a.ins_h],a.head[a.ins_h]=a.strstart;while(0!==--a.match_length);a.strstart++}else a.strstart+=a.match_length,a.match_length=0,a.ins_h=a.window[a.strstart],a.ins_h=(a.ins_h<<a.hash_shift^a.window[a.strstart+1])&a.hash_mask;else d=D._tr_tally(a,0,a.window[a.strstart]),a.lookahead--,a.strstart++;if(d&&(h(a,!1),0===a.strm.avail_out))return sb}return a.insert=a.strstart<hb-1?a.strstart:hb-1,b===K?(h(a,!0),0===a.strm.avail_out?ub:vb):a.last_lit&&(h(a,!1),0===a.strm.avail_out)?sb:tb}function p(a,b){for(var c,d,e;;){if(a.lookahead<jb){if(m(a),a.lookahead<jb&&b===H)return 
sb;if(0===a.lookahead)break}if(c=0,a.lookahead>=hb&&(a.ins_h=(a.ins_h<<a.hash_shift^a.window[a.strstart+hb-1])&a.hash_mask,c=a.prev[a.strstart&a.w_mask]=a.head[a.ins_h],a.head[a.ins_h]=a.strstart),a.prev_length=a.match_length,a.prev_match=a.match_start,a.match_length=hb-1,0!==c&&a.prev_length<a.max_lazy_match&&a.strstart-c<=a.w_size-jb&&(a.match_length=l(a,c),a.match_length<=5&&(a.strategy===S||a.match_length===hb&&a.strstart-a.match_start>4096)&&(a.match_length=hb-1)),a.prev_length>=hb&&a.match_length<=a.prev_length){e=a.strstart+a.lookahead-hb,d=D._tr_tally(a,a.strstart-1-a.prev_match,a.prev_length-hb),a.lookahead-=a.prev_length-1,a.prev_length-=2;do++a.strstart<=e&&(a.ins_h=(a.ins_h<<a.hash_shift^a.window[a.strstart+hb-1])&a.hash_mask,c=a.prev[a.strstart&a.w_mask]=a.head[a.ins_h],a.head[a.ins_h]=a.strstart);while(0!==--a.prev_length);if(a.match_available=0,a.match_length=hb-1,a.strstart++,d&&(h(a,!1),0===a.strm.avail_out))return sb}else if(a.match_available){if(d=D._tr_tally(a,0,a.window[a.strstart-1]),d&&h(a,!1),a.strstart++,a.lookahead--,0===a.strm.avail_out)return sb}else a.match_available=1,a.strstart++,a.lookahead--}return a.match_available&&(d=D._tr_tally(a,0,a.window[a.strstart-1]),a.match_available=0),a.insert=a.strstart<hb-1?a.strstart:hb-1,b===K?(h(a,!0),0===a.strm.avail_out?ub:vb):a.last_lit&&(h(a,!1),0===a.strm.avail_out)?sb:tb}function q(a,b){for(var c,d,e,f,g=a.window;;){if(a.lookahead<=ib){if(m(a),a.lookahead<=ib&&b===H)return 
sb;if(0===a.lookahead)break}if(a.match_length=0,a.lookahead>=hb&&a.strstart>0&&(e=a.strstart-1,d=g[e],d===g[++e]&&d===g[++e]&&d===g[++e])){f=a.strstart+ib;do;while(d===g[++e]&&d===g[++e]&&d===g[++e]&&d===g[++e]&&d===g[++e]&&d===g[++e]&&d===g[++e]&&d===g[++e]&&f>e);a.match_length=ib-(f-e),a.match_length>a.lookahead&&(a.match_length=a.lookahead)}if(a.match_length>=hb?(c=D._tr_tally(a,1,a.match_length-hb),a.lookahead-=a.match_length,a.strstart+=a.match_length,a.match_length=0):(c=D._tr_tally(a,0,a.window[a.strstart]),a.lookahead--,a.strstart++),c&&(h(a,!1),0===a.strm.avail_out))return sb}return a.insert=0,b===K?(h(a,!0),0===a.strm.avail_out?ub:vb):a.last_lit&&(h(a,!1),0===a.strm.avail_out)?sb:tb}function r(a,b){for(var c;;){if(0===a.lookahead&&(m(a),0===a.lookahead)){if(b===H)return sb;break}if(a.match_length=0,c=D._tr_tally(a,0,a.window[a.strstart]),a.lookahead--,a.strstart++,c&&(h(a,!1),0===a.strm.avail_out))return sb}return a.insert=0,b===K?(h(a,!0),0===a.strm.avail_out?ub:vb):a.last_lit&&(h(a,!1),0===a.strm.avail_out)?sb:tb}function s(a){a.window_size=2*a.w_size,f(a.head),a.max_lazy_match=B[a.level].max_lazy,a.good_match=B[a.level].good_length,a.nice_match=B[a.level].nice_length,a.max_chain_length=B[a.level].max_chain,a.strstart=0,a.block_start=0,a.lookahead=0,a.insert=0,a.match_length=a.prev_length=hb-1,a.match_available=0,a.ins_h=0}function 
t(){this.strm=null,this.status=0,this.pending_buf=null,this.pending_buf_size=0,this.pending_out=0,this.pending=0,this.wrap=0,this.gzhead=null,this.gzindex=0,this.method=Y,this.last_flush=-1,this.w_size=0,this.w_bits=0,this.w_mask=0,this.window=null,this.window_size=0,this.prev=null,this.head=null,this.ins_h=0,this.hash_size=0,this.hash_bits=0,this.hash_mask=0,this.hash_shift=0,this.block_start=0,this.match_length=0,this.prev_match=0,this.match_available=0,this.strstart=0,this.match_start=0,this.lookahead=0,this.prev_length=0,this.max_chain_length=0,this.max_lazy_match=0,this.level=0,this.strategy=0,this.good_match=0,this.nice_match=0,this.dyn_ltree=new C.Buf16(2*fb),this.dyn_dtree=new C.Buf16(2*(2*db+1)),this.bl_tree=new C.Buf16(2*(2*eb+1)),f(this.dyn_ltree),f(this.dyn_dtree),f(this.bl_tree),this.l_desc=null,this.d_desc=null,this.bl_desc=null,this.bl_count=new C.Buf16(gb+1),this.heap=new C.Buf16(2*cb+1),f(this.heap),this.heap_len=0,this.heap_max=0,this.depth=new C.Buf16(2*cb+1),f(this.depth),this.l_buf=0,this.lit_bufsize=0,this.last_lit=0,this.d_buf=0,this.opt_len=0,this.static_len=0,this.matches=0,this.insert=0,this.bi_buf=0,this.bi_valid=0}function u(a){var b;return a&&a.state?(a.total_in=a.total_out=0,a.data_type=X,b=a.state,b.pending=0,b.pending_out=0,b.wrap<0&&(b.wrap=-b.wrap),b.status=b.wrap?lb:qb,a.adler=2===b.wrap?0:1,b.last_flush=H,D._tr_init(b),M):d(a,O)}function v(a){var b=u(a);return b===M&&s(a.state),b}function w(a,b){return a&&a.state?2!==a.state.wrap?O:(a.state.gzhead=b,M):O}function x(a,b,c,e,f,g){if(!a)return O;var h=1;if(b===R&&(b=6),0>e?(h=0,e=-e):e>15&&(h=2,e-=16),1>f||f>Z||c!==Y||8>e||e>15||0>b||b>9||0>g||g>V)return d(a,O);8===e&&(e=9);var i=new t;return a.state=i,i.strm=a,i.wrap=h,i.gzhead=null,i.w_bits=e,i.w_size=1<<i.w_bits,i.w_mask=i.w_size-1,i.hash_bits=f+7,i.hash_size=1<<i.hash_bits,i.hash_mask=i.hash_size-1,i.hash_shift=~~((i.hash_bits+hb-1)/hb),i.window=new C.Buf8(2*i.w_size),i.head=new C.Buf16(i.hash_size),i.prev=new 
C.Buf16(i.w_size),i.lit_bufsize=1<<f+6,i.pending_buf_size=4*i.lit_bufsize,i.pending_buf=new C.Buf8(i.pending_buf_size),i.d_buf=i.lit_bufsize>>1,i.l_buf=3*i.lit_bufsize,i.level=b,i.strategy=g,i.method=c,v(a)}function y(a,b){return x(a,b,Y,$,_,W)}function z(a,b){var c,h,k,l;if(!a||!a.state||b>L||0>b)return a?d(a,O):O;if(h=a.state,!a.output||!a.input&&0!==a.avail_in||h.status===rb&&b!==K)return d(a,0===a.avail_out?Q:O);if(h.strm=a,c=h.last_flush,h.last_flush=b,h.status===lb)if(2===h.wrap)a.adler=0,i(h,31),i(h,139),i(h,8),h.gzhead?(i(h,(h.gzhead.text?1:0)+(h.gzhead.hcrc?2:0)+(h.gzhead.extra?4:0)+(h.gzhead.name?8:0)+(h.gzhead.comment?16:0)),i(h,255&h.gzhead.time),i(h,h.gzhead.time>>8&255),i(h,h.gzhead.time>>16&255),i(h,h.gzhead.time>>24&255),i(h,9===h.level?2:h.strategy>=T||h.level<2?4:0),i(h,255&h.gzhead.os),h.gzhead.extra&&h.gzhead.extra.length&&(i(h,255&h.gzhead.extra.length),i(h,h.gzhead.extra.length>>8&255)),h.gzhead.hcrc&&(a.adler=F(a.adler,h.pending_buf,h.pending,0)),h.gzindex=0,h.status=mb):(i(h,0),i(h,0),i(h,0),i(h,0),i(h,0),i(h,9===h.level?2:h.strategy>=T||h.level<2?4:0),i(h,wb),h.status=qb);else{var m=Y+(h.w_bits-8<<4)<<8,n=-1;n=h.strategy>=T||h.level<2?0:h.level<6?1:6===h.level?2:3,m|=n<<6,0!==h.strstart&&(m|=kb),m+=31-m%31,h.status=qb,j(h,m),0!==h.strstart&&(j(h,a.adler>>>16),j(h,65535&a.adler)),a.adler=1}if(h.status===mb)if(h.gzhead.extra){for(k=h.pending;h.gzindex<(65535&h.gzhead.extra.length)&&(h.pending!==h.pending_buf_size||(h.gzhead.hcrc&&h.pending>k&&(a.adler=F(a.adler,h.pending_buf,h.pending-k,k)),g(a),k=h.pending,h.pending!==h.pending_buf_size));)i(h,255&h.gzhead.extra[h.gzindex]),h.gzindex++;h.gzhead.hcrc&&h.pending>k&&(a.adler=F(a.adler,h.pending_buf,h.pending-k,k)),h.gzindex===h.gzhead.extra.length&&(h.gzindex=0,h.status=nb)}else 
h.status=nb;if(h.status===nb)if(h.gzhead.name){k=h.pending;do{if(h.pending===h.pending_buf_size&&(h.gzhead.hcrc&&h.pending>k&&(a.adler=F(a.adler,h.pending_buf,h.pending-k,k)),g(a),k=h.pending,h.pending===h.pending_buf_size)){l=1;break}l=h.gzindex<h.gzhead.name.length?255&h.gzhead.name.charCodeAt(h.gzindex++):0,i(h,l)}while(0!==l);h.gzhead.hcrc&&h.pending>k&&(a.adler=F(a.adler,h.pending_buf,h.pending-k,k)),0===l&&(h.gzindex=0,h.status=ob)}else h.status=ob;if(h.status===ob)if(h.gzhead.comment){k=h.pending;do{if(h.pending===h.pending_buf_size&&(h.gzhead.hcrc&&h.pending>k&&(a.adler=F(a.adler,h.pending_buf,h.pending-k,k)),g(a),k=h.pending,h.pending===h.pending_buf_size)){l=1;break}l=h.gzindex<h.gzhead.comment.length?255&h.gzhead.comment.charCodeAt(h.gzindex++):0,i(h,l)}while(0!==l);h.gzhead.hcrc&&h.pending>k&&(a.adler=F(a.adler,h.pending_buf,h.pending-k,k)),0===l&&(h.status=pb)}else h.status=pb;if(h.status===pb&&(h.gzhead.hcrc?(h.pending+2>h.pending_buf_size&&g(a),h.pending+2<=h.pending_buf_size&&(i(h,255&a.adler),i(h,a.adler>>8&255),a.adler=0,h.status=qb)):h.status=qb),0!==h.pending){if(g(a),0===a.avail_out)return h.last_flush=-1,M}else if(0===a.avail_in&&e(b)<=e(c)&&b!==K)return d(a,Q);if(h.status===rb&&0!==a.avail_in)return d(a,Q);if(0!==a.avail_in||0!==h.lookahead||b!==H&&h.status!==rb){var o=h.strategy===T?r(h,b):h.strategy===U?q(h,b):B[h.level].func(h,b);if((o===ub||o===vb)&&(h.status=rb),o===sb||o===ub)return 0===a.avail_out&&(h.last_flush=-1),M;if(o===tb&&(b===I?D._tr_align(h):b!==L&&(D._tr_stored_block(h,0,0,!1),b===J&&(f(h.head),0===h.lookahead&&(h.strstart=0,h.block_start=0,h.insert=0))),g(a),0===a.avail_out))return h.last_flush=-1,M}return b!==K?M:h.wrap<=0?N:(2===h.wrap?(i(h,255&a.adler),i(h,a.adler>>8&255),i(h,a.adler>>16&255),i(h,a.adler>>24&255),i(h,255&a.total_in),i(h,a.total_in>>8&255),i(h,a.total_in>>16&255),i(h,a.total_in>>24&255)):(j(h,a.adler>>>16),j(h,65535&a.adler)),g(a),h.wrap>0&&(h.wrap=-h.wrap),0!==h.pending?M:N)}function A(a){var b;return 
a&&a.state?(b=a.state.status,b!==lb&&b!==mb&&b!==nb&&b!==ob&&b!==pb&&b!==qb&&b!==rb?d(a,O):(a.state=null,b===qb?d(a,P):M)):O}var B,C=a("../utils/common"),D=a("./trees"),E=a("./adler32"),F=a("./crc32"),G=a("./messages"),H=0,I=1,J=3,K=4,L=5,M=0,N=1,O=-2,P=-3,Q=-5,R=-1,S=1,T=2,U=3,V=4,W=0,X=2,Y=8,Z=9,$=15,_=8,ab=29,bb=256,cb=bb+1+ab,db=30,eb=19,fb=2*cb+1,gb=15,hb=3,ib=258,jb=ib+hb+1,kb=32,lb=42,mb=69,nb=73,ob=91,pb=103,qb=113,rb=666,sb=1,tb=2,ub=3,vb=4,wb=3,xb=function(a,b,c,d,e){this.good_length=a,this.max_lazy=b,this.nice_length=c,this.max_chain=d,this.func=e};B=[new xb(0,0,0,0,n),new xb(4,4,8,4,o),new xb(4,5,16,8,o),new xb(4,6,32,32,o),new xb(4,4,16,16,p),new xb(8,16,32,32,p),new xb(8,16,128,128,p),new xb(8,32,128,256,p),new xb(32,128,258,1024,p),new xb(32,258,258,4096,p)],c.deflateInit=y,c.deflateInit2=x,c.deflateReset=v,c.deflateResetKeep=u,c.deflateSetHeader=w,c.deflate=z,c.deflateEnd=A,c.deflateInfo="pako deflate (from Nodeca project)"},{"../utils/common":27,"./adler32":29,"./crc32":31,"./messages":37,"./trees":38}],33:[function(a,b){"use strict";function c(){this.text=0,this.time=0,this.xflags=0,this.os=0,this.extra=null,this.extra_len=0,this.name="",this.comment="",this.hcrc=0,this.done=!1}b.exports=c},{}],34:[function(a,b){"use strict";var c=30,d=12;b.exports=function(a,b){var e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u,v,w,x,y,z,A,B,C;e=a.state,f=a.next_in,B=a.input,g=f+(a.avail_in-5),h=a.next_out,C=a.output,i=h-(b-a.avail_out),j=h+(a.avail_out-257),k=e.dmax,l=e.wsize,m=e.whave,n=e.wnext,o=e.window,p=e.hold,q=e.bits,r=e.lencode,s=e.distcode,t=(1<<e.lenbits)-1,u=(1<<e.distbits)-1;a:do{15>q&&(p+=B[f++]<<q,q+=8,p+=B[f++]<<q,q+=8),v=r[p&t];b:for(;;){if(w=v>>>24,p>>>=w,q-=w,w=v>>>16&255,0===w)C[h++]=65535&v;else{if(!(16&w)){if(0===(64&w)){v=r[(65535&v)+(p&(1<<w)-1)];continue b}if(32&w){e.mode=d;break a}a.msg="invalid literal/length code",e.mode=c;break 
a}x=65535&v,w&=15,w&&(w>q&&(p+=B[f++]<<q,q+=8),x+=p&(1<<w)-1,p>>>=w,q-=w),15>q&&(p+=B[f++]<<q,q+=8,p+=B[f++]<<q,q+=8),v=s[p&u];c:for(;;){if(w=v>>>24,p>>>=w,q-=w,w=v>>>16&255,!(16&w)){if(0===(64&w)){v=s[(65535&v)+(p&(1<<w)-1)];continue c}a.msg="invalid distance code",e.mode=c;break a}if(y=65535&v,w&=15,w>q&&(p+=B[f++]<<q,q+=8,w>q&&(p+=B[f++]<<q,q+=8)),y+=p&(1<<w)-1,y>k){a.msg="invalid distance too far back",e.mode=c;break a}if(p>>>=w,q-=w,w=h-i,y>w){if(w=y-w,w>m&&e.correct){a.msg="invalid distance too far back",e.mode=c;break a}if(z=0,A=o,0===n){if(z+=l-w,x>w){x-=w;do C[h++]=o[z++];while(--w);z=h-y,A=C}}else if(w>n){if(z+=l+n-w,w-=n,x>w){x-=w;do C[h++]=o[z++];while(--w);if(z=0,x>n){w=n,x-=w;do C[h++]=o[z++];while(--w);z=h-y,A=C}}}else if(z+=n-w,x>w){x-=w;do C[h++]=o[z++];while(--w);z=h-y,A=C}for(;x>2;)C[h++]=A[z++],C[h++]=A[z++],C[h++]=A[z++],x-=3;x&&(C[h++]=A[z++],x>1&&(C[h++]=A[z++]))}else{z=h-y;do C[h++]=C[z++],C[h++]=C[z++],C[h++]=C[z++],x-=3;while(x>2);x&&(C[h++]=C[z++],x>1&&(C[h++]=C[z++]))}break}}break}}while(g>f&&j>h);x=q>>3,f-=x,q-=x<<3,p&=(1<<q)-1,a.next_in=f,a.next_out=h,a.avail_in=g>f?5+(g-f):5-(f-g),a.avail_out=j>h?257+(j-h):257-(h-j),e.hold=p,e.bits=q}},{}],35:[function(a,b,c){"use strict";function d(a){return(a>>>24&255)+(a>>>8&65280)+((65280&a)<<8)+((255&a)<<24)}function e(){this.mode=0,this.last=!1,this.wrap=0,this.havedict=!1,this.flags=0,this.dmax=0,this.check=0,this.total=0,this.head=null,this.wbits=0,this.wsize=0,this.whave=0,this.wnext=0,this.window=null,this.hold=0,this.bits=0,this.length=0,this.offset=0,this.extra=0,this.lencode=null,this.distcode=null,this.lenbits=0,this.distbits=0,this.ncode=0,this.nlen=0,this.ndist=0,this.have=0,this.next=null,this.lens=new r.Buf16(320),this.work=new r.Buf16(288),this.lendyn=null,this.distdyn=null,this.correct=0,this.back=0,this.was=0}function f(a){var b;return 
a&&a.state?(b=a.state,a.total_in=a.total_out=b.total=0,a.msg="",b.wrap&&(a.adler=1&b.wrap),b.mode=K,b.last=0,b.havedict=0,b.dmax=32768,b.head=null,b.hold=0,b.bits=0,b.lencode=b.lendyn=new r.Buf32(ob),b.distcode=b.distdyn=new r.Buf32(pb),b.correct=1,b.back=-1,C):F}function g(a){var b;return a&&a.state?(b=a.state,b.wsize=0,b.whave=0,b.wnext=0,f(a)):F}function h(a,b){var c,d;return a&&a.state?(d=a.state,0>b?(c=0,b=-b):(c=(b>>4)+1,48>b&&(b&=15)),b&&(8>b||b>15)?F:(null!==d.window&&d.wbits!==b&&(d.window=null),d.wrap=c,d.wbits=b,g(a))):F}function i(a,b){var c,d;return a?(d=new e,a.state=d,d.window=null,c=h(a,b),c!==C&&(a.state=null),c):F}function j(a){return i(a,rb)}function k(a){if(sb){var b;for(p=new r.Buf32(512),q=new r.Buf32(32),b=0;144>b;)a.lens[b++]=8;for(;256>b;)a.lens[b++]=9;for(;280>b;)a.lens[b++]=7;for(;288>b;)a.lens[b++]=8;for(v(x,a.lens,0,288,p,0,a.work,{bits:9}),b=0;32>b;)a.lens[b++]=5;v(y,a.lens,0,32,q,0,a.work,{bits:5}),sb=!1}a.lencode=p,a.lenbits=9,a.distcode=q,a.distbits=5}function l(a,b,c,d){var e,f=a.state;return null===f.window&&(f.wsize=1<<f.wbits,f.wnext=0,f.whave=0,f.window=new r.Buf8(f.wsize)),d>=f.wsize?(r.arraySet(f.window,b,c-f.wsize,f.wsize,0),f.wnext=0,f.whave=f.wsize):(e=f.wsize-f.wnext,e>d&&(e=d),r.arraySet(f.window,b,c-d,e,f.wnext),d-=e,d?(r.arraySet(f.window,b,c-d,d,0),f.wnext=d,f.whave=f.wsize):(f.wnext+=e,f.wnext===f.wsize&&(f.wnext=0),f.whave<f.wsize&&(f.whave+=e))),0}function m(a,b){var c,e,f,g,h,i,j,m,n,o,p,q,ob,pb,qb,rb,sb,tb,ub,vb,wb,xb,yb,zb,Ab=0,Bb=new r.Buf8(4),Cb=[16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15];if(!a||!a.state||!a.output||!a.input&&0!==a.avail_in)return F;c=a.state,c.mode===V&&(c.mode=W),h=a.next_out,f=a.output,j=a.avail_out,g=a.next_in,e=a.input,i=a.avail_in,m=c.hold,n=c.bits,o=i,p=j,xb=C;a:for(;;)switch(c.mode){case K:if(0===c.wrap){c.mode=W;break}for(;16>n;){if(0===i)break 
a;i--,m+=e[g++]<<n,n+=8}if(2&c.wrap&&35615===m){c.check=0,Bb[0]=255&m,Bb[1]=m>>>8&255,c.check=t(c.check,Bb,2,0),m=0,n=0,c.mode=L;break}if(c.flags=0,c.head&&(c.head.done=!1),!(1&c.wrap)||(((255&m)<<8)+(m>>8))%31){a.msg="incorrect header check",c.mode=lb;break}if((15&m)!==J){a.msg="unknown compression method",c.mode=lb;break}if(m>>>=4,n-=4,wb=(15&m)+8,0===c.wbits)c.wbits=wb;else if(wb>c.wbits){a.msg="invalid window size",c.mode=lb;break}c.dmax=1<<wb,a.adler=c.check=1,c.mode=512&m?T:V,m=0,n=0;break;case L:for(;16>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(c.flags=m,(255&c.flags)!==J){a.msg="unknown compression method",c.mode=lb;break}if(57344&c.flags){a.msg="unknown header flags set",c.mode=lb;break}c.head&&(c.head.text=m>>8&1),512&c.flags&&(Bb[0]=255&m,Bb[1]=m>>>8&255,c.check=t(c.check,Bb,2,0)),m=0,n=0,c.mode=M;case M:for(;32>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}c.head&&(c.head.time=m),512&c.flags&&(Bb[0]=255&m,Bb[1]=m>>>8&255,Bb[2]=m>>>16&255,Bb[3]=m>>>24&255,c.check=t(c.check,Bb,4,0)),m=0,n=0,c.mode=N;case N:for(;16>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}c.head&&(c.head.xflags=255&m,c.head.os=m>>8),512&c.flags&&(Bb[0]=255&m,Bb[1]=m>>>8&255,c.check=t(c.check,Bb,2,0)),m=0,n=0,c.mode=O;case O:if(1024&c.flags){for(;16>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}c.length=m,c.head&&(c.head.extra_len=m),512&c.flags&&(Bb[0]=255&m,Bb[1]=m>>>8&255,c.check=t(c.check,Bb,2,0)),m=0,n=0}else c.head&&(c.head.extra=null);c.mode=P;case P:if(1024&c.flags&&(q=c.length,q>i&&(q=i),q&&(c.head&&(wb=c.head.extra_len-c.length,c.head.extra||(c.head.extra=new Array(c.head.extra_len)),r.arraySet(c.head.extra,e,g,q,wb)),512&c.flags&&(c.check=t(c.check,e,q,g)),i-=q,g+=q,c.length-=q),c.length))break a;c.length=0,c.mode=Q;case Q:if(2048&c.flags){if(0===i)break a;q=0;do wb=e[g+q++],c.head&&wb&&c.length<65536&&(c.head.name+=String.fromCharCode(wb));while(wb&&i>q);if(512&c.flags&&(c.check=t(c.check,e,q,g)),i-=q,g+=q,wb)break a}else c.head&&(c.head.name=null);c.length=0,c.mode=R;case 
R:if(4096&c.flags){if(0===i)break a;q=0;do wb=e[g+q++],c.head&&wb&&c.length<65536&&(c.head.comment+=String.fromCharCode(wb));while(wb&&i>q);if(512&c.flags&&(c.check=t(c.check,e,q,g)),i-=q,g+=q,wb)break a}else c.head&&(c.head.comment=null);c.mode=S;case S:if(512&c.flags){for(;16>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(m!==(65535&c.check)){a.msg="header crc mismatch",c.mode=lb;break}m=0,n=0}c.head&&(c.head.hcrc=c.flags>>9&1,c.head.done=!0),a.adler=c.check=0,c.mode=V;break;case T:for(;32>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}a.adler=c.check=d(m),m=0,n=0,c.mode=U;case U:if(0===c.havedict)return a.next_out=h,a.avail_out=j,a.next_in=g,a.avail_in=i,c.hold=m,c.bits=n,E;a.adler=c.check=1,c.mode=V;case V:if(b===A||b===B)break a;case W:if(c.last){m>>>=7&n,n-=7&n,c.mode=ib;break}for(;3>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}switch(c.last=1&m,m>>>=1,n-=1,3&m){case 0:c.mode=X;break;case 1:if(k(c),c.mode=bb,b===B){m>>>=2,n-=2;break a}break;case 2:c.mode=$;break;case 3:a.msg="invalid block type",c.mode=lb}m>>>=2,n-=2;break;case X:for(m>>>=7&n,n-=7&n;32>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if((65535&m)!==(m>>>16^65535)){a.msg="invalid stored block lengths",c.mode=lb;break}if(c.length=65535&m,m=0,n=0,c.mode=Y,b===B)break a;case Y:c.mode=Z;case Z:if(q=c.length){if(q>i&&(q=i),q>j&&(q=j),0===q)break a;r.arraySet(f,e,g,q,h),i-=q,g+=q,j-=q,h+=q,c.length-=q;break}c.mode=V;break;case $:for(;14>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(c.nlen=(31&m)+257,m>>>=5,n-=5,c.ndist=(31&m)+1,m>>>=5,n-=5,c.ncode=(15&m)+4,m>>>=4,n-=4,c.nlen>286||c.ndist>30){a.msg="too many length or distance symbols",c.mode=lb;break}c.have=0,c.mode=_;case _:for(;c.have<c.ncode;){for(;3>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}c.lens[Cb[c.have++]]=7&m,m>>>=3,n-=3}for(;c.have<19;)c.lens[Cb[c.have++]]=0;if(c.lencode=c.lendyn,c.lenbits=7,yb={bits:c.lenbits},xb=v(w,c.lens,0,19,c.lencode,0,c.work,yb),c.lenbits=yb.bits,xb){a.msg="invalid code lengths set",c.mode=lb;break}c.have=0,c.mode=ab;case 
ab:for(;c.have<c.nlen+c.ndist;){for(;Ab=c.lencode[m&(1<<c.lenbits)-1],qb=Ab>>>24,rb=Ab>>>16&255,sb=65535&Ab,!(n>=qb);){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(16>sb)m>>>=qb,n-=qb,c.lens[c.have++]=sb;else{if(16===sb){for(zb=qb+2;zb>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(m>>>=qb,n-=qb,0===c.have){a.msg="invalid bit length repeat",c.mode=lb;break}wb=c.lens[c.have-1],q=3+(3&m),m>>>=2,n-=2}else if(17===sb){for(zb=qb+3;zb>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}m>>>=qb,n-=qb,wb=0,q=3+(7&m),m>>>=3,n-=3}else{for(zb=qb+7;zb>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}m>>>=qb,n-=qb,wb=0,q=11+(127&m),m>>>=7,n-=7}if(c.have+q>c.nlen+c.ndist){a.msg="invalid bit length repeat",c.mode=lb;break}for(;q--;)c.lens[c.have++]=wb}}if(c.mode===lb)break;if(0===c.lens[256]){a.msg="invalid code -- missing end-of-block",c.mode=lb;break}if(c.lenbits=9,yb={bits:c.lenbits},xb=v(x,c.lens,0,c.nlen,c.lencode,0,c.work,yb),c.lenbits=yb.bits,xb){a.msg="invalid literal/lengths set",c.mode=lb;break}if(c.distbits=6,c.distcode=c.distdyn,yb={bits:c.distbits},xb=v(y,c.lens,c.nlen,c.ndist,c.distcode,0,c.work,yb),c.distbits=yb.bits,xb){a.msg="invalid distances set",c.mode=lb;break}if(c.mode=bb,b===B)break a;case bb:c.mode=cb;case cb:if(i>=6&&j>=258){a.next_out=h,a.avail_out=j,a.next_in=g,a.avail_in=i,c.hold=m,c.bits=n,u(a,p),h=a.next_out,f=a.output,j=a.avail_out,g=a.next_in,e=a.input,i=a.avail_in,m=c.hold,n=c.bits,c.mode===V&&(c.back=-1);break}for(c.back=0;Ab=c.lencode[m&(1<<c.lenbits)-1],qb=Ab>>>24,rb=Ab>>>16&255,sb=65535&Ab,!(n>=qb);){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(rb&&0===(240&rb)){for(tb=qb,ub=rb,vb=sb;Ab=c.lencode[vb+((m&(1<<tb+ub)-1)>>tb)],qb=Ab>>>24,rb=Ab>>>16&255,sb=65535&Ab,!(n>=tb+qb);){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}m>>>=tb,n-=tb,c.back+=tb}if(m>>>=qb,n-=qb,c.back+=qb,c.length=sb,0===rb){c.mode=hb;break}if(32&rb){c.back=-1,c.mode=V;break}if(64&rb){a.msg="invalid literal/length code",c.mode=lb;break}c.extra=15&rb,c.mode=db;case 
db:if(c.extra){for(zb=c.extra;zb>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}c.length+=m&(1<<c.extra)-1,m>>>=c.extra,n-=c.extra,c.back+=c.extra}c.was=c.length,c.mode=eb;case eb:for(;Ab=c.distcode[m&(1<<c.distbits)-1],qb=Ab>>>24,rb=Ab>>>16&255,sb=65535&Ab,!(n>=qb);){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(0===(240&rb)){for(tb=qb,ub=rb,vb=sb;Ab=c.distcode[vb+((m&(1<<tb+ub)-1)>>tb)],qb=Ab>>>24,rb=Ab>>>16&255,sb=65535&Ab,!(n>=tb+qb);){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}m>>>=tb,n-=tb,c.back+=tb}if(m>>>=qb,n-=qb,c.back+=qb,64&rb){a.msg="invalid distance code",c.mode=lb;break}c.offset=sb,c.extra=15&rb,c.mode=fb;case fb:if(c.extra){for(zb=c.extra;zb>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}c.offset+=m&(1<<c.extra)-1,m>>>=c.extra,n-=c.extra,c.back+=c.extra}if(c.offset>c.dmax){a.msg="invalid distance too far back",c.mode=lb;break}c.mode=gb;case gb:if(0===j)break a;if(q=p-j,c.offset>q){if(q=c.offset-q,q>c.whave&&c.correct){a.msg="invalid distance too far back",c.mode=lb;break}q>c.wnext?(q-=c.wnext,ob=c.wsize-q):ob=c.wnext-q,q>c.length&&(q=c.length),pb=c.window}else pb=f,ob=h-c.offset,q=c.length;q>j&&(q=j),j-=q,c.length-=q;do f[h++]=pb[ob++];while(--q);0===c.length&&(c.mode=cb);break;case hb:if(0===j)break a;f[h++]=c.length,j--,c.mode=cb;break;case ib:if(c.wrap){for(;32>n;){if(0===i)break a;i--,m|=e[g++]<<n,n+=8}if(p-=j,a.total_out+=p,c.total+=p,p&&(a.adler=c.check=c.flags?t(c.check,f,p,h-p):s(c.check,f,p,h-p)),p=j,(c.flags?m:d(m))!==c.check){a.msg="incorrect data check",c.mode=lb;break}m=0,n=0}c.mode=jb;case jb:if(c.wrap&&c.flags){for(;32>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(m!==(4294967295&c.total)){a.msg="incorrect length check",c.mode=lb;break}m=0,n=0}c.mode=kb;case kb:xb=D;break a;case lb:xb=G;break a;case mb:return H;case nb:default:return F}return 
a.next_out=h,a.avail_out=j,a.next_in=g,a.avail_in=i,c.hold=m,c.bits=n,(c.wsize||p!==a.avail_out&&c.mode<lb&&(c.mode<ib||b!==z))&&l(a,a.output,a.next_out,p-a.avail_out)?(c.mode=mb,H):(o-=a.avail_in,p-=a.avail_out,a.total_in+=o,a.total_out+=p,c.total+=p,c.wrap&&p&&(a.adler=c.check=c.flags?t(c.check,f,p,a.next_out-p):s(c.check,f,p,a.next_out-p)),a.data_type=c.bits+(c.last?64:0)+(c.mode===V?128:0)+(c.mode===bb||c.mode===Y?256:0),(0===o&&0===p||b===z)&&xb===C&&(xb=I),xb)}function n(a){if(!a||!a.state)return F;var b=a.state;return b.window&&(b.window=null),a.state=null,C}function o(a,b){var c;return a&&a.state?(c=a.state,0===(2&c.wrap)?F:(c.head=b,b.done=!1,C)):F}var p,q,r=a("../utils/common"),s=a("./adler32"),t=a("./crc32"),u=a("./inffast"),v=a("./inftrees"),w=0,x=1,y=2,z=4,A=5,B=6,C=0,D=1,E=2,F=-2,G=-3,H=-4,I=-5,J=8,K=1,L=2,M=3,N=4,O=5,P=6,Q=7,R=8,S=9,T=10,U=11,V=12,W=13,X=14,Y=15,Z=16,$=17,_=18,ab=19,bb=20,cb=21,db=22,eb=23,fb=24,gb=25,hb=26,ib=27,jb=28,kb=29,lb=30,mb=31,nb=32,ob=852,pb=592,qb=15,rb=qb,sb=!0;c.inflateReset=g,c.inflateReset2=h,c.inflateResetKeep=f,c.inflateInit=j,c.inflateInit2=i,c.inflate=m,c.inflateEnd=n,c.inflateGetHeader=o,c.inflateInfo="pako inflate (from Nodeca project)"},{"../utils/common":27,"./adler32":29,"./crc32":31,"./inffast":34,"./inftrees":36}],36:[function(a,b){"use strict";var c=a("../utils/common"),d=15,e=852,f=592,g=0,h=1,i=2,j=[3,4,5,6,7,8,9,10,11,13,15,17,19,23,27,31,35,43,51,59,67,83,99,115,131,163,195,227,258,0,0],k=[16,16,16,16,16,16,16,16,17,17,17,17,18,18,18,18,19,19,19,19,20,20,20,20,21,21,21,21,16,72,78],l=[1,2,3,4,5,7,9,13,17,25,33,49,65,97,129,193,257,385,513,769,1025,1537,2049,3073,4097,6145,8193,12289,16385,24577,0,0],m=[16,16,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,24,24,25,25,26,26,27,27,28,28,29,29,64,64];b.exports=function(a,b,n,o,p,q,r,s){var t,u,v,w,x,y,z,A,B,C=s.bits,D=0,E=0,F=0,G=0,H=0,I=0,J=0,K=0,L=0,M=0,N=null,O=0,P=new c.Buf16(d+1),Q=new 
c.Buf16(d+1),R=null,S=0;for(D=0;d>=D;D++)P[D]=0;for(E=0;o>E;E++)P[b[n+E]]++;for(H=C,G=d;G>=1&&0===P[G];G--);if(H>G&&(H=G),0===G)return p[q++]=20971520,p[q++]=20971520,s.bits=1,0;for(F=1;G>F&&0===P[F];F++);for(F>H&&(H=F),K=1,D=1;d>=D;D++)if(K<<=1,K-=P[D],0>K)return-1;if(K>0&&(a===g||1!==G))return-1;for(Q[1]=0,D=1;d>D;D++)Q[D+1]=Q[D]+P[D];for(E=0;o>E;E++)0!==b[n+E]&&(r[Q[b[n+E]]++]=E);if(a===g?(N=R=r,y=19):a===h?(N=j,O-=257,R=k,S-=257,y=256):(N=l,R=m,y=-1),M=0,E=0,D=F,x=q,I=H,J=0,v=-1,L=1<<H,w=L-1,a===h&&L>e||a===i&&L>f)return 1;for(var T=0;;){T++,z=D-J,r[E]<y?(A=0,B=r[E]):r[E]>y?(A=R[S+r[E]],B=N[O+r[E]]):(A=96,B=0),t=1<<D-J,u=1<<I,F=u;do u-=t,p[x+(M>>J)+u]=z<<24|A<<16|B|0;while(0!==u);for(t=1<<D-1;M&t;)t>>=1;if(0!==t?(M&=t-1,M+=t):M=0,E++,0===--P[D]){if(D===G)break;D=b[n+r[E]]}if(D>H&&(M&w)!==v){for(0===J&&(J=H),x+=F,I=D-J,K=1<<I;G>I+J&&(K-=P[I+J],!(0>=K));)I++,K<<=1;if(L+=1<<I,a===h&&L>e||a===i&&L>f)return 1;v=M&w,p[v]=H<<24|I<<16|x-q|0}}return 0!==M&&(p[x+M]=D-J<<24|64<<16|0),s.bits=H,0}},{"../utils/common":27}],37:[function(a,b){"use strict";b.exports={2:"need dictionary",1:"stream end",0:"","-1":"file error","-2":"stream error","-3":"data error","-4":"insufficient memory","-5":"buffer error","-6":"incompatible version"}},{}],38:[function(a,b,c){"use strict";function d(a){for(var b=a.length;--b>=0;)a[b]=0}function e(a){return 256>a?gb[a]:gb[256+(a>>>7)]}function f(a,b){a.pending_buf[a.pending++]=255&b,a.pending_buf[a.pending++]=b>>>8&255}function g(a,b,c){a.bi_valid>V-c?(a.bi_buf|=b<<a.bi_valid&65535,f(a,a.bi_buf),a.bi_buf=b>>V-a.bi_valid,a.bi_valid+=c-V):(a.bi_buf|=b<<a.bi_valid&65535,a.bi_valid+=c)}function h(a,b,c){g(a,c[2*b],c[2*b+1])}function i(a,b){var c=0;do c|=1&a,a>>>=1,c<<=1;while(--b>0);return c>>>1}function j(a){16===a.bi_valid?(f(a,a.bi_buf),a.bi_buf=0,a.bi_valid=0):a.bi_valid>=8&&(a.pending_buf[a.pending++]=255&a.bi_buf,a.bi_buf>>=8,a.bi_valid-=8)}function k(a,b){var 
c,d,e,f,g,h,i=b.dyn_tree,j=b.max_code,k=b.stat_desc.static_tree,l=b.stat_desc.has_stree,m=b.stat_desc.extra_bits,n=b.stat_desc.extra_base,o=b.stat_desc.max_length,p=0;for(f=0;U>=f;f++)a.bl_count[f]=0;for(i[2*a.heap[a.heap_max]+1]=0,c=a.heap_max+1;T>c;c++)d=a.heap[c],f=i[2*i[2*d+1]+1]+1,f>o&&(f=o,p++),i[2*d+1]=f,d>j||(a.bl_count[f]++,g=0,d>=n&&(g=m[d-n]),h=i[2*d],a.opt_len+=h*(f+g),l&&(a.static_len+=h*(k[2*d+1]+g)));if(0!==p){do{for(f=o-1;0===a.bl_count[f];)f--;a.bl_count[f]--,a.bl_count[f+1]+=2,a.bl_count[o]--,p-=2}while(p>0);for(f=o;0!==f;f--)for(d=a.bl_count[f];0!==d;)e=a.heap[--c],e>j||(i[2*e+1]!==f&&(a.opt_len+=(f-i[2*e+1])*i[2*e],i[2*e+1]=f),d--)}}function l(a,b,c){var d,e,f=new Array(U+1),g=0;for(d=1;U>=d;d++)f[d]=g=g+c[d-1]<<1;for(e=0;b>=e;e++){var h=a[2*e+1];0!==h&&(a[2*e]=i(f[h]++,h))}}function m(){var a,b,c,d,e,f=new Array(U+1);for(c=0,d=0;O-1>d;d++)for(ib[d]=c,a=0;a<1<<_[d];a++)hb[c++]=d;for(hb[c-1]=d,e=0,d=0;16>d;d++)for(jb[d]=e,a=0;a<1<<ab[d];a++)gb[e++]=d;for(e>>=7;R>d;d++)for(jb[d]=e<<7,a=0;a<1<<ab[d]-7;a++)gb[256+e++]=d;for(b=0;U>=b;b++)f[b]=0;for(a=0;143>=a;)eb[2*a+1]=8,a++,f[8]++;for(;255>=a;)eb[2*a+1]=9,a++,f[9]++;for(;279>=a;)eb[2*a+1]=7,a++,f[7]++;for(;287>=a;)eb[2*a+1]=8,a++,f[8]++;for(l(eb,Q+1,f),a=0;R>a;a++)fb[2*a+1]=5,fb[2*a]=i(a,5);kb=new nb(eb,_,P+1,Q,U),lb=new nb(fb,ab,0,R,U),mb=new nb(new Array(0),bb,0,S,W)}function n(a){var b;for(b=0;Q>b;b++)a.dyn_ltree[2*b]=0;for(b=0;R>b;b++)a.dyn_dtree[2*b]=0;for(b=0;S>b;b++)a.bl_tree[2*b]=0;a.dyn_ltree[2*X]=1,a.opt_len=a.static_len=0,a.last_lit=a.matches=0}function o(a){a.bi_valid>8?f(a,a.bi_buf):a.bi_valid>0&&(a.pending_buf[a.pending++]=a.bi_buf),a.bi_buf=0,a.bi_valid=0}function p(a,b,c,d){o(a),d&&(f(a,c),f(a,~c)),E.arraySet(a.pending_buf,a.window,b,c,a.pending),a.pending+=c}function q(a,b,c,d){var e=2*b,f=2*c;return a[e]<a[f]||a[e]===a[f]&&d[b]<=d[c]}function r(a,b,c){for(var 
d=a.heap[c],e=c<<1;e<=a.heap_len&&(e<a.heap_len&&q(b,a.heap[e+1],a.heap[e],a.depth)&&e++,!q(b,d,a.heap[e],a.depth));)a.heap[c]=a.heap[e],c=e,e<<=1;a.heap[c]=d}function s(a,b,c){var d,f,i,j,k=0;if(0!==a.last_lit)do d=a.pending_buf[a.d_buf+2*k]<<8|a.pending_buf[a.d_buf+2*k+1],f=a.pending_buf[a.l_buf+k],k++,0===d?h(a,f,b):(i=hb[f],h(a,i+P+1,b),j=_[i],0!==j&&(f-=ib[i],g(a,f,j)),d--,i=e(d),h(a,i,c),j=ab[i],0!==j&&(d-=jb[i],g(a,d,j)));while(k<a.last_lit);h(a,X,b)}function t(a,b){var c,d,e,f=b.dyn_tree,g=b.stat_desc.static_tree,h=b.stat_desc.has_stree,i=b.stat_desc.elems,j=-1;for(a.heap_len=0,a.heap_max=T,c=0;i>c;c++)0!==f[2*c]?(a.heap[++a.heap_len]=j=c,a.depth[c]=0):f[2*c+1]=0;for(;a.heap_len<2;)e=a.heap[++a.heap_len]=2>j?++j:0,f[2*e]=1,a.depth[e]=0,a.opt_len--,h&&(a.static_len-=g[2*e+1]);for(b.max_code=j,c=a.heap_len>>1;c>=1;c--)r(a,f,c);e=i;do c=a.heap[1],a.heap[1]=a.heap[a.heap_len--],r(a,f,1),d=a.heap[1],a.heap[--a.heap_max]=c,a.heap[--a.heap_max]=d,f[2*e]=f[2*c]+f[2*d],a.depth[e]=(a.depth[c]>=a.depth[d]?a.depth[c]:a.depth[d])+1,f[2*c+1]=f[2*d+1]=e,a.heap[1]=e++,r(a,f,1);while(a.heap_len>=2);a.heap[--a.heap_max]=a.heap[1],k(a,b),l(f,j,a.bl_count)}function u(a,b,c){var d,e,f=-1,g=b[1],h=0,i=7,j=4;for(0===g&&(i=138,j=3),b[2*(c+1)+1]=65535,d=0;c>=d;d++)e=g,g=b[2*(d+1)+1],++h<i&&e===g||(j>h?a.bl_tree[2*e]+=h:0!==e?(e!==f&&a.bl_tree[2*e]++,a.bl_tree[2*Y]++):10>=h?a.bl_tree[2*Z]++:a.bl_tree[2*$]++,h=0,f=e,0===g?(i=138,j=3):e===g?(i=6,j=3):(i=7,j=4))}function v(a,b,c){var d,e,f=-1,i=b[1],j=0,k=7,l=4;for(0===i&&(k=138,l=3),d=0;c>=d;d++)if(e=i,i=b[2*(d+1)+1],!(++j<k&&e===i)){if(l>j){do h(a,e,a.bl_tree);while(0!==--j)}else 0!==e?(e!==f&&(h(a,e,a.bl_tree),j--),h(a,Y,a.bl_tree),g(a,j-3,2)):10>=j?(h(a,Z,a.bl_tree),g(a,j-3,3)):(h(a,$,a.bl_tree),g(a,j-11,7));j=0,f=e,0===i?(k=138,l=3):e===i?(k=6,l=3):(k=7,l=4)}}function w(a){var b;for(u(a,a.dyn_ltree,a.l_desc.max_code),u(a,a.dyn_dtree,a.d_desc.max_code),t(a,a.bl_desc),b=S-1;b>=3&&0===a.bl_tree[2*cb[b]+1];b--);return 
a.opt_len+=3*(b+1)+5+5+4,b}function x(a,b,c,d){var e;for(g(a,b-257,5),g(a,c-1,5),g(a,d-4,4),e=0;d>e;e++)g(a,a.bl_tree[2*cb[e]+1],3);v(a,a.dyn_ltree,b-1),v(a,a.dyn_dtree,c-1)}function y(a){var b,c=4093624447;for(b=0;31>=b;b++,c>>>=1)if(1&c&&0!==a.dyn_ltree[2*b])return G;if(0!==a.dyn_ltree[18]||0!==a.dyn_ltree[20]||0!==a.dyn_ltree[26])return H;for(b=32;P>b;b++)if(0!==a.dyn_ltree[2*b])return H;return G}function z(a){pb||(m(),pb=!0),a.l_desc=new ob(a.dyn_ltree,kb),a.d_desc=new ob(a.dyn_dtree,lb),a.bl_desc=new ob(a.bl_tree,mb),a.bi_buf=0,a.bi_valid=0,n(a)}function A(a,b,c,d){g(a,(J<<1)+(d?1:0),3),p(a,b,c,!0)}function B(a){g(a,K<<1,3),h(a,X,eb),j(a)}function C(a,b,c,d){var e,f,h=0;a.level>0?(a.strm.data_type===I&&(a.strm.data_type=y(a)),t(a,a.l_desc),t(a,a.d_desc),h=w(a),e=a.opt_len+3+7>>>3,f=a.static_len+3+7>>>3,e>=f&&(e=f)):e=f=c+5,e>=c+4&&-1!==b?A(a,b,c,d):a.strategy===F||f===e?(g(a,(K<<1)+(d?1:0),3),s(a,eb,fb)):(g(a,(L<<1)+(d?1:0),3),x(a,a.l_desc.max_code+1,a.d_desc.max_code+1,h+1),s(a,a.dyn_ltree,a.dyn_dtree)),n(a),d&&o(a)}function D(a,b,c){return a.pending_buf[a.d_buf+2*a.last_lit]=b>>>8&255,a.pending_buf[a.d_buf+2*a.last_lit+1]=255&b,a.pending_buf[a.l_buf+a.last_lit]=255&c,a.last_lit++,0===b?a.dyn_ltree[2*c]++:(a.matches++,b--,a.dyn_ltree[2*(hb[c]+P+1)]++,a.dyn_dtree[2*e(b)]++),a.last_lit===a.lit_bufsize-1}var E=a("../utils/common"),F=4,G=0,H=1,I=2,J=0,K=1,L=2,M=3,N=258,O=29,P=256,Q=P+1+O,R=30,S=19,T=2*Q+1,U=15,V=16,W=7,X=256,Y=16,Z=17,$=18,_=[0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,0],ab=[0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13],bb=[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,3,7],cb=[16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15],db=512,eb=new Array(2*(Q+2));d(eb);var fb=new Array(2*R);d(fb);var gb=new Array(db);d(gb);var hb=new Array(N-M+1);d(hb);var ib=new Array(O);d(ib);var jb=new Array(R);d(jb);var 
kb,lb,mb,nb=function(a,b,c,d,e){this.static_tree=a,this.extra_bits=b,this.extra_base=c,this.elems=d,this.max_length=e,this.has_stree=a&&a.length},ob=function(a,b){this.dyn_tree=a,this.max_code=0,this.stat_desc=b},pb=!1;c._tr_init=z,c._tr_stored_block=A,c._tr_flush_block=C,c._tr_tally=D,c._tr_align=B},{"../utils/common":27}],39:[function(a,b){"use strict";function c(){this.input=null,this.next_in=0,this.avail_in=0,this.total_in=0,this.output=null,this.next_out=0,this.avail_out=0,this.total_out=0,this.msg="",this.state=null,this.data_type=2,this.adler=0}b.exports=c},{}]},{},[9])(9)});'use strict';if(tr.isVinn){global.window={};}'use strict';if(tr.isVinn){global.JSZip=global.window.JSZip;global.window=undefined;}else if(tr.isNode){const jsZipAbsPath=HTMLImportsLoader.hrefToAbsolutePath('/jszip.min.js');const jsZipModule=require(jsZipAbsPath);global.JSZip=jsZipModule;}'use strict';tr.exportTo('tr.e.importer',function(){const GZIP_MEMBER_HEADER_ID_SIZE=3;const GZIP_HEADER_ID1=0x1f;const GZIP_HEADER_ID2=0x8b;const GZIP_DEFLATE_COMPRESSION=8;function GzipImporter(model,eventData){this.inflateAsTraceStream=false;if(typeof(eventData)==='string'||eventData instanceof String){eventData=JSZip.utils.transformTo('uint8array',eventData);}else if(eventData instanceof ArrayBuffer){eventData=new Uint8Array(eventData);}else if(eventData instanceof tr.b.InMemoryTraceStream){eventData=eventData.data;this.inflateAsTraceStream_=true;}else{throw new Error('Unknown gzip data format');} +return{InMemoryTraceStream,};});!function(a){if("object"==typeof exports&&"undefined"!=typeof module)module.exports=a();else if("function"==typeof define&&define.amd)define([],a);else{var b;"undefined"!=typeof window?b=window:"undefined"!=typeof global?b=global:"undefined"!=typeof self&&(b=self),b.JSZip=a()}}(function(){return function a(b,c,d){function e(g,h){if(!c[g]){if(!b[g]){var i="function"==typeof require&&require;if(!h&&i)return i(g,!0);if(f)return f(g,!0);throw new Error("Cannot find module 
'"+g+"'")}var j=c[g]={exports:{}};b[g][0].call(j.exports,function(a){var c=b[g][1][a];return e(c?c:a)},j,j.exports,a,b,c,d)}return c[g].exports}for(var f="function"==typeof require&&require,g=0;g<d.length;g++)e(d[g]);return e}({1:[function(a,b,c){"use strict";var d="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/=";c.encode=function(a){for(var b,c,e,f,g,h,i,j="",k=0;k<a.length;)b=a.charCodeAt(k++),c=a.charCodeAt(k++),e=a.charCodeAt(k++),f=b>>2,g=(3&b)<<4|c>>4,h=(15&c)<<2|e>>6,i=63&e,isNaN(c)?h=i=64:isNaN(e)&&(i=64),j=j+d.charAt(f)+d.charAt(g)+d.charAt(h)+d.charAt(i);return j},c.decode=function(a){var b,c,e,f,g,h,i,j="",k=0;for(a=a.replace(/[^A-Za-z0-9\+\/\=]/g,"");k<a.length;)f=d.indexOf(a.charAt(k++)),g=d.indexOf(a.charAt(k++)),h=d.indexOf(a.charAt(k++)),i=d.indexOf(a.charAt(k++)),b=f<<2|g>>4,c=(15&g)<<4|h>>2,e=(3&h)<<6|i,j+=String.fromCharCode(b),64!=h&&(j+=String.fromCharCode(c)),64!=i&&(j+=String.fromCharCode(e));return j}},{}],2:[function(a,b){"use strict";function c(){this.compressedSize=0,this.uncompressedSize=0,this.crc32=0,this.compressionMethod=null,this.compressedContent=null}c.prototype={getContent:function(){return null},getCompressedContent:function(){return null}},b.exports=c},{}],3:[function(a,b,c){"use strict";c.STORE={magic:"\x00\x00",compress:function(a){return a},uncompress:function(a){return a},compressInputType:null,uncompressInputType:null},c.DEFLATE=a("./flate")},{"./flate":8}],4:[function(a,b){"use strict";var 
c=a("./utils"),d=[0,1996959894,3993919788,2567524794,124634137,1886057615,3915621685,2657392035,249268274,2044508324,3772115230,2547177864,162941995,2125561021,3887607047,2428444049,498536548,1789927666,4089016648,2227061214,450548861,1843258603,4107580753,2211677639,325883990,1684777152,4251122042,2321926636,335633487,1661365465,4195302755,2366115317,997073096,1281953886,3579855332,2724688242,1006888145,1258607687,3524101629,2768942443,901097722,1119000684,3686517206,2898065728,853044451,1172266101,3705015759,2882616665,651767980,1373503546,3369554304,3218104598,565507253,1454621731,3485111705,3099436303,671266974,1594198024,3322730930,2970347812,795835527,1483230225,3244367275,3060149565,1994146192,31158534,2563907772,4023717930,1907459465,112637215,2680153253,3904427059,2013776290,251722036,2517215374,3775830040,2137656763,141376813,2439277719,3865271297,1802195444,476864866,2238001368,4066508878,1812370925,453092731,2181625025,4111451223,1706088902,314042704,2344532202,4240017532,1658658271,366619977,2362670323,4224994405,1303535960,984961486,2747007092,3569037538,1256170817,1037604311,2765210733,3554079995,1131014506,879679996,2909243462,3663771856,1141124467,855842277,2852801631,3708648649,1342533948,654459306,3188396048,3373015174,1466479909,544179635,3110523913,3462522015,1591671054,702138776,2966460450,3352799412,1504918807,783551873,3082640443,3233442989,3988292384,2596254646,62317068,1957810842,3939845945,2647816111,81470997,1943803523,3814918930,2489596804,225274430,2053790376,3826175755,2466906013,167816743,2097651377,4027552580,2265490386,503444072,1762050814,4150417245,2154129355,426522225,1852507879,4275313526,2312317920,282753626,1742555852,4189708143,2394877945,397917763,1622183637,3604390888,2714866558,953729732,1340076626,3518719985,2797360999,1068828381,1219638859,3624741850,2936675148,906185462,1090812512,3747672003,2825379669,829329135,1181335161,3412177804,3160834842,628085408,1382605366,3423369109,3138078467,570562233,1426400815,3317316542,2
998733608,733239954,1555261956,3268935591,3050360625,752459403,1541320221,2607071920,3965973030,1969922972,40735498,2617837225,3943577151,1913087877,83908371,2512341634,3803740692,2075208622,213261112,2463272603,3855990285,2094854071,198958881,2262029012,4057260610,1759359992,534414190,2176718541,4139329115,1873836001,414664567,2282248934,4279200368,1711684554,285281116,2405801727,4167216745,1634467795,376229701,2685067896,3608007406,1308918612,956543938,2808555105,3495958263,1231636301,1047427035,2932959818,3654703836,1088359270,936918e3,2847714899,3736837829,1202900863,817233897,3183342108,3401237130,1404277552,615818150,3134207493,3453421203,1423857449,601450431,3009837614,3294710456,1567103746,711928724,3020668471,3272380065,1510334235,755167117];b.exports=function(a,b){if("undefined"==typeof a||!a.length)return 0;var e="string"!==c.getTypeOf(a);"undefined"==typeof b&&(b=0);var f=0,g=0,h=0;b=-1^b;for(var i=0,j=a.length;j>i;i++)h=e?a[i]:a.charCodeAt(i),g=255&(b^h),f=d[g],b=b>>>8^f;return-1^b}},{"./utils":21}],5:[function(a,b){"use strict";function c(){this.data=null,this.length=0,this.index=0}var d=a("./utils");c.prototype={checkOffset:function(a){this.checkIndex(this.index+a)},checkIndex:function(a){if(this.length<a||0>a)throw new Error("End of data reached (data length = "+this.length+", asked index = "+a+"). 
Corrupted zip ?")},setIndex:function(a){this.checkIndex(a),this.index=a},skip:function(a){this.setIndex(this.index+a)},byteAt:function(){},readInt:function(a){var b,c=0;for(this.checkOffset(a),b=this.index+a-1;b>=this.index;b--)c=(c<<8)+this.byteAt(b);return this.index+=a,c},readString:function(a){return d.transformTo("string",this.readData(a))},readData:function(){},lastIndexOfSignature:function(){},readDate:function(){var a=this.readInt(4);return new Date((a>>25&127)+1980,(a>>21&15)-1,a>>16&31,a>>11&31,a>>5&63,(31&a)<<1)}},b.exports=c},{"./utils":21}],6:[function(a,b,c){"use strict";c.base64=!1,c.binary=!1,c.dir=!1,c.createFolders=!1,c.date=null,c.compression=null,c.comment=null},{}],7:[function(a,b,c){"use strict";var d=a("./utils");c.string2binary=function(a){return d.string2binary(a)},c.string2Uint8Array=function(a){return d.transformTo("uint8array",a)},c.uint8Array2String=function(a){return d.transformTo("string",a)},c.string2Blob=function(a){var b=d.transformTo("arraybuffer",a);return d.arrayBuffer2Blob(b)},c.arrayBuffer2Blob=function(a){return d.arrayBuffer2Blob(a)},c.transformTo=function(a,b){return d.transformTo(a,b)},c.getTypeOf=function(a){return d.getTypeOf(a)},c.checkSupport=function(a){return d.checkSupport(a)},c.MAX_VALUE_16BITS=d.MAX_VALUE_16BITS,c.MAX_VALUE_32BITS=d.MAX_VALUE_32BITS,c.pretty=function(a){return d.pretty(a)},c.findCompression=function(a){return d.findCompression(a)},c.isRegExp=function(a){return d.isRegExp(a)}},{"./utils":21}],8:[function(a,b,c){"use strict";var d="undefined"!=typeof Uint8Array&&"undefined"!=typeof Uint16Array&&"undefined"!=typeof Uint32Array,e=a("pako");c.uncompressInputType=d?"uint8array":"array",c.compressInputType=d?"uint8array":"array",c.magic="\b\x00",c.compress=function(a){return e.deflateRaw(a)},c.uncompress=function(a){return e.inflateRaw(a)}},{pako:24}],9:[function(a,b){"use strict";function c(a,b){return this instanceof 
c?(this.files={},this.comment=null,this.root="",a&&this.load(a,b),void(this.clone=function(){var a=new c;for(var b in this)"function"!=typeof this[b]&&(a[b]=this[b]);return a})):new c(a,b)}var d=a("./base64");c.prototype=a("./object"),c.prototype.load=a("./load"),c.support=a("./support"),c.defaults=a("./defaults"),c.utils=a("./deprecatedPublicUtils"),c.base64={encode:function(a){return d.encode(a)},decode:function(a){return d.decode(a)}},c.compressions=a("./compressions"),b.exports=c},{"./base64":1,"./compressions":3,"./defaults":6,"./deprecatedPublicUtils":7,"./load":10,"./object":13,"./support":17}],10:[function(a,b){"use strict";var c=a("./base64"),d=a("./zipEntries");b.exports=function(a,b){var e,f,g,h;for(b=b||{},b.base64&&(a=c.decode(a)),f=new d(a,b),e=f.files,g=0;g<e.length;g++)h=e[g],this.file(h.fileName,h.decompressed,{binary:!0,optimizedBinaryString:!0,date:h.date,dir:h.dir,comment:h.fileComment.length?h.fileComment:null,createFolders:b.createFolders});return f.zipComment.length&&(this.comment=f.zipComment),this}},{"./base64":1,"./zipEntries":22}],11:[function(a,b){(function(a){"use strict";b.exports=function(b,c){return new a(b,c)},b.exports.test=function(b){return a.isBuffer(b)}}).call(this,"undefined"!=typeof Buffer?Buffer:void 0)},{}],12:[function(a,b){"use strict";function c(a){this.data=a,this.length=this.data.length,this.index=0}var d=a("./uint8ArrayReader");c.prototype=new d,c.prototype.readData=function(a){this.checkOffset(a);var b=this.data.slice(this.index,this.index+a);return this.index+=a,b},b.exports=c},{"./uint8ArrayReader":18}],13:[function(a,b){"use strict";var c=a("./support"),d=a("./utils"),e=a("./crc32"),f=a("./signature"),g=a("./defaults"),h=a("./base64"),i=a("./compressions"),j=a("./compressedObject"),k=a("./nodeBuffer"),l=a("./utf8"),m=a("./stringWriter"),n=a("./uint8ArrayWriter"),o=function(a){if(a._data instanceof j&&(a._data=a._data.getContent(),a.options.binary=!0,a.options.base64=!1,"uint8array"===d.getTypeOf(a._data))){var 
b=a._data;a._data=new Uint8Array(b.length),0!==b.length&&a._data.set(b,0)}return a._data},p=function(a){var b=o(a),e=d.getTypeOf(b);return"string"===e?!a.options.binary&&c.nodebuffer?k(b,"utf-8"):a.asBinary():b},q=function(a){var b=o(this);return null===b||"undefined"==typeof b?"":(this.options.base64&&(b=h.decode(b)),b=a&&this.options.binary?A.utf8decode(b):d.transformTo("string",b),a||this.options.binary||(b=d.transformTo("string",A.utf8encode(b))),b)},r=function(a,b,c){this.name=a,this.dir=c.dir,this.date=c.date,this.comment=c.comment,this._data=b,this.options=c,this._initialMetadata={dir:c.dir,date:c.date}};r.prototype={asText:function(){return q.call(this,!0)},asBinary:function(){return q.call(this,!1)},asNodeBuffer:function(){var a=p(this);return d.transformTo("nodebuffer",a)},asUint8Array:function(){var a=p(this);return d.transformTo("uint8array",a)},asArrayBuffer:function(){return this.asUint8Array().buffer}};var s=function(a,b){var c,d="";for(c=0;b>c;c++)d+=String.fromCharCode(255&a),a>>>=8;return d},t=function(){var a,b,c={};for(a=0;a<arguments.length;a++)for(b in arguments[a])arguments[a].hasOwnProperty(b)&&"undefined"==typeof c[b]&&(c[b]=arguments[a][b]);return c},u=function(a){return a=a||{},a.base64!==!0||null!==a.binary&&void 0!==a.binary||(a.binary=!0),a=t(a,g),a.date=a.date||new Date,null!==a.compression&&(a.compression=a.compression.toUpperCase()),a},v=function(a,b,c){var e,f=d.getTypeOf(b);if(c=u(c),c.createFolders&&(e=w(a))&&x.call(this,e,!0),c.dir||null===b||"undefined"==typeof b)c.base64=!1,c.binary=!1,b=null;else if("string"===f)c.binary&&!c.base64&&c.optimizedBinaryString!==!0&&(b=d.string2binary(b));else{if(c.base64=!1,c.binary=!0,!(f||b instanceof j))throw new Error("The data of '"+a+"' is in an unsupported format !");"arraybuffer"===f&&(b=d.transformTo("uint8array",b))}var g=new r(a,b,c);return this.files[a]=g,g},w=function(a){"/"==a.slice(-1)&&(a=a.substring(0,a.length-1));var b=a.lastIndexOf("/");return 
b>0?a.substring(0,b):""},x=function(a,b){return"/"!=a.slice(-1)&&(a+="/"),b="undefined"!=typeof b?b:!1,this.files[a]||v.call(this,a,null,{dir:!0,createFolders:b}),this.files[a]},y=function(a,b){var c,f=new j;return a._data instanceof j?(f.uncompressedSize=a._data.uncompressedSize,f.crc32=a._data.crc32,0===f.uncompressedSize||a.dir?(b=i.STORE,f.compressedContent="",f.crc32=0):a._data.compressionMethod===b.magic?f.compressedContent=a._data.getCompressedContent():(c=a._data.getContent(),f.compressedContent=b.compress(d.transformTo(b.compressInputType,c)))):(c=p(a),(!c||0===c.length||a.dir)&&(b=i.STORE,c=""),f.uncompressedSize=c.length,f.crc32=e(c),f.compressedContent=b.compress(d.transformTo(b.compressInputType,c))),f.compressedSize=f.compressedContent.length,f.compressionMethod=b.magic,f},z=function(a,b,c,g){var h,i,j,k,m=(c.compressedContent,d.transformTo("string",l.utf8encode(b.name))),n=b.comment||"",o=d.transformTo("string",l.utf8encode(n)),p=m.length!==b.name.length,q=o.length!==n.length,r=b.options,t="",u="",v="";j=b._initialMetadata.dir!==b.dir?b.dir:r.dir,k=b._initialMetadata.date!==b.date?b.date:r.date,h=k.getHours(),h<<=6,h|=k.getMinutes(),h<<=5,h|=k.getSeconds()/2,i=k.getFullYear()-1980,i<<=4,i|=k.getMonth()+1,i<<=5,i|=k.getDate(),p&&(u=s(1,1)+s(e(m),4)+m,t+="up"+s(u.length,2)+u),q&&(v=s(1,1)+s(this.crc32(o),4)+o,t+="uc"+s(v.length,2)+v);var w="";w+="\n\x00",w+=p||q?"\x00\b":"\x00\x00",w+=c.compressionMethod,w+=s(h,2),w+=s(i,2),w+=s(c.crc32,4),w+=s(c.compressedSize,4),w+=s(c.uncompressedSize,4),w+=s(m.length,2),w+=s(t.length,2);var x=f.LOCAL_FILE_HEADER+w+m+t,y=f.CENTRAL_FILE_HEADER+"\x00"+w+s(o.length,2)+"\x00\x00\x00\x00"+(j===!0?"\x00\x00\x00":"\x00\x00\x00\x00")+s(g,4)+m+t+o;return{fileRecord:x,dirRecord:y,compressedObject:c}},A={load:function(){throw new Error("Load method is not defined. 
Is the file jszip-load.js included ?")},filter:function(a){var b,c,d,e,f=[];for(b in this.files)this.files.hasOwnProperty(b)&&(d=this.files[b],e=new r(d.name,d._data,t(d.options)),c=b.slice(this.root.length,b.length),b.slice(0,this.root.length)===this.root&&a(c,e)&&f.push(e));return f},file:function(a,b,c){if(1===arguments.length){if(d.isRegExp(a)){var e=a;return this.filter(function(a,b){return!b.dir&&e.test(a)})}return this.filter(function(b,c){return!c.dir&&b===a})[0]||null}return a=this.root+a,v.call(this,a,b,c),this},folder:function(a){if(!a)return this;if(d.isRegExp(a))return this.filter(function(b,c){return c.dir&&a.test(b)});var b=this.root+a,c=x.call(this,b),e=this.clone();return e.root=c.name,e},remove:function(a){a=this.root+a;var b=this.files[a];if(b||("/"!=a.slice(-1)&&(a+="/"),b=this.files[a]),b&&!b.dir)delete this.files[a];else for(var c=this.filter(function(b,c){return c.name.slice(0,a.length)===a}),d=0;d<c.length;d++)delete this.files[c[d].name];return this},generate:function(a){a=t(a||{},{base64:!0,compression:"STORE",type:"base64",comment:null}),d.checkSupport(a.type);var b,c,e=[],g=0,j=0,k=d.transformTo("string",this.utf8encode(a.comment||this.comment||""));for(var l in this.files)if(this.files.hasOwnProperty(l)){var o=this.files[l],p=o.options.compression||a.compression.toUpperCase(),q=i[p];if(!q)throw new Error(p+" is not a valid compression method !");var r=y.call(this,o,q),u=z.call(this,l,o,r,g);g+=u.fileRecord.length+r.compressedSize,j+=u.dirRecord.length,e.push(u)}var v="";v=f.CENTRAL_DIRECTORY_END+"\x00\x00\x00\x00"+s(e.length,2)+s(e.length,2)+s(j,4)+s(g,4)+s(k.length,2)+k;var w=a.type.toLowerCase();for(b="uint8array"===w||"arraybuffer"===w||"blob"===w||"nodebuffer"===w?new n(g+j+v.length):new m(g+j+v.length),c=0;c<e.length;c++)b.append(e[c].fileRecord),b.append(e[c].compressedObject.compressedContent);for(c=0;c<e.length;c++)b.append(e[c].dirRecord);b.append(v);var 
x=b.finalize();switch(a.type.toLowerCase()){case"uint8array":case"arraybuffer":case"nodebuffer":return d.transformTo(a.type.toLowerCase(),x);case"blob":return d.arrayBuffer2Blob(d.transformTo("arraybuffer",x));case"base64":return a.base64?h.encode(x):x;default:return x}},crc32:function(a,b){return e(a,b)},utf8encode:function(a){return d.transformTo("string",l.utf8encode(a))},utf8decode:function(a){return l.utf8decode(a)}};b.exports=A},{"./base64":1,"./compressedObject":2,"./compressions":3,"./crc32":4,"./defaults":6,"./nodeBuffer":11,"./signature":14,"./stringWriter":16,"./support":17,"./uint8ArrayWriter":19,"./utf8":20,"./utils":21}],14:[function(a,b,c){"use strict";c.LOCAL_FILE_HEADER="PK",c.CENTRAL_FILE_HEADER="PK",c.CENTRAL_DIRECTORY_END="PK",c.ZIP64_CENTRAL_DIRECTORY_LOCATOR="PK",c.ZIP64_CENTRAL_DIRECTORY_END="PK",c.DATA_DESCRIPTOR="PK\b"},{}],15:[function(a,b){"use strict";function c(a,b){this.data=a,b||(this.data=e.string2binary(this.data)),this.length=this.data.length,this.index=0}var d=a("./dataReader"),e=a("./utils");c.prototype=new d,c.prototype.byteAt=function(a){return this.data.charCodeAt(a)},c.prototype.lastIndexOfSignature=function(a){return this.data.lastIndexOf(a)},c.prototype.readData=function(a){this.checkOffset(a);var b=this.data.slice(this.index,this.index+a);return this.index+=a,b},b.exports=c},{"./dataReader":5,"./utils":21}],16:[function(a,b){"use strict";var c=a("./utils"),d=function(){this.data=[]};d.prototype={append:function(a){a=c.transformTo("string",a),this.data.push(a)},finalize:function(){return this.data.join("")}},b.exports=d},{"./utils":21}],17:[function(a,b,c){(function(a){"use strict";if(c.base64=!0,c.array=!0,c.string=!0,c.arraybuffer="undefined"!=typeof ArrayBuffer&&"undefined"!=typeof Uint8Array,c.nodebuffer="undefined"!=typeof a,c.uint8array="undefined"!=typeof Uint8Array,"undefined"==typeof ArrayBuffer)c.blob=!1;else{var b=new ArrayBuffer(0);try{c.blob=0===new Blob([b],{type:"application/zip"}).size}catch(d){try{var 
e=window.BlobBuilder||window.WebKitBlobBuilder||window.MozBlobBuilder||window.MSBlobBuilder,f=new e;f.append(b),c.blob=0===f.getBlob("application/zip").size}catch(d){c.blob=!1}}}}).call(this,"undefined"!=typeof Buffer?Buffer:void 0)},{}],18:[function(a,b){"use strict";function c(a){a&&(this.data=a,this.length=this.data.length,this.index=0)}var d=a("./dataReader");c.prototype=new d,c.prototype.byteAt=function(a){return this.data[a]},c.prototype.lastIndexOfSignature=function(a){for(var b=a.charCodeAt(0),c=a.charCodeAt(1),d=a.charCodeAt(2),e=a.charCodeAt(3),f=this.length-4;f>=0;--f)if(this.data[f]===b&&this.data[f+1]===c&&this.data[f+2]===d&&this.data[f+3]===e)return f;return-1},c.prototype.readData=function(a){if(this.checkOffset(a),0===a)return new Uint8Array(0);var b=this.data.subarray(this.index,this.index+a);return this.index+=a,b},b.exports=c},{"./dataReader":5}],19:[function(a,b){"use strict";var c=a("./utils"),d=function(a){this.data=new Uint8Array(a),this.index=0};d.prototype={append:function(a){0!==a.length&&(a=c.transformTo("uint8array",a),this.data.set(a,this.index),this.index+=a.length)},finalize:function(){return this.data}},b.exports=d},{"./utils":21}],20:[function(a,b,c){"use strict";for(var d=a("./utils"),e=a("./support"),f=a("./nodeBuffer"),g=new Array(256),h=0;256>h;h++)g[h]=h>=252?6:h>=248?5:h>=240?4:h>=224?3:h>=192?2:1;g[254]=g[254]=1;var i=function(a){var b,c,d,f,g,h=a.length,i=0;for(f=0;h>f;f++)c=a.charCodeAt(f),55296===(64512&c)&&h>f+1&&(d=a.charCodeAt(f+1),56320===(64512&d)&&(c=65536+(c-55296<<10)+(d-56320),f++)),i+=128>c?1:2048>c?2:65536>c?3:4;for(b=e.uint8array?new Uint8Array(i):new Array(i),g=0,f=0;i>g;f++)c=a.charCodeAt(f),55296===(64512&c)&&h>f+1&&(d=a.charCodeAt(f+1),56320===(64512&d)&&(c=65536+(c-55296<<10)+(d-56320),f++)),128>c?b[g++]=c:2048>c?(b[g++]=192|c>>>6,b[g++]=128|63&c):65536>c?(b[g++]=224|c>>>12,b[g++]=128|c>>>6&63,b[g++]=128|63&c):(b[g++]=240|c>>>18,b[g++]=128|c>>>12&63,b[g++]=128|c>>>6&63,b[g++]=128|63&c);return 
b},j=function(a,b){var c;for(b=b||a.length,b>a.length&&(b=a.length),c=b-1;c>=0&&128===(192&a[c]);)c--;return 0>c?b:0===c?b:c+g[a[c]]>b?c:b},k=function(a){var b,c,e,f,h=a.length,i=new Array(2*h);for(c=0,b=0;h>b;)if(e=a[b++],128>e)i[c++]=e;else if(f=g[e],f>4)i[c++]=65533,b+=f-1;else{for(e&=2===f?31:3===f?15:7;f>1&&h>b;)e=e<<6|63&a[b++],f--;f>1?i[c++]=65533:65536>e?i[c++]=e:(e-=65536,i[c++]=55296|e>>10&1023,i[c++]=56320|1023&e)}return i.length!==c&&(i.subarray?i=i.subarray(0,c):i.length=c),d.applyFromCharCode(i)};c.utf8encode=function(a){return e.nodebuffer?f(a,"utf-8"):i(a)},c.utf8decode=function(a){if(e.nodebuffer)return d.transformTo("nodebuffer",a).toString("utf-8");a=d.transformTo(e.uint8array?"uint8array":"array",a);for(var b=[],c=0,f=a.length,g=65536;f>c;){var h=j(a,Math.min(c+g,f));b.push(e.uint8array?k(a.subarray(c,h)):k(a.slice(c,h))),c=h}return b.join("")}},{"./nodeBuffer":11,"./support":17,"./utils":21}],21:[function(a,b,c){"use strict";function d(a){return a}function e(a,b){for(var c=0;c<a.length;++c)b[c]=255&a.charCodeAt(c);return b}function f(a){var b=65536,d=[],e=a.length,f=c.getTypeOf(a),g=0,h=!0;try{switch(f){case"uint8array":String.fromCharCode.apply(null,new Uint8Array(0));break;case"nodebuffer":String.fromCharCode.apply(null,j(0))}}catch(i){h=!1}if(!h){for(var k="",l=0;l<a.length;l++)k+=String.fromCharCode(a[l]);return k}for(;e>g&&b>1;)try{d.push("array"===f||"nodebuffer"===f?String.fromCharCode.apply(null,a.slice(g,Math.min(g+b,e))):String.fromCharCode.apply(null,a.subarray(g,Math.min(g+b,e)))),g+=b}catch(i){b=Math.floor(b/2)}return d.join("")}function g(a,b){for(var c=0;c<a.length;c++)b[c]=a[c];return b}var h=a("./support"),i=a("./compressions"),j=a("./nodeBuffer");c.string2binary=function(a){for(var b="",c=0;c<a.length;c++)b+=String.fromCharCode(255&a.charCodeAt(c));return b},c.arrayBuffer2Blob=function(a){c.checkSupport("blob");try{return new Blob([a],{type:"application/zip"})}catch(b){try{var 
d=window.BlobBuilder||window.WebKitBlobBuilder||window.MozBlobBuilder||window.MSBlobBuilder,e=new d;return e.append(a),e.getBlob("application/zip")}catch(b){throw new Error("Bug : can't construct the Blob.")}}},c.applyFromCharCode=f;var k={};k.string={string:d,array:function(a){return e(a,new Array(a.length))},arraybuffer:function(a){return k.string.uint8array(a).buffer},uint8array:function(a){return e(a,new Uint8Array(a.length))},nodebuffer:function(a){return e(a,j(a.length))}},k.array={string:f,array:d,arraybuffer:function(a){return new Uint8Array(a).buffer},uint8array:function(a){return new Uint8Array(a)},nodebuffer:function(a){return j(a)}},k.arraybuffer={string:function(a){return f(new Uint8Array(a))},array:function(a){return g(new Uint8Array(a),new Array(a.byteLength))},arraybuffer:d,uint8array:function(a){return new Uint8Array(a)},nodebuffer:function(a){return j(new Uint8Array(a))}},k.uint8array={string:f,array:function(a){return g(a,new Array(a.length))},arraybuffer:function(a){return a.buffer},uint8array:d,nodebuffer:function(a){return j(a)}},k.nodebuffer={string:f,array:function(a){return g(a,new Array(a.length))},arraybuffer:function(a){return k.nodebuffer.uint8array(a).buffer},uint8array:function(a){return g(a,new Uint8Array(a.length))},nodebuffer:d},c.transformTo=function(a,b){if(b||(b=""),!a)return b;c.checkSupport(a);var d=c.getTypeOf(b),e=k[d][a](b);return e},c.getTypeOf=function(a){return"string"==typeof a?"string":"[object Array]"===Object.prototype.toString.call(a)?"array":h.nodebuffer&&j.test(a)?"nodebuffer":h.uint8array&&a instanceof Uint8Array?"uint8array":h.arraybuffer&&a instanceof ArrayBuffer?"arraybuffer":void 0},c.checkSupport=function(a){var b=h[a.toLowerCase()];if(!b)throw new Error(a+" is not supported by this browser")},c.MAX_VALUE_16BITS=65535,c.MAX_VALUE_32BITS=-1,c.pretty=function(a){var b,c,d="";for(c=0;c<(a||"").length;c++)b=a.charCodeAt(c),d+="\\x"+(16>b?"0":"")+b.toString(16).toUpperCase();return 
d},c.findCompression=function(a){for(var b in i)if(i.hasOwnProperty(b)&&i[b].magic===a)return i[b];return null},c.isRegExp=function(a){return"[object RegExp]"===Object.prototype.toString.call(a)}},{"./compressions":3,"./nodeBuffer":11,"./support":17}],22:[function(a,b){"use strict";function c(a,b){this.files=[],this.loadOptions=b,a&&this.load(a)}var d=a("./stringReader"),e=a("./nodeBufferReader"),f=a("./uint8ArrayReader"),g=a("./utils"),h=a("./signature"),i=a("./zipEntry"),j=a("./support"),k=a("./object");c.prototype={checkSignature:function(a){var b=this.reader.readString(4);if(b!==a)throw new Error("Corrupted zip or bug : unexpected signature ("+g.pretty(b)+", expected "+g.pretty(a)+")")},readBlockEndOfCentral:function(){this.diskNumber=this.reader.readInt(2),this.diskWithCentralDirStart=this.reader.readInt(2),this.centralDirRecordsOnThisDisk=this.reader.readInt(2),this.centralDirRecords=this.reader.readInt(2),this.centralDirSize=this.reader.readInt(4),this.centralDirOffset=this.reader.readInt(4),this.zipCommentLength=this.reader.readInt(2),this.zipComment=this.reader.readString(this.zipCommentLength),this.zipComment=k.utf8decode(this.zipComment)},readBlockZip64EndOfCentral:function(){this.zip64EndOfCentralSize=this.reader.readInt(8),this.versionMadeBy=this.reader.readString(2),this.versionNeeded=this.reader.readInt(2),this.diskNumber=this.reader.readInt(4),this.diskWithCentralDirStart=this.reader.readInt(4),this.centralDirRecordsOnThisDisk=this.reader.readInt(8),this.centralDirRecords=this.reader.readInt(8),this.centralDirSize=this.reader.readInt(8),this.centralDirOffset=this.reader.readInt(8),this.zip64ExtensibleData={};for(var 
a,b,c,d=this.zip64EndOfCentralSize-44,e=0;d>e;)a=this.reader.readInt(2),b=this.reader.readInt(4),c=this.reader.readString(b),this.zip64ExtensibleData[a]={id:a,length:b,value:c}},readBlockZip64EndOfCentralLocator:function(){if(this.diskWithZip64CentralDirStart=this.reader.readInt(4),this.relativeOffsetEndOfZip64CentralDir=this.reader.readInt(8),this.disksCount=this.reader.readInt(4),this.disksCount>1)throw new Error("Multi-volumes zip are not supported")},readLocalFiles:function(){var a,b;for(a=0;a<this.files.length;a++)b=this.files[a],this.reader.setIndex(b.localHeaderOffset),this.checkSignature(h.LOCAL_FILE_HEADER),b.readLocalPart(this.reader),b.handleUTF8()},readCentralDir:function(){var a;for(this.reader.setIndex(this.centralDirOffset);this.reader.readString(4)===h.CENTRAL_FILE_HEADER;)a=new i({zip64:this.zip64},this.loadOptions),a.readCentralPart(this.reader),this.files.push(a)},readEndOfCentral:function(){var a=this.reader.lastIndexOfSignature(h.CENTRAL_DIRECTORY_END);if(-1===a)throw new Error("Corrupted zip : can't find end of central directory");if(this.reader.setIndex(a),this.checkSignature(h.CENTRAL_DIRECTORY_END),this.readBlockEndOfCentral(),this.diskNumber===g.MAX_VALUE_16BITS||this.diskWithCentralDirStart===g.MAX_VALUE_16BITS||this.centralDirRecordsOnThisDisk===g.MAX_VALUE_16BITS||this.centralDirRecords===g.MAX_VALUE_16BITS||this.centralDirSize===g.MAX_VALUE_32BITS||this.centralDirOffset===g.MAX_VALUE_32BITS){if(this.zip64=!0,a=this.reader.lastIndexOfSignature(h.ZIP64_CENTRAL_DIRECTORY_LOCATOR),-1===a)throw new Error("Corrupted zip : can't find the ZIP64 end of central directory locator");this.reader.setIndex(a),this.checkSignature(h.ZIP64_CENTRAL_DIRECTORY_LOCATOR),this.readBlockZip64EndOfCentralLocator(),this.reader.setIndex(this.relativeOffsetEndOfZip64CentralDir),this.checkSignature(h.ZIP64_CENTRAL_DIRECTORY_END),this.readBlockZip64EndOfCentral()}},prepareReader:function(a){var 
b=g.getTypeOf(a);this.reader="string"!==b||j.uint8array?"nodebuffer"===b?new e(a):new f(g.transformTo("uint8array",a)):new d(a,this.loadOptions.optimizedBinaryString)},load:function(a){this.prepareReader(a),this.readEndOfCentral(),this.readCentralDir(),this.readLocalFiles()}},b.exports=c},{"./nodeBufferReader":12,"./object":13,"./signature":14,"./stringReader":15,"./support":17,"./uint8ArrayReader":18,"./utils":21,"./zipEntry":23}],23:[function(a,b){"use strict";function c(a,b){this.options=a,this.loadOptions=b}var d=a("./stringReader"),e=a("./utils"),f=a("./compressedObject"),g=a("./object");c.prototype={isEncrypted:function(){return 1===(1&this.bitFlag)},useUTF8:function(){return 2048===(2048&this.bitFlag)},prepareCompressedContent:function(a,b,c){return function(){var d=a.index;a.setIndex(b);var e=a.readData(c);return a.setIndex(d),e}},prepareContent:function(a,b,c,d,f){return function(){var a=e.transformTo(d.uncompressInputType,this.getCompressedContent()),b=d.uncompress(a);if(b.length!==f)throw new Error("Bug : uncompressed data size mismatch");return b}},readLocalPart:function(a){var b,c;if(a.skip(22),this.fileNameLength=a.readInt(2),c=a.readInt(2),this.fileName=a.readString(this.fileNameLength),a.skip(c),-1==this.compressedSize||-1==this.uncompressedSize)throw new Error("Bug or corrupted zip : didn't get enough informations from the central directory (compressedSize == -1 || uncompressedSize == -1)");if(b=e.findCompression(this.compressionMethod),null===b)throw new Error("Corrupted zip : compression "+e.pretty(this.compressionMethod)+" unknown (inner file : "+this.fileName+")");if(this.decompressed=new 
f,this.decompressed.compressedSize=this.compressedSize,this.decompressed.uncompressedSize=this.uncompressedSize,this.decompressed.crc32=this.crc32,this.decompressed.compressionMethod=this.compressionMethod,this.decompressed.getCompressedContent=this.prepareCompressedContent(a,a.index,this.compressedSize,b),this.decompressed.getContent=this.prepareContent(a,a.index,this.compressedSize,b,this.uncompressedSize),this.loadOptions.checkCRC32&&(this.decompressed=e.transformTo("string",this.decompressed.getContent()),g.crc32(this.decompressed)!==this.crc32))throw new Error("Corrupted zip : CRC32 mismatch")},readCentralPart:function(a){if(this.versionMadeBy=a.readString(2),this.versionNeeded=a.readInt(2),this.bitFlag=a.readInt(2),this.compressionMethod=a.readString(2),this.date=a.readDate(),this.crc32=a.readInt(4),this.compressedSize=a.readInt(4),this.uncompressedSize=a.readInt(4),this.fileNameLength=a.readInt(2),this.extraFieldsLength=a.readInt(2),this.fileCommentLength=a.readInt(2),this.diskNumberStart=a.readInt(2),this.internalFileAttributes=a.readInt(2),this.externalFileAttributes=a.readInt(4),this.localHeaderOffset=a.readInt(4),this.isEncrypted())throw new Error("Encrypted zip are not supported");this.fileName=a.readString(this.fileNameLength),this.readExtraFields(a),this.parseZIP64ExtraField(a),this.fileComment=a.readString(this.fileCommentLength),this.dir=16&this.externalFileAttributes?!0:!1},parseZIP64ExtraField:function(){if(this.extraFields[1]){var a=new d(this.extraFields[1].value);this.uncompressedSize===e.MAX_VALUE_32BITS&&(this.uncompressedSize=a.readInt(8)),this.compressedSize===e.MAX_VALUE_32BITS&&(this.compressedSize=a.readInt(8)),this.localHeaderOffset===e.MAX_VALUE_32BITS&&(this.localHeaderOffset=a.readInt(8)),this.diskNumberStart===e.MAX_VALUE_32BITS&&(this.diskNumberStart=a.readInt(4))}},readExtraFields:function(a){var 
b,c,d,e=a.index;for(this.extraFields=this.extraFields||{};a.index<e+this.extraFieldsLength;)b=a.readInt(2),c=a.readInt(2),d=a.readString(c),this.extraFields[b]={id:b,length:c,value:d}},handleUTF8:function(){if(this.useUTF8())this.fileName=g.utf8decode(this.fileName),this.fileComment=g.utf8decode(this.fileComment);else{var a=this.findExtraFieldUnicodePath();null!==a&&(this.fileName=a);var b=this.findExtraFieldUnicodeComment();null!==b&&(this.fileComment=b)}},findExtraFieldUnicodePath:function(){var a=this.extraFields[28789];if(a){var b=new d(a.value);return 1!==b.readInt(1)?null:g.crc32(this.fileName)!==b.readInt(4)?null:g.utf8decode(b.readString(a.length-5))}return null},findExtraFieldUnicodeComment:function(){var a=this.extraFields[25461];if(a){var b=new d(a.value);return 1!==b.readInt(1)?null:g.crc32(this.fileComment)!==b.readInt(4)?null:g.utf8decode(b.readString(a.length-5))}return null}},b.exports=c},{"./compressedObject":2,"./object":13,"./stringReader":15,"./utils":21}],24:[function(a,b){"use strict";var c=a("./lib/utils/common").assign,d=a("./lib/deflate"),e=a("./lib/inflate"),f=a("./lib/zlib/constants"),g={};c(g,d,e,f),b.exports=g},{"./lib/deflate":25,"./lib/inflate":26,"./lib/utils/common":27,"./lib/zlib/constants":30}],25:[function(a,b,c){"use strict";function d(a,b){var c=new s(b);if(c.push(a,!0),c.err)throw c.msg;return c.result}function e(a,b){return b=b||{},b.raw=!0,d(a,b)}function f(a,b){return b=b||{},b.gzip=!0,d(a,b)}var g=a("./zlib/deflate.js"),h=a("./utils/common"),i=a("./utils/strings"),j=a("./zlib/messages"),k=a("./zlib/zstream"),l=0,m=4,n=0,o=1,p=-1,q=0,r=8,s=function(a){this.options=h.assign({level:p,method:r,chunkSize:16384,windowBits:15,memLevel:8,strategy:q,to:""},a||{});var b=this.options;b.raw&&b.windowBits>0?b.windowBits=-b.windowBits:b.gzip&&b.windowBits>0&&b.windowBits<16&&(b.windowBits+=16),this.err=0,this.msg="",this.ended=!1,this.chunks=[],this.strm=new k,this.strm.avail_out=0;var 
c=g.deflateInit2(this.strm,b.level,b.method,b.windowBits,b.memLevel,b.strategy);if(c!==n)throw new Error(j[c]);b.header&&g.deflateSetHeader(this.strm,b.header)};s.prototype.push=function(a,b){var c,d,e=this.strm,f=this.options.chunkSize;if(this.ended)return!1;d=b===~~b?b:b===!0?m:l,e.input="string"==typeof a?i.string2buf(a):a,e.next_in=0,e.avail_in=e.input.length;do{if(0===e.avail_out&&(e.output=new h.Buf8(f),e.next_out=0,e.avail_out=f),c=g.deflate(e,d),c!==o&&c!==n)return this.onEnd(c),this.ended=!0,!1;(0===e.avail_out||0===e.avail_in&&d===m)&&this.onData("string"===this.options.to?i.buf2binstring(h.shrinkBuf(e.output,e.next_out)):h.shrinkBuf(e.output,e.next_out))}while((e.avail_in>0||0===e.avail_out)&&c!==o);return d===m?(c=g.deflateEnd(this.strm),this.onEnd(c),this.ended=!0,c===n):!0},s.prototype.onData=function(a){this.chunks.push(a)},s.prototype.onEnd=function(a){a===n&&(this.result="string"===this.options.to?this.chunks.join(""):h.flattenChunks(this.chunks)),this.chunks=[],this.err=a,this.msg=this.strm.msg},c.Deflate=s,c.deflate=d,c.deflateRaw=e,c.gzip=f},{"./utils/common":27,"./utils/strings":28,"./zlib/deflate.js":32,"./zlib/messages":37,"./zlib/zstream":39}],26:[function(a,b,c){"use strict";function d(a,b){var c=new m(b);if(c.push(a,!0),c.err)throw c.msg;return c.result}function e(a,b){return b=b||{},b.raw=!0,d(a,b)}var f=a("./zlib/inflate.js"),g=a("./utils/common"),h=a("./utils/strings"),i=a("./zlib/constants"),j=a("./zlib/messages"),k=a("./zlib/zstream"),l=a("./zlib/gzheader"),m=function(a){this.options=g.assign({chunkSize:16384,windowBits:0,to:""},a||{});var b=this.options;b.raw&&b.windowBits>=0&&b.windowBits<16&&(b.windowBits=-b.windowBits,0===b.windowBits&&(b.windowBits=-15)),!(b.windowBits>=0&&b.windowBits<16)||a&&a.windowBits||(b.windowBits+=32),b.windowBits>15&&b.windowBits<48&&0===(15&b.windowBits)&&(b.windowBits|=15),this.err=0,this.msg="",this.ended=!1,this.chunks=[],this.strm=new k,this.strm.avail_out=0;var 
c=f.inflateInit2(this.strm,b.windowBits);if(c!==i.Z_OK)throw new Error(j[c]);this.header=new l,f.inflateGetHeader(this.strm,this.header)};m.prototype.push=function(a,b){var c,d,e,j,k,l=this.strm,m=this.options.chunkSize;if(this.ended)return!1;d=b===~~b?b:b===!0?i.Z_FINISH:i.Z_NO_FLUSH,l.input="string"==typeof a?h.binstring2buf(a):a,l.next_in=0,l.avail_in=l.input.length;do{if(0===l.avail_out&&(l.output=new g.Buf8(m),l.next_out=0,l.avail_out=m),c=f.inflate(l,i.Z_NO_FLUSH),c!==i.Z_STREAM_END&&c!==i.Z_OK)return this.onEnd(c),this.ended=!0,!1;l.next_out&&(0===l.avail_out||c===i.Z_STREAM_END||0===l.avail_in&&d===i.Z_FINISH)&&("string"===this.options.to?(e=h.utf8border(l.output,l.next_out),j=l.next_out-e,k=h.buf2string(l.output,e),l.next_out=j,l.avail_out=m-j,j&&g.arraySet(l.output,l.output,e,j,0),this.onData(k)):this.onData(g.shrinkBuf(l.output,l.next_out)))}while(l.avail_in>0&&c!==i.Z_STREAM_END);return c===i.Z_STREAM_END&&(d=i.Z_FINISH),d===i.Z_FINISH?(c=f.inflateEnd(this.strm),this.onEnd(c),this.ended=!0,c===i.Z_OK):!0},m.prototype.onData=function(a){this.chunks.push(a)},m.prototype.onEnd=function(a){a===i.Z_OK&&(this.result="string"===this.options.to?this.chunks.join(""):g.flattenChunks(this.chunks)),this.chunks=[],this.err=a,this.msg=this.strm.msg},c.Inflate=m,c.inflate=d,c.inflateRaw=e,c.ungzip=d},{"./utils/common":27,"./utils/strings":28,"./zlib/constants":30,"./zlib/gzheader":33,"./zlib/inflate.js":35,"./zlib/messages":37,"./zlib/zstream":39}],27:[function(a,b,c){"use strict";var d="undefined"!=typeof Uint8Array&&"undefined"!=typeof Uint16Array&&"undefined"!=typeof Int32Array;c.assign=function(a){for(var b=Array.prototype.slice.call(arguments,1);b.length;){var c=b.shift();if(c){if("object"!=typeof c)throw new TypeError(c+"must be non-object");for(var d in c)c.hasOwnProperty(d)&&(a[d]=c[d])}}return a},c.shrinkBuf=function(a,b){return a.length===b?a:a.subarray?a.subarray(0,b):(a.length=b,a)};var e={arraySet:function(a,b,c,d,e){if(b.subarray&&a.subarray)return void 
a.set(b.subarray(c,c+d),e);for(var f=0;d>f;f++)a[e+f]=b[c+f]},flattenChunks:function(a){var b,c,d,e,f,g;for(d=0,b=0,c=a.length;c>b;b++)d+=a[b].length;for(g=new Uint8Array(d),e=0,b=0,c=a.length;c>b;b++)f=a[b],g.set(f,e),e+=f.length;return g}},f={arraySet:function(a,b,c,d,e){for(var f=0;d>f;f++)a[e+f]=b[c+f]},flattenChunks:function(a){return[].concat.apply([],a)}};c.setTyped=function(a){a?(c.Buf8=Uint8Array,c.Buf16=Uint16Array,c.Buf32=Int32Array,c.assign(c,e)):(c.Buf8=Array,c.Buf16=Array,c.Buf32=Array,c.assign(c,f))},c.setTyped(d)},{}],28:[function(a,b,c){"use strict";function d(a,b){if(65537>b&&(a.subarray&&g||!a.subarray&&f))return String.fromCharCode.apply(null,e.shrinkBuf(a,b));for(var c="",d=0;b>d;d++)c+=String.fromCharCode(a[d]);return c}var e=a("./common"),f=!0,g=!0;try{String.fromCharCode.apply(null,[0])}catch(h){f=!1}try{String.fromCharCode.apply(null,new Uint8Array(1))}catch(h){g=!1}for(var i=new e.Buf8(256),j=0;256>j;j++)i[j]=j>=252?6:j>=248?5:j>=240?4:j>=224?3:j>=192?2:1;i[254]=i[254]=1,c.string2buf=function(a){var b,c,d,f,g,h=a.length,i=0;for(f=0;h>f;f++)c=a.charCodeAt(f),55296===(64512&c)&&h>f+1&&(d=a.charCodeAt(f+1),56320===(64512&d)&&(c=65536+(c-55296<<10)+(d-56320),f++)),i+=128>c?1:2048>c?2:65536>c?3:4;for(b=new e.Buf8(i),g=0,f=0;i>g;f++)c=a.charCodeAt(f),55296===(64512&c)&&h>f+1&&(d=a.charCodeAt(f+1),56320===(64512&d)&&(c=65536+(c-55296<<10)+(d-56320),f++)),128>c?b[g++]=c:2048>c?(b[g++]=192|c>>>6,b[g++]=128|63&c):65536>c?(b[g++]=224|c>>>12,b[g++]=128|c>>>6&63,b[g++]=128|63&c):(b[g++]=240|c>>>18,b[g++]=128|c>>>12&63,b[g++]=128|c>>>6&63,b[g++]=128|63&c);return b},c.buf2binstring=function(a){return d(a,a.length)},c.binstring2buf=function(a){for(var b=new e.Buf8(a.length),c=0,d=b.length;d>c;c++)b[c]=a.charCodeAt(c);return b},c.buf2string=function(a,b){var c,e,f,g,h=b||a.length,j=new Array(2*h);for(e=0,c=0;h>c;)if(f=a[c++],128>f)j[e++]=f;else 
if(g=i[f],g>4)j[e++]=65533,c+=g-1;else{for(f&=2===g?31:3===g?15:7;g>1&&h>c;)f=f<<6|63&a[c++],g--;g>1?j[e++]=65533:65536>f?j[e++]=f:(f-=65536,j[e++]=55296|f>>10&1023,j[e++]=56320|1023&f)}return d(j,e)},c.utf8border=function(a,b){var c;for(b=b||a.length,b>a.length&&(b=a.length),c=b-1;c>=0&&128===(192&a[c]);)c--;return 0>c?b:0===c?b:c+i[a[c]]>b?c:b}},{"./common":27}],29:[function(a,b){"use strict";function c(a,b,c,d){for(var e=65535&a|0,f=a>>>16&65535|0,g=0;0!==c;){g=c>2e3?2e3:c,c-=g;do e=e+b[d++]|0,f=f+e|0;while(--g);e%=65521,f%=65521}return e|f<<16|0}b.exports=c},{}],30:[function(a,b){b.exports={Z_NO_FLUSH:0,Z_PARTIAL_FLUSH:1,Z_SYNC_FLUSH:2,Z_FULL_FLUSH:3,Z_FINISH:4,Z_BLOCK:5,Z_TREES:6,Z_OK:0,Z_STREAM_END:1,Z_NEED_DICT:2,Z_ERRNO:-1,Z_STREAM_ERROR:-2,Z_DATA_ERROR:-3,Z_BUF_ERROR:-5,Z_NO_COMPRESSION:0,Z_BEST_SPEED:1,Z_BEST_COMPRESSION:9,Z_DEFAULT_COMPRESSION:-1,Z_FILTERED:1,Z_HUFFMAN_ONLY:2,Z_RLE:3,Z_FIXED:4,Z_DEFAULT_STRATEGY:0,Z_BINARY:0,Z_TEXT:1,Z_UNKNOWN:2,Z_DEFLATED:8}},{}],31:[function(a,b){"use strict";function c(){for(var a,b=[],c=0;256>c;c++){a=c;for(var d=0;8>d;d++)a=1&a?3988292384^a>>>1:a>>>1;b[c]=a}return b}function d(a,b,c,d){var f=e,g=d+c;a=-1^a;for(var h=d;g>h;h++)a=a>>>8^f[255&(a^b[h])];return-1^a}var e=c();b.exports=d},{}],32:[function(a,b,c){"use strict";function d(a,b){return a.msg=G[b],b}function e(a){return(a<<1)-(a>4?9:0)}function f(a){for(var b=a.length;--b>=0;)a[b]=0}function g(a){var b=a.state,c=b.pending;c>a.avail_out&&(c=a.avail_out),0!==c&&(C.arraySet(a.output,b.pending_buf,b.pending_out,c,a.next_out),a.next_out+=c,b.pending_out+=c,a.total_out+=c,a.avail_out-=c,b.pending-=c,0===b.pending&&(b.pending_out=0))}function h(a,b){D._tr_flush_block(a,a.block_start>=0?a.block_start:-1,a.strstart-a.block_start,b),a.block_start=a.strstart,g(a.strm)}function i(a,b){a.pending_buf[a.pending++]=b}function j(a,b){a.pending_buf[a.pending++]=b>>>8&255,a.pending_buf[a.pending++]=255&b}function k(a,b,c,d){var e=a.avail_in;return 
e>d&&(e=d),0===e?0:(a.avail_in-=e,C.arraySet(b,a.input,a.next_in,e,c),1===a.state.wrap?a.adler=E(a.adler,b,e,c):2===a.state.wrap&&(a.adler=F(a.adler,b,e,c)),a.next_in+=e,a.total_in+=e,e)}function l(a,b){var c,d,e=a.max_chain_length,f=a.strstart,g=a.prev_length,h=a.nice_match,i=a.strstart>a.w_size-jb?a.strstart-(a.w_size-jb):0,j=a.window,k=a.w_mask,l=a.prev,m=a.strstart+ib,n=j[f+g-1],o=j[f+g];a.prev_length>=a.good_match&&(e>>=2),h>a.lookahead&&(h=a.lookahead);do if(c=b,j[c+g]===o&&j[c+g-1]===n&&j[c]===j[f]&&j[++c]===j[f+1]){f+=2,c++;do;while(j[++f]===j[++c]&&j[++f]===j[++c]&&j[++f]===j[++c]&&j[++f]===j[++c]&&j[++f]===j[++c]&&j[++f]===j[++c]&&j[++f]===j[++c]&&j[++f]===j[++c]&&m>f);if(d=ib-(m-f),f=m-ib,d>g){if(a.match_start=b,g=d,d>=h)break;n=j[f+g-1],o=j[f+g]}}while((b=l[b&k])>i&&0!==--e);return g<=a.lookahead?g:a.lookahead}function m(a){var b,c,d,e,f,g=a.w_size;do{if(e=a.window_size-a.lookahead-a.strstart,a.strstart>=g+(g-jb)){C.arraySet(a.window,a.window,g,g,0),a.match_start-=g,a.strstart-=g,a.block_start-=g,c=a.hash_size,b=c;do d=a.head[--b],a.head[b]=d>=g?d-g:0;while(--c);c=g,b=c;do d=a.prev[--b],a.prev[b]=d>=g?d-g:0;while(--c);e+=g}if(0===a.strm.avail_in)break;if(c=k(a.strm,a.window,a.strstart+a.lookahead,e),a.lookahead+=c,a.lookahead+a.insert>=hb)for(f=a.strstart-a.insert,a.ins_h=a.window[f],a.ins_h=(a.ins_h<<a.hash_shift^a.window[f+1])&a.hash_mask;a.insert&&(a.ins_h=(a.ins_h<<a.hash_shift^a.window[f+hb-1])&a.hash_mask,a.prev[f&a.w_mask]=a.head[a.ins_h],a.head[a.ins_h]=f,f++,a.insert--,!(a.lookahead+a.insert<hb)););}while(a.lookahead<jb&&0!==a.strm.avail_in)}function n(a,b){var c=65535;for(c>a.pending_buf_size-5&&(c=a.pending_buf_size-5);;){if(a.lookahead<=1){if(m(a),0===a.lookahead&&b===H)return sb;if(0===a.lookahead)break}a.strstart+=a.lookahead,a.lookahead=0;var d=a.block_start+c;if((0===a.strstart||a.strstart>=d)&&(a.lookahead=a.strstart-d,a.strstart=d,h(a,!1),0===a.strm.avail_out))return 
sb;if(a.strstart-a.block_start>=a.w_size-jb&&(h(a,!1),0===a.strm.avail_out))return sb}return a.insert=0,b===K?(h(a,!0),0===a.strm.avail_out?ub:vb):a.strstart>a.block_start&&(h(a,!1),0===a.strm.avail_out)?sb:sb}function o(a,b){for(var c,d;;){if(a.lookahead<jb){if(m(a),a.lookahead<jb&&b===H)return sb;if(0===a.lookahead)break}if(c=0,a.lookahead>=hb&&(a.ins_h=(a.ins_h<<a.hash_shift^a.window[a.strstart+hb-1])&a.hash_mask,c=a.prev[a.strstart&a.w_mask]=a.head[a.ins_h],a.head[a.ins_h]=a.strstart),0!==c&&a.strstart-c<=a.w_size-jb&&(a.match_length=l(a,c)),a.match_length>=hb)if(d=D._tr_tally(a,a.strstart-a.match_start,a.match_length-hb),a.lookahead-=a.match_length,a.match_length<=a.max_lazy_match&&a.lookahead>=hb){a.match_length--;do a.strstart++,a.ins_h=(a.ins_h<<a.hash_shift^a.window[a.strstart+hb-1])&a.hash_mask,c=a.prev[a.strstart&a.w_mask]=a.head[a.ins_h],a.head[a.ins_h]=a.strstart;while(0!==--a.match_length);a.strstart++}else a.strstart+=a.match_length,a.match_length=0,a.ins_h=a.window[a.strstart],a.ins_h=(a.ins_h<<a.hash_shift^a.window[a.strstart+1])&a.hash_mask;else d=D._tr_tally(a,0,a.window[a.strstart]),a.lookahead--,a.strstart++;if(d&&(h(a,!1),0===a.strm.avail_out))return sb}return a.insert=a.strstart<hb-1?a.strstart:hb-1,b===K?(h(a,!0),0===a.strm.avail_out?ub:vb):a.last_lit&&(h(a,!1),0===a.strm.avail_out)?sb:tb}function p(a,b){for(var c,d,e;;){if(a.lookahead<jb){if(m(a),a.lookahead<jb&&b===H)return 
sb;if(0===a.lookahead)break}if(c=0,a.lookahead>=hb&&(a.ins_h=(a.ins_h<<a.hash_shift^a.window[a.strstart+hb-1])&a.hash_mask,c=a.prev[a.strstart&a.w_mask]=a.head[a.ins_h],a.head[a.ins_h]=a.strstart),a.prev_length=a.match_length,a.prev_match=a.match_start,a.match_length=hb-1,0!==c&&a.prev_length<a.max_lazy_match&&a.strstart-c<=a.w_size-jb&&(a.match_length=l(a,c),a.match_length<=5&&(a.strategy===S||a.match_length===hb&&a.strstart-a.match_start>4096)&&(a.match_length=hb-1)),a.prev_length>=hb&&a.match_length<=a.prev_length){e=a.strstart+a.lookahead-hb,d=D._tr_tally(a,a.strstart-1-a.prev_match,a.prev_length-hb),a.lookahead-=a.prev_length-1,a.prev_length-=2;do++a.strstart<=e&&(a.ins_h=(a.ins_h<<a.hash_shift^a.window[a.strstart+hb-1])&a.hash_mask,c=a.prev[a.strstart&a.w_mask]=a.head[a.ins_h],a.head[a.ins_h]=a.strstart);while(0!==--a.prev_length);if(a.match_available=0,a.match_length=hb-1,a.strstart++,d&&(h(a,!1),0===a.strm.avail_out))return sb}else if(a.match_available){if(d=D._tr_tally(a,0,a.window[a.strstart-1]),d&&h(a,!1),a.strstart++,a.lookahead--,0===a.strm.avail_out)return sb}else a.match_available=1,a.strstart++,a.lookahead--}return a.match_available&&(d=D._tr_tally(a,0,a.window[a.strstart-1]),a.match_available=0),a.insert=a.strstart<hb-1?a.strstart:hb-1,b===K?(h(a,!0),0===a.strm.avail_out?ub:vb):a.last_lit&&(h(a,!1),0===a.strm.avail_out)?sb:tb}function q(a,b){for(var c,d,e,f,g=a.window;;){if(a.lookahead<=ib){if(m(a),a.lookahead<=ib&&b===H)return 
sb;if(0===a.lookahead)break}if(a.match_length=0,a.lookahead>=hb&&a.strstart>0&&(e=a.strstart-1,d=g[e],d===g[++e]&&d===g[++e]&&d===g[++e])){f=a.strstart+ib;do;while(d===g[++e]&&d===g[++e]&&d===g[++e]&&d===g[++e]&&d===g[++e]&&d===g[++e]&&d===g[++e]&&d===g[++e]&&f>e);a.match_length=ib-(f-e),a.match_length>a.lookahead&&(a.match_length=a.lookahead)}if(a.match_length>=hb?(c=D._tr_tally(a,1,a.match_length-hb),a.lookahead-=a.match_length,a.strstart+=a.match_length,a.match_length=0):(c=D._tr_tally(a,0,a.window[a.strstart]),a.lookahead--,a.strstart++),c&&(h(a,!1),0===a.strm.avail_out))return sb}return a.insert=0,b===K?(h(a,!0),0===a.strm.avail_out?ub:vb):a.last_lit&&(h(a,!1),0===a.strm.avail_out)?sb:tb}function r(a,b){for(var c;;){if(0===a.lookahead&&(m(a),0===a.lookahead)){if(b===H)return sb;break}if(a.match_length=0,c=D._tr_tally(a,0,a.window[a.strstart]),a.lookahead--,a.strstart++,c&&(h(a,!1),0===a.strm.avail_out))return sb}return a.insert=0,b===K?(h(a,!0),0===a.strm.avail_out?ub:vb):a.last_lit&&(h(a,!1),0===a.strm.avail_out)?sb:tb}function s(a){a.window_size=2*a.w_size,f(a.head),a.max_lazy_match=B[a.level].max_lazy,a.good_match=B[a.level].good_length,a.nice_match=B[a.level].nice_length,a.max_chain_length=B[a.level].max_chain,a.strstart=0,a.block_start=0,a.lookahead=0,a.insert=0,a.match_length=a.prev_length=hb-1,a.match_available=0,a.ins_h=0}function 
t(){this.strm=null,this.status=0,this.pending_buf=null,this.pending_buf_size=0,this.pending_out=0,this.pending=0,this.wrap=0,this.gzhead=null,this.gzindex=0,this.method=Y,this.last_flush=-1,this.w_size=0,this.w_bits=0,this.w_mask=0,this.window=null,this.window_size=0,this.prev=null,this.head=null,this.ins_h=0,this.hash_size=0,this.hash_bits=0,this.hash_mask=0,this.hash_shift=0,this.block_start=0,this.match_length=0,this.prev_match=0,this.match_available=0,this.strstart=0,this.match_start=0,this.lookahead=0,this.prev_length=0,this.max_chain_length=0,this.max_lazy_match=0,this.level=0,this.strategy=0,this.good_match=0,this.nice_match=0,this.dyn_ltree=new C.Buf16(2*fb),this.dyn_dtree=new C.Buf16(2*(2*db+1)),this.bl_tree=new C.Buf16(2*(2*eb+1)),f(this.dyn_ltree),f(this.dyn_dtree),f(this.bl_tree),this.l_desc=null,this.d_desc=null,this.bl_desc=null,this.bl_count=new C.Buf16(gb+1),this.heap=new C.Buf16(2*cb+1),f(this.heap),this.heap_len=0,this.heap_max=0,this.depth=new C.Buf16(2*cb+1),f(this.depth),this.l_buf=0,this.lit_bufsize=0,this.last_lit=0,this.d_buf=0,this.opt_len=0,this.static_len=0,this.matches=0,this.insert=0,this.bi_buf=0,this.bi_valid=0}function u(a){var b;return a&&a.state?(a.total_in=a.total_out=0,a.data_type=X,b=a.state,b.pending=0,b.pending_out=0,b.wrap<0&&(b.wrap=-b.wrap),b.status=b.wrap?lb:qb,a.adler=2===b.wrap?0:1,b.last_flush=H,D._tr_init(b),M):d(a,O)}function v(a){var b=u(a);return b===M&&s(a.state),b}function w(a,b){return a&&a.state?2!==a.state.wrap?O:(a.state.gzhead=b,M):O}function x(a,b,c,e,f,g){if(!a)return O;var h=1;if(b===R&&(b=6),0>e?(h=0,e=-e):e>15&&(h=2,e-=16),1>f||f>Z||c!==Y||8>e||e>15||0>b||b>9||0>g||g>V)return d(a,O);8===e&&(e=9);var i=new t;return a.state=i,i.strm=a,i.wrap=h,i.gzhead=null,i.w_bits=e,i.w_size=1<<i.w_bits,i.w_mask=i.w_size-1,i.hash_bits=f+7,i.hash_size=1<<i.hash_bits,i.hash_mask=i.hash_size-1,i.hash_shift=~~((i.hash_bits+hb-1)/hb),i.window=new C.Buf8(2*i.w_size),i.head=new C.Buf16(i.hash_size),i.prev=new 
C.Buf16(i.w_size),i.lit_bufsize=1<<f+6,i.pending_buf_size=4*i.lit_bufsize,i.pending_buf=new C.Buf8(i.pending_buf_size),i.d_buf=i.lit_bufsize>>1,i.l_buf=3*i.lit_bufsize,i.level=b,i.strategy=g,i.method=c,v(a)}function y(a,b){return x(a,b,Y,$,_,W)}function z(a,b){var c,h,k,l;if(!a||!a.state||b>L||0>b)return a?d(a,O):O;if(h=a.state,!a.output||!a.input&&0!==a.avail_in||h.status===rb&&b!==K)return d(a,0===a.avail_out?Q:O);if(h.strm=a,c=h.last_flush,h.last_flush=b,h.status===lb)if(2===h.wrap)a.adler=0,i(h,31),i(h,139),i(h,8),h.gzhead?(i(h,(h.gzhead.text?1:0)+(h.gzhead.hcrc?2:0)+(h.gzhead.extra?4:0)+(h.gzhead.name?8:0)+(h.gzhead.comment?16:0)),i(h,255&h.gzhead.time),i(h,h.gzhead.time>>8&255),i(h,h.gzhead.time>>16&255),i(h,h.gzhead.time>>24&255),i(h,9===h.level?2:h.strategy>=T||h.level<2?4:0),i(h,255&h.gzhead.os),h.gzhead.extra&&h.gzhead.extra.length&&(i(h,255&h.gzhead.extra.length),i(h,h.gzhead.extra.length>>8&255)),h.gzhead.hcrc&&(a.adler=F(a.adler,h.pending_buf,h.pending,0)),h.gzindex=0,h.status=mb):(i(h,0),i(h,0),i(h,0),i(h,0),i(h,0),i(h,9===h.level?2:h.strategy>=T||h.level<2?4:0),i(h,wb),h.status=qb);else{var m=Y+(h.w_bits-8<<4)<<8,n=-1;n=h.strategy>=T||h.level<2?0:h.level<6?1:6===h.level?2:3,m|=n<<6,0!==h.strstart&&(m|=kb),m+=31-m%31,h.status=qb,j(h,m),0!==h.strstart&&(j(h,a.adler>>>16),j(h,65535&a.adler)),a.adler=1}if(h.status===mb)if(h.gzhead.extra){for(k=h.pending;h.gzindex<(65535&h.gzhead.extra.length)&&(h.pending!==h.pending_buf_size||(h.gzhead.hcrc&&h.pending>k&&(a.adler=F(a.adler,h.pending_buf,h.pending-k,k)),g(a),k=h.pending,h.pending!==h.pending_buf_size));)i(h,255&h.gzhead.extra[h.gzindex]),h.gzindex++;h.gzhead.hcrc&&h.pending>k&&(a.adler=F(a.adler,h.pending_buf,h.pending-k,k)),h.gzindex===h.gzhead.extra.length&&(h.gzindex=0,h.status=nb)}else 
h.status=nb;if(h.status===nb)if(h.gzhead.name){k=h.pending;do{if(h.pending===h.pending_buf_size&&(h.gzhead.hcrc&&h.pending>k&&(a.adler=F(a.adler,h.pending_buf,h.pending-k,k)),g(a),k=h.pending,h.pending===h.pending_buf_size)){l=1;break}l=h.gzindex<h.gzhead.name.length?255&h.gzhead.name.charCodeAt(h.gzindex++):0,i(h,l)}while(0!==l);h.gzhead.hcrc&&h.pending>k&&(a.adler=F(a.adler,h.pending_buf,h.pending-k,k)),0===l&&(h.gzindex=0,h.status=ob)}else h.status=ob;if(h.status===ob)if(h.gzhead.comment){k=h.pending;do{if(h.pending===h.pending_buf_size&&(h.gzhead.hcrc&&h.pending>k&&(a.adler=F(a.adler,h.pending_buf,h.pending-k,k)),g(a),k=h.pending,h.pending===h.pending_buf_size)){l=1;break}l=h.gzindex<h.gzhead.comment.length?255&h.gzhead.comment.charCodeAt(h.gzindex++):0,i(h,l)}while(0!==l);h.gzhead.hcrc&&h.pending>k&&(a.adler=F(a.adler,h.pending_buf,h.pending-k,k)),0===l&&(h.status=pb)}else h.status=pb;if(h.status===pb&&(h.gzhead.hcrc?(h.pending+2>h.pending_buf_size&&g(a),h.pending+2<=h.pending_buf_size&&(i(h,255&a.adler),i(h,a.adler>>8&255),a.adler=0,h.status=qb)):h.status=qb),0!==h.pending){if(g(a),0===a.avail_out)return h.last_flush=-1,M}else if(0===a.avail_in&&e(b)<=e(c)&&b!==K)return d(a,Q);if(h.status===rb&&0!==a.avail_in)return d(a,Q);if(0!==a.avail_in||0!==h.lookahead||b!==H&&h.status!==rb){var o=h.strategy===T?r(h,b):h.strategy===U?q(h,b):B[h.level].func(h,b);if((o===ub||o===vb)&&(h.status=rb),o===sb||o===ub)return 0===a.avail_out&&(h.last_flush=-1),M;if(o===tb&&(b===I?D._tr_align(h):b!==L&&(D._tr_stored_block(h,0,0,!1),b===J&&(f(h.head),0===h.lookahead&&(h.strstart=0,h.block_start=0,h.insert=0))),g(a),0===a.avail_out))return h.last_flush=-1,M}return b!==K?M:h.wrap<=0?N:(2===h.wrap?(i(h,255&a.adler),i(h,a.adler>>8&255),i(h,a.adler>>16&255),i(h,a.adler>>24&255),i(h,255&a.total_in),i(h,a.total_in>>8&255),i(h,a.total_in>>16&255),i(h,a.total_in>>24&255)):(j(h,a.adler>>>16),j(h,65535&a.adler)),g(a),h.wrap>0&&(h.wrap=-h.wrap),0!==h.pending?M:N)}function A(a){var b;return 
a&&a.state?(b=a.state.status,b!==lb&&b!==mb&&b!==nb&&b!==ob&&b!==pb&&b!==qb&&b!==rb?d(a,O):(a.state=null,b===qb?d(a,P):M)):O}var B,C=a("../utils/common"),D=a("./trees"),E=a("./adler32"),F=a("./crc32"),G=a("./messages"),H=0,I=1,J=3,K=4,L=5,M=0,N=1,O=-2,P=-3,Q=-5,R=-1,S=1,T=2,U=3,V=4,W=0,X=2,Y=8,Z=9,$=15,_=8,ab=29,bb=256,cb=bb+1+ab,db=30,eb=19,fb=2*cb+1,gb=15,hb=3,ib=258,jb=ib+hb+1,kb=32,lb=42,mb=69,nb=73,ob=91,pb=103,qb=113,rb=666,sb=1,tb=2,ub=3,vb=4,wb=3,xb=function(a,b,c,d,e){this.good_length=a,this.max_lazy=b,this.nice_length=c,this.max_chain=d,this.func=e};B=[new xb(0,0,0,0,n),new xb(4,4,8,4,o),new xb(4,5,16,8,o),new xb(4,6,32,32,o),new xb(4,4,16,16,p),new xb(8,16,32,32,p),new xb(8,16,128,128,p),new xb(8,32,128,256,p),new xb(32,128,258,1024,p),new xb(32,258,258,4096,p)],c.deflateInit=y,c.deflateInit2=x,c.deflateReset=v,c.deflateResetKeep=u,c.deflateSetHeader=w,c.deflate=z,c.deflateEnd=A,c.deflateInfo="pako deflate (from Nodeca project)"},{"../utils/common":27,"./adler32":29,"./crc32":31,"./messages":37,"./trees":38}],33:[function(a,b){"use strict";function c(){this.text=0,this.time=0,this.xflags=0,this.os=0,this.extra=null,this.extra_len=0,this.name="",this.comment="",this.hcrc=0,this.done=!1}b.exports=c},{}],34:[function(a,b){"use strict";var c=30,d=12;b.exports=function(a,b){var e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u,v,w,x,y,z,A,B,C;e=a.state,f=a.next_in,B=a.input,g=f+(a.avail_in-5),h=a.next_out,C=a.output,i=h-(b-a.avail_out),j=h+(a.avail_out-257),k=e.dmax,l=e.wsize,m=e.whave,n=e.wnext,o=e.window,p=e.hold,q=e.bits,r=e.lencode,s=e.distcode,t=(1<<e.lenbits)-1,u=(1<<e.distbits)-1;a:do{15>q&&(p+=B[f++]<<q,q+=8,p+=B[f++]<<q,q+=8),v=r[p&t];b:for(;;){if(w=v>>>24,p>>>=w,q-=w,w=v>>>16&255,0===w)C[h++]=65535&v;else{if(!(16&w)){if(0===(64&w)){v=r[(65535&v)+(p&(1<<w)-1)];continue b}if(32&w){e.mode=d;break a}a.msg="invalid literal/length code",e.mode=c;break 
a}x=65535&v,w&=15,w&&(w>q&&(p+=B[f++]<<q,q+=8),x+=p&(1<<w)-1,p>>>=w,q-=w),15>q&&(p+=B[f++]<<q,q+=8,p+=B[f++]<<q,q+=8),v=s[p&u];c:for(;;){if(w=v>>>24,p>>>=w,q-=w,w=v>>>16&255,!(16&w)){if(0===(64&w)){v=s[(65535&v)+(p&(1<<w)-1)];continue c}a.msg="invalid distance code",e.mode=c;break a}if(y=65535&v,w&=15,w>q&&(p+=B[f++]<<q,q+=8,w>q&&(p+=B[f++]<<q,q+=8)),y+=p&(1<<w)-1,y>k){a.msg="invalid distance too far back",e.mode=c;break a}if(p>>>=w,q-=w,w=h-i,y>w){if(w=y-w,w>m&&e.sane){a.msg="invalid distance too far back",e.mode=c;break a}if(z=0,A=o,0===n){if(z+=l-w,x>w){x-=w;do C[h++]=o[z++];while(--w);z=h-y,A=C}}else if(w>n){if(z+=l+n-w,w-=n,x>w){x-=w;do C[h++]=o[z++];while(--w);if(z=0,x>n){w=n,x-=w;do C[h++]=o[z++];while(--w);z=h-y,A=C}}}else if(z+=n-w,x>w){x-=w;do C[h++]=o[z++];while(--w);z=h-y,A=C}for(;x>2;)C[h++]=A[z++],C[h++]=A[z++],C[h++]=A[z++],x-=3;x&&(C[h++]=A[z++],x>1&&(C[h++]=A[z++]))}else{z=h-y;do C[h++]=C[z++],C[h++]=C[z++],C[h++]=C[z++],x-=3;while(x>2);x&&(C[h++]=C[z++],x>1&&(C[h++]=C[z++]))}break}}break}}while(g>f&&j>h);x=q>>3,f-=x,q-=x<<3,p&=(1<<q)-1,a.next_in=f,a.next_out=h,a.avail_in=g>f?5+(g-f):5-(f-g),a.avail_out=j>h?257+(j-h):257-(h-j),e.hold=p,e.bits=q}},{}],35:[function(a,b,c){"use strict";function d(a){return(a>>>24&255)+(a>>>8&65280)+((65280&a)<<8)+((255&a)<<24)}function e(){this.mode=0,this.last=!1,this.wrap=0,this.havedict=!1,this.flags=0,this.dmax=0,this.check=0,this.total=0,this.head=null,this.wbits=0,this.wsize=0,this.whave=0,this.wnext=0,this.window=null,this.hold=0,this.bits=0,this.length=0,this.offset=0,this.extra=0,this.lencode=null,this.distcode=null,this.lenbits=0,this.distbits=0,this.ncode=0,this.nlen=0,this.ndist=0,this.have=0,this.next=null,this.lens=new r.Buf16(320),this.work=new r.Buf16(288),this.lendyn=null,this.distdyn=null,this.sane=0,this.back=0,this.was=0}function f(a){var b;return 
a&&a.state?(b=a.state,a.total_in=a.total_out=b.total=0,a.msg="",b.wrap&&(a.adler=1&b.wrap),b.mode=K,b.last=0,b.havedict=0,b.dmax=32768,b.head=null,b.hold=0,b.bits=0,b.lencode=b.lendyn=new r.Buf32(ob),b.distcode=b.distdyn=new r.Buf32(pb),b.sane=1,b.back=-1,C):F}function g(a){var b;return a&&a.state?(b=a.state,b.wsize=0,b.whave=0,b.wnext=0,f(a)):F}function h(a,b){var c,d;return a&&a.state?(d=a.state,0>b?(c=0,b=-b):(c=(b>>4)+1,48>b&&(b&=15)),b&&(8>b||b>15)?F:(null!==d.window&&d.wbits!==b&&(d.window=null),d.wrap=c,d.wbits=b,g(a))):F}function i(a,b){var c,d;return a?(d=new e,a.state=d,d.window=null,c=h(a,b),c!==C&&(a.state=null),c):F}function j(a){return i(a,rb)}function k(a){if(sb){var b;for(p=new r.Buf32(512),q=new r.Buf32(32),b=0;144>b;)a.lens[b++]=8;for(;256>b;)a.lens[b++]=9;for(;280>b;)a.lens[b++]=7;for(;288>b;)a.lens[b++]=8;for(v(x,a.lens,0,288,p,0,a.work,{bits:9}),b=0;32>b;)a.lens[b++]=5;v(y,a.lens,0,32,q,0,a.work,{bits:5}),sb=!1}a.lencode=p,a.lenbits=9,a.distcode=q,a.distbits=5}function l(a,b,c,d){var e,f=a.state;return null===f.window&&(f.wsize=1<<f.wbits,f.wnext=0,f.whave=0,f.window=new r.Buf8(f.wsize)),d>=f.wsize?(r.arraySet(f.window,b,c-f.wsize,f.wsize,0),f.wnext=0,f.whave=f.wsize):(e=f.wsize-f.wnext,e>d&&(e=d),r.arraySet(f.window,b,c-d,e,f.wnext),d-=e,d?(r.arraySet(f.window,b,c-d,d,0),f.wnext=d,f.whave=f.wsize):(f.wnext+=e,f.wnext===f.wsize&&(f.wnext=0),f.whave<f.wsize&&(f.whave+=e))),0}function m(a,b){var c,e,f,g,h,i,j,m,n,o,p,q,ob,pb,qb,rb,sb,tb,ub,vb,wb,xb,yb,zb,Ab=0,Bb=new r.Buf8(4),Cb=[16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15];if(!a||!a.state||!a.output||!a.input&&0!==a.avail_in)return F;c=a.state,c.mode===V&&(c.mode=W),h=a.next_out,f=a.output,j=a.avail_out,g=a.next_in,e=a.input,i=a.avail_in,m=c.hold,n=c.bits,o=i,p=j,xb=C;a:for(;;)switch(c.mode){case K:if(0===c.wrap){c.mode=W;break}for(;16>n;){if(0===i)break 
a;i--,m+=e[g++]<<n,n+=8}if(2&c.wrap&&35615===m){c.check=0,Bb[0]=255&m,Bb[1]=m>>>8&255,c.check=t(c.check,Bb,2,0),m=0,n=0,c.mode=L;break}if(c.flags=0,c.head&&(c.head.done=!1),!(1&c.wrap)||(((255&m)<<8)+(m>>8))%31){a.msg="incorrect header check",c.mode=lb;break}if((15&m)!==J){a.msg="unknown compression method",c.mode=lb;break}if(m>>>=4,n-=4,wb=(15&m)+8,0===c.wbits)c.wbits=wb;else if(wb>c.wbits){a.msg="invalid window size",c.mode=lb;break}c.dmax=1<<wb,a.adler=c.check=1,c.mode=512&m?T:V,m=0,n=0;break;case L:for(;16>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(c.flags=m,(255&c.flags)!==J){a.msg="unknown compression method",c.mode=lb;break}if(57344&c.flags){a.msg="unknown header flags set",c.mode=lb;break}c.head&&(c.head.text=m>>8&1),512&c.flags&&(Bb[0]=255&m,Bb[1]=m>>>8&255,c.check=t(c.check,Bb,2,0)),m=0,n=0,c.mode=M;case M:for(;32>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}c.head&&(c.head.time=m),512&c.flags&&(Bb[0]=255&m,Bb[1]=m>>>8&255,Bb[2]=m>>>16&255,Bb[3]=m>>>24&255,c.check=t(c.check,Bb,4,0)),m=0,n=0,c.mode=N;case N:for(;16>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}c.head&&(c.head.xflags=255&m,c.head.os=m>>8),512&c.flags&&(Bb[0]=255&m,Bb[1]=m>>>8&255,c.check=t(c.check,Bb,2,0)),m=0,n=0,c.mode=O;case O:if(1024&c.flags){for(;16>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}c.length=m,c.head&&(c.head.extra_len=m),512&c.flags&&(Bb[0]=255&m,Bb[1]=m>>>8&255,c.check=t(c.check,Bb,2,0)),m=0,n=0}else c.head&&(c.head.extra=null);c.mode=P;case P:if(1024&c.flags&&(q=c.length,q>i&&(q=i),q&&(c.head&&(wb=c.head.extra_len-c.length,c.head.extra||(c.head.extra=new Array(c.head.extra_len)),r.arraySet(c.head.extra,e,g,q,wb)),512&c.flags&&(c.check=t(c.check,e,q,g)),i-=q,g+=q,c.length-=q),c.length))break a;c.length=0,c.mode=Q;case Q:if(2048&c.flags){if(0===i)break a;q=0;do wb=e[g+q++],c.head&&wb&&c.length<65536&&(c.head.name+=String.fromCharCode(wb));while(wb&&i>q);if(512&c.flags&&(c.check=t(c.check,e,q,g)),i-=q,g+=q,wb)break a}else c.head&&(c.head.name=null);c.length=0,c.mode=R;case 
R:if(4096&c.flags){if(0===i)break a;q=0;do wb=e[g+q++],c.head&&wb&&c.length<65536&&(c.head.comment+=String.fromCharCode(wb));while(wb&&i>q);if(512&c.flags&&(c.check=t(c.check,e,q,g)),i-=q,g+=q,wb)break a}else c.head&&(c.head.comment=null);c.mode=S;case S:if(512&c.flags){for(;16>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(m!==(65535&c.check)){a.msg="header crc mismatch",c.mode=lb;break}m=0,n=0}c.head&&(c.head.hcrc=c.flags>>9&1,c.head.done=!0),a.adler=c.check=0,c.mode=V;break;case T:for(;32>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}a.adler=c.check=d(m),m=0,n=0,c.mode=U;case U:if(0===c.havedict)return a.next_out=h,a.avail_out=j,a.next_in=g,a.avail_in=i,c.hold=m,c.bits=n,E;a.adler=c.check=1,c.mode=V;case V:if(b===A||b===B)break a;case W:if(c.last){m>>>=7&n,n-=7&n,c.mode=ib;break}for(;3>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}switch(c.last=1&m,m>>>=1,n-=1,3&m){case 0:c.mode=X;break;case 1:if(k(c),c.mode=bb,b===B){m>>>=2,n-=2;break a}break;case 2:c.mode=$;break;case 3:a.msg="invalid block type",c.mode=lb}m>>>=2,n-=2;break;case X:for(m>>>=7&n,n-=7&n;32>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if((65535&m)!==(m>>>16^65535)){a.msg="invalid stored block lengths",c.mode=lb;break}if(c.length=65535&m,m=0,n=0,c.mode=Y,b===B)break a;case Y:c.mode=Z;case Z:if(q=c.length){if(q>i&&(q=i),q>j&&(q=j),0===q)break a;r.arraySet(f,e,g,q,h),i-=q,g+=q,j-=q,h+=q,c.length-=q;break}c.mode=V;break;case $:for(;14>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(c.nlen=(31&m)+257,m>>>=5,n-=5,c.ndist=(31&m)+1,m>>>=5,n-=5,c.ncode=(15&m)+4,m>>>=4,n-=4,c.nlen>286||c.ndist>30){a.msg="too many length or distance symbols",c.mode=lb;break}c.have=0,c.mode=_;case _:for(;c.have<c.ncode;){for(;3>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}c.lens[Cb[c.have++]]=7&m,m>>>=3,n-=3}for(;c.have<19;)c.lens[Cb[c.have++]]=0;if(c.lencode=c.lendyn,c.lenbits=7,yb={bits:c.lenbits},xb=v(w,c.lens,0,19,c.lencode,0,c.work,yb),c.lenbits=yb.bits,xb){a.msg="invalid code lengths set",c.mode=lb;break}c.have=0,c.mode=ab;case 
ab:for(;c.have<c.nlen+c.ndist;){for(;Ab=c.lencode[m&(1<<c.lenbits)-1],qb=Ab>>>24,rb=Ab>>>16&255,sb=65535&Ab,!(n>=qb);){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(16>sb)m>>>=qb,n-=qb,c.lens[c.have++]=sb;else{if(16===sb){for(zb=qb+2;zb>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(m>>>=qb,n-=qb,0===c.have){a.msg="invalid bit length repeat",c.mode=lb;break}wb=c.lens[c.have-1],q=3+(3&m),m>>>=2,n-=2}else if(17===sb){for(zb=qb+3;zb>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}m>>>=qb,n-=qb,wb=0,q=3+(7&m),m>>>=3,n-=3}else{for(zb=qb+7;zb>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}m>>>=qb,n-=qb,wb=0,q=11+(127&m),m>>>=7,n-=7}if(c.have+q>c.nlen+c.ndist){a.msg="invalid bit length repeat",c.mode=lb;break}for(;q--;)c.lens[c.have++]=wb}}if(c.mode===lb)break;if(0===c.lens[256]){a.msg="invalid code -- missing end-of-block",c.mode=lb;break}if(c.lenbits=9,yb={bits:c.lenbits},xb=v(x,c.lens,0,c.nlen,c.lencode,0,c.work,yb),c.lenbits=yb.bits,xb){a.msg="invalid literal/lengths set",c.mode=lb;break}if(c.distbits=6,c.distcode=c.distdyn,yb={bits:c.distbits},xb=v(y,c.lens,c.nlen,c.ndist,c.distcode,0,c.work,yb),c.distbits=yb.bits,xb){a.msg="invalid distances set",c.mode=lb;break}if(c.mode=bb,b===B)break a;case bb:c.mode=cb;case cb:if(i>=6&&j>=258){a.next_out=h,a.avail_out=j,a.next_in=g,a.avail_in=i,c.hold=m,c.bits=n,u(a,p),h=a.next_out,f=a.output,j=a.avail_out,g=a.next_in,e=a.input,i=a.avail_in,m=c.hold,n=c.bits,c.mode===V&&(c.back=-1);break}for(c.back=0;Ab=c.lencode[m&(1<<c.lenbits)-1],qb=Ab>>>24,rb=Ab>>>16&255,sb=65535&Ab,!(n>=qb);){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(rb&&0===(240&rb)){for(tb=qb,ub=rb,vb=sb;Ab=c.lencode[vb+((m&(1<<tb+ub)-1)>>tb)],qb=Ab>>>24,rb=Ab>>>16&255,sb=65535&Ab,!(n>=tb+qb);){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}m>>>=tb,n-=tb,c.back+=tb}if(m>>>=qb,n-=qb,c.back+=qb,c.length=sb,0===rb){c.mode=hb;break}if(32&rb){c.back=-1,c.mode=V;break}if(64&rb){a.msg="invalid literal/length code",c.mode=lb;break}c.extra=15&rb,c.mode=db;case 
db:if(c.extra){for(zb=c.extra;zb>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}c.length+=m&(1<<c.extra)-1,m>>>=c.extra,n-=c.extra,c.back+=c.extra}c.was=c.length,c.mode=eb;case eb:for(;Ab=c.distcode[m&(1<<c.distbits)-1],qb=Ab>>>24,rb=Ab>>>16&255,sb=65535&Ab,!(n>=qb);){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(0===(240&rb)){for(tb=qb,ub=rb,vb=sb;Ab=c.distcode[vb+((m&(1<<tb+ub)-1)>>tb)],qb=Ab>>>24,rb=Ab>>>16&255,sb=65535&Ab,!(n>=tb+qb);){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}m>>>=tb,n-=tb,c.back+=tb}if(m>>>=qb,n-=qb,c.back+=qb,64&rb){a.msg="invalid distance code",c.mode=lb;break}c.offset=sb,c.extra=15&rb,c.mode=fb;case fb:if(c.extra){for(zb=c.extra;zb>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}c.offset+=m&(1<<c.extra)-1,m>>>=c.extra,n-=c.extra,c.back+=c.extra}if(c.offset>c.dmax){a.msg="invalid distance too far back",c.mode=lb;break}c.mode=gb;case gb:if(0===j)break a;if(q=p-j,c.offset>q){if(q=c.offset-q,q>c.whave&&c.sane){a.msg="invalid distance too far back",c.mode=lb;break}q>c.wnext?(q-=c.wnext,ob=c.wsize-q):ob=c.wnext-q,q>c.length&&(q=c.length),pb=c.window}else pb=f,ob=h-c.offset,q=c.length;q>j&&(q=j),j-=q,c.length-=q;do f[h++]=pb[ob++];while(--q);0===c.length&&(c.mode=cb);break;case hb:if(0===j)break a;f[h++]=c.length,j--,c.mode=cb;break;case ib:if(c.wrap){for(;32>n;){if(0===i)break a;i--,m|=e[g++]<<n,n+=8}if(p-=j,a.total_out+=p,c.total+=p,p&&(a.adler=c.check=c.flags?t(c.check,f,p,h-p):s(c.check,f,p,h-p)),p=j,(c.flags?m:d(m))!==c.check){a.msg="incorrect data check",c.mode=lb;break}m=0,n=0}c.mode=jb;case jb:if(c.wrap&&c.flags){for(;32>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(m!==(4294967295&c.total)){a.msg="incorrect length check",c.mode=lb;break}m=0,n=0}c.mode=kb;case kb:xb=D;break a;case lb:xb=G;break a;case mb:return H;case nb:default:return F}return 
a.next_out=h,a.avail_out=j,a.next_in=g,a.avail_in=i,c.hold=m,c.bits=n,(c.wsize||p!==a.avail_out&&c.mode<lb&&(c.mode<ib||b!==z))&&l(a,a.output,a.next_out,p-a.avail_out)?(c.mode=mb,H):(o-=a.avail_in,p-=a.avail_out,a.total_in+=o,a.total_out+=p,c.total+=p,c.wrap&&p&&(a.adler=c.check=c.flags?t(c.check,f,p,a.next_out-p):s(c.check,f,p,a.next_out-p)),a.data_type=c.bits+(c.last?64:0)+(c.mode===V?128:0)+(c.mode===bb||c.mode===Y?256:0),(0===o&&0===p||b===z)&&xb===C&&(xb=I),xb)}function n(a){if(!a||!a.state)return F;var b=a.state;return b.window&&(b.window=null),a.state=null,C}function o(a,b){var c;return a&&a.state?(c=a.state,0===(2&c.wrap)?F:(c.head=b,b.done=!1,C)):F}var p,q,r=a("../utils/common"),s=a("./adler32"),t=a("./crc32"),u=a("./inffast"),v=a("./inftrees"),w=0,x=1,y=2,z=4,A=5,B=6,C=0,D=1,E=2,F=-2,G=-3,H=-4,I=-5,J=8,K=1,L=2,M=3,N=4,O=5,P=6,Q=7,R=8,S=9,T=10,U=11,V=12,W=13,X=14,Y=15,Z=16,$=17,_=18,ab=19,bb=20,cb=21,db=22,eb=23,fb=24,gb=25,hb=26,ib=27,jb=28,kb=29,lb=30,mb=31,nb=32,ob=852,pb=592,qb=15,rb=qb,sb=!0;c.inflateReset=g,c.inflateReset2=h,c.inflateResetKeep=f,c.inflateInit=j,c.inflateInit2=i,c.inflate=m,c.inflateEnd=n,c.inflateGetHeader=o,c.inflateInfo="pako inflate (from Nodeca project)"},{"../utils/common":27,"./adler32":29,"./crc32":31,"./inffast":34,"./inftrees":36}],36:[function(a,b){"use strict";var c=a("../utils/common"),d=15,e=852,f=592,g=0,h=1,i=2,j=[3,4,5,6,7,8,9,10,11,13,15,17,19,23,27,31,35,43,51,59,67,83,99,115,131,163,195,227,258,0,0],k=[16,16,16,16,16,16,16,16,17,17,17,17,18,18,18,18,19,19,19,19,20,20,20,20,21,21,21,21,16,72,78],l=[1,2,3,4,5,7,9,13,17,25,33,49,65,97,129,193,257,385,513,769,1025,1537,2049,3073,4097,6145,8193,12289,16385,24577,0,0],m=[16,16,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,24,24,25,25,26,26,27,27,28,28,29,29,64,64];b.exports=function(a,b,n,o,p,q,r,s){var t,u,v,w,x,y,z,A,B,C=s.bits,D=0,E=0,F=0,G=0,H=0,I=0,J=0,K=0,L=0,M=0,N=null,O=0,P=new c.Buf16(d+1),Q=new 
c.Buf16(d+1),R=null,S=0;for(D=0;d>=D;D++)P[D]=0;for(E=0;o>E;E++)P[b[n+E]]++;for(H=C,G=d;G>=1&&0===P[G];G--);if(H>G&&(H=G),0===G)return p[q++]=20971520,p[q++]=20971520,s.bits=1,0;for(F=1;G>F&&0===P[F];F++);for(F>H&&(H=F),K=1,D=1;d>=D;D++)if(K<<=1,K-=P[D],0>K)return-1;if(K>0&&(a===g||1!==G))return-1;for(Q[1]=0,D=1;d>D;D++)Q[D+1]=Q[D]+P[D];for(E=0;o>E;E++)0!==b[n+E]&&(r[Q[b[n+E]]++]=E);if(a===g?(N=R=r,y=19):a===h?(N=j,O-=257,R=k,S-=257,y=256):(N=l,R=m,y=-1),M=0,E=0,D=F,x=q,I=H,J=0,v=-1,L=1<<H,w=L-1,a===h&&L>e||a===i&&L>f)return 1;for(var T=0;;){T++,z=D-J,r[E]<y?(A=0,B=r[E]):r[E]>y?(A=R[S+r[E]],B=N[O+r[E]]):(A=96,B=0),t=1<<D-J,u=1<<I,F=u;do u-=t,p[x+(M>>J)+u]=z<<24|A<<16|B|0;while(0!==u);for(t=1<<D-1;M&t;)t>>=1;if(0!==t?(M&=t-1,M+=t):M=0,E++,0===--P[D]){if(D===G)break;D=b[n+r[E]]}if(D>H&&(M&w)!==v){for(0===J&&(J=H),x+=F,I=D-J,K=1<<I;G>I+J&&(K-=P[I+J],!(0>=K));)I++,K<<=1;if(L+=1<<I,a===h&&L>e||a===i&&L>f)return 1;v=M&w,p[v]=H<<24|I<<16|x-q|0}}return 0!==M&&(p[x+M]=D-J<<24|64<<16|0),s.bits=H,0}},{"../utils/common":27}],37:[function(a,b){"use strict";b.exports={2:"need dictionary",1:"stream end",0:"","-1":"file error","-2":"stream error","-3":"data error","-4":"insufficient memory","-5":"buffer error","-6":"incompatible version"}},{}],38:[function(a,b,c){"use strict";function d(a){for(var b=a.length;--b>=0;)a[b]=0}function e(a){return 256>a?gb[a]:gb[256+(a>>>7)]}function f(a,b){a.pending_buf[a.pending++]=255&b,a.pending_buf[a.pending++]=b>>>8&255}function g(a,b,c){a.bi_valid>V-c?(a.bi_buf|=b<<a.bi_valid&65535,f(a,a.bi_buf),a.bi_buf=b>>V-a.bi_valid,a.bi_valid+=c-V):(a.bi_buf|=b<<a.bi_valid&65535,a.bi_valid+=c)}function h(a,b,c){g(a,c[2*b],c[2*b+1])}function i(a,b){var c=0;do c|=1&a,a>>>=1,c<<=1;while(--b>0);return c>>>1}function j(a){16===a.bi_valid?(f(a,a.bi_buf),a.bi_buf=0,a.bi_valid=0):a.bi_valid>=8&&(a.pending_buf[a.pending++]=255&a.bi_buf,a.bi_buf>>=8,a.bi_valid-=8)}function k(a,b){var 
c,d,e,f,g,h,i=b.dyn_tree,j=b.max_code,k=b.stat_desc.static_tree,l=b.stat_desc.has_stree,m=b.stat_desc.extra_bits,n=b.stat_desc.extra_base,o=b.stat_desc.max_length,p=0;for(f=0;U>=f;f++)a.bl_count[f]=0;for(i[2*a.heap[a.heap_max]+1]=0,c=a.heap_max+1;T>c;c++)d=a.heap[c],f=i[2*i[2*d+1]+1]+1,f>o&&(f=o,p++),i[2*d+1]=f,d>j||(a.bl_count[f]++,g=0,d>=n&&(g=m[d-n]),h=i[2*d],a.opt_len+=h*(f+g),l&&(a.static_len+=h*(k[2*d+1]+g)));if(0!==p){do{for(f=o-1;0===a.bl_count[f];)f--;a.bl_count[f]--,a.bl_count[f+1]+=2,a.bl_count[o]--,p-=2}while(p>0);for(f=o;0!==f;f--)for(d=a.bl_count[f];0!==d;)e=a.heap[--c],e>j||(i[2*e+1]!==f&&(a.opt_len+=(f-i[2*e+1])*i[2*e],i[2*e+1]=f),d--)}}function l(a,b,c){var d,e,f=new Array(U+1),g=0;for(d=1;U>=d;d++)f[d]=g=g+c[d-1]<<1;for(e=0;b>=e;e++){var h=a[2*e+1];0!==h&&(a[2*e]=i(f[h]++,h))}}function m(){var a,b,c,d,e,f=new Array(U+1);for(c=0,d=0;O-1>d;d++)for(ib[d]=c,a=0;a<1<<_[d];a++)hb[c++]=d;for(hb[c-1]=d,e=0,d=0;16>d;d++)for(jb[d]=e,a=0;a<1<<ab[d];a++)gb[e++]=d;for(e>>=7;R>d;d++)for(jb[d]=e<<7,a=0;a<1<<ab[d]-7;a++)gb[256+e++]=d;for(b=0;U>=b;b++)f[b]=0;for(a=0;143>=a;)eb[2*a+1]=8,a++,f[8]++;for(;255>=a;)eb[2*a+1]=9,a++,f[9]++;for(;279>=a;)eb[2*a+1]=7,a++,f[7]++;for(;287>=a;)eb[2*a+1]=8,a++,f[8]++;for(l(eb,Q+1,f),a=0;R>a;a++)fb[2*a+1]=5,fb[2*a]=i(a,5);kb=new nb(eb,_,P+1,Q,U),lb=new nb(fb,ab,0,R,U),mb=new nb(new Array(0),bb,0,S,W)}function n(a){var b;for(b=0;Q>b;b++)a.dyn_ltree[2*b]=0;for(b=0;R>b;b++)a.dyn_dtree[2*b]=0;for(b=0;S>b;b++)a.bl_tree[2*b]=0;a.dyn_ltree[2*X]=1,a.opt_len=a.static_len=0,a.last_lit=a.matches=0}function o(a){a.bi_valid>8?f(a,a.bi_buf):a.bi_valid>0&&(a.pending_buf[a.pending++]=a.bi_buf),a.bi_buf=0,a.bi_valid=0}function p(a,b,c,d){o(a),d&&(f(a,c),f(a,~c)),E.arraySet(a.pending_buf,a.window,b,c,a.pending),a.pending+=c}function q(a,b,c,d){var e=2*b,f=2*c;return a[e]<a[f]||a[e]===a[f]&&d[b]<=d[c]}function r(a,b,c){for(var 
d=a.heap[c],e=c<<1;e<=a.heap_len&&(e<a.heap_len&&q(b,a.heap[e+1],a.heap[e],a.depth)&&e++,!q(b,d,a.heap[e],a.depth));)a.heap[c]=a.heap[e],c=e,e<<=1;a.heap[c]=d}function s(a,b,c){var d,f,i,j,k=0;if(0!==a.last_lit)do d=a.pending_buf[a.d_buf+2*k]<<8|a.pending_buf[a.d_buf+2*k+1],f=a.pending_buf[a.l_buf+k],k++,0===d?h(a,f,b):(i=hb[f],h(a,i+P+1,b),j=_[i],0!==j&&(f-=ib[i],g(a,f,j)),d--,i=e(d),h(a,i,c),j=ab[i],0!==j&&(d-=jb[i],g(a,d,j)));while(k<a.last_lit);h(a,X,b)}function t(a,b){var c,d,e,f=b.dyn_tree,g=b.stat_desc.static_tree,h=b.stat_desc.has_stree,i=b.stat_desc.elems,j=-1;for(a.heap_len=0,a.heap_max=T,c=0;i>c;c++)0!==f[2*c]?(a.heap[++a.heap_len]=j=c,a.depth[c]=0):f[2*c+1]=0;for(;a.heap_len<2;)e=a.heap[++a.heap_len]=2>j?++j:0,f[2*e]=1,a.depth[e]=0,a.opt_len--,h&&(a.static_len-=g[2*e+1]);for(b.max_code=j,c=a.heap_len>>1;c>=1;c--)r(a,f,c);e=i;do c=a.heap[1],a.heap[1]=a.heap[a.heap_len--],r(a,f,1),d=a.heap[1],a.heap[--a.heap_max]=c,a.heap[--a.heap_max]=d,f[2*e]=f[2*c]+f[2*d],a.depth[e]=(a.depth[c]>=a.depth[d]?a.depth[c]:a.depth[d])+1,f[2*c+1]=f[2*d+1]=e,a.heap[1]=e++,r(a,f,1);while(a.heap_len>=2);a.heap[--a.heap_max]=a.heap[1],k(a,b),l(f,j,a.bl_count)}function u(a,b,c){var d,e,f=-1,g=b[1],h=0,i=7,j=4;for(0===g&&(i=138,j=3),b[2*(c+1)+1]=65535,d=0;c>=d;d++)e=g,g=b[2*(d+1)+1],++h<i&&e===g||(j>h?a.bl_tree[2*e]+=h:0!==e?(e!==f&&a.bl_tree[2*e]++,a.bl_tree[2*Y]++):10>=h?a.bl_tree[2*Z]++:a.bl_tree[2*$]++,h=0,f=e,0===g?(i=138,j=3):e===g?(i=6,j=3):(i=7,j=4))}function v(a,b,c){var d,e,f=-1,i=b[1],j=0,k=7,l=4;for(0===i&&(k=138,l=3),d=0;c>=d;d++)if(e=i,i=b[2*(d+1)+1],!(++j<k&&e===i)){if(l>j){do h(a,e,a.bl_tree);while(0!==--j)}else 0!==e?(e!==f&&(h(a,e,a.bl_tree),j--),h(a,Y,a.bl_tree),g(a,j-3,2)):10>=j?(h(a,Z,a.bl_tree),g(a,j-3,3)):(h(a,$,a.bl_tree),g(a,j-11,7));j=0,f=e,0===i?(k=138,l=3):e===i?(k=6,l=3):(k=7,l=4)}}function w(a){var b;for(u(a,a.dyn_ltree,a.l_desc.max_code),u(a,a.dyn_dtree,a.d_desc.max_code),t(a,a.bl_desc),b=S-1;b>=3&&0===a.bl_tree[2*cb[b]+1];b--);return 
a.opt_len+=3*(b+1)+5+5+4,b}function x(a,b,c,d){var e;for(g(a,b-257,5),g(a,c-1,5),g(a,d-4,4),e=0;d>e;e++)g(a,a.bl_tree[2*cb[e]+1],3);v(a,a.dyn_ltree,b-1),v(a,a.dyn_dtree,c-1)}function y(a){var b,c=4093624447;for(b=0;31>=b;b++,c>>>=1)if(1&c&&0!==a.dyn_ltree[2*b])return G;if(0!==a.dyn_ltree[18]||0!==a.dyn_ltree[20]||0!==a.dyn_ltree[26])return H;for(b=32;P>b;b++)if(0!==a.dyn_ltree[2*b])return H;return G}function z(a){pb||(m(),pb=!0),a.l_desc=new ob(a.dyn_ltree,kb),a.d_desc=new ob(a.dyn_dtree,lb),a.bl_desc=new ob(a.bl_tree,mb),a.bi_buf=0,a.bi_valid=0,n(a)}function A(a,b,c,d){g(a,(J<<1)+(d?1:0),3),p(a,b,c,!0)}function B(a){g(a,K<<1,3),h(a,X,eb),j(a)}function C(a,b,c,d){var e,f,h=0;a.level>0?(a.strm.data_type===I&&(a.strm.data_type=y(a)),t(a,a.l_desc),t(a,a.d_desc),h=w(a),e=a.opt_len+3+7>>>3,f=a.static_len+3+7>>>3,e>=f&&(e=f)):e=f=c+5,e>=c+4&&-1!==b?A(a,b,c,d):a.strategy===F||f===e?(g(a,(K<<1)+(d?1:0),3),s(a,eb,fb)):(g(a,(L<<1)+(d?1:0),3),x(a,a.l_desc.max_code+1,a.d_desc.max_code+1,h+1),s(a,a.dyn_ltree,a.dyn_dtree)),n(a),d&&o(a)}function D(a,b,c){return a.pending_buf[a.d_buf+2*a.last_lit]=b>>>8&255,a.pending_buf[a.d_buf+2*a.last_lit+1]=255&b,a.pending_buf[a.l_buf+a.last_lit]=255&c,a.last_lit++,0===b?a.dyn_ltree[2*c]++:(a.matches++,b--,a.dyn_ltree[2*(hb[c]+P+1)]++,a.dyn_dtree[2*e(b)]++),a.last_lit===a.lit_bufsize-1}var E=a("../utils/common"),F=4,G=0,H=1,I=2,J=0,K=1,L=2,M=3,N=258,O=29,P=256,Q=P+1+O,R=30,S=19,T=2*Q+1,U=15,V=16,W=7,X=256,Y=16,Z=17,$=18,_=[0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,0],ab=[0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13],bb=[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,3,7],cb=[16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15],db=512,eb=new Array(2*(Q+2));d(eb);var fb=new Array(2*R);d(fb);var gb=new Array(db);d(gb);var hb=new Array(N-M+1);d(hb);var ib=new Array(O);d(ib);var jb=new Array(R);d(jb);var 
kb,lb,mb,nb=function(a,b,c,d,e){this.static_tree=a,this.extra_bits=b,this.extra_base=c,this.elems=d,this.max_length=e,this.has_stree=a&&a.length},ob=function(a,b){this.dyn_tree=a,this.max_code=0,this.stat_desc=b},pb=!1;c._tr_init=z,c._tr_stored_block=A,c._tr_flush_block=C,c._tr_tally=D,c._tr_align=B},{"../utils/common":27}],39:[function(a,b){"use strict";function c(){this.input=null,this.next_in=0,this.avail_in=0,this.total_in=0,this.output=null,this.next_out=0,this.avail_out=0,this.total_out=0,this.msg="",this.state=null,this.data_type=2,this.adler=0}b.exports=c},{}]},{},[9])(9)});'use strict';if(tr.isVinn){global.window={};}'use strict';if(tr.isVinn){global.JSZip=global.window.JSZip;global.window=undefined;}else if(tr.isNode){const jsZipAbsPath=HTMLImportsLoader.hrefToAbsolutePath('/jszip.min.js');const jsZipModule=require(jsZipAbsPath);global.JSZip=jsZipModule;}'use strict';tr.exportTo('tr.e.importer',function(){const GZIP_MEMBER_HEADER_ID_SIZE=3;const GZIP_HEADER_ID1=0x1f;const GZIP_HEADER_ID2=0x8b;const GZIP_DEFLATE_COMPRESSION=8;function GzipImporter(model,eventData){this.inflateAsTraceStream=false;if(typeof(eventData)==='string'||eventData instanceof String){eventData=JSZip.utils.transformTo('uint8array',eventData);}else if(eventData instanceof ArrayBuffer){eventData=new Uint8Array(eventData);}else if(eventData instanceof tr.b.InMemoryTraceStream){eventData=eventData.data;this.inflateAsTraceStream_=true;}else{throw new Error('Unknown gzip data format');} this.model_=model;this.gzipData_=eventData;} GzipImporter.canImport=function(eventData){if(eventData instanceof tr.b.InMemoryTraceStream){eventData=eventData.header;} let header;if(eventData instanceof ArrayBuffer){header=new Uint8Array(eventData.slice(0,GZIP_MEMBER_HEADER_ID_SIZE));}else if(typeof(eventData)==='string'||eventData instanceof String){header=eventData.substring(0,GZIP_MEMBER_HEADER_ID_SIZE);header=JSZip.utils.transformTo('uint8array',header);}else{return false;} @@ -5700,7 +5700,7 @@ 
XMarkerAnnotationView.prototype={__proto__:tr.ui.annotations.AnnotationView.prototype,draw(ctx){const dt=this.viewport_.currentDisplayTransform;const viewX=dt.xWorldToView(this.annotation_.timestamp);ctx.beginPath();tr.ui.b.drawLine(ctx,viewX,0,viewX,ctx.canvas.height);ctx.strokeStyle=this.annotation_.strokeStyle;ctx.stroke();}};return{XMarkerAnnotationView,};});'use strict';tr.exportTo('tr.model',function(){function XMarkerAnnotation(timestamp){tr.model.Annotation.apply(this,arguments);this.timestamp=timestamp;this.strokeStyle='rgba(0, 0, 255, 0.5)';} XMarkerAnnotation.fromDict=function(dict){return new XMarkerAnnotation(dict.args.timestamp);};XMarkerAnnotation.prototype={__proto__:tr.model.Annotation.prototype,toDict(){return{typeName:'xmarker',args:{timestamp:this.timestamp}};},createView_(viewport){return new tr.ui.annotations.XMarkerAnnotationView(viewport,this);}};tr.model.Annotation.register(XMarkerAnnotation,{typeName:'xmarker'});return{XMarkerAnnotation,};});'use strict';tr.exportTo('tr.e.importer',function(){const Base64=tr.b.Base64;const deepCopy=tr.b.deepCopy;const ColorScheme=tr.b.ColorScheme;const HeapDumpTraceEventImporter=tr.e.importer.HeapDumpTraceEventImporter;const LegacyHeapDumpTraceEventImporter=tr.e.importer.LegacyHeapDumpTraceEventImporter;const StreamingEventExpander=tr.e.importer.StreamingEventExpander;const ProfilingDictionaryReader=tr.e.importer.ProfilingDictionaryReader;function getEventColor(event,opt_customName){if(event.cname){return ColorScheme.getColorIdForReservedName(event.cname);}else if(opt_customName||event.name){return ColorScheme.getColorIdForGeneralPurposeString(opt_customName||event.name);}} function isLegacyChromeClockSyncEvent(event){return event.name!==undefined&&event.name.startsWith(LEGACY_CHROME_CLOCK_SYNC_EVENT_NAME_PREFIX)&&((event.ph==='S')||(event.ph==='F'));} -const PRODUCER='producer';const CONSUMER='consumer';const STEP='step';const BACKGROUND=tr.model.ContainerMemoryDump.LevelOfDetail.BACKGROUND;const 
LIGHT=tr.model.ContainerMemoryDump.LevelOfDetail.LIGHT;const DETAILED=tr.model.ContainerMemoryDump.LevelOfDetail.DETAILED;const MEMORY_DUMP_LEVEL_OF_DETAIL_ORDER=[undefined,BACKGROUND,LIGHT,DETAILED];const GLOBAL_MEMORY_ALLOCATOR_DUMP_PREFIX='global/';const LEGACY_CHROME_CLOCK_SYNC_EVENT_NAME_PREFIX='ClockSyncEvent.';const BYTE_STAT_NAME_MAP={'pc':'privateCleanResident','pd':'privateDirtyResident','sc':'sharedCleanResident','sd':'sharedDirtyResident','pss':'proportionalResident','sw':'swapped'};const WEAK_MEMORY_ALLOCATOR_DUMP_FLAG=1<<0;const OBJECT_TYPE_NAME_PATTERNS=[{prefix:'const char *WOW::getStringWithTypeName() [T = ',suffix:']'},{prefix:'const char* WOW::getStringWithTypeName() [with T = ',suffix:']'},{prefix:'const char *__cdecl WOW::getStringWithTypeName<',suffix:'>(void)'}];const SUBTRACE_FIELDS=new Set(['powerTraceAsString','systemTraceEvents',]);const NON_METADATA_FIELDS=new Set(['displayTimeUnit','samples','stackFrames','traceAnnotations','traceEvents',...SUBTRACE_FIELDS]);function TraceEventImporter(model,eventData){this.hasEvents_=undefined;this.importPriority=1;this.model_=model;this.events_=undefined;this.sampleEvents_=undefined;this.stackFrameEvents_=undefined;this.stackFrameTree_=new tr.model.ProfileTree();this.subtraces_=[];this.eventsWereFromString_=false;this.softwareMeasuredCpuCount_=undefined;this.allAsyncEvents_=[];this.allFlowEvents_=[];this.allObjectEvents_=[];this.contextProcessorPerThread={};this.traceEventSampleStackFramesByName_={};this.v8ProcessCodeMaps_={};this.v8ProcessRootStackFrame_={};this.v8SamplingData_=[];this.profileTrees_=new Map();this.profileInfo_=new Map();this.legacyChromeClockSyncStartEvent_=undefined;this.legacyChromeClockSyncFinishEvent_=undefined;this.allMemoryDumpEvents_={};this.heapProfileExpander=new ProfilingDictionaryReader();this.objectTypeNameMap_={};this.clockDomainId_=tr.model.ClockDomainId.UNKNOWN_CHROME_LEGACY;this.toModelTime_=undefined;if(typeof(eventData)==='string'||eventData instanceof 
String){eventData=eventData.trim();if(eventData[0]==='['){eventData=eventData.replace(/\s*,\s*$/,'');if(eventData[eventData.length-1]!==']'){eventData=eventData+']';}} +const PRODUCER='producer';const CONSUMER='consumer';const STEP='step';const BACKGROUND=tr.model.ContainerMemoryDump.LevelOfDetail.BACKGROUND;const LIGHT=tr.model.ContainerMemoryDump.LevelOfDetail.LIGHT;const DETAILED=tr.model.ContainerMemoryDump.LevelOfDetail.DETAILED;const MEMORY_DUMP_LEVEL_OF_DETAIL_ORDER=[undefined,BACKGROUND,LIGHT,DETAILED];const GLOBAL_MEMORY_ALLOCATOR_DUMP_PREFIX='global/';const LEGACY_CHROME_CLOCK_SYNC_EVENT_NAME_PREFIX='ClockSyncEvent.';const BYTE_STAT_NAME_MAP={'pc':'privateCleanResident','pd':'privateDirtyResident','sc':'sharedCleanResident','sd':'sharedDirtyResident','pss':'proportionalResident','sw':'swapped'};const WEAK_MEMORY_ALLOCATOR_DUMP_FLAG=1<<0;const OBJECT_TYPE_NAME_PATTERNS=[{prefix:'const char *WTF::getStringWithTypeName() [T = ',suffix:']'},{prefix:'const char* WTF::getStringWithTypeName() [with T = ',suffix:']'},{prefix:'const char *__cdecl WTF::getStringWithTypeName<',suffix:'>(void)'}];const SUBTRACE_FIELDS=new Set(['powerTraceAsString','systemTraceEvents',]);const NON_METADATA_FIELDS=new Set(['displayTimeUnit','samples','stackFrames','traceAnnotations','traceEvents',...SUBTRACE_FIELDS]);function TraceEventImporter(model,eventData){this.hasEvents_=undefined;this.importPriority=1;this.model_=model;this.events_=undefined;this.sampleEvents_=undefined;this.stackFrameEvents_=undefined;this.stackFrameTree_=new tr.model.ProfileTree();this.subtraces_=[];this.eventsWereFromString_=false;this.softwareMeasuredCpuCount_=undefined;this.allAsyncEvents_=[];this.allFlowEvents_=[];this.allObjectEvents_=[];this.contextProcessorPerThread={};this.traceEventSampleStackFramesByName_={};this.v8ProcessCodeMaps_={};this.v8ProcessRootStackFrame_={};this.v8SamplingData_=[];this.profileTrees_=new Map();this.profileInfo_=new 
Map();this.legacyChromeClockSyncStartEvent_=undefined;this.legacyChromeClockSyncFinishEvent_=undefined;this.allMemoryDumpEvents_={};this.heapProfileExpander=new ProfilingDictionaryReader();this.objectTypeNameMap_={};this.clockDomainId_=tr.model.ClockDomainId.UNKNOWN_CHROME_LEGACY;this.toModelTime_=undefined;if(typeof(eventData)==='string'||eventData instanceof String){eventData=eventData.trim();if(eventData[0]==='['){eventData=eventData.replace(/\s*,\s*$/,'');if(eventData[eventData.length-1]!==']'){eventData=eventData+']';}} this.events_=JSON.parse(eventData);this.eventsWereFromString_=true;}else{this.events_=eventData;} if(this.events_.traceEvents){const container=this.events_;this.events_=this.events_.traceEvents;for(const subtraceField of SUBTRACE_FIELDS){if(container[subtraceField]){this.storeSubtrace_(container[subtraceField]);}} this.storeSamples_(container.samples);this.storeStackFrames_(container.stackFrames);this.storeDisplayTimeUnit_(container.displayTimeUnit);this.storeTraceAnnotations_(container.traceAnnotations);this.storeMetadata_(container);}else if(this.events_ instanceof tr.b.TraceStream){const parser=oboe().node('{cat ph}',function(e){return oboe.drop;}).node('!.powerTraceAsString',this.storeSubtrace_.bind(this)).node('!.systemTraceEvents',this.storeSubtrace_.bind(this)).node('!.samples',this.storeSamples_.bind(this)).node('!.stackFrames',this.storeStackFrames_.bind(this)).node('!.displayTimeUnit',this.storeDisplayTimeUnit_.bind(this)).node('!.traceAnnotations',this.storeTraceAnnotations_.bind(this)).done(this.storeMetadata_.bind(this));this.events_.rewind();while(this.events_.hasData){parser.write(this.events_.readNumBytes());} @@ -7461,10 +7461,10 @@ if(dict.callback===undefined){throw new Error('callback must be given');} this.eventType_=dict.eventType;this.keyCodes_=[];if(dict.keyCode){this.pushKeyCode_(dict.keyCode);}else if(dict.keyCodes){dict.keyCodes.forEach(this.pushKeyCode_,this);} 
this.useCapture_=!!dict.useCapture;this.callback_=dict.callback;this.thisArg_=dict.thisArg!==undefined?dict.thisArg:undefined;this.helpText_=dict.helpText!==undefined?dict.helpText:undefined;} -HotKey.prototype={get eventType(){return this.eventType_;},get keyCodes(){return this.keyCodes_;},get helpText(){return this.helpText_;},call(e){this.callback_.call(this.thisArg_,e);},pushKeyCode_(keyCode){this.keyCodes_.push(keyCode);}};return{HotKey,};});'use strict';Polymer({is:'tv-ui-b-hotkey-controller',created(){this.isAttached_=false;this.globalMode_=false;this.coupledToParentController_=undefined;this.curHost_=undefined;this.childControllers_=[];this.bubblingKeyDownHotKeys_={};this.capturingKeyDownHotKeys_={};this.bubblingKeyPressHotKeys_={};this.capturingKeyPressHotKeys_={};this.onBubblingKeyDown_=this.onKey_.bind(this,false);this.onCapturingKeyDown_=this.onKey_.bind(this,true);this.onBubblingKeyPress_=this.onKey_.bind(this,false);this.onCapturingKeyPress_=this.onKey_.bind(this,true);},attached(){this.isAttached_=true;const host=this.findHost_();if(host.__hotkeyController){throw new Error('Multiple hotkey controllers attached to this host');} +HotKey.prototype={get eventType(){return this.eventType_;},get keyCodes(){return this.keyCodes_;},get helpText(){return this.helpText_;},call(e){this.callback_.call(this.thisArg_,e);},pushKeyCode_(keyCode){this.keyCodes_.push(keyCode);}};return{HotKey,};});'use 
strict';Polymer({is:'tv-ui-b-hotkey-controller',created(){this.isAttached_=false;this.globalMode_=false;this.slavedToParentController_=undefined;this.curHost_=undefined;this.childControllers_=[];this.bubblingKeyDownHotKeys_={};this.capturingKeyDownHotKeys_={};this.bubblingKeyPressHotKeys_={};this.capturingKeyPressHotKeys_={};this.onBubblingKeyDown_=this.onKey_.bind(this,false);this.onCapturingKeyDown_=this.onKey_.bind(this,true);this.onBubblingKeyPress_=this.onKey_.bind(this,false);this.onCapturingKeyPress_=this.onKey_.bind(this,true);},attached(){this.isAttached_=true;const host=this.findHost_();if(host.__hotkeyController){throw new Error('Multiple hotkey controllers attached to this host');} host.__hotkeyController=this;this.curHost_=host;let parentElement;if(host.parentElement){parentElement=host.parentElement;}else{parentElement=Polymer.dom(host).parentNode.host;} -const parentController=tr.b.getHotkeyControllerForElement(parentElement);if(parentController){this.coupledToParentController_=parentController;parentController.addChildController_(this);return;} -host.addEventListener('keydown',this.onBubblingKeyDown_,false);host.addEventListener('keydown',this.onCapturingKeyDown_,true);host.addEventListener('keypress',this.onBubblingKeyPress_,false);host.addEventListener('keypress',this.onCapturingKeyPress_,true);},detached(){this.isAttached_=false;const host=this.curHost_;if(!host)return;delete host.__hotkeyController;this.curHost_=undefined;if(this.coupledToParentController_){this.coupledToParentController_.removeChildController_(this);this.coupledToParentController_=undefined;return;} +const parentController=tr.b.getHotkeyControllerForElement(parentElement);if(parentController){this.slavedToParentController_=parentController;parentController.addChildController_(this);return;} 
+host.addEventListener('keydown',this.onBubblingKeyDown_,false);host.addEventListener('keydown',this.onCapturingKeyDown_,true);host.addEventListener('keypress',this.onBubblingKeyPress_,false);host.addEventListener('keypress',this.onCapturingKeyPress_,true);},detached(){this.isAttached_=false;const host=this.curHost_;if(!host)return;delete host.__hotkeyController;this.curHost_=undefined;if(this.slavedToParentController_){this.slavedToParentController_.removeChildController_(this);this.slavedToParentController_=undefined;return;} host.removeEventListener('keydown',this.onBubblingKeyDown_,false);host.removeEventListener('keydown',this.onCapturingKeyDown_,true);host.removeEventListener('keypress',this.onBubblingKeyPress_,false);host.removeEventListener('keypress',this.onCapturingKeyPress_,true);},addChildController_(controller){const i=this.childControllers_.indexOf(controller);if(i!==-1){throw new Error('Controller already registered');} this.childControllers_.push(controller);},removeChildController_(controller){const i=this.childControllers_.indexOf(controller);if(i===-1){throw new Error('Controller not registered');} this.childControllers_.splice(i,1);return controller;},getKeyMapForEventType_(eventType,useCapture){if(eventType==='keydown'){if(!useCapture){return this.bubblingKeyDownHotKeys_;} @@ -7479,7 +7479,7 @@ keyMap[keyCode]=hotKey;} for(let i=0;i<hotKey.keyCodes.length;i++){const keyCode=hotKey.keyCodes[i];delete keyMap[keyCode];} return hotKey;},get globalMode(){return this.globalMode_;},set globalMode(globalMode){const wasAttached=this.isAttached_;if(wasAttached){this.detached();} -this.globalMode_=!!globalMode;if(wasAttached){this.attached();}},get topmostConroller_(){if(this.coupledToParentController_){return this.coupledToParentController_.topmostConroller_;} +this.globalMode_=!!globalMode;if(wasAttached){this.attached();}},get topmostConroller_(){if(this.slavedToParentController_){return this.slavedToParentController_.topmostConroller_;} return 
this;},childRequestsGeneralFocus(child){const topmost=this.topmostConroller_;if(topmost.curHost_){if(topmost.curHost_.hasAttribute('tabIndex')){topmost.curHost_.focus();}else{if(document.activeElement){document.activeElement.blur();}}}else{if(document.activeElement){document.activeElement.blur();}}},childRequestsBlur(child){child.blur();const topmost=this.topmostConroller_;if(topmost.curHost_){topmost.curHost_.focus();}},findHost_(){if(this.globalMode_)return document.body;if(this.parentElement)return this.parentElement;if(!Polymer.dom(this).parentNode)return this.host;let node=this.parentNode;while(Polymer.dom(node).parentNode)node=Polymer.dom(node).parentNode;return node.host;},appendMatchingHotKeysTo_(matchedHotKeys,useCapture,e){const localKeyMap=this.getKeyMapForEventType_(e.type,useCapture);const localHotKey=localKeyMap[e.keyCode];if(localHotKey){matchedHotKeys.push(localHotKey);} for(let i=0;i<this.childControllers_.length;i++){const controller=this.childControllers_[i];controller.appendMatchingHotKeysTo_(matchedHotKeys,useCapture,e);}},onKey_(useCapture,e){if(!useCapture&&e.path[0].tagName==='INPUT')return;let sortedControllers;const matchedHotKeys=[];this.appendMatchingHotKeysTo_(matchedHotKeys,useCapture,e);if(matchedHotKeys.length===0)return false;if(matchedHotKeys.length>1){throw new Error('More than one hotKey is currently unsupported');} const hotKey=matchedHotKeys[0];let prevented=0;prevented|=hotKey.call(e);return!prevented&&e.defaultPrevented;}});'use strict';tr.exportTo('tr.b',function(){function getHotkeyControllerForElement(refElement){let curElement=refElement;while(curElement){if(curElement.tagName==='tv-ui-b-hotkey-controller'){return curElement;} @@ -7829,7 +7829,7 @@ const ans={supported:false};for(const proc of Object.values(m.processes)){proc.objects.iterObjectInstances(function(instance){if(instance instanceof BlameContextInstance){ans.supported=true;}});} if(!ans.supported){ans.reason='No frame data available';} return ans;},get 
currentRangeOfInterest(){if(this.rangeOfInterest_.isEmpty){return this.model_.bounds;} -return this.rangeOfInterest_;},get rangeOfInterest(){return this.rangeOfInterest_;},set rangeOfInterest(rangeOfInterest){this.rangeOfInterest_=rangeOfInterest;this.updateContents_();},get selection(){},set selection(_){},get textLabel(){return'Frame Data';},get model(){return this.model_;},set model(model){this.model_=model;this.updateContents_();}});tr.ui.side_panel.SidePanelRegistry.register(function(){return document.createElement('tr-ui-e-s-frame-data-side-panel');});});'use strict';Polymer({is:'tr-ui-b-chart-legend-key',ready(){this.$.checkbox.addEventListener('change',this.onCheckboxChange_.bind(this));},onCheckboxChange_(){tr.b.dispatchSimpleEvent(this,tr.ui.b.DataSeriesEnableChangeEventType,true,false,{key:Polymer.dom(this).textContent,enabled:this.enabled});},set textContent(t){Polymer.dom(this.$.label).textContent=t;Polymer.dom(this.$.link).textContent=t;this.updateContents_();},set width(w){w-=20;this.$.link.style.width=w+'px';this.$.label.style.width=w+'px';},get textContent(){return Polymer.dom(this.$.label).textContent;},set optional(optional){this.$.checkbox.style.visibility=optional?'visible':'hidden';},get optional(){return this.$.checkbox.style.visibility==='visible';},set enabled(enabled){this.$.checkbox.checked=enabled?'checked':'';},get enabled(){return this.$.checkbox.checked;},set color(c){this.$.label.style.color=c;this.$.link.color=c;},set target(target){this.$.link.setSelectionAndContent(target,Polymer.dom(this.$.label).textContent);this.updateContents_();},get target(){return this.$.link.selection;},set title(title){this.$.link.title=title;},updateContents_(){this.$.link.style.display=this.target?'':'none';this.$.label.style.display=this.target?'none':'';this.$.label.htmlFor=this.optional?'checkbox':'';}});'use strict';(function(window){window.define=function(x){window.d3=x;};window.define.amd=true;})(this);!function(){function n(n){return 
null!=n&&!isNaN(n)}function t(n){return n.length}function e(n){for(var t=1;n*t%1;)t*=10;return t}function r(n,t){try{for(var e in t)Object.defineProperty(n.prototype,e,{value:t[e],enumerable:!1})}catch(r){n.prototype=t}}function u(){}function i(n){return aa+n in this}function o(n){return n=aa+n,n in this&&delete this[n]}function a(){var n=[];return this.forEach(function(t){n.push(t)}),n}function c(){var n=0;for(var t in this)t.charCodeAt(0)===ca&&++n;return n}function s(){for(var n in this)if(n.charCodeAt(0)===ca)return!1;return!0}function l(){}function f(n,t,e){return function(){var r=e.apply(t,arguments);return r===t?n:r}}function h(n,t){if(t in n)return t;t=t.charAt(0).toUpperCase()+t.substring(1);for(var e=0,r=sa.length;r>e;++e){var u=sa[e]+t;if(u in n)return u}}function g(){}function p(){}function v(n){function t(){for(var t,r=e,u=-1,i=r.length;++u<i;)(t=r[u].on)&&t.apply(this,arguments);return n}var e=[],r=new u;return t.on=function(t,u){var i,o=r.get(t);return arguments.length<2?o&&o.on:(o&&(o.on=null,e=e.slice(0,i=e.indexOf(o)).concat(e.slice(i+1)),r.remove(t)),u&&e.push(r.set(t,{on:u})),n)},t}function d(){Xo.event.preventDefault()}function m(){for(var n,t=Xo.event;n=t.sourceEvent;)t=n;return t}function y(n){for(var t=new p,e=0,r=arguments.length;++e<r;)t[arguments[e]]=v(t);return t.of=function(e,r){return function(u){try{var i=u.sourceEvent=Xo.event;u.target=n,Xo.event=u,t[u.type].apply(e,r)}finally{Xo.event=i}}},t}function x(n){return fa(n,da),n}function M(n){return"function"==typeof n?n:function(){return ha(n,this)}}function _(n){return"function"==typeof n?n:function(){return ga(n,this)}}function b(n,t){function e(){this.removeAttribute(n)}function r(){this.removeAttributeNS(n.space,n.local)}function u(){this.setAttribute(n,t)}function i(){this.setAttributeNS(n.space,n.local,t)}function o(){var e=t.apply(this,arguments);null==e?this.removeAttribute(n):this.setAttribute(n,e)}function a(){var 
e=t.apply(this,arguments);null==e?this.removeAttributeNS(n.space,n.local):this.setAttributeNS(n.space,n.local,e)}return n=Xo.ns.qualify(n),null==t?n.local?r:e:"function"==typeof t?n.local?a:o:n.local?i:u}function w(n){return n.trim().replace(/\s+/g," ")}function S(n){return new RegExp("(?:^|\\s+)"+Xo.requote(n)+"(?:\\s+|$)","g")}function k(n){return n.trim().split(/^|\s+/)}function E(n,t){function e(){for(var e=-1;++e<u;)n[e](this,t)}function r(){for(var e=-1,r=t.apply(this,arguments);++e<u;)n[e](this,r)}n=k(n).map(A);var u=n.length;return"function"==typeof t?r:e}function A(n){var t=S(n);return function(e,r){if(u=e.classList)return r?u.add(n):u.remove(n);var u=e.getAttribute("class")||"";r?(t.lastIndex=0,t.test(u)||e.setAttribute("class",w(u+" "+n))):e.setAttribute("class",w(u.replace(t," ")))}}function C(n,t,e){function r(){this.style.removeProperty(n)}function u(){this.style.setProperty(n,t,e)}function i(){var r=t.apply(this,arguments);null==r?this.style.removeProperty(n):this.style.setProperty(n,r,e)}return null==t?r:"function"==typeof t?i:u}function N(n,t){function e(){delete this[n]}function r(){this[n]=t}function u(){var e=t.apply(this,arguments);null==e?delete this[n]:this[n]=e}return null==t?e:"function"==typeof t?u:r}function L(n){return"function"==typeof n?n:(n=Xo.ns.qualify(n)).local?function(){return this.ownerDocument.createElementNS(n.space,n.local)}:function(){return this.ownerDocument.createElementNS(this.namespaceURI,n)}}function T(n){return{__data__:n}}function q(n){return function(){return va(this,n)}}function z(n){return arguments.length||(n=Xo.ascending),function(t,e){return t&&e?n(t.__data__,e.__data__):!t-!e}}function R(n,t){for(var e=0,r=n.length;r>e;e++)for(var u,i=n[e],o=0,a=i.length;a>o;o++)(u=i[o])&&t(u,o,e);return n}function D(n){return fa(n,ya),n}function P(n){var t,e;return function(r,u,i){var o,a=n[i].update,c=a.length;for(i!=e&&(e=i,t=0),u>=t&&(t=u+1);!(o=a[t])&&++t<c;);return o}}function U(){var 
n=this.__transition__;n&&++n.active}function j(n,t,e){function r(){var t=this[o];t&&(this.removeEventListener(n,t,t.$),delete this[o])}function u(){var u=c(t,Bo(arguments));r.call(this),this.addEventListener(n,this[o]=u,u.$=e),u._=t}function i(){var t,e=new RegExp("^__on([^.]+)"+Xo.requote(n)+"$");for(var r in this)if(t=r.match(e)){var u=this[r];this.removeEventListener(t[1],u,u.$),delete this[r]}}var o="__on"+n,a=n.indexOf("."),c=H;a>0&&(n=n.substring(0,a));var s=Ma.get(n);return s&&(n=s,c=F),a?t?u:r:t?g:i}function H(n,t){return function(e){var r=Xo.event;Xo.event=e,t[0]=this.__data__;try{n.apply(this,t)}finally{Xo.event=r}}}function F(n,t){var e=H(n,t);return function(n){var t=this,r=n.relatedTarget;r&&(r===t||8&r.compareDocumentPosition(t))||e.call(t,n)}}function O(){var n=".dragsuppress-"+ ++ba,t="click"+n,e=Xo.select(Go).on("touchmove"+n,d).on("dragstart"+n,d).on("selectstart"+n,d);if(_a){var r=Jo.style,u=r[_a];r[_a]="none"}return function(i){function o(){e.on(t,null)}e.on(n,null),_a&&(r[_a]=u),i&&(e.on(t,function(){d(),o()},!0),setTimeout(o,0))}}function Y(n,t){t.changedTouches&&(t=t.changedTouches[0]);var e=n.ownerSVGElement||n;if(e.createSVGPoint){var r=e.createSVGPoint();if(0>wa&&(Go.scrollX||Go.scrollY)){e=Xo.select("body").append("svg").style({position:"absolute",top:0,left:0,margin:0,padding:0,border:"none"},"important");var u=e[0][0].getScreenCTM();wa=!(u.f||u.e),e.remove()}return wa?(r.x=t.pageX,r.y=t.pageY):(r.x=t.clientX,r.y=t.clientY),r=r.matrixTransform(n.getScreenCTM().inverse()),[r.x,r.y]}var i=n.getBoundingClientRect();return[t.clientX-i.left-n.clientLeft,t.clientY-i.top-n.clientTop]}function I(n){return n>0?1:0>n?-1:0}function Z(n,t,e){return(t[0]-n[0])*(e[1]-n[1])-(t[1]-n[1])*(e[0]-n[0])}function V(n){return n>1?0:-1>n?Sa:Math.acos(n)}function X(n){return n>1?Ea:-1>n?-Ea:Math.asin(n)}function $(n){return((n=Math.exp(n))-1/n)/2}function B(n){return((n=Math.exp(n))+1/n)/2}function W(n){return((n=Math.exp(2*n))-1)/(n+1)}function 
J(n){return(n=Math.sin(n/2))*n}function G(){}function K(n,t,e){return new Q(n,t,e)}function Q(n,t,e){this.h=n,this.s=t,this.l=e}function nt(n,t,e){function r(n){return n>360?n-=360:0>n&&(n+=360),60>n?i+(o-i)*n/60:180>n?o:240>n?i+(o-i)*(240-n)/60:i}function u(n){return Math.round(255*r(n))}var i,o;return n=isNaN(n)?0:(n%=360)<0?n+360:n,t=isNaN(t)?0:0>t?0:t>1?1:t,e=0>e?0:e>1?1:e,o=.5>=e?e*(1+t):e+t-e*t,i=2*e-o,gt(u(n+120),u(n),u(n-120))}function tt(n,t,e){return new et(n,t,e)}function et(n,t,e){this.h=n,this.c=t,this.l=e}function rt(n,t,e){return isNaN(n)&&(n=0),isNaN(t)&&(t=0),ut(e,Math.cos(n*=Na)*t,Math.sin(n)*t)}function ut(n,t,e){return new it(n,t,e)}function it(n,t,e){this.l=n,this.a=t,this.b=e}function ot(n,t,e){var r=(n+16)/116,u=r+t/500,i=r-e/200;return u=ct(u)*Fa,r=ct(r)*Oa,i=ct(i)*Ya,gt(lt(3.2404542*u-1.5371385*r-.4985314*i),lt(-.969266*u+1.8760108*r+.041556*i),lt(.0556434*u-.2040259*r+1.0572252*i))}function at(n,t,e){return n>0?tt(Math.atan2(e,t)*La,Math.sqrt(t*t+e*e),n):tt(0/0,0/0,n)}function ct(n){return n>.206893034?n*n*n:(n-4/29)/7.787037}function st(n){return n>.008856?Math.pow(n,1/3):7.787037*n+4/29}function lt(n){return Math.round(255*(.00304>=n?12.92*n:1.055*Math.pow(n,1/2.4)-.055))}function ft(n){return gt(n>>16,255&n>>8,255&n)}function ht(n){return ft(n)+""}function gt(n,t,e){return new pt(n,t,e)}function pt(n,t,e){this.r=n,this.g=t,this.b=e}function vt(n){return 16>n?"0"+Math.max(0,n).toString(16):Math.min(255,n).toString(16)}function dt(n,t,e){var r,u,i,o,a=0,c=0,s=0;if(u=/([a-z]+)\((.*)\)/i.exec(n))switch(i=u[2].split(","),u[1]){case"hsl":return e(parseFloat(i[0]),parseFloat(i[1])/100,parseFloat(i[2])/100);case"rgb":return t(Mt(i[0]),Mt(i[1]),Mt(i[2]))}return(o=Va.get(n))?t(o.r,o.g,o.b):(null!=n&&"#"===n.charAt(0)&&(r=parseInt(n.substring(1),16),isNaN(r)||(4===n.length?(a=(3840&r)>>4,a=a>>4|a,c=240&r,c=c>>4|c,s=15&r,s=s<<4|s):7===n.length&&(a=(16711680&r)>>16,c=(65280&r)>>8,s=255&r))),t(a,c,s))}function mt(n,t,e){var 
r,u,i=Math.min(n/=255,t/=255,e/=255),o=Math.max(n,t,e),a=o-i,c=(o+i)/2;return a?(u=.5>c?a/(o+i):a/(2-o-i),r=n==o?(t-e)/a+(e>t?6:0):t==o?(e-n)/a+2:(n-t)/a+4,r*=60):(r=0/0,u=c>0&&1>c?0:r),K(r,u,c)}function yt(n,t,e){n=xt(n),t=xt(t),e=xt(e);var r=st((.4124564*n+.3575761*t+.1804375*e)/Fa),u=st((.2126729*n+.7151522*t+.072175*e)/Oa),i=st((.0193339*n+.119192*t+.9503041*e)/Ya);return ut(116*u-16,500*(r-u),200*(u-i))}function xt(n){return(n/=255)<=.04045?n/12.92:Math.pow((n+.055)/1.055,2.4)}function Mt(n){var t=parseFloat(n);return"%"===n.charAt(n.length-1)?Math.round(2.55*t):t}function _t(n){return"function"==typeof n?n:function(){return n}}function bt(n){return n}function wt(n){return function(t,e,r){return 2===arguments.length&&"function"==typeof e&&(r=e,e=null),St(t,e,n,r)}}function St(n,t,e,r){function u(){var n,t=c.status;if(!t&&c.responseText||t>=200&&300>t||304===t){try{n=e.call(i,c)}catch(r){return o.error.call(i,r),void 0}o.load.call(i,n)}else o.error.call(i,c)}var i={},o=Xo.dispatch("beforesend","progress","load","error"),a={},c=new XMLHttpRequest,s=null;return!Go.XDomainRequest||"withCredentials"in c||!/^(http(s)?:)?\/\//.test(n)||(c=new XDomainRequest),"onload"in c?c.onload=c.onerror=u:c.onreadystatechange=function(){c.readyState>3&&u()},c.onprogress=function(n){var t=Xo.event;Xo.event=n;try{o.progress.call(i,c)}finally{Xo.event=t}},i.header=function(n,t){return n=(n+"").toLowerCase(),arguments.length<2?a[n]:(null==t?delete a[n]:a[n]=t+"",i)},i.mimeType=function(n){return arguments.length?(t=null==n?null:n+"",i):t},i.responseType=function(n){return arguments.length?(s=n,i):s},i.response=function(n){return e=n,i},["get","post"].forEach(function(n){i[n]=function(){return i.send.apply(i,[n].concat(Bo(arguments)))}}),i.send=function(e,r,u){if(2===arguments.length&&"function"==typeof r&&(u=r,r=null),c.open(e,n,!0),null==t||"accept"in a||(a.accept=t+",*/*"),c.setRequestHeader)for(var l in a)c.setRequestHeader(l,a[l]);return 
null!=t&&c.overrideMimeType&&c.overrideMimeType(t),null!=s&&(c.responseType=s),null!=u&&i.on("error",u).on("load",function(n){u(null,n)}),o.beforesend.call(i,c),c.send(null==r?null:r),i},i.abort=function(){return c.abort(),i},Xo.rebind(i,o,"on"),null==r?i:i.get(kt(r))}function kt(n){return 1===n.length?function(t,e){n(null==t?e:null)}:n}function Et(){var n=At(),t=Ct()-n;t>24?(isFinite(t)&&(clearTimeout(Wa),Wa=setTimeout(Et,t)),Ba=0):(Ba=1,Ga(Et))}function At(){var n=Date.now();for(Ja=Xa;Ja;)n>=Ja.t&&(Ja.f=Ja.c(n-Ja.t)),Ja=Ja.n;return n}function Ct(){for(var n,t=Xa,e=1/0;t;)t.f?t=n?n.n=t.n:Xa=t.n:(t.t<e&&(e=t.t),t=(n=t).n);return $a=n,e}function Nt(n,t){return t-(n?Math.ceil(Math.log(n)/Math.LN10):1)}function Lt(n,t){var e=Math.pow(10,3*oa(8-t));return{scale:t>8?function(n){return n/e}:function(n){return n*e},symbol:n}}function Tt(n){var t=n.decimal,e=n.thousands,r=n.grouping,u=n.currency,i=r?function(n){for(var t=n.length,u=[],i=0,o=r[0];t>0&&o>0;)u.push(n.substring(t-=o,t+o)),o=r[i=(i+1)%r.length];return u.reverse().join(e)}:bt;return function(n){var e=Qa.exec(n),r=e[1]||" ",o=e[2]||">",a=e[3]||"",c=e[4]||"",s=e[5],l=+e[6],f=e[7],h=e[8],g=e[9],p=1,v="",d="",m=!1;switch(h&&(h=+h.substring(1)),(s||"0"===r&&"="===o)&&(s=r="0",o="=",f&&(l-=Math.floor((l-1)/4))),g){case"n":f=!0,g="g";break;case"%":p=100,d="%",g="f";break;case"p":p=100,d="%",g="r";break;case"b":case"o":case"x":case"X":"#"===c&&(v="0"+g.toLowerCase());case"c":case"d":m=!0,h=0;break;case"s":p=-1,g="r"}"$"===c&&(v=u[0],d=u[1]),"r"!=g||h||(g="g"),null!=h&&("g"==g?h=Math.max(1,Math.min(21,h)):("e"==g||"f"==g)&&(h=Math.max(0,Math.min(20,h)))),g=nc.get(g)||qt;var y=s&&f;return function(n){var e=d;if(m&&n%1)return"";var u=0>n||0===n&&0>1/n?(n=-n,"-"):a;if(0>p){var c=Xo.formatPrefix(n,h);n=c.scale(n),e=c.symbol+d}else n*=p;n=g(n,h);var x=n.lastIndexOf("."),M=0>x?n:n.substring(0,x),_=0>x?"":t+n.substring(x+1);!s&&f&&(M=i(M));var b=v.length+M.length+_.length+(y?0:u.length),w=l>b?new 
Array(b=l-b+1).join(r):"";return y&&(M=i(w+M)),u+=v,n=M+_,("<"===o?u+n+w:">"===o?w+u+n:"^"===o?w.substring(0,b>>=1)+u+n+w.substring(b):u+(y?n:w+n))+e}}}function qt(n){return n+""}function zt(){this._=new Date(arguments.length>1?Date.UTC.apply(this,arguments):arguments[0])}function Rt(n,t,e){function r(t){var e=n(t),r=i(e,1);return r-t>t-e?e:r}function u(e){return t(e=n(new ec(e-1)),1),e}function i(n,e){return t(n=new ec(+n),e),n}function o(n,r,i){var o=u(n),a=[];if(i>1)for(;r>o;)e(o)%i||a.push(new Date(+o)),t(o,1);else for(;r>o;)a.push(new Date(+o)),t(o,1);return a}function a(n,t,e){try{ec=zt;var r=new zt;return r._=n,o(r,t,e)}finally{ec=Date}}n.floor=n,n.round=r,n.ceil=u,n.offset=i,n.range=o;var c=n.utc=Dt(n);return c.floor=c,c.round=Dt(r),c.ceil=Dt(u),c.offset=Dt(i),c.range=a,n}function Dt(n){return function(t,e){try{ec=zt;var r=new zt;return r._=t,n(r,e)._}finally{ec=Date}}}function Pt(n){function t(n){function t(t){for(var e,u,i,o=[],a=-1,c=0;++a<r;)37===n.charCodeAt(a)&&(o.push(n.substring(c,a)),null!=(u=uc[e=n.charAt(++a)])&&(e=n.charAt(++a)),(i=C[e])&&(e=i(t,null==u?"e"===e?" 
":"0":u)),o.push(e),c=a+1);return o.push(n.substring(c,a)),o.join("")}var r=n.length;return t.parse=function(t){var r={y:1900,m:0,d:1,H:0,M:0,S:0,L:0,Z:null},u=e(r,n,t,0);if(u!=t.length)return null;"p"in r&&(r.H=r.H%12+12*r.p);var i=null!=r.Z&&ec!==zt,o=new(i?zt:ec);return"j"in r?o.setFullYear(r.y,0,r.j):"w"in r&&("W"in r||"U"in r)?(o.setFullYear(r.y,0,1),o.setFullYear(r.y,0,"W"in r?(r.w+6)%7+7*r.W-(o.getDay()+5)%7:r.w+7*r.U-(o.getDay()+6)%7)):o.setFullYear(r.y,r.m,r.d),o.setHours(r.H+Math.floor(r.Z/100),r.M+r.Z%100,r.S,r.L),i?o._:o},t.toString=function(){return n},t}function e(n,t,e,r){for(var u,i,o,a=0,c=t.length,s=e.length;c>a;){if(r>=s)return-1;if(u=t.charCodeAt(a++),37===u){if(o=t.charAt(a++),i=N[o in uc?t.charAt(a++):o],!i||(r=i(n,e,r))<0)return-1}else if(u!=e.charCodeAt(r++))return-1}return r}function r(n,t,e){b.lastIndex=0;var r=b.exec(t.substring(e));return r?(n.w=w.get(r[0].toLowerCase()),e+r[0].length):-1}function u(n,t,e){M.lastIndex=0;var r=M.exec(t.substring(e));return r?(n.w=_.get(r[0].toLowerCase()),e+r[0].length):-1}function i(n,t,e){E.lastIndex=0;var r=E.exec(t.substring(e));return r?(n.m=A.get(r[0].toLowerCase()),e+r[0].length):-1}function o(n,t,e){S.lastIndex=0;var r=S.exec(t.substring(e));return r?(n.m=k.get(r[0].toLowerCase()),e+r[0].length):-1}function a(n,t,r){return e(n,C.c.toString(),t,r)}function c(n,t,r){return e(n,C.x.toString(),t,r)}function s(n,t,r){return e(n,C.X.toString(),t,r)}function l(n,t,e){var r=x.get(t.substring(e,e+=2).toLowerCase());return null==r?-1:(n.p=r,e)}var f=n.dateTime,h=n.date,g=n.time,p=n.periods,v=n.days,d=n.shortDays,m=n.months,y=n.shortMonths;t.utc=function(n){function e(n){try{ec=zt;var t=new ec;return t._=n,r(t)}finally{ec=Date}}var r=t(n);return e.parse=function(n){try{ec=zt;var t=r.parse(n);return t&&t._}finally{ec=Date}},e.toString=r.toString,e},t.multi=t.utc.multi=ee;var x=Xo.map(),M=jt(v),_=Ht(v),b=jt(d),w=Ht(d),S=jt(m),k=Ht(m),E=jt(y),A=Ht(y);p.forEach(function(n,t){x.set(n.toLowerCase(),t)});var 
C={a:function(n){return d[n.getDay()]},A:function(n){return v[n.getDay()]},b:function(n){return y[n.getMonth()]},B:function(n){return m[n.getMonth()]},c:t(f),d:function(n,t){return Ut(n.getDate(),t,2)},e:function(n,t){return Ut(n.getDate(),t,2)},H:function(n,t){return Ut(n.getHours(),t,2)},I:function(n,t){return Ut(n.getHours()%12||12,t,2)},j:function(n,t){return Ut(1+tc.dayOfYear(n),t,3)},L:function(n,t){return Ut(n.getMilliseconds(),t,3)},m:function(n,t){return Ut(n.getMonth()+1,t,2)},M:function(n,t){return Ut(n.getMinutes(),t,2)},p:function(n){return p[+(n.getHours()>=12)]},S:function(n,t){return Ut(n.getSeconds(),t,2)},U:function(n,t){return Ut(tc.sundayOfYear(n),t,2)},w:function(n){return n.getDay()},W:function(n,t){return Ut(tc.mondayOfYear(n),t,2)},x:t(h),X:t(g),y:function(n,t){return Ut(n.getFullYear()%100,t,2)},Y:function(n,t){return Ut(n.getFullYear()%1e4,t,4)},Z:ne,"%":function(){return"%"}},N={a:r,A:u,b:i,B:o,c:a,d:Bt,e:Bt,H:Jt,I:Jt,j:Wt,L:Qt,m:$t,M:Gt,p:l,S:Kt,U:Ot,w:Ft,W:Yt,x:c,X:s,y:Zt,Y:It,Z:Vt,"%":te};return t}function Ut(n,t,e){var r=0>n?"-":"",u=(r?-n:n)+"",i=u.length;return r+(e>i?new Array(e-i+1).join(t)+u:u)}function jt(n){return new RegExp("^(?:"+n.map(Xo.requote).join("|")+")","i")}function Ht(n){for(var t=new u,e=-1,r=n.length;++e<r;)t.set(n[e].toLowerCase(),e);return t}function Ft(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+1));return r?(n.w=+r[0],e+r[0].length):-1}function Ot(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e));return r?(n.U=+r[0],e+r[0].length):-1}function Yt(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e));return r?(n.W=+r[0],e+r[0].length):-1}function It(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+4));return r?(n.y=+r[0],e+r[0].length):-1}function Zt(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+2));return r?(n.y=Xt(+r[0]),e+r[0].length):-1}function Vt(n,t,e){return/^[+-]\d{4}$/.test(t=t.substring(e,e+5))?(n.Z=+t,e+5):-1}function Xt(n){return n+(n>68?1900:2e3)}function 
$t(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+2));return r?(n.m=r[0]-1,e+r[0].length):-1}function Bt(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+2));return r?(n.d=+r[0],e+r[0].length):-1}function Wt(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+3));return r?(n.j=+r[0],e+r[0].length):-1}function Jt(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+2));return r?(n.H=+r[0],e+r[0].length):-1}function Gt(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+2));return r?(n.M=+r[0],e+r[0].length):-1}function Kt(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+2));return r?(n.S=+r[0],e+r[0].length):-1}function Qt(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+3));return r?(n.L=+r[0],e+r[0].length):-1}function ne(n){var t=n.getTimezoneOffset(),e=t>0?"-":"+",r=~~(oa(t)/60),u=oa(t)%60;return e+Ut(r,"0",2)+Ut(u,"0",2)}function te(n,t,e){oc.lastIndex=0;var r=oc.exec(t.substring(e,e+1));return r?e+r[0].length:-1}function ee(n){for(var t=n.length,e=-1;++e<t;)n[e][0]=this(n[e][0]);return function(t){for(var e=0,r=n[e];!r[1](t);)r=n[++e];return r[0](t)}}function re(){}function ue(n,t,e){var r=e.s=n+t,u=r-n,i=r-u;e.t=n-i+(t-u)}function ie(n,t){n&&lc.hasOwnProperty(n.type)&&lc[n.type](n,t)}function oe(n,t,e){var r,u=-1,i=n.length-e;for(t.lineStart();++u<i;)r=n[u],t.point(r[0],r[1],r[2]);t.lineEnd()}function ae(n,t){var e=-1,r=n.length;for(t.polygonStart();++e<r;)oe(n[e],t,1);t.polygonEnd()}function ce(){function n(n,t){n*=Na,t=t*Na/2+Sa/4;var e=n-r,o=e>=0?1:-1,a=o*e,c=Math.cos(t),s=Math.sin(t),l=i*s,f=u*c+l*Math.cos(a),h=l*o*Math.sin(a);hc.add(Math.atan2(h,f)),r=n,u=c,i=s}var t,e,r,u,i;gc.point=function(o,a){gc.point=n,r=(t=o)*Na,u=Math.cos(a=(e=a)*Na/2+Sa/4),i=Math.sin(a)},gc.lineEnd=function(){n(t,e)}}function se(n){var t=n[0],e=n[1],r=Math.cos(e);return[r*Math.cos(t),r*Math.sin(t),Math.sin(e)]}function le(n,t){return n[0]*t[0]+n[1]*t[1]+n[2]*t[2]}function fe(n,t){return[n[1]*t[2]-n[2]*t[1],n[2]*t[0]-n[0]*t[2],n[0]*t[1]-n[1]*t[0]]}function 
they(n,t){n[0]+=t[0],n[1]+=t[1],n[2]+=t[2]}function ge(n,t){return[n[0]*t,n[1]*t,n[2]*t]}function pe(n){var t=Math.sqrt(n[0]*n[0]+n[1]*n[1]+n[2]*n[2]);n[0]/=t,n[1]/=t,n[2]/=t}function ve(n){return[Math.atan2(n[1],n[0]),X(n[2])]}function de(n,t){return oa(n[0]-t[0])<Aa&&oa(n[1]-t[1])<Aa}function me(n,t){n*=Na;var e=Math.cos(t*=Na);ye(e*Math.cos(n),e*Math.sin(n),Math.sin(t))}function ye(n,t,e){++pc,dc+=(n-dc)/pc,mc+=(t-mc)/pc,yc+=(e-yc)/pc}function xe(){function n(n,u){n*=Na;var i=Math.cos(u*=Na),o=i*Math.cos(n),a=i*Math.sin(n),c=Math.sin(u),s=Math.atan2(Math.sqrt((s=e*c-r*a)*s+(s=r*o-t*c)*s+(s=t*a-e*o)*s),t*o+e*a+r*c);vc+=s,xc+=s*(t+(t=o)),Mc+=s*(e+(e=a)),_c+=s*(r+(r=c)),ye(t,e,r)}var t,e,r;kc.point=function(u,i){u*=Na;var o=Math.cos(i*=Na);t=o*Math.cos(u),e=o*Math.sin(u),r=Math.sin(i),kc.point=n,ye(t,e,r)}}function Me(){kc.point=me}function _e(){function n(n,t){n*=Na;var e=Math.cos(t*=Na),o=e*Math.cos(n),a=e*Math.sin(n),c=Math.sin(t),s=u*c-i*a,l=i*o-r*c,f=r*a-u*o,h=Math.sqrt(s*s+l*l+f*f),g=r*o+u*a+i*c,p=h&&-V(g)/h,v=Math.atan2(h,g);bc+=p*s,wc+=p*l,Sc+=p*f,vc+=v,xc+=v*(r+(r=o)),Mc+=v*(u+(u=a)),_c+=v*(i+(i=c)),ye(r,u,i)}var t,e,r,u,i;kc.point=function(o,a){t=o,e=a,kc.point=n,o*=Na;var c=Math.cos(a*=Na);r=c*Math.cos(o),u=c*Math.sin(o),i=Math.sin(a),ye(r,u,i)},kc.lineEnd=function(){n(t,e),kc.lineEnd=Me,kc.point=me}}function be(){return!0}function we(n,t,e,r,u){var i=[],o=[];if(n.forEach(function(n){if(!((t=n.length-1)<=0)){var t,e=n[0],r=n[t];if(de(e,r)){u.lineStart();for(var a=0;t>a;++a)u.point((e=n[a])[0],e[1]);return u.lineEnd(),void 0}var c=new ke(e,n,null,!0),s=new ke(e,null,c,!1);c.o=s,i.push(c),o.push(s),c=new ke(r,n,null,!1),s=new ke(r,null,c,!0),c.o=s,i.push(c),o.push(s)}}),o.sort(t),Se(i),Se(o),i.length){for(var a=0,c=e,s=o.length;s>a;++a)o[a].e=c=!c;for(var l,f,h=i[0];;){for(var g=h,p=!0;g.v;)if((g=g.n)===h)return;l=g.z,u.lineStart();do{if(g.v=g.o.v=!0,g.e){if(p)for(var a=0,s=l.length;s>a;++a)u.point((f=l[a])[0],f[1]);else 
r(g.x,g.n.x,1,u);g=g.n}else{if(p){l=g.p.z;for(var a=l.length-1;a>=0;--a)u.point((f=l[a])[0],f[1])}else r(g.x,g.p.x,-1,u);g=g.p}g=g.o,l=g.z,p=!p}while(!g.v);u.lineEnd()}}}function Se(n){if(t=n.length){for(var t,e,r=0,u=n[0];++r<t;)u.n=e=n[r],e.p=u,u=e;u.n=e=n[0],e.p=u}}function ke(n,t,e,r){this.x=n,this.z=t,this.o=e,this.e=r,this.v=!1,this.n=this.p=null}function Ee(n,t,e,r){return function(u,i){function o(t,e){var r=u(t,e);n(t=r[0],e=r[1])&&i.point(t,e)}function a(n,t){var e=u(n,t);d.point(e[0],e[1])}function c(){y.point=a,d.lineStart()}function s(){y.point=o,d.lineEnd()}function l(n,t){v.push([n,t]);var e=u(n,t);M.point(e[0],e[1])}function f(){M.lineStart(),v=[]}function h(){l(v[0][0],v[0][1]),M.lineEnd();var n,t=M.clean(),e=x.buffer(),r=e.length;if(v.pop(),p.push(v),v=null,r){if(1&t){n=e[0];var u,r=n.length-1,o=-1;for(i.lineStart();++o<r;)i.point((u=n[o])[0],u[1]);return i.lineEnd(),void 0}r>1&&2&t&&e.push(e.pop().concat(e.shift())),g.push(e.filter(Ae))}}var g,p,v,d=t(i),m=u.invert(r[0],r[1]),y={point:o,lineStart:c,lineEnd:s,polygonStart:function(){y.point=l,y.lineStart=f,y.lineEnd=h,g=[],p=[],i.polygonStart()},polygonEnd:function(){y.point=o,y.lineStart=c,y.lineEnd=s,g=Xo.merge(g);var n=Le(m,p);g.length?we(g,Ne,n,e,i):n&&(i.lineStart(),e(null,null,1,i),i.lineEnd()),i.polygonEnd(),g=p=null},sphere:function(){i.polygonStart(),i.lineStart(),e(null,null,1,i),i.lineEnd(),i.polygonEnd()}},x=Ce(),M=t(x);return y}}function Ae(n){return n.length>1}function Ce(){var n,t=[];return{lineStart:function(){t.push(n=[])},point:function(t,e){n.push([t,e])},lineEnd:g,buffer:function(){var e=t;return t=[],n=null,e},rejoin:function(){t.length>1&&t.push(t.pop().concat(t.shift()))}}}function Ne(n,t){return((n=n.x)[0]<0?n[1]-Ea-Aa:Ea-n[1])-((t=t.x)[0]<0?t[1]-Ea-Aa:Ea-t[1])}function Le(n,t){var e=n[0],r=n[1],u=[Math.sin(e),-Math.cos(e),0],i=0,o=0;hc.reset();for(var a=0,c=t.length;c>a;++a){var s=t[a],l=s.length;if(l)for(var 
f=s[0],h=f[0],g=f[1]/2+Sa/4,p=Math.sin(g),v=Math.cos(g),d=1;;){d===l&&(d=0),n=s[d];var m=n[0],y=n[1]/2+Sa/4,x=Math.sin(y),M=Math.cos(y),_=m-h,b=_>=0?1:-1,w=b*_,S=w>Sa,k=p*x;if(hc.add(Math.atan2(k*b*Math.sin(w),v*M+k*Math.cos(w))),i+=S?_+b*ka:_,S^h>=e^m>=e){var E=fe(se(f),se(n));pe(E);var A=fe(u,E);pe(A);var C=(S^_>=0?-1:1)*X(A[2]);(r>C||r===C&&(E[0]||E[1]))&&(o+=S^_>=0?1:-1)}if(!d++)break;h=m,p=x,v=M,f=n}}return(-Aa>i||Aa>i&&0>hc)^1&o}function Te(n){var t,e=0/0,r=0/0,u=0/0;return{lineStart:function(){n.lineStart(),t=1},point:function(i,o){var a=i>0?Sa:-Sa,c=oa(i-e);oa(c-Sa)<Aa?(n.point(e,r=(r+o)/2>0?Ea:-Ea),n.point(u,r),n.lineEnd(),n.lineStart(),n.point(a,r),n.point(i,r),t=0):u!==a&&c>=Sa&&(oa(e-u)<Aa&&(e-=u*Aa),oa(i-a)<Aa&&(i-=a*Aa),r=qe(e,r,i,o),n.point(u,r),n.lineEnd(),n.lineStart(),n.point(a,r),t=0),n.point(e=i,r=o),u=a},lineEnd:function(){n.lineEnd(),e=r=0/0},clean:function(){return 2-t}}}function qe(n,t,e,r){var u,i,o=Math.sin(n-e);return oa(o)>Aa?Math.atan((Math.sin(t)*(i=Math.cos(r))*Math.sin(e)-Math.sin(r)*(u=Math.cos(t))*Math.sin(n))/(u*i*o)):(t+r)/2}function ze(n,t,e,r){var u;if(null==n)u=e*Ea,r.point(-Sa,u),r.point(0,u),r.point(Sa,u),r.point(Sa,0),r.point(Sa,-u),r.point(0,-u),r.point(-Sa,-u),r.point(-Sa,0),r.point(-Sa,u);else if(oa(n[0]-t[0])>Aa){var i=n[0]<t[0]?Sa:-Sa;u=e*i/2,r.point(-i,u),r.point(0,u),r.point(i,u)}else r.point(t[0],t[1])}function Re(n){function t(n,t){return Math.cos(n)*Math.cos(t)>i}function e(n){var e,i,c,s,l;return{lineStart:function(){s=c=!1,l=1},point:function(f,h){var g,p=[f,h],v=t(f,h),d=o?v?0:u(f,h):v?u(f+(0>f?Sa:-Sa),h):0;if(!e&&(s=c=v)&&n.lineStart(),v!==c&&(g=r(e,p),(de(e,g)||de(p,g))&&(p[0]+=Aa,p[1]+=Aa,v=t(p[0],p[1]))),v!==c)l=0,v?(n.lineStart(),g=r(p,e),n.point(g[0],g[1])):(g=r(e,p),n.point(g[0],g[1]),n.lineEnd()),e=g;else if(a&&e&&o^v){var 
m;d&i||!(m=r(p,e,!0))||(l=0,o?(n.lineStart(),n.point(m[0][0],m[0][1]),n.point(m[1][0],m[1][1]),n.lineEnd()):(n.point(m[1][0],m[1][1]),n.lineEnd(),n.lineStart(),n.point(m[0][0],m[0][1])))}!v||e&&de(e,p)||n.point(p[0],p[1]),e=p,c=v,i=d},lineEnd:function(){c&&n.lineEnd(),e=null},clean:function(){return l|(s&&c)<<1}}}function r(n,t,e){var r=se(n),u=se(t),o=[1,0,0],a=fe(r,u),c=le(a,a),s=a[0],l=c-s*s;if(!l)return!e&&n;var f=i*c/l,h=-i*s/l,g=fe(o,a),p=ge(o,f),v=ge(a,h);they(p,v);var d=g,m=le(p,d),y=le(d,d),x=m*m-y*(le(p,p)-1);if(!(0>x)){var M=Math.sqrt(x),_=ge(d,(-m-M)/y);if(they(_,p),_=ve(_),!e)return _;var b,w=n[0],S=t[0],k=n[1],E=t[1];w>S&&(b=w,w=S,S=b);var A=S-w,C=oa(A-Sa)<Aa,N=C||Aa>A;if(!C&&k>E&&(b=k,k=E,E=b),N?C?k+E>0^_[1]<(oa(_[0]-w)<Aa?k:E):k<=_[1]&&_[1]<=E:A>Sa^(w<=_[0]&&_[0]<=S)){var L=ge(d,(-m+M)/y);return they(L,p),[_,ve(L)]}}}function u(t,e){var r=o?n:Sa-n,u=0;return-r>t?u|=1:t>r&&(u|=2),-r>e?u|=4:e>r&&(u|=8),u}var i=Math.cos(n),o=i>0,a=oa(i)>Aa,c=cr(n,6*Na);return Ee(t,e,c,o?[0,-n]:[-Sa,n-Sa])}function De(n,t,e,r){return function(u){var i,o=u.a,a=u.b,c=o.x,s=o.y,l=a.x,f=a.y,h=0,g=1,p=l-c,v=f-s;if(i=n-c,p||!(i>0)){if(i/=p,0>p){if(h>i)return;g>i&&(g=i)}else if(p>0){if(i>g)return;i>h&&(h=i)}if(i=e-c,p||!(0>i)){if(i/=p,0>p){if(i>g)return;i>h&&(h=i)}else if(p>0){if(h>i)return;g>i&&(g=i)}if(i=t-s,v||!(i>0)){if(i/=v,0>v){if(h>i)return;g>i&&(g=i)}else if(v>0){if(i>g)return;i>h&&(h=i)}if(i=r-s,v||!(0>i)){if(i/=v,0>v){if(i>g)return;i>h&&(h=i)}else if(v>0){if(h>i)return;g>i&&(g=i)}return h>0&&(u.a={x:c+h*p,y:s+h*v}),1>g&&(u.b={x:c+g*p,y:s+g*v}),u}}}}}}function Pe(n,t,e,r){function u(r,u){return oa(r[0]-n)<Aa?u>0?0:3:oa(r[0]-e)<Aa?u>0?2:1:oa(r[1]-t)<Aa?u>0?1:0:u>0?3:2}function i(n,t){return o(n.x,t.x)}function o(n,t){var e=u(n,1),r=u(t,1);return e!==r?e-r:0===e?t[1]-n[1]:1===e?n[0]-t[0]:2===e?n[1]-t[1]:t[0]-n[0]}return function(a){function c(n){for(var t=0,e=d.length,r=n[1],u=0;e>u;++u)for(var 
i,o=1,a=d[u],c=a.length,s=a[0];c>o;++o)i=a[o],s[1]<=r?i[1]>r&&Z(s,i,n)>0&&++t:i[1]<=r&&Z(s,i,n)<0&&--t,s=i;return 0!==t}function s(i,a,c,s){var l=0,f=0;if(null==i||(l=u(i,c))!==(f=u(a,c))||o(i,a)<0^c>0){do s.point(0===l||3===l?n:e,l>1?r:t);while((l=(l+c+4)%4)!==f)}else s.point(a[0],a[1])}function l(u,i){return u>=n&&e>=u&&i>=t&&r>=i}function f(n,t){l(n,t)&&a.point(n,t)}function h(){N.point=p,d&&d.push(m=[]),S=!0,w=!1,_=b=0/0}function g(){v&&(p(y,x),M&&w&&A.rejoin(),v.push(A.buffer())),N.point=f,w&&a.lineEnd()}function p(n,t){n=Math.max(-Ac,Math.min(Ac,n)),t=Math.max(-Ac,Math.min(Ac,t));var e=l(n,t);if(d&&m.push([n,t]),S)y=n,x=t,M=e,S=!1,e&&(a.lineStart(),a.point(n,t));else if(e&&w)a.point(n,t);else{var r={a:{x:_,y:b},b:{x:n,y:t}};C(r)?(w||(a.lineStart(),a.point(r.a.x,r.a.y)),a.point(r.b.x,r.b.y),e||a.lineEnd(),k=!1):e&&(a.lineStart(),a.point(n,t),k=!1)}_=n,b=t,w=e}var v,d,m,y,x,M,_,b,w,S,k,E=a,A=Ce(),C=De(n,t,e,r),N={point:f,lineStart:h,lineEnd:g,polygonStart:function(){a=A,v=[],d=[],k=!0},polygonEnd:function(){a=E,v=Xo.merge(v);var t=c([n,r]),e=k&&t,u=v.length;(e||u)&&(a.polygonStart(),e&&(a.lineStart(),s(null,null,1,a),a.lineEnd()),u&&we(v,i,t,s,a),a.polygonEnd()),v=d=m=null}};return N}}function Ue(n,t){function e(e,r){return e=n(e,r),t(e[0],e[1])}return n.invert&&t.invert&&(e.invert=function(e,r){return e=t.invert(e,r),e&&n.invert(e[0],e[1])}),e}function je(n){var t=0,e=Sa/3,r=nr(n),u=r(t,e);return u.parallels=function(n){return arguments.length?r(t=n[0]*Sa/180,e=n[1]*Sa/180):[180*(t/Sa),180*(e/Sa)]},u}function They(n,t){function e(n,t){var e=Math.sqrt(i-2*u*Math.sin(t))/u;return[e*Math.sin(n*=u),o-e*Math.cos(n)]}var r=Math.sin(n),u=(r+Math.sin(t))/2,i=1+r*(2*u-r),o=Math.sqrt(i)/u;return e.invert=function(n,t){var e=o-t;return[Math.atan2(n,e)/u,X((i-(n*n+e*e)*u*u)/(2*u))]},e}function Fe(){function n(n,t){Nc+=u*n-r*t,r=n,u=t}var t,e,r,u;Rc.point=function(i,o){Rc.point=n,t=r=i,e=u=o},Rc.lineEnd=function(){n(t,e)}}function 
Oe(n,t){Lc>n&&(Lc=n),n>qc&&(qc=n),Tc>t&&(Tc=t),t>zc&&(zc=t)}function Ye(){function n(n,t){o.push("M",n,",",t,i)}function t(n,t){o.push("M",n,",",t),a.point=e}function e(n,t){o.push("L",n,",",t)}function r(){a.point=n}function u(){o.push("Z")}var i=Ie(4.5),o=[],a={point:n,lineStart:function(){a.point=t},lineEnd:r,polygonStart:function(){a.lineEnd=u},polygonEnd:function(){a.lineEnd=r,a.point=n},pointRadius:function(n){return i=Ie(n),a},result:function(){if(o.length){var n=o.join("");return o=[],n}}};return a}function Ie(n){return"m0,"+n+"a"+n+","+n+" 0 1,1 0,"+-2*n+"a"+n+","+n+" 0 1,1 0,"+2*n+"z"}function Ze(n,t){dc+=n,mc+=t,++yc}function Ve(){function n(n,r){var u=n-t,i=r-e,o=Math.sqrt(u*u+i*i);xc+=o*(t+n)/2,Mc+=o*(e+r)/2,_c+=o,Ze(t=n,e=r)}var t,e;Pc.point=function(r,u){Pc.point=n,Ze(t=r,e=u)}}function Xe(){Pc.point=Ze}function $e(){function n(n,t){var e=n-r,i=t-u,o=Math.sqrt(e*e+i*i);xc+=o*(r+n)/2,Mc+=o*(u+t)/2,_c+=o,o=u*n-r*t,bc+=o*(r+n),wc+=o*(u+t),Sc+=3*o,Ze(r=n,u=t)}var t,e,r,u;Pc.point=function(i,o){Pc.point=n,Ze(t=r=i,e=u=o)},Pc.lineEnd=function(){n(t,e)}}function Be(n){function t(t,e){n.moveTo(t,e),n.arc(t,e,o,0,ka)}function e(t,e){n.moveTo(t,e),a.point=r}function r(t,e){n.lineTo(t,e)}function u(){a.point=t}function i(){n.closePath()}var o=4.5,a={point:t,lineStart:function(){a.point=e},lineEnd:u,polygonStart:function(){a.lineEnd=i},polygonEnd:function(){a.lineEnd=u,a.point=t},pointRadius:function(n){return o=n,a},result:g};return a}function We(n){function t(n){return(a?r:e)(n)}function e(t){return Ke(t,function(e,r){e=n(e,r),t.point(e[0],e[1])})}function r(t){function e(e,r){e=n(e,r),t.point(e[0],e[1])}function r(){x=0/0,S.point=i,t.lineStart()}function i(e,r){var i=se([e,r]),o=n(e,r);u(x,M,y,_,b,w,x=o[0],M=o[1],y=e,_=i[0],b=i[1],w=i[2],a,t),t.point(x,M)}function o(){S.point=e,t.lineEnd()}function c(){r(),S.point=s,S.lineEnd=l}function s(n,t){i(f=n,h=t),g=x,p=M,v=_,d=b,m=w,S.point=i}function l(){u(x,M,y,_,b,w,g,p,f,v,d,m,a,t),S.lineEnd=o,o()}var 
f,h,g,p,v,d,m,y,x,M,_,b,w,S={point:e,lineStart:r,lineEnd:o,polygonStart:function(){t.polygonStart(),S.lineStart=c},polygonEnd:function(){t.polygonEnd(),S.lineStart=r}};return S}function u(t,e,r,a,c,s,l,f,h,g,p,v,d,m){var y=l-t,x=f-e,M=y*y+x*x;if(M>4*i&&d--){var _=a+g,b=c+p,w=s+v,S=Math.sqrt(_*_+b*b+w*w),k=Math.asin(w/=S),E=oa(oa(w)-1)<Aa||oa(r-h)<Aa?(r+h)/2:Math.atan2(b,_),A=n(E,k),C=A[0],N=A[1],L=C-t,T=N-e,q=x*L-y*T;(q*q/M>i||oa((y*L+x*T)/M-.5)>.3||o>a*g+c*p+s*v)&&(u(t,e,r,a,c,s,C,N,E,_/=S,b/=S,w,d,m),m.point(C,N),u(C,N,E,_,b,w,l,f,h,g,p,v,d,m))}}var i=.5,o=Math.cos(30*Na),a=16;return t.precision=function(n){return arguments.length?(a=(i=n*n)>0&&16,t):Math.sqrt(i)},t}function Je(n){var t=We(function(t,e){return n([t*La,e*La])});return function(n){return tr(t(n))}}function Ge(n){this.stream=n}function Ke(n,t){return{point:t,sphere:function(){n.sphere()},lineStart:function(){n.lineStart()},lineEnd:function(){n.lineEnd()},polygonStart:function(){n.polygonStart()},polygonEnd:function(){n.polygonEnd()}}}function Qe(n){return nr(function(){return n})()}function nr(n){function t(n){return n=a(n[0]*Na,n[1]*Na),[n[0]*h+c,s-n[1]*h]}function e(n){return n=a.invert((n[0]-c)/h,(s-n[1])/h),n&&[n[0]*La,n[1]*La]}function r(){a=Ue(o=ur(m,y,x),i);var n=i(v,d);return c=g-n[0]*h,s=p+n[1]*h,u()}function u(){return l&&(l.valid=!1,l=null),t}var i,o,a,c,s,l,f=We(function(n,t){return n=i(n,t),[n[0]*h+c,s-n[1]*h]}),h=150,g=480,p=250,v=0,d=0,m=0,y=0,x=0,M=Ec,_=bt,b=null,w=null;return t.stream=function(n){return l&&(l.valid=!1),l=tr(M(o,f(_(n)))),l.valid=!0,l},t.clipAngle=function(n){return arguments.length?(M=null==n?(b=n,Ec):Re((b=+n)*Na),u()):b},t.clipExtent=function(n){return arguments.length?(w=n,_=n?Pe(n[0][0],n[0][1],n[1][0],n[1][1]):bt,u()):w},t.scale=function(n){return arguments.length?(h=+n,r()):h},t.translate=function(n){return arguments.length?(g=+n[0],p=+n[1],r()):[g,p]},t.center=function(n){return 
arguments.length?(v=n[0]%360*Na,d=n[1]%360*Na,r()):[v*La,d*La]},t.rotate=function(n){return arguments.length?(m=n[0]%360*Na,y=n[1]%360*Na,x=n.length>2?n[2]%360*Na:0,r()):[m*La,y*La,x*La]},Xo.rebind(t,f,"precision"),function(){return i=n.apply(this,arguments),t.invert=i.invert&&e,r()}}function tr(n){return Ke(n,function(t,e){n.point(t*Na,e*Na)})}function er(n,t){return[n,t]}function rr(n,t){return[n>Sa?n-ka:-Sa>n?n+ka:n,t]}function ur(n,t,e){return n?t||e?Ue(or(n),ar(t,e)):or(n):t||e?ar(t,e):rr}function ir(n){return function(t,e){return t+=n,[t>Sa?t-ka:-Sa>t?t+ka:t,e]}}function or(n){var t=ir(n);return t.invert=ir(-n),t}function ar(n,t){function e(n,t){var e=Math.cos(t),a=Math.cos(n)*e,c=Math.sin(n)*e,s=Math.sin(t),l=s*r+a*u;return[Math.atan2(c*i-l*o,a*r-s*u),X(l*i+c*o)]}var r=Math.cos(n),u=Math.sin(n),i=Math.cos(t),o=Math.sin(t);return e.invert=function(n,t){var e=Math.cos(t),a=Math.cos(n)*e,c=Math.sin(n)*e,s=Math.sin(t),l=s*i-c*o;return[Math.atan2(c*i+s*o,a*r+l*u),X(l*r-a*u)]},e}function cr(n,t){var e=Math.cos(n),r=Math.sin(n);return function(u,i,o,a){var c=o*t;null!=u?(u=sr(e,u),i=sr(e,i),(o>0?i>u:u>i)&&(u+=o*ka)):(u=n+o*ka,i=n-.5*c);for(var s,l=u;o>0?l>i:i>l;l-=c)a.point((s=ve([e,-r*Math.cos(l),-r*Math.sin(l)]))[0],s[1])}}function sr(n,t){var e=se(t);e[0]-=n,pe(e);var r=V(-e[1]);return((-e[2]<0?-r:r)+2*Math.PI-Aa)%(2*Math.PI)}function lr(n,t,e){var r=Xo.range(n,t-Aa,e).concat(t);return function(n){return r.map(function(t){return[n,t]})}}function fr(n,t,e){var r=Xo.range(n,t-Aa,e).concat(t);return function(n){return r.map(function(t){return[t,n]})}}function hr(n){return n.source}function gr(n){return n.target}function pr(n,t,e,r){var u=Math.cos(t),i=Math.sin(t),o=Math.cos(r),a=Math.sin(r),c=u*Math.cos(n),s=u*Math.sin(n),l=o*Math.cos(e),f=o*Math.sin(e),h=2*Math.asin(Math.sqrt(J(r-t)+u*o*J(e-n))),g=1/Math.sin(h),p=h?function(n){var 
t=Math.sin(n*=h)*g,e=Math.sin(h-n)*g,r=e*c+t*l,u=e*s+t*f,o=e*i+t*a;return[Math.atan2(u,r)*La,Math.atan2(o,Math.sqrt(r*r+u*u))*La]}:function(){return[n*La,t*La]};return p.distance=h,p}function vr(){function n(n,u){var i=Math.sin(u*=Na),o=Math.cos(u),a=oa((n*=Na)-t),c=Math.cos(a);Uc+=Math.atan2(Math.sqrt((a=o*Math.sin(a))*a+(a=r*i-e*o*c)*a),e*i+r*o*c),t=n,e=i,r=o}var t,e,r;jc.point=function(u,i){t=u*Na,e=Math.sin(i*=Na),r=Math.cos(i),jc.point=n},jc.lineEnd=function(){jc.point=jc.lineEnd=g}}function dr(n,t){function e(t,e){var r=Math.cos(t),u=Math.cos(e),i=n(r*u);return[i*u*Math.sin(t),i*Math.sin(e)]}return e.invert=function(n,e){var r=Math.sqrt(n*n+e*e),u=t(r),i=Math.sin(u),o=Math.cos(u);return[Math.atan2(n*i,r*o),Math.asin(r&&e*i/r)]},e}function mr(n,t){function e(n,t){var e=oa(oa(t)-Ea)<Aa?0:o/Math.pow(u(t),i);return[e*Math.sin(i*n),o-e*Math.cos(i*n)]}var r=Math.cos(n),u=function(n){return Math.tan(Sa/4+n/2)},i=n===t?Math.sin(n):Math.log(r/Math.cos(t))/Math.log(u(t)/u(n)),o=r*Math.pow(u(n),i)/i;return i?(e.invert=function(n,t){var e=o-t,r=I(i)*Math.sqrt(n*n+e*e);return[Math.atan2(n,e)/i,2*Math.atan(Math.pow(o/r,1/i))-Ea]},e):xr}function yr(n,t){function e(n,t){var e=i-t;return[e*Math.sin(u*n),i-e*Math.cos(u*n)]}var r=Math.cos(n),u=n===t?Math.sin(n):(r-Math.cos(t))/(t-n),i=r/u+n;return oa(u)<Aa?er:(e.invert=function(n,t){var e=i-t;return[Math.atan2(n,e)/u,i-I(u)*Math.sqrt(n*n+e*e)]},e)}function xr(n,t){return[n,Math.log(Math.tan(Sa/4+t/2))]}function Mr(n){var t,e=Qe(n),r=e.scale,u=e.translate,i=e.clipExtent;return e.scale=function(){var n=r.apply(e,arguments);return n===e?t?e.clipExtent(null):e:n},e.translate=function(){var n=u.apply(e,arguments);return n===e?t?e.clipExtent(null):e:n},e.clipExtent=function(n){var o=i.apply(e,arguments);if(o===e){if(t=null==n){var a=Sa*r(),c=u();i([[c[0]-a,c[1]-a],[c[0]+a,c[1]+a]])}}else t&&(o=null);return o},e.clipExtent(null)}function _r(n,t){return[Math.log(Math.tan(Sa/4+t/2)),-n]}function br(n){return n[0]}function wr(n){return 
n[1]}function Sr(n){for(var t=n.length,e=[0,1],r=2,u=2;t>u;u++){for(;r>1&&Z(n[e[r-2]],n[e[r-1]],n[u])<=0;)--r;e[r++]=u}return e.slice(0,r)}function kr(n,t){return n[0]-t[0]||n[1]-t[1]}function Er(n,t,e){return(e[0]-t[0])*(n[1]-t[1])<(e[1]-t[1])*(n[0]-t[0])}function Ar(n,t,e,r){var u=n[0],i=e[0],o=t[0]-u,a=r[0]-i,c=n[1],s=e[1],l=t[1]-c,f=r[1]-s,h=(a*(c-s)-f*(u-i))/(f*o-a*l);return[u+h*o,c+h*l]}function Cr(n){var t=n[0],e=n[n.length-1];return!(t[0]-e[0]||t[1]-e[1])}function Nr(){Jr(this),this.edge=this.site=this.circle=null}function Lr(n){var t=Jc.pop()||new Nr;return t.site=n,t}function Tr(n){Or(n),$c.remove(n),Jc.push(n),Jr(n)}function qr(n){var t=n.circle,e=t.x,r=t.cy,u={x:e,y:r},i=n.P,o=n.N,a=[n];Tr(n);for(var c=i;c.circle&&oa(e-c.circle.x)<Aa&&oa(r-c.circle.cy)<Aa;)i=c.P,a.unshift(c),Tr(c),c=i;a.unshift(c),Or(c);for(var s=o;s.circle&&oa(e-s.circle.x)<Aa&&oa(r-s.circle.cy)<Aa;)o=s.N,a.push(s),Tr(s),s=o;a.push(s),Or(s);var l,f=a.length;for(l=1;f>l;++l)s=a[l],c=a[l-1],$r(s.edge,c.site,s.site,u);c=a[0],s=a[f-1],s.edge=Vr(c.site,s.site,null,u),Fr(c),Fr(s)}function zr(n){for(var t,e,r,u,i=n.x,o=n.y,a=$c._;a;)if(r=Rr(a,o)-i,r>Aa)a=a.L;else{if(u=i-Dr(a,o),!(u>Aa)){r>-Aa?(t=a.P,e=a):u>-Aa?(t=a,e=a.N):t=e=a;break}if(!a.R){t=a;break}a=a.R}var c=Lr(n);if($c.insert(t,c),t||e){if(t===e)return Or(t),e=Lr(t.site),$c.insert(c,e),c.edge=e.edge=Vr(t.site,c.site),Fr(t),Fr(e),void 0;if(!e)return c.edge=Vr(t.site,c.site),void 0;Or(t),Or(e);var s=t.site,l=s.x,f=s.y,h=n.x-l,g=n.y-f,p=e.site,v=p.x-l,d=p.y-f,m=2*(h*d-g*v),y=h*h+g*g,x=v*v+d*d,M={x:(d*y-g*x)/m+l,y:(h*x-v*y)/m+f};$r(e.edge,s,p,M),c.edge=Vr(s,n,null,M),e.edge=Vr(n,p,null,M),Fr(t),Fr(e)}}function Rr(n,t){var e=n.site,r=e.x,u=e.y,i=u-t;if(!i)return r;var o=n.P;if(!o)return-1/0;e=o.site;var a=e.x,c=e.y,s=c-t;if(!s)return a;var l=a-r,f=1/i-1/s,h=l/s;return f?(-h+Math.sqrt(h*h-2*f*(l*l/(-2*s)-c+s/2+u-i/2)))/f+r:(r+a)/2}function Dr(n,t){var e=n.N;if(e)return Rr(e,t);var r=n.site;return r.y===t?r.x:1/0}function 
Pr(n){this.site=n,this.edges=[]}function Ur(n){for(var t,e,r,u,i,o,a,c,s,l,f=n[0][0],h=n[1][0],g=n[0][1],p=n[1][1],v=Xc,d=v.length;d--;)if(i=v[d],i&&i.prepare())for(a=i.edges,c=a.length,o=0;c>o;)l=a[o].end(),r=l.x,u=l.y,s=a[++o%c].start(),t=s.x,e=s.y,(oa(r-t)>Aa||oa(u-e)>Aa)&&(a.splice(o,0,new Br(Xr(i.site,l,oa(r-f)<Aa&&p-u>Aa?{x:f,y:oa(t-f)<Aa?e:p}:oa(u-p)<Aa&&h-r>Aa?{x:oa(e-p)<Aa?t:h,y:p}:oa(r-h)<Aa&&u-g>Aa?{x:h,y:oa(t-h)<Aa?e:g}:oa(u-g)<Aa&&r-f>Aa?{x:oa(e-g)<Aa?t:f,y:g}:null),i.site,null)),++c)}function jr(n,t){return t.angle-n.angle}function Hr(){Jr(this),this.x=this.y=this.arc=this.site=this.cy=null}function Fr(n){var t=n.P,e=n.N;if(t&&e){var r=t.site,u=n.site,i=e.site;if(r!==i){var o=u.x,a=u.y,c=r.x-o,s=r.y-a,l=i.x-o,f=i.y-a,h=2*(c*f-s*l);if(!(h>=-Ca)){var g=c*c+s*s,p=l*l+f*f,v=(f*g-s*p)/h,d=(c*p-l*g)/h,f=d+a,m=Gc.pop()||new Hr;m.arc=n,m.site=u,m.x=v+o,m.y=f+Math.sqrt(v*v+d*d),m.cy=f,n.circle=m;for(var y=null,x=Wc._;x;)if(m.y<x.y||m.y===x.y&&m.x<=x.x){if(!x.L){y=x.P;break}x=x.L}else{if(!x.R){y=x;break}x=x.R}Wc.insert(y,m),y||(Bc=m)}}}}function Or(n){var t=n.circle;t&&(t.P||(Bc=t.N),Wc.remove(t),Gc.push(t),Jr(t),n.circle=null)}function Yr(n){for(var t,e=Vc,r=De(n[0][0],n[0][1],n[1][0],n[1][1]),u=e.length;u--;)t=e[u],(!Ir(t,n)||!r(t)||oa(t.a.x-t.b.x)<Aa&&oa(t.a.y-t.b.y)<Aa)&&(t.a=t.b=null,e.splice(u,1))}function Ir(n,t){var e=n.b;if(e)return!0;var r,u,i=n.a,o=t[0][0],a=t[1][0],c=t[0][1],s=t[1][1],l=n.l,f=n.r,h=l.x,g=l.y,p=f.x,v=f.y,d=(h+p)/2,m=(g+v)/2;if(v===g){if(o>d||d>=a)return;if(h>p){if(i){if(i.y>=s)return}else i={x:d,y:c};e={x:d,y:s}}else{if(i){if(i.y<c)return}else i={x:d,y:s};e={x:d,y:c}}}else if(r=(h-p)/(v-g),u=m-r*d,-1>r||r>1)if(h>p){if(i){if(i.y>=s)return}else i={x:(c-u)/r,y:c};e={x:(s-u)/r,y:s}}else{if(i){if(i.y<c)return}else i={x:(s-u)/r,y:s};e={x:(c-u)/r,y:c}}else if(v>g){if(i){if(i.x>=a)return}else i={x:o,y:r*o+u};e={x:a,y:r*a+u}}else{if(i){if(i.x<o)return}else i={x:a,y:r*a+u};e={x:o,y:r*o+u}}return n.a=i,n.b=e,!0}function 
Zr(n,t){this.l=n,this.r=t,this.a=this.b=null}function Vr(n,t,e,r){var u=new Zr(n,t);return Vc.push(u),e&&$r(u,n,t,e),r&&$r(u,t,n,r),Xc[n.i].edges.push(new Br(u,n,t)),Xc[t.i].edges.push(new Br(u,t,n)),u}function Xr(n,t,e){var r=new Zr(n,null);return r.a=t,r.b=e,Vc.push(r),r}function $r(n,t,e,r){n.a||n.b?n.l===e?n.b=r:n.a=r:(n.a=r,n.l=t,n.r=e)}function Br(n,t,e){var r=n.a,u=n.b;this.edge=n,this.site=t,this.angle=e?Math.atan2(e.y-t.y,e.x-t.x):n.l===t?Math.atan2(u.x-r.x,r.y-u.y):Math.atan2(r.x-u.x,u.y-r.y)}function Wr(){this._=null}function Jr(n){n.U=n.C=n.L=n.R=n.P=n.N=null}function Gr(n,t){var e=t,r=t.R,u=e.U;u?u.L===e?u.L=r:u.R=r:n._=r,r.U=u,e.U=r,e.R=r.L,e.R&&(e.R.U=e),r.L=e}function Kr(n,t){var e=t,r=t.L,u=e.U;u?u.L===e?u.L=r:u.R=r:n._=r,r.U=u,e.U=r,e.L=r.R,e.L&&(e.L.U=e),r.R=e}function Qr(n){for(;n.L;)n=n.L;return n}function nu(n,t){var e,r,u,i=n.sort(tu).pop();for(Vc=[],Xc=new Array(n.length),$c=new Wr,Wc=new Wr;;)if(u=Bc,i&&(!u||i.y<u.y||i.y===u.y&&i.x<u.x))(i.x!==e||i.y!==r)&&(Xc[i.i]=new Pr(i),zr(i),e=i.x,r=i.y),i=n.pop();else{if(!u)break;qr(u.arc)}t&&(Yr(t),Ur(t));var o={cells:Xc,edges:Vc};return $c=Wc=Vc=Xc=null,o}function tu(n,t){return t.y-n.y||t.x-n.x}function eu(n,t,e){return(n.x-e.x)*(t.y-n.y)-(n.x-t.x)*(e.y-n.y)}function ru(n){return n.x}function uu(n){return n.y}function iu(){return{leaf:!0,nodes:[],point:null,x:null,y:null}}function ou(n,t,e,r,u,i){if(!n(t,e,r,u,i)){var o=.5*(e+u),a=.5*(r+i),c=t.nodes;c[0]&&ou(n,c[0],e,r,o,a),c[1]&&ou(n,c[1],o,r,u,a),c[2]&&ou(n,c[2],e,a,o,i),c[3]&&ou(n,c[3],o,a,u,i)}}function au(n,t){n=Xo.rgb(n),t=Xo.rgb(t);var e=n.r,r=n.g,u=n.b,i=t.r-e,o=t.g-r,a=t.b-u;return function(n){return"#"+vt(Math.round(e+i*n))+vt(Math.round(r+o*n))+vt(Math.round(u+a*n))}}function cu(n,t){var e,r={},u={};for(e in n)e in t?r[e]=fu(n[e],t[e]):u[e]=n[e];for(e in t)e in n||(u[e]=t[e]);return function(n){for(e in r)u[e]=r[e](n);return u}}function su(n,t){return t-=n=+n,function(e){return n+t*e}}function lu(n,t){var 
e,r,u,i,o,a=0,c=0,s=[],l=[];for(n+="",t+="",Qc.lastIndex=0,r=0;e=Qc.exec(t);++r)e.index&&s.push(t.substring(a,c=e.index)),l.push({i:s.length,x:e[0]}),s.push(null),a=Qc.lastIndex;for(a<t.length&&s.push(t.substring(a)),r=0,i=l.length;(e=Qc.exec(n))&&i>r;++r)if(o=l[r],o.x==e[0]){if(o.i)if(null==s[o.i+1])for(s[o.i-1]+=o.x,s.splice(o.i,1),u=r+1;i>u;++u)l[u].i--;else for(s[o.i-1]+=o.x+s[o.i+1],s.splice(o.i,2),u=r+1;i>u;++u)l[u].i-=2;else if(null==s[o.i+1])s[o.i]=o.x;else for(s[o.i]=o.x+s[o.i+1],s.splice(o.i+1,1),u=r+1;i>u;++u)l[u].i--;l.splice(r,1),i--,r--}else o.x=su(parseFloat(e[0]),parseFloat(o.x));for(;i>r;)o=l.pop(),null==s[o.i+1]?s[o.i]=o.x:(s[o.i]=o.x+s[o.i+1],s.splice(o.i+1,1)),i--;return 1===s.length?null==s[0]?(o=l[0].x,function(n){return o(n)+""}):function(){return t}:function(n){for(r=0;i>r;++r)s[(o=l[r]).i]=o.x(n);return s.join("")}}function fu(n,t){for(var e,r=Xo.interpolators.length;--r>=0&&!(e=Xo.interpolators[r](n,t)););return e}function hu(n,t){var e,r=[],u=[],i=n.length,o=t.length,a=Math.min(n.length,t.length);for(e=0;a>e;++e)r.push(fu(n[e],t[e]));for(;i>e;++e)u[e]=n[e];for(;o>e;++e)u[e]=t[e];return function(n){for(e=0;a>e;++e)u[e]=r[e](n);return u}}function gu(n){return function(t){return 0>=t?0:t>=1?1:n(t)}}function pu(n){return function(t){return 1-n(1-t)}}function vu(n){return function(t){return.5*(.5>t?n(2*t):2-n(2-2*t))}}function du(n){return n*n}function mu(n){return n*n*n}function yu(n){if(0>=n)return 0;if(n>=1)return 1;var t=n*n,e=t*n;return 4*(.5>n?e:3*(n-t)+e-.75)}function xu(n){return function(t){return Math.pow(t,n)}}function Mu(n){return 1-Math.cos(n*Ea)}function _u(n){return Math.pow(2,10*(n-1))}function bu(n){return 1-Math.sqrt(1-n*n)}function wu(n,t){var e;return arguments.length<2&&(t=.45),arguments.length?e=t/ka*Math.asin(1/n):(n=1,e=t/4),function(r){return 1+n*Math.pow(2,-10*r)*Math.sin((r-e)*ka/t)}}function Su(n){return n||(n=1.70158),function(t){return t*t*((n+1)*t-n)}}function ku(n){return 
1/2.75>n?7.5625*n*n:2/2.75>n?7.5625*(n-=1.5/2.75)*n+.75:2.5/2.75>n?7.5625*(n-=2.25/2.75)*n+.9375:7.5625*(n-=2.625/2.75)*n+.984375}function Eu(n,t){n=Xo.hcl(n),t=Xo.hcl(t);var e=n.h,r=n.c,u=n.l,i=t.h-e,o=t.c-r,a=t.l-u;return isNaN(o)&&(o=0,r=isNaN(r)?t.c:r),isNaN(i)?(i=0,e=isNaN(e)?t.h:e):i>180?i-=360:-180>i&&(i+=360),function(n){return rt(e+i*n,r+o*n,u+a*n)+""}}function Au(n,t){n=Xo.hsl(n),t=Xo.hsl(t);var e=n.h,r=n.s,u=n.l,i=t.h-e,o=t.s-r,a=t.l-u;return isNaN(o)&&(o=0,r=isNaN(r)?t.s:r),isNaN(i)?(i=0,e=isNaN(e)?t.h:e):i>180?i-=360:-180>i&&(i+=360),function(n){return nt(e+i*n,r+o*n,u+a*n)+""}}function Cu(n,t){n=Xo.lab(n),t=Xo.lab(t);var e=n.l,r=n.a,u=n.b,i=t.l-e,o=t.a-r,a=t.b-u;return function(n){return ot(e+i*n,r+o*n,u+a*n)+""}}function Nu(n,t){return t-=n,function(e){return Math.round(n+t*e)}}function Lu(n){var t=[n.a,n.b],e=[n.c,n.d],r=qu(t),u=Tu(t,e),i=qu(zu(e,t,-u))||0;t[0]*e[1]<e[0]*t[1]&&(t[0]*=-1,t[1]*=-1,r*=-1,u*=-1),this.rotate=(r?Math.atan2(t[1],t[0]):Math.atan2(-e[0],e[1]))*La,this.translate=[n.e,n.f],this.scale=[r,i],this.skew=i?Math.atan2(u,i)*La:0}function Tu(n,t){return n[0]*t[0]+n[1]*t[1]}function qu(n){var t=Math.sqrt(Tu(n,n));return t&&(n[0]/=t,n[1]/=t),t}function zu(n,t,e){return n[0]+=e*t[0],n[1]+=e*t[1],n}function Ru(n,t){var e,r=[],u=[],i=Xo.transform(n),o=Xo.transform(t),a=i.translate,c=o.translate,s=i.rotate,l=o.rotate,f=i.skew,h=o.skew,g=i.scale,p=o.scale;return 
a[0]!=c[0]||a[1]!=c[1]?(r.push("translate(",null,",",null,")"),u.push({i:1,x:su(a[0],c[0])},{i:3,x:su(a[1],c[1])})):c[0]||c[1]?r.push("translate("+c+")"):r.push(""),s!=l?(s-l>180?l+=360:l-s>180&&(s+=360),u.push({i:r.push(r.pop()+"rotate(",null,")")-2,x:su(s,l)})):l&&r.push(r.pop()+"rotate("+l+")"),f!=h?u.push({i:r.push(r.pop()+"skewX(",null,")")-2,x:su(f,h)}):h&&r.push(r.pop()+"skewX("+h+")"),g[0]!=p[0]||g[1]!=p[1]?(e=r.push(r.pop()+"scale(",null,",",null,")"),u.push({i:e-4,x:su(g[0],p[0])},{i:e-2,x:su(g[1],p[1])})):(1!=p[0]||1!=p[1])&&r.push(r.pop()+"scale("+p+")"),e=u.length,function(n){for(var t,i=-1;++i<e;)r[(t=u[i]).i]=t.x(n);return r.join("")}}function Du(n,t){return t=t-(n=+n)?1/(t-n):0,function(e){return(e-n)*t}}function Pu(n,t){return t=t-(n=+n)?1/(t-n):0,function(e){return Math.max(0,Math.min(1,(e-n)*t))}}function Uu(n){for(var t=n.source,e=n.target,r=Hu(t,e),u=[t];t!==r;)t=t.parent,u.push(t);for(var i=u.length;e!==r;)u.splice(i,0,e),e=e.parent;return u}function ju(n){for(var t=[],e=n.parent;null!=e;)t.push(n),n=e,e=e.parent;return t.push(n),t}function Hu(n,t){if(n===t)return n;for(var e=ju(n),r=ju(t),u=e.pop(),i=r.pop(),o=null;u===i;)o=u,u=e.pop(),i=r.pop();return o}function Fu(n){n.fixed|=2}function Ou(n){n.fixed&=-7}function Yu(n){n.fixed|=4,n.px=n.x,n.py=n.y}function Iu(n){n.fixed&=-5}function Zu(n,t,e){var r=0,u=0;if(n.charge=0,!n.leaf)for(var i,o=n.nodes,a=o.length,c=-1;++c<a;)i=o[c],null!=i&&(Zu(i,t,e),n.charge+=i.charge,r+=i.charge*i.cx,u+=i.charge*i.cy);if(n.point){n.leaf||(n.point.x+=Math.random()-.5,n.point.y+=Math.random()-.5);var s=t*e[n.point.index];n.charge+=n.pointCharge=s,r+=s*n.point.x,u+=s*n.point.y}n.cx=r/n.charge,n.cy=u/n.charge}function Vu(n,t){return Xo.rebind(n,t,"sort","children","value"),n.nodes=n,n.links=Wu,n}function Xu(n){return n.children}function $u(n){return n.value}function Bu(n,t){return t.value-n.value}function Wu(n){return 
Xo.merge(n.map(function(n){return(n.children||[]).map(function(t){return{source:n,target:t}})}))}function Ju(n){return n.x}function Gu(n){return n.y}function Ku(n,t,e){n.y0=t,n.y=e}function Qu(n){return Xo.range(n.length)}function ni(n){for(var t=-1,e=n[0].length,r=[];++t<e;)r[t]=0;return r}function ti(n){for(var t,e=1,r=0,u=n[0][1],i=n.length;i>e;++e)(t=n[e][1])>u&&(r=e,u=t);return r}function ei(n){return n.reduce(ri,0)}function ri(n,t){return n+t[1]}function ui(n,t){return ii(n,Math.ceil(Math.log(t.length)/Math.LN2+1))}function ii(n,t){for(var e=-1,r=+n[0],u=(n[1]-r)/t,i=[];++e<=t;)i[e]=u*e+r;return i}function oi(n){return[Xo.min(n),Xo.max(n)]}function ai(n,t){return n.parent==t.parent?1:2}function ci(n){var t=n.children;return t&&t.length?t[0]:n._tree.thread}function si(n){var t,e=n.children;return e&&(t=e.length)?e[t-1]:n._tree.thread}function li(n,t){var e=n.children;if(e&&(u=e.length))for(var r,u,i=-1;++i<u;)t(r=li(e[i],t),n)>0&&(n=r);return n}function fi(n,t){return n.x-t.x}function hi(n,t){return t.x-n.x}function gi(n,t){return n.depth-t.depth}function pi(n,t){function e(n,r){var u=n.children;if(u&&(o=u.length))for(var i,o,a=null,c=-1;++c<o;)i=u[c],e(i,a),a=i;t(n,r)}e(n,null)}function vi(n){for(var t,e=0,r=0,u=n.children,i=u.length;--i>=0;)t=u[i]._tree,t.prelim+=e,t.mod+=e,e+=t.shift+(r+=t.change)}function di(n,t,e){n=n._tree,t=t._tree;var r=e/(t.number-n.number);n.change+=r,t.change-=r,t.shift+=e,t.prelim+=e,t.mod+=e}function mi(n,t,e){return n._tree.ancestor.parent==t.parent?n._tree.ancestor:e}function yi(n,t){return n.value-t.value}function xi(n,t){var e=n._pack_next;n._pack_next=t,t._pack_prev=n,t._pack_next=e,e._pack_prev=t}function Mi(n,t){n._pack_next=t,t._pack_prev=n}function _i(n,t){var e=t.x-n.x,r=t.y-n.y,u=n.r+t.r;return.999*u*u>e*e+r*r}function bi(n){function t(n){l=Math.min(n.x-n.r,l),f=Math.max(n.x+n.r,f),h=Math.min(n.y-n.r,h),g=Math.max(n.y+n.r,g)}if((e=n.children)&&(s=e.length)){var 
e,r,u,i,o,a,c,s,l=1/0,f=-1/0,h=1/0,g=-1/0;if(e.forEach(wi),r=e[0],r.x=-r.r,r.y=0,t(r),s>1&&(u=e[1],u.x=u.r,u.y=0,t(u),s>2))for(i=e[2],Ei(r,u,i),t(i),xi(r,i),r._pack_prev=i,xi(i,u),u=r._pack_next,o=3;s>o;o++){Ei(r,u,i=e[o]);var p=0,v=1,d=1;for(a=u._pack_next;a!==u;a=a._pack_next,v++)if(_i(a,i)){p=1;break}if(1==p)for(c=r._pack_prev;c!==a._pack_prev&&!_i(c,i);c=c._pack_prev,d++);p?(d>v||v==d&&u.r<r.r?Mi(r,u=a):Mi(r=c,u),o--):(xi(r,i),u=i,t(i))}var m=(l+f)/2,y=(h+g)/2,x=0;for(o=0;s>o;o++)i=e[o],i.x-=m,i.y-=y,x=Math.max(x,i.r+Math.sqrt(i.x*i.x+i.y*i.y));n.r=x,e.forEach(Si)}}function wi(n){n._pack_next=n._pack_prev=n}function Si(n){delete n._pack_next,delete n._pack_prev}function ki(n,t,e,r){var u=n.children;if(n.x=t+=r*n.x,n.y=e+=r*n.y,n.r*=r,u)for(var i=-1,o=u.length;++i<o;)ki(u[i],t,e,r)}function Ei(n,t,e){var r=n.r+e.r,u=t.x-n.x,i=t.y-n.y;if(r&&(u||i)){var o=t.r+e.r,a=u*u+i*i;o*=o,r*=r;var c=.5+(r-o)/(2*a),s=Math.sqrt(Math.max(0,2*o*(r+a)-(r-=a)*r-o*o))/(2*a);e.x=n.x+c*u+s*i,e.y=n.y+c*i-s*u}else e.x=n.x+r,e.y=n.y}function Ai(n){return 1+Xo.max(n,function(n){return n.y})}function Ci(n){return n.reduce(function(n,t){return n+t.x},0)/n.length}function Ni(n){var t=n.children;return t&&t.length?Ni(t[0]):n}function Li(n){var t,e=n.children;return e&&(t=e.length)?Li(e[t-1]):n}function Ti(n){return{x:n.x,y:n.y,dx:n.dx,dy:n.dy}}function qi(n,t){var e=n.x+t[3],r=n.y+t[0],u=n.dx-t[1]-t[3],i=n.dy-t[0]-t[2];return 0>u&&(e+=u/2,u=0),0>i&&(r+=i/2,i=0),{x:e,y:r,dx:u,dy:i}}function zi(n){var t=n[0],e=n[n.length-1];return e>t?[t,e]:[e,t]}function Ri(n){return n.rangeExtent?n.rangeExtent():zi(n.range())}function Di(n,t,e,r){var u=e(n[0],n[1]),i=r(t[0],t[1]);return function(n){return i(u(n))}}function Pi(n,t){var e,r=0,u=n.length-1,i=n[r],o=n[u];return i>o&&(e=r,r=u,u=e,e=i,i=o,o=e),n[r]=t.floor(i),n[u]=t.ceil(o),n}function Ui(n){return n?{floor:function(t){return Math.floor(t/n)*n},ceil:function(t){return Math.ceil(t/n)*n}}:ls}function ji(n,t,e,r){var 
u=[],i=[],o=0,a=Math.min(n.length,t.length)-1;for(n[a]<n[0]&&(n=n.slice().reverse(),t=t.slice().reverse());++o<=a;)u.push(e(n[o-1],n[o])),i.push(r(t[o-1],t[o]));return function(t){var e=Xo.bisect(n,t,1,a)-1;return i[e](u[e](t))}}function Hi(n,t,e,r){function u(){var u=Math.min(n.length,t.length)>2?ji:Di,c=r?Pu:Du;return o=u(n,t,c,e),a=u(t,n,c,fu),i}function i(n){return o(n)}var o,a;return i.invert=function(n){return a(n)},i.domain=function(t){return arguments.length?(n=t.map(Number),u()):n},i.range=function(n){return arguments.length?(t=n,u()):t},i.rangeRound=function(n){return i.range(n).interpolate(Nu)},i.clamp=function(n){return arguments.length?(r=n,u()):r},i.interpolate=function(n){return arguments.length?(e=n,u()):e},i.ticks=function(t){return Ii(n,t)},i.tickFormat=function(t,e){return Zi(n,t,e)},i.nice=function(t){return Oi(n,t),u()},i.copy=function(){return Hi(n,t,e,r)},u()}function Fi(n,t){return Xo.rebind(n,t,"range","rangeRound","interpolate","clamp")}function Oi(n,t){return Pi(n,Ui(Yi(n,t)[2]))}function Yi(n,t){null==t&&(t=10);var e=zi(n),r=e[1]-e[0],u=Math.pow(10,Math.floor(Math.log(r/t)/Math.LN10)),i=t/r*u;return.15>=i?u*=10:.35>=i?u*=5:.75>=i&&(u*=2),e[0]=Math.ceil(e[0]/u)*u,e[1]=Math.floor(e[1]/u)*u+.5*u,e[2]=u,e}function Ii(n,t){return Xo.range.apply(Xo,Yi(n,t))}function Zi(n,t,e){var r=Yi(n,t);return Xo.format(e?e.replace(Qa,function(n,t,e,u,i,o,a,c,s,l){return[t,e,u,i,o,a,c,s||"."+Xi(l,r),l].join("")}):",."+Vi(r[2])+"f")}function Vi(n){return-Math.floor(Math.log(n)/Math.LN10+.01)}function Xi(n,t){var e=Vi(t[2]);return n in fs?Math.abs(e-Vi(Math.max(Math.abs(t[0]),Math.abs(t[1]))))+ +("e"!==n):e-2*("%"===n)}function $i(n,t,e,r){function u(n){return(e?Math.log(0>n?0:n):-Math.log(n>0?0:-n))/Math.log(t)}function i(n){return e?Math.pow(t,n):-Math.pow(t,-n)}function o(t){return n(u(t))}return o.invert=function(t){return i(n.invert(t))},o.domain=function(t){return 
arguments.length?(e=t[0]>=0,n.domain((r=t.map(Number)).map(u)),o):r},o.base=function(e){return arguments.length?(t=+e,n.domain(r.map(u)),o):t},o.nice=function(){var t=Pi(r.map(u),e?Math:gs);return n.domain(t),r=t.map(i),o},o.ticks=function(){var n=zi(r),o=[],a=n[0],c=n[1],s=Math.floor(u(a)),l=Math.ceil(u(c)),f=t%1?2:t;if(isFinite(l-s)){if(e){for(;l>s;s++)for(var h=1;f>h;h++)o.push(i(s)*h);o.push(i(s))}else for(o.push(i(s));s++<l;)for(var h=f-1;h>0;h--)o.push(i(s)*h);for(s=0;o[s]<a;s++);for(l=o.length;o[l-1]>c;l--);o=o.slice(s,l)}return o},o.tickFormat=function(n,t){if(!arguments.length)return hs;arguments.length<2?t=hs:"function"!=typeof t&&(t=Xo.format(t));var r,a=Math.max(.1,n/o.ticks().length),c=e?(r=1e-12,Math.ceil):(r=-1e-12,Math.floor);return function(n){return n/i(c(u(n)+r))<=a?t(n):""}},o.copy=function(){return $i(n.copy(),t,e,r)},Fi(o,n)}function Bi(n,t,e){function r(t){return n(u(t))}var u=Wi(t),i=Wi(1/t);return r.invert=function(t){return i(n.invert(t))},r.domain=function(t){return arguments.length?(n.domain((e=t.map(Number)).map(u)),r):e},r.ticks=function(n){return Ii(e,n)},r.tickFormat=function(n,t){return Zi(e,n,t)},r.nice=function(n){return r.domain(Oi(e,n))},r.exponent=function(o){return arguments.length?(u=Wi(t=o),i=Wi(1/t),n.domain(e.map(u)),r):t},r.copy=function(){return Bi(n.copy(),t,e)},Fi(r,n)}function Wi(n){return function(t){return 0>t?-Math.pow(-t,n):Math.pow(t,n)}}function Ji(n,t){function e(e){return o[((i.get(e)||"range"===t.t&&i.set(e,n.push(e)))-1)%o.length]}function r(t,e){return Xo.range(n.length).map(function(n){return t+e*n})}var i,o,a;return e.domain=function(r){if(!arguments.length)return n;n=[],i=new u;for(var o,a=-1,c=r.length;++a<c;)i.has(o=r[a])||i.set(o,n.push(o));return e[t.t].apply(e,t.a)},e.range=function(n){return arguments.length?(o=n,a=0,t={t:"range",a:arguments},e):o},e.rangePoints=function(u,i){arguments.length<2&&(i=0);var c=u[0],s=u[1],l=(s-c)/(Math.max(1,n.length-1)+i);return 
o=r(n.length<2?(c+s)/2:c+l*i/2,l),a=0,t={t:"rangePoints",a:arguments},e},e.rangeBands=function(u,i,c){arguments.length<2&&(i=0),arguments.length<3&&(c=i);var s=u[1]<u[0],l=u[s-0],f=u[1-s],h=(f-l)/(n.length-i+2*c);return o=r(l+h*c,h),s&&o.reverse(),a=h*(1-i),t={t:"rangeBands",a:arguments},e},e.rangeRoundBands=function(u,i,c){arguments.length<2&&(i=0),arguments.length<3&&(c=i);var s=u[1]<u[0],l=u[s-0],f=u[1-s],h=Math.floor((f-l)/(n.length-i+2*c)),g=f-l-(n.length-i)*h;return o=r(l+Math.round(g/2),h),s&&o.reverse(),a=Math.round(h*(1-i)),t={t:"rangeRoundBands",a:arguments},e},e.rangeBand=function(){return a},e.rangeExtent=function(){return zi(t.a[0])},e.copy=function(){return Ji(n,t)},e.domain(n)}function Gi(n,t){function e(){var e=0,i=t.length;for(u=[];++e<i;)u[e-1]=Xo.quantile(n,e/i);return r}function r(n){return isNaN(n=+n)?void 0:t[Xo.bisect(u,n)]}var u;return r.domain=function(t){return arguments.length?(n=t.filter(function(n){return!isNaN(n)}).sort(Xo.ascending),e()):n},r.range=function(n){return arguments.length?(t=n,e()):t},r.quantiles=function(){return u},r.invertExtent=function(e){return e=t.indexOf(e),0>e?[0/0,0/0]:[e>0?u[e-1]:n[0],e<u.length?u[e]:n[n.length-1]]},r.copy=function(){return Gi(n,t)},e()}function Ki(n,t,e){function r(t){return e[Math.max(0,Math.min(o,Math.floor(i*(t-n))))]}function u(){return i=e.length/(t-n),o=e.length-1,r}var i,o;return r.domain=function(e){return arguments.length?(n=+e[0],t=+e[e.length-1],u()):[n,t]},r.range=function(n){return arguments.length?(e=n,u()):e},r.invertExtent=function(t){return t=e.indexOf(t),t=0>t?0/0:t/i+n,[t,t+1/i]},r.copy=function(){return Ki(n,t,e)},u()}function Qi(n,t){function e(e){return e>=e?t[Xo.bisect(n,e)]:void 0}return e.domain=function(t){return arguments.length?(n=t,e):n},e.range=function(n){return arguments.length?(t=n,e):t},e.invertExtent=function(e){return e=t.indexOf(e),[n[e-1],n[e]]},e.copy=function(){return Qi(n,t)},e}function no(n){function t(n){return+n}return 
t.invert=t,t.domain=t.range=function(e){return arguments.length?(n=e.map(t),t):n},t.ticks=function(t){return Ii(n,t)},t.tickFormat=function(t,e){return Zi(n,t,e)},t.copy=function(){return no(n)},t}function to(n){return n.innerRadius}function eo(n){return n.outerRadius}function ro(n){return n.startAngle}function uo(n){return n.endAngle}function io(n){function t(t){function o(){s.push("M",i(n(l),a))}for(var c,s=[],l=[],f=-1,h=t.length,g=_t(e),p=_t(r);++f<h;)u.call(this,c=t[f],f)?l.push([+g.call(this,c,f),+p.call(this,c,f)]):l.length&&(o(),l=[]);return l.length&&o(),s.length?s.join(""):null}var e=br,r=wr,u=be,i=oo,o=i.key,a=.7;return t.x=function(n){return arguments.length?(e=n,t):e},t.y=function(n){return arguments.length?(r=n,t):r},t.defined=function(n){return arguments.length?(u=n,t):u},t.interpolate=function(n){return arguments.length?(o="function"==typeof n?i=n:(i=Ms.get(n)||oo).key,t):o},t.tension=function(n){return arguments.length?(a=n,t):a},t}function oo(n){return n.join("L")}function ao(n){return oo(n)+"Z"}function co(n){for(var t=0,e=n.length,r=n[0],u=[r[0],",",r[1]];++t<e;)u.push("H",(r[0]+(r=n[t])[0])/2,"V",r[1]);return e>1&&u.push("H",r[0]),u.join("")}function so(n){for(var t=0,e=n.length,r=n[0],u=[r[0],",",r[1]];++t<e;)u.push("V",(r=n[t])[1],"H",r[0]);return u.join("")}function lo(n){for(var t=0,e=n.length,r=n[0],u=[r[0],",",r[1]];++t<e;)u.push("H",(r=n[t])[0],"V",r[1]);return u.join("")}function fo(n,t){return n.length<4?oo(n):n[1]+po(n.slice(1,n.length-1),vo(n,t))}function ho(n,t){return n.length<3?oo(n):n[0]+po((n.push(n[0]),n),vo([n[n.length-2]].concat(n,[n[1]]),t))}function go(n,t){return n.length<3?oo(n):n[0]+po(n,vo(n,t))}function po(n,t){if(t.length<1||n.length!=t.length&&n.length!=t.length+2)return oo(n);var 
e=n.length!=t.length,r="",u=n[0],i=n[1],o=t[0],a=o,c=1;if(e&&(r+="Q"+(i[0]-2*o[0]/3)+","+(i[1]-2*o[1]/3)+","+i[0]+","+i[1],u=n[1],c=2),t.length>1){a=t[1],i=n[c],c++,r+="C"+(u[0]+o[0])+","+(u[1]+o[1])+","+(i[0]-a[0])+","+(i[1]-a[1])+","+i[0]+","+i[1];for(var s=2;s<t.length;s++,c++)i=n[c],a=t[s],r+="S"+(i[0]-a[0])+","+(i[1]-a[1])+","+i[0]+","+i[1]}if(e){var l=n[c];r+="Q"+(i[0]+2*a[0]/3)+","+(i[1]+2*a[1]/3)+","+l[0]+","+l[1]}return r}function vo(n,t){for(var e,r=[],u=(1-t)/2,i=n[0],o=n[1],a=1,c=n.length;++a<c;)e=i,i=o,o=n[a],r.push([u*(o[0]-e[0]),u*(o[1]-e[1])]);return r}function mo(n){if(n.length<3)return oo(n);var t=1,e=n.length,r=n[0],u=r[0],i=r[1],o=[u,u,u,(r=n[1])[0]],a=[i,i,i,r[1]],c=[u,",",i,"L",_o(ws,o),",",_o(ws,a)];for(n.push(n[e-1]);++t<=e;)r=n[t],o.shift(),o.push(r[0]),a.shift(),a.push(r[1]),bo(c,o,a);return n.pop(),c.push("L",r),c.join("")}function yo(n){if(n.length<4)return oo(n);for(var t,e=[],r=-1,u=n.length,i=[0],o=[0];++r<3;)t=n[r],i.push(t[0]),o.push(t[1]);for(e.push(_o(ws,i)+","+_o(ws,o)),--r;++r<u;)t=n[r],i.shift(),i.push(t[0]),o.shift(),o.push(t[1]),bo(e,i,o);return e.join("")}function xo(n){for(var t,e,r=-1,u=n.length,i=u+4,o=[],a=[];++r<4;)e=n[r%u],o.push(e[0]),a.push(e[1]);for(t=[_o(ws,o),",",_o(ws,a)],--r;++r<i;)e=n[r%u],o.shift(),o.push(e[0]),a.shift(),a.push(e[1]),bo(t,o,a);return t.join("")}function Mo(n,t){var e=n.length-1;if(e)for(var r,u,i=n[0][0],o=n[0][1],a=n[e][0]-i,c=n[e][1]-o,s=-1;++s<=e;)r=n[s],u=s/e,r[0]=t*r[0]+(1-t)*(i+u*a),r[1]=t*r[1]+(1-t)*(o+u*c);return mo(n)}function _o(n,t){return n[0]*t[0]+n[1]*t[1]+n[2]*t[2]+n[3]*t[3]}function bo(n,t,e){n.push("C",_o(_s,t),",",_o(_s,e),",",_o(bs,t),",",_o(bs,e),",",_o(ws,t),",",_o(ws,e))}function wo(n,t){return(t[1]-n[1])/(t[0]-n[0])}function So(n){for(var t=0,e=n.length-1,r=[],u=n[0],i=n[1],o=r[0]=wo(u,i);++t<e;)r[t]=(o+(o=wo(u=i,i=n[t+1])))/2;return r[t]=o,r}function ko(n){for(var 
t,e,r,u,i=[],o=So(n),a=-1,c=n.length-1;++a<c;)t=wo(n[a],n[a+1]),oa(t)<Aa?o[a]=o[a+1]=0:(e=o[a]/t,r=o[a+1]/t,u=e*e+r*r,u>9&&(u=3*t/Math.sqrt(u),o[a]=u*e,o[a+1]=u*r));for(a=-1;++a<=c;)u=(n[Math.min(c,a+1)][0]-n[Math.max(0,a-1)][0])/(6*(1+o[a]*o[a])),i.push([u||0,o[a]*u||0]);return i}function Eo(n){return n.length<3?oo(n):n[0]+po(n,ko(n))}function Ao(n){for(var t,e,r,u=-1,i=n.length;++u<i;)t=n[u],e=t[0],r=t[1]+ys,t[0]=e*Math.cos(r),t[1]=e*Math.sin(r);return n}function Co(n){function t(t){function c(){v.push("M",a(n(m),f),l,s(n(d.reverse()),f),"Z")}for(var h,g,p,v=[],d=[],m=[],y=-1,x=t.length,M=_t(e),_=_t(u),b=e===r?function(){return g}:_t(r),w=u===i?function(){return p}:_t(i);++y<x;)o.call(this,h=t[y],y)?(d.push([g=+M.call(this,h,y),p=+_.call(this,h,y)]),m.push([+b.call(this,h,y),+w.call(this,h,y)])):d.length&&(c(),d=[],m=[]);return d.length&&c(),v.length?v.join(""):null}var e=br,r=br,u=0,i=wr,o=be,a=oo,c=a.key,s=a,l="L",f=.7;return t.x=function(n){return arguments.length?(e=r=n,t):r},t.x0=function(n){return arguments.length?(e=n,t):e},t.x1=function(n){return arguments.length?(r=n,t):r},t.y=function(n){return arguments.length?(u=i=n,t):i},t.y0=function(n){return arguments.length?(u=n,t):u},t.y1=function(n){return arguments.length?(i=n,t):i},t.defined=function(n){return arguments.length?(o=n,t):o},t.interpolate=function(n){return arguments.length?(c="function"==typeof n?a=n:(a=Ms.get(n)||oo).key,s=a.reverse||a,l=a.closed?"M":"L",t):c},t.tension=function(n){return arguments.length?(f=n,t):f},t}function No(n){return n.radius}function Lo(n){return[n.x,n.y]}function To(n){return function(){var t=n.apply(this,arguments),e=t[0],r=t[1]+ys;return[e*Math.cos(r),e*Math.sin(r)]}}function qo(){return 64}function zo(){return"circle"}function Ro(n){var t=Math.sqrt(n/Sa);return"M0,"+t+"A"+t+","+t+" 0 1,1 0,"+-t+"A"+t+","+t+" 0 1,1 0,"+t+"Z"}function Do(n,t){return fa(n,Ns),n.id=t,n}function Po(n,t,e,r){var u=n.id;return R(n,"function"==typeof 
e?function(n,i,o){n.__transition__[u].tween.set(t,r(e.call(n,n.__data__,i,o)))}:(e=r(e),function(n){n.__transition__[u].tween.set(t,e)}))}function Uo(n){return null==n&&(n=""),function(){this.textContent=n}}function jo(n,t,e,r){var i=n.__transition__||(n.__transition__={active:0,count:0}),o=i[e];if(!o){var a=r.time;o=i[e]={tween:new u,time:a,ease:r.ease,delay:r.delay,duration:r.duration},++i.count,Xo.timer(function(r){function u(r){return i.active>e?s():(i.active=e,o.event&&o.event.start.call(n,l,t),o.tween.forEach(function(e,r){(r=r.call(n,l,t))&&v.push(r)}),Xo.timer(function(){return p.c=c(r||1)?be:c,1},0,a),void 0)}function c(r){if(i.active!==e)return s();for(var u=r/g,a=f(u),c=v.length;c>0;)v[--c].call(n,a);return u>=1?(o.event&&o.event.end.call(n,l,t),s()):void 0}function s(){return--i.count?delete i[e]:delete n.__transition__,1}var l=n.__data__,f=o.ease,h=o.delay,g=o.duration,p=Ja,v=[];return p.t=h+a,r>=h?u(r-h):(p.c=u,void 0)},0,a)}}function Ho(n,t){n.attr("transform",function(n){return"translate("+t(n)+",0)"})}function Fo(n,t){n.attr("transform",function(n){return"translate(0,"+t(n)+")"})}function Oo(n){return n.toISOString()}function Yo(n,t,e){function r(t){return n(t)}function u(n,e){var r=n[1]-n[0],u=r/e,i=Xo.bisect(js,u);return i==js.length?[t.year,Yi(n.map(function(n){return n/31536e6}),e)[2]]:i?t[u/js[i-1]<js[i]/u?i-1:i]:[Os,Yi(n,e)[2]]}return r.invert=function(t){return Io(n.invert(t))},r.domain=function(t){return arguments.length?(n.domain(t),r):n.domain().map(Io)},r.nice=function(n,t){function e(e){return!isNaN(e)&&!n.range(e,Io(+e+1),t).length}var i=r.domain(),o=zi(i),a=null==n?u(o,10):"number"==typeof n&&u(o,n);return a&&(n=a[0],t=a[1]),r.domain(Pi(i,t>1?{floor:function(t){for(;e(t=n.floor(t));)t=Io(t-1);return t},ceil:function(t){for(;e(t=n.ceil(t));)t=Io(+t+1);return t}}:n))},r.ticks=function(n,t){var e=zi(r.domain()),i=null==n?u(e,10):"number"==typeof n?u(e,n):!n.range&&[{range:n},t];return 
i&&(n=i[0],t=i[1]),n.range(e[0],Io(+e[1]+1),1>t?1:t)},r.tickFormat=function(){return e},r.copy=function(){return Yo(n.copy(),t,e)},Fi(r,n)}function Io(n){return new Date(n)}function Zo(n){return JSON.parse(n.responseText)}function Vo(n){var t=Wo.createRange();return t.selectNode(Wo.body),t.createContextualFragment(n.responseText)}var Xo={version:"3.4.3"};Date.now||(Date.now=function(){return+new Date});var $o=[].slice,Bo=function(n){return $o.call(n)},Wo=document,Jo=Wo.documentElement,Go=window;try{Bo(Jo.childNodes)[0].nodeType}catch(Ko){Bo=function(n){for(var t=n.length,e=new Array(t);t--;)e[t]=n[t];return e}}try{Wo.createElement("div").style.setProperty("opacity",0,"")}catch(Qo){var na=Go.Element.prototype,ta=na.setAttribute,ea=na.setAttributeNS,ra=Go.CSSStyleDeclaration.prototype,ua=ra.setProperty;na.setAttribute=function(n,t){ta.call(this,n,t+"")},na.setAttributeNS=function(n,t,e){ea.call(this,n,t,e+"")},ra.setProperty=function(n,t,e){ua.call(this,n,t+"",e)}}Xo.ascending=function(n,t){return t>n?-1:n>t?1:n>=t?0:0/0},Xo.descending=function(n,t){return n>t?-1:t>n?1:t>=n?0:0/0},Xo.min=function(n,t){var e,r,u=-1,i=n.length;if(1===arguments.length){for(;++u<i&&!(null!=(e=n[u])&&e>=e);)e=void 0;for(;++u<i;)null!=(r=n[u])&&e>r&&(e=r)}else{for(;++u<i&&!(null!=(e=t.call(n,n[u],u))&&e>=e);)e=void 0;for(;++u<i;)null!=(r=t.call(n,n[u],u))&&e>r&&(e=r)}return e},Xo.max=function(n,t){var e,r,u=-1,i=n.length;if(1===arguments.length){for(;++u<i&&!(null!=(e=n[u])&&e>=e);)e=void 0;for(;++u<i;)null!=(r=n[u])&&r>e&&(e=r)}else{for(;++u<i&&!(null!=(e=t.call(n,n[u],u))&&e>=e);)e=void 0;for(;++u<i;)null!=(r=t.call(n,n[u],u))&&r>e&&(e=r)}return e},Xo.extent=function(n,t){var e,r,u,i=-1,o=n.length;if(1===arguments.length){for(;++i<o&&!(null!=(e=u=n[i])&&e>=e);)e=u=void 0;for(;++i<o;)null!=(r=n[i])&&(e>r&&(e=r),r>u&&(u=r))}else{for(;++i<o&&!(null!=(e=u=t.call(n,n[i],i))&&e>=e);)e=void 0;for(;++i<o;)null!=(r=t.call(n,n[i],i))&&(e>r&&(e=r),r>u&&(u=r))}return[e,u]},Xo.sum=function(n,t){var 
e,r=0,u=n.length,i=-1;if(1===arguments.length)for(;++i<u;)isNaN(e=+n[i])||(r+=e);else for(;++i<u;)isNaN(e=+t.call(n,n[i],i))||(r+=e);return r},Xo.mean=function(t,e){var r,u=t.length,i=0,o=-1,a=0;if(1===arguments.length)for(;++o<u;)n(r=t[o])&&(i+=(r-i)/++a);else for(;++o<u;)n(r=e.call(t,t[o],o))&&(i+=(r-i)/++a);return a?i:void 0},Xo.quantile=function(n,t){var e=(n.length-1)*t+1,r=Math.floor(e),u=+n[r-1],i=e-r;return i?u+i*(n[r]-u):u},Xo.median=function(t,e){return arguments.length>1&&(t=t.map(e)),t=t.filter(n),t.length?Xo.quantile(t.sort(Xo.ascending),.5):void 0},Xo.bisector=function(n){return{left:function(t,e,r,u){for(arguments.length<3&&(r=0),arguments.length<4&&(u=t.length);u>r;){var i=r+u>>>1;n.call(t,t[i],i)<e?r=i+1:u=i}return r},right:function(t,e,r,u){for(arguments.length<3&&(r=0),arguments.length<4&&(u=t.length);u>r;){var i=r+u>>>1;e<n.call(t,t[i],i)?u=i:r=i+1}return r}}};var ia=Xo.bisector(function(n){return n});Xo.bisectLeft=ia.left,Xo.bisect=Xo.bisectRight=ia.right,Xo.shuffle=function(n){for(var t,e,r=n.length;r;)e=0|Math.random()*r--,t=n[r],n[r]=n[e],n[e]=t;return n},Xo.permute=function(n,t){for(var e=t.length,r=new Array(e);e--;)r[e]=n[t[e]];return r},Xo.pairs=function(n){for(var t,e=0,r=n.length-1,u=n[0],i=new Array(0>r?0:r);r>e;)i[e]=[t=u,u=n[++e]];return i},Xo.zip=function(){if(!(u=arguments.length))return[];for(var n=-1,e=Xo.min(arguments,t),r=new Array(e);++n<e;)for(var u,i=-1,o=r[n]=new Array(u);++i<u;)o[i]=arguments[i][n];return r},Xo.transpose=function(n){return Xo.zip.apply(Xo,n)},Xo.keys=function(n){var t=[];for(var e in n)t.push(e);return t},Xo.values=function(n){var t=[];for(var e in n)t.push(n[e]);return t},Xo.entries=function(n){var t=[];for(var e in n)t.push({key:e,value:n[e]});return t},Xo.merge=function(n){for(var t,e,r,u=n.length,i=-1,o=0;++i<u;)o+=n[i].length;for(e=new Array(o);--u>=0;)for(r=n[u],t=r.length;--t>=0;)e[--o]=r[t];return e};var 
oa=Math.abs;Xo.range=function(n,t,r){if(arguments.length<3&&(r=1,arguments.length<2&&(t=n,n=0)),1/0===(t-n)/r)throw new Error("infinite range");var u,i=[],o=e(oa(r)),a=-1;if(n*=o,t*=o,r*=o,0>r)for(;(u=n+r*++a)>t;)i.push(u/o);else for(;(u=n+r*++a)<t;)i.push(u/o);return i},Xo.map=function(n){var t=new u;if(n instanceof u)n.forEach(function(n,e){t.set(n,e)});else for(var e in n)t.set(e,n[e]);return t},r(u,{has:i,get:function(n){return this[aa+n]},set:function(n,t){return this[aa+n]=t},remove:o,keys:a,values:function(){var n=[];return this.forEach(function(t,e){n.push(e)}),n},entries:function(){var n=[];return this.forEach(function(t,e){n.push({key:t,value:e})}),n},size:c,empty:s,forEach:function(n){for(var t in this)t.charCodeAt(0)===ca&&n.call(this,t.substring(1),this[t])}});var aa="\x00",ca=aa.charCodeAt(0);Xo.nest=function(){function n(t,a,c){if(c>=o.length)return r?r.call(i,a):e?a.sort(e):a;for(var s,l,f,h,g=-1,p=a.length,v=o[c++],d=new u;++g<p;)(h=d.get(s=v(l=a[g])))?h.push(l):d.set(s,[l]);return t?(l=t(),f=function(e,r){l.set(e,n(t,r,c))}):(l={},f=function(e,r){l[e]=n(t,r,c)}),d.forEach(f),l}function t(n,e){if(e>=o.length)return n;var r=[],u=a[e++];return n.forEach(function(n,u){r.push({key:n,values:t(u,e)})}),u?r.sort(function(n,t){return u(n.key,t.key)}):r}var e,r,i={},o=[],a=[];return i.map=function(t,e){return n(e,t,0)},i.entries=function(e){return t(n(Xo.map,e,0),0)},i.key=function(n){return o.push(n),i},i.sortKeys=function(n){return a[o.length-1]=n,i},i.sortValues=function(n){return e=n,i},i.rollup=function(n){return r=n,i},i},Xo.set=function(n){var t=new l;if(n)for(var e=0,r=n.length;r>e;++e)t.add(n[e]);return t},r(l,{has:i,add:function(n){return this[aa+n]=!0,n},remove:function(n){return n=aa+n,n in this&&delete this[n]},values:a,size:c,empty:s,forEach:function(n){for(var t in this)t.charCodeAt(0)===ca&&n.call(this,t.substring(1))}}),Xo.behavior={},Xo.rebind=function(n,t){for(var e,r=1,u=arguments.length;++r<u;)n[e=arguments[r]]=f(n,t,t[e]);return n};var 
sa=["webkit","ms","moz","Moz","o","O"];Xo.dispatch=function(){for(var n=new p,t=-1,e=arguments.length;++t<e;)n[arguments[t]]=v(n);return n},p.prototype.on=function(n,t){var e=n.indexOf("."),r="";if(e>=0&&(r=n.substring(e+1),n=n.substring(0,e)),n)return arguments.length<2?this[n].on(r):this[n].on(r,t);if(2===arguments.length){if(null==t)for(n in this)this.hasOwnProperty(n)&&this[n].on(r,null);return this}},Xo.event=null,Xo.requote=function(n){return n.replace(la,"\\$&")};var la=/[\\\^\$\*\+\?\|\[\]\(\)\.\{\}]/g,fa={}.__proto__?function(n,t){n.__proto__=t}:function(n,t){for(var e in t)n[e]=t[e]},ha=function(n,t){return t.querySelector(n)},ga=function(n,t){return t.querySelectorAll(n)},pa=Jo[h(Jo,"matchesSelector")],va=function(n,t){return pa.call(n,t)};"function"==typeof Sizzle&&(ha=function(n,t){return Sizzle(n,t)[0]||null},ga=Sizzle,va=Sizzle.matchesSelector),Xo.selection=function(){return xa};var da=Xo.selection.prototype=[];da.select=function(n){var t,e,r,u,i=[];n=M(n);for(var o=-1,a=this.length;++o<a;){i.push(t=[]),t.parentNode=(r=this[o]).parentNode;for(var c=-1,s=r.length;++c<s;)(u=r[c])?(t.push(e=n.call(u,u.__data__,c,o)),e&&"__data__"in u&&(e.__data__=u.__data__)):t.push(null)}return x(i)},da.selectAll=function(n){var t,e,r=[];n=_(n);for(var u=-1,i=this.length;++u<i;)for(var o=this[u],a=-1,c=o.length;++a<c;)(e=o[a])&&(r.push(t=Bo(n.call(e,e.__data__,a,u))),t.parentNode=e);return x(r)};var ma={svg:"http://www.w3.org/2000/svg",xhtml:"http://www.w3.org/1999/xhtml",xlink:"http://www.w3.org/1999/xlink",xml:"http://www.w3.org/XML/1998/namespace",xmlns:"http://www.w3.org/2000/xmlns/"};Xo.ns={prefix:ma,qualify:function(n){var t=n.indexOf(":"),e=n;return t>=0&&(e=n.substring(0,t),n=n.substring(t+1)),ma.hasOwnProperty(e)?{space:ma[e],local:n}:n}},da.attr=function(n,t){if(arguments.length<2){if("string"==typeof n){var e=this.node();return n=Xo.ns.qualify(n),n.local?e.getAttributeNS(n.space,n.local):e.getAttribute(n)}for(t in n)this.each(b(t,n[t]));return this}return 
this.each(b(n,t))},da.classed=function(n,t){if(arguments.length<2){if("string"==typeof n){var e=this.node(),r=(n=k(n)).length,u=-1;if(t=e.classList){for(;++u<r;)if(!t.contains(n[u]))return!1}else for(t=e.getAttribute("class");++u<r;)if(!S(n[u]).test(t))return!1;return!0}for(t in n)this.each(E(t,n[t]));return this}return this.each(E(n,t))},da.style=function(n,t,e){var r=arguments.length;if(3>r){if("string"!=typeof n){2>r&&(t="");for(e in n)this.each(C(e,n[e],t));return this}if(2>r)return Go.getComputedStyle(this.node(),null).getPropertyValue(n);e=""}return this.each(C(n,t,e))},da.property=function(n,t){if(arguments.length<2){if("string"==typeof n)return this.node()[n];for(t in n)this.each(N(t,n[t]));return this}return this.each(N(n,t))},da.text=function(n){return arguments.length?this.each("function"==typeof n?function(){var t=n.apply(this,arguments);this.textContent=null==t?"":t}:null==n?function(){this.textContent=""}:function(){this.textContent=n}):this.node().textContent},da.html=function(n){return arguments.length?this.each("function"==typeof n?function(){var t=n.apply(this,arguments);this.innerHTML=null==t?"":t}:null==n?function(){this.innerHTML=""}:function(){this.innerHTML=n}):this.node().innerHTML},da.append=function(n){return n=L(n),this.select(function(){return this.appendChild(n.apply(this,arguments))})},da.insert=function(n,t){return n=L(n),t=M(t),this.select(function(){return this.insertBefore(n.apply(this,arguments),t.apply(this,arguments)||null)})},da.remove=function(){return this.each(function(){var n=this.parentNode;n&&n.removeChild(this)})},da.data=function(n,t){function e(n,e){var r,i,o,a=n.length,f=e.length,h=Math.min(a,f),g=new Array(f),p=new Array(f),v=new Array(a);if(t){var d,m=new u,y=new 
u,x=[];for(r=-1;++r<a;)d=t.call(i=n[r],i.__data__,r),m.has(d)?v[r]=i:m.set(d,i),x.push(d);for(r=-1;++r<f;)d=t.call(e,o=e[r],r),(i=m.get(d))?(g[r]=i,i.__data__=o):y.has(d)||(p[r]=T(o)),y.set(d,o),m.remove(d);for(r=-1;++r<a;)m.has(x[r])&&(v[r]=n[r])}else{for(r=-1;++r<h;)i=n[r],o=e[r],i?(i.__data__=o,g[r]=i):p[r]=T(o);for(;f>r;++r)p[r]=T(e[r]);for(;a>r;++r)v[r]=n[r]}p.update=g,p.parentNode=g.parentNode=v.parentNode=n.parentNode,c.push(p),s.push(g),l.push(v)}var r,i,o=-1,a=this.length;if(!arguments.length){for(n=new Array(a=(r=this[0]).length);++o<a;)(i=r[o])&&(n[o]=i.__data__);return n}var c=D([]),s=x([]),l=x([]);if("function"==typeof n)for(;++o<a;)e(r=this[o],n.call(r,r.parentNode.__data__,o));else for(;++o<a;)e(r=this[o],n);return s.enter=function(){return c},s.exit=function(){return l},s},da.datum=function(n){return arguments.length?this.property("__data__",n):this.property("__data__")},da.filter=function(n){var t,e,r,u=[];"function"!=typeof n&&(n=q(n));for(var i=0,o=this.length;o>i;i++){u.push(t=[]),t.parentNode=(e=this[i]).parentNode;for(var a=0,c=e.length;c>a;a++)(r=e[a])&&n.call(r,r.__data__,a,i)&&t.push(r)}return x(u)},da.order=function(){for(var n=-1,t=this.length;++n<t;)for(var e,r=this[n],u=r.length-1,i=r[u];--u>=0;)(e=r[u])&&(i&&i!==e.nextSibling&&i.parentNode.insertBefore(e,i),i=e);return this},da.sort=function(n){n=z.apply(this,arguments);for(var t=-1,e=this.length;++t<e;)this[t].sort(n);return this.order()},da.each=function(n){return R(this,function(t,e,r){n.call(t,t.__data__,e,r)})},da.call=function(n){var t=Bo(arguments);return n.apply(t[0]=this,t),this},da.empty=function(){return!this.node()},da.node=function(){for(var n=0,t=this.length;t>n;n++)for(var e=this[n],r=0,u=e.length;u>r;r++){var i=e[r];if(i)return i}return null},da.size=function(){var n=0;return this.each(function(){++n}),n};var 
ya=[];Xo.selection.enter=D,Xo.selection.enter.prototype=ya,ya.append=da.append,ya.empty=da.empty,ya.node=da.node,ya.call=da.call,ya.size=da.size,ya.select=function(n){for(var t,e,r,u,i,o=[],a=-1,c=this.length;++a<c;){r=(u=this[a]).update,o.push(t=[]),t.parentNode=u.parentNode;for(var s=-1,l=u.length;++s<l;)(i=u[s])?(t.push(r[s]=e=n.call(u.parentNode,i.__data__,s,a)),e.__data__=i.__data__):t.push(null)}return x(o)},ya.insert=function(n,t){return arguments.length<2&&(t=P(this)),da.insert.call(this,n,t)},da.transition=function(){for(var n,t,e=ks||++Ls,r=[],u=Es||{time:Date.now(),ease:yu,delay:0,duration:250},i=-1,o=this.length;++i<o;){r.push(n=[]);for(var a=this[i],c=-1,s=a.length;++c<s;)(t=a[c])&&jo(t,c,e,u),n.push(t)}return Do(r,e)},da.interrupt=function(){return this.each(U)},Xo.select=function(n){var t=["string"==typeof n?ha(n,Wo):n];return t.parentNode=Jo,x([t])},Xo.selectAll=function(n){var t=Bo("string"==typeof n?ga(n,Wo):n);return t.parentNode=Jo,x([t])};var xa=Xo.select(Jo);da.on=function(n,t,e){var r=arguments.length;if(3>r){if("string"!=typeof n){2>r&&(t=!1);for(e in n)this.each(j(e,n[e],t));return this}if(2>r)return(r=this.node()["__on"+n])&&r._;e=!1}return this.each(j(n,t,e))};var Ma=Xo.map({mouseenter:"mouseover",mouseleave:"mouseout"});Ma.forEach(function(n){"on"+n in Wo&&Ma.remove(n)});var _a="onselectstart"in Wo?null:h(Jo.style,"userSelect"),ba=0;Xo.mouse=function(n){return Y(n,m())};var wa=/WebKit/.test(Go.navigator.userAgent)?-1:0;Xo.touches=function(n,t){return arguments.length<2&&(t=m().touches),t?Bo(t).map(function(t){var e=Y(n,t);return e.identifier=t.identifier,e}):[]},Xo.behavior.drag=function(){function n(){this.on("mousedown.drag",o).on("touchstart.drag",a)}function t(){return Xo.event.changedTouches[0].identifier}function e(n,t){return Xo.touches(n).filter(function(n){return n.identifier===t})[0]}function r(n,t,e,r){return function(){function o(){var 
n=t(l,g),e=n[0]-v[0],r=n[1]-v[1];d|=e|r,v=n,f({type:"drag",x:n[0]+c[0],y:n[1]+c[1],dx:e,dy:r})}function a(){m.on(e+"."+p,null).on(r+"."+p,null),y(d&&Xo.event.target===h),f({type:"dragend"})}var c,s=this,l=s.parentNode,f=u.of(s,arguments),h=Xo.event.target,g=n(),p=null==g?"drag":"drag-"+g,v=t(l,g),d=0,m=Xo.select(Go).on(e+"."+p,o).on(r+"."+p,a),y=O();i?(c=i.apply(s,arguments),c=[c.x-v[0],c.y-v[1]]):c=[0,0],f({type:"dragstart"})}}var u=y(n,"drag","dragstart","dragend"),i=null,o=r(g,Xo.mouse,"mousemove","mouseup"),a=r(t,e,"touchmove","touchend");return n.origin=function(t){return arguments.length?(i=t,n):i},Xo.rebind(n,u,"on")};var Sa=Math.PI,ka=2*Sa,Ea=Sa/2,Aa=1e-6,Ca=Aa*Aa,Na=Sa/180,La=180/Sa,Ta=Math.SQRT2,qa=2,za=4;Xo.interpolateZoom=function(n,t){function e(n){var t=n*y;if(m){var e=B(v),o=i/(qa*h)*(e*W(Ta*t+v)-$(v));return[r+o*s,u+o*l,i*e/B(Ta*t+v)]}return[r+n*s,u+n*l,i*Math.exp(Ta*t)]}var r=n[0],u=n[1],i=n[2],o=t[0],a=t[1],c=t[2],s=o-r,l=a-u,f=s*s+l*l,h=Math.sqrt(f),g=(c*c-i*i+za*f)/(2*i*qa*h),p=(c*c-i*i-za*f)/(2*c*qa*h),v=Math.log(Math.sqrt(g*g+1)-g),d=Math.log(Math.sqrt(p*p+1)-p),m=d-v,y=(m||Math.log(c/i))/Ta;return e.duration=1e3*y,e},Xo.behavior.zoom=function(){function n(n){n.on(A,s).on(Pa+".zoom",f).on(C,h).on("dblclick.zoom",g).on(L,l)}function t(n){return[(n[0]-S.x)/S.k,(n[1]-S.y)/S.k]}function e(n){return[n[0]*S.k+S.x,n[1]*S.k+S.y]}function r(n){S.k=Math.max(E[0],Math.min(E[1],n))}function u(n,t){t=e(t),S.x+=n[0]-t[0],S.y+=n[1]-t[1]}function i(){_&&_.domain(M.range().map(function(n){return(n-S.x)/S.k}).map(M.invert)),w&&w.domain(b.range().map(function(n){return(n-S.y)/S.k}).map(b.invert))}function o(n){n({type:"zoomstart"})}function a(n){i(),n({type:"zoom",scale:S.k,translate:[S.x,S.y]})}function c(n){n({type:"zoomend"})}function s(){function n(){l=1,u(Xo.mouse(r),g),a(i)}function e(){f.on(C,Go===r?h:null).on(N,null),p(l&&Xo.event.target===s),c(i)}var 
r=this,i=T.of(r,arguments),s=Xo.event.target,l=0,f=Xo.select(Go).on(C,n).on(N,e),g=t(Xo.mouse(r)),p=O();U.call(r),o(i)}function l(){function n(){var n=Xo.touches(g);return h=S.k,n.forEach(function(n){n.identifier in v&&(v[n.identifier]=t(n))}),n}function e(){for(var t=Xo.event.changedTouches,e=0,i=t.length;i>e;++e)v[t[e].identifier]=null;var o=n(),c=Date.now();if(1===o.length){if(500>c-x){var s=o[0],l=v[s.identifier];r(2*S.k),u(s,l),d(),a(p)}x=c}else if(o.length>1){var s=o[0],f=o[1],h=s[0]-f[0],g=s[1]-f[1];m=h*h+g*g}}function i(){for(var n,t,e,i,o=Xo.touches(g),c=0,s=o.length;s>c;++c,i=null)if(e=o[c],i=v[e.identifier]){if(t)break;n=e,t=i}if(i){var l=(l=e[0]-n[0])*l+(l=e[1]-n[1])*l,f=m&&Math.sqrt(l/m);n=[(n[0]+e[0])/2,(n[1]+e[1])/2],t=[(t[0]+i[0])/2,(t[1]+i[1])/2],r(f*h)}x=null,u(n,t),a(p)}function f(){if(Xo.event.touches.length){for(var t=Xo.event.changedTouches,e=0,r=t.length;r>e;++e)delete v[t[e].identifier];for(var u in v)return void n()}b.on(M,null).on(_,null),w.on(A,s).on(L,l),k(),c(p)}var h,g=this,p=T.of(g,arguments),v={},m=0,y=Xo.event.changedTouches[0].identifier,M="touchmove.zoom-"+y,_="touchend.zoom-"+y,b=Xo.select(Go).on(M,i).on(_,f),w=Xo.select(g).on(A,null).on(L,e),k=O();U.call(g),e(),o(p)}function f(){var n=T.of(this,arguments);m?clearTimeout(m):(U.call(this),o(n)),m=setTimeout(function(){m=null,c(n)},50),d();var e=v||Xo.mouse(this);p||(p=t(e)),r(Math.pow(2,.002*Ra())*S.k),u(e,p),a(n)}function h(){p=null}function g(){var n=T.of(this,arguments),e=Xo.mouse(this),i=t(e),s=Math.log(S.k)/Math.LN2;o(n),r(Math.pow(2,Xo.event.shiftKey?Math.ceil(s)-1:Math.floor(s)+1)),u(e,i),a(n),c(n)}var p,v,m,x,M,_,b,w,S={x:0,y:0,k:1},k=[960,500],E=Da,A="mousedown.zoom",C="mousemove.zoom",N="mouseup.zoom",L="touchstart.zoom",T=y(n,"zoomstart","zoom","zoomend");return n.event=function(n){n.each(function(){var n=T.of(this,arguments),t=S;ks?Xo.select(this).transition().each("start.zoom",function(){S=this.__chart__||{x:0,y:0,k:1},o(n)}).tween("zoom:zoom",function(){var 
e=k[0],r=k[1],u=e/2,i=r/2,o=Xo.interpolateZoom([(u-S.x)/S.k,(i-S.y)/S.k,e/S.k],[(u-t.x)/t.k,(i-t.y)/t.k,e/t.k]);return function(t){var r=o(t),c=e/r[2];this.__chart__=S={x:u-r[0]*c,y:i-r[1]*c,k:c},a(n)}}).each("end.zoom",function(){c(n)}):(this.__chart__=S,o(n),a(n),c(n))})},n.translate=function(t){return arguments.length?(S={x:+t[0],y:+t[1],k:S.k},i(),n):[S.x,S.y]},n.scale=function(t){return arguments.length?(S={x:S.x,y:S.y,k:+t},i(),n):S.k},n.scaleExtent=function(t){return arguments.length?(E=null==t?Da:[+t[0],+t[1]],n):E},n.center=function(t){return arguments.length?(v=t&&[+t[0],+t[1]],n):v},n.size=function(t){return arguments.length?(k=t&&[+t[0],+t[1]],n):k},n.x=function(t){return arguments.length?(_=t,M=t.copy(),S={x:0,y:0,k:1},n):_},n.y=function(t){return arguments.length?(w=t,b=t.copy(),S={x:0,y:0,k:1},n):w},Xo.rebind(n,T,"on")};var Ra,Da=[0,1/0],Pa="onwheel"in Wo?(Ra=function(){return-Xo.event.deltaY*(Xo.event.deltaMode?120:1)},"wheel"):"onmousewheel"in Wo?(Ra=function(){return Xo.event.wheelDelta},"mousewheel"):(Ra=function(){return-Xo.event.detail},"MozMousePixelScroll");G.prototype.toString=function(){return this.rgb()+""},Xo.hsl=function(n,t,e){return 1===arguments.length?n instanceof Q?K(n.h,n.s,n.l):dt(""+n,mt,K):K(+n,+t,+e)};var Ua=Q.prototype=new G;Ua.brighter=function(n){return n=Math.pow(.7,arguments.length?n:1),K(this.h,this.s,this.l/n)},Ua.darker=function(n){return n=Math.pow(.7,arguments.length?n:1),K(this.h,this.s,n*this.l)},Ua.rgb=function(){return nt(this.h,this.s,this.l)},Xo.hcl=function(n,t,e){return 1===arguments.length?n instanceof et?tt(n.h,n.c,n.l):n instanceof it?at(n.l,n.a,n.b):at((n=yt((n=Xo.rgb(n)).r,n.g,n.b)).l,n.a,n.b):tt(+n,+t,+e)};var ja=et.prototype=new G;ja.brighter=function(n){return tt(this.h,this.c,Math.min(100,this.l+Ha*(arguments.length?n:1)))},ja.darker=function(n){return tt(this.h,this.c,Math.max(0,this.l-Ha*(arguments.length?n:1)))},ja.rgb=function(){return rt(this.h,this.c,this.l).rgb()},Xo.lab=function(n,t,e){return 
1===arguments.length?n instanceof it?ut(n.l,n.a,n.b):n instanceof et?rt(n.l,n.c,n.h):yt((n=Xo.rgb(n)).r,n.g,n.b):ut(+n,+t,+e)};var Ha=18,Fa=.95047,Oa=1,Ya=1.08883,Ia=it.prototype=new G;Ia.brighter=function(n){return ut(Math.min(100,this.l+Ha*(arguments.length?n:1)),this.a,this.b)},Ia.darker=function(n){return ut(Math.max(0,this.l-Ha*(arguments.length?n:1)),this.a,this.b)},Ia.rgb=function(){return ot(this.l,this.a,this.b)},Xo.rgb=function(n,t,e){return 1===arguments.length?n instanceof pt?gt(n.r,n.g,n.b):dt(""+n,gt,nt):gt(~~n,~~t,~~e)};var Za=pt.prototype=new G;Za.brighter=function(n){n=Math.pow(.7,arguments.length?n:1);var t=this.r,e=this.g,r=this.b,u=30;return t||e||r?(t&&u>t&&(t=u),e&&u>e&&(e=u),r&&u>r&&(r=u),gt(Math.min(255,~~(t/n)),Math.min(255,~~(e/n)),Math.min(255,~~(r/n)))):gt(u,u,u)},Za.darker=function(n){return n=Math.pow(.7,arguments.length?n:1),gt(~~(n*this.r),~~(n*this.g),~~(n*this.b))},Za.hsl=function(){return mt(this.r,this.g,this.b)},Za.toString=function(){return"#"+vt(this.r)+vt(this.g)+vt(this.b)};var 
Va=Xo.map({aliceblue:15792383,antiquewhite:16444375,aqua:65535,aquamarine:8388564,azure:15794175,beige:16119260,bisque:16770244,black:0,blanchedalmond:16772045,blue:255,blueviolet:9055202,brown:10824234,burlywood:14596231,cadetblue:6266528,chartreuse:8388352,chocolate:13789470,coral:16744272,cornflowerblue:6591981,cornsilk:16775388,crimson:14423100,cyan:65535,darkblue:139,darkcyan:35723,darkgoldenrod:12092939,darkgray:11119017,darkgreen:25600,darkgrey:11119017,darkkhaki:12433259,darkmagenta:9109643,darkolivegreen:5597999,darkorange:16747520,darkorchid:10040012,darkred:9109504,darksalmon:15308410,darkseagreen:9419919,darkslateblue:4734347,darkslategray:3100495,darkslategrey:3100495,darkturquoise:52945,darkviolet:9699539,deeppink:16716947,deepskyblue:49151,dimgray:6908265,dimgrey:6908265,dodgerblue:2003199,firebrick:11674146,floralwhite:16775920,forestgreen:2263842,fuchsia:16711935,gainsboro:14474460,ghostwhite:16316671,gold:16766720,goldenrod:14329120,gray:8421504,green:32768,greenyellow:11403055,grey:8421504,honeydew:15794160,hotpink:16738740,indianred:13458524,indigo:4915330,ivory:16777200,khaki:15787660,lavender:15132410,lavenderblush:16773365,lawngreen:8190976,lemonchiffon:16775885,lightblue:11393254,lightcoral:15761536,lightcyan:14745599,lightgoldenrodyellow:16448210,lightgray:13882323,lightgreen:9498256,lightgrey:13882323,lightpink:16758465,lightsalmon:16752762,lightseagreen:2142890,lightskyblue:8900346,lightslategray:7833753,lightslategrey:7833753,lightsteelblue:11584734,lightyellow:16777184,lime:65280,limegreen:3329330,linen:16445670,magenta:16711935,maroon:8388608,mediumaquamarine:6737322,mediumblue:205,mediumorchid:12211667,mediumpurple:9662683,mediumseagreen:3978097,mediumslateblue:8087790,mediumspringgreen:64154,mediumturquoise:4772300,mediumvioletred:13047173,midnightblue:1644912,mintcream:16121850,mistyrose:16770273,moccasin:16770229,navajowhite:16768685,navy:128,oldlace:16643558,olive:8421376,olivedrab:7048739,orange:16753920,orangered:16729344,orchid:
14315734,palegoldenrod:15657130,palegreen:10025880,paleturquoise:11529966,palevioletred:14381203,papayawhip:16773077,peachpuff:16767673,peru:13468991,pink:16761035,plum:14524637,powderblue:11591910,purple:8388736,red:16711680,rosybrown:12357519,royalblue:4286945,saddlebrown:9127187,salmon:16416882,sandybrown:16032864,seagreen:3050327,seashell:16774638,sienna:10506797,silver:12632256,skyblue:8900331,slateblue:6970061,slategray:7372944,slategrey:7372944,snow:16775930,springgreen:65407,steelblue:4620980,tan:13808780,teal:32896,thistle:14204888,tomato:16737095,turquoise:4251856,violet:15631086,wheat:16113331,white:16777215,whitesmoke:16119285,yellow:16776960,yellowgreen:10145074});Va.forEach(function(n,t){Va.set(n,ft(t))}),Xo.functor=_t,Xo.xhr=wt(bt),Xo.dsv=function(n,t){function e(n,e,i){arguments.length<3&&(i=e,e=null);var o=St(n,t,null==e?r:u(e),i);return o.row=function(n){return arguments.length?o.response(null==(e=n)?r:u(n)):e},o}function r(n){return e.parse(n.responseText)}function u(n){return function(t){return e.parse(t.responseText,n)}}function i(t){return t.map(o).join(n)}function o(n){return a.test(n)?'"'+n.replace(/\"/g,'""')+'"':n}var a=new RegExp('["'+n+"\n]"),c=n.charCodeAt(0);return e.parse=function(n,t){var r;return e.parseRows(n,function(n,e){if(r)return r(n,e-1);var u=new Function("d","return {"+n.map(function(n,t){return JSON.stringify(n)+": d["+t+"]"}).join(",")+"}");r=t?function(n,e){return t(u(n),e)}:u})},e.parseRows=function(n,t){function e(){if(l>=s)return o;if(u)return u=!1,i;var t=l;if(34===n.charCodeAt(t)){for(var e=t;e++<s;)if(34===n.charCodeAt(e)){if(34!==n.charCodeAt(e+1))break;++e}l=e+2;var r=n.charCodeAt(e+1);return 13===r?(u=!0,10===n.charCodeAt(e+2)&&++l):10===r&&(u=!0),n.substring(t+1,e).replace(/""/g,'"')}for(;s>l;){var r=n.charCodeAt(l++),a=1;if(10===r)u=!0;else if(13===r)u=!0,10===n.charCodeAt(l)&&(++l,++a);else if(r!==c)continue;return n.substring(t,l-a)}return n.substring(t)}for(var 
r,u,i={},o={},a=[],s=n.length,l=0,f=0;(r=e())!==o;){for(var h=[];r!==i&&r!==o;)h.push(r),r=e();(!t||(h=t(h,f++)))&&a.push(h)}return a},e.format=function(t){if(Array.isArray(t[0]))return e.formatRows(t);var r=new l,u=[];return t.forEach(function(n){for(var t in n)r.has(t)||u.push(r.add(t))}),[u.map(o).join(n)].concat(t.map(function(t){return u.map(function(n){return o(t[n])}).join(n)})).join("\n")},e.formatRows=function(n){return n.map(i).join("\n")},e},Xo.csv=Xo.dsv(",","text/csv"),Xo.tsv=Xo.dsv(" ","text/tab-separated-values");var Xa,$a,Ba,Wa,Ja,Ga=Go[h(Go,"requestAnimationFrame")]||function(n){setTimeout(n,17)};Xo.timer=function(n,t,e){var r=arguments.length;2>r&&(t=0),3>r&&(e=Date.now());var u=e+t,i={c:n,t:u,f:!1,n:null};$a?$a.n=i:Xa=i,$a=i,Ba||(Wa=clearTimeout(Wa),Ba=1,Ga(Et))},Xo.timer.flush=function(){At(),Ct()},Xo.round=function(n,t){return t?Math.round(n*(t=Math.pow(10,t)))/t:Math.round(n)};var Ka=["y","z","a","f","p","n","\xb5","m","","k","M","G","T","P","E","Z","Y"].map(Lt);Xo.formatPrefix=function(n,t){var e=0;return n&&(0>n&&(n*=-1),t&&(n=Xo.round(n,Nt(n,t))),e=1+Math.floor(1e-12+Math.log(n)/Math.LN10),e=Math.max(-24,Math.min(24,3*Math.floor((0>=e?e+1:e-1)/3)))),Ka[8+e/3]};var Qa=/(?:([^{])?([<>=^]))?([+\- ])?([$#])?(0)?(\d+)?(,)?(\.-?\d+)?([a-z%])?/i,nc=Xo.map({b:function(n){return n.toString(2)},c:function(n){return String.fromCharCode(n)},o:function(n){return n.toString(8)},x:function(n){return n.toString(16)},X:function(n){return n.toString(16).toUpperCase()},g:function(n,t){return n.toPrecision(t)},e:function(n,t){return n.toExponential(t)},f:function(n,t){return n.toFixed(t)},r:function(n,t){return(n=Xo.round(n,Nt(n,t))).toFixed(Math.max(0,Math.min(20,Nt(n*(1+1e-15),t))))}}),tc=Xo.time={},ec=Date;zt.prototype={getDate:function(){return this._.getUTCDate()},getDay:function(){return this._.getUTCDay()},getFullYear:function(){return this._.getUTCFullYear()},getHours:function(){return this._.getUTCHours()},getMilliseconds:function(){return 
this._.getUTCMilliseconds()},getMinutes:function(){return this._.getUTCMinutes()},getMonth:function(){return this._.getUTCMonth()},getSeconds:function(){return this._.getUTCSeconds()},getTime:function(){return this._.getTime()},getTimezoneOffset:function(){return 0},valueOf:function(){return this._.valueOf()},setDate:function(){rc.setUTCDate.apply(this._,arguments)},setDay:function(){rc.setUTCDay.apply(this._,arguments)},setFullYear:function(){rc.setUTCFullYear.apply(this._,arguments)},setHours:function(){rc.setUTCHours.apply(this._,arguments)},setMilliseconds:function(){rc.setUTCMilliseconds.apply(this._,arguments)},setMinutes:function(){rc.setUTCMinutes.apply(this._,arguments)},setMonth:function(){rc.setUTCMonth.apply(this._,arguments)},setSeconds:function(){rc.setUTCSeconds.apply(this._,arguments)},setTime:function(){rc.setTime.apply(this._,arguments)}};var rc=Date.prototype;tc.year=Rt(function(n){return n=tc.day(n),n.setMonth(0,1),n},function(n,t){n.setFullYear(n.getFullYear()+t)},function(n){return n.getFullYear()}),tc.years=tc.year.range,tc.years.utc=tc.year.utc.range,tc.day=Rt(function(n){var t=new ec(2e3,0);return t.setFullYear(n.getFullYear(),n.getMonth(),n.getDate()),t},function(n,t){n.setDate(n.getDate()+t)},function(n){return n.getDate()-1}),tc.days=tc.day.range,tc.days.utc=tc.day.utc.range,tc.dayOfYear=function(n){var t=tc.year(n);return Math.floor((n-t-6e4*(n.getTimezoneOffset()-t.getTimezoneOffset()))/864e5)},["sunday","monday","tuesday","wednesday","thursday","friday","saturday"].forEach(function(n,t){t=7-t;var e=tc[n]=Rt(function(n){return(n=tc.day(n)).setDate(n.getDate()-(n.getDay()+t)%7),n},function(n,t){n.setDate(n.getDate()+7*Math.floor(t))},function(n){var e=tc.year(n).getDay();return Math.floor((tc.dayOfYear(n)+(e+t)%7)/7)-(e!==t)});tc[n+"s"]=e.range,tc[n+"s"].utc=e.utc.range,tc[n+"OfYear"]=function(n){var e=tc.year(n).getDay();return 
Math.floor((tc.dayOfYear(n)+(e+t)%7)/7)}}),tc.week=tc.sunday,tc.weeks=tc.sunday.range,tc.weeks.utc=tc.sunday.utc.range,tc.weekOfYear=tc.sundayOfYear;var uc={"-":"",_:" ",0:"0"},ic=/^\s*\d+/,oc=/^%/;Xo.locale=function(n){return{numberFormat:Tt(n),timeFormat:Pt(n)}};var ac=Xo.locale({decimal:".",thousands:",",grouping:[3],currency:["$",""],dateTime:"%a %b %e %X %Y",date:"%m/%d/%Y",time:"%H:%M:%S",periods:["AM","PM"],days:["Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"],shortDays:["Sun","Mon","Tue","Wed","Thu","Fri","Sat"],months:["January","February","March","April","May","June","July","August","September","October","November","December"],shortMonths:["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"]});Xo.format=ac.numberFormat,Xo.geo={},re.prototype={s:0,t:0,add:function(n){ue(n,this.t,cc),ue(cc.s,this.s,this),this.s?this.t+=cc.t:this.s=cc.t},reset:function(){this.s=this.t=0},valueOf:function(){return this.s}};var cc=new re;Xo.geo.stream=function(n,t){n&&sc.hasOwnProperty(n.type)?sc[n.type](n,t):ie(n,t)};var sc={Feature:function(n,t){ie(n.geometry,t)},FeatureCollection:function(n,t){for(var e=n.features,r=-1,u=e.length;++r<u;)ie(e[r].geometry,t)}},lc={Sphere:function(n,t){t.sphere()},Point:function(n,t){n=n.coordinates,t.point(n[0],n[1],n[2])},MultiPoint:function(n,t){for(var e=n.coordinates,r=-1,u=e.length;++r<u;)n=e[r],t.point(n[0],n[1],n[2])},LineString:function(n,t){oe(n.coordinates,t,0)},MultiLineString:function(n,t){for(var e=n.coordinates,r=-1,u=e.length;++r<u;)oe(e[r],t,0)},Polygon:function(n,t){ae(n.coordinates,t)},MultiPolygon:function(n,t){for(var e=n.coordinates,r=-1,u=e.length;++r<u;)ae(e[r],t)},GeometryCollection:function(n,t){for(var e=n.geometries,r=-1,u=e.length;++r<u;)ie(e[r],t)}};Xo.geo.area=function(n){return fc=0,Xo.geo.stream(n,gc),fc};var fc,hc=new re,gc={sphere:function(){fc+=4*Sa},point:g,lineStart:g,lineEnd:g,polygonStart:function(){hc.reset(),gc.lineStart=ce},polygonEnd:function(){var 
n=2*hc;fc+=0>n?4*Sa+n:n,gc.lineStart=gc.lineEnd=gc.point=g}};Xo.geo.bounds=function(){function n(n,t){x.push(M=[l=n,h=n]),f>t&&(f=t),t>g&&(g=t)}function t(t,e){var r=se([t*Na,e*Na]);if(m){var u=fe(m,r),i=[u[1],-u[0],0],o=fe(i,u);pe(o),o=ve(o);var c=t-p,s=c>0?1:-1,v=o[0]*La*s,d=oa(c)>180;if(d^(v>s*p&&s*t>v)){var y=o[1]*La;y>g&&(g=y)}else if(v=(v+360)%360-180,d^(v>s*p&&s*t>v)){var y=-o[1]*La;f>y&&(f=y)}else f>e&&(f=e),e>g&&(g=e);d?p>t?a(l,t)>a(l,h)&&(h=t):a(t,h)>a(l,h)&&(l=t):h>=l?(l>t&&(l=t),t>h&&(h=t)):t>p?a(l,t)>a(l,h)&&(h=t):a(t,h)>a(l,h)&&(l=t)}else n(t,e);m=r,p=t}function e(){_.point=t}function r(){M[0]=l,M[1]=h,_.point=n,m=null}function u(n,e){if(m){var r=n-p;y+=oa(r)>180?r+(r>0?360:-360):r}else v=n,d=e;gc.point(n,e),t(n,e)}function i(){gc.lineStart()}function o(){u(v,d),gc.lineEnd(),oa(y)>Aa&&(l=-(h=180)),M[0]=l,M[1]=h,m=null}function a(n,t){return(t-=n)<0?t+360:t}function c(n,t){return n[0]-t[0]}function s(n,t){return t[0]<=t[1]?t[0]<=n&&n<=t[1]:n<t[0]||t[1]<n}var l,f,h,g,p,v,d,m,y,x,M,_={point:n,lineStart:e,lineEnd:r,polygonStart:function(){_.point=u,_.lineStart=i,_.lineEnd=o,y=0,gc.polygonStart()},polygonEnd:function(){gc.polygonEnd(),_.point=n,_.lineStart=e,_.lineEnd=r,0>hc?(l=-(h=180),f=-(g=90)):y>Aa?g=90:-Aa>y&&(f=-90),M[0]=l,M[1]=h}};return function(n){g=h=-(l=f=1/0),x=[],Xo.geo.stream(n,_);var t=x.length;if(t){x.sort(c);for(var e,r=1,u=x[0],i=[u];t>r;++r)e=x[r],s(e[0],u)||s(e[1],u)?(a(u[0],e[1])>a(u[0],u[1])&&(u[1]=e[1]),a(e[0],u[1])>a(u[0],u[1])&&(u[0]=e[0])):i.push(u=e);for(var o,e,p=-1/0,t=i.length-1,r=0,u=i[t];t>=r;u=e,++r)e=i[r],(o=a(u[1],e[0]))>p&&(p=o,l=e[0],h=u[1])}return x=M=null,1/0===l||1/0===f?[[0/0,0/0],[0/0,0/0]]:[[l,f],[h,g]]}}(),Xo.geo.centroid=function(n){pc=vc=dc=mc=yc=xc=Mc=_c=bc=wc=Sc=0,Xo.geo.stream(n,kc);var t=bc,e=wc,r=Sc,u=t*t+e*e+r*r;return Ca>u&&(t=xc,e=Mc,r=_c,Aa>vc&&(t=dc,e=mc,r=yc),u=t*t+e*e+r*r,Ca>u)?[0/0,0/0]:[Math.atan2(e,t)*La,X(r/Math.sqrt(u))*La]};var 
pc,vc,dc,mc,yc,xc,Mc,_c,bc,wc,Sc,kc={sphere:g,point:me,lineStart:xe,lineEnd:Me,polygonStart:function(){kc.lineStart=_e},polygonEnd:function(){kc.lineStart=xe}},Ec=Ee(be,Te,ze,[-Sa,-Sa/2]),Ac=1e9;Xo.geo.clipExtent=function(){var n,t,e,r,u,i,o={stream:function(n){return u&&(u.valid=!1),u=i(n),u.valid=!0,u},extent:function(a){return arguments.length?(i=Pe(n=+a[0][0],t=+a[0][1],e=+a[1][0],r=+a[1][1]),u&&(u.valid=!1,u=null),o):[[n,t],[e,r]]}};return o.extent([[0,0],[960,500]])},(Xo.geo.conicEqualArea=function(){return je(They)}).raw=They,Xo.geo.albers=function(){return Xo.geo.conicEqualArea().rotate([96,0]).center([-.6,38.7]).parallels([29.5,45.5]).scale(1070)},Xo.geo.albersUsa=function(){function n(n){var i=n[0],o=n[1];return t=null,e(i,o),t||(r(i,o),t)||u(i,o),t}var t,e,r,u,i=Xo.geo.albers(),o=Xo.geo.conicEqualArea().rotate([154,0]).center([-2,58.5]).parallels([55,65]),a=Xo.geo.conicEqualArea().rotate([157,0]).center([-3,19.9]).parallels([8,18]),c={point:function(n,e){t=[n,e]}};return n.invert=function(n){var t=i.scale(),e=i.translate(),r=(n[0]-e[0])/t,u=(n[1]-e[1])/t;return(u>=.12&&.234>u&&r>=-.425&&-.214>r?o:u>=.166&&.234>u&&r>=-.214&&-.115>r?a:i).invert(n)},n.stream=function(n){var t=i.stream(n),e=o.stream(n),r=a.stream(n);return{point:function(n,u){t.point(n,u),e.point(n,u),r.point(n,u)},sphere:function(){t.sphere(),e.sphere(),r.sphere()},lineStart:function(){t.lineStart(),e.lineStart(),r.lineStart()},lineEnd:function(){t.lineEnd(),e.lineEnd(),r.lineEnd()},polygonStart:function(){t.polygonStart(),e.polygonStart(),r.polygonStart()},polygonEnd:function(){t.polygonEnd(),e.polygonEnd(),r.polygonEnd()}}},n.precision=function(t){return arguments.length?(i.precision(t),o.precision(t),a.precision(t),n):i.precision()},n.scale=function(t){return arguments.length?(i.scale(t),o.scale(.35*t),a.scale(t),n.translate(i.translate())):i.scale()},n.translate=function(t){if(!arguments.length)return i.translate();var s=i.scale(),l=+t[0],f=+t[1];return 
e=i.translate(t).clipExtent([[l-.455*s,f-.238*s],[l+.455*s,f+.238*s]]).stream(c).point,r=o.translate([l-.307*s,f+.201*s]).clipExtent([[l-.425*s+Aa,f+.12*s+Aa],[l-.214*s-Aa,f+.234*s-Aa]]).stream(c).point,u=a.translate([l-.205*s,f+.212*s]).clipExtent([[l-.214*s+Aa,f+.166*s+Aa],[l-.115*s-Aa,f+.234*s-Aa]]).stream(c).point,n},n.scale(1070)};var Cc,Nc,Lc,Tc,qc,zc,Rc={point:g,lineStart:g,lineEnd:g,polygonStart:function(){Nc=0,Rc.lineStart=Fe},polygonEnd:function(){Rc.lineStart=Rc.lineEnd=Rc.point=g,Cc+=oa(Nc/2)}},Dc={point:Oe,lineStart:g,lineEnd:g,polygonStart:g,polygonEnd:g},Pc={point:Ze,lineStart:Ve,lineEnd:Xe,polygonStart:function(){Pc.lineStart=$e},polygonEnd:function(){Pc.point=Ze,Pc.lineStart=Ve,Pc.lineEnd=Xe}};Xo.geo.path=function(){function n(n){return n&&("function"==typeof a&&i.pointRadius(+a.apply(this,arguments)),o&&o.valid||(o=u(i)),Xo.geo.stream(n,o)),i.result()}function t(){return o=null,n}var e,r,u,i,o,a=4.5;return n.area=function(n){return Cc=0,Xo.geo.stream(n,u(Rc)),Cc},n.centroid=function(n){return dc=mc=yc=xc=Mc=_c=bc=wc=Sc=0,Xo.geo.stream(n,u(Pc)),Sc?[bc/Sc,wc/Sc]:_c?[xc/_c,Mc/_c]:yc?[dc/yc,mc/yc]:[0/0,0/0]},n.bounds=function(n){return qc=zc=-(Lc=Tc=1/0),Xo.geo.stream(n,u(Dc)),[[Lc,Tc],[qc,zc]]},n.projection=function(n){return arguments.length?(u=(e=n)?n.stream||Je(n):bt,t()):e},n.context=function(n){return arguments.length?(i=null==(r=n)?new Ye:new Be(n),"function"!=typeof a&&i.pointRadius(a),t()):r},n.pointRadius=function(t){return arguments.length?(a="function"==typeof t?t:(i.pointRadius(+t),+t),n):a},n.projection(Xo.geo.albersUsa()).context(null)},Xo.geo.transform=function(n){return{stream:function(t){var e=new Ge(t);for(var r in n)e[r]=n[r];return 
e}}},Ge.prototype={point:function(n,t){this.stream.point(n,t)},sphere:function(){this.stream.sphere()},lineStart:function(){this.stream.lineStart()},lineEnd:function(){this.stream.lineEnd()},polygonStart:function(){this.stream.polygonStart()},polygonEnd:function(){this.stream.polygonEnd()}},Xo.geo.projection=Qe,Xo.geo.projectionMutator=nr,(Xo.geo.equirectangular=function(){return Qe(er)}).raw=er.invert=er,Xo.geo.rotation=function(n){function t(t){return t=n(t[0]*Na,t[1]*Na),t[0]*=La,t[1]*=La,t}return n=ur(n[0]%360*Na,n[1]*Na,n.length>2?n[2]*Na:0),t.invert=function(t){return t=n.invert(t[0]*Na,t[1]*Na),t[0]*=La,t[1]*=La,t},t},rr.invert=er,Xo.geo.circle=function(){function n(){var n="function"==typeof r?r.apply(this,arguments):r,t=ur(-n[0]*Na,-n[1]*Na,0).invert,u=[];return e(null,null,1,{point:function(n,e){u.push(n=t(n,e)),n[0]*=La,n[1]*=La}}),{type:"Polygon",coordinates:[u]}}var t,e,r=[0,0],u=6;return n.origin=function(t){return arguments.length?(r=t,n):r},n.angle=function(r){return arguments.length?(e=cr((t=+r)*Na,u*Na),n):t},n.precision=function(r){return arguments.length?(e=cr(t*Na,(u=+r)*Na),n):u},n.angle(90)},Xo.geo.distance=function(n,t){var e,r=(t[0]-n[0])*Na,u=n[1]*Na,i=t[1]*Na,o=Math.sin(r),a=Math.cos(r),c=Math.sin(u),s=Math.cos(u),l=Math.sin(i),f=Math.cos(i);return Math.atan2(Math.sqrt((e=f*o)*e+(e=s*l-c*f*a)*e),c*l+s*f*a)},Xo.geo.graticule=function(){function n(){return{type:"MultiLineString",coordinates:t()}}function t(){return Xo.range(Math.ceil(i/d)*d,u,d).map(h).concat(Xo.range(Math.ceil(s/m)*m,c,m).map(g)).concat(Xo.range(Math.ceil(r/p)*p,e,p).filter(function(n){return oa(n%d)>Aa}).map(l)).concat(Xo.range(Math.ceil(a/v)*v,o,v).filter(function(n){return oa(n%m)>Aa}).map(f))}var e,r,u,i,o,a,c,s,l,f,h,g,p=10,v=p,d=90,m=360,y=2.5;return n.lines=function(){return 
t().map(function(n){return{type:"LineString",coordinates:n}})},n.outline=function(){return{type:"Polygon",coordinates:[h(i).concat(g(c).slice(1),h(u).reverse().slice(1),g(s).reverse().slice(1))]}},n.extent=function(t){return arguments.length?n.majorExtent(t).minorExtent(t):n.minorExtent()},n.majorExtent=function(t){return arguments.length?(i=+t[0][0],u=+t[1][0],s=+t[0][1],c=+t[1][1],i>u&&(t=i,i=u,u=t),s>c&&(t=s,s=c,c=t),n.precision(y)):[[i,s],[u,c]]},n.minorExtent=function(t){return arguments.length?(r=+t[0][0],e=+t[1][0],a=+t[0][1],o=+t[1][1],r>e&&(t=r,r=e,e=t),a>o&&(t=a,a=o,o=t),n.precision(y)):[[r,a],[e,o]]},n.step=function(t){return arguments.length?n.majorStep(t).minorStep(t):n.minorStep()},n.majorStep=function(t){return arguments.length?(d=+t[0],m=+t[1],n):[d,m]},n.minorStep=function(t){return arguments.length?(p=+t[0],v=+t[1],n):[p,v]},n.precision=function(t){return arguments.length?(y=+t,l=lr(a,o,90),f=fr(r,e,y),h=lr(s,c,90),g=fr(i,u,y),n):y},n.majorExtent([[-180,-90+Aa],[180,90-Aa]]).minorExtent([[-180,-80-Aa],[180,80+Aa]])},Xo.geo.greatArc=function(){function n(){return{type:"LineString",coordinates:[t||r.apply(this,arguments),e||u.apply(this,arguments)]}}var t,e,r=hr,u=gr;return n.distance=function(){return Xo.geo.distance(t||r.apply(this,arguments),e||u.apply(this,arguments))},n.source=function(e){return arguments.length?(r=e,t="function"==typeof e?null:e,n):r},n.target=function(t){return arguments.length?(u=t,e="function"==typeof t?null:t,n):u},n.precision=function(){return arguments.length?n:0},n},Xo.geo.interpolate=function(n,t){return pr(n[0]*Na,n[1]*Na,t[0]*Na,t[1]*Na)},Xo.geo.length=function(n){return Uc=0,Xo.geo.stream(n,jc),Uc};var Uc,jc={sphere:g,point:g,lineStart:vr,lineEnd:g,polygonStart:g,polygonEnd:g},Hc=dr(function(n){return Math.sqrt(2/(1+n))},function(n){return 2*Math.asin(n/2)});(Xo.geo.azimuthalEqualArea=function(){return Qe(Hc)}).raw=Hc;var Fc=dr(function(n){var t=Math.acos(n);return 
t&&t/Math.sin(t)},bt);(Xo.geo.azimuthalEquidistant=function(){return Qe(Fc)}).raw=Fc,(Xo.geo.conicConformal=function(){return je(mr)}).raw=mr,(Xo.geo.conicEquidistant=function(){return je(yr)}).raw=yr;var Oc=dr(function(n){return 1/n},Math.atan);(Xo.geo.gnomonic=function(){return Qe(Oc)}).raw=Oc,xr.invert=function(n,t){return[n,2*Math.atan(Math.exp(t))-Ea]},(Xo.geo.mercator=function(){return Mr(xr)}).raw=xr;var Yc=dr(function(){return 1},Math.asin);(Xo.geo.orthographic=function(){return Qe(Yc)}).raw=Yc;var Ic=dr(function(n){return 1/(1+n)},function(n){return 2*Math.atan(n)});(Xo.geo.stereographic=function(){return Qe(Ic)}).raw=Ic,_r.invert=function(n,t){return[-t,2*Math.atan(Math.exp(n))-Ea]},(Xo.geo.transverseMercator=function(){var n=Mr(_r),t=n.center,e=n.rotate;return n.center=function(n){return n?t([-n[1],n[0]]):(n=t(),[-n[1],n[0]])},n.rotate=function(n){return n?e([n[0],n[1],n.length>2?n[2]+90:90]):(n=e(),[n[0],n[1],n[2]-90])},n.rotate([0,0])}).raw=_r,Xo.geom={},Xo.geom.hull=function(n){function t(n){if(n.length<3)return[];var t,u=_t(e),i=_t(r),o=n.length,a=[],c=[];for(t=0;o>t;t++)a.push([+u.call(this,n[t],t),+i.call(this,n[t],t),t]);for(a.sort(kr),t=0;o>t;t++)c.push([a[t][0],-a[t][1]]);var s=Sr(a),l=Sr(c),f=l[0]===s[0],h=l[l.length-1]===s[s.length-1],g=[];for(t=s.length-1;t>=0;--t)g.push(n[a[s[t]][2]]);for(t=+f;t<l.length-h;++t)g.push(n[a[l[t]][2]]);return g}var e=br,r=wr;return arguments.length?t(n):(t.x=function(n){return arguments.length?(e=n,t):e},t.y=function(n){return arguments.length?(r=n,t):r},t)},Xo.geom.polygon=function(n){return fa(n,Zc),n};var Zc=Xo.geom.polygon.prototype=[];Zc.area=function(){for(var n,t=-1,e=this.length,r=this[e-1],u=0;++t<e;)n=r,r=this[t],u+=n[1]*r[0]-n[0]*r[1];return.5*u},Zc.centroid=function(n){var t,e,r=-1,u=this.length,i=0,o=0,a=this[u-1];for(arguments.length||(n=-1/(6*this.area()));++r<u;)t=a,a=this[r],e=t[0]*a[1]-a[0]*t[1],i+=(t[0]+a[0])*e,o+=(t[1]+a[1])*e;return[i*n,o*n]},Zc.clip=function(n){for(var 
t,e,r,u,i,o,a=Cr(n),c=-1,s=this.length-Cr(this),l=this[s-1];++c<s;){for(t=n.slice(),n.length=0,u=this[c],i=t[(r=t.length-a)-1],e=-1;++e<r;)o=t[e],Er(o,l,u)?(Er(i,l,u)||n.push(Ar(i,o,l,u)),n.push(o)):Er(i,l,u)&&n.push(Ar(i,o,l,u)),i=o;a&&n.push(n[0]),l=u}return n};var Vc,Xc,$c,Bc,Wc,Jc=[],Gc=[];Pr.prototype.prepare=function(){for(var n,t=this.edges,e=t.length;e--;)n=t[e].edge,n.b&&n.a||t.splice(e,1);return t.sort(jr),t.length},Br.prototype={start:function(){return this.edge.l===this.site?this.edge.a:this.edge.b},end:function(){return this.edge.l===this.site?this.edge.b:this.edge.a}},Wr.prototype={insert:function(n,t){var e,r,u;if(n){if(t.P=n,t.N=n.N,n.N&&(n.N.P=t),n.N=t,n.R){for(n=n.R;n.L;)n=n.L;n.L=t}else n.R=t;e=n}else this._?(n=Qr(this._),t.P=null,t.N=n,n.P=n.L=t,e=n):(t.P=t.N=null,this._=t,e=null);for(t.L=t.R=null,t.U=e,t.C=!0,n=t;e&&e.C;)r=e.U,e===r.L?(u=r.R,u&&u.C?(e.C=u.C=!1,r.C=!0,n=r):(n===e.R&&(Gr(this,e),n=e,e=n.U),e.C=!1,r.C=!0,Kr(this,r))):(u=r.L,u&&u.C?(e.C=u.C=!1,r.C=!0,n=r):(n===e.L&&(Kr(this,e),n=e,e=n.U),e.C=!1,r.C=!0,Gr(this,r))),e=n.U;this._.C=!1},remove:function(n){n.N&&(n.N.P=n.P),n.P&&(n.P.N=n.N),n.N=n.P=null;var t,e,r,u=n.U,i=n.L,o=n.R;if(e=i?o?Qr(o):i:o,u?u.L===n?u.L=e:u.R=e:this._=e,i&&o?(r=e.C,e.C=n.C,e.L=i,i.U=e,e!==o?(u=e.U,e.U=n.U,n=e.R,u.L=n,e.R=o,o.U=e):(e.U=u,u=e,n=e.R)):(r=n.C,n=e),n&&(n.U=u),!r){if(n&&n.C)return n.C=!1,void 0;do{if(n===this._)break;if(n===u.L){if(t=u.R,t.C&&(t.C=!1,u.C=!0,Gr(this,u),t=u.R),t.L&&t.L.C||t.R&&t.R.C){t.R&&t.R.C||(t.L.C=!1,t.C=!0,Kr(this,t),t=u.R),t.C=u.C,u.C=t.R.C=!1,Gr(this,u),n=this._;break}}else if(t=u.L,t.C&&(t.C=!1,u.C=!0,Kr(this,u),t=u.L),t.L&&t.L.C||t.R&&t.R.C){t.L&&t.L.C||(t.R.C=!1,t.C=!0,Gr(this,t),t=u.L),t.C=u.C,u.C=t.L.C=!1,Kr(this,u),n=this._;break}t.C=!0,n=u,u=u.U}while(!n.C);n&&(n.C=!1)}}},Xo.geom.voronoi=function(n){function t(n){var t=new Array(n.length),r=a[0][0],u=a[0][1],i=a[1][0],o=a[1][1];return nu(e(n),a).cells.forEach(function(e,a){var 
c=e.edges,s=e.site,l=t[a]=c.length?c.map(function(n){var t=n.start();return[t.x,t.y]}):s.x>=r&&s.x<=i&&s.y>=u&&s.y<=o?[[r,o],[i,o],[i,u],[r,u]]:[];l.point=n[a]}),t}function e(n){return n.map(function(n,t){return{x:Math.round(i(n,t)/Aa)*Aa,y:Math.round(o(n,t)/Aa)*Aa,i:t}})}var r=br,u=wr,i=r,o=u,a=Kc;return n?t(n):(t.links=function(n){return nu(e(n)).edges.filter(function(n){return n.l&&n.r}).map(function(t){return{source:n[t.l.i],target:n[t.r.i]}})},t.triangles=function(n){var t=[];return nu(e(n)).cells.forEach(function(e,r){for(var u,i,o=e.site,a=e.edges.sort(jr),c=-1,s=a.length,l=a[s-1].edge,f=l.l===o?l.r:l.l;++c<s;)u=l,i=f,l=a[c].edge,f=l.l===o?l.r:l.l,r<i.i&&r<f.i&&eu(o,i,f)<0&&t.push([n[r],n[i.i],n[f.i]])}),t},t.x=function(n){return arguments.length?(i=_t(r=n),t):r},t.y=function(n){return arguments.length?(o=_t(u=n),t):u},t.clipExtent=function(n){return arguments.length?(a=null==n?Kc:n,t):a===Kc?null:a},t.size=function(n){return arguments.length?t.clipExtent(n&&[[0,0],n]):a===Kc?null:a&&a[1]},t)};var Kc=[[-1e6,-1e6],[1e6,1e6]];Xo.geom.delaunay=function(n){return Xo.geom.voronoi().triangles(n)},Xo.geom.quadtree=function(n,t,e,r,u){function i(n){function i(n,t,e,r,u,i,o,a){if(!isNaN(e)&&!isNaN(r))if(n.leaf){var c=n.x,l=n.y;if(null!=c)if(oa(c-e)+oa(l-r)<.01)s(n,t,e,r,u,i,o,a);else{var f=n.point;n.x=n.y=n.point=null,s(n,f,c,l,u,i,o,a),s(n,t,e,r,u,i,o,a)}else n.x=e,n.y=r,n.point=t}else s(n,t,e,r,u,i,o,a)}function s(n,t,e,r,u,o,a,c){var s=.5*(u+a),l=.5*(o+c),f=e>=s,h=r>=l,g=(h<<1)+f;n.leaf=!1,n=n.nodes[g]||(n.nodes[g]=iu()),f?u=s:a=s,h?o=l:c=l,i(n,t,e,r,u,o,a,c)}var l,f,h,g,p,v,d,m,y,x=_t(a),M=_t(c);if(null!=t)v=t,d=e,m=r,y=u;else if(m=y=-(v=d=1/0),f=[],h=[],p=n.length,o)for(g=0;p>g;++g)l=n[g],l.x<v&&(v=l.x),l.y<d&&(d=l.y),l.x>m&&(m=l.x),l.y>y&&(y=l.y),f.push(l.x),h.push(l.y);else for(g=0;p>g;++g){var _=+x(l=n[g],g),b=+M(l,g);v>_&&(v=_),d>b&&(d=b),_>m&&(m=_),b>y&&(y=b),f.push(_),h.push(b)}var w=m-v,S=y-d;w>S?y=d+w:m=v+S;var 
k=iu();if(k.add=function(n){i(k,n,+x(n,++g),+M(n,g),v,d,m,y)},k.visit=function(n){ou(n,k,v,d,m,y)},g=-1,null==t){for(;++g<p;)i(k,n[g],f[g],h[g],v,d,m,y);--g}else n.forEach(k.add);return f=h=n=l=null,k}var o,a=br,c=wr;return(o=arguments.length)?(a=ru,c=uu,3===o&&(u=e,r=t,e=t=0),i(n)):(i.x=function(n){return arguments.length?(a=n,i):a},i.y=function(n){return arguments.length?(c=n,i):c},i.extent=function(n){return arguments.length?(null==n?t=e=r=u=null:(t=+n[0][0],e=+n[0][1],r=+n[1][0],u=+n[1][1]),i):null==t?null:[[t,e],[r,u]]},i.size=function(n){return arguments.length?(null==n?t=e=r=u=null:(t=e=0,r=+n[0],u=+n[1]),i):null==t?null:[r-t,u-e]},i)},Xo.interpolateRgb=au,Xo.interpolateObject=cu,Xo.interpolateNumber=su,Xo.interpolateString=lu;var Qc=/[-+]?(?:\d+\.?\d*|\.?\d+)(?:[eE][-+]?\d+)?/g;Xo.interpolate=fu,Xo.interpolators=[function(n,t){var e=typeof t;return("string"===e?Va.has(t)||/^(#|rgb\(|hsl\()/.test(t)?au:lu:t instanceof G?au:"object"===e?Array.isArray(t)?hu:cu:su)(n,t)}],Xo.interpolateArray=hu;var ns=function(){return bt},ts=Xo.map({linear:ns,poly:xu,quad:function(){return du},cubic:function(){return mu},sin:function(){return Mu},exp:function(){return _u},circle:function(){return bu},elastic:wu,back:Su,bounce:function(){return ku}}),es=Xo.map({"in":bt,out:pu,"in-out":vu,"out-in":function(n){return vu(pu(n))}});Xo.ease=function(n){var t=n.indexOf("-"),e=t>=0?n.substring(0,t):n,r=t>=0?n.substring(t+1):"in";return e=ts.get(e)||ns,r=es.get(r)||bt,gu(r(e.apply(null,$o.call(arguments,1))))},Xo.interpolateHcl=Eu,Xo.interpolateHsl=Au,Xo.interpolateLab=Cu,Xo.interpolateRound=Nu,Xo.transform=function(n){var t=Wo.createElementNS(Xo.ns.prefix.svg,"g");return(Xo.transform=function(n){if(null!=n){t.setAttribute("transform",n);var e=t.transform.baseVal.consolidate()}return new Lu(e?e.matrix:rs)})(n)},Lu.prototype.toString=function(){return"translate("+this.translate+")rotate("+this.rotate+")skewX("+this.skew+")scale("+this.scale+")"};var 
rs={a:1,b:0,c:0,d:1,e:0,f:0};Xo.interpolateTransform=Ru,Xo.layout={},Xo.layout.bundle=function(){return function(n){for(var t=[],e=-1,r=n.length;++e<r;)t.push(Uu(n[e]));return t}},Xo.layout.chord=function(){function n(){var n,s,f,h,g,p={},v=[],d=Xo.range(i),m=[];for(e=[],r=[],n=0,h=-1;++h<i;){for(s=0,g=-1;++g<i;)s+=u[h][g];v.push(s),m.push(Xo.range(i)),n+=s}for(o&&d.sort(function(n,t){return o(v[n],v[t])}),a&&m.forEach(function(n,t){n.sort(function(n,e){return a(u[t][n],u[t][e])})}),n=(ka-l*i)/n,s=0,h=-1;++h<i;){for(f=s,g=-1;++g<i;){var y=d[h],x=m[y][g],M=u[y][x],_=s,b=s+=M*n;p[y+"-"+x]={index:y,subindex:x,startAngle:_,endAngle:b,value:M}}r[y]={index:y,startAngle:f,endAngle:s,value:(s-f)/n},s+=l}for(h=-1;++h<i;)for(g=h-1;++g<i;){var w=p[h+"-"+g],S=p[g+"-"+h];(w.value||S.value)&&e.push(w.value<S.value?{source:S,target:w}:{source:w,target:S})}c&&t()}function t(){e.sort(function(n,t){return c((n.source.value+n.target.value)/2,(t.source.value+t.target.value)/2)})}var e,r,u,i,o,a,c,s={},l=0;return s.matrix=function(n){return arguments.length?(i=(u=n)&&u.length,e=r=null,s):u},s.padding=function(n){return arguments.length?(l=n,e=r=null,s):l},s.sortGroups=function(n){return arguments.length?(o=n,e=r=null,s):o},s.sortSubgroups=function(n){return arguments.length?(a=n,e=null,s):a},s.sortChords=function(n){return arguments.length?(c=n,e&&t(),s):c},s.chords=function(){return e||n(),e},s.groups=function(){return r||n(),r},s},Xo.layout.force=function(){function n(n){return function(t,e,r,u){if(t.point!==n){var i=t.cx-n.x,o=t.cy-n.y,a=u-e,c=i*i+o*o;if(c>a*a/d){if(p>c){var s=t.charge/c;n.px-=i*s,n.py-=o*s}return!0}if(t.point&&c&&p>c){var s=t.pointCharge/c;n.px-=i*s,n.py-=o*s}}return!t.charge}}function t(n){n.px=Xo.event.x,n.py=Xo.event.y,a.resume()}var e,r,u,i,o,a={},c=Xo.dispatch("start","tick","end"),s=[1,1],l=.9,f=us,h=is,g=-30,p=os,v=.1,d=.64,m=[],y=[];return a.tick=function(){if((r*=.99)<.005)return c.end({type:"end",alpha:r=0}),!0;var 
t,e,a,f,h,p,d,x,M,_=m.length,b=y.length;for(e=0;b>e;++e)a=y[e],f=a.source,h=a.target,x=h.x-f.x,M=h.y-f.y,(p=x*x+M*M)&&(p=r*i[e]*((p=Math.sqrt(p))-u[e])/p,x*=p,M*=p,h.x-=x*(d=f.weight/(h.weight+f.weight)),h.y-=M*d,f.x+=x*(d=1-d),f.y+=M*d);if((d=r*v)&&(x=s[0]/2,M=s[1]/2,e=-1,d))for(;++e<_;)a=m[e],a.x+=(x-a.x)*d,a.y+=(M-a.y)*d;if(g)for(Zu(t=Xo.geom.quadtree(m),r,o),e=-1;++e<_;)(a=m[e]).fixed||t.visit(n(a));for(e=-1;++e<_;)a=m[e],a.fixed?(a.x=a.px,a.y=a.py):(a.x-=(a.px-(a.px=a.x))*l,a.y-=(a.py-(a.py=a.y))*l);c.tick({type:"tick",alpha:r})},a.nodes=function(n){return arguments.length?(m=n,a):m},a.links=function(n){return arguments.length?(y=n,a):y},a.size=function(n){return arguments.length?(s=n,a):s},a.linkDistance=function(n){return arguments.length?(f="function"==typeof n?n:+n,a):f},a.distance=a.linkDistance,a.linkStrength=function(n){return arguments.length?(h="function"==typeof n?n:+n,a):h},a.friction=function(n){return arguments.length?(l=+n,a):l},a.charge=function(n){return arguments.length?(g="function"==typeof n?n:+n,a):g},a.chargeDistance=function(n){return arguments.length?(p=n*n,a):Math.sqrt(p)},a.gravity=function(n){return arguments.length?(v=+n,a):v},a.theta=function(n){return arguments.length?(d=n*n,a):Math.sqrt(d)},a.alpha=function(n){return arguments.length?(n=+n,r?r=n>0?n:0:n>0&&(c.start({type:"start",alpha:r=n}),Xo.timer(a.tick)),a):r},a.start=function(){function n(n,r){if(!e){for(e=new Array(c),a=0;c>a;++a)e[a]=[];for(a=0;s>a;++a){var u=y[a];e[u.source.index].push(u.target),e[u.target.index].push(u.source)}}for(var i,o=e[t],a=-1,s=o.length;++a<s;)if(!isNaN(i=o[a][n]))return i;return Math.random()*r}var t,e,r,c=m.length,l=y.length,p=s[0],v=s[1];for(t=0;c>t;++t)(r=m[t]).index=t,r.weight=0;for(t=0;l>t;++t)r=y[t],"number"==typeof r.source&&(r.source=m[r.source]),"number"==typeof 
r.target&&(r.target=m[r.target]),++r.source.weight,++r.target.weight;for(t=0;c>t;++t)r=m[t],isNaN(r.x)&&(r.x=n("x",p)),isNaN(r.y)&&(r.y=n("y",v)),isNaN(r.px)&&(r.px=r.x),isNaN(r.py)&&(r.py=r.y);if(u=[],"function"==typeof f)for(t=0;l>t;++t)u[t]=+f.call(this,y[t],t);else for(t=0;l>t;++t)u[t]=f;if(i=[],"function"==typeof h)for(t=0;l>t;++t)i[t]=+h.call(this,y[t],t);else for(t=0;l>t;++t)i[t]=h;if(o=[],"function"==typeof g)for(t=0;c>t;++t)o[t]=+g.call(this,m[t],t);else for(t=0;c>t;++t)o[t]=g;return a.resume()},a.resume=function(){return a.alpha(.1)},a.stop=function(){return a.alpha(0)},a.drag=function(){return e||(e=Xo.behavior.drag().origin(bt).on("dragstart.force",Fu).on("drag.force",t).on("dragend.force",Ou)),arguments.length?(this.on("mouseover.force",Yu).on("mouseout.force",Iu).call(e),void 0):e},Xo.rebind(a,c,"on")};var us=20,is=1,os=1/0;Xo.layout.hierarchy=function(){function n(t,o,a){var c=u.call(e,t,o);if(t.depth=o,a.push(t),c&&(s=c.length)){for(var s,l,f=-1,h=t.children=new Array(s),g=0,p=o+1;++f<s;)l=h[f]=n(c[f],p,a),l.parent=t,g+=l.value;r&&h.sort(r),i&&(t.value=g)}else delete t.children,i&&(t.value=+i.call(e,t,o)||0);return t}function t(n,r){var u=n.children,o=0;if(u&&(a=u.length))for(var a,c=-1,s=r+1;++c<a;)o+=t(u[c],s);else i&&(o=+i.call(e,n,r)||0);return i&&(n.value=o),o}function e(t){var e=[];return n(t,0,e),e}var r=Bu,u=Xu,i=$u;return e.sort=function(n){return arguments.length?(r=n,e):r},e.children=function(n){return arguments.length?(u=n,e):u},e.value=function(n){return arguments.length?(i=n,e):i},e.revalue=function(n){return t(n,0),n},e},Xo.layout.partition=function(){function n(t,e,r,u){var i=t.children;if(t.x=e,t.y=t.depth*u,t.dx=r,t.dy=u,i&&(o=i.length)){var o,a,c,s=-1;for(r=t.value?r/t.value:0;++s<o;)n(a=i[s],e,c=a.value*r,u),e+=c}}function t(n){var e=n.children,r=0;if(e&&(u=e.length))for(var u,i=-1;++i<u;)r=Math.max(r,t(e[i]));return 1+r}function e(e,i){var o=r.call(this,e,i);return n(o[0],0,u[0],u[1]/t(o[0])),o}var 
r=Xo.layout.hierarchy(),u=[1,1];return e.size=function(n){return arguments.length?(u=n,e):u},Vu(e,r)},Xo.layout.pie=function(){function n(i){var o=i.map(function(e,r){return+t.call(n,e,r)}),a=+("function"==typeof r?r.apply(this,arguments):r),c=(("function"==typeof u?u.apply(this,arguments):u)-a)/Xo.sum(o),s=Xo.range(i.length);null!=e&&s.sort(e===as?function(n,t){return o[t]-o[n]}:function(n,t){return e(i[n],i[t])});var l=[];return s.forEach(function(n){var t;l[n]={data:i[n],value:t=o[n],startAngle:a,endAngle:a+=t*c}}),l}var t=Number,e=as,r=0,u=ka;return n.value=function(e){return arguments.length?(t=e,n):t},n.sort=function(t){return arguments.length?(e=t,n):e},n.startAngle=function(t){return arguments.length?(r=t,n):r},n.endAngle=function(t){return arguments.length?(u=t,n):u},n};var as={};Xo.layout.stack=function(){function n(a,c){var s=a.map(function(e,r){return t.call(n,e,r)}),l=s.map(function(t){return t.map(function(t,e){return[i.call(n,t,e),o.call(n,t,e)]})}),f=e.call(n,l,c);s=Xo.permute(s,f),l=Xo.permute(l,f);var h,g,p,v=r.call(n,l,c),d=s.length,m=s[0].length;for(g=0;m>g;++g)for(u.call(n,s[0][g],p=v[g],l[0][g][1]),h=1;d>h;++h)u.call(n,s[h][g],p+=l[h-1][g][1],l[h][g][1]);return a}var t=bt,e=Qu,r=ni,u=Ku,i=Ju,o=Gu;return n.values=function(e){return arguments.length?(t=e,n):t},n.order=function(t){return arguments.length?(e="function"==typeof t?t:cs.get(t)||Qu,n):e},n.offset=function(t){return arguments.length?(r="function"==typeof t?t:ss.get(t)||ni,n):r},n.x=function(t){return arguments.length?(i=t,n):i},n.y=function(t){return arguments.length?(o=t,n):o},n.out=function(t){return arguments.length?(u=t,n):u},n};var cs=Xo.map({"inside-out":function(n){var t,e,r=n.length,u=n.map(ti),i=n.map(ei),o=Xo.range(r).sort(function(n,t){return u[n]-u[t]}),a=0,c=0,s=[],l=[];for(t=0;r>t;++t)e=o[t],c>a?(a+=i[e],s.push(e)):(c+=i[e],l.push(e));return l.reverse().concat(s)},reverse:function(n){return Xo.range(n.length).reverse()},"default":Qu}),ss=Xo.map({silhouette:function(n){var 
t,e,r,u=n.length,i=n[0].length,o=[],a=0,c=[];for(e=0;i>e;++e){for(t=0,r=0;u>t;t++)r+=n[t][e][1];r>a&&(a=r),o.push(r)}for(e=0;i>e;++e)c[e]=(a-o[e])/2;return c},wiggle:function(n){var t,e,r,u,i,o,a,c,s,l=n.length,f=n[0],h=f.length,g=[];for(g[0]=c=s=0,e=1;h>e;++e){for(t=0,u=0;l>t;++t)u+=n[t][e][1];for(t=0,i=0,a=f[e][0]-f[e-1][0];l>t;++t){for(r=0,o=(n[t][e][1]-n[t][e-1][1])/(2*a);t>r;++r)o+=(n[r][e][1]-n[r][e-1][1])/a;i+=o*n[t][e][1]}g[e]=c-=u?i/u*a:0,s>c&&(s=c)}for(e=0;h>e;++e)g[e]-=s;return g},expand:function(n){var t,e,r,u=n.length,i=n[0].length,o=1/u,a=[];for(e=0;i>e;++e){for(t=0,r=0;u>t;t++)r+=n[t][e][1];if(r)for(t=0;u>t;t++)n[t][e][1]/=r;else for(t=0;u>t;t++)n[t][e][1]=o}for(e=0;i>e;++e)a[e]=0;return a},zero:ni});Xo.layout.histogram=function(){function n(n,i){for(var o,a,c=[],s=n.map(e,this),l=r.call(this,s,i),f=u.call(this,l,s,i),i=-1,h=s.length,g=f.length-1,p=t?1:1/h;++i<g;)o=c[i]=[],o.dx=f[i+1]-(o.x=f[i]),o.y=0;if(g>0)for(i=-1;++i<h;)a=s[i],a>=l[0]&&a<=l[1]&&(o=c[Xo.bisect(f,a,1,g)-1],o.y+=p,o.push(n[i]));return c}var t=!0,e=Number,r=oi,u=ui;return n.value=function(t){return arguments.length?(e=t,n):e},n.range=function(t){return arguments.length?(r=_t(t),n):r},n.bins=function(t){return arguments.length?(u="number"==typeof t?function(n){return ii(n,t)}:_t(t),n):u},n.frequency=function(e){return arguments.length?(t=!!e,n):t},n},Xo.layout.tree=function(){function n(n,i){function o(n,t){var r=n.children,u=n._tree;if(r&&(i=r.length)){for(var i,a,s,l=r[0],f=l,h=-1;++h<i;)s=r[h],o(s,a),f=c(s,a,f),a=s;vi(n);var g=.5*(l._tree.prelim+s._tree.prelim);t?(u.prelim=t._tree.prelim+e(n,t),u.mod=u.prelim-g):u.prelim=g}else t&&(u.prelim=t._tree.prelim+e(n,t))}function a(n,t){n.x=n._tree.prelim+t;var e=n.children;if(e&&(r=e.length)){var r,u=-1;for(t+=n._tree.mod;++u<r;)a(e[u],t)}}function c(n,t,r){if(t){for(var 
u,i=n,o=n,a=t,c=n.parent.children[0],s=i._tree.mod,l=o._tree.mod,f=a._tree.mod,h=c._tree.mod;a=si(a),i=ci(i),a&&i;)c=ci(c),o=si(o),o._tree.ancestor=n,u=a._tree.prelim+f-i._tree.prelim-s+e(a,i),u>0&&(di(mi(a,n,r),n,u),s+=u,l+=u),f+=a._tree.mod,s+=i._tree.mod,h+=c._tree.mod,l+=o._tree.mod;a&&!si(o)&&(o._tree.thread=a,o._tree.mod+=f-l),i&&!ci(c)&&(c._tree.thread=i,c._tree.mod+=s-h,r=n)}return r}var s=t.call(this,n,i),l=s[0];pi(l,function(n,t){n._tree={ancestor:n,prelim:0,mod:0,change:0,shift:0,number:t?t._tree.number+1:0}}),o(l),a(l,-l._tree.prelim);var f=li(l,hi),h=li(l,fi),g=li(l,gi),p=f.x-e(f,h)/2,v=h.x+e(h,f)/2,d=g.depth||1;return pi(l,u?function(n){n.x*=r[0],n.y=n.depth*r[1],delete n._tree}:function(n){n.x=(n.x-p)/(v-p)*r[0],n.y=n.depth/d*r[1],delete n._tree}),s}var t=Xo.layout.hierarchy().sort(null).value(null),e=ai,r=[1,1],u=!1;return n.separation=function(t){return arguments.length?(e=t,n):e},n.size=function(t){return arguments.length?(u=null==(r=t),n):u?null:r},n.nodeSize=function(t){return arguments.length?(u=null!=(r=t),n):u?r:null},Vu(n,t)},Xo.layout.pack=function(){function n(n,i){var o=e.call(this,n,i),a=o[0],c=u[0],s=u[1],l=null==t?Math.sqrt:"function"==typeof t?t:function(){return t};if(a.x=a.y=0,pi(a,function(n){n.r=+l(n.value)}),pi(a,bi),r){var f=r*(t?1:Math.max(2*a.r/c,2*a.r/s))/2;pi(a,function(n){n.r+=f}),pi(a,bi),pi(a,function(n){n.r-=f})}return ki(a,c/2,s/2,t?1:1/Math.max(2*a.r/c,2*a.r/s)),o}var t,e=Xo.layout.hierarchy().sort(yi),r=0,u=[1,1];return n.size=function(t){return arguments.length?(u=t,n):u},n.radius=function(e){return arguments.length?(t=null==e||"function"==typeof e?e:+e,n):t},n.padding=function(t){return arguments.length?(r=+t,n):r},Vu(n,e)},Xo.layout.cluster=function(){function n(n,i){var o,a=t.call(this,n,i),c=a[0],s=0;pi(c,function(n){var t=n.children;t&&t.length?(n.x=Ci(t),n.y=Ai(t)):(n.x=o?s+=e(n,o):0,n.y=0,o=n)});var l=Ni(c),f=Li(c),h=l.x-e(l,f)/2,g=f.x+e(f,l)/2;return 
pi(c,u?function(n){n.x=(n.x-c.x)*r[0],n.y=(c.y-n.y)*r[1]}:function(n){n.x=(n.x-h)/(g-h)*r[0],n.y=(1-(c.y?n.y/c.y:1))*r[1]}),a}var t=Xo.layout.hierarchy().sort(null).value(null),e=ai,r=[1,1],u=!1;return n.separation=function(t){return arguments.length?(e=t,n):e},n.size=function(t){return arguments.length?(u=null==(r=t),n):u?null:r},n.nodeSize=function(t){return arguments.length?(u=null!=(r=t),n):u?r:null},Vu(n,t)},Xo.layout.treemap=function(){function n(n,t){for(var e,r,u=-1,i=n.length;++u<i;)r=(e=n[u]).value*(0>t?0:t),e.area=isNaN(r)||0>=r?0:r}function t(e){var i=e.children;if(i&&i.length){var o,a,c,s=f(e),l=[],h=i.slice(),p=1/0,v="slice"===g?s.dx:"dice"===g?s.dy:"slice-dice"===g?1&e.depth?s.dy:s.dx:Math.min(s.dx,s.dy);for(n(h,s.dx*s.dy/e.value),l.area=0;(c=h.length)>0;)l.push(o=h[c-1]),l.area+=o.area,"squarify"!==g||(a=r(l,v))<=p?(h.pop(),p=a):(l.area-=l.pop().area,u(l,v,s,!1),v=Math.min(s.dx,s.dy),l.length=l.area=0,p=1/0);l.length&&(u(l,v,s,!0),l.length=l.area=0),i.forEach(t)}}function e(t){var r=t.children;if(r&&r.length){var i,o=f(t),a=r.slice(),c=[];for(n(a,o.dx*o.dy/t.value),c.area=0;i=a.pop();)c.push(i),c.area+=i.area,null!=i.z&&(u(c,i.z?o.dx:o.dy,o,!a.length),c.length=c.area=0);r.forEach(e)}}function r(n,t){for(var e,r=n.area,u=0,i=1/0,o=-1,a=n.length;++o<a;)(e=n[o].area)&&(i>e&&(i=e),e>u&&(u=e));return r*=r,t*=t,r?Math.max(t*u*p/r,r/(t*i*p)):1/0}function u(n,t,e,r){var u,i=-1,o=n.length,a=e.x,s=e.y,l=t?c(n.area/t):0;if(t==e.dx){for((r||l>e.dy)&&(l=e.dy);++i<o;)u=n[i],u.x=a,u.y=s,u.dy=l,a+=u.dx=Math.min(e.x+e.dx-a,l?c(u.area/l):0);u.z=!0,u.dx+=e.x+e.dx-a,e.y+=l,e.dy-=l}else{for((r||l>e.dx)&&(l=e.dx);++i<o;)u=n[i],u.x=a,u.y=s,u.dx=l,s+=u.dy=Math.min(e.y+e.dy-s,l?c(u.area/l):0);u.z=!1,u.dy+=e.y+e.dy-s,e.x+=l,e.dx-=l}}function i(r){var u=o||a(r),i=u[0];return i.x=0,i.y=0,i.dx=s[0],i.dy=s[1],o&&a.revalue(i),n([i],i.dx*i.dy/i.value),(o?e:t)(i),h&&(o=u),u}var o,a=Xo.layout.hierarchy(),c=Math.round,s=[1,1],l=null,f=Ti,h=!1,g="squarify",p=.5*(1+Math.sqrt(5));return 
i.size=function(n){return arguments.length?(s=n,i):s},i.padding=function(n){function t(t){var e=n.call(i,t,t.depth);return null==e?Ti(t):qi(t,"number"==typeof e?[e,e,e,e]:e)}function e(t){return qi(t,n)}if(!arguments.length)return l;var r;return f=null==(l=n)?Ti:"function"==(r=typeof n)?t:"number"===r?(n=[n,n,n,n],e):e,i},i.round=function(n){return arguments.length?(c=n?Math.round:Number,i):c!=Number},i.sticky=function(n){return arguments.length?(h=n,o=null,i):h},i.ratio=function(n){return arguments.length?(p=n,i):p},i.mode=function(n){return arguments.length?(g=n+"",i):g},Vu(i,a)},Xo.random={normal:function(n,t){var e=arguments.length;return 2>e&&(t=1),1>e&&(n=0),function(){var e,r,u;do e=2*Math.random()-1,r=2*Math.random()-1,u=e*e+r*r;while(!u||u>1);return n+t*e*Math.sqrt(-2*Math.log(u)/u)}},logNormal:function(){var n=Xo.random.normal.apply(Xo,arguments);return function(){return Math.exp(n())}},bates:function(n){var t=Xo.random.irwinHall(n);return function(){return t()/n}},irwinHall:function(n){return function(){for(var t=0,e=0;n>e;e++)t+=Math.random();return t}}},Xo.scale={};var ls={floor:bt,ceil:bt};Xo.scale.linear=function(){return Hi([0,1],[0,1],fu,!1)};var fs={s:1,g:1,p:1,r:1,e:1};Xo.scale.log=function(){return $i(Xo.scale.linear().domain([0,1]),10,!0,[1,10])};var hs=Xo.format(".0e"),gs={floor:function(n){return-Math.ceil(-n)},ceil:function(n){return-Math.floor(-n)}};Xo.scale.pow=function(){return Bi(Xo.scale.linear(),1,[0,1])},Xo.scale.sqrt=function(){return Xo.scale.pow().exponent(.5)},Xo.scale.ordinal=function(){return Ji([],{t:"range",a:[[]]})},Xo.scale.category10=function(){return Xo.scale.ordinal().range(ps)},Xo.scale.category20=function(){return Xo.scale.ordinal().range(vs)},Xo.scale.category20b=function(){return Xo.scale.ordinal().range(ds)},Xo.scale.category20c=function(){return Xo.scale.ordinal().range(ms)};var 
ps=[2062260,16744206,2924588,14034728,9725885,9197131,14907330,8355711,12369186,1556175].map(ht),vs=[2062260,11454440,16744206,16759672,2924588,10018698,14034728,16750742,9725885,12955861,9197131,12885140,14907330,16234194,8355711,13092807,12369186,14408589,1556175,10410725].map(ht),ds=[3750777,5395619,7040719,10264286,6519097,9216594,11915115,13556636,9202993,12426809,15186514,15190932,8666169,11356490,14049643,15177372,8077683,10834324,13528509,14589654].map(ht),ms=[3244733,7057110,10406625,13032431,15095053,16616764,16625259,16634018,3253076,7652470,10607003,13101504,7695281,10394312,12369372,14342891,6513507,9868950,12434877,14277081].map(ht);Xo.scale.quantile=function(){return Gi([],[])},Xo.scale.quantize=function(){return Ki(0,1,[0,1])},Xo.scale.threshold=function(){return Qi([.5],[0,1])},Xo.scale.identity=function(){return no([0,1])},Xo.svg={},Xo.svg.arc=function(){function n(){var n=t.apply(this,arguments),i=e.apply(this,arguments),o=r.apply(this,arguments)+ys,a=u.apply(this,arguments)+ys,c=(o>a&&(c=o,o=a,a=c),a-o),s=Sa>c?"0":"1",l=Math.cos(o),f=Math.sin(o),h=Math.cos(a),g=Math.sin(a);return c>=xs?n?"M0,"+i+"A"+i+","+i+" 0 1,1 0,"+-i+"A"+i+","+i+" 0 1,1 0,"+i+"M0,"+n+"A"+n+","+n+" 0 1,0 0,"+-n+"A"+n+","+n+" 0 1,0 0,"+n+"Z":"M0,"+i+"A"+i+","+i+" 0 1,1 0,"+-i+"A"+i+","+i+" 0 1,1 0,"+i+"Z":n?"M"+i*l+","+i*f+"A"+i+","+i+" 0 "+s+",1 "+i*h+","+i*g+"L"+n*h+","+n*g+"A"+n+","+n+" 0 "+s+",0 "+n*l+","+n*f+"Z":"M"+i*l+","+i*f+"A"+i+","+i+" 0 "+s+",1 "+i*h+","+i*g+"L0,0"+"Z"}var t=to,e=eo,r=ro,u=uo;return n.innerRadius=function(e){return arguments.length?(t=_t(e),n):t},n.outerRadius=function(t){return arguments.length?(e=_t(t),n):e},n.startAngle=function(t){return arguments.length?(r=_t(t),n):r},n.endAngle=function(t){return arguments.length?(u=_t(t),n):u},n.centroid=function(){var n=(t.apply(this,arguments)+e.apply(this,arguments))/2,i=(r.apply(this,arguments)+u.apply(this,arguments))/2+ys;return[Math.cos(i)*n,Math.sin(i)*n]},n};var 
ys=-Ea,xs=ka-Aa;Xo.svg.line=function(){return io(bt)};var Ms=Xo.map({linear:oo,"linear-closed":ao,step:co,"step-before":so,"step-after":lo,basis:mo,"basis-open":yo,"basis-closed":xo,bundle:Mo,cardinal:go,"cardinal-open":fo,"cardinal-closed":ho,monotone:Eo});Ms.forEach(function(n,t){t.key=n,t.closed=/-closed$/.test(n)});var _s=[0,2/3,1/3,0],bs=[0,1/3,2/3,0],ws=[0,1/6,2/3,1/6];Xo.svg.line.radial=function(){var n=io(Ao);return n.radius=n.x,delete n.x,n.angle=n.y,delete n.y,n},so.reverse=lo,lo.reverse=so,Xo.svg.area=function(){return Co(bt)},Xo.svg.area.radial=function(){var n=Co(Ao);return n.radius=n.x,delete n.x,n.innerRadius=n.x0,delete n.x0,n.outerRadius=n.x1,delete n.x1,n.angle=n.y,delete n.y,n.startAngle=n.y0,delete n.y0,n.endAngle=n.y1,delete n.y1,n},Xo.svg.chord=function(){function n(n,a){var c=t(this,i,n,a),s=t(this,o,n,a);return"M"+c.p0+r(c.r,c.p1,c.a1-c.a0)+(e(c,s)?u(c.r,c.p1,c.r,c.p0):u(c.r,c.p1,s.r,s.p0)+r(s.r,s.p1,s.a1-s.a0)+u(s.r,s.p1,c.r,c.p0))+"Z"}function t(n,t,e,r){var u=t.call(n,e,r),i=a.call(n,u,r),o=c.call(n,u,r)+ys,l=s.call(n,u,r)+ys;return{r:i,a0:o,a1:l,p0:[i*Math.cos(o),i*Math.sin(o)],p1:[i*Math.cos(l),i*Math.sin(l)]}}function e(n,t){return n.a0==t.a0&&n.a1==t.a1}function r(n,t,e){return"A"+n+","+n+" 0 "+ +(e>Sa)+",1 "+t}function u(n,t,e,r){return"Q 0,0 "+r}var i=hr,o=gr,a=No,c=ro,s=uo;return n.radius=function(t){return arguments.length?(a=_t(t),n):a},n.source=function(t){return arguments.length?(i=_t(t),n):i},n.target=function(t){return arguments.length?(o=_t(t),n):o},n.startAngle=function(t){return arguments.length?(c=_t(t),n):c},n.endAngle=function(t){return arguments.length?(s=_t(t),n):s},n},Xo.svg.diagonal=function(){function n(n,u){var i=t.call(this,n,u),o=e.call(this,n,u),a=(i.y+o.y)/2,c=[i,{x:i.x,y:a},{x:o.x,y:a},o];return c=c.map(r),"M"+c[0]+"C"+c[1]+" "+c[2]+" "+c[3]}var t=hr,e=gr,r=Lo;return n.source=function(e){return arguments.length?(t=_t(e),n):t},n.target=function(t){return 
arguments.length?(e=_t(t),n):e},n.projection=function(t){return arguments.length?(r=t,n):r},n},Xo.svg.diagonal.radial=function(){var n=Xo.svg.diagonal(),t=Lo,e=n.projection;return n.projection=function(n){return arguments.length?e(To(t=n)):t},n},Xo.svg.symbol=function(){function n(n,r){return(Ss.get(t.call(this,n,r))||Ro)(e.call(this,n,r))}var t=zo,e=qo;return n.type=function(e){return arguments.length?(t=_t(e),n):t},n.size=function(t){return arguments.length?(e=_t(t),n):e},n};var Ss=Xo.map({circle:Ro,cross:function(n){var t=Math.sqrt(n/5)/2;return"M"+-3*t+","+-t+"H"+-t+"V"+-3*t+"H"+t+"V"+-t+"H"+3*t+"V"+t+"H"+t+"V"+3*t+"H"+-t+"V"+t+"H"+-3*t+"Z"},diamond:function(n){var t=Math.sqrt(n/(2*Cs)),e=t*Cs;return"M0,"+-t+"L"+e+",0"+" 0,"+t+" "+-e+",0"+"Z"},square:function(n){var t=Math.sqrt(n)/2;return"M"+-t+","+-t+"L"+t+","+-t+" "+t+","+t+" "+-t+","+t+"Z"},"triangle-down":function(n){var t=Math.sqrt(n/As),e=t*As/2;return"M0,"+e+"L"+t+","+-e+" "+-t+","+-e+"Z"},"triangle-up":function(n){var t=Math.sqrt(n/As),e=t*As/2;return"M0,"+-e+"L"+t+","+e+" "+-t+","+e+"Z"}});Xo.svg.symbolTypes=Ss.keys();var ks,Es,As=Math.sqrt(3),Cs=Math.tan(30*Na),Ns=[],Ls=0;Ns.call=da.call,Ns.empty=da.empty,Ns.node=da.node,Ns.size=da.size,Xo.transition=function(n){return arguments.length?ks?n.transition():n:xa.transition()},Xo.transition.prototype=Ns,Ns.select=function(n){var t,e,r,u=this.id,i=[];n=M(n);for(var o=-1,a=this.length;++o<a;){i.push(t=[]);for(var c=this[o],s=-1,l=c.length;++s<l;)(r=c[s])&&(e=n.call(r,r.__data__,s,o))?("__data__"in r&&(e.__data__=r.__data__),jo(e,s,u,r.__transition__[u]),t.push(e)):t.push(null)}return Do(i,u)},Ns.selectAll=function(n){var t,e,r,u,i,o=this.id,a=[];n=_(n);for(var c=-1,s=this.length;++c<s;)for(var l=this[c],f=-1,h=l.length;++f<h;)if(r=l[f]){i=r.__transition__[o],e=n.call(r,r.__data__,f,c),a.push(t=[]);for(var g=-1,p=e.length;++g<p;)(u=e[g])&&jo(u,g,o,i),t.push(u)}return Do(a,o)},Ns.filter=function(n){var t,e,r,u=[];"function"!=typeof n&&(n=q(n));for(var 
i=0,o=this.length;o>i;i++){u.push(t=[]);for(var e=this[i],a=0,c=e.length;c>a;a++)(r=e[a])&&n.call(r,r.__data__,a,i)&&t.push(r)}return Do(u,this.id)},Ns.tween=function(n,t){var e=this.id;return arguments.length<2?this.node().__transition__[e].tween.get(n):R(this,null==t?function(t){t.__transition__[e].tween.remove(n)}:function(r){r.__transition__[e].tween.set(n,t)})},Ns.attr=function(n,t){function e(){this.removeAttribute(a)}function r(){this.removeAttributeNS(a.space,a.local)}function u(n){return null==n?e:(n+="",function(){var t,e=this.getAttribute(a);return e!==n&&(t=o(e,n),function(n){this.setAttribute(a,t(n))})})}function i(n){return null==n?r:(n+="",function(){var t,e=this.getAttributeNS(a.space,a.local);return e!==n&&(t=o(e,n),function(n){this.setAttributeNS(a.space,a.local,t(n))})})}if(arguments.length<2){for(t in n)this.attr(t,n[t]);return this}var o="transform"==n?Ru:fu,a=Xo.ns.qualify(n);return Po(this,"attr."+n,t,a.local?i:u)},Ns.attrTween=function(n,t){function e(n,e){var r=t.call(this,n,e,this.getAttribute(u));return r&&function(n){this.setAttribute(u,r(n))}}function r(n,e){var r=t.call(this,n,e,this.getAttributeNS(u.space,u.local));return r&&function(n){this.setAttributeNS(u.space,u.local,r(n))}}var u=Xo.ns.qualify(n);return this.tween("attr."+n,u.local?r:e)},Ns.style=function(n,t,e){function r(){this.style.removeProperty(n)}function u(t){return null==t?r:(t+="",function(){var r,u=Go.getComputedStyle(this,null).getPropertyValue(n);return u!==t&&(r=fu(u,t),function(t){this.style.setProperty(n,r(t),e)})})}var i=arguments.length;if(3>i){if("string"!=typeof n){2>i&&(t="");for(e in n)this.style(e,n[e],t);return this}e=""}return Po(this,"style."+n,t,u)},Ns.styleTween=function(n,t,e){function r(r,u){var i=t.call(this,r,u,Go.getComputedStyle(this,null).getPropertyValue(n));return i&&function(t){this.style.setProperty(n,i(t),e)}}return arguments.length<3&&(e=""),this.tween("style."+n,r)},Ns.text=function(n){return 
Po(this,"text",n,Uo)},Ns.remove=function(){return this.each("end.transition",function(){var n;this.__transition__.count<2&&(n=this.parentNode)&&n.removeChild(this)})},Ns.ease=function(n){var t=this.id;return arguments.length<1?this.node().__transition__[t].ease:("function"!=typeof n&&(n=Xo.ease.apply(Xo,arguments)),R(this,function(e){e.__transition__[t].ease=n}))},Ns.delay=function(n){var t=this.id;return R(this,"function"==typeof n?function(e,r,u){e.__transition__[t].delay=+n.call(e,e.__data__,r,u)}:(n=+n,function(e){e.__transition__[t].delay=n}))},Ns.duration=function(n){var t=this.id;return R(this,"function"==typeof n?function(e,r,u){e.__transition__[t].duration=Math.max(1,n.call(e,e.__data__,r,u))}:(n=Math.max(1,n),function(e){e.__transition__[t].duration=n}))},Ns.each=function(n,t){var e=this.id;if(arguments.length<2){var r=Es,u=ks;ks=e,R(this,function(t,r,u){Es=t.__transition__[e],n.call(t,t.__data__,r,u)}),Es=r,ks=u}else R(this,function(r){var u=r.__transition__[e];(u.event||(u.event=Xo.dispatch("start","end"))).on(n,t)});return this},Ns.transition=function(){for(var n,t,e,r,u=this.id,i=++Ls,o=[],a=0,c=this.length;c>a;a++){o.push(n=[]);for(var t=this[a],s=0,l=t.length;l>s;s++)(e=t[s])&&(r=Object.create(e.__transition__[u]),r.delay+=r.duration,jo(e,s,i,r)),n.push(e)}return Do(o,i)},Xo.svg.axis=function(){function n(n){n.each(function(){var n,s=Xo.select(this),l=this.__chart__||e,f=this.__chart__=e.copy(),h=null==c?f.ticks?f.ticks.apply(f,a):f.domain():c,g=null==t?f.tickFormat?f.tickFormat.apply(f,a):bt:t,p=s.selectAll(".tick").data(h,f),v=p.enter().insert("g",".domain").attr("class","tick").style("opacity",Aa),d=Xo.transition(p.exit()).style("opacity",Aa).remove(),m=Xo.transition(p).style("opacity",1),y=Ri(f),x=s.selectAll(".domain").data([0]),M=(x.enter().append("path").attr("class","domain"),Xo.transition(x));v.append("line"),v.append("text");var 
_=v.select("line"),b=m.select("line"),w=p.select("text").text(g),S=v.select("text"),k=m.select("text");switch(r){case"bottom":n=Ho,_.attr("y2",u),S.attr("y",Math.max(u,0)+o),b.attr("x2",0).attr("y2",u),k.attr("x",0).attr("y",Math.max(u,0)+o),w.attr("dy",".71em").style("text-anchor","middle"),M.attr("d","M"+y[0]+","+i+"V0H"+y[1]+"V"+i);break;case"top":n=Ho,_.attr("y2",-u),S.attr("y",-(Math.max(u,0)+o)),b.attr("x2",0).attr("y2",-u),k.attr("x",0).attr("y",-(Math.max(u,0)+o)),w.attr("dy","0em").style("text-anchor","middle"),M.attr("d","M"+y[0]+","+-i+"V0H"+y[1]+"V"+-i);break;case"left":n=Fo,_.attr("x2",-u),S.attr("x",-(Math.max(u,0)+o)),b.attr("x2",-u).attr("y2",0),k.attr("x",-(Math.max(u,0)+o)).attr("y",0),w.attr("dy",".32em").style("text-anchor","end"),M.attr("d","M"+-i+","+y[0]+"H0V"+y[1]+"H"+-i);break;case"right":n=Fo,_.attr("x2",u),S.attr("x",Math.max(u,0)+o),b.attr("x2",u).attr("y2",0),k.attr("x",Math.max(u,0)+o).attr("y",0),w.attr("dy",".32em").style("text-anchor","start"),M.attr("d","M"+i+","+y[0]+"H0V"+y[1]+"H"+i)}if(f.rangeBand){var E=f,A=E.rangeBand()/2;l=f=function(n){return E(n)+A}}else l.rangeBand?l=f:d.call(n,f);v.call(n,l),m.call(n,f)})}var t,e=Xo.scale.linear(),r=Ts,u=6,i=6,o=3,a=[10],c=null;return n.scale=function(t){return arguments.length?(e=t,n):e},n.orient=function(t){return arguments.length?(r=t in qs?t+"":Ts,n):r},n.ticks=function(){return arguments.length?(a=arguments,n):a},n.tickValues=function(t){return arguments.length?(c=t,n):c},n.tickFormat=function(e){return arguments.length?(t=e,n):t},n.tickSize=function(t){var e=arguments.length;return e?(u=+t,i=+arguments[e-1],n):u},n.innerTickSize=function(t){return arguments.length?(u=+t,n):u},n.outerTickSize=function(t){return arguments.length?(i=+t,n):i},n.tickPadding=function(t){return arguments.length?(o=+t,n):o},n.tickSubdivide=function(){return arguments.length&&n},n};var Ts="bottom",qs={top:1,right:1,bottom:1,left:1};Xo.svg.brush=function(){function n(i){i.each(function(){var 
i=Xo.select(this).style("pointer-events","all").style("-webkit-tap-highlight-color","rgba(0,0,0,0)").on("mousedown.brush",u).on("touchstart.brush",u),o=i.selectAll(".background").data([0]);o.enter().append("rect").attr("class","background").style("visibility","hidden").style("cursor","crosshair"),i.selectAll(".extent").data([0]).enter().append("rect").attr("class","extent").style("cursor","move");var a=i.selectAll(".resize").data(p,bt);a.exit().remove(),a.enter().append("g").attr("class",function(n){return"resize "+n}).style("cursor",function(n){return zs[n]}).append("rect").attr("x",function(n){return/[ew]$/.test(n)?-3:null}).attr("y",function(n){return/^[ns]/.test(n)?-3:null}).attr("width",6).attr("height",6).style("visibility","hidden"),a.style("display",n.empty()?"none":null);var l,f=Xo.transition(i),h=Xo.transition(o);c&&(l=Ri(c),h.attr("x",l[0]).attr("width",l[1]-l[0]),e(f)),s&&(l=Ri(s),h.attr("y",l[0]).attr("height",l[1]-l[0]),r(f)),t(f)})}function t(n){n.selectAll(".resize").attr("transform",function(n){return"translate("+l[+/e$/.test(n)]+","+f[+/^s/.test(n)]+")"})}function e(n){n.select(".extent").attr("x",l[0]),n.selectAll(".extent,.n>rect,.s>rect").attr("width",l[1]-l[0])}function r(n){n.select(".extent").attr("y",f[0]),n.selectAll(".extent,.e>rect,.w>rect").attr("height",f[1]-f[0])}function u(){function u(){32==Xo.event.keyCode&&(C||(x=null,L[0]-=l[1],L[1]-=f[1],C=2),d())}function p(){32==Xo.event.keyCode&&2==C&&(L[0]+=l[1],L[1]+=f[1],C=0,d())}function v(){var n=Xo.mouse(_),u=!1;M&&(n[0]+=M[0],n[1]+=M[1]),C||(Xo.event.altKey?(x||(x=[(l[0]+l[1])/2,(f[0]+f[1])/2]),L[0]=l[+(n[0]<x[0])],L[1]=f[+(n[1]<x[1])]):x=null),E&&m(n,c,0)&&(e(S),u=!0),A&&m(n,s,1)&&(r(S),u=!0),u&&(t(S),w({type:"brush",mode:C?"move":"resize"}))}function m(n,t,e){var r,u,a=Ri(t),c=a[0],s=a[1],p=L[e],v=e?f:l,d=v[1]-v[0];return 
C&&(c-=p,s-=d+p),r=(e?g:h)?Math.max(c,Math.min(s,n[e])):n[e],C?u=(r+=p)+d:(x&&(p=Math.max(c,Math.min(s,2*x[e]-r))),r>p?(u=r,r=p):u=p),v[0]!=r||v[1]!=u?(e?o=null:i=null,v[0]=r,v[1]=u,!0):void 0}function y(){v(),S.style("pointer-events","all").selectAll(".resize").style("display",n.empty()?"none":null),Xo.select("body").style("cursor",null),T.on("mousemove.brush",null).on("mouseup.brush",null).on("touchmove.brush",null).on("touchend.brush",null).on("keydown.brush",null).on("keyup.brush",null),N(),w({type:"brushend"})}var x,M,_=this,b=Xo.select(Xo.event.target),w=a.of(_,arguments),S=Xo.select(_),k=b.datum(),E=!/^(n|s)$/.test(k)&&c,A=!/^(e|w)$/.test(k)&&s,C=b.classed("extent"),N=O(),L=Xo.mouse(_),T=Xo.select(Go).on("keydown.brush",u).on("keyup.brush",p);if(Xo.event.changedTouches?T.on("touchmove.brush",v).on("touchend.brush",y):T.on("mousemove.brush",v).on("mouseup.brush",y),S.interrupt().selectAll("*").interrupt(),C)L[0]=l[0]-L[0],L[1]=f[0]-L[1];else if(k){var q=+/w$/.test(k),z=+/^n/.test(k);M=[l[1-q]-L[0],f[1-z]-L[1]],L[0]=l[q],L[1]=f[z]}else Xo.event.altKey&&(x=L.slice());S.style("pointer-events","none").selectAll(".resize").style("display",null),Xo.select("body").style("cursor",b.style("cursor")),w({type:"brushstart"}),v()}var i,o,a=y(n,"brushstart","brush","brushend"),c=null,s=null,l=[0,0],f=[0,0],h=!0,g=!0,p=Rs[0];return n.event=function(n){n.each(function(){var n=a.of(this,arguments),t={x:l,y:f,i:i,j:o},e=this.__chart__||t;this.__chart__=t,ks?Xo.select(this).transition().each("start.brush",function(){i=e.i,o=e.j,l=e.x,f=e.y,n({type:"brushstart"})}).tween("brush:brush",function(){var e=hu(l,t.x),r=hu(f,t.y);return i=o=null,function(u){l=t.x=e(u),f=t.y=r(u),n({type:"brush",mode:"resize"})}}).each("end.brush",function(){i=t.i,o=t.j,n({type:"brush",mode:"resize"}),n({type:"brushend"})}):(n({type:"brushstart"}),n({type:"brush",mode:"resize"}),n({type:"brushend"}))})},n.x=function(t){return arguments.length?(c=t,p=Rs[!c<<1|!s],n):c},n.y=function(t){return 
arguments.length?(s=t,p=Rs[!c<<1|!s],n):s},n.clamp=function(t){return arguments.length?(c&&s?(h=!!t[0],g=!!t[1]):c?h=!!t:s&&(g=!!t),n):c&&s?[h,g]:c?h:s?g:null},n.extent=function(t){var e,r,u,a,h;return arguments.length?(c&&(e=t[0],r=t[1],s&&(e=e[0],r=r[0]),i=[e,r],c.invert&&(e=c(e),r=c(r)),e>r&&(h=e,e=r,r=h),(e!=l[0]||r!=l[1])&&(l=[e,r])),s&&(u=t[0],a=t[1],c&&(u=u[1],a=a[1]),o=[u,a],s.invert&&(u=s(u),a=s(a)),u>a&&(h=u,u=a,a=h),(u!=f[0]||a!=f[1])&&(f=[u,a])),n):(c&&(i?(e=i[0],r=i[1]):(e=l[0],r=l[1],c.invert&&(e=c.invert(e),r=c.invert(r)),e>r&&(h=e,e=r,r=h))),s&&(o?(u=o[0],a=o[1]):(u=f[0],a=f[1],s.invert&&(u=s.invert(u),a=s.invert(a)),u>a&&(h=u,u=a,a=h))),c&&s?[[e,u],[r,a]]:c?[e,r]:s&&[u,a])},n.clear=function(){return n.empty()||(l=[0,0],f=[0,0],i=o=null),n},n.empty=function(){return!!c&&l[0]==l[1]||!!s&&f[0]==f[1]},Xo.rebind(n,a,"on")};var zs={n:"ns-resize",e:"ew-resize",s:"ns-resize",w:"ew-resize",nw:"nwse-resize",ne:"nesw-resize",se:"nwse-resize",sw:"nesw-resize"},Rs=[["n","e","s","w","nw","ne","se","sw"],["e","w"],["n","s"],[]],Ds=tc.format=ac.timeFormat,Ps=Ds.utc,Us=Ps("%Y-%m-%dT%H:%M:%S.%LZ");Ds.iso=Date.prototype.toISOString&&+new Date("2000-01-01T00:00:00.000Z")?Oo:Us,Oo.parse=function(n){var t=new Date(n);return isNaN(t)?null:t},Oo.toString=Us.toString,tc.second=Rt(function(n){return new ec(1e3*Math.floor(n/1e3))},function(n,t){n.setTime(n.getTime()+1e3*Math.floor(t))},function(n){return n.getSeconds()}),tc.seconds=tc.second.range,tc.seconds.utc=tc.second.utc.range,tc.minute=Rt(function(n){return new ec(6e4*Math.floor(n/6e4))},function(n,t){n.setTime(n.getTime()+6e4*Math.floor(t))},function(n){return n.getMinutes()}),tc.minutes=tc.minute.range,tc.minutes.utc=tc.minute.utc.range,tc.hour=Rt(function(n){var t=n.getTimezoneOffset()/60;return new ec(36e5*(Math.floor(n/36e5-t)+t))},function(n,t){n.setTime(n.getTime()+36e5*Math.floor(t))},function(n){return n.getHours()}),tc.hours=tc.hour.range,tc.hours.utc=tc.hour.utc.range,tc.month=Rt(function(n){return 
n=tc.day(n),n.setDate(1),n},function(n,t){n.setMonth(n.getMonth()+t)},function(n){return n.getMonth()}),tc.months=tc.month.range,tc.months.utc=tc.month.utc.range;var js=[1e3,5e3,15e3,3e4,6e4,3e5,9e5,18e5,36e5,108e5,216e5,432e5,864e5,1728e5,6048e5,2592e6,7776e6,31536e6],Hs=[[tc.second,1],[tc.second,5],[tc.second,15],[tc.second,30],[tc.minute,1],[tc.minute,5],[tc.minute,15],[tc.minute,30],[tc.hour,1],[tc.hour,3],[tc.hour,6],[tc.hour,12],[tc.day,1],[tc.day,2],[tc.week,1],[tc.month,1],[tc.month,3],[tc.year,1]],Fs=Ds.multi([[".%L",function(n){return n.getMilliseconds()}],[":%S",function(n){return n.getSeconds()}],["%I:%M",function(n){return n.getMinutes()}],["%I %p",function(n){return n.getHours()}],["%a %d",function(n){return n.getDay()&&1!=n.getDate()}],["%b %d",function(n){return 1!=n.getDate()}],["%B",function(n){return n.getMonth()}],["%Y",be]]),Os={range:function(n,t,e){return Xo.range(Math.ceil(n/e)*e,+t,e).map(Io)},floor:bt,ceil:bt};Hs.year=tc.year,tc.scale=function(){return Yo(Xo.scale.linear(),Hs,Fs)};var Ys=Hs.map(function(n){return[n[0].utc,n[1]]}),Is=Ps.multi([[".%L",function(n){return n.getUTCMilliseconds()}],[":%S",function(n){return n.getUTCSeconds()}],["%I:%M",function(n){return n.getUTCMinutes()}],["%I %p",function(n){return n.getUTCHours()}],["%a %d",function(n){return n.getUTCDay()&&1!=n.getUTCDate()}],["%b %d",function(n){return 1!=n.getUTCDate()}],["%B",function(n){return n.getUTCMonth()}],["%Y",be]]);Ys.year=tc.year.utc,tc.scale.utc=function(){return Yo(Xo.scale.linear(),Ys,Is)},Xo.text=wt(function(n){return n.responseText}),Xo.json=function(n,t){return St(n,"application/json",Zo,t)},Xo.html=function(n,t){return St(n,"text/html",Vo,t)},Xo.xml=wt(function(n){return n.responseXML}),"function"==typeof define&&define.amd?define(Xo):"object"==typeof module&&module.exports?module.exports=Xo:this.d3=Xo}();'use strict';(function(window){window.define=undefined;}).call(this,this);'use strict';tr.exportTo('tr.ui.b',function(){const 
DataSeriesEnableChangeEventType='data-series-enabled-change';const THIS_DOC=document.currentScript.ownerDocument;const svgNS='http://www.w3.org/2000/svg';const ColorScheme=tr.b.ColorScheme;function getColorOfKey(key,selected){let id=ColorScheme.getColorIdForGeneralPurposeString(key);if(selected){id+=ColorScheme.properties.brightenedOffsets[0];} +return this.rangeOfInterest_;},get rangeOfInterest(){return this.rangeOfInterest_;},set rangeOfInterest(rangeOfInterest){this.rangeOfInterest_=rangeOfInterest;this.updateContents_();},get selection(){},set selection(_){},get textLabel(){return'Frame Data';},get model(){return this.model_;},set model(model){this.model_=model;this.updateContents_();}});tr.ui.side_panel.SidePanelRegistry.register(function(){return document.createElement('tr-ui-e-s-frame-data-side-panel');});});'use strict';Polymer({is:'tr-ui-b-chart-legend-key',ready(){this.$.checkbox.addEventListener('change',this.onCheckboxChange_.bind(this));},onCheckboxChange_(){tr.b.dispatchSimpleEvent(this,tr.ui.b.DataSeriesEnableChangeEventType,true,false,{key:Polymer.dom(this).textContent,enabled:this.enabled});},set textContent(t){Polymer.dom(this.$.label).textContent=t;Polymer.dom(this.$.link).textContent=t;this.updateContents_();},set width(w){w-=20;this.$.link.style.width=w+'px';this.$.label.style.width=w+'px';},get textContent(){return Polymer.dom(this.$.label).textContent;},set optional(optional){this.$.checkbox.style.visibility=optional?'visible':'hidden';},get optional(){return this.$.checkbox.style.visibility==='visible';},set enabled(enabled){this.$.checkbox.checked=enabled?'checked':'';},get enabled(){return this.$.checkbox.checked;},set color(c){this.$.label.style.color=c;this.$.link.color=c;},set target(target){this.$.link.setSelectionAndContent(target,Polymer.dom(this.$.label).textContent);this.updateContents_();},get target(){return this.$.link.selection;},set 
title(title){this.$.link.title=title;},updateContents_(){this.$.link.style.display=this.target?'':'none';this.$.label.style.display=this.target?'none':'';this.$.label.htmlFor=this.optional?'checkbox':'';}});'use strict';(function(window){window.define=function(x){window.d3=x;};window.define.amd=true;})(this);!function(){function n(n){return null!=n&&!isNaN(n)}function t(n){return n.length}function e(n){for(var t=1;n*t%1;)t*=10;return t}function r(n,t){try{for(var e in t)Object.defineProperty(n.prototype,e,{value:t[e],enumerable:!1})}catch(r){n.prototype=t}}function u(){}function i(n){return aa+n in this}function o(n){return n=aa+n,n in this&&delete this[n]}function a(){var n=[];return this.forEach(function(t){n.push(t)}),n}function c(){var n=0;for(var t in this)t.charCodeAt(0)===ca&&++n;return n}function s(){for(var n in this)if(n.charCodeAt(0)===ca)return!1;return!0}function l(){}function f(n,t,e){return function(){var r=e.apply(t,arguments);return r===t?n:r}}function h(n,t){if(t in n)return t;t=t.charAt(0).toUpperCase()+t.substring(1);for(var e=0,r=sa.length;r>e;++e){var u=sa[e]+t;if(u in n)return u}}function g(){}function p(){}function v(n){function t(){for(var t,r=e,u=-1,i=r.length;++u<i;)(t=r[u].on)&&t.apply(this,arguments);return n}var e=[],r=new u;return t.on=function(t,u){var i,o=r.get(t);return arguments.length<2?o&&o.on:(o&&(o.on=null,e=e.slice(0,i=e.indexOf(o)).concat(e.slice(i+1)),r.remove(t)),u&&e.push(r.set(t,{on:u})),n)},t}function d(){Xo.event.preventDefault()}function m(){for(var n,t=Xo.event;n=t.sourceEvent;)t=n;return t}function y(n){for(var t=new p,e=0,r=arguments.length;++e<r;)t[arguments[e]]=v(t);return t.of=function(e,r){return function(u){try{var i=u.sourceEvent=Xo.event;u.target=n,Xo.event=u,t[u.type].apply(e,r)}finally{Xo.event=i}}},t}function x(n){return fa(n,da),n}function M(n){return"function"==typeof n?n:function(){return ha(n,this)}}function _(n){return"function"==typeof n?n:function(){return ga(n,this)}}function b(n,t){function 
e(){this.removeAttribute(n)}function r(){this.removeAttributeNS(n.space,n.local)}function u(){this.setAttribute(n,t)}function i(){this.setAttributeNS(n.space,n.local,t)}function o(){var e=t.apply(this,arguments);null==e?this.removeAttribute(n):this.setAttribute(n,e)}function a(){var e=t.apply(this,arguments);null==e?this.removeAttributeNS(n.space,n.local):this.setAttributeNS(n.space,n.local,e)}return n=Xo.ns.qualify(n),null==t?n.local?r:e:"function"==typeof t?n.local?a:o:n.local?i:u}function w(n){return n.trim().replace(/\s+/g," ")}function S(n){return new RegExp("(?:^|\\s+)"+Xo.requote(n)+"(?:\\s+|$)","g")}function k(n){return n.trim().split(/^|\s+/)}function E(n,t){function e(){for(var e=-1;++e<u;)n[e](this,t)}function r(){for(var e=-1,r=t.apply(this,arguments);++e<u;)n[e](this,r)}n=k(n).map(A);var u=n.length;return"function"==typeof t?r:e}function A(n){var t=S(n);return function(e,r){if(u=e.classList)return r?u.add(n):u.remove(n);var u=e.getAttribute("class")||"";r?(t.lastIndex=0,t.test(u)||e.setAttribute("class",w(u+" "+n))):e.setAttribute("class",w(u.replace(t," ")))}}function C(n,t,e){function r(){this.style.removeProperty(n)}function u(){this.style.setProperty(n,t,e)}function i(){var r=t.apply(this,arguments);null==r?this.style.removeProperty(n):this.style.setProperty(n,r,e)}return null==t?r:"function"==typeof t?i:u}function N(n,t){function e(){delete this[n]}function r(){this[n]=t}function u(){var e=t.apply(this,arguments);null==e?delete this[n]:this[n]=e}return null==t?e:"function"==typeof t?u:r}function L(n){return"function"==typeof n?n:(n=Xo.ns.qualify(n)).local?function(){return this.ownerDocument.createElementNS(n.space,n.local)}:function(){return this.ownerDocument.createElementNS(this.namespaceURI,n)}}function T(n){return{__data__:n}}function q(n){return function(){return va(this,n)}}function z(n){return arguments.length||(n=Xo.ascending),function(t,e){return t&&e?n(t.__data__,e.__data__):!t-!e}}function R(n,t){for(var e=0,r=n.length;r>e;e++)for(var 
u,i=n[e],o=0,a=i.length;a>o;o++)(u=i[o])&&t(u,o,e);return n}function D(n){return fa(n,ya),n}function P(n){var t,e;return function(r,u,i){var o,a=n[i].update,c=a.length;for(i!=e&&(e=i,t=0),u>=t&&(t=u+1);!(o=a[t])&&++t<c;);return o}}function U(){var n=this.__transition__;n&&++n.active}function j(n,t,e){function r(){var t=this[o];t&&(this.removeEventListener(n,t,t.$),delete this[o])}function u(){var u=c(t,Bo(arguments));r.call(this),this.addEventListener(n,this[o]=u,u.$=e),u._=t}function i(){var t,e=new RegExp("^__on([^.]+)"+Xo.requote(n)+"$");for(var r in this)if(t=r.match(e)){var u=this[r];this.removeEventListener(t[1],u,u.$),delete this[r]}}var o="__on"+n,a=n.indexOf("."),c=H;a>0&&(n=n.substring(0,a));var s=Ma.get(n);return s&&(n=s,c=F),a?t?u:r:t?g:i}function H(n,t){return function(e){var r=Xo.event;Xo.event=e,t[0]=this.__data__;try{n.apply(this,t)}finally{Xo.event=r}}}function F(n,t){var e=H(n,t);return function(n){var t=this,r=n.relatedTarget;r&&(r===t||8&r.compareDocumentPosition(t))||e.call(t,n)}}function O(){var n=".dragsuppress-"+ ++ba,t="click"+n,e=Xo.select(Go).on("touchmove"+n,d).on("dragstart"+n,d).on("selectstart"+n,d);if(_a){var r=Jo.style,u=r[_a];r[_a]="none"}return function(i){function o(){e.on(t,null)}e.on(n,null),_a&&(r[_a]=u),i&&(e.on(t,function(){d(),o()},!0),setTimeout(o,0))}}function Y(n,t){t.changedTouches&&(t=t.changedTouches[0]);var e=n.ownerSVGElement||n;if(e.createSVGPoint){var r=e.createSVGPoint();if(0>wa&&(Go.scrollX||Go.scrollY)){e=Xo.select("body").append("svg").style({position:"absolute",top:0,left:0,margin:0,padding:0,border:"none"},"important");var u=e[0][0].getScreenCTM();wa=!(u.f||u.e),e.remove()}return wa?(r.x=t.pageX,r.y=t.pageY):(r.x=t.clientX,r.y=t.clientY),r=r.matrixTransform(n.getScreenCTM().inverse()),[r.x,r.y]}var i=n.getBoundingClientRect();return[t.clientX-i.left-n.clientLeft,t.clientY-i.top-n.clientTop]}function I(n){return n>0?1:0>n?-1:0}function Z(n,t,e){return(t[0]-n[0])*(e[1]-n[1])-(t[1]-n[1])*(e[0]-n[0])}function 
V(n){return n>1?0:-1>n?Sa:Math.acos(n)}function X(n){return n>1?Ea:-1>n?-Ea:Math.asin(n)}function $(n){return((n=Math.exp(n))-1/n)/2}function B(n){return((n=Math.exp(n))+1/n)/2}function W(n){return((n=Math.exp(2*n))-1)/(n+1)}function J(n){return(n=Math.sin(n/2))*n}function G(){}function K(n,t,e){return new Q(n,t,e)}function Q(n,t,e){this.h=n,this.s=t,this.l=e}function nt(n,t,e){function r(n){return n>360?n-=360:0>n&&(n+=360),60>n?i+(o-i)*n/60:180>n?o:240>n?i+(o-i)*(240-n)/60:i}function u(n){return Math.round(255*r(n))}var i,o;return n=isNaN(n)?0:(n%=360)<0?n+360:n,t=isNaN(t)?0:0>t?0:t>1?1:t,e=0>e?0:e>1?1:e,o=.5>=e?e*(1+t):e+t-e*t,i=2*e-o,gt(u(n+120),u(n),u(n-120))}function tt(n,t,e){return new et(n,t,e)}function et(n,t,e){this.h=n,this.c=t,this.l=e}function rt(n,t,e){return isNaN(n)&&(n=0),isNaN(t)&&(t=0),ut(e,Math.cos(n*=Na)*t,Math.sin(n)*t)}function ut(n,t,e){return new it(n,t,e)}function it(n,t,e){this.l=n,this.a=t,this.b=e}function ot(n,t,e){var r=(n+16)/116,u=r+t/500,i=r-e/200;return u=ct(u)*Fa,r=ct(r)*Oa,i=ct(i)*Ya,gt(lt(3.2404542*u-1.5371385*r-.4985314*i),lt(-.969266*u+1.8760108*r+.041556*i),lt(.0556434*u-.2040259*r+1.0572252*i))}function at(n,t,e){return n>0?tt(Math.atan2(e,t)*La,Math.sqrt(t*t+e*e),n):tt(0/0,0/0,n)}function ct(n){return n>.206893034?n*n*n:(n-4/29)/7.787037}function st(n){return n>.008856?Math.pow(n,1/3):7.787037*n+4/29}function lt(n){return Math.round(255*(.00304>=n?12.92*n:1.055*Math.pow(n,1/2.4)-.055))}function ft(n){return gt(n>>16,255&n>>8,255&n)}function ht(n){return ft(n)+""}function gt(n,t,e){return new pt(n,t,e)}function pt(n,t,e){this.r=n,this.g=t,this.b=e}function vt(n){return 16>n?"0"+Math.max(0,n).toString(16):Math.min(255,n).toString(16)}function dt(n,t,e){var r,u,i,o,a=0,c=0,s=0;if(u=/([a-z]+)\((.*)\)/i.exec(n))switch(i=u[2].split(","),u[1]){case"hsl":return e(parseFloat(i[0]),parseFloat(i[1])/100,parseFloat(i[2])/100);case"rgb":return 
t(Mt(i[0]),Mt(i[1]),Mt(i[2]))}return(o=Va.get(n))?t(o.r,o.g,o.b):(null!=n&&"#"===n.charAt(0)&&(r=parseInt(n.substring(1),16),isNaN(r)||(4===n.length?(a=(3840&r)>>4,a=a>>4|a,c=240&r,c=c>>4|c,s=15&r,s=s<<4|s):7===n.length&&(a=(16711680&r)>>16,c=(65280&r)>>8,s=255&r))),t(a,c,s))}function mt(n,t,e){var r,u,i=Math.min(n/=255,t/=255,e/=255),o=Math.max(n,t,e),a=o-i,c=(o+i)/2;return a?(u=.5>c?a/(o+i):a/(2-o-i),r=n==o?(t-e)/a+(e>t?6:0):t==o?(e-n)/a+2:(n-t)/a+4,r*=60):(r=0/0,u=c>0&&1>c?0:r),K(r,u,c)}function yt(n,t,e){n=xt(n),t=xt(t),e=xt(e);var r=st((.4124564*n+.3575761*t+.1804375*e)/Fa),u=st((.2126729*n+.7151522*t+.072175*e)/Oa),i=st((.0193339*n+.119192*t+.9503041*e)/Ya);return ut(116*u-16,500*(r-u),200*(u-i))}function xt(n){return(n/=255)<=.04045?n/12.92:Math.pow((n+.055)/1.055,2.4)}function Mt(n){var t=parseFloat(n);return"%"===n.charAt(n.length-1)?Math.round(2.55*t):t}function _t(n){return"function"==typeof n?n:function(){return n}}function bt(n){return n}function wt(n){return function(t,e,r){return 2===arguments.length&&"function"==typeof e&&(r=e,e=null),St(t,e,n,r)}}function St(n,t,e,r){function u(){var n,t=c.status;if(!t&&c.responseText||t>=200&&300>t||304===t){try{n=e.call(i,c)}catch(r){return o.error.call(i,r),void 0}o.load.call(i,n)}else o.error.call(i,c)}var i={},o=Xo.dispatch("beforesend","progress","load","error"),a={},c=new XMLHttpRequest,s=null;return!Go.XDomainRequest||"withCredentials"in c||!/^(http(s)?:)?\/\//.test(n)||(c=new XDomainRequest),"onload"in c?c.onload=c.onerror=u:c.onreadystatechange=function(){c.readyState>3&&u()},c.onprogress=function(n){var t=Xo.event;Xo.event=n;try{o.progress.call(i,c)}finally{Xo.event=t}},i.header=function(n,t){return n=(n+"").toLowerCase(),arguments.length<2?a[n]:(null==t?delete a[n]:a[n]=t+"",i)},i.mimeType=function(n){return arguments.length?(t=null==n?null:n+"",i):t},i.responseType=function(n){return arguments.length?(s=n,i):s},i.response=function(n){return 
e=n,i},["get","post"].forEach(function(n){i[n]=function(){return i.send.apply(i,[n].concat(Bo(arguments)))}}),i.send=function(e,r,u){if(2===arguments.length&&"function"==typeof r&&(u=r,r=null),c.open(e,n,!0),null==t||"accept"in a||(a.accept=t+",*/*"),c.setRequestHeader)for(var l in a)c.setRequestHeader(l,a[l]);return null!=t&&c.overrideMimeType&&c.overrideMimeType(t),null!=s&&(c.responseType=s),null!=u&&i.on("error",u).on("load",function(n){u(null,n)}),o.beforesend.call(i,c),c.send(null==r?null:r),i},i.abort=function(){return c.abort(),i},Xo.rebind(i,o,"on"),null==r?i:i.get(kt(r))}function kt(n){return 1===n.length?function(t,e){n(null==t?e:null)}:n}function Et(){var n=At(),t=Ct()-n;t>24?(isFinite(t)&&(clearTimeout(Wa),Wa=setTimeout(Et,t)),Ba=0):(Ba=1,Ga(Et))}function At(){var n=Date.now();for(Ja=Xa;Ja;)n>=Ja.t&&(Ja.f=Ja.c(n-Ja.t)),Ja=Ja.n;return n}function Ct(){for(var n,t=Xa,e=1/0;t;)t.f?t=n?n.n=t.n:Xa=t.n:(t.t<e&&(e=t.t),t=(n=t).n);return $a=n,e}function Nt(n,t){return t-(n?Math.ceil(Math.log(n)/Math.LN10):1)}function Lt(n,t){var e=Math.pow(10,3*oa(8-t));return{scale:t>8?function(n){return n/e}:function(n){return n*e},symbol:n}}function Tt(n){var t=n.decimal,e=n.thousands,r=n.grouping,u=n.currency,i=r?function(n){for(var t=n.length,u=[],i=0,o=r[0];t>0&&o>0;)u.push(n.substring(t-=o,t+o)),o=r[i=(i+1)%r.length];return u.reverse().join(e)}:bt;return function(n){var e=Qa.exec(n),r=e[1]||" ",o=e[2]||">",a=e[3]||"",c=e[4]||"",s=e[5],l=+e[6],f=e[7],h=e[8],g=e[9],p=1,v="",d="",m=!1;switch(h&&(h=+h.substring(1)),(s||"0"===r&&"="===o)&&(s=r="0",o="=",f&&(l-=Math.floor((l-1)/4))),g){case"n":f=!0,g="g";break;case"%":p=100,d="%",g="f";break;case"p":p=100,d="%",g="r";break;case"b":case"o":case"x":case"X":"#"===c&&(v="0"+g.toLowerCase());case"c":case"d":m=!0,h=0;break;case"s":p=-1,g="r"}"$"===c&&(v=u[0],d=u[1]),"r"!=g||h||(g="g"),null!=h&&("g"==g?h=Math.max(1,Math.min(21,h)):("e"==g||"f"==g)&&(h=Math.max(0,Math.min(20,h)))),g=nc.get(g)||qt;var y=s&&f;return function(n){var 
e=d;if(m&&n%1)return"";var u=0>n||0===n&&0>1/n?(n=-n,"-"):a;if(0>p){var c=Xo.formatPrefix(n,h);n=c.scale(n),e=c.symbol+d}else n*=p;n=g(n,h);var x=n.lastIndexOf("."),M=0>x?n:n.substring(0,x),_=0>x?"":t+n.substring(x+1);!s&&f&&(M=i(M));var b=v.length+M.length+_.length+(y?0:u.length),w=l>b?new Array(b=l-b+1).join(r):"";return y&&(M=i(w+M)),u+=v,n=M+_,("<"===o?u+n+w:">"===o?w+u+n:"^"===o?w.substring(0,b>>=1)+u+n+w.substring(b):u+(y?n:w+n))+e}}}function qt(n){return n+""}function zt(){this._=new Date(arguments.length>1?Date.UTC.apply(this,arguments):arguments[0])}function Rt(n,t,e){function r(t){var e=n(t),r=i(e,1);return r-t>t-e?e:r}function u(e){return t(e=n(new ec(e-1)),1),e}function i(n,e){return t(n=new ec(+n),e),n}function o(n,r,i){var o=u(n),a=[];if(i>1)for(;r>o;)e(o)%i||a.push(new Date(+o)),t(o,1);else for(;r>o;)a.push(new Date(+o)),t(o,1);return a}function a(n,t,e){try{ec=zt;var r=new zt;return r._=n,o(r,t,e)}finally{ec=Date}}n.floor=n,n.round=r,n.ceil=u,n.offset=i,n.range=o;var c=n.utc=Dt(n);return c.floor=c,c.round=Dt(r),c.ceil=Dt(u),c.offset=Dt(i),c.range=a,n}function Dt(n){return function(t,e){try{ec=zt;var r=new zt;return r._=t,n(r,e)._}finally{ec=Date}}}function Pt(n){function t(n){function t(t){for(var e,u,i,o=[],a=-1,c=0;++a<r;)37===n.charCodeAt(a)&&(o.push(n.substring(c,a)),null!=(u=uc[e=n.charAt(++a)])&&(e=n.charAt(++a)),(i=C[e])&&(e=i(t,null==u?"e"===e?" 
":"0":u)),o.push(e),c=a+1);return o.push(n.substring(c,a)),o.join("")}var r=n.length;return t.parse=function(t){var r={y:1900,m:0,d:1,H:0,M:0,S:0,L:0,Z:null},u=e(r,n,t,0);if(u!=t.length)return null;"p"in r&&(r.H=r.H%12+12*r.p);var i=null!=r.Z&&ec!==zt,o=new(i?zt:ec);return"j"in r?o.setFullYear(r.y,0,r.j):"w"in r&&("W"in r||"U"in r)?(o.setFullYear(r.y,0,1),o.setFullYear(r.y,0,"W"in r?(r.w+6)%7+7*r.W-(o.getDay()+5)%7:r.w+7*r.U-(o.getDay()+6)%7)):o.setFullYear(r.y,r.m,r.d),o.setHours(r.H+Math.floor(r.Z/100),r.M+r.Z%100,r.S,r.L),i?o._:o},t.toString=function(){return n},t}function e(n,t,e,r){for(var u,i,o,a=0,c=t.length,s=e.length;c>a;){if(r>=s)return-1;if(u=t.charCodeAt(a++),37===u){if(o=t.charAt(a++),i=N[o in uc?t.charAt(a++):o],!i||(r=i(n,e,r))<0)return-1}else if(u!=e.charCodeAt(r++))return-1}return r}function r(n,t,e){b.lastIndex=0;var r=b.exec(t.substring(e));return r?(n.w=w.get(r[0].toLowerCase()),e+r[0].length):-1}function u(n,t,e){M.lastIndex=0;var r=M.exec(t.substring(e));return r?(n.w=_.get(r[0].toLowerCase()),e+r[0].length):-1}function i(n,t,e){E.lastIndex=0;var r=E.exec(t.substring(e));return r?(n.m=A.get(r[0].toLowerCase()),e+r[0].length):-1}function o(n,t,e){S.lastIndex=0;var r=S.exec(t.substring(e));return r?(n.m=k.get(r[0].toLowerCase()),e+r[0].length):-1}function a(n,t,r){return e(n,C.c.toString(),t,r)}function c(n,t,r){return e(n,C.x.toString(),t,r)}function s(n,t,r){return e(n,C.X.toString(),t,r)}function l(n,t,e){var r=x.get(t.substring(e,e+=2).toLowerCase());return null==r?-1:(n.p=r,e)}var f=n.dateTime,h=n.date,g=n.time,p=n.periods,v=n.days,d=n.shortDays,m=n.months,y=n.shortMonths;t.utc=function(n){function e(n){try{ec=zt;var t=new ec;return t._=n,r(t)}finally{ec=Date}}var r=t(n);return e.parse=function(n){try{ec=zt;var t=r.parse(n);return t&&t._}finally{ec=Date}},e.toString=r.toString,e},t.multi=t.utc.multi=ee;var x=Xo.map(),M=jt(v),_=Ht(v),b=jt(d),w=Ht(d),S=jt(m),k=Ht(m),E=jt(y),A=Ht(y);p.forEach(function(n,t){x.set(n.toLowerCase(),t)});var 
C={a:function(n){return d[n.getDay()]},A:function(n){return v[n.getDay()]},b:function(n){return y[n.getMonth()]},B:function(n){return m[n.getMonth()]},c:t(f),d:function(n,t){return Ut(n.getDate(),t,2)},e:function(n,t){return Ut(n.getDate(),t,2)},H:function(n,t){return Ut(n.getHours(),t,2)},I:function(n,t){return Ut(n.getHours()%12||12,t,2)},j:function(n,t){return Ut(1+tc.dayOfYear(n),t,3)},L:function(n,t){return Ut(n.getMilliseconds(),t,3)},m:function(n,t){return Ut(n.getMonth()+1,t,2)},M:function(n,t){return Ut(n.getMinutes(),t,2)},p:function(n){return p[+(n.getHours()>=12)]},S:function(n,t){return Ut(n.getSeconds(),t,2)},U:function(n,t){return Ut(tc.sundayOfYear(n),t,2)},w:function(n){return n.getDay()},W:function(n,t){return Ut(tc.mondayOfYear(n),t,2)},x:t(h),X:t(g),y:function(n,t){return Ut(n.getFullYear()%100,t,2)},Y:function(n,t){return Ut(n.getFullYear()%1e4,t,4)},Z:ne,"%":function(){return"%"}},N={a:r,A:u,b:i,B:o,c:a,d:Bt,e:Bt,H:Jt,I:Jt,j:Wt,L:Qt,m:$t,M:Gt,p:l,S:Kt,U:Ot,w:Ft,W:Yt,x:c,X:s,y:Zt,Y:It,Z:Vt,"%":te};return t}function Ut(n,t,e){var r=0>n?"-":"",u=(r?-n:n)+"",i=u.length;return r+(e>i?new Array(e-i+1).join(t)+u:u)}function jt(n){return new RegExp("^(?:"+n.map(Xo.requote).join("|")+")","i")}function Ht(n){for(var t=new u,e=-1,r=n.length;++e<r;)t.set(n[e].toLowerCase(),e);return t}function Ft(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+1));return r?(n.w=+r[0],e+r[0].length):-1}function Ot(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e));return r?(n.U=+r[0],e+r[0].length):-1}function Yt(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e));return r?(n.W=+r[0],e+r[0].length):-1}function It(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+4));return r?(n.y=+r[0],e+r[0].length):-1}function Zt(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+2));return r?(n.y=Xt(+r[0]),e+r[0].length):-1}function Vt(n,t,e){return/^[+-]\d{4}$/.test(t=t.substring(e,e+5))?(n.Z=+t,e+5):-1}function Xt(n){return n+(n>68?1900:2e3)}function 
$t(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+2));return r?(n.m=r[0]-1,e+r[0].length):-1}function Bt(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+2));return r?(n.d=+r[0],e+r[0].length):-1}function Wt(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+3));return r?(n.j=+r[0],e+r[0].length):-1}function Jt(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+2));return r?(n.H=+r[0],e+r[0].length):-1}function Gt(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+2));return r?(n.M=+r[0],e+r[0].length):-1}function Kt(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+2));return r?(n.S=+r[0],e+r[0].length):-1}function Qt(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+3));return r?(n.L=+r[0],e+r[0].length):-1}function ne(n){var t=n.getTimezoneOffset(),e=t>0?"-":"+",r=~~(oa(t)/60),u=oa(t)%60;return e+Ut(r,"0",2)+Ut(u,"0",2)}function te(n,t,e){oc.lastIndex=0;var r=oc.exec(t.substring(e,e+1));return r?e+r[0].length:-1}function ee(n){for(var t=n.length,e=-1;++e<t;)n[e][0]=this(n[e][0]);return function(t){for(var e=0,r=n[e];!r[1](t);)r=n[++e];return r[0](t)}}function re(){}function ue(n,t,e){var r=e.s=n+t,u=r-n,i=r-u;e.t=n-i+(t-u)}function ie(n,t){n&&lc.hasOwnProperty(n.type)&&lc[n.type](n,t)}function oe(n,t,e){var r,u=-1,i=n.length-e;for(t.lineStart();++u<i;)r=n[u],t.point(r[0],r[1],r[2]);t.lineEnd()}function ae(n,t){var e=-1,r=n.length;for(t.polygonStart();++e<r;)oe(n[e],t,1);t.polygonEnd()}function ce(){function n(n,t){n*=Na,t=t*Na/2+Sa/4;var e=n-r,o=e>=0?1:-1,a=o*e,c=Math.cos(t),s=Math.sin(t),l=i*s,f=u*c+l*Math.cos(a),h=l*o*Math.sin(a);hc.add(Math.atan2(h,f)),r=n,u=c,i=s}var t,e,r,u,i;gc.point=function(o,a){gc.point=n,r=(t=o)*Na,u=Math.cos(a=(e=a)*Na/2+Sa/4),i=Math.sin(a)},gc.lineEnd=function(){n(t,e)}}function se(n){var t=n[0],e=n[1],r=Math.cos(e);return[r*Math.cos(t),r*Math.sin(t),Math.sin(e)]}function le(n,t){return n[0]*t[0]+n[1]*t[1]+n[2]*t[2]}function fe(n,t){return[n[1]*t[2]-n[2]*t[1],n[2]*t[0]-n[0]*t[2],n[0]*t[1]-n[1]*t[0]]}function 
he(n,t){n[0]+=t[0],n[1]+=t[1],n[2]+=t[2]}function ge(n,t){return[n[0]*t,n[1]*t,n[2]*t]}function pe(n){var t=Math.sqrt(n[0]*n[0]+n[1]*n[1]+n[2]*n[2]);n[0]/=t,n[1]/=t,n[2]/=t}function ve(n){return[Math.atan2(n[1],n[0]),X(n[2])]}function de(n,t){return oa(n[0]-t[0])<Aa&&oa(n[1]-t[1])<Aa}function me(n,t){n*=Na;var e=Math.cos(t*=Na);ye(e*Math.cos(n),e*Math.sin(n),Math.sin(t))}function ye(n,t,e){++pc,dc+=(n-dc)/pc,mc+=(t-mc)/pc,yc+=(e-yc)/pc}function xe(){function n(n,u){n*=Na;var i=Math.cos(u*=Na),o=i*Math.cos(n),a=i*Math.sin(n),c=Math.sin(u),s=Math.atan2(Math.sqrt((s=e*c-r*a)*s+(s=r*o-t*c)*s+(s=t*a-e*o)*s),t*o+e*a+r*c);vc+=s,xc+=s*(t+(t=o)),Mc+=s*(e+(e=a)),_c+=s*(r+(r=c)),ye(t,e,r)}var t,e,r;kc.point=function(u,i){u*=Na;var o=Math.cos(i*=Na);t=o*Math.cos(u),e=o*Math.sin(u),r=Math.sin(i),kc.point=n,ye(t,e,r)}}function Me(){kc.point=me}function _e(){function n(n,t){n*=Na;var e=Math.cos(t*=Na),o=e*Math.cos(n),a=e*Math.sin(n),c=Math.sin(t),s=u*c-i*a,l=i*o-r*c,f=r*a-u*o,h=Math.sqrt(s*s+l*l+f*f),g=r*o+u*a+i*c,p=h&&-V(g)/h,v=Math.atan2(h,g);bc+=p*s,wc+=p*l,Sc+=p*f,vc+=v,xc+=v*(r+(r=o)),Mc+=v*(u+(u=a)),_c+=v*(i+(i=c)),ye(r,u,i)}var t,e,r,u,i;kc.point=function(o,a){t=o,e=a,kc.point=n,o*=Na;var c=Math.cos(a*=Na);r=c*Math.cos(o),u=c*Math.sin(o),i=Math.sin(a),ye(r,u,i)},kc.lineEnd=function(){n(t,e),kc.lineEnd=Me,kc.point=me}}function be(){return!0}function we(n,t,e,r,u){var i=[],o=[];if(n.forEach(function(n){if(!((t=n.length-1)<=0)){var t,e=n[0],r=n[t];if(de(e,r)){u.lineStart();for(var a=0;t>a;++a)u.point((e=n[a])[0],e[1]);return u.lineEnd(),void 0}var c=new ke(e,n,null,!0),s=new ke(e,null,c,!1);c.o=s,i.push(c),o.push(s),c=new ke(r,n,null,!1),s=new ke(r,null,c,!0),c.o=s,i.push(c),o.push(s)}}),o.sort(t),Se(i),Se(o),i.length){for(var a=0,c=e,s=o.length;s>a;++a)o[a].e=c=!c;for(var l,f,h=i[0];;){for(var g=h,p=!0;g.v;)if((g=g.n)===h)return;l=g.z,u.lineStart();do{if(g.v=g.o.v=!0,g.e){if(p)for(var a=0,s=l.length;s>a;++a)u.point((f=l[a])[0],f[1]);else 
r(g.x,g.n.x,1,u);g=g.n}else{if(p){l=g.p.z;for(var a=l.length-1;a>=0;--a)u.point((f=l[a])[0],f[1])}else r(g.x,g.p.x,-1,u);g=g.p}g=g.o,l=g.z,p=!p}while(!g.v);u.lineEnd()}}}function Se(n){if(t=n.length){for(var t,e,r=0,u=n[0];++r<t;)u.n=e=n[r],e.p=u,u=e;u.n=e=n[0],e.p=u}}function ke(n,t,e,r){this.x=n,this.z=t,this.o=e,this.e=r,this.v=!1,this.n=this.p=null}function Ee(n,t,e,r){return function(u,i){function o(t,e){var r=u(t,e);n(t=r[0],e=r[1])&&i.point(t,e)}function a(n,t){var e=u(n,t);d.point(e[0],e[1])}function c(){y.point=a,d.lineStart()}function s(){y.point=o,d.lineEnd()}function l(n,t){v.push([n,t]);var e=u(n,t);M.point(e[0],e[1])}function f(){M.lineStart(),v=[]}function h(){l(v[0][0],v[0][1]),M.lineEnd();var n,t=M.clean(),e=x.buffer(),r=e.length;if(v.pop(),p.push(v),v=null,r){if(1&t){n=e[0];var u,r=n.length-1,o=-1;for(i.lineStart();++o<r;)i.point((u=n[o])[0],u[1]);return i.lineEnd(),void 0}r>1&&2&t&&e.push(e.pop().concat(e.shift())),g.push(e.filter(Ae))}}var g,p,v,d=t(i),m=u.invert(r[0],r[1]),y={point:o,lineStart:c,lineEnd:s,polygonStart:function(){y.point=l,y.lineStart=f,y.lineEnd=h,g=[],p=[],i.polygonStart()},polygonEnd:function(){y.point=o,y.lineStart=c,y.lineEnd=s,g=Xo.merge(g);var n=Le(m,p);g.length?we(g,Ne,n,e,i):n&&(i.lineStart(),e(null,null,1,i),i.lineEnd()),i.polygonEnd(),g=p=null},sphere:function(){i.polygonStart(),i.lineStart(),e(null,null,1,i),i.lineEnd(),i.polygonEnd()}},x=Ce(),M=t(x);return y}}function Ae(n){return n.length>1}function Ce(){var n,t=[];return{lineStart:function(){t.push(n=[])},point:function(t,e){n.push([t,e])},lineEnd:g,buffer:function(){var e=t;return t=[],n=null,e},rejoin:function(){t.length>1&&t.push(t.pop().concat(t.shift()))}}}function Ne(n,t){return((n=n.x)[0]<0?n[1]-Ea-Aa:Ea-n[1])-((t=t.x)[0]<0?t[1]-Ea-Aa:Ea-t[1])}function Le(n,t){var e=n[0],r=n[1],u=[Math.sin(e),-Math.cos(e),0],i=0,o=0;hc.reset();for(var a=0,c=t.length;c>a;++a){var s=t[a],l=s.length;if(l)for(var 
f=s[0],h=f[0],g=f[1]/2+Sa/4,p=Math.sin(g),v=Math.cos(g),d=1;;){d===l&&(d=0),n=s[d];var m=n[0],y=n[1]/2+Sa/4,x=Math.sin(y),M=Math.cos(y),_=m-h,b=_>=0?1:-1,w=b*_,S=w>Sa,k=p*x;if(hc.add(Math.atan2(k*b*Math.sin(w),v*M+k*Math.cos(w))),i+=S?_+b*ka:_,S^h>=e^m>=e){var E=fe(se(f),se(n));pe(E);var A=fe(u,E);pe(A);var C=(S^_>=0?-1:1)*X(A[2]);(r>C||r===C&&(E[0]||E[1]))&&(o+=S^_>=0?1:-1)}if(!d++)break;h=m,p=x,v=M,f=n}}return(-Aa>i||Aa>i&&0>hc)^1&o}function Te(n){var t,e=0/0,r=0/0,u=0/0;return{lineStart:function(){n.lineStart(),t=1},point:function(i,o){var a=i>0?Sa:-Sa,c=oa(i-e);oa(c-Sa)<Aa?(n.point(e,r=(r+o)/2>0?Ea:-Ea),n.point(u,r),n.lineEnd(),n.lineStart(),n.point(a,r),n.point(i,r),t=0):u!==a&&c>=Sa&&(oa(e-u)<Aa&&(e-=u*Aa),oa(i-a)<Aa&&(i-=a*Aa),r=qe(e,r,i,o),n.point(u,r),n.lineEnd(),n.lineStart(),n.point(a,r),t=0),n.point(e=i,r=o),u=a},lineEnd:function(){n.lineEnd(),e=r=0/0},clean:function(){return 2-t}}}function qe(n,t,e,r){var u,i,o=Math.sin(n-e);return oa(o)>Aa?Math.atan((Math.sin(t)*(i=Math.cos(r))*Math.sin(e)-Math.sin(r)*(u=Math.cos(t))*Math.sin(n))/(u*i*o)):(t+r)/2}function ze(n,t,e,r){var u;if(null==n)u=e*Ea,r.point(-Sa,u),r.point(0,u),r.point(Sa,u),r.point(Sa,0),r.point(Sa,-u),r.point(0,-u),r.point(-Sa,-u),r.point(-Sa,0),r.point(-Sa,u);else if(oa(n[0]-t[0])>Aa){var i=n[0]<t[0]?Sa:-Sa;u=e*i/2,r.point(-i,u),r.point(0,u),r.point(i,u)}else r.point(t[0],t[1])}function Re(n){function t(n,t){return Math.cos(n)*Math.cos(t)>i}function e(n){var e,i,c,s,l;return{lineStart:function(){s=c=!1,l=1},point:function(f,h){var g,p=[f,h],v=t(f,h),d=o?v?0:u(f,h):v?u(f+(0>f?Sa:-Sa),h):0;if(!e&&(s=c=v)&&n.lineStart(),v!==c&&(g=r(e,p),(de(e,g)||de(p,g))&&(p[0]+=Aa,p[1]+=Aa,v=t(p[0],p[1]))),v!==c)l=0,v?(n.lineStart(),g=r(p,e),n.point(g[0],g[1])):(g=r(e,p),n.point(g[0],g[1]),n.lineEnd()),e=g;else if(a&&e&&o^v){var 
m;d&i||!(m=r(p,e,!0))||(l=0,o?(n.lineStart(),n.point(m[0][0],m[0][1]),n.point(m[1][0],m[1][1]),n.lineEnd()):(n.point(m[1][0],m[1][1]),n.lineEnd(),n.lineStart(),n.point(m[0][0],m[0][1])))}!v||e&&de(e,p)||n.point(p[0],p[1]),e=p,c=v,i=d},lineEnd:function(){c&&n.lineEnd(),e=null},clean:function(){return l|(s&&c)<<1}}}function r(n,t,e){var r=se(n),u=se(t),o=[1,0,0],a=fe(r,u),c=le(a,a),s=a[0],l=c-s*s;if(!l)return!e&&n;var f=i*c/l,h=-i*s/l,g=fe(o,a),p=ge(o,f),v=ge(a,h);he(p,v);var d=g,m=le(p,d),y=le(d,d),x=m*m-y*(le(p,p)-1);if(!(0>x)){var M=Math.sqrt(x),_=ge(d,(-m-M)/y);if(he(_,p),_=ve(_),!e)return _;var b,w=n[0],S=t[0],k=n[1],E=t[1];w>S&&(b=w,w=S,S=b);var A=S-w,C=oa(A-Sa)<Aa,N=C||Aa>A;if(!C&&k>E&&(b=k,k=E,E=b),N?C?k+E>0^_[1]<(oa(_[0]-w)<Aa?k:E):k<=_[1]&&_[1]<=E:A>Sa^(w<=_[0]&&_[0]<=S)){var L=ge(d,(-m+M)/y);return he(L,p),[_,ve(L)]}}}function u(t,e){var r=o?n:Sa-n,u=0;return-r>t?u|=1:t>r&&(u|=2),-r>e?u|=4:e>r&&(u|=8),u}var i=Math.cos(n),o=i>0,a=oa(i)>Aa,c=cr(n,6*Na);return Ee(t,e,c,o?[0,-n]:[-Sa,n-Sa])}function De(n,t,e,r){return function(u){var i,o=u.a,a=u.b,c=o.x,s=o.y,l=a.x,f=a.y,h=0,g=1,p=l-c,v=f-s;if(i=n-c,p||!(i>0)){if(i/=p,0>p){if(h>i)return;g>i&&(g=i)}else if(p>0){if(i>g)return;i>h&&(h=i)}if(i=e-c,p||!(0>i)){if(i/=p,0>p){if(i>g)return;i>h&&(h=i)}else if(p>0){if(h>i)return;g>i&&(g=i)}if(i=t-s,v||!(i>0)){if(i/=v,0>v){if(h>i)return;g>i&&(g=i)}else if(v>0){if(i>g)return;i>h&&(h=i)}if(i=r-s,v||!(0>i)){if(i/=v,0>v){if(i>g)return;i>h&&(h=i)}else if(v>0){if(h>i)return;g>i&&(g=i)}return h>0&&(u.a={x:c+h*p,y:s+h*v}),1>g&&(u.b={x:c+g*p,y:s+g*v}),u}}}}}}function Pe(n,t,e,r){function u(r,u){return oa(r[0]-n)<Aa?u>0?0:3:oa(r[0]-e)<Aa?u>0?2:1:oa(r[1]-t)<Aa?u>0?1:0:u>0?3:2}function i(n,t){return o(n.x,t.x)}function o(n,t){var e=u(n,1),r=u(t,1);return e!==r?e-r:0===e?t[1]-n[1]:1===e?n[0]-t[0]:2===e?n[1]-t[1]:t[0]-n[0]}return function(a){function c(n){for(var t=0,e=d.length,r=n[1],u=0;e>u;++u)for(var 
i,o=1,a=d[u],c=a.length,s=a[0];c>o;++o)i=a[o],s[1]<=r?i[1]>r&&Z(s,i,n)>0&&++t:i[1]<=r&&Z(s,i,n)<0&&--t,s=i;return 0!==t}function s(i,a,c,s){var l=0,f=0;if(null==i||(l=u(i,c))!==(f=u(a,c))||o(i,a)<0^c>0){do s.point(0===l||3===l?n:e,l>1?r:t);while((l=(l+c+4)%4)!==f)}else s.point(a[0],a[1])}function l(u,i){return u>=n&&e>=u&&i>=t&&r>=i}function f(n,t){l(n,t)&&a.point(n,t)}function h(){N.point=p,d&&d.push(m=[]),S=!0,w=!1,_=b=0/0}function g(){v&&(p(y,x),M&&w&&A.rejoin(),v.push(A.buffer())),N.point=f,w&&a.lineEnd()}function p(n,t){n=Math.max(-Ac,Math.min(Ac,n)),t=Math.max(-Ac,Math.min(Ac,t));var e=l(n,t);if(d&&m.push([n,t]),S)y=n,x=t,M=e,S=!1,e&&(a.lineStart(),a.point(n,t));else if(e&&w)a.point(n,t);else{var r={a:{x:_,y:b},b:{x:n,y:t}};C(r)?(w||(a.lineStart(),a.point(r.a.x,r.a.y)),a.point(r.b.x,r.b.y),e||a.lineEnd(),k=!1):e&&(a.lineStart(),a.point(n,t),k=!1)}_=n,b=t,w=e}var v,d,m,y,x,M,_,b,w,S,k,E=a,A=Ce(),C=De(n,t,e,r),N={point:f,lineStart:h,lineEnd:g,polygonStart:function(){a=A,v=[],d=[],k=!0},polygonEnd:function(){a=E,v=Xo.merge(v);var t=c([n,r]),e=k&&t,u=v.length;(e||u)&&(a.polygonStart(),e&&(a.lineStart(),s(null,null,1,a),a.lineEnd()),u&&we(v,i,t,s,a),a.polygonEnd()),v=d=m=null}};return N}}function Ue(n,t){function e(e,r){return e=n(e,r),t(e[0],e[1])}return n.invert&&t.invert&&(e.invert=function(e,r){return e=t.invert(e,r),e&&n.invert(e[0],e[1])}),e}function je(n){var t=0,e=Sa/3,r=nr(n),u=r(t,e);return u.parallels=function(n){return arguments.length?r(t=n[0]*Sa/180,e=n[1]*Sa/180):[180*(t/Sa),180*(e/Sa)]},u}function He(n,t){function e(n,t){var e=Math.sqrt(i-2*u*Math.sin(t))/u;return[e*Math.sin(n*=u),o-e*Math.cos(n)]}var r=Math.sin(n),u=(r+Math.sin(t))/2,i=1+r*(2*u-r),o=Math.sqrt(i)/u;return e.invert=function(n,t){var e=o-t;return[Math.atan2(n,e)/u,X((i-(n*n+e*e)*u*u)/(2*u))]},e}function Fe(){function n(n,t){Nc+=u*n-r*t,r=n,u=t}var t,e,r,u;Rc.point=function(i,o){Rc.point=n,t=r=i,e=u=o},Rc.lineEnd=function(){n(t,e)}}function 
Oe(n,t){Lc>n&&(Lc=n),n>qc&&(qc=n),Tc>t&&(Tc=t),t>zc&&(zc=t)}function Ye(){function n(n,t){o.push("M",n,",",t,i)}function t(n,t){o.push("M",n,",",t),a.point=e}function e(n,t){o.push("L",n,",",t)}function r(){a.point=n}function u(){o.push("Z")}var i=Ie(4.5),o=[],a={point:n,lineStart:function(){a.point=t},lineEnd:r,polygonStart:function(){a.lineEnd=u},polygonEnd:function(){a.lineEnd=r,a.point=n},pointRadius:function(n){return i=Ie(n),a},result:function(){if(o.length){var n=o.join("");return o=[],n}}};return a}function Ie(n){return"m0,"+n+"a"+n+","+n+" 0 1,1 0,"+-2*n+"a"+n+","+n+" 0 1,1 0,"+2*n+"z"}function Ze(n,t){dc+=n,mc+=t,++yc}function Ve(){function n(n,r){var u=n-t,i=r-e,o=Math.sqrt(u*u+i*i);xc+=o*(t+n)/2,Mc+=o*(e+r)/2,_c+=o,Ze(t=n,e=r)}var t,e;Pc.point=function(r,u){Pc.point=n,Ze(t=r,e=u)}}function Xe(){Pc.point=Ze}function $e(){function n(n,t){var e=n-r,i=t-u,o=Math.sqrt(e*e+i*i);xc+=o*(r+n)/2,Mc+=o*(u+t)/2,_c+=o,o=u*n-r*t,bc+=o*(r+n),wc+=o*(u+t),Sc+=3*o,Ze(r=n,u=t)}var t,e,r,u;Pc.point=function(i,o){Pc.point=n,Ze(t=r=i,e=u=o)},Pc.lineEnd=function(){n(t,e)}}function Be(n){function t(t,e){n.moveTo(t,e),n.arc(t,e,o,0,ka)}function e(t,e){n.moveTo(t,e),a.point=r}function r(t,e){n.lineTo(t,e)}function u(){a.point=t}function i(){n.closePath()}var o=4.5,a={point:t,lineStart:function(){a.point=e},lineEnd:u,polygonStart:function(){a.lineEnd=i},polygonEnd:function(){a.lineEnd=u,a.point=t},pointRadius:function(n){return o=n,a},result:g};return a}function We(n){function t(n){return(a?r:e)(n)}function e(t){return Ke(t,function(e,r){e=n(e,r),t.point(e[0],e[1])})}function r(t){function e(e,r){e=n(e,r),t.point(e[0],e[1])}function r(){x=0/0,S.point=i,t.lineStart()}function i(e,r){var i=se([e,r]),o=n(e,r);u(x,M,y,_,b,w,x=o[0],M=o[1],y=e,_=i[0],b=i[1],w=i[2],a,t),t.point(x,M)}function o(){S.point=e,t.lineEnd()}function c(){r(),S.point=s,S.lineEnd=l}function s(n,t){i(f=n,h=t),g=x,p=M,v=_,d=b,m=w,S.point=i}function l(){u(x,M,y,_,b,w,g,p,f,v,d,m,a,t),S.lineEnd=o,o()}var 
f,h,g,p,v,d,m,y,x,M,_,b,w,S={point:e,lineStart:r,lineEnd:o,polygonStart:function(){t.polygonStart(),S.lineStart=c},polygonEnd:function(){t.polygonEnd(),S.lineStart=r}};return S}function u(t,e,r,a,c,s,l,f,h,g,p,v,d,m){var y=l-t,x=f-e,M=y*y+x*x;if(M>4*i&&d--){var _=a+g,b=c+p,w=s+v,S=Math.sqrt(_*_+b*b+w*w),k=Math.asin(w/=S),E=oa(oa(w)-1)<Aa||oa(r-h)<Aa?(r+h)/2:Math.atan2(b,_),A=n(E,k),C=A[0],N=A[1],L=C-t,T=N-e,q=x*L-y*T;(q*q/M>i||oa((y*L+x*T)/M-.5)>.3||o>a*g+c*p+s*v)&&(u(t,e,r,a,c,s,C,N,E,_/=S,b/=S,w,d,m),m.point(C,N),u(C,N,E,_,b,w,l,f,h,g,p,v,d,m))}}var i=.5,o=Math.cos(30*Na),a=16;return t.precision=function(n){return arguments.length?(a=(i=n*n)>0&&16,t):Math.sqrt(i)},t}function Je(n){var t=We(function(t,e){return n([t*La,e*La])});return function(n){return tr(t(n))}}function Ge(n){this.stream=n}function Ke(n,t){return{point:t,sphere:function(){n.sphere()},lineStart:function(){n.lineStart()},lineEnd:function(){n.lineEnd()},polygonStart:function(){n.polygonStart()},polygonEnd:function(){n.polygonEnd()}}}function Qe(n){return nr(function(){return n})()}function nr(n){function t(n){return n=a(n[0]*Na,n[1]*Na),[n[0]*h+c,s-n[1]*h]}function e(n){return n=a.invert((n[0]-c)/h,(s-n[1])/h),n&&[n[0]*La,n[1]*La]}function r(){a=Ue(o=ur(m,y,x),i);var n=i(v,d);return c=g-n[0]*h,s=p+n[1]*h,u()}function u(){return l&&(l.valid=!1,l=null),t}var i,o,a,c,s,l,f=We(function(n,t){return n=i(n,t),[n[0]*h+c,s-n[1]*h]}),h=150,g=480,p=250,v=0,d=0,m=0,y=0,x=0,M=Ec,_=bt,b=null,w=null;return t.stream=function(n){return l&&(l.valid=!1),l=tr(M(o,f(_(n)))),l.valid=!0,l},t.clipAngle=function(n){return arguments.length?(M=null==n?(b=n,Ec):Re((b=+n)*Na),u()):b},t.clipExtent=function(n){return arguments.length?(w=n,_=n?Pe(n[0][0],n[0][1],n[1][0],n[1][1]):bt,u()):w},t.scale=function(n){return arguments.length?(h=+n,r()):h},t.translate=function(n){return arguments.length?(g=+n[0],p=+n[1],r()):[g,p]},t.center=function(n){return 
arguments.length?(v=n[0]%360*Na,d=n[1]%360*Na,r()):[v*La,d*La]},t.rotate=function(n){return arguments.length?(m=n[0]%360*Na,y=n[1]%360*Na,x=n.length>2?n[2]%360*Na:0,r()):[m*La,y*La,x*La]},Xo.rebind(t,f,"precision"),function(){return i=n.apply(this,arguments),t.invert=i.invert&&e,r()}}function tr(n){return Ke(n,function(t,e){n.point(t*Na,e*Na)})}function er(n,t){return[n,t]}function rr(n,t){return[n>Sa?n-ka:-Sa>n?n+ka:n,t]}function ur(n,t,e){return n?t||e?Ue(or(n),ar(t,e)):or(n):t||e?ar(t,e):rr}function ir(n){return function(t,e){return t+=n,[t>Sa?t-ka:-Sa>t?t+ka:t,e]}}function or(n){var t=ir(n);return t.invert=ir(-n),t}function ar(n,t){function e(n,t){var e=Math.cos(t),a=Math.cos(n)*e,c=Math.sin(n)*e,s=Math.sin(t),l=s*r+a*u;return[Math.atan2(c*i-l*o,a*r-s*u),X(l*i+c*o)]}var r=Math.cos(n),u=Math.sin(n),i=Math.cos(t),o=Math.sin(t);return e.invert=function(n,t){var e=Math.cos(t),a=Math.cos(n)*e,c=Math.sin(n)*e,s=Math.sin(t),l=s*i-c*o;return[Math.atan2(c*i+s*o,a*r+l*u),X(l*r-a*u)]},e}function cr(n,t){var e=Math.cos(n),r=Math.sin(n);return function(u,i,o,a){var c=o*t;null!=u?(u=sr(e,u),i=sr(e,i),(o>0?i>u:u>i)&&(u+=o*ka)):(u=n+o*ka,i=n-.5*c);for(var s,l=u;o>0?l>i:i>l;l-=c)a.point((s=ve([e,-r*Math.cos(l),-r*Math.sin(l)]))[0],s[1])}}function sr(n,t){var e=se(t);e[0]-=n,pe(e);var r=V(-e[1]);return((-e[2]<0?-r:r)+2*Math.PI-Aa)%(2*Math.PI)}function lr(n,t,e){var r=Xo.range(n,t-Aa,e).concat(t);return function(n){return r.map(function(t){return[n,t]})}}function fr(n,t,e){var r=Xo.range(n,t-Aa,e).concat(t);return function(n){return r.map(function(t){return[t,n]})}}function hr(n){return n.source}function gr(n){return n.target}function pr(n,t,e,r){var u=Math.cos(t),i=Math.sin(t),o=Math.cos(r),a=Math.sin(r),c=u*Math.cos(n),s=u*Math.sin(n),l=o*Math.cos(e),f=o*Math.sin(e),h=2*Math.asin(Math.sqrt(J(r-t)+u*o*J(e-n))),g=1/Math.sin(h),p=h?function(n){var 
t=Math.sin(n*=h)*g,e=Math.sin(h-n)*g,r=e*c+t*l,u=e*s+t*f,o=e*i+t*a;return[Math.atan2(u,r)*La,Math.atan2(o,Math.sqrt(r*r+u*u))*La]}:function(){return[n*La,t*La]};return p.distance=h,p}function vr(){function n(n,u){var i=Math.sin(u*=Na),o=Math.cos(u),a=oa((n*=Na)-t),c=Math.cos(a);Uc+=Math.atan2(Math.sqrt((a=o*Math.sin(a))*a+(a=r*i-e*o*c)*a),e*i+r*o*c),t=n,e=i,r=o}var t,e,r;jc.point=function(u,i){t=u*Na,e=Math.sin(i*=Na),r=Math.cos(i),jc.point=n},jc.lineEnd=function(){jc.point=jc.lineEnd=g}}function dr(n,t){function e(t,e){var r=Math.cos(t),u=Math.cos(e),i=n(r*u);return[i*u*Math.sin(t),i*Math.sin(e)]}return e.invert=function(n,e){var r=Math.sqrt(n*n+e*e),u=t(r),i=Math.sin(u),o=Math.cos(u);return[Math.atan2(n*i,r*o),Math.asin(r&&e*i/r)]},e}function mr(n,t){function e(n,t){var e=oa(oa(t)-Ea)<Aa?0:o/Math.pow(u(t),i);return[e*Math.sin(i*n),o-e*Math.cos(i*n)]}var r=Math.cos(n),u=function(n){return Math.tan(Sa/4+n/2)},i=n===t?Math.sin(n):Math.log(r/Math.cos(t))/Math.log(u(t)/u(n)),o=r*Math.pow(u(n),i)/i;return i?(e.invert=function(n,t){var e=o-t,r=I(i)*Math.sqrt(n*n+e*e);return[Math.atan2(n,e)/i,2*Math.atan(Math.pow(o/r,1/i))-Ea]},e):xr}function yr(n,t){function e(n,t){var e=i-t;return[e*Math.sin(u*n),i-e*Math.cos(u*n)]}var r=Math.cos(n),u=n===t?Math.sin(n):(r-Math.cos(t))/(t-n),i=r/u+n;return oa(u)<Aa?er:(e.invert=function(n,t){var e=i-t;return[Math.atan2(n,e)/u,i-I(u)*Math.sqrt(n*n+e*e)]},e)}function xr(n,t){return[n,Math.log(Math.tan(Sa/4+t/2))]}function Mr(n){var t,e=Qe(n),r=e.scale,u=e.translate,i=e.clipExtent;return e.scale=function(){var n=r.apply(e,arguments);return n===e?t?e.clipExtent(null):e:n},e.translate=function(){var n=u.apply(e,arguments);return n===e?t?e.clipExtent(null):e:n},e.clipExtent=function(n){var o=i.apply(e,arguments);if(o===e){if(t=null==n){var a=Sa*r(),c=u();i([[c[0]-a,c[1]-a],[c[0]+a,c[1]+a]])}}else t&&(o=null);return o},e.clipExtent(null)}function _r(n,t){return[Math.log(Math.tan(Sa/4+t/2)),-n]}function br(n){return n[0]}function wr(n){return 
n[1]}function Sr(n){for(var t=n.length,e=[0,1],r=2,u=2;t>u;u++){for(;r>1&&Z(n[e[r-2]],n[e[r-1]],n[u])<=0;)--r;e[r++]=u}return e.slice(0,r)}function kr(n,t){return n[0]-t[0]||n[1]-t[1]}function Er(n,t,e){return(e[0]-t[0])*(n[1]-t[1])<(e[1]-t[1])*(n[0]-t[0])}function Ar(n,t,e,r){var u=n[0],i=e[0],o=t[0]-u,a=r[0]-i,c=n[1],s=e[1],l=t[1]-c,f=r[1]-s,h=(a*(c-s)-f*(u-i))/(f*o-a*l);return[u+h*o,c+h*l]}function Cr(n){var t=n[0],e=n[n.length-1];return!(t[0]-e[0]||t[1]-e[1])}function Nr(){Jr(this),this.edge=this.site=this.circle=null}function Lr(n){var t=Jc.pop()||new Nr;return t.site=n,t}function Tr(n){Or(n),$c.remove(n),Jc.push(n),Jr(n)}function qr(n){var t=n.circle,e=t.x,r=t.cy,u={x:e,y:r},i=n.P,o=n.N,a=[n];Tr(n);for(var c=i;c.circle&&oa(e-c.circle.x)<Aa&&oa(r-c.circle.cy)<Aa;)i=c.P,a.unshift(c),Tr(c),c=i;a.unshift(c),Or(c);for(var s=o;s.circle&&oa(e-s.circle.x)<Aa&&oa(r-s.circle.cy)<Aa;)o=s.N,a.push(s),Tr(s),s=o;a.push(s),Or(s);var l,f=a.length;for(l=1;f>l;++l)s=a[l],c=a[l-1],$r(s.edge,c.site,s.site,u);c=a[0],s=a[f-1],s.edge=Vr(c.site,s.site,null,u),Fr(c),Fr(s)}function zr(n){for(var t,e,r,u,i=n.x,o=n.y,a=$c._;a;)if(r=Rr(a,o)-i,r>Aa)a=a.L;else{if(u=i-Dr(a,o),!(u>Aa)){r>-Aa?(t=a.P,e=a):u>-Aa?(t=a,e=a.N):t=e=a;break}if(!a.R){t=a;break}a=a.R}var c=Lr(n);if($c.insert(t,c),t||e){if(t===e)return Or(t),e=Lr(t.site),$c.insert(c,e),c.edge=e.edge=Vr(t.site,c.site),Fr(t),Fr(e),void 0;if(!e)return c.edge=Vr(t.site,c.site),void 0;Or(t),Or(e);var s=t.site,l=s.x,f=s.y,h=n.x-l,g=n.y-f,p=e.site,v=p.x-l,d=p.y-f,m=2*(h*d-g*v),y=h*h+g*g,x=v*v+d*d,M={x:(d*y-g*x)/m+l,y:(h*x-v*y)/m+f};$r(e.edge,s,p,M),c.edge=Vr(s,n,null,M),e.edge=Vr(n,p,null,M),Fr(t),Fr(e)}}function Rr(n,t){var e=n.site,r=e.x,u=e.y,i=u-t;if(!i)return r;var o=n.P;if(!o)return-1/0;e=o.site;var a=e.x,c=e.y,s=c-t;if(!s)return a;var l=a-r,f=1/i-1/s,h=l/s;return f?(-h+Math.sqrt(h*h-2*f*(l*l/(-2*s)-c+s/2+u-i/2)))/f+r:(r+a)/2}function Dr(n,t){var e=n.N;if(e)return Rr(e,t);var r=n.site;return r.y===t?r.x:1/0}function 
Pr(n){this.site=n,this.edges=[]}function Ur(n){for(var t,e,r,u,i,o,a,c,s,l,f=n[0][0],h=n[1][0],g=n[0][1],p=n[1][1],v=Xc,d=v.length;d--;)if(i=v[d],i&&i.prepare())for(a=i.edges,c=a.length,o=0;c>o;)l=a[o].end(),r=l.x,u=l.y,s=a[++o%c].start(),t=s.x,e=s.y,(oa(r-t)>Aa||oa(u-e)>Aa)&&(a.splice(o,0,new Br(Xr(i.site,l,oa(r-f)<Aa&&p-u>Aa?{x:f,y:oa(t-f)<Aa?e:p}:oa(u-p)<Aa&&h-r>Aa?{x:oa(e-p)<Aa?t:h,y:p}:oa(r-h)<Aa&&u-g>Aa?{x:h,y:oa(t-h)<Aa?e:g}:oa(u-g)<Aa&&r-f>Aa?{x:oa(e-g)<Aa?t:f,y:g}:null),i.site,null)),++c)}function jr(n,t){return t.angle-n.angle}function Hr(){Jr(this),this.x=this.y=this.arc=this.site=this.cy=null}function Fr(n){var t=n.P,e=n.N;if(t&&e){var r=t.site,u=n.site,i=e.site;if(r!==i){var o=u.x,a=u.y,c=r.x-o,s=r.y-a,l=i.x-o,f=i.y-a,h=2*(c*f-s*l);if(!(h>=-Ca)){var g=c*c+s*s,p=l*l+f*f,v=(f*g-s*p)/h,d=(c*p-l*g)/h,f=d+a,m=Gc.pop()||new Hr;m.arc=n,m.site=u,m.x=v+o,m.y=f+Math.sqrt(v*v+d*d),m.cy=f,n.circle=m;for(var y=null,x=Wc._;x;)if(m.y<x.y||m.y===x.y&&m.x<=x.x){if(!x.L){y=x.P;break}x=x.L}else{if(!x.R){y=x;break}x=x.R}Wc.insert(y,m),y||(Bc=m)}}}}function Or(n){var t=n.circle;t&&(t.P||(Bc=t.N),Wc.remove(t),Gc.push(t),Jr(t),n.circle=null)}function Yr(n){for(var t,e=Vc,r=De(n[0][0],n[0][1],n[1][0],n[1][1]),u=e.length;u--;)t=e[u],(!Ir(t,n)||!r(t)||oa(t.a.x-t.b.x)<Aa&&oa(t.a.y-t.b.y)<Aa)&&(t.a=t.b=null,e.splice(u,1))}function Ir(n,t){var e=n.b;if(e)return!0;var r,u,i=n.a,o=t[0][0],a=t[1][0],c=t[0][1],s=t[1][1],l=n.l,f=n.r,h=l.x,g=l.y,p=f.x,v=f.y,d=(h+p)/2,m=(g+v)/2;if(v===g){if(o>d||d>=a)return;if(h>p){if(i){if(i.y>=s)return}else i={x:d,y:c};e={x:d,y:s}}else{if(i){if(i.y<c)return}else i={x:d,y:s};e={x:d,y:c}}}else if(r=(h-p)/(v-g),u=m-r*d,-1>r||r>1)if(h>p){if(i){if(i.y>=s)return}else i={x:(c-u)/r,y:c};e={x:(s-u)/r,y:s}}else{if(i){if(i.y<c)return}else i={x:(s-u)/r,y:s};e={x:(c-u)/r,y:c}}else if(v>g){if(i){if(i.x>=a)return}else i={x:o,y:r*o+u};e={x:a,y:r*a+u}}else{if(i){if(i.x<o)return}else i={x:a,y:r*a+u};e={x:o,y:r*o+u}}return n.a=i,n.b=e,!0}function 
Zr(n,t){this.l=n,this.r=t,this.a=this.b=null}function Vr(n,t,e,r){var u=new Zr(n,t);return Vc.push(u),e&&$r(u,n,t,e),r&&$r(u,t,n,r),Xc[n.i].edges.push(new Br(u,n,t)),Xc[t.i].edges.push(new Br(u,t,n)),u}function Xr(n,t,e){var r=new Zr(n,null);return r.a=t,r.b=e,Vc.push(r),r}function $r(n,t,e,r){n.a||n.b?n.l===e?n.b=r:n.a=r:(n.a=r,n.l=t,n.r=e)}function Br(n,t,e){var r=n.a,u=n.b;this.edge=n,this.site=t,this.angle=e?Math.atan2(e.y-t.y,e.x-t.x):n.l===t?Math.atan2(u.x-r.x,r.y-u.y):Math.atan2(r.x-u.x,u.y-r.y)}function Wr(){this._=null}function Jr(n){n.U=n.C=n.L=n.R=n.P=n.N=null}function Gr(n,t){var e=t,r=t.R,u=e.U;u?u.L===e?u.L=r:u.R=r:n._=r,r.U=u,e.U=r,e.R=r.L,e.R&&(e.R.U=e),r.L=e}function Kr(n,t){var e=t,r=t.L,u=e.U;u?u.L===e?u.L=r:u.R=r:n._=r,r.U=u,e.U=r,e.L=r.R,e.L&&(e.L.U=e),r.R=e}function Qr(n){for(;n.L;)n=n.L;return n}function nu(n,t){var e,r,u,i=n.sort(tu).pop();for(Vc=[],Xc=new Array(n.length),$c=new Wr,Wc=new Wr;;)if(u=Bc,i&&(!u||i.y<u.y||i.y===u.y&&i.x<u.x))(i.x!==e||i.y!==r)&&(Xc[i.i]=new Pr(i),zr(i),e=i.x,r=i.y),i=n.pop();else{if(!u)break;qr(u.arc)}t&&(Yr(t),Ur(t));var o={cells:Xc,edges:Vc};return $c=Wc=Vc=Xc=null,o}function tu(n,t){return t.y-n.y||t.x-n.x}function eu(n,t,e){return(n.x-e.x)*(t.y-n.y)-(n.x-t.x)*(e.y-n.y)}function ru(n){return n.x}function uu(n){return n.y}function iu(){return{leaf:!0,nodes:[],point:null,x:null,y:null}}function ou(n,t,e,r,u,i){if(!n(t,e,r,u,i)){var o=.5*(e+u),a=.5*(r+i),c=t.nodes;c[0]&&ou(n,c[0],e,r,o,a),c[1]&&ou(n,c[1],o,r,u,a),c[2]&&ou(n,c[2],e,a,o,i),c[3]&&ou(n,c[3],o,a,u,i)}}function au(n,t){n=Xo.rgb(n),t=Xo.rgb(t);var e=n.r,r=n.g,u=n.b,i=t.r-e,o=t.g-r,a=t.b-u;return function(n){return"#"+vt(Math.round(e+i*n))+vt(Math.round(r+o*n))+vt(Math.round(u+a*n))}}function cu(n,t){var e,r={},u={};for(e in n)e in t?r[e]=fu(n[e],t[e]):u[e]=n[e];for(e in t)e in n||(u[e]=t[e]);return function(n){for(e in r)u[e]=r[e](n);return u}}function su(n,t){return t-=n=+n,function(e){return n+t*e}}function lu(n,t){var 
e,r,u,i,o,a=0,c=0,s=[],l=[];for(n+="",t+="",Qc.lastIndex=0,r=0;e=Qc.exec(t);++r)e.index&&s.push(t.substring(a,c=e.index)),l.push({i:s.length,x:e[0]}),s.push(null),a=Qc.lastIndex;for(a<t.length&&s.push(t.substring(a)),r=0,i=l.length;(e=Qc.exec(n))&&i>r;++r)if(o=l[r],o.x==e[0]){if(o.i)if(null==s[o.i+1])for(s[o.i-1]+=o.x,s.splice(o.i,1),u=r+1;i>u;++u)l[u].i--;else for(s[o.i-1]+=o.x+s[o.i+1],s.splice(o.i,2),u=r+1;i>u;++u)l[u].i-=2;else if(null==s[o.i+1])s[o.i]=o.x;else for(s[o.i]=o.x+s[o.i+1],s.splice(o.i+1,1),u=r+1;i>u;++u)l[u].i--;l.splice(r,1),i--,r--}else o.x=su(parseFloat(e[0]),parseFloat(o.x));for(;i>r;)o=l.pop(),null==s[o.i+1]?s[o.i]=o.x:(s[o.i]=o.x+s[o.i+1],s.splice(o.i+1,1)),i--;return 1===s.length?null==s[0]?(o=l[0].x,function(n){return o(n)+""}):function(){return t}:function(n){for(r=0;i>r;++r)s[(o=l[r]).i]=o.x(n);return s.join("")}}function fu(n,t){for(var e,r=Xo.interpolators.length;--r>=0&&!(e=Xo.interpolators[r](n,t)););return e}function hu(n,t){var e,r=[],u=[],i=n.length,o=t.length,a=Math.min(n.length,t.length);for(e=0;a>e;++e)r.push(fu(n[e],t[e]));for(;i>e;++e)u[e]=n[e];for(;o>e;++e)u[e]=t[e];return function(n){for(e=0;a>e;++e)u[e]=r[e](n);return u}}function gu(n){return function(t){return 0>=t?0:t>=1?1:n(t)}}function pu(n){return function(t){return 1-n(1-t)}}function vu(n){return function(t){return.5*(.5>t?n(2*t):2-n(2-2*t))}}function du(n){return n*n}function mu(n){return n*n*n}function yu(n){if(0>=n)return 0;if(n>=1)return 1;var t=n*n,e=t*n;return 4*(.5>n?e:3*(n-t)+e-.75)}function xu(n){return function(t){return Math.pow(t,n)}}function Mu(n){return 1-Math.cos(n*Ea)}function _u(n){return Math.pow(2,10*(n-1))}function bu(n){return 1-Math.sqrt(1-n*n)}function wu(n,t){var e;return arguments.length<2&&(t=.45),arguments.length?e=t/ka*Math.asin(1/n):(n=1,e=t/4),function(r){return 1+n*Math.pow(2,-10*r)*Math.sin((r-e)*ka/t)}}function Su(n){return n||(n=1.70158),function(t){return t*t*((n+1)*t-n)}}function ku(n){return 
1/2.75>n?7.5625*n*n:2/2.75>n?7.5625*(n-=1.5/2.75)*n+.75:2.5/2.75>n?7.5625*(n-=2.25/2.75)*n+.9375:7.5625*(n-=2.625/2.75)*n+.984375}function Eu(n,t){n=Xo.hcl(n),t=Xo.hcl(t);var e=n.h,r=n.c,u=n.l,i=t.h-e,o=t.c-r,a=t.l-u;return isNaN(o)&&(o=0,r=isNaN(r)?t.c:r),isNaN(i)?(i=0,e=isNaN(e)?t.h:e):i>180?i-=360:-180>i&&(i+=360),function(n){return rt(e+i*n,r+o*n,u+a*n)+""}}function Au(n,t){n=Xo.hsl(n),t=Xo.hsl(t);var e=n.h,r=n.s,u=n.l,i=t.h-e,o=t.s-r,a=t.l-u;return isNaN(o)&&(o=0,r=isNaN(r)?t.s:r),isNaN(i)?(i=0,e=isNaN(e)?t.h:e):i>180?i-=360:-180>i&&(i+=360),function(n){return nt(e+i*n,r+o*n,u+a*n)+""}}function Cu(n,t){n=Xo.lab(n),t=Xo.lab(t);var e=n.l,r=n.a,u=n.b,i=t.l-e,o=t.a-r,a=t.b-u;return function(n){return ot(e+i*n,r+o*n,u+a*n)+""}}function Nu(n,t){return t-=n,function(e){return Math.round(n+t*e)}}function Lu(n){var t=[n.a,n.b],e=[n.c,n.d],r=qu(t),u=Tu(t,e),i=qu(zu(e,t,-u))||0;t[0]*e[1]<e[0]*t[1]&&(t[0]*=-1,t[1]*=-1,r*=-1,u*=-1),this.rotate=(r?Math.atan2(t[1],t[0]):Math.atan2(-e[0],e[1]))*La,this.translate=[n.e,n.f],this.scale=[r,i],this.skew=i?Math.atan2(u,i)*La:0}function Tu(n,t){return n[0]*t[0]+n[1]*t[1]}function qu(n){var t=Math.sqrt(Tu(n,n));return t&&(n[0]/=t,n[1]/=t),t}function zu(n,t,e){return n[0]+=e*t[0],n[1]+=e*t[1],n}function Ru(n,t){var e,r=[],u=[],i=Xo.transform(n),o=Xo.transform(t),a=i.translate,c=o.translate,s=i.rotate,l=o.rotate,f=i.skew,h=o.skew,g=i.scale,p=o.scale;return 
a[0]!=c[0]||a[1]!=c[1]?(r.push("translate(",null,",",null,")"),u.push({i:1,x:su(a[0],c[0])},{i:3,x:su(a[1],c[1])})):c[0]||c[1]?r.push("translate("+c+")"):r.push(""),s!=l?(s-l>180?l+=360:l-s>180&&(s+=360),u.push({i:r.push(r.pop()+"rotate(",null,")")-2,x:su(s,l)})):l&&r.push(r.pop()+"rotate("+l+")"),f!=h?u.push({i:r.push(r.pop()+"skewX(",null,")")-2,x:su(f,h)}):h&&r.push(r.pop()+"skewX("+h+")"),g[0]!=p[0]||g[1]!=p[1]?(e=r.push(r.pop()+"scale(",null,",",null,")"),u.push({i:e-4,x:su(g[0],p[0])},{i:e-2,x:su(g[1],p[1])})):(1!=p[0]||1!=p[1])&&r.push(r.pop()+"scale("+p+")"),e=u.length,function(n){for(var t,i=-1;++i<e;)r[(t=u[i]).i]=t.x(n);return r.join("")}}function Du(n,t){return t=t-(n=+n)?1/(t-n):0,function(e){return(e-n)*t}}function Pu(n,t){return t=t-(n=+n)?1/(t-n):0,function(e){return Math.max(0,Math.min(1,(e-n)*t))}}function Uu(n){for(var t=n.source,e=n.target,r=Hu(t,e),u=[t];t!==r;)t=t.parent,u.push(t);for(var i=u.length;e!==r;)u.splice(i,0,e),e=e.parent;return u}function ju(n){for(var t=[],e=n.parent;null!=e;)t.push(n),n=e,e=e.parent;return t.push(n),t}function Hu(n,t){if(n===t)return n;for(var e=ju(n),r=ju(t),u=e.pop(),i=r.pop(),o=null;u===i;)o=u,u=e.pop(),i=r.pop();return o}function Fu(n){n.fixed|=2}function Ou(n){n.fixed&=-7}function Yu(n){n.fixed|=4,n.px=n.x,n.py=n.y}function Iu(n){n.fixed&=-5}function Zu(n,t,e){var r=0,u=0;if(n.charge=0,!n.leaf)for(var i,o=n.nodes,a=o.length,c=-1;++c<a;)i=o[c],null!=i&&(Zu(i,t,e),n.charge+=i.charge,r+=i.charge*i.cx,u+=i.charge*i.cy);if(n.point){n.leaf||(n.point.x+=Math.random()-.5,n.point.y+=Math.random()-.5);var s=t*e[n.point.index];n.charge+=n.pointCharge=s,r+=s*n.point.x,u+=s*n.point.y}n.cx=r/n.charge,n.cy=u/n.charge}function Vu(n,t){return Xo.rebind(n,t,"sort","children","value"),n.nodes=n,n.links=Wu,n}function Xu(n){return n.children}function $u(n){return n.value}function Bu(n,t){return t.value-n.value}function Wu(n){return 
Xo.merge(n.map(function(n){return(n.children||[]).map(function(t){return{source:n,target:t}})}))}function Ju(n){return n.x}function Gu(n){return n.y}function Ku(n,t,e){n.y0=t,n.y=e}function Qu(n){return Xo.range(n.length)}function ni(n){for(var t=-1,e=n[0].length,r=[];++t<e;)r[t]=0;return r}function ti(n){for(var t,e=1,r=0,u=n[0][1],i=n.length;i>e;++e)(t=n[e][1])>u&&(r=e,u=t);return r}function ei(n){return n.reduce(ri,0)}function ri(n,t){return n+t[1]}function ui(n,t){return ii(n,Math.ceil(Math.log(t.length)/Math.LN2+1))}function ii(n,t){for(var e=-1,r=+n[0],u=(n[1]-r)/t,i=[];++e<=t;)i[e]=u*e+r;return i}function oi(n){return[Xo.min(n),Xo.max(n)]}function ai(n,t){return n.parent==t.parent?1:2}function ci(n){var t=n.children;return t&&t.length?t[0]:n._tree.thread}function si(n){var t,e=n.children;return e&&(t=e.length)?e[t-1]:n._tree.thread}function li(n,t){var e=n.children;if(e&&(u=e.length))for(var r,u,i=-1;++i<u;)t(r=li(e[i],t),n)>0&&(n=r);return n}function fi(n,t){return n.x-t.x}function hi(n,t){return t.x-n.x}function gi(n,t){return n.depth-t.depth}function pi(n,t){function e(n,r){var u=n.children;if(u&&(o=u.length))for(var i,o,a=null,c=-1;++c<o;)i=u[c],e(i,a),a=i;t(n,r)}e(n,null)}function vi(n){for(var t,e=0,r=0,u=n.children,i=u.length;--i>=0;)t=u[i]._tree,t.prelim+=e,t.mod+=e,e+=t.shift+(r+=t.change)}function di(n,t,e){n=n._tree,t=t._tree;var r=e/(t.number-n.number);n.change+=r,t.change-=r,t.shift+=e,t.prelim+=e,t.mod+=e}function mi(n,t,e){return n._tree.ancestor.parent==t.parent?n._tree.ancestor:e}function yi(n,t){return n.value-t.value}function xi(n,t){var e=n._pack_next;n._pack_next=t,t._pack_prev=n,t._pack_next=e,e._pack_prev=t}function Mi(n,t){n._pack_next=t,t._pack_prev=n}function _i(n,t){var e=t.x-n.x,r=t.y-n.y,u=n.r+t.r;return.999*u*u>e*e+r*r}function bi(n){function t(n){l=Math.min(n.x-n.r,l),f=Math.max(n.x+n.r,f),h=Math.min(n.y-n.r,h),g=Math.max(n.y+n.r,g)}if((e=n.children)&&(s=e.length)){var 
e,r,u,i,o,a,c,s,l=1/0,f=-1/0,h=1/0,g=-1/0;if(e.forEach(wi),r=e[0],r.x=-r.r,r.y=0,t(r),s>1&&(u=e[1],u.x=u.r,u.y=0,t(u),s>2))for(i=e[2],Ei(r,u,i),t(i),xi(r,i),r._pack_prev=i,xi(i,u),u=r._pack_next,o=3;s>o;o++){Ei(r,u,i=e[o]);var p=0,v=1,d=1;for(a=u._pack_next;a!==u;a=a._pack_next,v++)if(_i(a,i)){p=1;break}if(1==p)for(c=r._pack_prev;c!==a._pack_prev&&!_i(c,i);c=c._pack_prev,d++);p?(d>v||v==d&&u.r<r.r?Mi(r,u=a):Mi(r=c,u),o--):(xi(r,i),u=i,t(i))}var m=(l+f)/2,y=(h+g)/2,x=0;for(o=0;s>o;o++)i=e[o],i.x-=m,i.y-=y,x=Math.max(x,i.r+Math.sqrt(i.x*i.x+i.y*i.y));n.r=x,e.forEach(Si)}}function wi(n){n._pack_next=n._pack_prev=n}function Si(n){delete n._pack_next,delete n._pack_prev}function ki(n,t,e,r){var u=n.children;if(n.x=t+=r*n.x,n.y=e+=r*n.y,n.r*=r,u)for(var i=-1,o=u.length;++i<o;)ki(u[i],t,e,r)}function Ei(n,t,e){var r=n.r+e.r,u=t.x-n.x,i=t.y-n.y;if(r&&(u||i)){var o=t.r+e.r,a=u*u+i*i;o*=o,r*=r;var c=.5+(r-o)/(2*a),s=Math.sqrt(Math.max(0,2*o*(r+a)-(r-=a)*r-o*o))/(2*a);e.x=n.x+c*u+s*i,e.y=n.y+c*i-s*u}else e.x=n.x+r,e.y=n.y}function Ai(n){return 1+Xo.max(n,function(n){return n.y})}function Ci(n){return n.reduce(function(n,t){return n+t.x},0)/n.length}function Ni(n){var t=n.children;return t&&t.length?Ni(t[0]):n}function Li(n){var t,e=n.children;return e&&(t=e.length)?Li(e[t-1]):n}function Ti(n){return{x:n.x,y:n.y,dx:n.dx,dy:n.dy}}function qi(n,t){var e=n.x+t[3],r=n.y+t[0],u=n.dx-t[1]-t[3],i=n.dy-t[0]-t[2];return 0>u&&(e+=u/2,u=0),0>i&&(r+=i/2,i=0),{x:e,y:r,dx:u,dy:i}}function zi(n){var t=n[0],e=n[n.length-1];return e>t?[t,e]:[e,t]}function Ri(n){return n.rangeExtent?n.rangeExtent():zi(n.range())}function Di(n,t,e,r){var u=e(n[0],n[1]),i=r(t[0],t[1]);return function(n){return i(u(n))}}function Pi(n,t){var e,r=0,u=n.length-1,i=n[r],o=n[u];return i>o&&(e=r,r=u,u=e,e=i,i=o,o=e),n[r]=t.floor(i),n[u]=t.ceil(o),n}function Ui(n){return n?{floor:function(t){return Math.floor(t/n)*n},ceil:function(t){return Math.ceil(t/n)*n}}:ls}function ji(n,t,e,r){var 
u=[],i=[],o=0,a=Math.min(n.length,t.length)-1;for(n[a]<n[0]&&(n=n.slice().reverse(),t=t.slice().reverse());++o<=a;)u.push(e(n[o-1],n[o])),i.push(r(t[o-1],t[o]));return function(t){var e=Xo.bisect(n,t,1,a)-1;return i[e](u[e](t))}}function Hi(n,t,e,r){function u(){var u=Math.min(n.length,t.length)>2?ji:Di,c=r?Pu:Du;return o=u(n,t,c,e),a=u(t,n,c,fu),i}function i(n){return o(n)}var o,a;return i.invert=function(n){return a(n)},i.domain=function(t){return arguments.length?(n=t.map(Number),u()):n},i.range=function(n){return arguments.length?(t=n,u()):t},i.rangeRound=function(n){return i.range(n).interpolate(Nu)},i.clamp=function(n){return arguments.length?(r=n,u()):r},i.interpolate=function(n){return arguments.length?(e=n,u()):e},i.ticks=function(t){return Ii(n,t)},i.tickFormat=function(t,e){return Zi(n,t,e)},i.nice=function(t){return Oi(n,t),u()},i.copy=function(){return Hi(n,t,e,r)},u()}function Fi(n,t){return Xo.rebind(n,t,"range","rangeRound","interpolate","clamp")}function Oi(n,t){return Pi(n,Ui(Yi(n,t)[2]))}function Yi(n,t){null==t&&(t=10);var e=zi(n),r=e[1]-e[0],u=Math.pow(10,Math.floor(Math.log(r/t)/Math.LN10)),i=t/r*u;return.15>=i?u*=10:.35>=i?u*=5:.75>=i&&(u*=2),e[0]=Math.ceil(e[0]/u)*u,e[1]=Math.floor(e[1]/u)*u+.5*u,e[2]=u,e}function Ii(n,t){return Xo.range.apply(Xo,Yi(n,t))}function Zi(n,t,e){var r=Yi(n,t);return Xo.format(e?e.replace(Qa,function(n,t,e,u,i,o,a,c,s,l){return[t,e,u,i,o,a,c,s||"."+Xi(l,r),l].join("")}):",."+Vi(r[2])+"f")}function Vi(n){return-Math.floor(Math.log(n)/Math.LN10+.01)}function Xi(n,t){var e=Vi(t[2]);return n in fs?Math.abs(e-Vi(Math.max(Math.abs(t[0]),Math.abs(t[1]))))+ +("e"!==n):e-2*("%"===n)}function $i(n,t,e,r){function u(n){return(e?Math.log(0>n?0:n):-Math.log(n>0?0:-n))/Math.log(t)}function i(n){return e?Math.pow(t,n):-Math.pow(t,-n)}function o(t){return n(u(t))}return o.invert=function(t){return i(n.invert(t))},o.domain=function(t){return 
arguments.length?(e=t[0]>=0,n.domain((r=t.map(Number)).map(u)),o):r},o.base=function(e){return arguments.length?(t=+e,n.domain(r.map(u)),o):t},o.nice=function(){var t=Pi(r.map(u),e?Math:gs);return n.domain(t),r=t.map(i),o},o.ticks=function(){var n=zi(r),o=[],a=n[0],c=n[1],s=Math.floor(u(a)),l=Math.ceil(u(c)),f=t%1?2:t;if(isFinite(l-s)){if(e){for(;l>s;s++)for(var h=1;f>h;h++)o.push(i(s)*h);o.push(i(s))}else for(o.push(i(s));s++<l;)for(var h=f-1;h>0;h--)o.push(i(s)*h);for(s=0;o[s]<a;s++);for(l=o.length;o[l-1]>c;l--);o=o.slice(s,l)}return o},o.tickFormat=function(n,t){if(!arguments.length)return hs;arguments.length<2?t=hs:"function"!=typeof t&&(t=Xo.format(t));var r,a=Math.max(.1,n/o.ticks().length),c=e?(r=1e-12,Math.ceil):(r=-1e-12,Math.floor);return function(n){return n/i(c(u(n)+r))<=a?t(n):""}},o.copy=function(){return $i(n.copy(),t,e,r)},Fi(o,n)}function Bi(n,t,e){function r(t){return n(u(t))}var u=Wi(t),i=Wi(1/t);return r.invert=function(t){return i(n.invert(t))},r.domain=function(t){return arguments.length?(n.domain((e=t.map(Number)).map(u)),r):e},r.ticks=function(n){return Ii(e,n)},r.tickFormat=function(n,t){return Zi(e,n,t)},r.nice=function(n){return r.domain(Oi(e,n))},r.exponent=function(o){return arguments.length?(u=Wi(t=o),i=Wi(1/t),n.domain(e.map(u)),r):t},r.copy=function(){return Bi(n.copy(),t,e)},Fi(r,n)}function Wi(n){return function(t){return 0>t?-Math.pow(-t,n):Math.pow(t,n)}}function Ji(n,t){function e(e){return o[((i.get(e)||"range"===t.t&&i.set(e,n.push(e)))-1)%o.length]}function r(t,e){return Xo.range(n.length).map(function(n){return t+e*n})}var i,o,a;return e.domain=function(r){if(!arguments.length)return n;n=[],i=new u;for(var o,a=-1,c=r.length;++a<c;)i.has(o=r[a])||i.set(o,n.push(o));return e[t.t].apply(e,t.a)},e.range=function(n){return arguments.length?(o=n,a=0,t={t:"range",a:arguments},e):o},e.rangePoints=function(u,i){arguments.length<2&&(i=0);var c=u[0],s=u[1],l=(s-c)/(Math.max(1,n.length-1)+i);return 
o=r(n.length<2?(c+s)/2:c+l*i/2,l),a=0,t={t:"rangePoints",a:arguments},e},e.rangeBands=function(u,i,c){arguments.length<2&&(i=0),arguments.length<3&&(c=i);var s=u[1]<u[0],l=u[s-0],f=u[1-s],h=(f-l)/(n.length-i+2*c);return o=r(l+h*c,h),s&&o.reverse(),a=h*(1-i),t={t:"rangeBands",a:arguments},e},e.rangeRoundBands=function(u,i,c){arguments.length<2&&(i=0),arguments.length<3&&(c=i);var s=u[1]<u[0],l=u[s-0],f=u[1-s],h=Math.floor((f-l)/(n.length-i+2*c)),g=f-l-(n.length-i)*h;return o=r(l+Math.round(g/2),h),s&&o.reverse(),a=Math.round(h*(1-i)),t={t:"rangeRoundBands",a:arguments},e},e.rangeBand=function(){return a},e.rangeExtent=function(){return zi(t.a[0])},e.copy=function(){return Ji(n,t)},e.domain(n)}function Gi(n,t){function e(){var e=0,i=t.length;for(u=[];++e<i;)u[e-1]=Xo.quantile(n,e/i);return r}function r(n){return isNaN(n=+n)?void 0:t[Xo.bisect(u,n)]}var u;return r.domain=function(t){return arguments.length?(n=t.filter(function(n){return!isNaN(n)}).sort(Xo.ascending),e()):n},r.range=function(n){return arguments.length?(t=n,e()):t},r.quantiles=function(){return u},r.invertExtent=function(e){return e=t.indexOf(e),0>e?[0/0,0/0]:[e>0?u[e-1]:n[0],e<u.length?u[e]:n[n.length-1]]},r.copy=function(){return Gi(n,t)},e()}function Ki(n,t,e){function r(t){return e[Math.max(0,Math.min(o,Math.floor(i*(t-n))))]}function u(){return i=e.length/(t-n),o=e.length-1,r}var i,o;return r.domain=function(e){return arguments.length?(n=+e[0],t=+e[e.length-1],u()):[n,t]},r.range=function(n){return arguments.length?(e=n,u()):e},r.invertExtent=function(t){return t=e.indexOf(t),t=0>t?0/0:t/i+n,[t,t+1/i]},r.copy=function(){return Ki(n,t,e)},u()}function Qi(n,t){function e(e){return e>=e?t[Xo.bisect(n,e)]:void 0}return e.domain=function(t){return arguments.length?(n=t,e):n},e.range=function(n){return arguments.length?(t=n,e):t},e.invertExtent=function(e){return e=t.indexOf(e),[n[e-1],n[e]]},e.copy=function(){return Qi(n,t)},e}function no(n){function t(n){return+n}return 
t.invert=t,t.domain=t.range=function(e){return arguments.length?(n=e.map(t),t):n},t.ticks=function(t){return Ii(n,t)},t.tickFormat=function(t,e){return Zi(n,t,e)},t.copy=function(){return no(n)},t}function to(n){return n.innerRadius}function eo(n){return n.outerRadius}function ro(n){return n.startAngle}function uo(n){return n.endAngle}function io(n){function t(t){function o(){s.push("M",i(n(l),a))}for(var c,s=[],l=[],f=-1,h=t.length,g=_t(e),p=_t(r);++f<h;)u.call(this,c=t[f],f)?l.push([+g.call(this,c,f),+p.call(this,c,f)]):l.length&&(o(),l=[]);return l.length&&o(),s.length?s.join(""):null}var e=br,r=wr,u=be,i=oo,o=i.key,a=.7;return t.x=function(n){return arguments.length?(e=n,t):e},t.y=function(n){return arguments.length?(r=n,t):r},t.defined=function(n){return arguments.length?(u=n,t):u},t.interpolate=function(n){return arguments.length?(o="function"==typeof n?i=n:(i=Ms.get(n)||oo).key,t):o},t.tension=function(n){return arguments.length?(a=n,t):a},t}function oo(n){return n.join("L")}function ao(n){return oo(n)+"Z"}function co(n){for(var t=0,e=n.length,r=n[0],u=[r[0],",",r[1]];++t<e;)u.push("H",(r[0]+(r=n[t])[0])/2,"V",r[1]);return e>1&&u.push("H",r[0]),u.join("")}function so(n){for(var t=0,e=n.length,r=n[0],u=[r[0],",",r[1]];++t<e;)u.push("V",(r=n[t])[1],"H",r[0]);return u.join("")}function lo(n){for(var t=0,e=n.length,r=n[0],u=[r[0],",",r[1]];++t<e;)u.push("H",(r=n[t])[0],"V",r[1]);return u.join("")}function fo(n,t){return n.length<4?oo(n):n[1]+po(n.slice(1,n.length-1),vo(n,t))}function ho(n,t){return n.length<3?oo(n):n[0]+po((n.push(n[0]),n),vo([n[n.length-2]].concat(n,[n[1]]),t))}function go(n,t){return n.length<3?oo(n):n[0]+po(n,vo(n,t))}function po(n,t){if(t.length<1||n.length!=t.length&&n.length!=t.length+2)return oo(n);var 
e=n.length!=t.length,r="",u=n[0],i=n[1],o=t[0],a=o,c=1;if(e&&(r+="Q"+(i[0]-2*o[0]/3)+","+(i[1]-2*o[1]/3)+","+i[0]+","+i[1],u=n[1],c=2),t.length>1){a=t[1],i=n[c],c++,r+="C"+(u[0]+o[0])+","+(u[1]+o[1])+","+(i[0]-a[0])+","+(i[1]-a[1])+","+i[0]+","+i[1];for(var s=2;s<t.length;s++,c++)i=n[c],a=t[s],r+="S"+(i[0]-a[0])+","+(i[1]-a[1])+","+i[0]+","+i[1]}if(e){var l=n[c];r+="Q"+(i[0]+2*a[0]/3)+","+(i[1]+2*a[1]/3)+","+l[0]+","+l[1]}return r}function vo(n,t){for(var e,r=[],u=(1-t)/2,i=n[0],o=n[1],a=1,c=n.length;++a<c;)e=i,i=o,o=n[a],r.push([u*(o[0]-e[0]),u*(o[1]-e[1])]);return r}function mo(n){if(n.length<3)return oo(n);var t=1,e=n.length,r=n[0],u=r[0],i=r[1],o=[u,u,u,(r=n[1])[0]],a=[i,i,i,r[1]],c=[u,",",i,"L",_o(ws,o),",",_o(ws,a)];for(n.push(n[e-1]);++t<=e;)r=n[t],o.shift(),o.push(r[0]),a.shift(),a.push(r[1]),bo(c,o,a);return n.pop(),c.push("L",r),c.join("")}function yo(n){if(n.length<4)return oo(n);for(var t,e=[],r=-1,u=n.length,i=[0],o=[0];++r<3;)t=n[r],i.push(t[0]),o.push(t[1]);for(e.push(_o(ws,i)+","+_o(ws,o)),--r;++r<u;)t=n[r],i.shift(),i.push(t[0]),o.shift(),o.push(t[1]),bo(e,i,o);return e.join("")}function xo(n){for(var t,e,r=-1,u=n.length,i=u+4,o=[],a=[];++r<4;)e=n[r%u],o.push(e[0]),a.push(e[1]);for(t=[_o(ws,o),",",_o(ws,a)],--r;++r<i;)e=n[r%u],o.shift(),o.push(e[0]),a.shift(),a.push(e[1]),bo(t,o,a);return t.join("")}function Mo(n,t){var e=n.length-1;if(e)for(var r,u,i=n[0][0],o=n[0][1],a=n[e][0]-i,c=n[e][1]-o,s=-1;++s<=e;)r=n[s],u=s/e,r[0]=t*r[0]+(1-t)*(i+u*a),r[1]=t*r[1]+(1-t)*(o+u*c);return mo(n)}function _o(n,t){return n[0]*t[0]+n[1]*t[1]+n[2]*t[2]+n[3]*t[3]}function bo(n,t,e){n.push("C",_o(_s,t),",",_o(_s,e),",",_o(bs,t),",",_o(bs,e),",",_o(ws,t),",",_o(ws,e))}function wo(n,t){return(t[1]-n[1])/(t[0]-n[0])}function So(n){for(var t=0,e=n.length-1,r=[],u=n[0],i=n[1],o=r[0]=wo(u,i);++t<e;)r[t]=(o+(o=wo(u=i,i=n[t+1])))/2;return r[t]=o,r}function ko(n){for(var 
t,e,r,u,i=[],o=So(n),a=-1,c=n.length-1;++a<c;)t=wo(n[a],n[a+1]),oa(t)<Aa?o[a]=o[a+1]=0:(e=o[a]/t,r=o[a+1]/t,u=e*e+r*r,u>9&&(u=3*t/Math.sqrt(u),o[a]=u*e,o[a+1]=u*r));for(a=-1;++a<=c;)u=(n[Math.min(c,a+1)][0]-n[Math.max(0,a-1)][0])/(6*(1+o[a]*o[a])),i.push([u||0,o[a]*u||0]);return i}function Eo(n){return n.length<3?oo(n):n[0]+po(n,ko(n))}function Ao(n){for(var t,e,r,u=-1,i=n.length;++u<i;)t=n[u],e=t[0],r=t[1]+ys,t[0]=e*Math.cos(r),t[1]=e*Math.sin(r);return n}function Co(n){function t(t){function c(){v.push("M",a(n(m),f),l,s(n(d.reverse()),f),"Z")}for(var h,g,p,v=[],d=[],m=[],y=-1,x=t.length,M=_t(e),_=_t(u),b=e===r?function(){return g}:_t(r),w=u===i?function(){return p}:_t(i);++y<x;)o.call(this,h=t[y],y)?(d.push([g=+M.call(this,h,y),p=+_.call(this,h,y)]),m.push([+b.call(this,h,y),+w.call(this,h,y)])):d.length&&(c(),d=[],m=[]);return d.length&&c(),v.length?v.join(""):null}var e=br,r=br,u=0,i=wr,o=be,a=oo,c=a.key,s=a,l="L",f=.7;return t.x=function(n){return arguments.length?(e=r=n,t):r},t.x0=function(n){return arguments.length?(e=n,t):e},t.x1=function(n){return arguments.length?(r=n,t):r},t.y=function(n){return arguments.length?(u=i=n,t):i},t.y0=function(n){return arguments.length?(u=n,t):u},t.y1=function(n){return arguments.length?(i=n,t):i},t.defined=function(n){return arguments.length?(o=n,t):o},t.interpolate=function(n){return arguments.length?(c="function"==typeof n?a=n:(a=Ms.get(n)||oo).key,s=a.reverse||a,l=a.closed?"M":"L",t):c},t.tension=function(n){return arguments.length?(f=n,t):f},t}function No(n){return n.radius}function Lo(n){return[n.x,n.y]}function To(n){return function(){var t=n.apply(this,arguments),e=t[0],r=t[1]+ys;return[e*Math.cos(r),e*Math.sin(r)]}}function qo(){return 64}function zo(){return"circle"}function Ro(n){var t=Math.sqrt(n/Sa);return"M0,"+t+"A"+t+","+t+" 0 1,1 0,"+-t+"A"+t+","+t+" 0 1,1 0,"+t+"Z"}function Do(n,t){return fa(n,Ns),n.id=t,n}function Po(n,t,e,r){var u=n.id;return R(n,"function"==typeof 
e?function(n,i,o){n.__transition__[u].tween.set(t,r(e.call(n,n.__data__,i,o)))}:(e=r(e),function(n){n.__transition__[u].tween.set(t,e)}))}function Uo(n){return null==n&&(n=""),function(){this.textContent=n}}function jo(n,t,e,r){var i=n.__transition__||(n.__transition__={active:0,count:0}),o=i[e];if(!o){var a=r.time;o=i[e]={tween:new u,time:a,ease:r.ease,delay:r.delay,duration:r.duration},++i.count,Xo.timer(function(r){function u(r){return i.active>e?s():(i.active=e,o.event&&o.event.start.call(n,l,t),o.tween.forEach(function(e,r){(r=r.call(n,l,t))&&v.push(r)}),Xo.timer(function(){return p.c=c(r||1)?be:c,1},0,a),void 0)}function c(r){if(i.active!==e)return s();for(var u=r/g,a=f(u),c=v.length;c>0;)v[--c].call(n,a);return u>=1?(o.event&&o.event.end.call(n,l,t),s()):void 0}function s(){return--i.count?delete i[e]:delete n.__transition__,1}var l=n.__data__,f=o.ease,h=o.delay,g=o.duration,p=Ja,v=[];return p.t=h+a,r>=h?u(r-h):(p.c=u,void 0)},0,a)}}function Ho(n,t){n.attr("transform",function(n){return"translate("+t(n)+",0)"})}function Fo(n,t){n.attr("transform",function(n){return"translate(0,"+t(n)+")"})}function Oo(n){return n.toISOString()}function Yo(n,t,e){function r(t){return n(t)}function u(n,e){var r=n[1]-n[0],u=r/e,i=Xo.bisect(js,u);return i==js.length?[t.year,Yi(n.map(function(n){return n/31536e6}),e)[2]]:i?t[u/js[i-1]<js[i]/u?i-1:i]:[Os,Yi(n,e)[2]]}return r.invert=function(t){return Io(n.invert(t))},r.domain=function(t){return arguments.length?(n.domain(t),r):n.domain().map(Io)},r.nice=function(n,t){function e(e){return!isNaN(e)&&!n.range(e,Io(+e+1),t).length}var i=r.domain(),o=zi(i),a=null==n?u(o,10):"number"==typeof n&&u(o,n);return a&&(n=a[0],t=a[1]),r.domain(Pi(i,t>1?{floor:function(t){for(;e(t=n.floor(t));)t=Io(t-1);return t},ceil:function(t){for(;e(t=n.ceil(t));)t=Io(+t+1);return t}}:n))},r.ticks=function(n,t){var e=zi(r.domain()),i=null==n?u(e,10):"number"==typeof n?u(e,n):!n.range&&[{range:n},t];return 
i&&(n=i[0],t=i[1]),n.range(e[0],Io(+e[1]+1),1>t?1:t)},r.tickFormat=function(){return e},r.copy=function(){return Yo(n.copy(),t,e)},Fi(r,n)}function Io(n){return new Date(n)}function Zo(n){return JSON.parse(n.responseText)}function Vo(n){var t=Wo.createRange();return t.selectNode(Wo.body),t.createContextualFragment(n.responseText)}var Xo={version:"3.4.3"};Date.now||(Date.now=function(){return+new Date});var $o=[].slice,Bo=function(n){return $o.call(n)},Wo=document,Jo=Wo.documentElement,Go=window;try{Bo(Jo.childNodes)[0].nodeType}catch(Ko){Bo=function(n){for(var t=n.length,e=new Array(t);t--;)e[t]=n[t];return e}}try{Wo.createElement("div").style.setProperty("opacity",0,"")}catch(Qo){var na=Go.Element.prototype,ta=na.setAttribute,ea=na.setAttributeNS,ra=Go.CSSStyleDeclaration.prototype,ua=ra.setProperty;na.setAttribute=function(n,t){ta.call(this,n,t+"")},na.setAttributeNS=function(n,t,e){ea.call(this,n,t,e+"")},ra.setProperty=function(n,t,e){ua.call(this,n,t+"",e)}}Xo.ascending=function(n,t){return t>n?-1:n>t?1:n>=t?0:0/0},Xo.descending=function(n,t){return n>t?-1:t>n?1:t>=n?0:0/0},Xo.min=function(n,t){var e,r,u=-1,i=n.length;if(1===arguments.length){for(;++u<i&&!(null!=(e=n[u])&&e>=e);)e=void 0;for(;++u<i;)null!=(r=n[u])&&e>r&&(e=r)}else{for(;++u<i&&!(null!=(e=t.call(n,n[u],u))&&e>=e);)e=void 0;for(;++u<i;)null!=(r=t.call(n,n[u],u))&&e>r&&(e=r)}return e},Xo.max=function(n,t){var e,r,u=-1,i=n.length;if(1===arguments.length){for(;++u<i&&!(null!=(e=n[u])&&e>=e);)e=void 0;for(;++u<i;)null!=(r=n[u])&&r>e&&(e=r)}else{for(;++u<i&&!(null!=(e=t.call(n,n[u],u))&&e>=e);)e=void 0;for(;++u<i;)null!=(r=t.call(n,n[u],u))&&r>e&&(e=r)}return e},Xo.extent=function(n,t){var e,r,u,i=-1,o=n.length;if(1===arguments.length){for(;++i<o&&!(null!=(e=u=n[i])&&e>=e);)e=u=void 0;for(;++i<o;)null!=(r=n[i])&&(e>r&&(e=r),r>u&&(u=r))}else{for(;++i<o&&!(null!=(e=u=t.call(n,n[i],i))&&e>=e);)e=void 0;for(;++i<o;)null!=(r=t.call(n,n[i],i))&&(e>r&&(e=r),r>u&&(u=r))}return[e,u]},Xo.sum=function(n,t){var 
e,r=0,u=n.length,i=-1;if(1===arguments.length)for(;++i<u;)isNaN(e=+n[i])||(r+=e);else for(;++i<u;)isNaN(e=+t.call(n,n[i],i))||(r+=e);return r},Xo.mean=function(t,e){var r,u=t.length,i=0,o=-1,a=0;if(1===arguments.length)for(;++o<u;)n(r=t[o])&&(i+=(r-i)/++a);else for(;++o<u;)n(r=e.call(t,t[o],o))&&(i+=(r-i)/++a);return a?i:void 0},Xo.quantile=function(n,t){var e=(n.length-1)*t+1,r=Math.floor(e),u=+n[r-1],i=e-r;return i?u+i*(n[r]-u):u},Xo.median=function(t,e){return arguments.length>1&&(t=t.map(e)),t=t.filter(n),t.length?Xo.quantile(t.sort(Xo.ascending),.5):void 0},Xo.bisector=function(n){return{left:function(t,e,r,u){for(arguments.length<3&&(r=0),arguments.length<4&&(u=t.length);u>r;){var i=r+u>>>1;n.call(t,t[i],i)<e?r=i+1:u=i}return r},right:function(t,e,r,u){for(arguments.length<3&&(r=0),arguments.length<4&&(u=t.length);u>r;){var i=r+u>>>1;e<n.call(t,t[i],i)?u=i:r=i+1}return r}}};var ia=Xo.bisector(function(n){return n});Xo.bisectLeft=ia.left,Xo.bisect=Xo.bisectRight=ia.right,Xo.shuffle=function(n){for(var t,e,r=n.length;r;)e=0|Math.random()*r--,t=n[r],n[r]=n[e],n[e]=t;return n},Xo.permute=function(n,t){for(var e=t.length,r=new Array(e);e--;)r[e]=n[t[e]];return r},Xo.pairs=function(n){for(var t,e=0,r=n.length-1,u=n[0],i=new Array(0>r?0:r);r>e;)i[e]=[t=u,u=n[++e]];return i},Xo.zip=function(){if(!(u=arguments.length))return[];for(var n=-1,e=Xo.min(arguments,t),r=new Array(e);++n<e;)for(var u,i=-1,o=r[n]=new Array(u);++i<u;)o[i]=arguments[i][n];return r},Xo.transpose=function(n){return Xo.zip.apply(Xo,n)},Xo.keys=function(n){var t=[];for(var e in n)t.push(e);return t},Xo.values=function(n){var t=[];for(var e in n)t.push(n[e]);return t},Xo.entries=function(n){var t=[];for(var e in n)t.push({key:e,value:n[e]});return t},Xo.merge=function(n){for(var t,e,r,u=n.length,i=-1,o=0;++i<u;)o+=n[i].length;for(e=new Array(o);--u>=0;)for(r=n[u],t=r.length;--t>=0;)e[--o]=r[t];return e};var 
oa=Math.abs;Xo.range=function(n,t,r){if(arguments.length<3&&(r=1,arguments.length<2&&(t=n,n=0)),1/0===(t-n)/r)throw new Error("infinite range");var u,i=[],o=e(oa(r)),a=-1;if(n*=o,t*=o,r*=o,0>r)for(;(u=n+r*++a)>t;)i.push(u/o);else for(;(u=n+r*++a)<t;)i.push(u/o);return i},Xo.map=function(n){var t=new u;if(n instanceof u)n.forEach(function(n,e){t.set(n,e)});else for(var e in n)t.set(e,n[e]);return t},r(u,{has:i,get:function(n){return this[aa+n]},set:function(n,t){return this[aa+n]=t},remove:o,keys:a,values:function(){var n=[];return this.forEach(function(t,e){n.push(e)}),n},entries:function(){var n=[];return this.forEach(function(t,e){n.push({key:t,value:e})}),n},size:c,empty:s,forEach:function(n){for(var t in this)t.charCodeAt(0)===ca&&n.call(this,t.substring(1),this[t])}});var aa="\x00",ca=aa.charCodeAt(0);Xo.nest=function(){function n(t,a,c){if(c>=o.length)return r?r.call(i,a):e?a.sort(e):a;for(var s,l,f,h,g=-1,p=a.length,v=o[c++],d=new u;++g<p;)(h=d.get(s=v(l=a[g])))?h.push(l):d.set(s,[l]);return t?(l=t(),f=function(e,r){l.set(e,n(t,r,c))}):(l={},f=function(e,r){l[e]=n(t,r,c)}),d.forEach(f),l}function t(n,e){if(e>=o.length)return n;var r=[],u=a[e++];return n.forEach(function(n,u){r.push({key:n,values:t(u,e)})}),u?r.sort(function(n,t){return u(n.key,t.key)}):r}var e,r,i={},o=[],a=[];return i.map=function(t,e){return n(e,t,0)},i.entries=function(e){return t(n(Xo.map,e,0),0)},i.key=function(n){return o.push(n),i},i.sortKeys=function(n){return a[o.length-1]=n,i},i.sortValues=function(n){return e=n,i},i.rollup=function(n){return r=n,i},i},Xo.set=function(n){var t=new l;if(n)for(var e=0,r=n.length;r>e;++e)t.add(n[e]);return t},r(l,{has:i,add:function(n){return this[aa+n]=!0,n},remove:function(n){return n=aa+n,n in this&&delete this[n]},values:a,size:c,empty:s,forEach:function(n){for(var t in this)t.charCodeAt(0)===ca&&n.call(this,t.substring(1))}}),Xo.behavior={},Xo.rebind=function(n,t){for(var e,r=1,u=arguments.length;++r<u;)n[e=arguments[r]]=f(n,t,t[e]);return n};var 
sa=["webkit","ms","moz","Moz","o","O"];Xo.dispatch=function(){for(var n=new p,t=-1,e=arguments.length;++t<e;)n[arguments[t]]=v(n);return n},p.prototype.on=function(n,t){var e=n.indexOf("."),r="";if(e>=0&&(r=n.substring(e+1),n=n.substring(0,e)),n)return arguments.length<2?this[n].on(r):this[n].on(r,t);if(2===arguments.length){if(null==t)for(n in this)this.hasOwnProperty(n)&&this[n].on(r,null);return this}},Xo.event=null,Xo.requote=function(n){return n.replace(la,"\\$&")};var la=/[\\\^\$\*\+\?\|\[\]\(\)\.\{\}]/g,fa={}.__proto__?function(n,t){n.__proto__=t}:function(n,t){for(var e in t)n[e]=t[e]},ha=function(n,t){return t.querySelector(n)},ga=function(n,t){return t.querySelectorAll(n)},pa=Jo[h(Jo,"matchesSelector")],va=function(n,t){return pa.call(n,t)};"function"==typeof Sizzle&&(ha=function(n,t){return Sizzle(n,t)[0]||null},ga=Sizzle,va=Sizzle.matchesSelector),Xo.selection=function(){return xa};var da=Xo.selection.prototype=[];da.select=function(n){var t,e,r,u,i=[];n=M(n);for(var o=-1,a=this.length;++o<a;){i.push(t=[]),t.parentNode=(r=this[o]).parentNode;for(var c=-1,s=r.length;++c<s;)(u=r[c])?(t.push(e=n.call(u,u.__data__,c,o)),e&&"__data__"in u&&(e.__data__=u.__data__)):t.push(null)}return x(i)},da.selectAll=function(n){var t,e,r=[];n=_(n);for(var u=-1,i=this.length;++u<i;)for(var o=this[u],a=-1,c=o.length;++a<c;)(e=o[a])&&(r.push(t=Bo(n.call(e,e.__data__,a,u))),t.parentNode=e);return x(r)};var ma={svg:"http://www.w3.org/2000/svg",xhtml:"http://www.w3.org/1999/xhtml",xlink:"http://www.w3.org/1999/xlink",xml:"http://www.w3.org/XML/1998/namespace",xmlns:"http://www.w3.org/2000/xmlns/"};Xo.ns={prefix:ma,qualify:function(n){var t=n.indexOf(":"),e=n;return t>=0&&(e=n.substring(0,t),n=n.substring(t+1)),ma.hasOwnProperty(e)?{space:ma[e],local:n}:n}},da.attr=function(n,t){if(arguments.length<2){if("string"==typeof n){var e=this.node();return n=Xo.ns.qualify(n),n.local?e.getAttributeNS(n.space,n.local):e.getAttribute(n)}for(t in n)this.each(b(t,n[t]));return this}return 
this.each(b(n,t))},da.classed=function(n,t){if(arguments.length<2){if("string"==typeof n){var e=this.node(),r=(n=k(n)).length,u=-1;if(t=e.classList){for(;++u<r;)if(!t.contains(n[u]))return!1}else for(t=e.getAttribute("class");++u<r;)if(!S(n[u]).test(t))return!1;return!0}for(t in n)this.each(E(t,n[t]));return this}return this.each(E(n,t))},da.style=function(n,t,e){var r=arguments.length;if(3>r){if("string"!=typeof n){2>r&&(t="");for(e in n)this.each(C(e,n[e],t));return this}if(2>r)return Go.getComputedStyle(this.node(),null).getPropertyValue(n);e=""}return this.each(C(n,t,e))},da.property=function(n,t){if(arguments.length<2){if("string"==typeof n)return this.node()[n];for(t in n)this.each(N(t,n[t]));return this}return this.each(N(n,t))},da.text=function(n){return arguments.length?this.each("function"==typeof n?function(){var t=n.apply(this,arguments);this.textContent=null==t?"":t}:null==n?function(){this.textContent=""}:function(){this.textContent=n}):this.node().textContent},da.html=function(n){return arguments.length?this.each("function"==typeof n?function(){var t=n.apply(this,arguments);this.innerHTML=null==t?"":t}:null==n?function(){this.innerHTML=""}:function(){this.innerHTML=n}):this.node().innerHTML},da.append=function(n){return n=L(n),this.select(function(){return this.appendChild(n.apply(this,arguments))})},da.insert=function(n,t){return n=L(n),t=M(t),this.select(function(){return this.insertBefore(n.apply(this,arguments),t.apply(this,arguments)||null)})},da.remove=function(){return this.each(function(){var n=this.parentNode;n&&n.removeChild(this)})},da.data=function(n,t){function e(n,e){var r,i,o,a=n.length,f=e.length,h=Math.min(a,f),g=new Array(f),p=new Array(f),v=new Array(a);if(t){var d,m=new u,y=new 
u,x=[];for(r=-1;++r<a;)d=t.call(i=n[r],i.__data__,r),m.has(d)?v[r]=i:m.set(d,i),x.push(d);for(r=-1;++r<f;)d=t.call(e,o=e[r],r),(i=m.get(d))?(g[r]=i,i.__data__=o):y.has(d)||(p[r]=T(o)),y.set(d,o),m.remove(d);for(r=-1;++r<a;)m.has(x[r])&&(v[r]=n[r])}else{for(r=-1;++r<h;)i=n[r],o=e[r],i?(i.__data__=o,g[r]=i):p[r]=T(o);for(;f>r;++r)p[r]=T(e[r]);for(;a>r;++r)v[r]=n[r]}p.update=g,p.parentNode=g.parentNode=v.parentNode=n.parentNode,c.push(p),s.push(g),l.push(v)}var r,i,o=-1,a=this.length;if(!arguments.length){for(n=new Array(a=(r=this[0]).length);++o<a;)(i=r[o])&&(n[o]=i.__data__);return n}var c=D([]),s=x([]),l=x([]);if("function"==typeof n)for(;++o<a;)e(r=this[o],n.call(r,r.parentNode.__data__,o));else for(;++o<a;)e(r=this[o],n);return s.enter=function(){return c},s.exit=function(){return l},s},da.datum=function(n){return arguments.length?this.property("__data__",n):this.property("__data__")},da.filter=function(n){var t,e,r,u=[];"function"!=typeof n&&(n=q(n));for(var i=0,o=this.length;o>i;i++){u.push(t=[]),t.parentNode=(e=this[i]).parentNode;for(var a=0,c=e.length;c>a;a++)(r=e[a])&&n.call(r,r.__data__,a,i)&&t.push(r)}return x(u)},da.order=function(){for(var n=-1,t=this.length;++n<t;)for(var e,r=this[n],u=r.length-1,i=r[u];--u>=0;)(e=r[u])&&(i&&i!==e.nextSibling&&i.parentNode.insertBefore(e,i),i=e);return this},da.sort=function(n){n=z.apply(this,arguments);for(var t=-1,e=this.length;++t<e;)this[t].sort(n);return this.order()},da.each=function(n){return R(this,function(t,e,r){n.call(t,t.__data__,e,r)})},da.call=function(n){var t=Bo(arguments);return n.apply(t[0]=this,t),this},da.empty=function(){return!this.node()},da.node=function(){for(var n=0,t=this.length;t>n;n++)for(var e=this[n],r=0,u=e.length;u>r;r++){var i=e[r];if(i)return i}return null},da.size=function(){var n=0;return this.each(function(){++n}),n};var 
ya=[];Xo.selection.enter=D,Xo.selection.enter.prototype=ya,ya.append=da.append,ya.empty=da.empty,ya.node=da.node,ya.call=da.call,ya.size=da.size,ya.select=function(n){for(var t,e,r,u,i,o=[],a=-1,c=this.length;++a<c;){r=(u=this[a]).update,o.push(t=[]),t.parentNode=u.parentNode;for(var s=-1,l=u.length;++s<l;)(i=u[s])?(t.push(r[s]=e=n.call(u.parentNode,i.__data__,s,a)),e.__data__=i.__data__):t.push(null)}return x(o)},ya.insert=function(n,t){return arguments.length<2&&(t=P(this)),da.insert.call(this,n,t)},da.transition=function(){for(var n,t,e=ks||++Ls,r=[],u=Es||{time:Date.now(),ease:yu,delay:0,duration:250},i=-1,o=this.length;++i<o;){r.push(n=[]);for(var a=this[i],c=-1,s=a.length;++c<s;)(t=a[c])&&jo(t,c,e,u),n.push(t)}return Do(r,e)},da.interrupt=function(){return this.each(U)},Xo.select=function(n){var t=["string"==typeof n?ha(n,Wo):n];return t.parentNode=Jo,x([t])},Xo.selectAll=function(n){var t=Bo("string"==typeof n?ga(n,Wo):n);return t.parentNode=Jo,x([t])};var xa=Xo.select(Jo);da.on=function(n,t,e){var r=arguments.length;if(3>r){if("string"!=typeof n){2>r&&(t=!1);for(e in n)this.each(j(e,n[e],t));return this}if(2>r)return(r=this.node()["__on"+n])&&r._;e=!1}return this.each(j(n,t,e))};var Ma=Xo.map({mouseenter:"mouseover",mouseleave:"mouseout"});Ma.forEach(function(n){"on"+n in Wo&&Ma.remove(n)});var _a="onselectstart"in Wo?null:h(Jo.style,"userSelect"),ba=0;Xo.mouse=function(n){return Y(n,m())};var wa=/WebKit/.test(Go.navigator.userAgent)?-1:0;Xo.touches=function(n,t){return arguments.length<2&&(t=m().touches),t?Bo(t).map(function(t){var e=Y(n,t);return e.identifier=t.identifier,e}):[]},Xo.behavior.drag=function(){function n(){this.on("mousedown.drag",o).on("touchstart.drag",a)}function t(){return Xo.event.changedTouches[0].identifier}function e(n,t){return Xo.touches(n).filter(function(n){return n.identifier===t})[0]}function r(n,t,e,r){return function(){function o(){var 
n=t(l,g),e=n[0]-v[0],r=n[1]-v[1];d|=e|r,v=n,f({type:"drag",x:n[0]+c[0],y:n[1]+c[1],dx:e,dy:r})}function a(){m.on(e+"."+p,null).on(r+"."+p,null),y(d&&Xo.event.target===h),f({type:"dragend"})}var c,s=this,l=s.parentNode,f=u.of(s,arguments),h=Xo.event.target,g=n(),p=null==g?"drag":"drag-"+g,v=t(l,g),d=0,m=Xo.select(Go).on(e+"."+p,o).on(r+"."+p,a),y=O();i?(c=i.apply(s,arguments),c=[c.x-v[0],c.y-v[1]]):c=[0,0],f({type:"dragstart"})}}var u=y(n,"drag","dragstart","dragend"),i=null,o=r(g,Xo.mouse,"mousemove","mouseup"),a=r(t,e,"touchmove","touchend");return n.origin=function(t){return arguments.length?(i=t,n):i},Xo.rebind(n,u,"on")};var Sa=Math.PI,ka=2*Sa,Ea=Sa/2,Aa=1e-6,Ca=Aa*Aa,Na=Sa/180,La=180/Sa,Ta=Math.SQRT2,qa=2,za=4;Xo.interpolateZoom=function(n,t){function e(n){var t=n*y;if(m){var e=B(v),o=i/(qa*h)*(e*W(Ta*t+v)-$(v));return[r+o*s,u+o*l,i*e/B(Ta*t+v)]}return[r+n*s,u+n*l,i*Math.exp(Ta*t)]}var r=n[0],u=n[1],i=n[2],o=t[0],a=t[1],c=t[2],s=o-r,l=a-u,f=s*s+l*l,h=Math.sqrt(f),g=(c*c-i*i+za*f)/(2*i*qa*h),p=(c*c-i*i-za*f)/(2*c*qa*h),v=Math.log(Math.sqrt(g*g+1)-g),d=Math.log(Math.sqrt(p*p+1)-p),m=d-v,y=(m||Math.log(c/i))/Ta;return e.duration=1e3*y,e},Xo.behavior.zoom=function(){function n(n){n.on(A,s).on(Pa+".zoom",f).on(C,h).on("dblclick.zoom",g).on(L,l)}function t(n){return[(n[0]-S.x)/S.k,(n[1]-S.y)/S.k]}function e(n){return[n[0]*S.k+S.x,n[1]*S.k+S.y]}function r(n){S.k=Math.max(E[0],Math.min(E[1],n))}function u(n,t){t=e(t),S.x+=n[0]-t[0],S.y+=n[1]-t[1]}function i(){_&&_.domain(M.range().map(function(n){return(n-S.x)/S.k}).map(M.invert)),w&&w.domain(b.range().map(function(n){return(n-S.y)/S.k}).map(b.invert))}function o(n){n({type:"zoomstart"})}function a(n){i(),n({type:"zoom",scale:S.k,translate:[S.x,S.y]})}function c(n){n({type:"zoomend"})}function s(){function n(){l=1,u(Xo.mouse(r),g),a(i)}function e(){f.on(C,Go===r?h:null).on(N,null),p(l&&Xo.event.target===s),c(i)}var 
r=this,i=T.of(r,arguments),s=Xo.event.target,l=0,f=Xo.select(Go).on(C,n).on(N,e),g=t(Xo.mouse(r)),p=O();U.call(r),o(i)}function l(){function n(){var n=Xo.touches(g);return h=S.k,n.forEach(function(n){n.identifier in v&&(v[n.identifier]=t(n))}),n}function e(){for(var t=Xo.event.changedTouches,e=0,i=t.length;i>e;++e)v[t[e].identifier]=null;var o=n(),c=Date.now();if(1===o.length){if(500>c-x){var s=o[0],l=v[s.identifier];r(2*S.k),u(s,l),d(),a(p)}x=c}else if(o.length>1){var s=o[0],f=o[1],h=s[0]-f[0],g=s[1]-f[1];m=h*h+g*g}}function i(){for(var n,t,e,i,o=Xo.touches(g),c=0,s=o.length;s>c;++c,i=null)if(e=o[c],i=v[e.identifier]){if(t)break;n=e,t=i}if(i){var l=(l=e[0]-n[0])*l+(l=e[1]-n[1])*l,f=m&&Math.sqrt(l/m);n=[(n[0]+e[0])/2,(n[1]+e[1])/2],t=[(t[0]+i[0])/2,(t[1]+i[1])/2],r(f*h)}x=null,u(n,t),a(p)}function f(){if(Xo.event.touches.length){for(var t=Xo.event.changedTouches,e=0,r=t.length;r>e;++e)delete v[t[e].identifier];for(var u in v)return void n()}b.on(M,null).on(_,null),w.on(A,s).on(L,l),k(),c(p)}var h,g=this,p=T.of(g,arguments),v={},m=0,y=Xo.event.changedTouches[0].identifier,M="touchmove.zoom-"+y,_="touchend.zoom-"+y,b=Xo.select(Go).on(M,i).on(_,f),w=Xo.select(g).on(A,null).on(L,e),k=O();U.call(g),e(),o(p)}function f(){var n=T.of(this,arguments);m?clearTimeout(m):(U.call(this),o(n)),m=setTimeout(function(){m=null,c(n)},50),d();var e=v||Xo.mouse(this);p||(p=t(e)),r(Math.pow(2,.002*Ra())*S.k),u(e,p),a(n)}function h(){p=null}function g(){var n=T.of(this,arguments),e=Xo.mouse(this),i=t(e),s=Math.log(S.k)/Math.LN2;o(n),r(Math.pow(2,Xo.event.shiftKey?Math.ceil(s)-1:Math.floor(s)+1)),u(e,i),a(n),c(n)}var p,v,m,x,M,_,b,w,S={x:0,y:0,k:1},k=[960,500],E=Da,A="mousedown.zoom",C="mousemove.zoom",N="mouseup.zoom",L="touchstart.zoom",T=y(n,"zoomstart","zoom","zoomend");return n.event=function(n){n.each(function(){var n=T.of(this,arguments),t=S;ks?Xo.select(this).transition().each("start.zoom",function(){S=this.__chart__||{x:0,y:0,k:1},o(n)}).tween("zoom:zoom",function(){var 
e=k[0],r=k[1],u=e/2,i=r/2,o=Xo.interpolateZoom([(u-S.x)/S.k,(i-S.y)/S.k,e/S.k],[(u-t.x)/t.k,(i-t.y)/t.k,e/t.k]);return function(t){var r=o(t),c=e/r[2];this.__chart__=S={x:u-r[0]*c,y:i-r[1]*c,k:c},a(n)}}).each("end.zoom",function(){c(n)}):(this.__chart__=S,o(n),a(n),c(n))})},n.translate=function(t){return arguments.length?(S={x:+t[0],y:+t[1],k:S.k},i(),n):[S.x,S.y]},n.scale=function(t){return arguments.length?(S={x:S.x,y:S.y,k:+t},i(),n):S.k},n.scaleExtent=function(t){return arguments.length?(E=null==t?Da:[+t[0],+t[1]],n):E},n.center=function(t){return arguments.length?(v=t&&[+t[0],+t[1]],n):v},n.size=function(t){return arguments.length?(k=t&&[+t[0],+t[1]],n):k},n.x=function(t){return arguments.length?(_=t,M=t.copy(),S={x:0,y:0,k:1},n):_},n.y=function(t){return arguments.length?(w=t,b=t.copy(),S={x:0,y:0,k:1},n):w},Xo.rebind(n,T,"on")};var Ra,Da=[0,1/0],Pa="onwheel"in Wo?(Ra=function(){return-Xo.event.deltaY*(Xo.event.deltaMode?120:1)},"wheel"):"onmousewheel"in Wo?(Ra=function(){return Xo.event.wheelDelta},"mousewheel"):(Ra=function(){return-Xo.event.detail},"MozMousePixelScroll");G.prototype.toString=function(){return this.rgb()+""},Xo.hsl=function(n,t,e){return 1===arguments.length?n instanceof Q?K(n.h,n.s,n.l):dt(""+n,mt,K):K(+n,+t,+e)};var Ua=Q.prototype=new G;Ua.brighter=function(n){return n=Math.pow(.7,arguments.length?n:1),K(this.h,this.s,this.l/n)},Ua.darker=function(n){return n=Math.pow(.7,arguments.length?n:1),K(this.h,this.s,n*this.l)},Ua.rgb=function(){return nt(this.h,this.s,this.l)},Xo.hcl=function(n,t,e){return 1===arguments.length?n instanceof et?tt(n.h,n.c,n.l):n instanceof it?at(n.l,n.a,n.b):at((n=yt((n=Xo.rgb(n)).r,n.g,n.b)).l,n.a,n.b):tt(+n,+t,+e)};var ja=et.prototype=new G;ja.brighter=function(n){return tt(this.h,this.c,Math.min(100,this.l+Ha*(arguments.length?n:1)))},ja.darker=function(n){return tt(this.h,this.c,Math.max(0,this.l-Ha*(arguments.length?n:1)))},ja.rgb=function(){return rt(this.h,this.c,this.l).rgb()},Xo.lab=function(n,t,e){return 
1===arguments.length?n instanceof it?ut(n.l,n.a,n.b):n instanceof et?rt(n.l,n.c,n.h):yt((n=Xo.rgb(n)).r,n.g,n.b):ut(+n,+t,+e)};var Ha=18,Fa=.95047,Oa=1,Ya=1.08883,Ia=it.prototype=new G;Ia.brighter=function(n){return ut(Math.min(100,this.l+Ha*(arguments.length?n:1)),this.a,this.b)},Ia.darker=function(n){return ut(Math.max(0,this.l-Ha*(arguments.length?n:1)),this.a,this.b)},Ia.rgb=function(){return ot(this.l,this.a,this.b)},Xo.rgb=function(n,t,e){return 1===arguments.length?n instanceof pt?gt(n.r,n.g,n.b):dt(""+n,gt,nt):gt(~~n,~~t,~~e)};var Za=pt.prototype=new G;Za.brighter=function(n){n=Math.pow(.7,arguments.length?n:1);var t=this.r,e=this.g,r=this.b,u=30;return t||e||r?(t&&u>t&&(t=u),e&&u>e&&(e=u),r&&u>r&&(r=u),gt(Math.min(255,~~(t/n)),Math.min(255,~~(e/n)),Math.min(255,~~(r/n)))):gt(u,u,u)},Za.darker=function(n){return n=Math.pow(.7,arguments.length?n:1),gt(~~(n*this.r),~~(n*this.g),~~(n*this.b))},Za.hsl=function(){return mt(this.r,this.g,this.b)},Za.toString=function(){return"#"+vt(this.r)+vt(this.g)+vt(this.b)};var 
Va=Xo.map({aliceblue:15792383,antiquewhite:16444375,aqua:65535,aquamarine:8388564,azure:15794175,beige:16119260,bisque:16770244,black:0,blanchedalmond:16772045,blue:255,blueviolet:9055202,brown:10824234,burlywood:14596231,cadetblue:6266528,chartreuse:8388352,chocolate:13789470,coral:16744272,cornflowerblue:6591981,cornsilk:16775388,crimson:14423100,cyan:65535,darkblue:139,darkcyan:35723,darkgoldenrod:12092939,darkgray:11119017,darkgreen:25600,darkgrey:11119017,darkkhaki:12433259,darkmagenta:9109643,darkolivegreen:5597999,darkorange:16747520,darkorchid:10040012,darkred:9109504,darksalmon:15308410,darkseagreen:9419919,darkslateblue:4734347,darkslategray:3100495,darkslategrey:3100495,darkturquoise:52945,darkviolet:9699539,deeppink:16716947,deepskyblue:49151,dimgray:6908265,dimgrey:6908265,dodgerblue:2003199,firebrick:11674146,floralwhite:16775920,forestgreen:2263842,fuchsia:16711935,gainsboro:14474460,ghostwhite:16316671,gold:16766720,goldenrod:14329120,gray:8421504,green:32768,greenyellow:11403055,grey:8421504,honeydew:15794160,hotpink:16738740,indianred:13458524,indigo:4915330,ivory:16777200,khaki:15787660,lavender:15132410,lavenderblush:16773365,lawngreen:8190976,lemonchiffon:16775885,lightblue:11393254,lightcoral:15761536,lightcyan:14745599,lightgoldenrodyellow:16448210,lightgray:13882323,lightgreen:9498256,lightgrey:13882323,lightpink:16758465,lightsalmon:16752762,lightseagreen:2142890,lightskyblue:8900346,lightslategray:7833753,lightslategrey:7833753,lightsteelblue:11584734,lightyellow:16777184,lime:65280,limegreen:3329330,linen:16445670,magenta:16711935,maroon:8388608,mediumaquamarine:6737322,mediumblue:205,mediumorchid:12211667,mediumpurple:9662683,mediumseagreen:3978097,mediumslateblue:8087790,mediumspringgreen:64154,mediumturquoise:4772300,mediumvioletred:13047173,midnightblue:1644912,mintcream:16121850,mistyrose:16770273,moccasin:16770229,navajowhite:16768685,navy:128,oldlace:16643558,olive:8421376,olivedrab:7048739,orange:16753920,orangered:16729344,orchid:
14315734,palegoldenrod:15657130,palegreen:10025880,paleturquoise:11529966,palevioletred:14381203,papayawhip:16773077,peachpuff:16767673,peru:13468991,pink:16761035,plum:14524637,powderblue:11591910,purple:8388736,red:16711680,rosybrown:12357519,royalblue:4286945,saddlebrown:9127187,salmon:16416882,sandybrown:16032864,seagreen:3050327,seashell:16774638,sienna:10506797,silver:12632256,skyblue:8900331,slateblue:6970061,slategray:7372944,slategrey:7372944,snow:16775930,springgreen:65407,steelblue:4620980,tan:13808780,teal:32896,thistle:14204888,tomato:16737095,turquoise:4251856,violet:15631086,wheat:16113331,white:16777215,whitesmoke:16119285,yellow:16776960,yellowgreen:10145074});Va.forEach(function(n,t){Va.set(n,ft(t))}),Xo.functor=_t,Xo.xhr=wt(bt),Xo.dsv=function(n,t){function e(n,e,i){arguments.length<3&&(i=e,e=null);var o=St(n,t,null==e?r:u(e),i);return o.row=function(n){return arguments.length?o.response(null==(e=n)?r:u(n)):e},o}function r(n){return e.parse(n.responseText)}function u(n){return function(t){return e.parse(t.responseText,n)}}function i(t){return t.map(o).join(n)}function o(n){return a.test(n)?'"'+n.replace(/\"/g,'""')+'"':n}var a=new RegExp('["'+n+"\n]"),c=n.charCodeAt(0);return e.parse=function(n,t){var r;return e.parseRows(n,function(n,e){if(r)return r(n,e-1);var u=new Function("d","return {"+n.map(function(n,t){return JSON.stringify(n)+": d["+t+"]"}).join(",")+"}");r=t?function(n,e){return t(u(n),e)}:u})},e.parseRows=function(n,t){function e(){if(l>=s)return o;if(u)return u=!1,i;var t=l;if(34===n.charCodeAt(t)){for(var e=t;e++<s;)if(34===n.charCodeAt(e)){if(34!==n.charCodeAt(e+1))break;++e}l=e+2;var r=n.charCodeAt(e+1);return 13===r?(u=!0,10===n.charCodeAt(e+2)&&++l):10===r&&(u=!0),n.substring(t+1,e).replace(/""/g,'"')}for(;s>l;){var r=n.charCodeAt(l++),a=1;if(10===r)u=!0;else if(13===r)u=!0,10===n.charCodeAt(l)&&(++l,++a);else if(r!==c)continue;return n.substring(t,l-a)}return n.substring(t)}for(var 
r,u,i={},o={},a=[],s=n.length,l=0,f=0;(r=e())!==o;){for(var h=[];r!==i&&r!==o;)h.push(r),r=e();(!t||(h=t(h,f++)))&&a.push(h)}return a},e.format=function(t){if(Array.isArray(t[0]))return e.formatRows(t);var r=new l,u=[];return t.forEach(function(n){for(var t in n)r.has(t)||u.push(r.add(t))}),[u.map(o).join(n)].concat(t.map(function(t){return u.map(function(n){return o(t[n])}).join(n)})).join("\n")},e.formatRows=function(n){return n.map(i).join("\n")},e},Xo.csv=Xo.dsv(",","text/csv"),Xo.tsv=Xo.dsv(" ","text/tab-separated-values");var Xa,$a,Ba,Wa,Ja,Ga=Go[h(Go,"requestAnimationFrame")]||function(n){setTimeout(n,17)};Xo.timer=function(n,t,e){var r=arguments.length;2>r&&(t=0),3>r&&(e=Date.now());var u=e+t,i={c:n,t:u,f:!1,n:null};$a?$a.n=i:Xa=i,$a=i,Ba||(Wa=clearTimeout(Wa),Ba=1,Ga(Et))},Xo.timer.flush=function(){At(),Ct()},Xo.round=function(n,t){return t?Math.round(n*(t=Math.pow(10,t)))/t:Math.round(n)};var Ka=["y","z","a","f","p","n","\xb5","m","","k","M","G","T","P","E","Z","Y"].map(Lt);Xo.formatPrefix=function(n,t){var e=0;return n&&(0>n&&(n*=-1),t&&(n=Xo.round(n,Nt(n,t))),e=1+Math.floor(1e-12+Math.log(n)/Math.LN10),e=Math.max(-24,Math.min(24,3*Math.floor((0>=e?e+1:e-1)/3)))),Ka[8+e/3]};var Qa=/(?:([^{])?([<>=^]))?([+\- ])?([$#])?(0)?(\d+)?(,)?(\.-?\d+)?([a-z%])?/i,nc=Xo.map({b:function(n){return n.toString(2)},c:function(n){return String.fromCharCode(n)},o:function(n){return n.toString(8)},x:function(n){return n.toString(16)},X:function(n){return n.toString(16).toUpperCase()},g:function(n,t){return n.toPrecision(t)},e:function(n,t){return n.toExponential(t)},f:function(n,t){return n.toFixed(t)},r:function(n,t){return(n=Xo.round(n,Nt(n,t))).toFixed(Math.max(0,Math.min(20,Nt(n*(1+1e-15),t))))}}),tc=Xo.time={},ec=Date;zt.prototype={getDate:function(){return this._.getUTCDate()},getDay:function(){return this._.getUTCDay()},getFullYear:function(){return this._.getUTCFullYear()},getHours:function(){return this._.getUTCHours()},getMilliseconds:function(){return 
this._.getUTCMilliseconds()},getMinutes:function(){return this._.getUTCMinutes()},getMonth:function(){return this._.getUTCMonth()},getSeconds:function(){return this._.getUTCSeconds()},getTime:function(){return this._.getTime()},getTimezoneOffset:function(){return 0},valueOf:function(){return this._.valueOf()},setDate:function(){rc.setUTCDate.apply(this._,arguments)},setDay:function(){rc.setUTCDay.apply(this._,arguments)},setFullYear:function(){rc.setUTCFullYear.apply(this._,arguments)},setHours:function(){rc.setUTCHours.apply(this._,arguments)},setMilliseconds:function(){rc.setUTCMilliseconds.apply(this._,arguments)},setMinutes:function(){rc.setUTCMinutes.apply(this._,arguments)},setMonth:function(){rc.setUTCMonth.apply(this._,arguments)},setSeconds:function(){rc.setUTCSeconds.apply(this._,arguments)},setTime:function(){rc.setTime.apply(this._,arguments)}};var rc=Date.prototype;tc.year=Rt(function(n){return n=tc.day(n),n.setMonth(0,1),n},function(n,t){n.setFullYear(n.getFullYear()+t)},function(n){return n.getFullYear()}),tc.years=tc.year.range,tc.years.utc=tc.year.utc.range,tc.day=Rt(function(n){var t=new ec(2e3,0);return t.setFullYear(n.getFullYear(),n.getMonth(),n.getDate()),t},function(n,t){n.setDate(n.getDate()+t)},function(n){return n.getDate()-1}),tc.days=tc.day.range,tc.days.utc=tc.day.utc.range,tc.dayOfYear=function(n){var t=tc.year(n);return Math.floor((n-t-6e4*(n.getTimezoneOffset()-t.getTimezoneOffset()))/864e5)},["sunday","monday","tuesday","wednesday","thursday","friday","saturday"].forEach(function(n,t){t=7-t;var e=tc[n]=Rt(function(n){return(n=tc.day(n)).setDate(n.getDate()-(n.getDay()+t)%7),n},function(n,t){n.setDate(n.getDate()+7*Math.floor(t))},function(n){var e=tc.year(n).getDay();return Math.floor((tc.dayOfYear(n)+(e+t)%7)/7)-(e!==t)});tc[n+"s"]=e.range,tc[n+"s"].utc=e.utc.range,tc[n+"OfYear"]=function(n){var e=tc.year(n).getDay();return 
Math.floor((tc.dayOfYear(n)+(e+t)%7)/7)}}),tc.week=tc.sunday,tc.weeks=tc.sunday.range,tc.weeks.utc=tc.sunday.utc.range,tc.weekOfYear=tc.sundayOfYear;var uc={"-":"",_:" ",0:"0"},ic=/^\s*\d+/,oc=/^%/;Xo.locale=function(n){return{numberFormat:Tt(n),timeFormat:Pt(n)}};var ac=Xo.locale({decimal:".",thousands:",",grouping:[3],currency:["$",""],dateTime:"%a %b %e %X %Y",date:"%m/%d/%Y",time:"%H:%M:%S",periods:["AM","PM"],days:["Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"],shortDays:["Sun","Mon","Tue","Wed","Thu","Fri","Sat"],months:["January","February","March","April","May","June","July","August","September","October","November","December"],shortMonths:["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"]});Xo.format=ac.numberFormat,Xo.geo={},re.prototype={s:0,t:0,add:function(n){ue(n,this.t,cc),ue(cc.s,this.s,this),this.s?this.t+=cc.t:this.s=cc.t},reset:function(){this.s=this.t=0},valueOf:function(){return this.s}};var cc=new re;Xo.geo.stream=function(n,t){n&&sc.hasOwnProperty(n.type)?sc[n.type](n,t):ie(n,t)};var sc={Feature:function(n,t){ie(n.geometry,t)},FeatureCollection:function(n,t){for(var e=n.features,r=-1,u=e.length;++r<u;)ie(e[r].geometry,t)}},lc={Sphere:function(n,t){t.sphere()},Point:function(n,t){n=n.coordinates,t.point(n[0],n[1],n[2])},MultiPoint:function(n,t){for(var e=n.coordinates,r=-1,u=e.length;++r<u;)n=e[r],t.point(n[0],n[1],n[2])},LineString:function(n,t){oe(n.coordinates,t,0)},MultiLineString:function(n,t){for(var e=n.coordinates,r=-1,u=e.length;++r<u;)oe(e[r],t,0)},Polygon:function(n,t){ae(n.coordinates,t)},MultiPolygon:function(n,t){for(var e=n.coordinates,r=-1,u=e.length;++r<u;)ae(e[r],t)},GeometryCollection:function(n,t){for(var e=n.geometries,r=-1,u=e.length;++r<u;)ie(e[r],t)}};Xo.geo.area=function(n){return fc=0,Xo.geo.stream(n,gc),fc};var fc,hc=new re,gc={sphere:function(){fc+=4*Sa},point:g,lineStart:g,lineEnd:g,polygonStart:function(){hc.reset(),gc.lineStart=ce},polygonEnd:function(){var 
n=2*hc;fc+=0>n?4*Sa+n:n,gc.lineStart=gc.lineEnd=gc.point=g}};Xo.geo.bounds=function(){function n(n,t){x.push(M=[l=n,h=n]),f>t&&(f=t),t>g&&(g=t)}function t(t,e){var r=se([t*Na,e*Na]);if(m){var u=fe(m,r),i=[u[1],-u[0],0],o=fe(i,u);pe(o),o=ve(o);var c=t-p,s=c>0?1:-1,v=o[0]*La*s,d=oa(c)>180;if(d^(v>s*p&&s*t>v)){var y=o[1]*La;y>g&&(g=y)}else if(v=(v+360)%360-180,d^(v>s*p&&s*t>v)){var y=-o[1]*La;f>y&&(f=y)}else f>e&&(f=e),e>g&&(g=e);d?p>t?a(l,t)>a(l,h)&&(h=t):a(t,h)>a(l,h)&&(l=t):h>=l?(l>t&&(l=t),t>h&&(h=t)):t>p?a(l,t)>a(l,h)&&(h=t):a(t,h)>a(l,h)&&(l=t)}else n(t,e);m=r,p=t}function e(){_.point=t}function r(){M[0]=l,M[1]=h,_.point=n,m=null}function u(n,e){if(m){var r=n-p;y+=oa(r)>180?r+(r>0?360:-360):r}else v=n,d=e;gc.point(n,e),t(n,e)}function i(){gc.lineStart()}function o(){u(v,d),gc.lineEnd(),oa(y)>Aa&&(l=-(h=180)),M[0]=l,M[1]=h,m=null}function a(n,t){return(t-=n)<0?t+360:t}function c(n,t){return n[0]-t[0]}function s(n,t){return t[0]<=t[1]?t[0]<=n&&n<=t[1]:n<t[0]||t[1]<n}var l,f,h,g,p,v,d,m,y,x,M,_={point:n,lineStart:e,lineEnd:r,polygonStart:function(){_.point=u,_.lineStart=i,_.lineEnd=o,y=0,gc.polygonStart()},polygonEnd:function(){gc.polygonEnd(),_.point=n,_.lineStart=e,_.lineEnd=r,0>hc?(l=-(h=180),f=-(g=90)):y>Aa?g=90:-Aa>y&&(f=-90),M[0]=l,M[1]=h}};return function(n){g=h=-(l=f=1/0),x=[],Xo.geo.stream(n,_);var t=x.length;if(t){x.sort(c);for(var e,r=1,u=x[0],i=[u];t>r;++r)e=x[r],s(e[0],u)||s(e[1],u)?(a(u[0],e[1])>a(u[0],u[1])&&(u[1]=e[1]),a(e[0],u[1])>a(u[0],u[1])&&(u[0]=e[0])):i.push(u=e);for(var o,e,p=-1/0,t=i.length-1,r=0,u=i[t];t>=r;u=e,++r)e=i[r],(o=a(u[1],e[0]))>p&&(p=o,l=e[0],h=u[1])}return x=M=null,1/0===l||1/0===f?[[0/0,0/0],[0/0,0/0]]:[[l,f],[h,g]]}}(),Xo.geo.centroid=function(n){pc=vc=dc=mc=yc=xc=Mc=_c=bc=wc=Sc=0,Xo.geo.stream(n,kc);var t=bc,e=wc,r=Sc,u=t*t+e*e+r*r;return Ca>u&&(t=xc,e=Mc,r=_c,Aa>vc&&(t=dc,e=mc,r=yc),u=t*t+e*e+r*r,Ca>u)?[0/0,0/0]:[Math.atan2(e,t)*La,X(r/Math.sqrt(u))*La]};var 
pc,vc,dc,mc,yc,xc,Mc,_c,bc,wc,Sc,kc={sphere:g,point:me,lineStart:xe,lineEnd:Me,polygonStart:function(){kc.lineStart=_e},polygonEnd:function(){kc.lineStart=xe}},Ec=Ee(be,Te,ze,[-Sa,-Sa/2]),Ac=1e9;Xo.geo.clipExtent=function(){var n,t,e,r,u,i,o={stream:function(n){return u&&(u.valid=!1),u=i(n),u.valid=!0,u},extent:function(a){return arguments.length?(i=Pe(n=+a[0][0],t=+a[0][1],e=+a[1][0],r=+a[1][1]),u&&(u.valid=!1,u=null),o):[[n,t],[e,r]]}};return o.extent([[0,0],[960,500]])},(Xo.geo.conicEqualArea=function(){return je(He)}).raw=He,Xo.geo.albers=function(){return Xo.geo.conicEqualArea().rotate([96,0]).center([-.6,38.7]).parallels([29.5,45.5]).scale(1070)},Xo.geo.albersUsa=function(){function n(n){var i=n[0],o=n[1];return t=null,e(i,o),t||(r(i,o),t)||u(i,o),t}var t,e,r,u,i=Xo.geo.albers(),o=Xo.geo.conicEqualArea().rotate([154,0]).center([-2,58.5]).parallels([55,65]),a=Xo.geo.conicEqualArea().rotate([157,0]).center([-3,19.9]).parallels([8,18]),c={point:function(n,e){t=[n,e]}};return n.invert=function(n){var t=i.scale(),e=i.translate(),r=(n[0]-e[0])/t,u=(n[1]-e[1])/t;return(u>=.12&&.234>u&&r>=-.425&&-.214>r?o:u>=.166&&.234>u&&r>=-.214&&-.115>r?a:i).invert(n)},n.stream=function(n){var t=i.stream(n),e=o.stream(n),r=a.stream(n);return{point:function(n,u){t.point(n,u),e.point(n,u),r.point(n,u)},sphere:function(){t.sphere(),e.sphere(),r.sphere()},lineStart:function(){t.lineStart(),e.lineStart(),r.lineStart()},lineEnd:function(){t.lineEnd(),e.lineEnd(),r.lineEnd()},polygonStart:function(){t.polygonStart(),e.polygonStart(),r.polygonStart()},polygonEnd:function(){t.polygonEnd(),e.polygonEnd(),r.polygonEnd()}}},n.precision=function(t){return arguments.length?(i.precision(t),o.precision(t),a.precision(t),n):i.precision()},n.scale=function(t){return arguments.length?(i.scale(t),o.scale(.35*t),a.scale(t),n.translate(i.translate())):i.scale()},n.translate=function(t){if(!arguments.length)return i.translate();var s=i.scale(),l=+t[0],f=+t[1];return 
e=i.translate(t).clipExtent([[l-.455*s,f-.238*s],[l+.455*s,f+.238*s]]).stream(c).point,r=o.translate([l-.307*s,f+.201*s]).clipExtent([[l-.425*s+Aa,f+.12*s+Aa],[l-.214*s-Aa,f+.234*s-Aa]]).stream(c).point,u=a.translate([l-.205*s,f+.212*s]).clipExtent([[l-.214*s+Aa,f+.166*s+Aa],[l-.115*s-Aa,f+.234*s-Aa]]).stream(c).point,n},n.scale(1070)};var Cc,Nc,Lc,Tc,qc,zc,Rc={point:g,lineStart:g,lineEnd:g,polygonStart:function(){Nc=0,Rc.lineStart=Fe},polygonEnd:function(){Rc.lineStart=Rc.lineEnd=Rc.point=g,Cc+=oa(Nc/2)}},Dc={point:Oe,lineStart:g,lineEnd:g,polygonStart:g,polygonEnd:g},Pc={point:Ze,lineStart:Ve,lineEnd:Xe,polygonStart:function(){Pc.lineStart=$e},polygonEnd:function(){Pc.point=Ze,Pc.lineStart=Ve,Pc.lineEnd=Xe}};Xo.geo.path=function(){function n(n){return n&&("function"==typeof a&&i.pointRadius(+a.apply(this,arguments)),o&&o.valid||(o=u(i)),Xo.geo.stream(n,o)),i.result()}function t(){return o=null,n}var e,r,u,i,o,a=4.5;return n.area=function(n){return Cc=0,Xo.geo.stream(n,u(Rc)),Cc},n.centroid=function(n){return dc=mc=yc=xc=Mc=_c=bc=wc=Sc=0,Xo.geo.stream(n,u(Pc)),Sc?[bc/Sc,wc/Sc]:_c?[xc/_c,Mc/_c]:yc?[dc/yc,mc/yc]:[0/0,0/0]},n.bounds=function(n){return qc=zc=-(Lc=Tc=1/0),Xo.geo.stream(n,u(Dc)),[[Lc,Tc],[qc,zc]]},n.projection=function(n){return arguments.length?(u=(e=n)?n.stream||Je(n):bt,t()):e},n.context=function(n){return arguments.length?(i=null==(r=n)?new Ye:new Be(n),"function"!=typeof a&&i.pointRadius(a),t()):r},n.pointRadius=function(t){return arguments.length?(a="function"==typeof t?t:(i.pointRadius(+t),+t),n):a},n.projection(Xo.geo.albersUsa()).context(null)},Xo.geo.transform=function(n){return{stream:function(t){var e=new Ge(t);for(var r in n)e[r]=n[r];return 
e}}},Ge.prototype={point:function(n,t){this.stream.point(n,t)},sphere:function(){this.stream.sphere()},lineStart:function(){this.stream.lineStart()},lineEnd:function(){this.stream.lineEnd()},polygonStart:function(){this.stream.polygonStart()},polygonEnd:function(){this.stream.polygonEnd()}},Xo.geo.projection=Qe,Xo.geo.projectionMutator=nr,(Xo.geo.equirectangular=function(){return Qe(er)}).raw=er.invert=er,Xo.geo.rotation=function(n){function t(t){return t=n(t[0]*Na,t[1]*Na),t[0]*=La,t[1]*=La,t}return n=ur(n[0]%360*Na,n[1]*Na,n.length>2?n[2]*Na:0),t.invert=function(t){return t=n.invert(t[0]*Na,t[1]*Na),t[0]*=La,t[1]*=La,t},t},rr.invert=er,Xo.geo.circle=function(){function n(){var n="function"==typeof r?r.apply(this,arguments):r,t=ur(-n[0]*Na,-n[1]*Na,0).invert,u=[];return e(null,null,1,{point:function(n,e){u.push(n=t(n,e)),n[0]*=La,n[1]*=La}}),{type:"Polygon",coordinates:[u]}}var t,e,r=[0,0],u=6;return n.origin=function(t){return arguments.length?(r=t,n):r},n.angle=function(r){return arguments.length?(e=cr((t=+r)*Na,u*Na),n):t},n.precision=function(r){return arguments.length?(e=cr(t*Na,(u=+r)*Na),n):u},n.angle(90)},Xo.geo.distance=function(n,t){var e,r=(t[0]-n[0])*Na,u=n[1]*Na,i=t[1]*Na,o=Math.sin(r),a=Math.cos(r),c=Math.sin(u),s=Math.cos(u),l=Math.sin(i),f=Math.cos(i);return Math.atan2(Math.sqrt((e=f*o)*e+(e=s*l-c*f*a)*e),c*l+s*f*a)},Xo.geo.graticule=function(){function n(){return{type:"MultiLineString",coordinates:t()}}function t(){return Xo.range(Math.ceil(i/d)*d,u,d).map(h).concat(Xo.range(Math.ceil(s/m)*m,c,m).map(g)).concat(Xo.range(Math.ceil(r/p)*p,e,p).filter(function(n){return oa(n%d)>Aa}).map(l)).concat(Xo.range(Math.ceil(a/v)*v,o,v).filter(function(n){return oa(n%m)>Aa}).map(f))}var e,r,u,i,o,a,c,s,l,f,h,g,p=10,v=p,d=90,m=360,y=2.5;return n.lines=function(){return 
t().map(function(n){return{type:"LineString",coordinates:n}})},n.outline=function(){return{type:"Polygon",coordinates:[h(i).concat(g(c).slice(1),h(u).reverse().slice(1),g(s).reverse().slice(1))]}},n.extent=function(t){return arguments.length?n.majorExtent(t).minorExtent(t):n.minorExtent()},n.majorExtent=function(t){return arguments.length?(i=+t[0][0],u=+t[1][0],s=+t[0][1],c=+t[1][1],i>u&&(t=i,i=u,u=t),s>c&&(t=s,s=c,c=t),n.precision(y)):[[i,s],[u,c]]},n.minorExtent=function(t){return arguments.length?(r=+t[0][0],e=+t[1][0],a=+t[0][1],o=+t[1][1],r>e&&(t=r,r=e,e=t),a>o&&(t=a,a=o,o=t),n.precision(y)):[[r,a],[e,o]]},n.step=function(t){return arguments.length?n.majorStep(t).minorStep(t):n.minorStep()},n.majorStep=function(t){return arguments.length?(d=+t[0],m=+t[1],n):[d,m]},n.minorStep=function(t){return arguments.length?(p=+t[0],v=+t[1],n):[p,v]},n.precision=function(t){return arguments.length?(y=+t,l=lr(a,o,90),f=fr(r,e,y),h=lr(s,c,90),g=fr(i,u,y),n):y},n.majorExtent([[-180,-90+Aa],[180,90-Aa]]).minorExtent([[-180,-80-Aa],[180,80+Aa]])},Xo.geo.greatArc=function(){function n(){return{type:"LineString",coordinates:[t||r.apply(this,arguments),e||u.apply(this,arguments)]}}var t,e,r=hr,u=gr;return n.distance=function(){return Xo.geo.distance(t||r.apply(this,arguments),e||u.apply(this,arguments))},n.source=function(e){return arguments.length?(r=e,t="function"==typeof e?null:e,n):r},n.target=function(t){return arguments.length?(u=t,e="function"==typeof t?null:t,n):u},n.precision=function(){return arguments.length?n:0},n},Xo.geo.interpolate=function(n,t){return pr(n[0]*Na,n[1]*Na,t[0]*Na,t[1]*Na)},Xo.geo.length=function(n){return Uc=0,Xo.geo.stream(n,jc),Uc};var Uc,jc={sphere:g,point:g,lineStart:vr,lineEnd:g,polygonStart:g,polygonEnd:g},Hc=dr(function(n){return Math.sqrt(2/(1+n))},function(n){return 2*Math.asin(n/2)});(Xo.geo.azimuthalEqualArea=function(){return Qe(Hc)}).raw=Hc;var Fc=dr(function(n){var t=Math.acos(n);return 
t&&t/Math.sin(t)},bt);(Xo.geo.azimuthalEquidistant=function(){return Qe(Fc)}).raw=Fc,(Xo.geo.conicConformal=function(){return je(mr)}).raw=mr,(Xo.geo.conicEquidistant=function(){return je(yr)}).raw=yr;var Oc=dr(function(n){return 1/n},Math.atan);(Xo.geo.gnomonic=function(){return Qe(Oc)}).raw=Oc,xr.invert=function(n,t){return[n,2*Math.atan(Math.exp(t))-Ea]},(Xo.geo.mercator=function(){return Mr(xr)}).raw=xr;var Yc=dr(function(){return 1},Math.asin);(Xo.geo.orthographic=function(){return Qe(Yc)}).raw=Yc;var Ic=dr(function(n){return 1/(1+n)},function(n){return 2*Math.atan(n)});(Xo.geo.stereographic=function(){return Qe(Ic)}).raw=Ic,_r.invert=function(n,t){return[-t,2*Math.atan(Math.exp(n))-Ea]},(Xo.geo.transverseMercator=function(){var n=Mr(_r),t=n.center,e=n.rotate;return n.center=function(n){return n?t([-n[1],n[0]]):(n=t(),[-n[1],n[0]])},n.rotate=function(n){return n?e([n[0],n[1],n.length>2?n[2]+90:90]):(n=e(),[n[0],n[1],n[2]-90])},n.rotate([0,0])}).raw=_r,Xo.geom={},Xo.geom.hull=function(n){function t(n){if(n.length<3)return[];var t,u=_t(e),i=_t(r),o=n.length,a=[],c=[];for(t=0;o>t;t++)a.push([+u.call(this,n[t],t),+i.call(this,n[t],t),t]);for(a.sort(kr),t=0;o>t;t++)c.push([a[t][0],-a[t][1]]);var s=Sr(a),l=Sr(c),f=l[0]===s[0],h=l[l.length-1]===s[s.length-1],g=[];for(t=s.length-1;t>=0;--t)g.push(n[a[s[t]][2]]);for(t=+f;t<l.length-h;++t)g.push(n[a[l[t]][2]]);return g}var e=br,r=wr;return arguments.length?t(n):(t.x=function(n){return arguments.length?(e=n,t):e},t.y=function(n){return arguments.length?(r=n,t):r},t)},Xo.geom.polygon=function(n){return fa(n,Zc),n};var Zc=Xo.geom.polygon.prototype=[];Zc.area=function(){for(var n,t=-1,e=this.length,r=this[e-1],u=0;++t<e;)n=r,r=this[t],u+=n[1]*r[0]-n[0]*r[1];return.5*u},Zc.centroid=function(n){var t,e,r=-1,u=this.length,i=0,o=0,a=this[u-1];for(arguments.length||(n=-1/(6*this.area()));++r<u;)t=a,a=this[r],e=t[0]*a[1]-a[0]*t[1],i+=(t[0]+a[0])*e,o+=(t[1]+a[1])*e;return[i*n,o*n]},Zc.clip=function(n){for(var 
t,e,r,u,i,o,a=Cr(n),c=-1,s=this.length-Cr(this),l=this[s-1];++c<s;){for(t=n.slice(),n.length=0,u=this[c],i=t[(r=t.length-a)-1],e=-1;++e<r;)o=t[e],Er(o,l,u)?(Er(i,l,u)||n.push(Ar(i,o,l,u)),n.push(o)):Er(i,l,u)&&n.push(Ar(i,o,l,u)),i=o;a&&n.push(n[0]),l=u}return n};var Vc,Xc,$c,Bc,Wc,Jc=[],Gc=[];Pr.prototype.prepare=function(){for(var n,t=this.edges,e=t.length;e--;)n=t[e].edge,n.b&&n.a||t.splice(e,1);return t.sort(jr),t.length},Br.prototype={start:function(){return this.edge.l===this.site?this.edge.a:this.edge.b},end:function(){return this.edge.l===this.site?this.edge.b:this.edge.a}},Wr.prototype={insert:function(n,t){var e,r,u;if(n){if(t.P=n,t.N=n.N,n.N&&(n.N.P=t),n.N=t,n.R){for(n=n.R;n.L;)n=n.L;n.L=t}else n.R=t;e=n}else this._?(n=Qr(this._),t.P=null,t.N=n,n.P=n.L=t,e=n):(t.P=t.N=null,this._=t,e=null);for(t.L=t.R=null,t.U=e,t.C=!0,n=t;e&&e.C;)r=e.U,e===r.L?(u=r.R,u&&u.C?(e.C=u.C=!1,r.C=!0,n=r):(n===e.R&&(Gr(this,e),n=e,e=n.U),e.C=!1,r.C=!0,Kr(this,r))):(u=r.L,u&&u.C?(e.C=u.C=!1,r.C=!0,n=r):(n===e.L&&(Kr(this,e),n=e,e=n.U),e.C=!1,r.C=!0,Gr(this,r))),e=n.U;this._.C=!1},remove:function(n){n.N&&(n.N.P=n.P),n.P&&(n.P.N=n.N),n.N=n.P=null;var t,e,r,u=n.U,i=n.L,o=n.R;if(e=i?o?Qr(o):i:o,u?u.L===n?u.L=e:u.R=e:this._=e,i&&o?(r=e.C,e.C=n.C,e.L=i,i.U=e,e!==o?(u=e.U,e.U=n.U,n=e.R,u.L=n,e.R=o,o.U=e):(e.U=u,u=e,n=e.R)):(r=n.C,n=e),n&&(n.U=u),!r){if(n&&n.C)return n.C=!1,void 0;do{if(n===this._)break;if(n===u.L){if(t=u.R,t.C&&(t.C=!1,u.C=!0,Gr(this,u),t=u.R),t.L&&t.L.C||t.R&&t.R.C){t.R&&t.R.C||(t.L.C=!1,t.C=!0,Kr(this,t),t=u.R),t.C=u.C,u.C=t.R.C=!1,Gr(this,u),n=this._;break}}else if(t=u.L,t.C&&(t.C=!1,u.C=!0,Kr(this,u),t=u.L),t.L&&t.L.C||t.R&&t.R.C){t.L&&t.L.C||(t.R.C=!1,t.C=!0,Gr(this,t),t=u.L),t.C=u.C,u.C=t.L.C=!1,Kr(this,u),n=this._;break}t.C=!0,n=u,u=u.U}while(!n.C);n&&(n.C=!1)}}},Xo.geom.voronoi=function(n){function t(n){var t=new Array(n.length),r=a[0][0],u=a[0][1],i=a[1][0],o=a[1][1];return nu(e(n),a).cells.forEach(function(e,a){var 
c=e.edges,s=e.site,l=t[a]=c.length?c.map(function(n){var t=n.start();return[t.x,t.y]}):s.x>=r&&s.x<=i&&s.y>=u&&s.y<=o?[[r,o],[i,o],[i,u],[r,u]]:[];l.point=n[a]}),t}function e(n){return n.map(function(n,t){return{x:Math.round(i(n,t)/Aa)*Aa,y:Math.round(o(n,t)/Aa)*Aa,i:t}})}var r=br,u=wr,i=r,o=u,a=Kc;return n?t(n):(t.links=function(n){return nu(e(n)).edges.filter(function(n){return n.l&&n.r}).map(function(t){return{source:n[t.l.i],target:n[t.r.i]}})},t.triangles=function(n){var t=[];return nu(e(n)).cells.forEach(function(e,r){for(var u,i,o=e.site,a=e.edges.sort(jr),c=-1,s=a.length,l=a[s-1].edge,f=l.l===o?l.r:l.l;++c<s;)u=l,i=f,l=a[c].edge,f=l.l===o?l.r:l.l,r<i.i&&r<f.i&&eu(o,i,f)<0&&t.push([n[r],n[i.i],n[f.i]])}),t},t.x=function(n){return arguments.length?(i=_t(r=n),t):r},t.y=function(n){return arguments.length?(o=_t(u=n),t):u},t.clipExtent=function(n){return arguments.length?(a=null==n?Kc:n,t):a===Kc?null:a},t.size=function(n){return arguments.length?t.clipExtent(n&&[[0,0],n]):a===Kc?null:a&&a[1]},t)};var Kc=[[-1e6,-1e6],[1e6,1e6]];Xo.geom.delaunay=function(n){return Xo.geom.voronoi().triangles(n)},Xo.geom.quadtree=function(n,t,e,r,u){function i(n){function i(n,t,e,r,u,i,o,a){if(!isNaN(e)&&!isNaN(r))if(n.leaf){var c=n.x,l=n.y;if(null!=c)if(oa(c-e)+oa(l-r)<.01)s(n,t,e,r,u,i,o,a);else{var f=n.point;n.x=n.y=n.point=null,s(n,f,c,l,u,i,o,a),s(n,t,e,r,u,i,o,a)}else n.x=e,n.y=r,n.point=t}else s(n,t,e,r,u,i,o,a)}function s(n,t,e,r,u,o,a,c){var s=.5*(u+a),l=.5*(o+c),f=e>=s,h=r>=l,g=(h<<1)+f;n.leaf=!1,n=n.nodes[g]||(n.nodes[g]=iu()),f?u=s:a=s,h?o=l:c=l,i(n,t,e,r,u,o,a,c)}var l,f,h,g,p,v,d,m,y,x=_t(a),M=_t(c);if(null!=t)v=t,d=e,m=r,y=u;else if(m=y=-(v=d=1/0),f=[],h=[],p=n.length,o)for(g=0;p>g;++g)l=n[g],l.x<v&&(v=l.x),l.y<d&&(d=l.y),l.x>m&&(m=l.x),l.y>y&&(y=l.y),f.push(l.x),h.push(l.y);else for(g=0;p>g;++g){var _=+x(l=n[g],g),b=+M(l,g);v>_&&(v=_),d>b&&(d=b),_>m&&(m=_),b>y&&(y=b),f.push(_),h.push(b)}var w=m-v,S=y-d;w>S?y=d+w:m=v+S;var 
k=iu();if(k.add=function(n){i(k,n,+x(n,++g),+M(n,g),v,d,m,y)},k.visit=function(n){ou(n,k,v,d,m,y)},g=-1,null==t){for(;++g<p;)i(k,n[g],f[g],h[g],v,d,m,y);--g}else n.forEach(k.add);return f=h=n=l=null,k}var o,a=br,c=wr;return(o=arguments.length)?(a=ru,c=uu,3===o&&(u=e,r=t,e=t=0),i(n)):(i.x=function(n){return arguments.length?(a=n,i):a},i.y=function(n){return arguments.length?(c=n,i):c},i.extent=function(n){return arguments.length?(null==n?t=e=r=u=null:(t=+n[0][0],e=+n[0][1],r=+n[1][0],u=+n[1][1]),i):null==t?null:[[t,e],[r,u]]},i.size=function(n){return arguments.length?(null==n?t=e=r=u=null:(t=e=0,r=+n[0],u=+n[1]),i):null==t?null:[r-t,u-e]},i)},Xo.interpolateRgb=au,Xo.interpolateObject=cu,Xo.interpolateNumber=su,Xo.interpolateString=lu;var Qc=/[-+]?(?:\d+\.?\d*|\.?\d+)(?:[eE][-+]?\d+)?/g;Xo.interpolate=fu,Xo.interpolators=[function(n,t){var e=typeof t;return("string"===e?Va.has(t)||/^(#|rgb\(|hsl\()/.test(t)?au:lu:t instanceof G?au:"object"===e?Array.isArray(t)?hu:cu:su)(n,t)}],Xo.interpolateArray=hu;var ns=function(){return bt},ts=Xo.map({linear:ns,poly:xu,quad:function(){return du},cubic:function(){return mu},sin:function(){return Mu},exp:function(){return _u},circle:function(){return bu},elastic:wu,back:Su,bounce:function(){return ku}}),es=Xo.map({"in":bt,out:pu,"in-out":vu,"out-in":function(n){return vu(pu(n))}});Xo.ease=function(n){var t=n.indexOf("-"),e=t>=0?n.substring(0,t):n,r=t>=0?n.substring(t+1):"in";return e=ts.get(e)||ns,r=es.get(r)||bt,gu(r(e.apply(null,$o.call(arguments,1))))},Xo.interpolateHcl=Eu,Xo.interpolateHsl=Au,Xo.interpolateLab=Cu,Xo.interpolateRound=Nu,Xo.transform=function(n){var t=Wo.createElementNS(Xo.ns.prefix.svg,"g");return(Xo.transform=function(n){if(null!=n){t.setAttribute("transform",n);var e=t.transform.baseVal.consolidate()}return new Lu(e?e.matrix:rs)})(n)},Lu.prototype.toString=function(){return"translate("+this.translate+")rotate("+this.rotate+")skewX("+this.skew+")scale("+this.scale+")"};var 
rs={a:1,b:0,c:0,d:1,e:0,f:0};Xo.interpolateTransform=Ru,Xo.layout={},Xo.layout.bundle=function(){return function(n){for(var t=[],e=-1,r=n.length;++e<r;)t.push(Uu(n[e]));return t}},Xo.layout.chord=function(){function n(){var n,s,f,h,g,p={},v=[],d=Xo.range(i),m=[];for(e=[],r=[],n=0,h=-1;++h<i;){for(s=0,g=-1;++g<i;)s+=u[h][g];v.push(s),m.push(Xo.range(i)),n+=s}for(o&&d.sort(function(n,t){return o(v[n],v[t])}),a&&m.forEach(function(n,t){n.sort(function(n,e){return a(u[t][n],u[t][e])})}),n=(ka-l*i)/n,s=0,h=-1;++h<i;){for(f=s,g=-1;++g<i;){var y=d[h],x=m[y][g],M=u[y][x],_=s,b=s+=M*n;p[y+"-"+x]={index:y,subindex:x,startAngle:_,endAngle:b,value:M}}r[y]={index:y,startAngle:f,endAngle:s,value:(s-f)/n},s+=l}for(h=-1;++h<i;)for(g=h-1;++g<i;){var w=p[h+"-"+g],S=p[g+"-"+h];(w.value||S.value)&&e.push(w.value<S.value?{source:S,target:w}:{source:w,target:S})}c&&t()}function t(){e.sort(function(n,t){return c((n.source.value+n.target.value)/2,(t.source.value+t.target.value)/2)})}var e,r,u,i,o,a,c,s={},l=0;return s.matrix=function(n){return arguments.length?(i=(u=n)&&u.length,e=r=null,s):u},s.padding=function(n){return arguments.length?(l=n,e=r=null,s):l},s.sortGroups=function(n){return arguments.length?(o=n,e=r=null,s):o},s.sortSubgroups=function(n){return arguments.length?(a=n,e=null,s):a},s.sortChords=function(n){return arguments.length?(c=n,e&&t(),s):c},s.chords=function(){return e||n(),e},s.groups=function(){return r||n(),r},s},Xo.layout.force=function(){function n(n){return function(t,e,r,u){if(t.point!==n){var i=t.cx-n.x,o=t.cy-n.y,a=u-e,c=i*i+o*o;if(c>a*a/d){if(p>c){var s=t.charge/c;n.px-=i*s,n.py-=o*s}return!0}if(t.point&&c&&p>c){var s=t.pointCharge/c;n.px-=i*s,n.py-=o*s}}return!t.charge}}function t(n){n.px=Xo.event.x,n.py=Xo.event.y,a.resume()}var e,r,u,i,o,a={},c=Xo.dispatch("start","tick","end"),s=[1,1],l=.9,f=us,h=is,g=-30,p=os,v=.1,d=.64,m=[],y=[];return a.tick=function(){if((r*=.99)<.005)return c.end({type:"end",alpha:r=0}),!0;var 
t,e,a,f,h,p,d,x,M,_=m.length,b=y.length;for(e=0;b>e;++e)a=y[e],f=a.source,h=a.target,x=h.x-f.x,M=h.y-f.y,(p=x*x+M*M)&&(p=r*i[e]*((p=Math.sqrt(p))-u[e])/p,x*=p,M*=p,h.x-=x*(d=f.weight/(h.weight+f.weight)),h.y-=M*d,f.x+=x*(d=1-d),f.y+=M*d);if((d=r*v)&&(x=s[0]/2,M=s[1]/2,e=-1,d))for(;++e<_;)a=m[e],a.x+=(x-a.x)*d,a.y+=(M-a.y)*d;if(g)for(Zu(t=Xo.geom.quadtree(m),r,o),e=-1;++e<_;)(a=m[e]).fixed||t.visit(n(a));for(e=-1;++e<_;)a=m[e],a.fixed?(a.x=a.px,a.y=a.py):(a.x-=(a.px-(a.px=a.x))*l,a.y-=(a.py-(a.py=a.y))*l);c.tick({type:"tick",alpha:r})},a.nodes=function(n){return arguments.length?(m=n,a):m},a.links=function(n){return arguments.length?(y=n,a):y},a.size=function(n){return arguments.length?(s=n,a):s},a.linkDistance=function(n){return arguments.length?(f="function"==typeof n?n:+n,a):f},a.distance=a.linkDistance,a.linkStrength=function(n){return arguments.length?(h="function"==typeof n?n:+n,a):h},a.friction=function(n){return arguments.length?(l=+n,a):l},a.charge=function(n){return arguments.length?(g="function"==typeof n?n:+n,a):g},a.chargeDistance=function(n){return arguments.length?(p=n*n,a):Math.sqrt(p)},a.gravity=function(n){return arguments.length?(v=+n,a):v},a.theta=function(n){return arguments.length?(d=n*n,a):Math.sqrt(d)},a.alpha=function(n){return arguments.length?(n=+n,r?r=n>0?n:0:n>0&&(c.start({type:"start",alpha:r=n}),Xo.timer(a.tick)),a):r},a.start=function(){function n(n,r){if(!e){for(e=new Array(c),a=0;c>a;++a)e[a]=[];for(a=0;s>a;++a){var u=y[a];e[u.source.index].push(u.target),e[u.target.index].push(u.source)}}for(var i,o=e[t],a=-1,s=o.length;++a<s;)if(!isNaN(i=o[a][n]))return i;return Math.random()*r}var t,e,r,c=m.length,l=y.length,p=s[0],v=s[1];for(t=0;c>t;++t)(r=m[t]).index=t,r.weight=0;for(t=0;l>t;++t)r=y[t],"number"==typeof r.source&&(r.source=m[r.source]),"number"==typeof 
r.target&&(r.target=m[r.target]),++r.source.weight,++r.target.weight;for(t=0;c>t;++t)r=m[t],isNaN(r.x)&&(r.x=n("x",p)),isNaN(r.y)&&(r.y=n("y",v)),isNaN(r.px)&&(r.px=r.x),isNaN(r.py)&&(r.py=r.y);if(u=[],"function"==typeof f)for(t=0;l>t;++t)u[t]=+f.call(this,y[t],t);else for(t=0;l>t;++t)u[t]=f;if(i=[],"function"==typeof h)for(t=0;l>t;++t)i[t]=+h.call(this,y[t],t);else for(t=0;l>t;++t)i[t]=h;if(o=[],"function"==typeof g)for(t=0;c>t;++t)o[t]=+g.call(this,m[t],t);else for(t=0;c>t;++t)o[t]=g;return a.resume()},a.resume=function(){return a.alpha(.1)},a.stop=function(){return a.alpha(0)},a.drag=function(){return e||(e=Xo.behavior.drag().origin(bt).on("dragstart.force",Fu).on("drag.force",t).on("dragend.force",Ou)),arguments.length?(this.on("mouseover.force",Yu).on("mouseout.force",Iu).call(e),void 0):e},Xo.rebind(a,c,"on")};var us=20,is=1,os=1/0;Xo.layout.hierarchy=function(){function n(t,o,a){var c=u.call(e,t,o);if(t.depth=o,a.push(t),c&&(s=c.length)){for(var s,l,f=-1,h=t.children=new Array(s),g=0,p=o+1;++f<s;)l=h[f]=n(c[f],p,a),l.parent=t,g+=l.value;r&&h.sort(r),i&&(t.value=g)}else delete t.children,i&&(t.value=+i.call(e,t,o)||0);return t}function t(n,r){var u=n.children,o=0;if(u&&(a=u.length))for(var a,c=-1,s=r+1;++c<a;)o+=t(u[c],s);else i&&(o=+i.call(e,n,r)||0);return i&&(n.value=o),o}function e(t){var e=[];return n(t,0,e),e}var r=Bu,u=Xu,i=$u;return e.sort=function(n){return arguments.length?(r=n,e):r},e.children=function(n){return arguments.length?(u=n,e):u},e.value=function(n){return arguments.length?(i=n,e):i},e.revalue=function(n){return t(n,0),n},e},Xo.layout.partition=function(){function n(t,e,r,u){var i=t.children;if(t.x=e,t.y=t.depth*u,t.dx=r,t.dy=u,i&&(o=i.length)){var o,a,c,s=-1;for(r=t.value?r/t.value:0;++s<o;)n(a=i[s],e,c=a.value*r,u),e+=c}}function t(n){var e=n.children,r=0;if(e&&(u=e.length))for(var u,i=-1;++i<u;)r=Math.max(r,t(e[i]));return 1+r}function e(e,i){var o=r.call(this,e,i);return n(o[0],0,u[0],u[1]/t(o[0])),o}var 
r=Xo.layout.hierarchy(),u=[1,1];return e.size=function(n){return arguments.length?(u=n,e):u},Vu(e,r)},Xo.layout.pie=function(){function n(i){var o=i.map(function(e,r){return+t.call(n,e,r)}),a=+("function"==typeof r?r.apply(this,arguments):r),c=(("function"==typeof u?u.apply(this,arguments):u)-a)/Xo.sum(o),s=Xo.range(i.length);null!=e&&s.sort(e===as?function(n,t){return o[t]-o[n]}:function(n,t){return e(i[n],i[t])});var l=[];return s.forEach(function(n){var t;l[n]={data:i[n],value:t=o[n],startAngle:a,endAngle:a+=t*c}}),l}var t=Number,e=as,r=0,u=ka;return n.value=function(e){return arguments.length?(t=e,n):t},n.sort=function(t){return arguments.length?(e=t,n):e},n.startAngle=function(t){return arguments.length?(r=t,n):r},n.endAngle=function(t){return arguments.length?(u=t,n):u},n};var as={};Xo.layout.stack=function(){function n(a,c){var s=a.map(function(e,r){return t.call(n,e,r)}),l=s.map(function(t){return t.map(function(t,e){return[i.call(n,t,e),o.call(n,t,e)]})}),f=e.call(n,l,c);s=Xo.permute(s,f),l=Xo.permute(l,f);var h,g,p,v=r.call(n,l,c),d=s.length,m=s[0].length;for(g=0;m>g;++g)for(u.call(n,s[0][g],p=v[g],l[0][g][1]),h=1;d>h;++h)u.call(n,s[h][g],p+=l[h-1][g][1],l[h][g][1]);return a}var t=bt,e=Qu,r=ni,u=Ku,i=Ju,o=Gu;return n.values=function(e){return arguments.length?(t=e,n):t},n.order=function(t){return arguments.length?(e="function"==typeof t?t:cs.get(t)||Qu,n):e},n.offset=function(t){return arguments.length?(r="function"==typeof t?t:ss.get(t)||ni,n):r},n.x=function(t){return arguments.length?(i=t,n):i},n.y=function(t){return arguments.length?(o=t,n):o},n.out=function(t){return arguments.length?(u=t,n):u},n};var cs=Xo.map({"inside-out":function(n){var t,e,r=n.length,u=n.map(ti),i=n.map(ei),o=Xo.range(r).sort(function(n,t){return u[n]-u[t]}),a=0,c=0,s=[],l=[];for(t=0;r>t;++t)e=o[t],c>a?(a+=i[e],s.push(e)):(c+=i[e],l.push(e));return l.reverse().concat(s)},reverse:function(n){return Xo.range(n.length).reverse()},"default":Qu}),ss=Xo.map({silhouette:function(n){var 
t,e,r,u=n.length,i=n[0].length,o=[],a=0,c=[];for(e=0;i>e;++e){for(t=0,r=0;u>t;t++)r+=n[t][e][1];r>a&&(a=r),o.push(r)}for(e=0;i>e;++e)c[e]=(a-o[e])/2;return c},wiggle:function(n){var t,e,r,u,i,o,a,c,s,l=n.length,f=n[0],h=f.length,g=[];for(g[0]=c=s=0,e=1;h>e;++e){for(t=0,u=0;l>t;++t)u+=n[t][e][1];for(t=0,i=0,a=f[e][0]-f[e-1][0];l>t;++t){for(r=0,o=(n[t][e][1]-n[t][e-1][1])/(2*a);t>r;++r)o+=(n[r][e][1]-n[r][e-1][1])/a;i+=o*n[t][e][1]}g[e]=c-=u?i/u*a:0,s>c&&(s=c)}for(e=0;h>e;++e)g[e]-=s;return g},expand:function(n){var t,e,r,u=n.length,i=n[0].length,o=1/u,a=[];for(e=0;i>e;++e){for(t=0,r=0;u>t;t++)r+=n[t][e][1];if(r)for(t=0;u>t;t++)n[t][e][1]/=r;else for(t=0;u>t;t++)n[t][e][1]=o}for(e=0;i>e;++e)a[e]=0;return a},zero:ni});Xo.layout.histogram=function(){function n(n,i){for(var o,a,c=[],s=n.map(e,this),l=r.call(this,s,i),f=u.call(this,l,s,i),i=-1,h=s.length,g=f.length-1,p=t?1:1/h;++i<g;)o=c[i]=[],o.dx=f[i+1]-(o.x=f[i]),o.y=0;if(g>0)for(i=-1;++i<h;)a=s[i],a>=l[0]&&a<=l[1]&&(o=c[Xo.bisect(f,a,1,g)-1],o.y+=p,o.push(n[i]));return c}var t=!0,e=Number,r=oi,u=ui;return n.value=function(t){return arguments.length?(e=t,n):e},n.range=function(t){return arguments.length?(r=_t(t),n):r},n.bins=function(t){return arguments.length?(u="number"==typeof t?function(n){return ii(n,t)}:_t(t),n):u},n.frequency=function(e){return arguments.length?(t=!!e,n):t},n},Xo.layout.tree=function(){function n(n,i){function o(n,t){var r=n.children,u=n._tree;if(r&&(i=r.length)){for(var i,a,s,l=r[0],f=l,h=-1;++h<i;)s=r[h],o(s,a),f=c(s,a,f),a=s;vi(n);var g=.5*(l._tree.prelim+s._tree.prelim);t?(u.prelim=t._tree.prelim+e(n,t),u.mod=u.prelim-g):u.prelim=g}else t&&(u.prelim=t._tree.prelim+e(n,t))}function a(n,t){n.x=n._tree.prelim+t;var e=n.children;if(e&&(r=e.length)){var r,u=-1;for(t+=n._tree.mod;++u<r;)a(e[u],t)}}function c(n,t,r){if(t){for(var 
u,i=n,o=n,a=t,c=n.parent.children[0],s=i._tree.mod,l=o._tree.mod,f=a._tree.mod,h=c._tree.mod;a=si(a),i=ci(i),a&&i;)c=ci(c),o=si(o),o._tree.ancestor=n,u=a._tree.prelim+f-i._tree.prelim-s+e(a,i),u>0&&(di(mi(a,n,r),n,u),s+=u,l+=u),f+=a._tree.mod,s+=i._tree.mod,h+=c._tree.mod,l+=o._tree.mod;a&&!si(o)&&(o._tree.thread=a,o._tree.mod+=f-l),i&&!ci(c)&&(c._tree.thread=i,c._tree.mod+=s-h,r=n)}return r}var s=t.call(this,n,i),l=s[0];pi(l,function(n,t){n._tree={ancestor:n,prelim:0,mod:0,change:0,shift:0,number:t?t._tree.number+1:0}}),o(l),a(l,-l._tree.prelim);var f=li(l,hi),h=li(l,fi),g=li(l,gi),p=f.x-e(f,h)/2,v=h.x+e(h,f)/2,d=g.depth||1;return pi(l,u?function(n){n.x*=r[0],n.y=n.depth*r[1],delete n._tree}:function(n){n.x=(n.x-p)/(v-p)*r[0],n.y=n.depth/d*r[1],delete n._tree}),s}var t=Xo.layout.hierarchy().sort(null).value(null),e=ai,r=[1,1],u=!1;return n.separation=function(t){return arguments.length?(e=t,n):e},n.size=function(t){return arguments.length?(u=null==(r=t),n):u?null:r},n.nodeSize=function(t){return arguments.length?(u=null!=(r=t),n):u?r:null},Vu(n,t)},Xo.layout.pack=function(){function n(n,i){var o=e.call(this,n,i),a=o[0],c=u[0],s=u[1],l=null==t?Math.sqrt:"function"==typeof t?t:function(){return t};if(a.x=a.y=0,pi(a,function(n){n.r=+l(n.value)}),pi(a,bi),r){var f=r*(t?1:Math.max(2*a.r/c,2*a.r/s))/2;pi(a,function(n){n.r+=f}),pi(a,bi),pi(a,function(n){n.r-=f})}return ki(a,c/2,s/2,t?1:1/Math.max(2*a.r/c,2*a.r/s)),o}var t,e=Xo.layout.hierarchy().sort(yi),r=0,u=[1,1];return n.size=function(t){return arguments.length?(u=t,n):u},n.radius=function(e){return arguments.length?(t=null==e||"function"==typeof e?e:+e,n):t},n.padding=function(t){return arguments.length?(r=+t,n):r},Vu(n,e)},Xo.layout.cluster=function(){function n(n,i){var o,a=t.call(this,n,i),c=a[0],s=0;pi(c,function(n){var t=n.children;t&&t.length?(n.x=Ci(t),n.y=Ai(t)):(n.x=o?s+=e(n,o):0,n.y=0,o=n)});var l=Ni(c),f=Li(c),h=l.x-e(l,f)/2,g=f.x+e(f,l)/2;return 
pi(c,u?function(n){n.x=(n.x-c.x)*r[0],n.y=(c.y-n.y)*r[1]}:function(n){n.x=(n.x-h)/(g-h)*r[0],n.y=(1-(c.y?n.y/c.y:1))*r[1]}),a}var t=Xo.layout.hierarchy().sort(null).value(null),e=ai,r=[1,1],u=!1;return n.separation=function(t){return arguments.length?(e=t,n):e},n.size=function(t){return arguments.length?(u=null==(r=t),n):u?null:r},n.nodeSize=function(t){return arguments.length?(u=null!=(r=t),n):u?r:null},Vu(n,t)},Xo.layout.treemap=function(){function n(n,t){for(var e,r,u=-1,i=n.length;++u<i;)r=(e=n[u]).value*(0>t?0:t),e.area=isNaN(r)||0>=r?0:r}function t(e){var i=e.children;if(i&&i.length){var o,a,c,s=f(e),l=[],h=i.slice(),p=1/0,v="slice"===g?s.dx:"dice"===g?s.dy:"slice-dice"===g?1&e.depth?s.dy:s.dx:Math.min(s.dx,s.dy);for(n(h,s.dx*s.dy/e.value),l.area=0;(c=h.length)>0;)l.push(o=h[c-1]),l.area+=o.area,"squarify"!==g||(a=r(l,v))<=p?(h.pop(),p=a):(l.area-=l.pop().area,u(l,v,s,!1),v=Math.min(s.dx,s.dy),l.length=l.area=0,p=1/0);l.length&&(u(l,v,s,!0),l.length=l.area=0),i.forEach(t)}}function e(t){var r=t.children;if(r&&r.length){var i,o=f(t),a=r.slice(),c=[];for(n(a,o.dx*o.dy/t.value),c.area=0;i=a.pop();)c.push(i),c.area+=i.area,null!=i.z&&(u(c,i.z?o.dx:o.dy,o,!a.length),c.length=c.area=0);r.forEach(e)}}function r(n,t){for(var e,r=n.area,u=0,i=1/0,o=-1,a=n.length;++o<a;)(e=n[o].area)&&(i>e&&(i=e),e>u&&(u=e));return r*=r,t*=t,r?Math.max(t*u*p/r,r/(t*i*p)):1/0}function u(n,t,e,r){var u,i=-1,o=n.length,a=e.x,s=e.y,l=t?c(n.area/t):0;if(t==e.dx){for((r||l>e.dy)&&(l=e.dy);++i<o;)u=n[i],u.x=a,u.y=s,u.dy=l,a+=u.dx=Math.min(e.x+e.dx-a,l?c(u.area/l):0);u.z=!0,u.dx+=e.x+e.dx-a,e.y+=l,e.dy-=l}else{for((r||l>e.dx)&&(l=e.dx);++i<o;)u=n[i],u.x=a,u.y=s,u.dx=l,s+=u.dy=Math.min(e.y+e.dy-s,l?c(u.area/l):0);u.z=!1,u.dy+=e.y+e.dy-s,e.x+=l,e.dx-=l}}function i(r){var u=o||a(r),i=u[0];return i.x=0,i.y=0,i.dx=s[0],i.dy=s[1],o&&a.revalue(i),n([i],i.dx*i.dy/i.value),(o?e:t)(i),h&&(o=u),u}var o,a=Xo.layout.hierarchy(),c=Math.round,s=[1,1],l=null,f=Ti,h=!1,g="squarify",p=.5*(1+Math.sqrt(5));return 
i.size=function(n){return arguments.length?(s=n,i):s},i.padding=function(n){function t(t){var e=n.call(i,t,t.depth);return null==e?Ti(t):qi(t,"number"==typeof e?[e,e,e,e]:e)}function e(t){return qi(t,n)}if(!arguments.length)return l;var r;return f=null==(l=n)?Ti:"function"==(r=typeof n)?t:"number"===r?(n=[n,n,n,n],e):e,i},i.round=function(n){return arguments.length?(c=n?Math.round:Number,i):c!=Number},i.sticky=function(n){return arguments.length?(h=n,o=null,i):h},i.ratio=function(n){return arguments.length?(p=n,i):p},i.mode=function(n){return arguments.length?(g=n+"",i):g},Vu(i,a)},Xo.random={normal:function(n,t){var e=arguments.length;return 2>e&&(t=1),1>e&&(n=0),function(){var e,r,u;do e=2*Math.random()-1,r=2*Math.random()-1,u=e*e+r*r;while(!u||u>1);return n+t*e*Math.sqrt(-2*Math.log(u)/u)}},logNormal:function(){var n=Xo.random.normal.apply(Xo,arguments);return function(){return Math.exp(n())}},bates:function(n){var t=Xo.random.irwinHall(n);return function(){return t()/n}},irwinHall:function(n){return function(){for(var t=0,e=0;n>e;e++)t+=Math.random();return t}}},Xo.scale={};var ls={floor:bt,ceil:bt};Xo.scale.linear=function(){return Hi([0,1],[0,1],fu,!1)};var fs={s:1,g:1,p:1,r:1,e:1};Xo.scale.log=function(){return $i(Xo.scale.linear().domain([0,1]),10,!0,[1,10])};var hs=Xo.format(".0e"),gs={floor:function(n){return-Math.ceil(-n)},ceil:function(n){return-Math.floor(-n)}};Xo.scale.pow=function(){return Bi(Xo.scale.linear(),1,[0,1])},Xo.scale.sqrt=function(){return Xo.scale.pow().exponent(.5)},Xo.scale.ordinal=function(){return Ji([],{t:"range",a:[[]]})},Xo.scale.category10=function(){return Xo.scale.ordinal().range(ps)},Xo.scale.category20=function(){return Xo.scale.ordinal().range(vs)},Xo.scale.category20b=function(){return Xo.scale.ordinal().range(ds)},Xo.scale.category20c=function(){return Xo.scale.ordinal().range(ms)};var 
ps=[2062260,16744206,2924588,14034728,9725885,9197131,14907330,8355711,12369186,1556175].map(ht),vs=[2062260,11454440,16744206,16759672,2924588,10018698,14034728,16750742,9725885,12955861,9197131,12885140,14907330,16234194,8355711,13092807,12369186,14408589,1556175,10410725].map(ht),ds=[3750777,5395619,7040719,10264286,6519097,9216594,11915115,13556636,9202993,12426809,15186514,15190932,8666169,11356490,14049643,15177372,8077683,10834324,13528509,14589654].map(ht),ms=[3244733,7057110,10406625,13032431,15095053,16616764,16625259,16634018,3253076,7652470,10607003,13101504,7695281,10394312,12369372,14342891,6513507,9868950,12434877,14277081].map(ht);Xo.scale.quantile=function(){return Gi([],[])},Xo.scale.quantize=function(){return Ki(0,1,[0,1])},Xo.scale.threshold=function(){return Qi([.5],[0,1])},Xo.scale.identity=function(){return no([0,1])},Xo.svg={},Xo.svg.arc=function(){function n(){var n=t.apply(this,arguments),i=e.apply(this,arguments),o=r.apply(this,arguments)+ys,a=u.apply(this,arguments)+ys,c=(o>a&&(c=o,o=a,a=c),a-o),s=Sa>c?"0":"1",l=Math.cos(o),f=Math.sin(o),h=Math.cos(a),g=Math.sin(a);return c>=xs?n?"M0,"+i+"A"+i+","+i+" 0 1,1 0,"+-i+"A"+i+","+i+" 0 1,1 0,"+i+"M0,"+n+"A"+n+","+n+" 0 1,0 0,"+-n+"A"+n+","+n+" 0 1,0 0,"+n+"Z":"M0,"+i+"A"+i+","+i+" 0 1,1 0,"+-i+"A"+i+","+i+" 0 1,1 0,"+i+"Z":n?"M"+i*l+","+i*f+"A"+i+","+i+" 0 "+s+",1 "+i*h+","+i*g+"L"+n*h+","+n*g+"A"+n+","+n+" 0 "+s+",0 "+n*l+","+n*f+"Z":"M"+i*l+","+i*f+"A"+i+","+i+" 0 "+s+",1 "+i*h+","+i*g+"L0,0"+"Z"}var t=to,e=eo,r=ro,u=uo;return n.innerRadius=function(e){return arguments.length?(t=_t(e),n):t},n.outerRadius=function(t){return arguments.length?(e=_t(t),n):e},n.startAngle=function(t){return arguments.length?(r=_t(t),n):r},n.endAngle=function(t){return arguments.length?(u=_t(t),n):u},n.centroid=function(){var n=(t.apply(this,arguments)+e.apply(this,arguments))/2,i=(r.apply(this,arguments)+u.apply(this,arguments))/2+ys;return[Math.cos(i)*n,Math.sin(i)*n]},n};var 
ys=-Ea,xs=ka-Aa;Xo.svg.line=function(){return io(bt)};var Ms=Xo.map({linear:oo,"linear-closed":ao,step:co,"step-before":so,"step-after":lo,basis:mo,"basis-open":yo,"basis-closed":xo,bundle:Mo,cardinal:go,"cardinal-open":fo,"cardinal-closed":ho,monotone:Eo});Ms.forEach(function(n,t){t.key=n,t.closed=/-closed$/.test(n)});var _s=[0,2/3,1/3,0],bs=[0,1/3,2/3,0],ws=[0,1/6,2/3,1/6];Xo.svg.line.radial=function(){var n=io(Ao);return n.radius=n.x,delete n.x,n.angle=n.y,delete n.y,n},so.reverse=lo,lo.reverse=so,Xo.svg.area=function(){return Co(bt)},Xo.svg.area.radial=function(){var n=Co(Ao);return n.radius=n.x,delete n.x,n.innerRadius=n.x0,delete n.x0,n.outerRadius=n.x1,delete n.x1,n.angle=n.y,delete n.y,n.startAngle=n.y0,delete n.y0,n.endAngle=n.y1,delete n.y1,n},Xo.svg.chord=function(){function n(n,a){var c=t(this,i,n,a),s=t(this,o,n,a);return"M"+c.p0+r(c.r,c.p1,c.a1-c.a0)+(e(c,s)?u(c.r,c.p1,c.r,c.p0):u(c.r,c.p1,s.r,s.p0)+r(s.r,s.p1,s.a1-s.a0)+u(s.r,s.p1,c.r,c.p0))+"Z"}function t(n,t,e,r){var u=t.call(n,e,r),i=a.call(n,u,r),o=c.call(n,u,r)+ys,l=s.call(n,u,r)+ys;return{r:i,a0:o,a1:l,p0:[i*Math.cos(o),i*Math.sin(o)],p1:[i*Math.cos(l),i*Math.sin(l)]}}function e(n,t){return n.a0==t.a0&&n.a1==t.a1}function r(n,t,e){return"A"+n+","+n+" 0 "+ +(e>Sa)+",1 "+t}function u(n,t,e,r){return"Q 0,0 "+r}var i=hr,o=gr,a=No,c=ro,s=uo;return n.radius=function(t){return arguments.length?(a=_t(t),n):a},n.source=function(t){return arguments.length?(i=_t(t),n):i},n.target=function(t){return arguments.length?(o=_t(t),n):o},n.startAngle=function(t){return arguments.length?(c=_t(t),n):c},n.endAngle=function(t){return arguments.length?(s=_t(t),n):s},n},Xo.svg.diagonal=function(){function n(n,u){var i=t.call(this,n,u),o=e.call(this,n,u),a=(i.y+o.y)/2,c=[i,{x:i.x,y:a},{x:o.x,y:a},o];return c=c.map(r),"M"+c[0]+"C"+c[1]+" "+c[2]+" "+c[3]}var t=hr,e=gr,r=Lo;return n.source=function(e){return arguments.length?(t=_t(e),n):t},n.target=function(t){return 
arguments.length?(e=_t(t),n):e},n.projection=function(t){return arguments.length?(r=t,n):r},n},Xo.svg.diagonal.radial=function(){var n=Xo.svg.diagonal(),t=Lo,e=n.projection;return n.projection=function(n){return arguments.length?e(To(t=n)):t},n},Xo.svg.symbol=function(){function n(n,r){return(Ss.get(t.call(this,n,r))||Ro)(e.call(this,n,r))}var t=zo,e=qo;return n.type=function(e){return arguments.length?(t=_t(e),n):t},n.size=function(t){return arguments.length?(e=_t(t),n):e},n};var Ss=Xo.map({circle:Ro,cross:function(n){var t=Math.sqrt(n/5)/2;return"M"+-3*t+","+-t+"H"+-t+"V"+-3*t+"H"+t+"V"+-t+"H"+3*t+"V"+t+"H"+t+"V"+3*t+"H"+-t+"V"+t+"H"+-3*t+"Z"},diamond:function(n){var t=Math.sqrt(n/(2*Cs)),e=t*Cs;return"M0,"+-t+"L"+e+",0"+" 0,"+t+" "+-e+",0"+"Z"},square:function(n){var t=Math.sqrt(n)/2;return"M"+-t+","+-t+"L"+t+","+-t+" "+t+","+t+" "+-t+","+t+"Z"},"triangle-down":function(n){var t=Math.sqrt(n/As),e=t*As/2;return"M0,"+e+"L"+t+","+-e+" "+-t+","+-e+"Z"},"triangle-up":function(n){var t=Math.sqrt(n/As),e=t*As/2;return"M0,"+-e+"L"+t+","+e+" "+-t+","+e+"Z"}});Xo.svg.symbolTypes=Ss.keys();var ks,Es,As=Math.sqrt(3),Cs=Math.tan(30*Na),Ns=[],Ls=0;Ns.call=da.call,Ns.empty=da.empty,Ns.node=da.node,Ns.size=da.size,Xo.transition=function(n){return arguments.length?ks?n.transition():n:xa.transition()},Xo.transition.prototype=Ns,Ns.select=function(n){var t,e,r,u=this.id,i=[];n=M(n);for(var o=-1,a=this.length;++o<a;){i.push(t=[]);for(var c=this[o],s=-1,l=c.length;++s<l;)(r=c[s])&&(e=n.call(r,r.__data__,s,o))?("__data__"in r&&(e.__data__=r.__data__),jo(e,s,u,r.__transition__[u]),t.push(e)):t.push(null)}return Do(i,u)},Ns.selectAll=function(n){var t,e,r,u,i,o=this.id,a=[];n=_(n);for(var c=-1,s=this.length;++c<s;)for(var l=this[c],f=-1,h=l.length;++f<h;)if(r=l[f]){i=r.__transition__[o],e=n.call(r,r.__data__,f,c),a.push(t=[]);for(var g=-1,p=e.length;++g<p;)(u=e[g])&&jo(u,g,o,i),t.push(u)}return Do(a,o)},Ns.filter=function(n){var t,e,r,u=[];"function"!=typeof n&&(n=q(n));for(var 
i=0,o=this.length;o>i;i++){u.push(t=[]);for(var e=this[i],a=0,c=e.length;c>a;a++)(r=e[a])&&n.call(r,r.__data__,a,i)&&t.push(r)}return Do(u,this.id)},Ns.tween=function(n,t){var e=this.id;return arguments.length<2?this.node().__transition__[e].tween.get(n):R(this,null==t?function(t){t.__transition__[e].tween.remove(n)}:function(r){r.__transition__[e].tween.set(n,t)})},Ns.attr=function(n,t){function e(){this.removeAttribute(a)}function r(){this.removeAttributeNS(a.space,a.local)}function u(n){return null==n?e:(n+="",function(){var t,e=this.getAttribute(a);return e!==n&&(t=o(e,n),function(n){this.setAttribute(a,t(n))})})}function i(n){return null==n?r:(n+="",function(){var t,e=this.getAttributeNS(a.space,a.local);return e!==n&&(t=o(e,n),function(n){this.setAttributeNS(a.space,a.local,t(n))})})}if(arguments.length<2){for(t in n)this.attr(t,n[t]);return this}var o="transform"==n?Ru:fu,a=Xo.ns.qualify(n);return Po(this,"attr."+n,t,a.local?i:u)},Ns.attrTween=function(n,t){function e(n,e){var r=t.call(this,n,e,this.getAttribute(u));return r&&function(n){this.setAttribute(u,r(n))}}function r(n,e){var r=t.call(this,n,e,this.getAttributeNS(u.space,u.local));return r&&function(n){this.setAttributeNS(u.space,u.local,r(n))}}var u=Xo.ns.qualify(n);return this.tween("attr."+n,u.local?r:e)},Ns.style=function(n,t,e){function r(){this.style.removeProperty(n)}function u(t){return null==t?r:(t+="",function(){var r,u=Go.getComputedStyle(this,null).getPropertyValue(n);return u!==t&&(r=fu(u,t),function(t){this.style.setProperty(n,r(t),e)})})}var i=arguments.length;if(3>i){if("string"!=typeof n){2>i&&(t="");for(e in n)this.style(e,n[e],t);return this}e=""}return Po(this,"style."+n,t,u)},Ns.styleTween=function(n,t,e){function r(r,u){var i=t.call(this,r,u,Go.getComputedStyle(this,null).getPropertyValue(n));return i&&function(t){this.style.setProperty(n,i(t),e)}}return arguments.length<3&&(e=""),this.tween("style."+n,r)},Ns.text=function(n){return 
Po(this,"text",n,Uo)},Ns.remove=function(){return this.each("end.transition",function(){var n;this.__transition__.count<2&&(n=this.parentNode)&&n.removeChild(this)})},Ns.ease=function(n){var t=this.id;return arguments.length<1?this.node().__transition__[t].ease:("function"!=typeof n&&(n=Xo.ease.apply(Xo,arguments)),R(this,function(e){e.__transition__[t].ease=n}))},Ns.delay=function(n){var t=this.id;return R(this,"function"==typeof n?function(e,r,u){e.__transition__[t].delay=+n.call(e,e.__data__,r,u)}:(n=+n,function(e){e.__transition__[t].delay=n}))},Ns.duration=function(n){var t=this.id;return R(this,"function"==typeof n?function(e,r,u){e.__transition__[t].duration=Math.max(1,n.call(e,e.__data__,r,u))}:(n=Math.max(1,n),function(e){e.__transition__[t].duration=n}))},Ns.each=function(n,t){var e=this.id;if(arguments.length<2){var r=Es,u=ks;ks=e,R(this,function(t,r,u){Es=t.__transition__[e],n.call(t,t.__data__,r,u)}),Es=r,ks=u}else R(this,function(r){var u=r.__transition__[e];(u.event||(u.event=Xo.dispatch("start","end"))).on(n,t)});return this},Ns.transition=function(){for(var n,t,e,r,u=this.id,i=++Ls,o=[],a=0,c=this.length;c>a;a++){o.push(n=[]);for(var t=this[a],s=0,l=t.length;l>s;s++)(e=t[s])&&(r=Object.create(e.__transition__[u]),r.delay+=r.duration,jo(e,s,i,r)),n.push(e)}return Do(o,i)},Xo.svg.axis=function(){function n(n){n.each(function(){var n,s=Xo.select(this),l=this.__chart__||e,f=this.__chart__=e.copy(),h=null==c?f.ticks?f.ticks.apply(f,a):f.domain():c,g=null==t?f.tickFormat?f.tickFormat.apply(f,a):bt:t,p=s.selectAll(".tick").data(h,f),v=p.enter().insert("g",".domain").attr("class","tick").style("opacity",Aa),d=Xo.transition(p.exit()).style("opacity",Aa).remove(),m=Xo.transition(p).style("opacity",1),y=Ri(f),x=s.selectAll(".domain").data([0]),M=(x.enter().append("path").attr("class","domain"),Xo.transition(x));v.append("line"),v.append("text");var 
_=v.select("line"),b=m.select("line"),w=p.select("text").text(g),S=v.select("text"),k=m.select("text");switch(r){case"bottom":n=Ho,_.attr("y2",u),S.attr("y",Math.max(u,0)+o),b.attr("x2",0).attr("y2",u),k.attr("x",0).attr("y",Math.max(u,0)+o),w.attr("dy",".71em").style("text-anchor","middle"),M.attr("d","M"+y[0]+","+i+"V0H"+y[1]+"V"+i);break;case"top":n=Ho,_.attr("y2",-u),S.attr("y",-(Math.max(u,0)+o)),b.attr("x2",0).attr("y2",-u),k.attr("x",0).attr("y",-(Math.max(u,0)+o)),w.attr("dy","0em").style("text-anchor","middle"),M.attr("d","M"+y[0]+","+-i+"V0H"+y[1]+"V"+-i);break;case"left":n=Fo,_.attr("x2",-u),S.attr("x",-(Math.max(u,0)+o)),b.attr("x2",-u).attr("y2",0),k.attr("x",-(Math.max(u,0)+o)).attr("y",0),w.attr("dy",".32em").style("text-anchor","end"),M.attr("d","M"+-i+","+y[0]+"H0V"+y[1]+"H"+-i);break;case"right":n=Fo,_.attr("x2",u),S.attr("x",Math.max(u,0)+o),b.attr("x2",u).attr("y2",0),k.attr("x",Math.max(u,0)+o).attr("y",0),w.attr("dy",".32em").style("text-anchor","start"),M.attr("d","M"+i+","+y[0]+"H0V"+y[1]+"H"+i)}if(f.rangeBand){var E=f,A=E.rangeBand()/2;l=f=function(n){return E(n)+A}}else l.rangeBand?l=f:d.call(n,f);v.call(n,l),m.call(n,f)})}var t,e=Xo.scale.linear(),r=Ts,u=6,i=6,o=3,a=[10],c=null;return n.scale=function(t){return arguments.length?(e=t,n):e},n.orient=function(t){return arguments.length?(r=t in qs?t+"":Ts,n):r},n.ticks=function(){return arguments.length?(a=arguments,n):a},n.tickValues=function(t){return arguments.length?(c=t,n):c},n.tickFormat=function(e){return arguments.length?(t=e,n):t},n.tickSize=function(t){var e=arguments.length;return e?(u=+t,i=+arguments[e-1],n):u},n.innerTickSize=function(t){return arguments.length?(u=+t,n):u},n.outerTickSize=function(t){return arguments.length?(i=+t,n):i},n.tickPadding=function(t){return arguments.length?(o=+t,n):o},n.tickSubdivide=function(){return arguments.length&&n},n};var Ts="bottom",qs={top:1,right:1,bottom:1,left:1};Xo.svg.brush=function(){function n(i){i.each(function(){var 
i=Xo.select(this).style("pointer-events","all").style("-webkit-tap-highlight-color","rgba(0,0,0,0)").on("mousedown.brush",u).on("touchstart.brush",u),o=i.selectAll(".background").data([0]);o.enter().append("rect").attr("class","background").style("visibility","hidden").style("cursor","crosshair"),i.selectAll(".extent").data([0]).enter().append("rect").attr("class","extent").style("cursor","move");var a=i.selectAll(".resize").data(p,bt);a.exit().remove(),a.enter().append("g").attr("class",function(n){return"resize "+n}).style("cursor",function(n){return zs[n]}).append("rect").attr("x",function(n){return/[ew]$/.test(n)?-3:null}).attr("y",function(n){return/^[ns]/.test(n)?-3:null}).attr("width",6).attr("height",6).style("visibility","hidden"),a.style("display",n.empty()?"none":null);var l,f=Xo.transition(i),h=Xo.transition(o);c&&(l=Ri(c),h.attr("x",l[0]).attr("width",l[1]-l[0]),e(f)),s&&(l=Ri(s),h.attr("y",l[0]).attr("height",l[1]-l[0]),r(f)),t(f)})}function t(n){n.selectAll(".resize").attr("transform",function(n){return"translate("+l[+/e$/.test(n)]+","+f[+/^s/.test(n)]+")"})}function e(n){n.select(".extent").attr("x",l[0]),n.selectAll(".extent,.n>rect,.s>rect").attr("width",l[1]-l[0])}function r(n){n.select(".extent").attr("y",f[0]),n.selectAll(".extent,.e>rect,.w>rect").attr("height",f[1]-f[0])}function u(){function u(){32==Xo.event.keyCode&&(C||(x=null,L[0]-=l[1],L[1]-=f[1],C=2),d())}function p(){32==Xo.event.keyCode&&2==C&&(L[0]+=l[1],L[1]+=f[1],C=0,d())}function v(){var n=Xo.mouse(_),u=!1;M&&(n[0]+=M[0],n[1]+=M[1]),C||(Xo.event.altKey?(x||(x=[(l[0]+l[1])/2,(f[0]+f[1])/2]),L[0]=l[+(n[0]<x[0])],L[1]=f[+(n[1]<x[1])]):x=null),E&&m(n,c,0)&&(e(S),u=!0),A&&m(n,s,1)&&(r(S),u=!0),u&&(t(S),w({type:"brush",mode:C?"move":"resize"}))}function m(n,t,e){var r,u,a=Ri(t),c=a[0],s=a[1],p=L[e],v=e?f:l,d=v[1]-v[0];return 
C&&(c-=p,s-=d+p),r=(e?g:h)?Math.max(c,Math.min(s,n[e])):n[e],C?u=(r+=p)+d:(x&&(p=Math.max(c,Math.min(s,2*x[e]-r))),r>p?(u=r,r=p):u=p),v[0]!=r||v[1]!=u?(e?o=null:i=null,v[0]=r,v[1]=u,!0):void 0}function y(){v(),S.style("pointer-events","all").selectAll(".resize").style("display",n.empty()?"none":null),Xo.select("body").style("cursor",null),T.on("mousemove.brush",null).on("mouseup.brush",null).on("touchmove.brush",null).on("touchend.brush",null).on("keydown.brush",null).on("keyup.brush",null),N(),w({type:"brushend"})}var x,M,_=this,b=Xo.select(Xo.event.target),w=a.of(_,arguments),S=Xo.select(_),k=b.datum(),E=!/^(n|s)$/.test(k)&&c,A=!/^(e|w)$/.test(k)&&s,C=b.classed("extent"),N=O(),L=Xo.mouse(_),T=Xo.select(Go).on("keydown.brush",u).on("keyup.brush",p);if(Xo.event.changedTouches?T.on("touchmove.brush",v).on("touchend.brush",y):T.on("mousemove.brush",v).on("mouseup.brush",y),S.interrupt().selectAll("*").interrupt(),C)L[0]=l[0]-L[0],L[1]=f[0]-L[1];else if(k){var q=+/w$/.test(k),z=+/^n/.test(k);M=[l[1-q]-L[0],f[1-z]-L[1]],L[0]=l[q],L[1]=f[z]}else Xo.event.altKey&&(x=L.slice());S.style("pointer-events","none").selectAll(".resize").style("display",null),Xo.select("body").style("cursor",b.style("cursor")),w({type:"brushstart"}),v()}var i,o,a=y(n,"brushstart","brush","brushend"),c=null,s=null,l=[0,0],f=[0,0],h=!0,g=!0,p=Rs[0];return n.event=function(n){n.each(function(){var n=a.of(this,arguments),t={x:l,y:f,i:i,j:o},e=this.__chart__||t;this.__chart__=t,ks?Xo.select(this).transition().each("start.brush",function(){i=e.i,o=e.j,l=e.x,f=e.y,n({type:"brushstart"})}).tween("brush:brush",function(){var e=hu(l,t.x),r=hu(f,t.y);return i=o=null,function(u){l=t.x=e(u),f=t.y=r(u),n({type:"brush",mode:"resize"})}}).each("end.brush",function(){i=t.i,o=t.j,n({type:"brush",mode:"resize"}),n({type:"brushend"})}):(n({type:"brushstart"}),n({type:"brush",mode:"resize"}),n({type:"brushend"}))})},n.x=function(t){return arguments.length?(c=t,p=Rs[!c<<1|!s],n):c},n.y=function(t){return 
arguments.length?(s=t,p=Rs[!c<<1|!s],n):s},n.clamp=function(t){return arguments.length?(c&&s?(h=!!t[0],g=!!t[1]):c?h=!!t:s&&(g=!!t),n):c&&s?[h,g]:c?h:s?g:null},n.extent=function(t){var e,r,u,a,h;return arguments.length?(c&&(e=t[0],r=t[1],s&&(e=e[0],r=r[0]),i=[e,r],c.invert&&(e=c(e),r=c(r)),e>r&&(h=e,e=r,r=h),(e!=l[0]||r!=l[1])&&(l=[e,r])),s&&(u=t[0],a=t[1],c&&(u=u[1],a=a[1]),o=[u,a],s.invert&&(u=s(u),a=s(a)),u>a&&(h=u,u=a,a=h),(u!=f[0]||a!=f[1])&&(f=[u,a])),n):(c&&(i?(e=i[0],r=i[1]):(e=l[0],r=l[1],c.invert&&(e=c.invert(e),r=c.invert(r)),e>r&&(h=e,e=r,r=h))),s&&(o?(u=o[0],a=o[1]):(u=f[0],a=f[1],s.invert&&(u=s.invert(u),a=s.invert(a)),u>a&&(h=u,u=a,a=h))),c&&s?[[e,u],[r,a]]:c?[e,r]:s&&[u,a])},n.clear=function(){return n.empty()||(l=[0,0],f=[0,0],i=o=null),n},n.empty=function(){return!!c&&l[0]==l[1]||!!s&&f[0]==f[1]},Xo.rebind(n,a,"on")};var zs={n:"ns-resize",e:"ew-resize",s:"ns-resize",w:"ew-resize",nw:"nwse-resize",ne:"nesw-resize",se:"nwse-resize",sw:"nesw-resize"},Rs=[["n","e","s","w","nw","ne","se","sw"],["e","w"],["n","s"],[]],Ds=tc.format=ac.timeFormat,Ps=Ds.utc,Us=Ps("%Y-%m-%dT%H:%M:%S.%LZ");Ds.iso=Date.prototype.toISOString&&+new Date("2000-01-01T00:00:00.000Z")?Oo:Us,Oo.parse=function(n){var t=new Date(n);return isNaN(t)?null:t},Oo.toString=Us.toString,tc.second=Rt(function(n){return new ec(1e3*Math.floor(n/1e3))},function(n,t){n.setTime(n.getTime()+1e3*Math.floor(t))},function(n){return n.getSeconds()}),tc.seconds=tc.second.range,tc.seconds.utc=tc.second.utc.range,tc.minute=Rt(function(n){return new ec(6e4*Math.floor(n/6e4))},function(n,t){n.setTime(n.getTime()+6e4*Math.floor(t))},function(n){return n.getMinutes()}),tc.minutes=tc.minute.range,tc.minutes.utc=tc.minute.utc.range,tc.hour=Rt(function(n){var t=n.getTimezoneOffset()/60;return new ec(36e5*(Math.floor(n/36e5-t)+t))},function(n,t){n.setTime(n.getTime()+36e5*Math.floor(t))},function(n){return n.getHours()}),tc.hours=tc.hour.range,tc.hours.utc=tc.hour.utc.range,tc.month=Rt(function(n){return 
n=tc.day(n),n.setDate(1),n},function(n,t){n.setMonth(n.getMonth()+t)},function(n){return n.getMonth()}),tc.months=tc.month.range,tc.months.utc=tc.month.utc.range;var js=[1e3,5e3,15e3,3e4,6e4,3e5,9e5,18e5,36e5,108e5,216e5,432e5,864e5,1728e5,6048e5,2592e6,7776e6,31536e6],Hs=[[tc.second,1],[tc.second,5],[tc.second,15],[tc.second,30],[tc.minute,1],[tc.minute,5],[tc.minute,15],[tc.minute,30],[tc.hour,1],[tc.hour,3],[tc.hour,6],[tc.hour,12],[tc.day,1],[tc.day,2],[tc.week,1],[tc.month,1],[tc.month,3],[tc.year,1]],Fs=Ds.multi([[".%L",function(n){return n.getMilliseconds()}],[":%S",function(n){return n.getSeconds()}],["%I:%M",function(n){return n.getMinutes()}],["%I %p",function(n){return n.getHours()}],["%a %d",function(n){return n.getDay()&&1!=n.getDate()}],["%b %d",function(n){return 1!=n.getDate()}],["%B",function(n){return n.getMonth()}],["%Y",be]]),Os={range:function(n,t,e){return Xo.range(Math.ceil(n/e)*e,+t,e).map(Io)},floor:bt,ceil:bt};Hs.year=tc.year,tc.scale=function(){return Yo(Xo.scale.linear(),Hs,Fs)};var Ys=Hs.map(function(n){return[n[0].utc,n[1]]}),Is=Ps.multi([[".%L",function(n){return n.getUTCMilliseconds()}],[":%S",function(n){return n.getUTCSeconds()}],["%I:%M",function(n){return n.getUTCMinutes()}],["%I %p",function(n){return n.getUTCHours()}],["%a %d",function(n){return n.getUTCDay()&&1!=n.getUTCDate()}],["%b %d",function(n){return 1!=n.getUTCDate()}],["%B",function(n){return n.getUTCMonth()}],["%Y",be]]);Ys.year=tc.year.utc,tc.scale.utc=function(){return Yo(Xo.scale.linear(),Ys,Is)},Xo.text=wt(function(n){return n.responseText}),Xo.json=function(n,t){return St(n,"application/json",Zo,t)},Xo.html=function(n,t){return St(n,"text/html",Vo,t)},Xo.xml=wt(function(n){return n.responseXML}),"function"==typeof define&&define.amd?define(Xo):"object"==typeof module&&module.exports?module.exports=Xo:this.d3=Xo}();'use strict';(function(window){window.define=undefined;}).call(this,this);'use strict';tr.exportTo('tr.ui.b',function(){const 
DataSeriesEnableChangeEventType='data-series-enabled-change';const THIS_DOC=document.currentScript.ownerDocument;const svgNS='http://www.w3.org/2000/svg';const ColorScheme=tr.b.ColorScheme;function getColorOfKey(key,selected){let id=ColorScheme.getColorIdForGeneralPurposeString(key);if(selected){id+=ColorScheme.properties.brightenedOffsets[0];} return ColorScheme.colorsAsStrings[id];} function getSVGTextSize(parentNode,text,opt_callback,opt_this){const textNode=document.createElementNS('http://www.w3.org/2000/svg','text');textNode.setAttributeNS(null,'x',0);textNode.setAttributeNS(null,'y',0);textNode.setAttributeNS(null,'fill','black');textNode.appendChild(document.createTextNode(text));parentNode.appendChild(textNode);if(opt_callback){opt_callback.call(opt_this||parentNode,textNode);} const width=textNode.getComputedTextLength();const height=textNode.getBBox().height;parentNode.removeChild(textNode);return{width,height};} @@ -8200,7 +8200,7 @@ if(rendererHelper.isTelemetryInternalEvent(ev))continue;const frameIdRef=ev.args.frame;if(frameIdRef===undefined)continue;let list=candidatesForFrameId[frameIdRef];if(list===undefined){candidatesForFrameId[frameIdRef]=list=[];} list.push(ev);} return candidatesForFrameId;} -const URL_EXCLUSION=['about:blank','data:text/html,pluginplaceholderdata','data:text/html,chromewebdata'];function shouldIgnoreURL(url){return URL_EXCLUSION.includes(url);} +const URL_BLACKLIST=['about:blank','data:text/html,pluginplaceholderdata','data:text/html,chromewebdata'];function shouldIgnoreURL(url){return URL_BLACKLIST.includes(url);} function collectTimeToEvent(category,eventName,rendererHelper,navigationStartFinder){const targetEvents=findAllEvents(rendererHelper,category,eventName);const samples=[];for(const ev of targetEvents){if(rendererHelper.isTelemetryInternalEvent(ev))continue;const frameIdRef=ev.args.frame;const 
snapshot=findFrameLoaderSnapshotAt(rendererHelper,frameIdRef,ev.start);if(snapshot===undefined||!snapshot.args.isLoadingMainFrame)continue;const url=snapshot.args.documentLoaderURL;if(shouldIgnoreURL(url))continue;const navigationStartEvent=navigationStartFinder.findNavigationStartEventForFrameBeforeTimestamp(frameIdRef,ev.start);if(navigationStartEvent===undefined)continue;const timeToEvent=ev.start-navigationStartEvent.start;samples.push({value:timeToEvent,diagnostics:{url:new tr.v.d.Generic(url)}});} return samples;} function addFirstMeaningfulPaintSample(samples,rendererHelper,frameIdRef,navigationStart,fmpMarkerEvent){const snapshot=findFrameLoaderSnapshotAt(rendererHelper,frameIdRef,fmpMarkerEvent.start);if(!snapshot||!snapshot.args.isLoadingMainFrame)return;const url=snapshot.args.documentLoaderURL;if(shouldIgnoreURL(url))return;const navStartToFMPRange=tr.b.math.Range.fromExplicitRange(navigationStart.start,fmpMarkerEvent.start);const networkEvents=getNetworkEventsInRange(rendererHelper.process,navStartToFMPRange);const timeToFirstMeaningfulPaint=navStartToFMPRange.duration;const breakdownTree=tr.metrics.sh.generateWallClockTimeBreakdownTree(rendererHelper.mainThread,networkEvents,navStartToFMPRange);const breakdownDiagnostic=createBreakdownDiagnostic(breakdownTree);samples.push({value:timeToFirstMeaningfulPaint,diagnostics:{'Breakdown of [navStart, FMP]':breakdownDiagnostic,'Start':new RelatedEventSet(navigationStart),'End':new RelatedEventSet(fmpMarkerEvent),'Navigation infos':new tr.v.d.Generic({url,pid:rendererHelper.pid,start:navigationStart.start,fmp:fmpMarkerEvent.start}),}});return{firstMeaningfulPaint:fmpMarkerEvent.start,url};} @@ -8218,7 +8218,7 @@ return{firstMeaningfulPaintSamples,firstMeaningfulPaintCpuTimeSamples,firstInteractiveSamples};} function collectLoadingMetricsForRenderer(rendererHelper){const navigationStartFinder=new NavigationStartFinder(rendererHelper);const 
firstContentfulPaintSamples=collectTimeToEvent('loading','firstContentfulPaint',rendererHelper,navigationStartFinder);const onLoadSamples=collectTimeToEvent('blink.user_timing','loadEventStart',rendererHelper,navigationStartFinder);const{firstMeaningfulPaintSamples,firstMeaningfulPaintCpuTimeSamples,firstInteractiveSamples}=collectFirstMeaningfulPaintAndTimeToInteractiveForRenderer(rendererHelper,navigationStartFinder);return{firstContentfulPaintSamples,onLoadSamples,firstMeaningfulPaintSamples,firstMeaningfulPaintCpuTimeSamples,firstInteractiveSamples};} function addSamplesToHistogram(samples,histogram){for(const sample of samples){histogram.addSample(sample.value,sample.diagnostics);}} -function loadingMetric(histograms,model){const firstContentfulPaintHistogram=createHistogram('timeToFirstContentfulPaint');firstContentfulPaintHistogram.description='time to first contentful paint';const onLoadHistogram=createHistogram('timeToOnload');onLoadHistogram.description='time to onload. '+'This is temporary metric used for PCv1/v2 correctness checking';const firstMeaningfulPaintHistogram=createHistogram('timeToFirstMeaningfulPaint');firstMeaningfulPaintHistogram.description='time to first meaningful paint';const firstMeaningfulPaintCpuTimeHistogram=createHistogram('cpuTimeToFirstMeaningfulPaint');firstMeaningfulPaintCpuTimeHistogram.description='CPU time to first meaningful paint';const firstInteractiveHistogram=createHistogram('timeToFirstInteractive');firstInteractiveHistogram.description='time to first interactive';const chromeHelper=model.getOrCreateHelper(tr.model.helpers.ChromeModelHelper);for(const pid in chromeHelper.rendererHelpers){const 
rendererHelper=chromeHelper.rendererHelpers[pid];if(rendererHelper.isChromeTracingUI)continue;const{firstContentfulPaintSamples,onLoadSamples,firstMeaningfulPaintSamples,firstMeaningfulPaintCpuTimeSamples,firstInteractiveSamples}=collectLoadingMetricsForRenderer(rendererHelper);addSamplesToHistogram(firstContentfulPaintSamples,firstContentfulPaintHistogram);addSamplesToHistogram(onLoadSamples,onLoadHistogram);addSamplesToHistogram(firstMeaningfulPaintSamples,firstMeaningfulPaintHistogram);addSamplesToHistogram(firstMeaningfulPaintCpuTimeSamples,firstMeaningfulPaintCpuTimeHistogram);addSamplesToHistogram(firstInteractiveSamples,firstInteractiveHistogram);} +function loadingMetric(histograms,model){const firstContentfulPaintHistogram=createHistogram('timeToFirstContentfulPaint');firstContentfulPaintHistogram.description='time to first contentful paint';const onLoadHistogram=createHistogram('timeToOnload');onLoadHistogram.description='time to onload. '+'This is temporary metric used for PCv1/v2 sanity checking';const firstMeaningfulPaintHistogram=createHistogram('timeToFirstMeaningfulPaint');firstMeaningfulPaintHistogram.description='time to first meaningful paint';const firstMeaningfulPaintCpuTimeHistogram=createHistogram('cpuTimeToFirstMeaningfulPaint');firstMeaningfulPaintCpuTimeHistogram.description='CPU time to first meaningful paint';const firstInteractiveHistogram=createHistogram('timeToFirstInteractive');firstInteractiveHistogram.description='time to first interactive';const chromeHelper=model.getOrCreateHelper(tr.model.helpers.ChromeModelHelper);for(const pid in chromeHelper.rendererHelpers){const 
rendererHelper=chromeHelper.rendererHelpers[pid];if(rendererHelper.isChromeTracingUI)continue;const{firstContentfulPaintSamples,onLoadSamples,firstMeaningfulPaintSamples,firstMeaningfulPaintCpuTimeSamples,firstInteractiveSamples}=collectLoadingMetricsForRenderer(rendererHelper);addSamplesToHistogram(firstContentfulPaintSamples,firstContentfulPaintHistogram);addSamplesToHistogram(onLoadSamples,onLoadHistogram);addSamplesToHistogram(firstMeaningfulPaintSamples,firstMeaningfulPaintHistogram);addSamplesToHistogram(firstMeaningfulPaintCpuTimeSamples,firstMeaningfulPaintCpuTimeHistogram);addSamplesToHistogram(firstInteractiveSamples,firstInteractiveHistogram);} histograms.addHistogram(firstContentfulPaintHistogram);histograms.addHistogram(onLoadHistogram);histograms.addHistogram(firstMeaningfulPaintHistogram);histograms.addHistogram(firstMeaningfulPaintCpuTimeHistogram);histograms.addHistogram(firstInteractiveHistogram);} tr.metrics.MetricRegistry.register(loadingMetric);return{loadingMetric,getNetworkEventsInRange,collectLoadingMetricsForRenderer,RESPONSIVENESS_THRESHOLD_MS,INTERACTIVE_WINDOW_SIZE_MS,};});'use strict';tr.exportTo('tr.metrics',function(){const SPA_NAVIGATION_START_TO_FIRST_PAINT_DURATION_BIN_BOUNDARY=tr.v.HistogramBinBoundaries.createExponential(1,1000,50);function spaNavigationMetric(histograms,model){const histogram=new tr.v.Histogram('spaNavigationStartToFpDuration',tr.b.Unit.byName.timeDurationInMs_smallerIsBetter,SPA_NAVIGATION_START_TO_FIRST_PAINT_DURATION_BIN_BOUNDARY);histogram.description='Latency between the input event causing'+' a SPA navigation and the first paint event after it';histogram.customizeSummaryOptions({count:false,sum:false,});const modelHelper=model.getOrCreateHelper(tr.model.helpers.ChromeModelHelper);if(!modelHelper){return;} const rendererHelpers=modelHelper.rendererHelpers;if(!rendererHelpers){return;} @@ -8270,7 +8270,7 @@ if(!found){diagnosticsToCounts.set(diagnostic,1);}}} for(const[diagnostic,count]of 
diagnosticsToCounts){if(count>1){this.sharedDiagnosticsByGuid_.set(diagnostic.guid,diagnostic);}}} mergeRelationships(){for(const hist of this){hist.diagnostics.mergeRelationships(hist);}}} -HistogramSet.GROUPINGS={HISTOGRAM_NAME:new HistogramGrouping('name',h=>h.name),BENCHMARK_NAME:new HistogramGrouping('benchmark',h=>tr.v.d.TelemetryInfo.getField(h,'benchmarkName','')),BENCHMARK_START:new HistogramGrouping('time',h=>tr.v.d.TelemetryInfo.getField(h,'benchmarkStartString','')),STORYSET_REPEAT:new HistogramGrouping('storyset_repeat',h=>tr.v.d.TelemetryInfo.getField(h,'storysetRepeatCounterLabel',0),'storyset repeat'),STORY_NAME:new HistogramGrouping('story',h=>tr.v.d.TelemetryInfo.getField(h,'storyDisplayName','')),LEGACY_TIR_LABEL:new HistogramGrouping('tir',h=>tr.v.d.TelemetryInfo.getField(h,'legacyTIRLabel','')),PRIMARY_NAME:new HistogramGrouping('primary',h=>tr.v.d.BuildbotInfo.getField(h,'buildbotMasterName','')),PARTNER_NAME:new HistogramGrouping('bot',h=>tr.v.d.BuildbotInfo.getField(h,'buildbotName','')),BUILD_NUMBER:new HistogramGrouping('build',h=>tr.v.d.BuildbotInfo.getField(h,'buildNumber','')),DISPLAY_LABEL:new HistogramGrouping('label',h=>tr.v.d.TelemetryInfo.getField(h,'displayLabel','Value'))};return{HistogramGrouping,HistogramSet,};});'use strict';tr.exportTo('tr.e.chrome',function(){function hasTitleAndCategory(event,title,category){return event.title===title&&event.category&&tr.b.getCategoryParts(event.category).includes(category);} +HistogramSet.GROUPINGS={HISTOGRAM_NAME:new HistogramGrouping('name',h=>h.name),BENCHMARK_NAME:new HistogramGrouping('benchmark',h=>tr.v.d.TelemetryInfo.getField(h,'benchmarkName','')),BENCHMARK_START:new HistogramGrouping('time',h=>tr.v.d.TelemetryInfo.getField(h,'benchmarkStartString','')),STORYSET_REPEAT:new HistogramGrouping('storyset_repeat',h=>tr.v.d.TelemetryInfo.getField(h,'storysetRepeatCounterLabel',0),'storyset repeat'),STORY_NAME:new 
HistogramGrouping('story',h=>tr.v.d.TelemetryInfo.getField(h,'storyDisplayName','')),LEGACY_TIR_LABEL:new HistogramGrouping('tir',h=>tr.v.d.TelemetryInfo.getField(h,'legacyTIRLabel','')),MASTER_NAME:new HistogramGrouping('master',h=>tr.v.d.BuildbotInfo.getField(h,'buildbotMasterName','')),SLAVE_NAME:new HistogramGrouping('bot',h=>tr.v.d.BuildbotInfo.getField(h,'buildbotName','')),BUILD_NUMBER:new HistogramGrouping('build',h=>tr.v.d.BuildbotInfo.getField(h,'buildNumber','')),DISPLAY_LABEL:new HistogramGrouping('label',h=>tr.v.d.TelemetryInfo.getField(h,'displayLabel','Value'))};return{HistogramGrouping,HistogramSet,};});'use strict';tr.exportTo('tr.e.chrome',function(){function hasTitleAndCategory(event,title,category){return event.title===title&&event.category&&tr.b.getCategoryParts(event.category).includes(category);} function getNavStartTimestamps(rendererHelper){const navStartTimestamps=[];for(const e of rendererHelper.mainThread.sliceGroup.childEvents()){if(hasTitleAndCategory(e,'navigationStart','blink.user_timing')){navStartTimestamps.push(e.start);}} return navStartTimestamps;} function getInteractiveTimestamps(model){const interactiveTimestampsMap=new Map();const chromeHelper=model.getOrCreateHelper(tr.model.helpers.ChromeModelHelper);for(const rendererHelper of Object.values(chromeHelper.rendererHelpers)){const timestamps=[];interactiveTimestampsMap.set(rendererHelper.pid,timestamps);const samples=tr.metrics.sh.collectLoadingMetricsForRenderer(rendererHelper).firstInteractiveSamples;for(const sample of samples){timestamps.push(sample.diagnostics['Navigation infos'].value.interactive);}} @@ -8864,8 +8864,8 @@ const chartData={x:0};for(const row of tableRows){if(row.numberValue===undefined)continue;row.tableSum=tableSum;chartData[row.name]=row.numberValue;const dataSeries=this.chart_.getDataSeries(row.name);dataSeries.color=row.color;dataSeries.highlightedColor=row.highlightedColor;} 
if(tableRows.length>0){this.$.table.style.display='block';this.$.empty.style.display='none';this.$.table.tableRows=tableRows;this.$.table.rebuild();} if(Object.keys(chartData).length>1){this.$.container.style.display='block';this.$.empty.style.display='none';this.chart_.data=[chartData];}}});'use strict';Polymer({is:'tr-v-ui-buildbot-info-span',ready(){this.diagnostic_=undefined;this.$.table.showHeader=false;this.$.table.tableColumns=[{value:row=>row[0]},{value:row=>row[1]}];},get diagnostic(){return this.diagnostic_;},set diagnostic(d){this.diagnostic_=d;this.updateContents_();},updateContents_(){if(this.diagnostic===undefined){this.$.table.tableRows=[];return;} -const rows=[];if(this.diagnostic.displayMasterName){rows.push(['primary',this.diagnostic.displayMasterName]);} -if(this.diagnostic.buildbotMasterName){rows.push(['primary',this.diagnostic.buildbotMasterName]);} +const rows=[];if(this.diagnostic.displayMasterName){rows.push(['master',this.diagnostic.displayMasterName]);} +if(this.diagnostic.buildbotMasterName){rows.push(['master',this.diagnostic.buildbotMasterName]);} if(this.diagnostic.displayBotName){rows.push(['bot',this.diagnostic.displayBotName]);} if(this.diagnostic.buildbotName){rows.push(['bot',this.diagnostic.buildbotName]);} if(this.diagnostic.buildNumber){rows.push(['build number',this.diagnostic.buildNumber]);} @@ -8881,7 +8881,7 @@ if(this.diagnostic.ram){rows.push(['ram',tr.b.Unit.byName.sizeInBytes.format(this.diagnostic.ram)]);} this.$.table.tableRows=rows;}});'use strict';Polymer({is:'tr-v-ui-generic-diagnostic-span',ready(){this.diagnostic_=undefined;},get diagnostic(){return this.diagnostic_;},set diagnostic(d){this.diagnostic_=d;this.updateContents_();},updateContents_(){if(this.diagnostic===undefined){this.$.generic.object=undefined;return;} this.$.generic.object=this.diagnostic.value;}});'use 
strict';Polymer({is:'tr-v-ui-merged-buildbot-info-span',ready(){this.diagnostic_=undefined;this.$.table.showHeader=false;this.$.table.tableColumns=[{value:row=>row[0]},{value:row=>row[1]},];},get diagnostic(){return this.diagnostic_;},set diagnostic(d){this.diagnostic_=d;this.updateContents_();},updateContents_(){if(this.diagnostic===undefined){this.$.table.tableRows=[];return;} -const rows=[];if(this.diagnostic.displayMasterNames.size){rows.push(['primarys',Array.from(this.diagnostic.displayMasterNames).join(', ')]);} +const rows=[];if(this.diagnostic.displayMasterNames.size){rows.push(['masters',Array.from(this.diagnostic.displayMasterNames).join(', ')]);} if(this.diagnostic.displayBotNames.size){rows.push(['bots',Array.from(this.diagnostic.displayBotNames).join(', ')]);} if(this.diagnostic.buildNumbers.size){rows.push(['builds',Array.from(this.diagnostic.buildNumbers).join(', ')]);} for(const logUri of this.diagnostic.logUris){const anchor=document.createElement('a');anchor.href=logUri;anchor.innerText=logUri;rows.push(['log',anchor]);} @@ -10125,10 +10125,10 @@ root 519 2 0 0 irq_thread 0 S [irq/128-arm-smm] 5 root 520 2 0 0 rescuer_thread 0 S [sb-1] 5 root 521 2 0 0 ngd_slim_rx_msgq_thread 0 S [ngd_rx_thread1] 5 -root 522 2 0 0 ngd_notify_partners 0 S [ngd_notify_sl1] 5 +root 522 2 0 0 ngd_notify_slaves 0 S [ngd_notify_sl1] 5 root 523 2 0 0 rescuer_thread 0 S [sb-3] 5 root 524 2 0 0 ngd_slim_rx_msgq_thread 0 S [ngd_rx_thread3] 5 -root 525 2 0 0 ngd_notify_partners 0 S [ngd_notify_sl3] 5 +root 525 2 0 0 ngd_notify_slaves 0 S [ngd_notify_sl3] 5 root 526 2 0 0 sensor_sysfs_notify 0 S [therm_core:noti] 5 root 527 2 0 0 sensor_sysfs_notify 0 S [therm_core:noti] 5 root 528 2 0 0 sensor_sysfs_notify 0 S [therm_core:noti] 5 @@ -10175,7 +10175,7 @@ system 597 580 28240 928 SyS_rt_sigsuspend 7643332634 S qseecomd qseecomd root 606 1 13352 2320 binder_ioctl_write_read 78b565355c S [email protected] [email protected] system 607 1 14464 2432 binder_ioctl_write_read 
70106ed55c S [email protected] [email protected] -system 608 1 14640 2972 binder_ioctl_write_read 79bd92a55c S [email protected] [email protected] +system 608 1 14640 2972 binder_ioctl_write_read 79bd92a55c S [email protected] [email protected] system 610 1 72936 20336 SyS_epoll_wait 796c90946c S surfaceflinger surfaceflinger system 612 1 39400 7656 binder_ioctl_write_read 7cbc3fc55c S [email protected] [email protected] system 613 1 17476 2436 binder_ioctl_write_read 7f5b86a55c S [email protected] [email protected] @@ -10894,7 +10894,7 @@ system 597 604 qseecomd root 606 606 [email protected] system 607 607 [email protected] -system 608 608 [email protected] +system 608 608 [email protected] system 610 610 surfaceflinger system 610 626 HwBinder:610_1 system 610 627 Binder:610_1 @@ -12868,10 +12868,10 @@ root 519 2 0 0 irq_thread 0 S [irq/128-arm-smm] 5 root 520 2 0 0 rescuer_thread 0 S [sb-1] 5 root 521 2 0 0 ngd_slim_rx_msgq_thread 0 S [ngd_rx_thread1] 5 -root 522 2 0 0 ngd_notify_partners 0 S [ngd_notify_sl1] 5 +root 522 2 0 0 ngd_notify_slaves 0 S [ngd_notify_sl1] 5 root 523 2 0 0 rescuer_thread 0 S [sb-3] 5 root 524 2 0 0 ngd_slim_rx_msgq_thread 0 S [ngd_rx_thread3] 5 -root 525 2 0 0 ngd_notify_partners 0 S [ngd_notify_sl3] 5 +root 525 2 0 0 ngd_notify_slaves 0 S [ngd_notify_sl3] 5 root 526 2 0 0 sensor_sysfs_notify 0 S [therm_core:noti] 5 root 527 2 0 0 sensor_sysfs_notify 0 S [therm_core:noti] 5 root 528 2 0 0 sensor_sysfs_notify 0 S [therm_core:noti] 5 @@ -12918,7 +12918,7 @@ system 597 580 28240 928 SyS_rt_sigsuspend 7643332634 S qseecomd qseecomd root 606 1 13352 2732 binder_ioctl_write_read 78b565355c S [email protected] [email protected] system 607 1 14464 2828 binder_ioctl_write_read 70106ed55c S [email protected] [email protected] -system 608 1 14640 3244 binder_ioctl_write_read 79bd92a55c S [email protected] [email protected] +system 608 1 14640 3244 binder_ioctl_write_read 79bd92a55c S [email protected] [email protected] system 610 1 72936 20132 
SyS_epoll_wait 796c90946c S surfaceflinger surfaceflinger system 612 1 39400 7148 binder_ioctl_write_read 7cbc3fc55c S [email protected] [email protected] system 614 1 20296 3156 binder_ioctl_write_read 74c594755c S [email protected] [email protected] @@ -13632,7 +13632,7 @@ system 597 604 qseecomd root 606 606 [email protected] system 607 607 [email protected] -system 608 608 [email protected] +system 608 608 [email protected] system 610 610 surfaceflinger system 610 626 HwBinder:610_1 system 610 627 Binder:610_1 @@ -26342,7 +26342,7 @@ atrace-13734 (13734) [001] ...1 158025.773116: tracing_mark_write: B|13734|HIDL::IServiceManager::get::client hwservicemanag-584 ( 584) [001] ...1 158025.773160: tracing_mark_write: B|584|HIDL::IServiceManager::get::server hwservicemanag-584 ( 584) [001] ...1 158025.773261: tracing_mark_write: E - [email protected] ( 608) [005] ...1 158025.773265: tracing_mark_write: B|608|HIDL::IBase::notifySyspropsChanged::server + [email protected] ( 608) [005] ...1 158025.773265: tracing_mark_write: B|608|HIDL::IBase::notifySyspropsChanged::server atrace-13734 (13734) [001] ...1 158025.773328: tracing_mark_write: E atrace-13734 (13734) [001] ...1 158025.773335: tracing_mark_write: B|13734|HIDL::IBase::notifySyspropsChanged::client atrace-13734 (13734) [001] ...1 158025.773389: tracing_mark_write: E @@ -26357,7 +26357,7 @@ atrace-13734 (13734) [001] ...1 158025.773652: tracing_mark_write: E atrace-13734 (13734) [001] ...1 158025.773666: tracing_mark_write: B|13734|HIDL::IServiceManager::get::client hwservicemanag-584 ( 584) [001] ...1 158025.773705: tracing_mark_write: B|584|HIDL::IServiceManager::get::server - [email protected] ( 608) [005] ...1 158025.773725: tracing_mark_write: E + [email protected] ( 608) [005] ...1 158025.773725: tracing_mark_write: E hwservicemanag-584 ( 584) [001] ...1 158025.773820: tracing_mark_write: E atrace-13734 (13734) [001] ...1 158025.773889: tracing_mark_write: E atrace-13734 (13734) [001] ...1 158025.773896: 
tracing_mark_write: B|13734|HIDL::IBase::notifySyspropsChanged::client
diff --git a/tools/systrace_parser/parser/test/unittest.html b/tools/systrace_parser/parser/test/unittest.html index 9a31e77..38fb55b 100644 --- a/tools/systrace_parser/parser/test/unittest.html +++ b/tools/systrace_parser/parser/test/unittest.html
@@ -4734,7 +4734,7 @@ visitedDomainIds.add(current.domainId);const outgoingTransformers=this.transformerMapByDomainId_[current.domainId];if(!outgoingTransformers)continue;for(const outgoingDomainId in outgoingTransformers){const toNextDomainTransformer=outgoingTransformers[outgoingDomainId];const toCurrentDomainTransformer=current.transformer;queue.push({domainId:outgoingDomainId,transformer:Transformer.compose(toNextDomainTransformer,toCurrentDomainTransformer)});}} return undefined;},selectModelDomainId_(){this.ensureAllDomainsAreConnected_();for(const chromeDomainId of POSSIBLE_CHROME_CLOCK_DOMAINS){if(this.domainsSeen_.has(chromeDomainId)){this.modelDomainId_=chromeDomainId;return;}} const domainsSeenArray=Array.from(this.domainsSeen_);domainsSeenArray.sort();this.modelDomainId_=domainsSeenArray[0];},ensureAllDomainsAreConnected_(){let firstDomainId=undefined;for(const domainId of this.domainsSeen_){if(!firstDomainId){firstDomainId=domainId;continue;} -if(!this.getTransformerBetween_(firstDomainId,domainId)){throw new Error('Unable to select a primary clock domain because no '+'path can be found from "'+firstDomainId+'" to "'+domainId+'".');}} +if(!this.getTransformerBetween_(firstDomainId,domainId)){throw new Error('Unable to select a master clock domain because no '+'path can be found from "'+firstDomainId+'" to "'+domainId+'".');}} return true;},onDomainSeen_(domainId){if(domainId===ClockDomainId.UNKNOWN_CHROME_LEGACY&&!this.domainsSeen_.has(ClockDomainId.UNKNOWN_CHROME_LEGACY)){for(const chromeDomainId of POSSIBLE_CHROME_CLOCK_DOMAINS){if(chromeDomainId===ClockDomainId.UNKNOWN_CHROME_LEGACY){continue;} this.collapseDomains_(ClockDomainId.UNKNOWN_CHROME_LEGACY,chromeDomainId);}} this.domainsSeen_.add(domainId);},onSyncCompleted_(marker1,marker2){const forwardTransformer=Transformer.fromMarkers(marker1,marker2);const backwardTransformer=Transformer.fromMarkers(marker2,marker1);const 
existingTransformer=this.getOrCreateTransformerMap_(marker1.domainId)[marker2.domainId];if(!existingTransformer||forwardTransformer.error<existingTransformer.error){this.getOrCreateTransformerMap_(marker1.domainId)[marker2.domainId]=forwardTransformer;this.getOrCreateTransformerMap_(marker2.domainId)[marker1.domainId]=backwardTransformer;}},collapseDomains_(domain1Id,domain2Id){this.getOrCreateTransformerMap_(domain1Id)[domain2Id]=this.getOrCreateTransformerMap_(domain2Id)[domain1Id]=Transformer.IDENTITY;},getOrCreateTransformerMap_(domainId){if(!this.transformerMapByDomainId_[domainId]){this.transformerMapByDomainId_[domainId]={};} @@ -5152,7 +5152,7 @@ static uint8ArrayToString_(arr){if(typeof TextDecoder!=='undefined'){const decoder=new TextDecoder('utf-8');return decoder.decode(arr);} const c=[];for(let i=0;i<arr.length;i+=MAX_FUNCTION_ARGS_COUNT){c.push(String.fromCharCode(...arr.subarray(i,i+MAX_FUNCTION_ARGS_COUNT)));} return c.join('');}} -return{InMemoryTraceStream,};});!function(t){if("object"==typeof exports&&"undefined"!=typeof module)module.exports=t();else if("function"==typeof define&&define.amd)define([],t);else{("undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:this).pako=t()}}(function(){return function t(e,a,i){function n(s,o){if(!a[s]){if(!e[s]){var l="function"==typeof require&&require;if(!o&&l)return l(s,!0);if(r)return r(s,!0);var h=new Error("Cannot find module '"+s+"'");throw h.code="MODULE_NOT_FOUND",h}var d=a[s]={exports:{}};e[s][0].call(d.exports,function(t){var a=e[s][1][t];return n(a||t)},d,d.exports,t,e,a,i)}return a[s].exports}for(var r="function"==typeof require&&require,s=0;s<i.length;s++)n(i[s]);return n}({1:[function(t,e,a){"use strict";function i(t){if(!(this instanceof i))return new i(t);this.options=s.assign({level:_,method:c,chunkSize:16384,windowBits:15,memLevel:8,strategy:u,to:""},t||{});var 
e=this.options;e.raw&&e.windowBits>0?e.windowBits=-e.windowBits:e.gzip&&e.windowBits>0&&e.windowBits<16&&(e.windowBits+=16),this.err=0,this.msg="",this.ended=!1,this.chunks=[],this.strm=new h,this.strm.avail_out=0;var a=r.deflateInit2(this.strm,e.level,e.method,e.windowBits,e.memLevel,e.strategy);if(a!==f)throw new Error(l[a]);if(e.header&&r.deflateSetHeader(this.strm,e.header),e.dictionary){var n;if(n="string"==typeof e.dictionary?o.string2buf(e.dictionary):"[object ArrayBuffer]"===d.call(e.dictionary)?new Uint8Array(e.dictionary):e.dictionary,(a=r.deflateSetDictionary(this.strm,n))!==f)throw new Error(l[a]);this._dict_set=!0}}function n(t,e){var a=new i(e);if(a.push(t,!0),a.err)throw a.msg||l[a.err];return a.result}var r=t("./zlib/deflate"),s=t("./utils/common"),o=t("./utils/strings"),l=t("./zlib/messages"),h=t("./zlib/zstream"),d=Object.prototype.toString,f=0,_=-1,u=0,c=8;i.prototype.push=function(t,e){var a,i,n=this.strm,l=this.options.chunkSize;if(this.ended)return!1;i=e===~~e?e:!0===e?4:0,"string"==typeof t?n.input=o.string2buf(t):"[object ArrayBuffer]"===d.call(t)?n.input=new Uint8Array(t):n.input=t,n.next_in=0,n.avail_in=n.input.length;do{if(0===n.avail_out&&(n.output=new s.Buf8(l),n.next_out=0,n.avail_out=l),1!==(a=r.deflate(n,i))&&a!==f)return this.onEnd(a),this.ended=!0,!1;0!==n.avail_out&&(0!==n.avail_in||4!==i&&2!==i)||("string"===this.options.to?this.onData(o.buf2binstring(s.shrinkBuf(n.output,n.next_out))):this.onData(s.shrinkBuf(n.output,n.next_out)))}while((n.avail_in>0||0===n.avail_out)&&1!==a);return 4===i?(a=r.deflateEnd(this.strm),this.onEnd(a),this.ended=!0,a===f):2!==i||(this.onEnd(f),n.avail_out=0,!0)},i.prototype.onData=function(t){this.chunks.push(t)},i.prototype.onEnd=function(t){t===f&&("string"===this.options.to?this.result=this.chunks.join(""):this.result=s.flattenChunks(this.chunks)),this.chunks=[],this.err=t,this.msg=this.strm.msg},a.Deflate=i,a.deflate=n,a.deflateRaw=function(t,e){return 
e=e||{},e.raw=!0,n(t,e)},a.gzip=function(t,e){return e=e||{},e.gzip=!0,n(t,e)}},{"./utils/common":3,"./utils/strings":4,"./zlib/deflate":8,"./zlib/messages":13,"./zlib/zstream":15}],2:[function(t,e,a){"use strict";function i(t){if(!(this instanceof i))return new i(t);this.options=s.assign({chunkSize:16384,windowBits:0,to:""},t||{});var e=this.options;e.raw&&e.windowBits>=0&&e.windowBits<16&&(e.windowBits=-e.windowBits,0===e.windowBits&&(e.windowBits=-15)),!(e.windowBits>=0&&e.windowBits<16)||t&&t.windowBits||(e.windowBits+=32),e.windowBits>15&&e.windowBits<48&&0==(15&e.windowBits)&&(e.windowBits|=15),this.err=0,this.msg="",this.ended=!1,this.chunks=[],this.strm=new d,this.strm.avail_out=0;var a=r.inflateInit2(this.strm,e.windowBits);if(a!==l.Z_OK)throw new Error(h[a]);this.header=new f,r.inflateGetHeader(this.strm,this.header)}function n(t,e){var a=new i(e);if(a.push(t,!0),a.err)throw a.msg||h[a.err];return a.result}var r=t("./zlib/inflate"),s=t("./utils/common"),o=t("./utils/strings"),l=t("./zlib/constants"),h=t("./zlib/messages"),d=t("./zlib/zstream"),f=t("./zlib/gzheader"),_=Object.prototype.toString;i.prototype.push=function(t,e){var a,i,n,h,d,f,u=this.strm,c=this.options.chunkSize,b=this.options.dictionary,g=!1;if(this.ended)return!1;i=e===~~e?e:!0===e?l.Z_FINISH:l.Z_NO_FLUSH,"string"==typeof t?u.input=o.binstring2buf(t):"[object ArrayBuffer]"===_.call(t)?u.input=new Uint8Array(t):u.input=t,u.next_in=0,u.avail_in=u.input.length;do{if(0===u.avail_out&&(u.output=new s.Buf8(c),u.next_out=0,u.avail_out=c),(a=r.inflate(u,l.Z_NO_FLUSH))===l.Z_NEED_DICT&&b&&(f="string"==typeof b?o.string2buf(b):"[object ArrayBuffer]"===_.call(b)?new Uint8Array(b):b,a=r.inflateSetDictionary(this.strm,f)),a===l.Z_BUF_ERROR&&!0===g&&(a=l.Z_OK,g=!1),a!==l.Z_STREAM_END&&a!==l.Z_OK)return 
this.onEnd(a),this.ended=!0,!1;u.next_out&&(0!==u.avail_out&&a!==l.Z_STREAM_END&&(0!==u.avail_in||i!==l.Z_FINISH&&i!==l.Z_SYNC_FLUSH)||("string"===this.options.to?(n=o.utf8border(u.output,u.next_out),h=u.next_out-n,d=o.buf2string(u.output,n),u.next_out=h,u.avail_out=c-h,h&&s.arraySet(u.output,u.output,n,h,0),this.onData(d)):this.onData(s.shrinkBuf(u.output,u.next_out)))),0===u.avail_in&&0===u.avail_out&&(g=!0)}while((u.avail_in>0||0===u.avail_out)&&a!==l.Z_STREAM_END);return a===l.Z_STREAM_END&&(i=l.Z_FINISH),i===l.Z_FINISH?(a=r.inflateEnd(this.strm),this.onEnd(a),this.ended=!0,a===l.Z_OK):i!==l.Z_SYNC_FLUSH||(this.onEnd(l.Z_OK),u.avail_out=0,!0)},i.prototype.onData=function(t){this.chunks.push(t)},i.prototype.onEnd=function(t){t===l.Z_OK&&("string"===this.options.to?this.result=this.chunks.join(""):this.result=s.flattenChunks(this.chunks)),this.chunks=[],this.err=t,this.msg=this.strm.msg},a.Inflate=i,a.inflate=n,a.inflateRaw=function(t,e){return e=e||{},e.raw=!0,n(t,e)},a.ungzip=n},{"./utils/common":3,"./utils/strings":4,"./zlib/constants":6,"./zlib/gzheader":9,"./zlib/inflate":11,"./zlib/messages":13,"./zlib/zstream":15}],3:[function(t,e,a){"use strict";function i(t,e){return Object.prototype.hasOwnProperty.call(t,e)}var n="undefined"!=typeof Uint8Array&&"undefined"!=typeof Uint16Array&&"undefined"!=typeof Int32Array;a.assign=function(t){for(var e=Array.prototype.slice.call(arguments,1);e.length;){var a=e.shift();if(a){if("object"!=typeof a)throw new TypeError(a+"must be non-object");for(var n in a)i(a,n)&&(t[n]=a[n])}}return t},a.shrinkBuf=function(t,e){return t.length===e?t:t.subarray?t.subarray(0,e):(t.length=e,t)};var r={arraySet:function(t,e,a,i,n){if(e.subarray&&t.subarray)t.set(e.subarray(a,a+i),n);else for(var r=0;r<i;r++)t[n+r]=e[a+r]},flattenChunks:function(t){var e,a,i,n,r,s;for(i=0,e=0,a=t.length;e<a;e++)i+=t[e].length;for(s=new Uint8Array(i),n=0,e=0,a=t.length;e<a;e++)r=t[e],s.set(r,n),n+=r.length;return s}},s={arraySet:function(t,e,a,i,n){for(var 
r=0;r<i;r++)t[n+r]=e[a+r]},flattenChunks:function(t){return[].concat.apply([],t)}};a.setTyped=function(t){t?(a.Buf8=Uint8Array,a.Buf16=Uint16Array,a.Buf32=Int32Array,a.assign(a,r)):(a.Buf8=Array,a.Buf16=Array,a.Buf32=Array,a.assign(a,s))},a.setTyped(n)},{}],4:[function(t,e,a){"use strict";function i(t,e){if(e<65537&&(t.subarray&&s||!t.subarray&&r))return String.fromCharCode.apply(null,n.shrinkBuf(t,e));for(var a="",i=0;i<e;i++)a+=String.fromCharCode(t[i]);return a}var n=t("./common"),r=!0,s=!0;try{String.fromCharCode.apply(null,[0])}catch(t){r=!1}try{String.fromCharCode.apply(null,new Uint8Array(1))}catch(t){s=!1}for(var o=new n.Buf8(256),l=0;l<256;l++)o[l]=l>=252?6:l>=248?5:l>=240?4:l>=224?3:l>=192?2:1;o[254]=o[254]=1,a.string2buf=function(t){var e,a,i,r,s,o=t.length,l=0;for(r=0;r<o;r++)55296==(64512&(a=t.charCodeAt(r)))&&r+1<o&&56320==(64512&(i=t.charCodeAt(r+1)))&&(a=65536+(a-55296<<10)+(i-56320),r++),l+=a<128?1:a<2048?2:a<65536?3:4;for(e=new n.Buf8(l),s=0,r=0;s<l;r++)55296==(64512&(a=t.charCodeAt(r)))&&r+1<o&&56320==(64512&(i=t.charCodeAt(r+1)))&&(a=65536+(a-55296<<10)+(i-56320),r++),a<128?e[s++]=a:a<2048?(e[s++]=192|a>>>6,e[s++]=128|63&a):a<65536?(e[s++]=224|a>>>12,e[s++]=128|a>>>6&63,e[s++]=128|63&a):(e[s++]=240|a>>>18,e[s++]=128|a>>>12&63,e[s++]=128|a>>>6&63,e[s++]=128|63&a);return e},a.buf2binstring=function(t){return i(t,t.length)},a.binstring2buf=function(t){for(var e=new n.Buf8(t.length),a=0,i=e.length;a<i;a++)e[a]=t.charCodeAt(a);return e},a.buf2string=function(t,e){var a,n,r,s,l=e||t.length,h=new Array(2*l);for(n=0,a=0;a<l;)if((r=t[a++])<128)h[n++]=r;else if((s=o[r])>4)h[n++]=65533,a+=s-1;else{for(r&=2===s?31:3===s?15:7;s>1&&a<l;)r=r<<6|63&t[a++],s--;s>1?h[n++]=65533:r<65536?h[n++]=r:(r-=65536,h[n++]=55296|r>>10&1023,h[n++]=56320|1023&r)}return i(h,n)},a.utf8border=function(t,e){var a;for((e=e||t.length)>t.length&&(e=t.length),a=e-1;a>=0&&128==(192&t[a]);)a--;return a<0?e:0===a?e:a+o[t[a]]>e?a:e}},{"./common":3}],5:[function(t,e,a){"use 
strict";e.exports=function(t,e,a,i){for(var n=65535&t|0,r=t>>>16&65535|0,s=0;0!==a;){a-=s=a>2e3?2e3:a;do{r=r+(n=n+e[i++]|0)|0}while(--s);n%=65521,r%=65521}return n|r<<16|0}},{}],6:[function(t,e,a){"use strict";e.exports={Z_NO_FLUSH:0,Z_PARTIAL_FLUSH:1,Z_SYNC_FLUSH:2,Z_FULL_FLUSH:3,Z_FINISH:4,Z_BLOCK:5,Z_TREES:6,Z_OK:0,Z_STREAM_END:1,Z_NEED_DICT:2,Z_ERRNO:-1,Z_STREAM_ERROR:-2,Z_DATA_ERROR:-3,Z_BUF_ERROR:-5,Z_NO_COMPRESSION:0,Z_BEST_SPEED:1,Z_BEST_COMPRESSION:9,Z_DEFAULT_COMPRESSION:-1,Z_FILTERED:1,Z_HUFFMAN_ONLY:2,Z_RLE:3,Z_FIXED:4,Z_DEFAULT_STRATEGY:0,Z_BINARY:0,Z_TEXT:1,Z_UNKNOWN:2,Z_DEFLATED:8}},{}],7:[function(t,e,a){"use strict";var i=function(){for(var t,e=[],a=0;a<256;a++){t=a;for(var i=0;i<8;i++)t=1&t?3988292384^t>>>1:t>>>1;e[a]=t}return e}();e.exports=function(t,e,a,n){var r=i,s=n+a;t^=-1;for(var o=n;o<s;o++)t=t>>>8^r[255&(t^e[o])];return-1^t}},{}],8:[function(t,e,a){"use strict";function i(t,e){return t.msg=A[e],e}function n(t){return(t<<1)-(t>4?9:0)}function r(t){for(var e=t.length;--e>=0;)t[e]=0}function s(t){var e=t.state,a=e.pending;a>t.avail_out&&(a=t.avail_out),0!==a&&(z.arraySet(t.output,e.pending_buf,e.pending_out,a,t.next_out),t.next_out+=a,e.pending_out+=a,t.total_out+=a,t.avail_out-=a,e.pending-=a,0===e.pending&&(e.pending_out=0))}function o(t,e){B._tr_flush_block(t,t.block_start>=0?t.block_start:-1,t.strstart-t.block_start,e),t.block_start=t.strstart,s(t.strm)}function l(t,e){t.pending_buf[t.pending++]=e}function h(t,e){t.pending_buf[t.pending++]=e>>>8&255,t.pending_buf[t.pending++]=255&e}function d(t,e,a,i){var n=t.avail_in;return n>i&&(n=i),0===n?0:(t.avail_in-=n,z.arraySet(e,t.input,t.next_in,n,a),1===t.state.wrap?t.adler=S(t.adler,e,n,a):2===t.state.wrap&&(t.adler=E(t.adler,e,n,a)),t.next_in+=n,t.total_in+=n,n)}function f(t,e){var 
a,i,n=t.max_chain_length,r=t.strstart,s=t.prev_length,o=t.nice_match,l=t.strstart>t.w_size-it?t.strstart-(t.w_size-it):0,h=t.window,d=t.w_mask,f=t.prev,_=t.strstart+at,u=h[r+s-1],c=h[r+s];t.prev_length>=t.good_match&&(n>>=2),o>t.lookahead&&(o=t.lookahead);do{if(a=e,h[a+s]===c&&h[a+s-1]===u&&h[a]===h[r]&&h[++a]===h[r+1]){r+=2,a++;do{}while(h[++r]===h[++a]&&h[++r]===h[++a]&&h[++r]===h[++a]&&h[++r]===h[++a]&&h[++r]===h[++a]&&h[++r]===h[++a]&&h[++r]===h[++a]&&h[++r]===h[++a]&&r<_);if(i=at-(_-r),r=_-at,i>s){if(t.match_start=e,s=i,i>=o)break;u=h[r+s-1],c=h[r+s]}}}while((e=f[e&d])>l&&0!=--n);return s<=t.lookahead?s:t.lookahead}function _(t){var e,a,i,n,r,s=t.w_size;do{if(n=t.window_size-t.lookahead-t.strstart,t.strstart>=s+(s-it)){z.arraySet(t.window,t.window,s,s,0),t.match_start-=s,t.strstart-=s,t.block_start-=s,e=a=t.hash_size;do{i=t.head[--e],t.head[e]=i>=s?i-s:0}while(--a);e=a=s;do{i=t.prev[--e],t.prev[e]=i>=s?i-s:0}while(--a);n+=s}if(0===t.strm.avail_in)break;if(a=d(t.strm,t.window,t.strstart+t.lookahead,n),t.lookahead+=a,t.lookahead+t.insert>=et)for(r=t.strstart-t.insert,t.ins_h=t.window[r],t.ins_h=(t.ins_h<<t.hash_shift^t.window[r+1])&t.hash_mask;t.insert&&(t.ins_h=(t.ins_h<<t.hash_shift^t.window[r+et-1])&t.hash_mask,t.prev[r&t.w_mask]=t.head[t.ins_h],t.head[t.ins_h]=r,r++,t.insert--,!(t.lookahead+t.insert<et)););}while(t.lookahead<it&&0!==t.strm.avail_in)}function u(t,e){for(var a,i;;){if(t.lookahead<it){if(_(t),t.lookahead<it&&e===Z)return 
_t;if(0===t.lookahead)break}if(a=0,t.lookahead>=et&&(t.ins_h=(t.ins_h<<t.hash_shift^t.window[t.strstart+et-1])&t.hash_mask,a=t.prev[t.strstart&t.w_mask]=t.head[t.ins_h],t.head[t.ins_h]=t.strstart),0!==a&&t.strstart-a<=t.w_size-it&&(t.match_length=f(t,a)),t.match_length>=et)if(i=B._tr_tally(t,t.strstart-t.match_start,t.match_length-et),t.lookahead-=t.match_length,t.match_length<=t.max_lazy_match&&t.lookahead>=et){t.match_length--;do{t.strstart++,t.ins_h=(t.ins_h<<t.hash_shift^t.window[t.strstart+et-1])&t.hash_mask,a=t.prev[t.strstart&t.w_mask]=t.head[t.ins_h],t.head[t.ins_h]=t.strstart}while(0!=--t.match_length);t.strstart++}else t.strstart+=t.match_length,t.match_length=0,t.ins_h=t.window[t.strstart],t.ins_h=(t.ins_h<<t.hash_shift^t.window[t.strstart+1])&t.hash_mask;else i=B._tr_tally(t,0,t.window[t.strstart]),t.lookahead--,t.strstart++;if(i&&(o(t,!1),0===t.strm.avail_out))return _t}return t.insert=t.strstart<et-1?t.strstart:et-1,e===N?(o(t,!0),0===t.strm.avail_out?ct:bt):t.last_lit&&(o(t,!1),0===t.strm.avail_out)?_t:ut}function c(t,e){for(var a,i,n;;){if(t.lookahead<it){if(_(t),t.lookahead<it&&e===Z)return 
_t;if(0===t.lookahead)break}if(a=0,t.lookahead>=et&&(t.ins_h=(t.ins_h<<t.hash_shift^t.window[t.strstart+et-1])&t.hash_mask,a=t.prev[t.strstart&t.w_mask]=t.head[t.ins_h],t.head[t.ins_h]=t.strstart),t.prev_length=t.match_length,t.prev_match=t.match_start,t.match_length=et-1,0!==a&&t.prev_length<t.max_lazy_match&&t.strstart-a<=t.w_size-it&&(t.match_length=f(t,a),t.match_length<=5&&(t.strategy===H||t.match_length===et&&t.strstart-t.match_start>4096)&&(t.match_length=et-1)),t.prev_length>=et&&t.match_length<=t.prev_length){n=t.strstart+t.lookahead-et,i=B._tr_tally(t,t.strstart-1-t.prev_match,t.prev_length-et),t.lookahead-=t.prev_length-1,t.prev_length-=2;do{++t.strstart<=n&&(t.ins_h=(t.ins_h<<t.hash_shift^t.window[t.strstart+et-1])&t.hash_mask,a=t.prev[t.strstart&t.w_mask]=t.head[t.ins_h],t.head[t.ins_h]=t.strstart)}while(0!=--t.prev_length);if(t.match_available=0,t.match_length=et-1,t.strstart++,i&&(o(t,!1),0===t.strm.avail_out))return _t}else if(t.match_available){if((i=B._tr_tally(t,0,t.window[t.strstart-1]))&&o(t,!1),t.strstart++,t.lookahead--,0===t.strm.avail_out)return _t}else t.match_available=1,t.strstart++,t.lookahead--}return t.match_available&&(i=B._tr_tally(t,0,t.window[t.strstart-1]),t.match_available=0),t.insert=t.strstart<et-1?t.strstart:et-1,e===N?(o(t,!0),0===t.strm.avail_out?ct:bt):t.last_lit&&(o(t,!1),0===t.strm.avail_out)?_t:ut}function b(t,e){for(var a,i,n,r,s=t.window;;){if(t.lookahead<=at){if(_(t),t.lookahead<=at&&e===Z)return 
_t;if(0===t.lookahead)break}if(t.match_length=0,t.lookahead>=et&&t.strstart>0&&(n=t.strstart-1,(i=s[n])===s[++n]&&i===s[++n]&&i===s[++n])){r=t.strstart+at;do{}while(i===s[++n]&&i===s[++n]&&i===s[++n]&&i===s[++n]&&i===s[++n]&&i===s[++n]&&i===s[++n]&&i===s[++n]&&n<r);t.match_length=at-(r-n),t.match_length>t.lookahead&&(t.match_length=t.lookahead)}if(t.match_length>=et?(a=B._tr_tally(t,1,t.match_length-et),t.lookahead-=t.match_length,t.strstart+=t.match_length,t.match_length=0):(a=B._tr_tally(t,0,t.window[t.strstart]),t.lookahead--,t.strstart++),a&&(o(t,!1),0===t.strm.avail_out))return _t}return t.insert=0,e===N?(o(t,!0),0===t.strm.avail_out?ct:bt):t.last_lit&&(o(t,!1),0===t.strm.avail_out)?_t:ut}function g(t,e){for(var a;;){if(0===t.lookahead&&(_(t),0===t.lookahead)){if(e===Z)return _t;break}if(t.match_length=0,a=B._tr_tally(t,0,t.window[t.strstart]),t.lookahead--,t.strstart++,a&&(o(t,!1),0===t.strm.avail_out))return _t}return t.insert=0,e===N?(o(t,!0),0===t.strm.avail_out?ct:bt):t.last_lit&&(o(t,!1),0===t.strm.avail_out)?_t:ut}function m(t,e,a,i,n){this.good_length=t,this.max_lazy=e,this.nice_length=a,this.max_chain=i,this.func=n}function w(t){t.window_size=2*t.w_size,r(t.head),t.max_lazy_match=x[t.level].max_lazy,t.good_match=x[t.level].good_length,t.nice_match=x[t.level].nice_length,t.max_chain_length=x[t.level].max_chain,t.strstart=0,t.block_start=0,t.lookahead=0,t.insert=0,t.match_length=t.prev_length=et-1,t.match_available=0,t.ins_h=0}function 
p(){this.strm=null,this.status=0,this.pending_buf=null,this.pending_buf_size=0,this.pending_out=0,this.pending=0,this.wrap=0,this.gzhead=null,this.gzindex=0,this.method=q,this.last_flush=-1,this.w_size=0,this.w_bits=0,this.w_mask=0,this.window=null,this.window_size=0,this.prev=null,this.head=null,this.ins_h=0,this.hash_size=0,this.hash_bits=0,this.hash_mask=0,this.hash_shift=0,this.block_start=0,this.match_length=0,this.prev_match=0,this.match_available=0,this.strstart=0,this.match_start=0,this.lookahead=0,this.prev_length=0,this.max_chain_length=0,this.max_lazy_match=0,this.level=0,this.strategy=0,this.good_match=0,this.nice_match=0,this.dyn_ltree=new z.Buf16(2*$),this.dyn_dtree=new z.Buf16(2*(2*Q+1)),this.bl_tree=new z.Buf16(2*(2*V+1)),r(this.dyn_ltree),r(this.dyn_dtree),r(this.bl_tree),this.l_desc=null,this.d_desc=null,this.bl_desc=null,this.bl_count=new z.Buf16(tt+1),this.heap=new z.Buf16(2*J+1),r(this.heap),this.heap_len=0,this.heap_max=0,this.depth=new z.Buf16(2*J+1),r(this.depth),this.l_buf=0,this.lit_bufsize=0,this.last_lit=0,this.d_buf=0,this.opt_len=0,this.static_len=0,this.matches=0,this.insert=0,this.bi_buf=0,this.bi_valid=0}function v(t){var e;return t&&t.state?(t.total_in=t.total_out=0,t.data_type=Y,e=t.state,e.pending=0,e.pending_out=0,e.wrap<0&&(e.wrap=-e.wrap),e.status=e.wrap?rt:dt,t.adler=2===e.wrap?0:1,e.last_flush=Z,B._tr_init(e),D):i(t,U)}function k(t){var e=v(t);return e===D&&w(t.state),e}function y(t,e,a,n,r,s){if(!t)return U;var o=1;if(e===L&&(e=6),n<0?(o=0,n=-n):n>15&&(o=2,n-=16),r<1||r>G||a!==q||n<8||n>15||e<0||e>9||s<0||s>M)return i(t,U);8===n&&(n=9);var l=new p;return t.state=l,l.strm=t,l.wrap=o,l.gzhead=null,l.w_bits=n,l.w_size=1<<l.w_bits,l.w_mask=l.w_size-1,l.hash_bits=r+7,l.hash_size=1<<l.hash_bits,l.hash_mask=l.hash_size-1,l.hash_shift=~~((l.hash_bits+et-1)/et),l.window=new z.Buf8(2*l.w_size),l.head=new z.Buf16(l.hash_size),l.prev=new z.Buf16(l.w_size),l.lit_bufsize=1<<r+6,l.pending_buf_size=4*l.lit_bufsize,l.pending_buf=new 
z.Buf8(l.pending_buf_size),l.d_buf=1*l.lit_bufsize,l.l_buf=3*l.lit_bufsize,l.level=e,l.strategy=s,l.method=a,k(t)}var x,z=t("../utils/common"),B=t("./trees"),S=t("./adler32"),E=t("./crc32"),A=t("./messages"),Z=0,R=1,C=3,N=4,O=5,D=0,I=1,U=-2,T=-3,F=-5,L=-1,H=1,j=2,K=3,M=4,P=0,Y=2,q=8,G=9,X=15,W=8,J=286,Q=30,V=19,$=2*J+1,tt=15,et=3,at=258,it=at+et+1,nt=32,rt=42,st=69,ot=73,lt=91,ht=103,dt=113,ft=666,_t=1,ut=2,ct=3,bt=4,gt=3;x=[new m(0,0,0,0,function(t,e){var a=65535;for(a>t.pending_buf_size-5&&(a=t.pending_buf_size-5);;){if(t.lookahead<=1){if(_(t),0===t.lookahead&&e===Z)return _t;if(0===t.lookahead)break}t.strstart+=t.lookahead,t.lookahead=0;var i=t.block_start+a;if((0===t.strstart||t.strstart>=i)&&(t.lookahead=t.strstart-i,t.strstart=i,o(t,!1),0===t.strm.avail_out))return _t;if(t.strstart-t.block_start>=t.w_size-it&&(o(t,!1),0===t.strm.avail_out))return _t}return t.insert=0,e===N?(o(t,!0),0===t.strm.avail_out?ct:bt):(t.strstart>t.block_start&&(o(t,!1),t.strm.avail_out),_t)}),new m(4,4,8,4,u),new m(4,5,16,8,u),new m(4,6,32,32,u),new m(4,4,16,16,c),new m(8,16,32,32,c),new m(8,16,128,128,c),new m(8,32,128,256,c),new m(32,128,258,1024,c),new m(32,258,258,4096,c)],a.deflateInit=function(t,e){return y(t,e,q,X,W,P)},a.deflateInit2=y,a.deflateReset=k,a.deflateResetKeep=v,a.deflateSetHeader=function(t,e){return t&&t.state?2!==t.state.wrap?U:(t.state.gzhead=e,D):U},a.deflate=function(t,e){var a,o,d,f;if(!t||!t.state||e>O||e<0)return t?i(t,U):U;if(o=t.state,!t.output||!t.input&&0!==t.avail_in||o.status===ft&&e!==N)return 
i(t,0===t.avail_out?F:U);if(o.strm=t,a=o.last_flush,o.last_flush=e,o.status===rt)if(2===o.wrap)t.adler=0,l(o,31),l(o,139),l(o,8),o.gzhead?(l(o,(o.gzhead.text?1:0)+(o.gzhead.hcrc?2:0)+(o.gzhead.extra?4:0)+(o.gzhead.name?8:0)+(o.gzhead.comment?16:0)),l(o,255&o.gzhead.time),l(o,o.gzhead.time>>8&255),l(o,o.gzhead.time>>16&255),l(o,o.gzhead.time>>24&255),l(o,9===o.level?2:o.strategy>=j||o.level<2?4:0),l(o,255&o.gzhead.os),o.gzhead.extra&&o.gzhead.extra.length&&(l(o,255&o.gzhead.extra.length),l(o,o.gzhead.extra.length>>8&255)),o.gzhead.hcrc&&(t.adler=E(t.adler,o.pending_buf,o.pending,0)),o.gzindex=0,o.status=st):(l(o,0),l(o,0),l(o,0),l(o,0),l(o,0),l(o,9===o.level?2:o.strategy>=j||o.level<2?4:0),l(o,gt),o.status=dt);else{var _=q+(o.w_bits-8<<4)<<8;_|=(o.strategy>=j||o.level<2?0:o.level<6?1:6===o.level?2:3)<<6,0!==o.strstart&&(_|=nt),_+=31-_%31,o.status=dt,h(o,_),0!==o.strstart&&(h(o,t.adler>>>16),h(o,65535&t.adler)),t.adler=1}if(o.status===st)if(o.gzhead.extra){for(d=o.pending;o.gzindex<(65535&o.gzhead.extra.length)&&(o.pending!==o.pending_buf_size||(o.gzhead.hcrc&&o.pending>d&&(t.adler=E(t.adler,o.pending_buf,o.pending-d,d)),s(t),d=o.pending,o.pending!==o.pending_buf_size));)l(o,255&o.gzhead.extra[o.gzindex]),o.gzindex++;o.gzhead.hcrc&&o.pending>d&&(t.adler=E(t.adler,o.pending_buf,o.pending-d,d)),o.gzindex===o.gzhead.extra.length&&(o.gzindex=0,o.status=ot)}else o.status=ot;if(o.status===ot)if(o.gzhead.name){d=o.pending;do{if(o.pending===o.pending_buf_size&&(o.gzhead.hcrc&&o.pending>d&&(t.adler=E(t.adler,o.pending_buf,o.pending-d,d)),s(t),d=o.pending,o.pending===o.pending_buf_size)){f=1;break}f=o.gzindex<o.gzhead.name.length?255&o.gzhead.name.charCodeAt(o.gzindex++):0,l(o,f)}while(0!==f);o.gzhead.hcrc&&o.pending>d&&(t.adler=E(t.adler,o.pending_buf,o.pending-d,d)),0===f&&(o.gzindex=0,o.status=lt)}else 
o.status=lt;if(o.status===lt)if(o.gzhead.comment){d=o.pending;do{if(o.pending===o.pending_buf_size&&(o.gzhead.hcrc&&o.pending>d&&(t.adler=E(t.adler,o.pending_buf,o.pending-d,d)),s(t),d=o.pending,o.pending===o.pending_buf_size)){f=1;break}f=o.gzindex<o.gzhead.comment.length?255&o.gzhead.comment.charCodeAt(o.gzindex++):0,l(o,f)}while(0!==f);o.gzhead.hcrc&&o.pending>d&&(t.adler=E(t.adler,o.pending_buf,o.pending-d,d)),0===f&&(o.status=ht)}else o.status=ht;if(o.status===ht&&(o.gzhead.hcrc?(o.pending+2>o.pending_buf_size&&s(t),o.pending+2<=o.pending_buf_size&&(l(o,255&t.adler),l(o,t.adler>>8&255),t.adler=0,o.status=dt)):o.status=dt),0!==o.pending){if(s(t),0===t.avail_out)return o.last_flush=-1,D}else if(0===t.avail_in&&n(e)<=n(a)&&e!==N)return i(t,F);if(o.status===ft&&0!==t.avail_in)return i(t,F);if(0!==t.avail_in||0!==o.lookahead||e!==Z&&o.status!==ft){var u=o.strategy===j?g(o,e):o.strategy===K?b(o,e):x[o.level].func(o,e);if(u!==ct&&u!==bt||(o.status=ft),u===_t||u===ct)return 0===t.avail_out&&(o.last_flush=-1),D;if(u===ut&&(e===R?B._tr_align(o):e!==O&&(B._tr_stored_block(o,0,0,!1),e===C&&(r(o.head),0===o.lookahead&&(o.strstart=0,o.block_start=0,o.insert=0))),s(t),0===t.avail_out))return o.last_flush=-1,D}return e!==N?D:o.wrap<=0?I:(2===o.wrap?(l(o,255&t.adler),l(o,t.adler>>8&255),l(o,t.adler>>16&255),l(o,t.adler>>24&255),l(o,255&t.total_in),l(o,t.total_in>>8&255),l(o,t.total_in>>16&255),l(o,t.total_in>>24&255)):(h(o,t.adler>>>16),h(o,65535&t.adler)),s(t),o.wrap>0&&(o.wrap=-o.wrap),0!==o.pending?D:I)},a.deflateEnd=function(t){var e;return t&&t.state?(e=t.state.status)!==rt&&e!==st&&e!==ot&&e!==lt&&e!==ht&&e!==dt&&e!==ft?i(t,U):(t.state=null,e===dt?i(t,T):D):U},a.deflateSetDictionary=function(t,e){var a,i,n,s,o,l,h,d,f=e.length;if(!t||!t.state)return U;if(a=t.state,2===(s=a.wrap)||1===s&&a.status!==rt||a.lookahead)return U;for(1===s&&(t.adler=S(t.adler,e,f,0)),a.wrap=0,f>=a.w_size&&(0===s&&(r(a.head),a.strstart=0,a.block_start=0,a.insert=0),d=new 
z.Buf8(a.w_size),z.arraySet(d,e,f-a.w_size,a.w_size,0),e=d,f=a.w_size),o=t.avail_in,l=t.next_in,h=t.input,t.avail_in=f,t.next_in=0,t.input=e,_(a);a.lookahead>=et;){i=a.strstart,n=a.lookahead-(et-1);do{a.ins_h=(a.ins_h<<a.hash_shift^a.window[i+et-1])&a.hash_mask,a.prev[i&a.w_mask]=a.head[a.ins_h],a.head[a.ins_h]=i,i++}while(--n);a.strstart=i,a.lookahead=et-1,_(a)}return a.strstart+=a.lookahead,a.block_start=a.strstart,a.insert=a.lookahead,a.lookahead=0,a.match_length=a.prev_length=et-1,a.match_available=0,t.next_in=l,t.input=h,t.avail_in=o,a.wrap=s,D},a.deflateInfo="pako deflate (from Nodeca project)"},{"../utils/common":3,"./adler32":5,"./crc32":7,"./messages":13,"./trees":14}],9:[function(t,e,a){"use strict";e.exports=function(){this.text=0,this.time=0,this.xflags=0,this.os=0,this.extra=null,this.extra_len=0,this.name="",this.comment="",this.hcrc=0,this.done=!1}},{}],10:[function(t,e,a){"use strict";e.exports=function(t,e){var a,i,n,r,s,o,l,h,d,f,_,u,c,b,g,m,w,p,v,k,y,x,z,B,S;a=t.state,i=t.next_in,B=t.input,n=i+(t.avail_in-5),r=t.next_out,S=t.output,s=r-(e-t.avail_out),o=r+(t.avail_out-257),l=a.dmax,h=a.wsize,d=a.whave,f=a.wnext,_=a.window,u=a.hold,c=a.bits,b=a.lencode,g=a.distcode,m=(1<<a.lenbits)-1,w=(1<<a.distbits)-1;t:do{c<15&&(u+=B[i++]<<c,c+=8,u+=B[i++]<<c,c+=8),p=b[u&m];e:for(;;){if(v=p>>>24,u>>>=v,c-=v,0===(v=p>>>16&255))S[r++]=65535&p;else{if(!(16&v)){if(0==(64&v)){p=b[(65535&p)+(u&(1<<v)-1)];continue e}if(32&v){a.mode=12;break t}t.msg="invalid literal/length code",a.mode=30;break t}k=65535&p,(v&=15)&&(c<v&&(u+=B[i++]<<c,c+=8),k+=u&(1<<v)-1,u>>>=v,c-=v),c<15&&(u+=B[i++]<<c,c+=8,u+=B[i++]<<c,c+=8),p=g[u&w];a:for(;;){if(v=p>>>24,u>>>=v,c-=v,!(16&(v=p>>>16&255))){if(0==(64&v)){p=g[(65535&p)+(u&(1<<v)-1)];continue a}t.msg="invalid distance code",a.mode=30;break t}if(y=65535&p,v&=15,c<v&&(u+=B[i++]<<c,(c+=8)<v&&(u+=B[i++]<<c,c+=8)),(y+=u&(1<<v)-1)>l){t.msg="invalid distance too far back",a.mode=30;break 
t}if(u>>>=v,c-=v,v=r-s,y>v){if((v=y-v)>d&&a.correct){t.msg="invalid distance too far back",a.mode=30;break t}if(x=0,z=_,0===f){if(x+=h-v,v<k){k-=v;do{S[r++]=_[x++]}while(--v);x=r-y,z=S}}else if(f<v){if(x+=h+f-v,(v-=f)<k){k-=v;do{S[r++]=_[x++]}while(--v);if(x=0,f<k){k-=v=f;do{S[r++]=_[x++]}while(--v);x=r-y,z=S}}}else if(x+=f-v,v<k){k-=v;do{S[r++]=_[x++]}while(--v);x=r-y,z=S}for(;k>2;)S[r++]=z[x++],S[r++]=z[x++],S[r++]=z[x++],k-=3;k&&(S[r++]=z[x++],k>1&&(S[r++]=z[x++]))}else{x=r-y;do{S[r++]=S[x++],S[r++]=S[x++],S[r++]=S[x++],k-=3}while(k>2);k&&(S[r++]=S[x++],k>1&&(S[r++]=S[x++]))}break}}break}}while(i<n&&r<o);i-=k=c>>3,u&=(1<<(c-=k<<3))-1,t.next_in=i,t.next_out=r,t.avail_in=i<n?n-i+5:5-(i-n),t.avail_out=r<o?o-r+257:257-(r-o),a.hold=u,a.bits=c}},{}],11:[function(t,e,a){"use strict";function i(t){return(t>>>24&255)+(t>>>8&65280)+((65280&t)<<8)+((255&t)<<24)}function n(){this.mode=0,this.last=!1,this.wrap=0,this.havedict=!1,this.flags=0,this.dmax=0,this.check=0,this.total=0,this.head=null,this.wbits=0,this.wsize=0,this.whave=0,this.wnext=0,this.window=null,this.hold=0,this.bits=0,this.length=0,this.offset=0,this.extra=0,this.lencode=null,this.distcode=null,this.lenbits=0,this.distbits=0,this.ncode=0,this.nlen=0,this.ndist=0,this.have=0,this.next=null,this.lens=new u.Buf16(320),this.work=new u.Buf16(288),this.lendyn=null,this.distdyn=null,this.correct=0,this.back=0,this.was=0}function r(t){var e;return t&&t.state?(e=t.state,t.total_in=t.total_out=e.total=0,t.msg="",e.wrap&&(t.adler=1&e.wrap),e.mode=N,e.last=0,e.havedict=0,e.dmax=32768,e.head=null,e.hold=0,e.bits=0,e.lencode=e.lendyn=new u.Buf32(dt),e.distcode=e.distdyn=new u.Buf32(ft),e.correct=1,e.back=-1,z):E}function s(t){var e;return t&&t.state?(e=t.state,e.wsize=0,e.whave=0,e.wnext=0,r(t)):E}function o(t,e){var a,i;return t&&t.state?(i=t.state,e<0?(a=0,e=-e):(a=1+(e>>4),e<48&&(e&=15)),e&&(e<8||e>15)?E:(null!==i.window&&i.wbits!==e&&(i.window=null),i.wrap=a,i.wbits=e,s(t))):E}function l(t,e){var a,i;return t?(i=new 
n,t.state=i,i.window=null,(a=o(t,e))!==z&&(t.state=null),a):E}function h(t){if(ut){var e;for(f=new u.Buf32(512),_=new u.Buf32(32),e=0;e<144;)t.lens[e++]=8;for(;e<256;)t.lens[e++]=9;for(;e<280;)t.lens[e++]=7;for(;e<288;)t.lens[e++]=8;for(m(p,t.lens,0,288,f,0,t.work,{bits:9}),e=0;e<32;)t.lens[e++]=5;m(v,t.lens,0,32,_,0,t.work,{bits:5}),ut=!1}t.lencode=f,t.lenbits=9,t.distcode=_,t.distbits=5}function d(t,e,a,i){var n,r=t.state;return null===r.window&&(r.wsize=1<<r.wbits,r.wnext=0,r.whave=0,r.window=new u.Buf8(r.wsize)),i>=r.wsize?(u.arraySet(r.window,e,a-r.wsize,r.wsize,0),r.wnext=0,r.whave=r.wsize):((n=r.wsize-r.wnext)>i&&(n=i),u.arraySet(r.window,e,a-i,n,r.wnext),(i-=n)?(u.arraySet(r.window,e,a-i,i,0),r.wnext=i,r.whave=r.wsize):(r.wnext+=n,r.wnext===r.wsize&&(r.wnext=0),r.whave<r.wsize&&(r.whave+=n))),0}var f,_,u=t("../utils/common"),c=t("./adler32"),b=t("./crc32"),g=t("./inffast"),m=t("./inftrees"),w=0,p=1,v=2,k=4,y=5,x=6,z=0,B=1,S=2,E=-2,A=-3,Z=-4,R=-5,C=8,N=1,O=2,D=3,I=4,U=5,T=6,F=7,L=8,H=9,j=10,K=11,M=12,P=13,Y=14,q=15,G=16,X=17,W=18,J=19,Q=20,V=21,$=22,tt=23,et=24,at=25,it=26,nt=27,rt=28,st=29,ot=30,lt=31,ht=32,dt=852,ft=592,_t=15,ut=!0;a.inflateReset=s,a.inflateReset2=o,a.inflateResetKeep=r,a.inflateInit=function(t){return l(t,_t)},a.inflateInit2=l,a.inflate=function(t,e){var a,n,r,s,o,l,f,_,dt,ft,_t,ut,ct,bt,gt,mt,wt,pt,vt,kt,yt,xt,zt,Bt,St=0,Et=new u.Buf8(4),At=[16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15];if(!t||!t.state||!t.output||!t.input&&0!==t.avail_in)return E;(a=t.state).mode===M&&(a.mode=P),o=t.next_out,r=t.output,f=t.avail_out,s=t.next_in,n=t.input,l=t.avail_in,_=a.hold,dt=a.bits,ft=l,_t=f,xt=z;t:for(;;)switch(a.mode){case N:if(0===a.wrap){a.mode=P;break}for(;dt<16;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}if(2&a.wrap&&35615===_){a.check=0,Et[0]=255&_,Et[1]=_>>>8&255,a.check=b(a.check,Et,2,0),_=0,dt=0,a.mode=O;break}if(a.flags=0,a.head&&(a.head.done=!1),!(1&a.wrap)||(((255&_)<<8)+(_>>8))%31){t.msg="incorrect header 
check",a.mode=ot;break}if((15&_)!==C){t.msg="unknown compression method",a.mode=ot;break}if(_>>>=4,dt-=4,yt=8+(15&_),0===a.wbits)a.wbits=yt;else if(yt>a.wbits){t.msg="invalid window size",a.mode=ot;break}a.dmax=1<<yt,t.adler=a.check=1,a.mode=512&_?j:M,_=0,dt=0;break;case O:for(;dt<16;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}if(a.flags=_,(255&a.flags)!==C){t.msg="unknown compression method",a.mode=ot;break}if(57344&a.flags){t.msg="unknown header flags set",a.mode=ot;break}a.head&&(a.head.text=_>>8&1),512&a.flags&&(Et[0]=255&_,Et[1]=_>>>8&255,a.check=b(a.check,Et,2,0)),_=0,dt=0,a.mode=D;case D:for(;dt<32;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}a.head&&(a.head.time=_),512&a.flags&&(Et[0]=255&_,Et[1]=_>>>8&255,Et[2]=_>>>16&255,Et[3]=_>>>24&255,a.check=b(a.check,Et,4,0)),_=0,dt=0,a.mode=I;case I:for(;dt<16;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}a.head&&(a.head.xflags=255&_,a.head.os=_>>8),512&a.flags&&(Et[0]=255&_,Et[1]=_>>>8&255,a.check=b(a.check,Et,2,0)),_=0,dt=0,a.mode=U;case U:if(1024&a.flags){for(;dt<16;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}a.length=_,a.head&&(a.head.extra_len=_),512&a.flags&&(Et[0]=255&_,Et[1]=_>>>8&255,a.check=b(a.check,Et,2,0)),_=0,dt=0}else a.head&&(a.head.extra=null);a.mode=T;case T:if(1024&a.flags&&((ut=a.length)>l&&(ut=l),ut&&(a.head&&(yt=a.head.extra_len-a.length,a.head.extra||(a.head.extra=new Array(a.head.extra_len)),u.arraySet(a.head.extra,n,s,ut,yt)),512&a.flags&&(a.check=b(a.check,n,ut,s)),l-=ut,s+=ut,a.length-=ut),a.length))break t;a.length=0,a.mode=F;case F:if(2048&a.flags){if(0===l)break t;ut=0;do{yt=n[s+ut++],a.head&&yt&&a.length<65536&&(a.head.name+=String.fromCharCode(yt))}while(yt&&ut<l);if(512&a.flags&&(a.check=b(a.check,n,ut,s)),l-=ut,s+=ut,yt)break t}else a.head&&(a.head.name=null);a.length=0,a.mode=L;case L:if(4096&a.flags){if(0===l)break t;ut=0;do{yt=n[s+ut++],a.head&&yt&&a.length<65536&&(a.head.comment+=String.fromCharCode(yt))}while(yt&&ut<l);if(512&a.flags&&(a.check=b(a.check,n,ut,s)),l-=ut,s+=ut,yt)break 
t}else a.head&&(a.head.comment=null);a.mode=H;case H:if(512&a.flags){for(;dt<16;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}if(_!==(65535&a.check)){t.msg="header crc mismatch",a.mode=ot;break}_=0,dt=0}a.head&&(a.head.hcrc=a.flags>>9&1,a.head.done=!0),t.adler=a.check=0,a.mode=M;break;case j:for(;dt<32;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}t.adler=a.check=i(_),_=0,dt=0,a.mode=K;case K:if(0===a.havedict)return t.next_out=o,t.avail_out=f,t.next_in=s,t.avail_in=l,a.hold=_,a.bits=dt,S;t.adler=a.check=1,a.mode=M;case M:if(e===y||e===x)break t;case P:if(a.last){_>>>=7&dt,dt-=7&dt,a.mode=nt;break}for(;dt<3;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}switch(a.last=1&_,_>>>=1,dt-=1,3&_){case 0:a.mode=Y;break;case 1:if(h(a),a.mode=Q,e===x){_>>>=2,dt-=2;break t}break;case 2:a.mode=X;break;case 3:t.msg="invalid block type",a.mode=ot}_>>>=2,dt-=2;break;case Y:for(_>>>=7&dt,dt-=7&dt;dt<32;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}if((65535&_)!=(_>>>16^65535)){t.msg="invalid stored block lengths",a.mode=ot;break}if(a.length=65535&_,_=0,dt=0,a.mode=q,e===x)break t;case q:a.mode=G;case G:if(ut=a.length){if(ut>l&&(ut=l),ut>f&&(ut=f),0===ut)break t;u.arraySet(r,n,s,ut,o),l-=ut,s+=ut,f-=ut,o+=ut,a.length-=ut;break}a.mode=M;break;case X:for(;dt<14;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}if(a.nlen=257+(31&_),_>>>=5,dt-=5,a.ndist=1+(31&_),_>>>=5,dt-=5,a.ncode=4+(15&_),_>>>=4,dt-=4,a.nlen>286||a.ndist>30){t.msg="too many length or distance symbols",a.mode=ot;break}a.have=0,a.mode=W;case W:for(;a.have<a.ncode;){for(;dt<3;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}a.lens[At[a.have++]]=7&_,_>>>=3,dt-=3}for(;a.have<19;)a.lens[At[a.have++]]=0;if(a.lencode=a.lendyn,a.lenbits=7,zt={bits:a.lenbits},xt=m(w,a.lens,0,19,a.lencode,0,a.work,zt),a.lenbits=zt.bits,xt){t.msg="invalid code lengths set",a.mode=ot;break}a.have=0,a.mode=J;case J:for(;a.have<a.nlen+a.ndist;){for(;St=a.lencode[_&(1<<a.lenbits)-1],gt=St>>>24,mt=St>>>16&255,wt=65535&St,!(gt<=dt);){if(0===l)break 
t;l--,_+=n[s++]<<dt,dt+=8}if(wt<16)_>>>=gt,dt-=gt,a.lens[a.have++]=wt;else{if(16===wt){for(Bt=gt+2;dt<Bt;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}if(_>>>=gt,dt-=gt,0===a.have){t.msg="invalid bit length repeat",a.mode=ot;break}yt=a.lens[a.have-1],ut=3+(3&_),_>>>=2,dt-=2}else if(17===wt){for(Bt=gt+3;dt<Bt;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}dt-=gt,yt=0,ut=3+(7&(_>>>=gt)),_>>>=3,dt-=3}else{for(Bt=gt+7;dt<Bt;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}dt-=gt,yt=0,ut=11+(127&(_>>>=gt)),_>>>=7,dt-=7}if(a.have+ut>a.nlen+a.ndist){t.msg="invalid bit length repeat",a.mode=ot;break}for(;ut--;)a.lens[a.have++]=yt}}if(a.mode===ot)break;if(0===a.lens[256]){t.msg="invalid code -- missing end-of-block",a.mode=ot;break}if(a.lenbits=9,zt={bits:a.lenbits},xt=m(p,a.lens,0,a.nlen,a.lencode,0,a.work,zt),a.lenbits=zt.bits,xt){t.msg="invalid literal/lengths set",a.mode=ot;break}if(a.distbits=6,a.distcode=a.distdyn,zt={bits:a.distbits},xt=m(v,a.lens,a.nlen,a.ndist,a.distcode,0,a.work,zt),a.distbits=zt.bits,xt){t.msg="invalid distances set",a.mode=ot;break}if(a.mode=Q,e===x)break t;case Q:a.mode=V;case V:if(l>=6&&f>=258){t.next_out=o,t.avail_out=f,t.next_in=s,t.avail_in=l,a.hold=_,a.bits=dt,g(t,_t),o=t.next_out,r=t.output,f=t.avail_out,s=t.next_in,n=t.input,l=t.avail_in,_=a.hold,dt=a.bits,a.mode===M&&(a.back=-1);break}for(a.back=0;St=a.lencode[_&(1<<a.lenbits)-1],gt=St>>>24,mt=St>>>16&255,wt=65535&St,!(gt<=dt);){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}if(mt&&0==(240&mt)){for(pt=gt,vt=mt,kt=wt;St=a.lencode[kt+((_&(1<<pt+vt)-1)>>pt)],gt=St>>>24,mt=St>>>16&255,wt=65535&St,!(pt+gt<=dt);){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}_>>>=pt,dt-=pt,a.back+=pt}if(_>>>=gt,dt-=gt,a.back+=gt,a.length=wt,0===mt){a.mode=it;break}if(32&mt){a.back=-1,a.mode=M;break}if(64&mt){t.msg="invalid literal/length code",a.mode=ot;break}a.extra=15&mt,a.mode=$;case $:if(a.extra){for(Bt=a.extra;dt<Bt;){if(0===l)break 
t;l--,_+=n[s++]<<dt,dt+=8}a.length+=_&(1<<a.extra)-1,_>>>=a.extra,dt-=a.extra,a.back+=a.extra}a.was=a.length,a.mode=tt;case tt:for(;St=a.distcode[_&(1<<a.distbits)-1],gt=St>>>24,mt=St>>>16&255,wt=65535&St,!(gt<=dt);){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}if(0==(240&mt)){for(pt=gt,vt=mt,kt=wt;St=a.distcode[kt+((_&(1<<pt+vt)-1)>>pt)],gt=St>>>24,mt=St>>>16&255,wt=65535&St,!(pt+gt<=dt);){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}_>>>=pt,dt-=pt,a.back+=pt}if(_>>>=gt,dt-=gt,a.back+=gt,64&mt){t.msg="invalid distance code",a.mode=ot;break}a.offset=wt,a.extra=15&mt,a.mode=et;case et:if(a.extra){for(Bt=a.extra;dt<Bt;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}a.offset+=_&(1<<a.extra)-1,_>>>=a.extra,dt-=a.extra,a.back+=a.extra}if(a.offset>a.dmax){t.msg="invalid distance too far back",a.mode=ot;break}a.mode=at;case at:if(0===f)break t;if(ut=_t-f,a.offset>ut){if((ut=a.offset-ut)>a.whave&&a.correct){t.msg="invalid distance too far back",a.mode=ot;break}ut>a.wnext?(ut-=a.wnext,ct=a.wsize-ut):ct=a.wnext-ut,ut>a.length&&(ut=a.length),bt=a.window}else bt=r,ct=o-a.offset,ut=a.length;ut>f&&(ut=f),f-=ut,a.length-=ut;do{r[o++]=bt[ct++]}while(--ut);0===a.length&&(a.mode=V);break;case it:if(0===f)break t;r[o++]=a.length,f--,a.mode=V;break;case nt:if(a.wrap){for(;dt<32;){if(0===l)break t;l--,_|=n[s++]<<dt,dt+=8}if(_t-=f,t.total_out+=_t,a.total+=_t,_t&&(t.adler=a.check=a.flags?b(a.check,r,_t,o-_t):c(a.check,r,_t,o-_t)),_t=f,(a.flags?_:i(_))!==a.check){t.msg="incorrect data check",a.mode=ot;break}_=0,dt=0}a.mode=rt;case rt:if(a.wrap&&a.flags){for(;dt<32;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}if(_!==(4294967295&a.total)){t.msg="incorrect length check",a.mode=ot;break}_=0,dt=0}a.mode=st;case st:xt=B;break t;case ot:xt=A;break t;case lt:return Z;case ht:default:return E}return 
t.next_out=o,t.avail_out=f,t.next_in=s,t.avail_in=l,a.hold=_,a.bits=dt,(a.wsize||_t!==t.avail_out&&a.mode<ot&&(a.mode<nt||e!==k))&&d(t,t.output,t.next_out,_t-t.avail_out)?(a.mode=lt,Z):(ft-=t.avail_in,_t-=t.avail_out,t.total_in+=ft,t.total_out+=_t,a.total+=_t,a.wrap&&_t&&(t.adler=a.check=a.flags?b(a.check,r,_t,t.next_out-_t):c(a.check,r,_t,t.next_out-_t)),t.data_type=a.bits+(a.last?64:0)+(a.mode===M?128:0)+(a.mode===Q||a.mode===q?256:0),(0===ft&&0===_t||e===k)&&xt===z&&(xt=R),xt)},a.inflateEnd=function(t){if(!t||!t.state)return E;var e=t.state;return e.window&&(e.window=null),t.state=null,z},a.inflateGetHeader=function(t,e){var a;return t&&t.state?0==(2&(a=t.state).wrap)?E:(a.head=e,e.done=!1,z):E},a.inflateSetDictionary=function(t,e){var a,i,n=e.length;return t&&t.state?0!==(a=t.state).wrap&&a.mode!==K?E:a.mode===K&&(i=1,(i=c(i,e,n,0))!==a.check)?A:d(t,e,n,n)?(a.mode=lt,Z):(a.havedict=1,z):E},a.inflateInfo="pako inflate (from Nodeca project)"},{"../utils/common":3,"./adler32":5,"./crc32":7,"./inffast":10,"./inftrees":12}],12:[function(t,e,a){"use strict";var i=t("../utils/common"),n=[3,4,5,6,7,8,9,10,11,13,15,17,19,23,27,31,35,43,51,59,67,83,99,115,131,163,195,227,258,0,0],r=[16,16,16,16,16,16,16,16,17,17,17,17,18,18,18,18,19,19,19,19,20,20,20,20,21,21,21,21,16,72,78],s=[1,2,3,4,5,7,9,13,17,25,33,49,65,97,129,193,257,385,513,769,1025,1537,2049,3073,4097,6145,8193,12289,16385,24577,0,0],o=[16,16,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,24,24,25,25,26,26,27,27,28,28,29,29,64,64];e.exports=function(t,e,a,l,h,d,f,_){var u,c,b,g,m,w,p,v,k,y=_.bits,x=0,z=0,B=0,S=0,E=0,A=0,Z=0,R=0,C=0,N=0,O=null,D=0,I=new i.Buf16(16),U=new i.Buf16(16),T=null,F=0;for(x=0;x<=15;x++)I[x]=0;for(z=0;z<l;z++)I[e[a+z]]++;for(E=y,S=15;S>=1&&0===I[S];S--);if(E>S&&(E=S),0===S)return 
h[d++]=20971520,h[d++]=20971520,_.bits=1,0;for(B=1;B<S&&0===I[B];B++);for(E<B&&(E=B),R=1,x=1;x<=15;x++)if(R<<=1,(R-=I[x])<0)return-1;if(R>0&&(0===t||1!==S))return-1;for(U[1]=0,x=1;x<15;x++)U[x+1]=U[x]+I[x];for(z=0;z<l;z++)0!==e[a+z]&&(f[U[e[a+z]]++]=z);if(0===t?(O=T=f,w=19):1===t?(O=n,D-=257,T=r,F-=257,w=256):(O=s,T=o,w=-1),N=0,z=0,x=B,m=d,A=E,Z=0,b=-1,C=1<<E,g=C-1,1===t&&C>852||2===t&&C>592)return 1;for(;;){p=x-Z,f[z]<w?(v=0,k=f[z]):f[z]>w?(v=T[F+f[z]],k=O[D+f[z]]):(v=96,k=0),u=1<<x-Z,B=c=1<<A;do{h[m+(N>>Z)+(c-=u)]=p<<24|v<<16|k|0}while(0!==c);for(u=1<<x-1;N&u;)u>>=1;if(0!==u?(N&=u-1,N+=u):N=0,z++,0==--I[x]){if(x===S)break;x=e[a+f[z]]}if(x>E&&(N&g)!==b){for(0===Z&&(Z=E),m+=B,R=1<<(A=x-Z);A+Z<S&&!((R-=I[A+Z])<=0);)A++,R<<=1;if(C+=1<<A,1===t&&C>852||2===t&&C>592)return 1;h[b=N&g]=E<<24|A<<16|m-d|0}}return 0!==N&&(h[m+N]=x-Z<<24|64<<16|0),_.bits=E,0}},{"../utils/common":3}],13:[function(t,e,a){"use strict";e.exports={2:"need dictionary",1:"stream end",0:"","-1":"file error","-2":"stream error","-3":"data error","-4":"insufficient memory","-5":"buffer error","-6":"incompatible version"}},{}],14:[function(t,e,a){"use strict";function i(t){for(var e=t.length;--e>=0;)t[e]=0}function n(t,e,a,i,n){this.static_tree=t,this.extra_bits=e,this.extra_base=a,this.elems=i,this.max_length=n,this.has_stree=t&&t.length}function r(t,e){this.dyn_tree=t,this.max_code=0,this.stat_desc=e}function s(t){return t<256?et[t]:et[256+(t>>>7)]}function o(t,e){t.pending_buf[t.pending++]=255&e,t.pending_buf[t.pending++]=e>>>8&255}function l(t,e,a){t.bi_valid>M-a?(t.bi_buf|=e<<t.bi_valid&65535,o(t,t.bi_buf),t.bi_buf=e>>M-t.bi_valid,t.bi_valid+=a-M):(t.bi_buf|=e<<t.bi_valid&65535,t.bi_valid+=a)}function h(t,e,a){l(t,a[2*e],a[2*e+1])}function d(t,e){var a=0;do{a|=1&t,t>>>=1,a<<=1}while(--e>0);return a>>>1}function f(t){16===t.bi_valid?(o(t,t.bi_buf),t.bi_buf=0,t.bi_valid=0):t.bi_valid>=8&&(t.pending_buf[t.pending++]=255&t.bi_buf,t.bi_buf>>=8,t.bi_valid-=8)}function _(t,e){var 
a,i,n,r,s,o,l=e.dyn_tree,h=e.max_code,d=e.stat_desc.static_tree,f=e.stat_desc.has_stree,_=e.stat_desc.extra_bits,u=e.stat_desc.extra_base,c=e.stat_desc.max_length,b=0;for(r=0;r<=K;r++)t.bl_count[r]=0;for(l[2*t.heap[t.heap_max]+1]=0,a=t.heap_max+1;a<j;a++)(r=l[2*l[2*(i=t.heap[a])+1]+1]+1)>c&&(r=c,b++),l[2*i+1]=r,i>h||(t.bl_count[r]++,s=0,i>=u&&(s=_[i-u]),o=l[2*i],t.opt_len+=o*(r+s),f&&(t.static_len+=o*(d[2*i+1]+s)));if(0!==b){do{for(r=c-1;0===t.bl_count[r];)r--;t.bl_count[r]--,t.bl_count[r+1]+=2,t.bl_count[c]--,b-=2}while(b>0);for(r=c;0!==r;r--)for(i=t.bl_count[r];0!==i;)(n=t.heap[--a])>h||(l[2*n+1]!==r&&(t.opt_len+=(r-l[2*n+1])*l[2*n],l[2*n+1]=r),i--)}}function u(t,e,a){var i,n,r=new Array(K+1),s=0;for(i=1;i<=K;i++)r[i]=s=s+a[i-1]<<1;for(n=0;n<=e;n++){var o=t[2*n+1];0!==o&&(t[2*n]=d(r[o]++,o))}}function c(){var t,e,a,i,r,s=new Array(K+1);for(a=0,i=0;i<U-1;i++)for(it[i]=a,t=0;t<1<<W[i];t++)at[a++]=i;for(at[a-1]=i,r=0,i=0;i<16;i++)for(nt[i]=r,t=0;t<1<<J[i];t++)et[r++]=i;for(r>>=7;i<L;i++)for(nt[i]=r<<7,t=0;t<1<<J[i]-7;t++)et[256+r++]=i;for(e=0;e<=K;e++)s[e]=0;for(t=0;t<=143;)$[2*t+1]=8,t++,s[8]++;for(;t<=255;)$[2*t+1]=9,t++,s[9]++;for(;t<=279;)$[2*t+1]=7,t++,s[7]++;for(;t<=287;)$[2*t+1]=8,t++,s[8]++;for(u($,F+1,s),t=0;t<L;t++)tt[2*t+1]=5,tt[2*t]=d(t,5);rt=new n($,W,T+1,F,K),st=new n(tt,J,0,L,K),ot=new n(new Array(0),Q,0,H,P)}function b(t){var e;for(e=0;e<F;e++)t.dyn_ltree[2*e]=0;for(e=0;e<L;e++)t.dyn_dtree[2*e]=0;for(e=0;e<H;e++)t.bl_tree[2*e]=0;t.dyn_ltree[2*Y]=1,t.opt_len=t.static_len=0,t.last_lit=t.matches=0}function g(t){t.bi_valid>8?o(t,t.bi_buf):t.bi_valid>0&&(t.pending_buf[t.pending++]=t.bi_buf),t.bi_buf=0,t.bi_valid=0}function m(t,e,a,i){g(t),i&&(o(t,a),o(t,~a)),A.arraySet(t.pending_buf,t.window,e,a,t.pending),t.pending+=a}function w(t,e,a,i){var n=2*e,r=2*a;return t[n]<t[r]||t[n]===t[r]&&i[e]<=i[a]}function p(t,e,a){for(var 
i=t.heap[a],n=a<<1;n<=t.heap_len&&(n<t.heap_len&&w(e,t.heap[n+1],t.heap[n],t.depth)&&n++,!w(e,i,t.heap[n],t.depth));)t.heap[a]=t.heap[n],a=n,n<<=1;t.heap[a]=i}function v(t,e,a){var i,n,r,o,d=0;if(0!==t.last_lit)do{i=t.pending_buf[t.d_buf+2*d]<<8|t.pending_buf[t.d_buf+2*d+1],n=t.pending_buf[t.l_buf+d],d++,0===i?h(t,n,e):(h(t,(r=at[n])+T+1,e),0!==(o=W[r])&&l(t,n-=it[r],o),h(t,r=s(--i),a),0!==(o=J[r])&&l(t,i-=nt[r],o))}while(d<t.last_lit);h(t,Y,e)}function k(t,e){var a,i,n,r=e.dyn_tree,s=e.stat_desc.static_tree,o=e.stat_desc.has_stree,l=e.stat_desc.elems,h=-1;for(t.heap_len=0,t.heap_max=j,a=0;a<l;a++)0!==r[2*a]?(t.heap[++t.heap_len]=h=a,t.depth[a]=0):r[2*a+1]=0;for(;t.heap_len<2;)r[2*(n=t.heap[++t.heap_len]=h<2?++h:0)]=1,t.depth[n]=0,t.opt_len--,o&&(t.static_len-=s[2*n+1]);for(e.max_code=h,a=t.heap_len>>1;a>=1;a--)p(t,r,a);n=l;do{a=t.heap[1],t.heap[1]=t.heap[t.heap_len--],p(t,r,1),i=t.heap[1],t.heap[--t.heap_max]=a,t.heap[--t.heap_max]=i,r[2*n]=r[2*a]+r[2*i],t.depth[n]=(t.depth[a]>=t.depth[i]?t.depth[a]:t.depth[i])+1,r[2*a+1]=r[2*i+1]=n,t.heap[1]=n++,p(t,r,1)}while(t.heap_len>=2);t.heap[--t.heap_max]=t.heap[1],_(t,e),u(r,h,t.bl_count)}function y(t,e,a){var i,n,r=-1,s=e[1],o=0,l=7,h=4;for(0===s&&(l=138,h=3),e[2*(a+1)+1]=65535,i=0;i<=a;i++)n=s,s=e[2*(i+1)+1],++o<l&&n===s||(o<h?t.bl_tree[2*n]+=o:0!==n?(n!==r&&t.bl_tree[2*n]++,t.bl_tree[2*q]++):o<=10?t.bl_tree[2*G]++:t.bl_tree[2*X]++,o=0,r=n,0===s?(l=138,h=3):n===s?(l=6,h=3):(l=7,h=4))}function x(t,e,a){var i,n,r=-1,s=e[1],o=0,d=7,f=4;for(0===s&&(d=138,f=3),i=0;i<=a;i++)if(n=s,s=e[2*(i+1)+1],!(++o<d&&n===s)){if(o<f)do{h(t,n,t.bl_tree)}while(0!=--o);else 0!==n?(n!==r&&(h(t,n,t.bl_tree),o--),h(t,q,t.bl_tree),l(t,o-3,2)):o<=10?(h(t,G,t.bl_tree),l(t,o-3,3)):(h(t,X,t.bl_tree),l(t,o-11,7));o=0,r=n,0===s?(d=138,f=3):n===s?(d=6,f=3):(d=7,f=4)}}function z(t){var e;for(y(t,t.dyn_ltree,t.l_desc.max_code),y(t,t.dyn_dtree,t.d_desc.max_code),k(t,t.bl_desc),e=H-1;e>=3&&0===t.bl_tree[2*V[e]+1];e--);return 
t.opt_len+=3*(e+1)+5+5+4,e}function B(t,e,a,i){var n;for(l(t,e-257,5),l(t,a-1,5),l(t,i-4,4),n=0;n<i;n++)l(t,t.bl_tree[2*V[n]+1],3);x(t,t.dyn_ltree,e-1),x(t,t.dyn_dtree,a-1)}function S(t){var e,a=4093624447;for(e=0;e<=31;e++,a>>>=1)if(1&a&&0!==t.dyn_ltree[2*e])return R;if(0!==t.dyn_ltree[18]||0!==t.dyn_ltree[20]||0!==t.dyn_ltree[26])return C;for(e=32;e<T;e++)if(0!==t.dyn_ltree[2*e])return C;return R}function E(t,e,a,i){l(t,(O<<1)+(i?1:0),3),m(t,e,a,!0)}var A=t("../utils/common"),Z=4,R=0,C=1,N=2,O=0,D=1,I=2,U=29,T=256,F=T+1+U,L=30,H=19,j=2*F+1,K=15,M=16,P=7,Y=256,q=16,G=17,X=18,W=[0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,0],J=[0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13],Q=[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,3,7],V=[16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15],$=new Array(2*(F+2));i($);var tt=new Array(2*L);i(tt);var et=new Array(512);i(et);var at=new Array(256);i(at);var it=new Array(U);i(it);var nt=new Array(L);i(nt);var rt,st,ot,lt=!1;a._tr_init=function(t){lt||(c(),lt=!0),t.l_desc=new r(t.dyn_ltree,rt),t.d_desc=new r(t.dyn_dtree,st),t.bl_desc=new r(t.bl_tree,ot),t.bi_buf=0,t.bi_valid=0,b(t)},a._tr_stored_block=E,a._tr_flush_block=function(t,e,a,i){var n,r,s=0;t.level>0?(t.strm.data_type===N&&(t.strm.data_type=S(t)),k(t,t.l_desc),k(t,t.d_desc),s=z(t),n=t.opt_len+3+7>>>3,(r=t.static_len+3+7>>>3)<=n&&(n=r)):n=r=a+5,a+4<=n&&-1!==e?E(t,e,a,i):t.strategy===Z||r===n?(l(t,(D<<1)+(i?1:0),3),v(t,$,tt)):(l(t,(I<<1)+(i?1:0),3),B(t,t.l_desc.max_code+1,t.d_desc.max_code+1,s+1),v(t,t.dyn_ltree,t.dyn_dtree)),b(t),i&&g(t)},a._tr_tally=function(t,e,a){return t.pending_buf[t.d_buf+2*t.last_lit]=e>>>8&255,t.pending_buf[t.d_buf+2*t.last_lit+1]=255&e,t.pending_buf[t.l_buf+t.last_lit]=255&a,t.last_lit++,0===e?t.dyn_ltree[2*a]++:(t.matches++,e--,t.dyn_ltree[2*(at[a]+T+1)]++,t.dyn_dtree[2*s(e)]++),t.last_lit===t.lit_bufsize-1},a._tr_align=function(t){l(t,D<<1,3),h(t,Y,$),f(t)}},{"../utils/common":3}],15:[function(t,e,a){"use 
strict";e.exports=function(){this.input=null,this.next_in=0,this.avail_in=0,this.total_in=0,this.output=null,this.next_out=0,this.avail_out=0,this.total_out=0,this.msg="",this.state=null,this.data_type=2,this.adler=0}},{}],"/":[function(t,e,a){"use strict";var i={};(0,t("./lib/utils/common").assign)(i,t("./lib/deflate"),t("./lib/inflate"),t("./lib/zlib/constants")),e.exports=i},{"./lib/deflate":1,"./lib/inflate":2,"./lib/utils/common":3,"./lib/zlib/constants":6}]},{},[])("/")});'use strict';tr.exportTo('tr.e.importer',function(){const GZIP_MEMBER_HEADER_ID_SIZE=3;const GZIP_HEADER_ID1=0x1f;const GZIP_HEADER_ID2=0x8b;const GZIP_DEFLATE_COMPRESSION=8;function _stringToUInt8Array(str){const array=new Uint8Array(str.length);for(let i=0;i<str.length;++i){array[i]=str.charCodeAt(i);} +return{InMemoryTraceStream,};});!function(t){if("object"==typeof exports&&"undefined"!=typeof module)module.exports=t();else if("function"==typeof define&&define.amd)define([],t);else{("undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:this).pako=t()}}(function(){return function t(e,a,i){function n(s,o){if(!a[s]){if(!e[s]){var l="function"==typeof require&&require;if(!o&&l)return l(s,!0);if(r)return r(s,!0);var h=new Error("Cannot find module '"+s+"'");throw h.code="MODULE_NOT_FOUND",h}var d=a[s]={exports:{}};e[s][0].call(d.exports,function(t){var a=e[s][1][t];return n(a||t)},d,d.exports,t,e,a,i)}return a[s].exports}for(var r="function"==typeof require&&require,s=0;s<i.length;s++)n(i[s]);return n}({1:[function(t,e,a){"use strict";function i(t){if(!(this instanceof i))return new i(t);this.options=s.assign({level:_,method:c,chunkSize:16384,windowBits:15,memLevel:8,strategy:u,to:""},t||{});var e=this.options;e.raw&&e.windowBits>0?e.windowBits=-e.windowBits:e.gzip&&e.windowBits>0&&e.windowBits<16&&(e.windowBits+=16),this.err=0,this.msg="",this.ended=!1,this.chunks=[],this.strm=new h,this.strm.avail_out=0;var 
a=r.deflateInit2(this.strm,e.level,e.method,e.windowBits,e.memLevel,e.strategy);if(a!==f)throw new Error(l[a]);if(e.header&&r.deflateSetHeader(this.strm,e.header),e.dictionary){var n;if(n="string"==typeof e.dictionary?o.string2buf(e.dictionary):"[object ArrayBuffer]"===d.call(e.dictionary)?new Uint8Array(e.dictionary):e.dictionary,(a=r.deflateSetDictionary(this.strm,n))!==f)throw new Error(l[a]);this._dict_set=!0}}function n(t,e){var a=new i(e);if(a.push(t,!0),a.err)throw a.msg||l[a.err];return a.result}var r=t("./zlib/deflate"),s=t("./utils/common"),o=t("./utils/strings"),l=t("./zlib/messages"),h=t("./zlib/zstream"),d=Object.prototype.toString,f=0,_=-1,u=0,c=8;i.prototype.push=function(t,e){var a,i,n=this.strm,l=this.options.chunkSize;if(this.ended)return!1;i=e===~~e?e:!0===e?4:0,"string"==typeof t?n.input=o.string2buf(t):"[object ArrayBuffer]"===d.call(t)?n.input=new Uint8Array(t):n.input=t,n.next_in=0,n.avail_in=n.input.length;do{if(0===n.avail_out&&(n.output=new s.Buf8(l),n.next_out=0,n.avail_out=l),1!==(a=r.deflate(n,i))&&a!==f)return this.onEnd(a),this.ended=!0,!1;0!==n.avail_out&&(0!==n.avail_in||4!==i&&2!==i)||("string"===this.options.to?this.onData(o.buf2binstring(s.shrinkBuf(n.output,n.next_out))):this.onData(s.shrinkBuf(n.output,n.next_out)))}while((n.avail_in>0||0===n.avail_out)&&1!==a);return 4===i?(a=r.deflateEnd(this.strm),this.onEnd(a),this.ended=!0,a===f):2!==i||(this.onEnd(f),n.avail_out=0,!0)},i.prototype.onData=function(t){this.chunks.push(t)},i.prototype.onEnd=function(t){t===f&&("string"===this.options.to?this.result=this.chunks.join(""):this.result=s.flattenChunks(this.chunks)),this.chunks=[],this.err=t,this.msg=this.strm.msg},a.Deflate=i,a.deflate=n,a.deflateRaw=function(t,e){return e=e||{},e.raw=!0,n(t,e)},a.gzip=function(t,e){return e=e||{},e.gzip=!0,n(t,e)}},{"./utils/common":3,"./utils/strings":4,"./zlib/deflate":8,"./zlib/messages":13,"./zlib/zstream":15}],2:[function(t,e,a){"use strict";function i(t){if(!(this instanceof i))return new 
i(t);this.options=s.assign({chunkSize:16384,windowBits:0,to:""},t||{});var e=this.options;e.raw&&e.windowBits>=0&&e.windowBits<16&&(e.windowBits=-e.windowBits,0===e.windowBits&&(e.windowBits=-15)),!(e.windowBits>=0&&e.windowBits<16)||t&&t.windowBits||(e.windowBits+=32),e.windowBits>15&&e.windowBits<48&&0==(15&e.windowBits)&&(e.windowBits|=15),this.err=0,this.msg="",this.ended=!1,this.chunks=[],this.strm=new d,this.strm.avail_out=0;var a=r.inflateInit2(this.strm,e.windowBits);if(a!==l.Z_OK)throw new Error(h[a]);this.header=new f,r.inflateGetHeader(this.strm,this.header)}function n(t,e){var a=new i(e);if(a.push(t,!0),a.err)throw a.msg||h[a.err];return a.result}var r=t("./zlib/inflate"),s=t("./utils/common"),o=t("./utils/strings"),l=t("./zlib/constants"),h=t("./zlib/messages"),d=t("./zlib/zstream"),f=t("./zlib/gzheader"),_=Object.prototype.toString;i.prototype.push=function(t,e){var a,i,n,h,d,f,u=this.strm,c=this.options.chunkSize,b=this.options.dictionary,g=!1;if(this.ended)return!1;i=e===~~e?e:!0===e?l.Z_FINISH:l.Z_NO_FLUSH,"string"==typeof t?u.input=o.binstring2buf(t):"[object ArrayBuffer]"===_.call(t)?u.input=new Uint8Array(t):u.input=t,u.next_in=0,u.avail_in=u.input.length;do{if(0===u.avail_out&&(u.output=new s.Buf8(c),u.next_out=0,u.avail_out=c),(a=r.inflate(u,l.Z_NO_FLUSH))===l.Z_NEED_DICT&&b&&(f="string"==typeof b?o.string2buf(b):"[object ArrayBuffer]"===_.call(b)?new Uint8Array(b):b,a=r.inflateSetDictionary(this.strm,f)),a===l.Z_BUF_ERROR&&!0===g&&(a=l.Z_OK,g=!1),a!==l.Z_STREAM_END&&a!==l.Z_OK)return 
this.onEnd(a),this.ended=!0,!1;u.next_out&&(0!==u.avail_out&&a!==l.Z_STREAM_END&&(0!==u.avail_in||i!==l.Z_FINISH&&i!==l.Z_SYNC_FLUSH)||("string"===this.options.to?(n=o.utf8border(u.output,u.next_out),h=u.next_out-n,d=o.buf2string(u.output,n),u.next_out=h,u.avail_out=c-h,h&&s.arraySet(u.output,u.output,n,h,0),this.onData(d)):this.onData(s.shrinkBuf(u.output,u.next_out)))),0===u.avail_in&&0===u.avail_out&&(g=!0)}while((u.avail_in>0||0===u.avail_out)&&a!==l.Z_STREAM_END);return a===l.Z_STREAM_END&&(i=l.Z_FINISH),i===l.Z_FINISH?(a=r.inflateEnd(this.strm),this.onEnd(a),this.ended=!0,a===l.Z_OK):i!==l.Z_SYNC_FLUSH||(this.onEnd(l.Z_OK),u.avail_out=0,!0)},i.prototype.onData=function(t){this.chunks.push(t)},i.prototype.onEnd=function(t){t===l.Z_OK&&("string"===this.options.to?this.result=this.chunks.join(""):this.result=s.flattenChunks(this.chunks)),this.chunks=[],this.err=t,this.msg=this.strm.msg},a.Inflate=i,a.inflate=n,a.inflateRaw=function(t,e){return e=e||{},e.raw=!0,n(t,e)},a.ungzip=n},{"./utils/common":3,"./utils/strings":4,"./zlib/constants":6,"./zlib/gzheader":9,"./zlib/inflate":11,"./zlib/messages":13,"./zlib/zstream":15}],3:[function(t,e,a){"use strict";function i(t,e){return Object.prototype.hasOwnProperty.call(t,e)}var n="undefined"!=typeof Uint8Array&&"undefined"!=typeof Uint16Array&&"undefined"!=typeof Int32Array;a.assign=function(t){for(var e=Array.prototype.slice.call(arguments,1);e.length;){var a=e.shift();if(a){if("object"!=typeof a)throw new TypeError(a+"must be non-object");for(var n in a)i(a,n)&&(t[n]=a[n])}}return t},a.shrinkBuf=function(t,e){return t.length===e?t:t.subarray?t.subarray(0,e):(t.length=e,t)};var r={arraySet:function(t,e,a,i,n){if(e.subarray&&t.subarray)t.set(e.subarray(a,a+i),n);else for(var r=0;r<i;r++)t[n+r]=e[a+r]},flattenChunks:function(t){var e,a,i,n,r,s;for(i=0,e=0,a=t.length;e<a;e++)i+=t[e].length;for(s=new Uint8Array(i),n=0,e=0,a=t.length;e<a;e++)r=t[e],s.set(r,n),n+=r.length;return s}},s={arraySet:function(t,e,a,i,n){for(var 
r=0;r<i;r++)t[n+r]=e[a+r]},flattenChunks:function(t){return[].concat.apply([],t)}};a.setTyped=function(t){t?(a.Buf8=Uint8Array,a.Buf16=Uint16Array,a.Buf32=Int32Array,a.assign(a,r)):(a.Buf8=Array,a.Buf16=Array,a.Buf32=Array,a.assign(a,s))},a.setTyped(n)},{}],4:[function(t,e,a){"use strict";function i(t,e){if(e<65537&&(t.subarray&&s||!t.subarray&&r))return String.fromCharCode.apply(null,n.shrinkBuf(t,e));for(var a="",i=0;i<e;i++)a+=String.fromCharCode(t[i]);return a}var n=t("./common"),r=!0,s=!0;try{String.fromCharCode.apply(null,[0])}catch(t){r=!1}try{String.fromCharCode.apply(null,new Uint8Array(1))}catch(t){s=!1}for(var o=new n.Buf8(256),l=0;l<256;l++)o[l]=l>=252?6:l>=248?5:l>=240?4:l>=224?3:l>=192?2:1;o[254]=o[254]=1,a.string2buf=function(t){var e,a,i,r,s,o=t.length,l=0;for(r=0;r<o;r++)55296==(64512&(a=t.charCodeAt(r)))&&r+1<o&&56320==(64512&(i=t.charCodeAt(r+1)))&&(a=65536+(a-55296<<10)+(i-56320),r++),l+=a<128?1:a<2048?2:a<65536?3:4;for(e=new n.Buf8(l),s=0,r=0;s<l;r++)55296==(64512&(a=t.charCodeAt(r)))&&r+1<o&&56320==(64512&(i=t.charCodeAt(r+1)))&&(a=65536+(a-55296<<10)+(i-56320),r++),a<128?e[s++]=a:a<2048?(e[s++]=192|a>>>6,e[s++]=128|63&a):a<65536?(e[s++]=224|a>>>12,e[s++]=128|a>>>6&63,e[s++]=128|63&a):(e[s++]=240|a>>>18,e[s++]=128|a>>>12&63,e[s++]=128|a>>>6&63,e[s++]=128|63&a);return e},a.buf2binstring=function(t){return i(t,t.length)},a.binstring2buf=function(t){for(var e=new n.Buf8(t.length),a=0,i=e.length;a<i;a++)e[a]=t.charCodeAt(a);return e},a.buf2string=function(t,e){var a,n,r,s,l=e||t.length,h=new Array(2*l);for(n=0,a=0;a<l;)if((r=t[a++])<128)h[n++]=r;else if((s=o[r])>4)h[n++]=65533,a+=s-1;else{for(r&=2===s?31:3===s?15:7;s>1&&a<l;)r=r<<6|63&t[a++],s--;s>1?h[n++]=65533:r<65536?h[n++]=r:(r-=65536,h[n++]=55296|r>>10&1023,h[n++]=56320|1023&r)}return i(h,n)},a.utf8border=function(t,e){var a;for((e=e||t.length)>t.length&&(e=t.length),a=e-1;a>=0&&128==(192&t[a]);)a--;return a<0?e:0===a?e:a+o[t[a]]>e?a:e}},{"./common":3}],5:[function(t,e,a){"use 
strict";e.exports=function(t,e,a,i){for(var n=65535&t|0,r=t>>>16&65535|0,s=0;0!==a;){a-=s=a>2e3?2e3:a;do{r=r+(n=n+e[i++]|0)|0}while(--s);n%=65521,r%=65521}return n|r<<16|0}},{}],6:[function(t,e,a){"use strict";e.exports={Z_NO_FLUSH:0,Z_PARTIAL_FLUSH:1,Z_SYNC_FLUSH:2,Z_FULL_FLUSH:3,Z_FINISH:4,Z_BLOCK:5,Z_TREES:6,Z_OK:0,Z_STREAM_END:1,Z_NEED_DICT:2,Z_ERRNO:-1,Z_STREAM_ERROR:-2,Z_DATA_ERROR:-3,Z_BUF_ERROR:-5,Z_NO_COMPRESSION:0,Z_BEST_SPEED:1,Z_BEST_COMPRESSION:9,Z_DEFAULT_COMPRESSION:-1,Z_FILTERED:1,Z_HUFFMAN_ONLY:2,Z_RLE:3,Z_FIXED:4,Z_DEFAULT_STRATEGY:0,Z_BINARY:0,Z_TEXT:1,Z_UNKNOWN:2,Z_DEFLATED:8}},{}],7:[function(t,e,a){"use strict";var i=function(){for(var t,e=[],a=0;a<256;a++){t=a;for(var i=0;i<8;i++)t=1&t?3988292384^t>>>1:t>>>1;e[a]=t}return e}();e.exports=function(t,e,a,n){var r=i,s=n+a;t^=-1;for(var o=n;o<s;o++)t=t>>>8^r[255&(t^e[o])];return-1^t}},{}],8:[function(t,e,a){"use strict";function i(t,e){return t.msg=A[e],e}function n(t){return(t<<1)-(t>4?9:0)}function r(t){for(var e=t.length;--e>=0;)t[e]=0}function s(t){var e=t.state,a=e.pending;a>t.avail_out&&(a=t.avail_out),0!==a&&(z.arraySet(t.output,e.pending_buf,e.pending_out,a,t.next_out),t.next_out+=a,e.pending_out+=a,t.total_out+=a,t.avail_out-=a,e.pending-=a,0===e.pending&&(e.pending_out=0))}function o(t,e){B._tr_flush_block(t,t.block_start>=0?t.block_start:-1,t.strstart-t.block_start,e),t.block_start=t.strstart,s(t.strm)}function l(t,e){t.pending_buf[t.pending++]=e}function h(t,e){t.pending_buf[t.pending++]=e>>>8&255,t.pending_buf[t.pending++]=255&e}function d(t,e,a,i){var n=t.avail_in;return n>i&&(n=i),0===n?0:(t.avail_in-=n,z.arraySet(e,t.input,t.next_in,n,a),1===t.state.wrap?t.adler=S(t.adler,e,n,a):2===t.state.wrap&&(t.adler=E(t.adler,e,n,a)),t.next_in+=n,t.total_in+=n,n)}function f(t,e){var 
a,i,n=t.max_chain_length,r=t.strstart,s=t.prev_length,o=t.nice_match,l=t.strstart>t.w_size-it?t.strstart-(t.w_size-it):0,h=t.window,d=t.w_mask,f=t.prev,_=t.strstart+at,u=h[r+s-1],c=h[r+s];t.prev_length>=t.good_match&&(n>>=2),o>t.lookahead&&(o=t.lookahead);do{if(a=e,h[a+s]===c&&h[a+s-1]===u&&h[a]===h[r]&&h[++a]===h[r+1]){r+=2,a++;do{}while(h[++r]===h[++a]&&h[++r]===h[++a]&&h[++r]===h[++a]&&h[++r]===h[++a]&&h[++r]===h[++a]&&h[++r]===h[++a]&&h[++r]===h[++a]&&h[++r]===h[++a]&&r<_);if(i=at-(_-r),r=_-at,i>s){if(t.match_start=e,s=i,i>=o)break;u=h[r+s-1],c=h[r+s]}}}while((e=f[e&d])>l&&0!=--n);return s<=t.lookahead?s:t.lookahead}function _(t){var e,a,i,n,r,s=t.w_size;do{if(n=t.window_size-t.lookahead-t.strstart,t.strstart>=s+(s-it)){z.arraySet(t.window,t.window,s,s,0),t.match_start-=s,t.strstart-=s,t.block_start-=s,e=a=t.hash_size;do{i=t.head[--e],t.head[e]=i>=s?i-s:0}while(--a);e=a=s;do{i=t.prev[--e],t.prev[e]=i>=s?i-s:0}while(--a);n+=s}if(0===t.strm.avail_in)break;if(a=d(t.strm,t.window,t.strstart+t.lookahead,n),t.lookahead+=a,t.lookahead+t.insert>=et)for(r=t.strstart-t.insert,t.ins_h=t.window[r],t.ins_h=(t.ins_h<<t.hash_shift^t.window[r+1])&t.hash_mask;t.insert&&(t.ins_h=(t.ins_h<<t.hash_shift^t.window[r+et-1])&t.hash_mask,t.prev[r&t.w_mask]=t.head[t.ins_h],t.head[t.ins_h]=r,r++,t.insert--,!(t.lookahead+t.insert<et)););}while(t.lookahead<it&&0!==t.strm.avail_in)}function u(t,e){for(var a,i;;){if(t.lookahead<it){if(_(t),t.lookahead<it&&e===Z)return 
_t;if(0===t.lookahead)break}if(a=0,t.lookahead>=et&&(t.ins_h=(t.ins_h<<t.hash_shift^t.window[t.strstart+et-1])&t.hash_mask,a=t.prev[t.strstart&t.w_mask]=t.head[t.ins_h],t.head[t.ins_h]=t.strstart),0!==a&&t.strstart-a<=t.w_size-it&&(t.match_length=f(t,a)),t.match_length>=et)if(i=B._tr_tally(t,t.strstart-t.match_start,t.match_length-et),t.lookahead-=t.match_length,t.match_length<=t.max_lazy_match&&t.lookahead>=et){t.match_length--;do{t.strstart++,t.ins_h=(t.ins_h<<t.hash_shift^t.window[t.strstart+et-1])&t.hash_mask,a=t.prev[t.strstart&t.w_mask]=t.head[t.ins_h],t.head[t.ins_h]=t.strstart}while(0!=--t.match_length);t.strstart++}else t.strstart+=t.match_length,t.match_length=0,t.ins_h=t.window[t.strstart],t.ins_h=(t.ins_h<<t.hash_shift^t.window[t.strstart+1])&t.hash_mask;else i=B._tr_tally(t,0,t.window[t.strstart]),t.lookahead--,t.strstart++;if(i&&(o(t,!1),0===t.strm.avail_out))return _t}return t.insert=t.strstart<et-1?t.strstart:et-1,e===N?(o(t,!0),0===t.strm.avail_out?ct:bt):t.last_lit&&(o(t,!1),0===t.strm.avail_out)?_t:ut}function c(t,e){for(var a,i,n;;){if(t.lookahead<it){if(_(t),t.lookahead<it&&e===Z)return 
_t;if(0===t.lookahead)break}if(a=0,t.lookahead>=et&&(t.ins_h=(t.ins_h<<t.hash_shift^t.window[t.strstart+et-1])&t.hash_mask,a=t.prev[t.strstart&t.w_mask]=t.head[t.ins_h],t.head[t.ins_h]=t.strstart),t.prev_length=t.match_length,t.prev_match=t.match_start,t.match_length=et-1,0!==a&&t.prev_length<t.max_lazy_match&&t.strstart-a<=t.w_size-it&&(t.match_length=f(t,a),t.match_length<=5&&(t.strategy===H||t.match_length===et&&t.strstart-t.match_start>4096)&&(t.match_length=et-1)),t.prev_length>=et&&t.match_length<=t.prev_length){n=t.strstart+t.lookahead-et,i=B._tr_tally(t,t.strstart-1-t.prev_match,t.prev_length-et),t.lookahead-=t.prev_length-1,t.prev_length-=2;do{++t.strstart<=n&&(t.ins_h=(t.ins_h<<t.hash_shift^t.window[t.strstart+et-1])&t.hash_mask,a=t.prev[t.strstart&t.w_mask]=t.head[t.ins_h],t.head[t.ins_h]=t.strstart)}while(0!=--t.prev_length);if(t.match_available=0,t.match_length=et-1,t.strstart++,i&&(o(t,!1),0===t.strm.avail_out))return _t}else if(t.match_available){if((i=B._tr_tally(t,0,t.window[t.strstart-1]))&&o(t,!1),t.strstart++,t.lookahead--,0===t.strm.avail_out)return _t}else t.match_available=1,t.strstart++,t.lookahead--}return t.match_available&&(i=B._tr_tally(t,0,t.window[t.strstart-1]),t.match_available=0),t.insert=t.strstart<et-1?t.strstart:et-1,e===N?(o(t,!0),0===t.strm.avail_out?ct:bt):t.last_lit&&(o(t,!1),0===t.strm.avail_out)?_t:ut}function b(t,e){for(var a,i,n,r,s=t.window;;){if(t.lookahead<=at){if(_(t),t.lookahead<=at&&e===Z)return 
_t;if(0===t.lookahead)break}if(t.match_length=0,t.lookahead>=et&&t.strstart>0&&(n=t.strstart-1,(i=s[n])===s[++n]&&i===s[++n]&&i===s[++n])){r=t.strstart+at;do{}while(i===s[++n]&&i===s[++n]&&i===s[++n]&&i===s[++n]&&i===s[++n]&&i===s[++n]&&i===s[++n]&&i===s[++n]&&n<r);t.match_length=at-(r-n),t.match_length>t.lookahead&&(t.match_length=t.lookahead)}if(t.match_length>=et?(a=B._tr_tally(t,1,t.match_length-et),t.lookahead-=t.match_length,t.strstart+=t.match_length,t.match_length=0):(a=B._tr_tally(t,0,t.window[t.strstart]),t.lookahead--,t.strstart++),a&&(o(t,!1),0===t.strm.avail_out))return _t}return t.insert=0,e===N?(o(t,!0),0===t.strm.avail_out?ct:bt):t.last_lit&&(o(t,!1),0===t.strm.avail_out)?_t:ut}function g(t,e){for(var a;;){if(0===t.lookahead&&(_(t),0===t.lookahead)){if(e===Z)return _t;break}if(t.match_length=0,a=B._tr_tally(t,0,t.window[t.strstart]),t.lookahead--,t.strstart++,a&&(o(t,!1),0===t.strm.avail_out))return _t}return t.insert=0,e===N?(o(t,!0),0===t.strm.avail_out?ct:bt):t.last_lit&&(o(t,!1),0===t.strm.avail_out)?_t:ut}function m(t,e,a,i,n){this.good_length=t,this.max_lazy=e,this.nice_length=a,this.max_chain=i,this.func=n}function w(t){t.window_size=2*t.w_size,r(t.head),t.max_lazy_match=x[t.level].max_lazy,t.good_match=x[t.level].good_length,t.nice_match=x[t.level].nice_length,t.max_chain_length=x[t.level].max_chain,t.strstart=0,t.block_start=0,t.lookahead=0,t.insert=0,t.match_length=t.prev_length=et-1,t.match_available=0,t.ins_h=0}function 
p(){this.strm=null,this.status=0,this.pending_buf=null,this.pending_buf_size=0,this.pending_out=0,this.pending=0,this.wrap=0,this.gzhead=null,this.gzindex=0,this.method=q,this.last_flush=-1,this.w_size=0,this.w_bits=0,this.w_mask=0,this.window=null,this.window_size=0,this.prev=null,this.head=null,this.ins_h=0,this.hash_size=0,this.hash_bits=0,this.hash_mask=0,this.hash_shift=0,this.block_start=0,this.match_length=0,this.prev_match=0,this.match_available=0,this.strstart=0,this.match_start=0,this.lookahead=0,this.prev_length=0,this.max_chain_length=0,this.max_lazy_match=0,this.level=0,this.strategy=0,this.good_match=0,this.nice_match=0,this.dyn_ltree=new z.Buf16(2*$),this.dyn_dtree=new z.Buf16(2*(2*Q+1)),this.bl_tree=new z.Buf16(2*(2*V+1)),r(this.dyn_ltree),r(this.dyn_dtree),r(this.bl_tree),this.l_desc=null,this.d_desc=null,this.bl_desc=null,this.bl_count=new z.Buf16(tt+1),this.heap=new z.Buf16(2*J+1),r(this.heap),this.heap_len=0,this.heap_max=0,this.depth=new z.Buf16(2*J+1),r(this.depth),this.l_buf=0,this.lit_bufsize=0,this.last_lit=0,this.d_buf=0,this.opt_len=0,this.static_len=0,this.matches=0,this.insert=0,this.bi_buf=0,this.bi_valid=0}function v(t){var e;return t&&t.state?(t.total_in=t.total_out=0,t.data_type=Y,e=t.state,e.pending=0,e.pending_out=0,e.wrap<0&&(e.wrap=-e.wrap),e.status=e.wrap?rt:dt,t.adler=2===e.wrap?0:1,e.last_flush=Z,B._tr_init(e),D):i(t,U)}function k(t){var e=v(t);return e===D&&w(t.state),e}function y(t,e,a,n,r,s){if(!t)return U;var o=1;if(e===L&&(e=6),n<0?(o=0,n=-n):n>15&&(o=2,n-=16),r<1||r>G||a!==q||n<8||n>15||e<0||e>9||s<0||s>M)return i(t,U);8===n&&(n=9);var l=new p;return t.state=l,l.strm=t,l.wrap=o,l.gzhead=null,l.w_bits=n,l.w_size=1<<l.w_bits,l.w_mask=l.w_size-1,l.hash_bits=r+7,l.hash_size=1<<l.hash_bits,l.hash_mask=l.hash_size-1,l.hash_shift=~~((l.hash_bits+et-1)/et),l.window=new z.Buf8(2*l.w_size),l.head=new z.Buf16(l.hash_size),l.prev=new z.Buf16(l.w_size),l.lit_bufsize=1<<r+6,l.pending_buf_size=4*l.lit_bufsize,l.pending_buf=new 
z.Buf8(l.pending_buf_size),l.d_buf=1*l.lit_bufsize,l.l_buf=3*l.lit_bufsize,l.level=e,l.strategy=s,l.method=a,k(t)}var x,z=t("../utils/common"),B=t("./trees"),S=t("./adler32"),E=t("./crc32"),A=t("./messages"),Z=0,R=1,C=3,N=4,O=5,D=0,I=1,U=-2,T=-3,F=-5,L=-1,H=1,j=2,K=3,M=4,P=0,Y=2,q=8,G=9,X=15,W=8,J=286,Q=30,V=19,$=2*J+1,tt=15,et=3,at=258,it=at+et+1,nt=32,rt=42,st=69,ot=73,lt=91,ht=103,dt=113,ft=666,_t=1,ut=2,ct=3,bt=4,gt=3;x=[new m(0,0,0,0,function(t,e){var a=65535;for(a>t.pending_buf_size-5&&(a=t.pending_buf_size-5);;){if(t.lookahead<=1){if(_(t),0===t.lookahead&&e===Z)return _t;if(0===t.lookahead)break}t.strstart+=t.lookahead,t.lookahead=0;var i=t.block_start+a;if((0===t.strstart||t.strstart>=i)&&(t.lookahead=t.strstart-i,t.strstart=i,o(t,!1),0===t.strm.avail_out))return _t;if(t.strstart-t.block_start>=t.w_size-it&&(o(t,!1),0===t.strm.avail_out))return _t}return t.insert=0,e===N?(o(t,!0),0===t.strm.avail_out?ct:bt):(t.strstart>t.block_start&&(o(t,!1),t.strm.avail_out),_t)}),new m(4,4,8,4,u),new m(4,5,16,8,u),new m(4,6,32,32,u),new m(4,4,16,16,c),new m(8,16,32,32,c),new m(8,16,128,128,c),new m(8,32,128,256,c),new m(32,128,258,1024,c),new m(32,258,258,4096,c)],a.deflateInit=function(t,e){return y(t,e,q,X,W,P)},a.deflateInit2=y,a.deflateReset=k,a.deflateResetKeep=v,a.deflateSetHeader=function(t,e){return t&&t.state?2!==t.state.wrap?U:(t.state.gzhead=e,D):U},a.deflate=function(t,e){var a,o,d,f;if(!t||!t.state||e>O||e<0)return t?i(t,U):U;if(o=t.state,!t.output||!t.input&&0!==t.avail_in||o.status===ft&&e!==N)return 
i(t,0===t.avail_out?F:U);if(o.strm=t,a=o.last_flush,o.last_flush=e,o.status===rt)if(2===o.wrap)t.adler=0,l(o,31),l(o,139),l(o,8),o.gzhead?(l(o,(o.gzhead.text?1:0)+(o.gzhead.hcrc?2:0)+(o.gzhead.extra?4:0)+(o.gzhead.name?8:0)+(o.gzhead.comment?16:0)),l(o,255&o.gzhead.time),l(o,o.gzhead.time>>8&255),l(o,o.gzhead.time>>16&255),l(o,o.gzhead.time>>24&255),l(o,9===o.level?2:o.strategy>=j||o.level<2?4:0),l(o,255&o.gzhead.os),o.gzhead.extra&&o.gzhead.extra.length&&(l(o,255&o.gzhead.extra.length),l(o,o.gzhead.extra.length>>8&255)),o.gzhead.hcrc&&(t.adler=E(t.adler,o.pending_buf,o.pending,0)),o.gzindex=0,o.status=st):(l(o,0),l(o,0),l(o,0),l(o,0),l(o,0),l(o,9===o.level?2:o.strategy>=j||o.level<2?4:0),l(o,gt),o.status=dt);else{var _=q+(o.w_bits-8<<4)<<8;_|=(o.strategy>=j||o.level<2?0:o.level<6?1:6===o.level?2:3)<<6,0!==o.strstart&&(_|=nt),_+=31-_%31,o.status=dt,h(o,_),0!==o.strstart&&(h(o,t.adler>>>16),h(o,65535&t.adler)),t.adler=1}if(o.status===st)if(o.gzhead.extra){for(d=o.pending;o.gzindex<(65535&o.gzhead.extra.length)&&(o.pending!==o.pending_buf_size||(o.gzhead.hcrc&&o.pending>d&&(t.adler=E(t.adler,o.pending_buf,o.pending-d,d)),s(t),d=o.pending,o.pending!==o.pending_buf_size));)l(o,255&o.gzhead.extra[o.gzindex]),o.gzindex++;o.gzhead.hcrc&&o.pending>d&&(t.adler=E(t.adler,o.pending_buf,o.pending-d,d)),o.gzindex===o.gzhead.extra.length&&(o.gzindex=0,o.status=ot)}else o.status=ot;if(o.status===ot)if(o.gzhead.name){d=o.pending;do{if(o.pending===o.pending_buf_size&&(o.gzhead.hcrc&&o.pending>d&&(t.adler=E(t.adler,o.pending_buf,o.pending-d,d)),s(t),d=o.pending,o.pending===o.pending_buf_size)){f=1;break}f=o.gzindex<o.gzhead.name.length?255&o.gzhead.name.charCodeAt(o.gzindex++):0,l(o,f)}while(0!==f);o.gzhead.hcrc&&o.pending>d&&(t.adler=E(t.adler,o.pending_buf,o.pending-d,d)),0===f&&(o.gzindex=0,o.status=lt)}else 
o.status=lt;if(o.status===lt)if(o.gzhead.comment){d=o.pending;do{if(o.pending===o.pending_buf_size&&(o.gzhead.hcrc&&o.pending>d&&(t.adler=E(t.adler,o.pending_buf,o.pending-d,d)),s(t),d=o.pending,o.pending===o.pending_buf_size)){f=1;break}f=o.gzindex<o.gzhead.comment.length?255&o.gzhead.comment.charCodeAt(o.gzindex++):0,l(o,f)}while(0!==f);o.gzhead.hcrc&&o.pending>d&&(t.adler=E(t.adler,o.pending_buf,o.pending-d,d)),0===f&&(o.status=ht)}else o.status=ht;if(o.status===ht&&(o.gzhead.hcrc?(o.pending+2>o.pending_buf_size&&s(t),o.pending+2<=o.pending_buf_size&&(l(o,255&t.adler),l(o,t.adler>>8&255),t.adler=0,o.status=dt)):o.status=dt),0!==o.pending){if(s(t),0===t.avail_out)return o.last_flush=-1,D}else if(0===t.avail_in&&n(e)<=n(a)&&e!==N)return i(t,F);if(o.status===ft&&0!==t.avail_in)return i(t,F);if(0!==t.avail_in||0!==o.lookahead||e!==Z&&o.status!==ft){var u=o.strategy===j?g(o,e):o.strategy===K?b(o,e):x[o.level].func(o,e);if(u!==ct&&u!==bt||(o.status=ft),u===_t||u===ct)return 0===t.avail_out&&(o.last_flush=-1),D;if(u===ut&&(e===R?B._tr_align(o):e!==O&&(B._tr_stored_block(o,0,0,!1),e===C&&(r(o.head),0===o.lookahead&&(o.strstart=0,o.block_start=0,o.insert=0))),s(t),0===t.avail_out))return o.last_flush=-1,D}return e!==N?D:o.wrap<=0?I:(2===o.wrap?(l(o,255&t.adler),l(o,t.adler>>8&255),l(o,t.adler>>16&255),l(o,t.adler>>24&255),l(o,255&t.total_in),l(o,t.total_in>>8&255),l(o,t.total_in>>16&255),l(o,t.total_in>>24&255)):(h(o,t.adler>>>16),h(o,65535&t.adler)),s(t),o.wrap>0&&(o.wrap=-o.wrap),0!==o.pending?D:I)},a.deflateEnd=function(t){var e;return t&&t.state?(e=t.state.status)!==rt&&e!==st&&e!==ot&&e!==lt&&e!==ht&&e!==dt&&e!==ft?i(t,U):(t.state=null,e===dt?i(t,T):D):U},a.deflateSetDictionary=function(t,e){var a,i,n,s,o,l,h,d,f=e.length;if(!t||!t.state)return U;if(a=t.state,2===(s=a.wrap)||1===s&&a.status!==rt||a.lookahead)return U;for(1===s&&(t.adler=S(t.adler,e,f,0)),a.wrap=0,f>=a.w_size&&(0===s&&(r(a.head),a.strstart=0,a.block_start=0,a.insert=0),d=new 
z.Buf8(a.w_size),z.arraySet(d,e,f-a.w_size,a.w_size,0),e=d,f=a.w_size),o=t.avail_in,l=t.next_in,h=t.input,t.avail_in=f,t.next_in=0,t.input=e,_(a);a.lookahead>=et;){i=a.strstart,n=a.lookahead-(et-1);do{a.ins_h=(a.ins_h<<a.hash_shift^a.window[i+et-1])&a.hash_mask,a.prev[i&a.w_mask]=a.head[a.ins_h],a.head[a.ins_h]=i,i++}while(--n);a.strstart=i,a.lookahead=et-1,_(a)}return a.strstart+=a.lookahead,a.block_start=a.strstart,a.insert=a.lookahead,a.lookahead=0,a.match_length=a.prev_length=et-1,a.match_available=0,t.next_in=l,t.input=h,t.avail_in=o,a.wrap=s,D},a.deflateInfo="pako deflate (from Nodeca project)"},{"../utils/common":3,"./adler32":5,"./crc32":7,"./messages":13,"./trees":14}],9:[function(t,e,a){"use strict";e.exports=function(){this.text=0,this.time=0,this.xflags=0,this.os=0,this.extra=null,this.extra_len=0,this.name="",this.comment="",this.hcrc=0,this.done=!1}},{}],10:[function(t,e,a){"use strict";e.exports=function(t,e){var a,i,n,r,s,o,l,h,d,f,_,u,c,b,g,m,w,p,v,k,y,x,z,B,S;a=t.state,i=t.next_in,B=t.input,n=i+(t.avail_in-5),r=t.next_out,S=t.output,s=r-(e-t.avail_out),o=r+(t.avail_out-257),l=a.dmax,h=a.wsize,d=a.whave,f=a.wnext,_=a.window,u=a.hold,c=a.bits,b=a.lencode,g=a.distcode,m=(1<<a.lenbits)-1,w=(1<<a.distbits)-1;t:do{c<15&&(u+=B[i++]<<c,c+=8,u+=B[i++]<<c,c+=8),p=b[u&m];e:for(;;){if(v=p>>>24,u>>>=v,c-=v,0===(v=p>>>16&255))S[r++]=65535&p;else{if(!(16&v)){if(0==(64&v)){p=b[(65535&p)+(u&(1<<v)-1)];continue e}if(32&v){a.mode=12;break t}t.msg="invalid literal/length code",a.mode=30;break t}k=65535&p,(v&=15)&&(c<v&&(u+=B[i++]<<c,c+=8),k+=u&(1<<v)-1,u>>>=v,c-=v),c<15&&(u+=B[i++]<<c,c+=8,u+=B[i++]<<c,c+=8),p=g[u&w];a:for(;;){if(v=p>>>24,u>>>=v,c-=v,!(16&(v=p>>>16&255))){if(0==(64&v)){p=g[(65535&p)+(u&(1<<v)-1)];continue a}t.msg="invalid distance code",a.mode=30;break t}if(y=65535&p,v&=15,c<v&&(u+=B[i++]<<c,(c+=8)<v&&(u+=B[i++]<<c,c+=8)),(y+=u&(1<<v)-1)>l){t.msg="invalid distance too far back",a.mode=30;break 
t}if(u>>>=v,c-=v,v=r-s,y>v){if((v=y-v)>d&&a.sane){t.msg="invalid distance too far back",a.mode=30;break t}if(x=0,z=_,0===f){if(x+=h-v,v<k){k-=v;do{S[r++]=_[x++]}while(--v);x=r-y,z=S}}else if(f<v){if(x+=h+f-v,(v-=f)<k){k-=v;do{S[r++]=_[x++]}while(--v);if(x=0,f<k){k-=v=f;do{S[r++]=_[x++]}while(--v);x=r-y,z=S}}}else if(x+=f-v,v<k){k-=v;do{S[r++]=_[x++]}while(--v);x=r-y,z=S}for(;k>2;)S[r++]=z[x++],S[r++]=z[x++],S[r++]=z[x++],k-=3;k&&(S[r++]=z[x++],k>1&&(S[r++]=z[x++]))}else{x=r-y;do{S[r++]=S[x++],S[r++]=S[x++],S[r++]=S[x++],k-=3}while(k>2);k&&(S[r++]=S[x++],k>1&&(S[r++]=S[x++]))}break}}break}}while(i<n&&r<o);i-=k=c>>3,u&=(1<<(c-=k<<3))-1,t.next_in=i,t.next_out=r,t.avail_in=i<n?n-i+5:5-(i-n),t.avail_out=r<o?o-r+257:257-(r-o),a.hold=u,a.bits=c}},{}],11:[function(t,e,a){"use strict";function i(t){return(t>>>24&255)+(t>>>8&65280)+((65280&t)<<8)+((255&t)<<24)}function n(){this.mode=0,this.last=!1,this.wrap=0,this.havedict=!1,this.flags=0,this.dmax=0,this.check=0,this.total=0,this.head=null,this.wbits=0,this.wsize=0,this.whave=0,this.wnext=0,this.window=null,this.hold=0,this.bits=0,this.length=0,this.offset=0,this.extra=0,this.lencode=null,this.distcode=null,this.lenbits=0,this.distbits=0,this.ncode=0,this.nlen=0,this.ndist=0,this.have=0,this.next=null,this.lens=new u.Buf16(320),this.work=new u.Buf16(288),this.lendyn=null,this.distdyn=null,this.sane=0,this.back=0,this.was=0}function r(t){var e;return t&&t.state?(e=t.state,t.total_in=t.total_out=e.total=0,t.msg="",e.wrap&&(t.adler=1&e.wrap),e.mode=N,e.last=0,e.havedict=0,e.dmax=32768,e.head=null,e.hold=0,e.bits=0,e.lencode=e.lendyn=new u.Buf32(dt),e.distcode=e.distdyn=new u.Buf32(ft),e.sane=1,e.back=-1,z):E}function s(t){var e;return t&&t.state?(e=t.state,e.wsize=0,e.whave=0,e.wnext=0,r(t)):E}function o(t,e){var a,i;return t&&t.state?(i=t.state,e<0?(a=0,e=-e):(a=1+(e>>4),e<48&&(e&=15)),e&&(e<8||e>15)?E:(null!==i.window&&i.wbits!==e&&(i.window=null),i.wrap=a,i.wbits=e,s(t))):E}function l(t,e){var a,i;return t?(i=new 
n,t.state=i,i.window=null,(a=o(t,e))!==z&&(t.state=null),a):E}function h(t){if(ut){var e;for(f=new u.Buf32(512),_=new u.Buf32(32),e=0;e<144;)t.lens[e++]=8;for(;e<256;)t.lens[e++]=9;for(;e<280;)t.lens[e++]=7;for(;e<288;)t.lens[e++]=8;for(m(p,t.lens,0,288,f,0,t.work,{bits:9}),e=0;e<32;)t.lens[e++]=5;m(v,t.lens,0,32,_,0,t.work,{bits:5}),ut=!1}t.lencode=f,t.lenbits=9,t.distcode=_,t.distbits=5}function d(t,e,a,i){var n,r=t.state;return null===r.window&&(r.wsize=1<<r.wbits,r.wnext=0,r.whave=0,r.window=new u.Buf8(r.wsize)),i>=r.wsize?(u.arraySet(r.window,e,a-r.wsize,r.wsize,0),r.wnext=0,r.whave=r.wsize):((n=r.wsize-r.wnext)>i&&(n=i),u.arraySet(r.window,e,a-i,n,r.wnext),(i-=n)?(u.arraySet(r.window,e,a-i,i,0),r.wnext=i,r.whave=r.wsize):(r.wnext+=n,r.wnext===r.wsize&&(r.wnext=0),r.whave<r.wsize&&(r.whave+=n))),0}var f,_,u=t("../utils/common"),c=t("./adler32"),b=t("./crc32"),g=t("./inffast"),m=t("./inftrees"),w=0,p=1,v=2,k=4,y=5,x=6,z=0,B=1,S=2,E=-2,A=-3,Z=-4,R=-5,C=8,N=1,O=2,D=3,I=4,U=5,T=6,F=7,L=8,H=9,j=10,K=11,M=12,P=13,Y=14,q=15,G=16,X=17,W=18,J=19,Q=20,V=21,$=22,tt=23,et=24,at=25,it=26,nt=27,rt=28,st=29,ot=30,lt=31,ht=32,dt=852,ft=592,_t=15,ut=!0;a.inflateReset=s,a.inflateReset2=o,a.inflateResetKeep=r,a.inflateInit=function(t){return l(t,_t)},a.inflateInit2=l,a.inflate=function(t,e){var a,n,r,s,o,l,f,_,dt,ft,_t,ut,ct,bt,gt,mt,wt,pt,vt,kt,yt,xt,zt,Bt,St=0,Et=new u.Buf8(4),At=[16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15];if(!t||!t.state||!t.output||!t.input&&0!==t.avail_in)return E;(a=t.state).mode===M&&(a.mode=P),o=t.next_out,r=t.output,f=t.avail_out,s=t.next_in,n=t.input,l=t.avail_in,_=a.hold,dt=a.bits,ft=l,_t=f,xt=z;t:for(;;)switch(a.mode){case N:if(0===a.wrap){a.mode=P;break}for(;dt<16;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}if(2&a.wrap&&35615===_){a.check=0,Et[0]=255&_,Et[1]=_>>>8&255,a.check=b(a.check,Et,2,0),_=0,dt=0,a.mode=O;break}if(a.flags=0,a.head&&(a.head.done=!1),!(1&a.wrap)||(((255&_)<<8)+(_>>8))%31){t.msg="incorrect header 
check",a.mode=ot;break}if((15&_)!==C){t.msg="unknown compression method",a.mode=ot;break}if(_>>>=4,dt-=4,yt=8+(15&_),0===a.wbits)a.wbits=yt;else if(yt>a.wbits){t.msg="invalid window size",a.mode=ot;break}a.dmax=1<<yt,t.adler=a.check=1,a.mode=512&_?j:M,_=0,dt=0;break;case O:for(;dt<16;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}if(a.flags=_,(255&a.flags)!==C){t.msg="unknown compression method",a.mode=ot;break}if(57344&a.flags){t.msg="unknown header flags set",a.mode=ot;break}a.head&&(a.head.text=_>>8&1),512&a.flags&&(Et[0]=255&_,Et[1]=_>>>8&255,a.check=b(a.check,Et,2,0)),_=0,dt=0,a.mode=D;case D:for(;dt<32;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}a.head&&(a.head.time=_),512&a.flags&&(Et[0]=255&_,Et[1]=_>>>8&255,Et[2]=_>>>16&255,Et[3]=_>>>24&255,a.check=b(a.check,Et,4,0)),_=0,dt=0,a.mode=I;case I:for(;dt<16;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}a.head&&(a.head.xflags=255&_,a.head.os=_>>8),512&a.flags&&(Et[0]=255&_,Et[1]=_>>>8&255,a.check=b(a.check,Et,2,0)),_=0,dt=0,a.mode=U;case U:if(1024&a.flags){for(;dt<16;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}a.length=_,a.head&&(a.head.extra_len=_),512&a.flags&&(Et[0]=255&_,Et[1]=_>>>8&255,a.check=b(a.check,Et,2,0)),_=0,dt=0}else a.head&&(a.head.extra=null);a.mode=T;case T:if(1024&a.flags&&((ut=a.length)>l&&(ut=l),ut&&(a.head&&(yt=a.head.extra_len-a.length,a.head.extra||(a.head.extra=new Array(a.head.extra_len)),u.arraySet(a.head.extra,n,s,ut,yt)),512&a.flags&&(a.check=b(a.check,n,ut,s)),l-=ut,s+=ut,a.length-=ut),a.length))break t;a.length=0,a.mode=F;case F:if(2048&a.flags){if(0===l)break t;ut=0;do{yt=n[s+ut++],a.head&&yt&&a.length<65536&&(a.head.name+=String.fromCharCode(yt))}while(yt&&ut<l);if(512&a.flags&&(a.check=b(a.check,n,ut,s)),l-=ut,s+=ut,yt)break t}else a.head&&(a.head.name=null);a.length=0,a.mode=L;case L:if(4096&a.flags){if(0===l)break t;ut=0;do{yt=n[s+ut++],a.head&&yt&&a.length<65536&&(a.head.comment+=String.fromCharCode(yt))}while(yt&&ut<l);if(512&a.flags&&(a.check=b(a.check,n,ut,s)),l-=ut,s+=ut,yt)break 
t}else a.head&&(a.head.comment=null);a.mode=H;case H:if(512&a.flags){for(;dt<16;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}if(_!==(65535&a.check)){t.msg="header crc mismatch",a.mode=ot;break}_=0,dt=0}a.head&&(a.head.hcrc=a.flags>>9&1,a.head.done=!0),t.adler=a.check=0,a.mode=M;break;case j:for(;dt<32;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}t.adler=a.check=i(_),_=0,dt=0,a.mode=K;case K:if(0===a.havedict)return t.next_out=o,t.avail_out=f,t.next_in=s,t.avail_in=l,a.hold=_,a.bits=dt,S;t.adler=a.check=1,a.mode=M;case M:if(e===y||e===x)break t;case P:if(a.last){_>>>=7&dt,dt-=7&dt,a.mode=nt;break}for(;dt<3;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}switch(a.last=1&_,_>>>=1,dt-=1,3&_){case 0:a.mode=Y;break;case 1:if(h(a),a.mode=Q,e===x){_>>>=2,dt-=2;break t}break;case 2:a.mode=X;break;case 3:t.msg="invalid block type",a.mode=ot}_>>>=2,dt-=2;break;case Y:for(_>>>=7&dt,dt-=7&dt;dt<32;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}if((65535&_)!=(_>>>16^65535)){t.msg="invalid stored block lengths",a.mode=ot;break}if(a.length=65535&_,_=0,dt=0,a.mode=q,e===x)break t;case q:a.mode=G;case G:if(ut=a.length){if(ut>l&&(ut=l),ut>f&&(ut=f),0===ut)break t;u.arraySet(r,n,s,ut,o),l-=ut,s+=ut,f-=ut,o+=ut,a.length-=ut;break}a.mode=M;break;case X:for(;dt<14;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}if(a.nlen=257+(31&_),_>>>=5,dt-=5,a.ndist=1+(31&_),_>>>=5,dt-=5,a.ncode=4+(15&_),_>>>=4,dt-=4,a.nlen>286||a.ndist>30){t.msg="too many length or distance symbols",a.mode=ot;break}a.have=0,a.mode=W;case W:for(;a.have<a.ncode;){for(;dt<3;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}a.lens[At[a.have++]]=7&_,_>>>=3,dt-=3}for(;a.have<19;)a.lens[At[a.have++]]=0;if(a.lencode=a.lendyn,a.lenbits=7,zt={bits:a.lenbits},xt=m(w,a.lens,0,19,a.lencode,0,a.work,zt),a.lenbits=zt.bits,xt){t.msg="invalid code lengths set",a.mode=ot;break}a.have=0,a.mode=J;case J:for(;a.have<a.nlen+a.ndist;){for(;St=a.lencode[_&(1<<a.lenbits)-1],gt=St>>>24,mt=St>>>16&255,wt=65535&St,!(gt<=dt);){if(0===l)break 
t;l--,_+=n[s++]<<dt,dt+=8}if(wt<16)_>>>=gt,dt-=gt,a.lens[a.have++]=wt;else{if(16===wt){for(Bt=gt+2;dt<Bt;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}if(_>>>=gt,dt-=gt,0===a.have){t.msg="invalid bit length repeat",a.mode=ot;break}yt=a.lens[a.have-1],ut=3+(3&_),_>>>=2,dt-=2}else if(17===wt){for(Bt=gt+3;dt<Bt;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}dt-=gt,yt=0,ut=3+(7&(_>>>=gt)),_>>>=3,dt-=3}else{for(Bt=gt+7;dt<Bt;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}dt-=gt,yt=0,ut=11+(127&(_>>>=gt)),_>>>=7,dt-=7}if(a.have+ut>a.nlen+a.ndist){t.msg="invalid bit length repeat",a.mode=ot;break}for(;ut--;)a.lens[a.have++]=yt}}if(a.mode===ot)break;if(0===a.lens[256]){t.msg="invalid code -- missing end-of-block",a.mode=ot;break}if(a.lenbits=9,zt={bits:a.lenbits},xt=m(p,a.lens,0,a.nlen,a.lencode,0,a.work,zt),a.lenbits=zt.bits,xt){t.msg="invalid literal/lengths set",a.mode=ot;break}if(a.distbits=6,a.distcode=a.distdyn,zt={bits:a.distbits},xt=m(v,a.lens,a.nlen,a.ndist,a.distcode,0,a.work,zt),a.distbits=zt.bits,xt){t.msg="invalid distances set",a.mode=ot;break}if(a.mode=Q,e===x)break t;case Q:a.mode=V;case V:if(l>=6&&f>=258){t.next_out=o,t.avail_out=f,t.next_in=s,t.avail_in=l,a.hold=_,a.bits=dt,g(t,_t),o=t.next_out,r=t.output,f=t.avail_out,s=t.next_in,n=t.input,l=t.avail_in,_=a.hold,dt=a.bits,a.mode===M&&(a.back=-1);break}for(a.back=0;St=a.lencode[_&(1<<a.lenbits)-1],gt=St>>>24,mt=St>>>16&255,wt=65535&St,!(gt<=dt);){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}if(mt&&0==(240&mt)){for(pt=gt,vt=mt,kt=wt;St=a.lencode[kt+((_&(1<<pt+vt)-1)>>pt)],gt=St>>>24,mt=St>>>16&255,wt=65535&St,!(pt+gt<=dt);){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}_>>>=pt,dt-=pt,a.back+=pt}if(_>>>=gt,dt-=gt,a.back+=gt,a.length=wt,0===mt){a.mode=it;break}if(32&mt){a.back=-1,a.mode=M;break}if(64&mt){t.msg="invalid literal/length code",a.mode=ot;break}a.extra=15&mt,a.mode=$;case $:if(a.extra){for(Bt=a.extra;dt<Bt;){if(0===l)break 
t;l--,_+=n[s++]<<dt,dt+=8}a.length+=_&(1<<a.extra)-1,_>>>=a.extra,dt-=a.extra,a.back+=a.extra}a.was=a.length,a.mode=tt;case tt:for(;St=a.distcode[_&(1<<a.distbits)-1],gt=St>>>24,mt=St>>>16&255,wt=65535&St,!(gt<=dt);){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}if(0==(240&mt)){for(pt=gt,vt=mt,kt=wt;St=a.distcode[kt+((_&(1<<pt+vt)-1)>>pt)],gt=St>>>24,mt=St>>>16&255,wt=65535&St,!(pt+gt<=dt);){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}_>>>=pt,dt-=pt,a.back+=pt}if(_>>>=gt,dt-=gt,a.back+=gt,64&mt){t.msg="invalid distance code",a.mode=ot;break}a.offset=wt,a.extra=15&mt,a.mode=et;case et:if(a.extra){for(Bt=a.extra;dt<Bt;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}a.offset+=_&(1<<a.extra)-1,_>>>=a.extra,dt-=a.extra,a.back+=a.extra}if(a.offset>a.dmax){t.msg="invalid distance too far back",a.mode=ot;break}a.mode=at;case at:if(0===f)break t;if(ut=_t-f,a.offset>ut){if((ut=a.offset-ut)>a.whave&&a.sane){t.msg="invalid distance too far back",a.mode=ot;break}ut>a.wnext?(ut-=a.wnext,ct=a.wsize-ut):ct=a.wnext-ut,ut>a.length&&(ut=a.length),bt=a.window}else bt=r,ct=o-a.offset,ut=a.length;ut>f&&(ut=f),f-=ut,a.length-=ut;do{r[o++]=bt[ct++]}while(--ut);0===a.length&&(a.mode=V);break;case it:if(0===f)break t;r[o++]=a.length,f--,a.mode=V;break;case nt:if(a.wrap){for(;dt<32;){if(0===l)break t;l--,_|=n[s++]<<dt,dt+=8}if(_t-=f,t.total_out+=_t,a.total+=_t,_t&&(t.adler=a.check=a.flags?b(a.check,r,_t,o-_t):c(a.check,r,_t,o-_t)),_t=f,(a.flags?_:i(_))!==a.check){t.msg="incorrect data check",a.mode=ot;break}_=0,dt=0}a.mode=rt;case rt:if(a.wrap&&a.flags){for(;dt<32;){if(0===l)break t;l--,_+=n[s++]<<dt,dt+=8}if(_!==(4294967295&a.total)){t.msg="incorrect length check",a.mode=ot;break}_=0,dt=0}a.mode=st;case st:xt=B;break t;case ot:xt=A;break t;case lt:return Z;case ht:default:return E}return 
t.next_out=o,t.avail_out=f,t.next_in=s,t.avail_in=l,a.hold=_,a.bits=dt,(a.wsize||_t!==t.avail_out&&a.mode<ot&&(a.mode<nt||e!==k))&&d(t,t.output,t.next_out,_t-t.avail_out)?(a.mode=lt,Z):(ft-=t.avail_in,_t-=t.avail_out,t.total_in+=ft,t.total_out+=_t,a.total+=_t,a.wrap&&_t&&(t.adler=a.check=a.flags?b(a.check,r,_t,t.next_out-_t):c(a.check,r,_t,t.next_out-_t)),t.data_type=a.bits+(a.last?64:0)+(a.mode===M?128:0)+(a.mode===Q||a.mode===q?256:0),(0===ft&&0===_t||e===k)&&xt===z&&(xt=R),xt)},a.inflateEnd=function(t){if(!t||!t.state)return E;var e=t.state;return e.window&&(e.window=null),t.state=null,z},a.inflateGetHeader=function(t,e){var a;return t&&t.state?0==(2&(a=t.state).wrap)?E:(a.head=e,e.done=!1,z):E},a.inflateSetDictionary=function(t,e){var a,i,n=e.length;return t&&t.state?0!==(a=t.state).wrap&&a.mode!==K?E:a.mode===K&&(i=1,(i=c(i,e,n,0))!==a.check)?A:d(t,e,n,n)?(a.mode=lt,Z):(a.havedict=1,z):E},a.inflateInfo="pako inflate (from Nodeca project)"},{"../utils/common":3,"./adler32":5,"./crc32":7,"./inffast":10,"./inftrees":12}],12:[function(t,e,a){"use strict";var i=t("../utils/common"),n=[3,4,5,6,7,8,9,10,11,13,15,17,19,23,27,31,35,43,51,59,67,83,99,115,131,163,195,227,258,0,0],r=[16,16,16,16,16,16,16,16,17,17,17,17,18,18,18,18,19,19,19,19,20,20,20,20,21,21,21,21,16,72,78],s=[1,2,3,4,5,7,9,13,17,25,33,49,65,97,129,193,257,385,513,769,1025,1537,2049,3073,4097,6145,8193,12289,16385,24577,0,0],o=[16,16,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,24,24,25,25,26,26,27,27,28,28,29,29,64,64];e.exports=function(t,e,a,l,h,d,f,_){var u,c,b,g,m,w,p,v,k,y=_.bits,x=0,z=0,B=0,S=0,E=0,A=0,Z=0,R=0,C=0,N=0,O=null,D=0,I=new i.Buf16(16),U=new i.Buf16(16),T=null,F=0;for(x=0;x<=15;x++)I[x]=0;for(z=0;z<l;z++)I[e[a+z]]++;for(E=y,S=15;S>=1&&0===I[S];S--);if(E>S&&(E=S),0===S)return 
h[d++]=20971520,h[d++]=20971520,_.bits=1,0;for(B=1;B<S&&0===I[B];B++);for(E<B&&(E=B),R=1,x=1;x<=15;x++)if(R<<=1,(R-=I[x])<0)return-1;if(R>0&&(0===t||1!==S))return-1;for(U[1]=0,x=1;x<15;x++)U[x+1]=U[x]+I[x];for(z=0;z<l;z++)0!==e[a+z]&&(f[U[e[a+z]]++]=z);if(0===t?(O=T=f,w=19):1===t?(O=n,D-=257,T=r,F-=257,w=256):(O=s,T=o,w=-1),N=0,z=0,x=B,m=d,A=E,Z=0,b=-1,C=1<<E,g=C-1,1===t&&C>852||2===t&&C>592)return 1;for(;;){p=x-Z,f[z]<w?(v=0,k=f[z]):f[z]>w?(v=T[F+f[z]],k=O[D+f[z]]):(v=96,k=0),u=1<<x-Z,B=c=1<<A;do{h[m+(N>>Z)+(c-=u)]=p<<24|v<<16|k|0}while(0!==c);for(u=1<<x-1;N&u;)u>>=1;if(0!==u?(N&=u-1,N+=u):N=0,z++,0==--I[x]){if(x===S)break;x=e[a+f[z]]}if(x>E&&(N&g)!==b){for(0===Z&&(Z=E),m+=B,R=1<<(A=x-Z);A+Z<S&&!((R-=I[A+Z])<=0);)A++,R<<=1;if(C+=1<<A,1===t&&C>852||2===t&&C>592)return 1;h[b=N&g]=E<<24|A<<16|m-d|0}}return 0!==N&&(h[m+N]=x-Z<<24|64<<16|0),_.bits=E,0}},{"../utils/common":3}],13:[function(t,e,a){"use strict";e.exports={2:"need dictionary",1:"stream end",0:"","-1":"file error","-2":"stream error","-3":"data error","-4":"insufficient memory","-5":"buffer error","-6":"incompatible version"}},{}],14:[function(t,e,a){"use strict";function i(t){for(var e=t.length;--e>=0;)t[e]=0}function n(t,e,a,i,n){this.static_tree=t,this.extra_bits=e,this.extra_base=a,this.elems=i,this.max_length=n,this.has_stree=t&&t.length}function r(t,e){this.dyn_tree=t,this.max_code=0,this.stat_desc=e}function s(t){return t<256?et[t]:et[256+(t>>>7)]}function o(t,e){t.pending_buf[t.pending++]=255&e,t.pending_buf[t.pending++]=e>>>8&255}function l(t,e,a){t.bi_valid>M-a?(t.bi_buf|=e<<t.bi_valid&65535,o(t,t.bi_buf),t.bi_buf=e>>M-t.bi_valid,t.bi_valid+=a-M):(t.bi_buf|=e<<t.bi_valid&65535,t.bi_valid+=a)}function h(t,e,a){l(t,a[2*e],a[2*e+1])}function d(t,e){var a=0;do{a|=1&t,t>>>=1,a<<=1}while(--e>0);return a>>>1}function f(t){16===t.bi_valid?(o(t,t.bi_buf),t.bi_buf=0,t.bi_valid=0):t.bi_valid>=8&&(t.pending_buf[t.pending++]=255&t.bi_buf,t.bi_buf>>=8,t.bi_valid-=8)}function _(t,e){var 
a,i,n,r,s,o,l=e.dyn_tree,h=e.max_code,d=e.stat_desc.static_tree,f=e.stat_desc.has_stree,_=e.stat_desc.extra_bits,u=e.stat_desc.extra_base,c=e.stat_desc.max_length,b=0;for(r=0;r<=K;r++)t.bl_count[r]=0;for(l[2*t.heap[t.heap_max]+1]=0,a=t.heap_max+1;a<j;a++)(r=l[2*l[2*(i=t.heap[a])+1]+1]+1)>c&&(r=c,b++),l[2*i+1]=r,i>h||(t.bl_count[r]++,s=0,i>=u&&(s=_[i-u]),o=l[2*i],t.opt_len+=o*(r+s),f&&(t.static_len+=o*(d[2*i+1]+s)));if(0!==b){do{for(r=c-1;0===t.bl_count[r];)r--;t.bl_count[r]--,t.bl_count[r+1]+=2,t.bl_count[c]--,b-=2}while(b>0);for(r=c;0!==r;r--)for(i=t.bl_count[r];0!==i;)(n=t.heap[--a])>h||(l[2*n+1]!==r&&(t.opt_len+=(r-l[2*n+1])*l[2*n],l[2*n+1]=r),i--)}}function u(t,e,a){var i,n,r=new Array(K+1),s=0;for(i=1;i<=K;i++)r[i]=s=s+a[i-1]<<1;for(n=0;n<=e;n++){var o=t[2*n+1];0!==o&&(t[2*n]=d(r[o]++,o))}}function c(){var t,e,a,i,r,s=new Array(K+1);for(a=0,i=0;i<U-1;i++)for(it[i]=a,t=0;t<1<<W[i];t++)at[a++]=i;for(at[a-1]=i,r=0,i=0;i<16;i++)for(nt[i]=r,t=0;t<1<<J[i];t++)et[r++]=i;for(r>>=7;i<L;i++)for(nt[i]=r<<7,t=0;t<1<<J[i]-7;t++)et[256+r++]=i;for(e=0;e<=K;e++)s[e]=0;for(t=0;t<=143;)$[2*t+1]=8,t++,s[8]++;for(;t<=255;)$[2*t+1]=9,t++,s[9]++;for(;t<=279;)$[2*t+1]=7,t++,s[7]++;for(;t<=287;)$[2*t+1]=8,t++,s[8]++;for(u($,F+1,s),t=0;t<L;t++)tt[2*t+1]=5,tt[2*t]=d(t,5);rt=new n($,W,T+1,F,K),st=new n(tt,J,0,L,K),ot=new n(new Array(0),Q,0,H,P)}function b(t){var e;for(e=0;e<F;e++)t.dyn_ltree[2*e]=0;for(e=0;e<L;e++)t.dyn_dtree[2*e]=0;for(e=0;e<H;e++)t.bl_tree[2*e]=0;t.dyn_ltree[2*Y]=1,t.opt_len=t.static_len=0,t.last_lit=t.matches=0}function g(t){t.bi_valid>8?o(t,t.bi_buf):t.bi_valid>0&&(t.pending_buf[t.pending++]=t.bi_buf),t.bi_buf=0,t.bi_valid=0}function m(t,e,a,i){g(t),i&&(o(t,a),o(t,~a)),A.arraySet(t.pending_buf,t.window,e,a,t.pending),t.pending+=a}function w(t,e,a,i){var n=2*e,r=2*a;return t[n]<t[r]||t[n]===t[r]&&i[e]<=i[a]}function p(t,e,a){for(var 
i=t.heap[a],n=a<<1;n<=t.heap_len&&(n<t.heap_len&&w(e,t.heap[n+1],t.heap[n],t.depth)&&n++,!w(e,i,t.heap[n],t.depth));)t.heap[a]=t.heap[n],a=n,n<<=1;t.heap[a]=i}function v(t,e,a){var i,n,r,o,d=0;if(0!==t.last_lit)do{i=t.pending_buf[t.d_buf+2*d]<<8|t.pending_buf[t.d_buf+2*d+1],n=t.pending_buf[t.l_buf+d],d++,0===i?h(t,n,e):(h(t,(r=at[n])+T+1,e),0!==(o=W[r])&&l(t,n-=it[r],o),h(t,r=s(--i),a),0!==(o=J[r])&&l(t,i-=nt[r],o))}while(d<t.last_lit);h(t,Y,e)}function k(t,e){var a,i,n,r=e.dyn_tree,s=e.stat_desc.static_tree,o=e.stat_desc.has_stree,l=e.stat_desc.elems,h=-1;for(t.heap_len=0,t.heap_max=j,a=0;a<l;a++)0!==r[2*a]?(t.heap[++t.heap_len]=h=a,t.depth[a]=0):r[2*a+1]=0;for(;t.heap_len<2;)r[2*(n=t.heap[++t.heap_len]=h<2?++h:0)]=1,t.depth[n]=0,t.opt_len--,o&&(t.static_len-=s[2*n+1]);for(e.max_code=h,a=t.heap_len>>1;a>=1;a--)p(t,r,a);n=l;do{a=t.heap[1],t.heap[1]=t.heap[t.heap_len--],p(t,r,1),i=t.heap[1],t.heap[--t.heap_max]=a,t.heap[--t.heap_max]=i,r[2*n]=r[2*a]+r[2*i],t.depth[n]=(t.depth[a]>=t.depth[i]?t.depth[a]:t.depth[i])+1,r[2*a+1]=r[2*i+1]=n,t.heap[1]=n++,p(t,r,1)}while(t.heap_len>=2);t.heap[--t.heap_max]=t.heap[1],_(t,e),u(r,h,t.bl_count)}function y(t,e,a){var i,n,r=-1,s=e[1],o=0,l=7,h=4;for(0===s&&(l=138,h=3),e[2*(a+1)+1]=65535,i=0;i<=a;i++)n=s,s=e[2*(i+1)+1],++o<l&&n===s||(o<h?t.bl_tree[2*n]+=o:0!==n?(n!==r&&t.bl_tree[2*n]++,t.bl_tree[2*q]++):o<=10?t.bl_tree[2*G]++:t.bl_tree[2*X]++,o=0,r=n,0===s?(l=138,h=3):n===s?(l=6,h=3):(l=7,h=4))}function x(t,e,a){var i,n,r=-1,s=e[1],o=0,d=7,f=4;for(0===s&&(d=138,f=3),i=0;i<=a;i++)if(n=s,s=e[2*(i+1)+1],!(++o<d&&n===s)){if(o<f)do{h(t,n,t.bl_tree)}while(0!=--o);else 0!==n?(n!==r&&(h(t,n,t.bl_tree),o--),h(t,q,t.bl_tree),l(t,o-3,2)):o<=10?(h(t,G,t.bl_tree),l(t,o-3,3)):(h(t,X,t.bl_tree),l(t,o-11,7));o=0,r=n,0===s?(d=138,f=3):n===s?(d=6,f=3):(d=7,f=4)}}function z(t){var e;for(y(t,t.dyn_ltree,t.l_desc.max_code),y(t,t.dyn_dtree,t.d_desc.max_code),k(t,t.bl_desc),e=H-1;e>=3&&0===t.bl_tree[2*V[e]+1];e--);return 
t.opt_len+=3*(e+1)+5+5+4,e}function B(t,e,a,i){var n;for(l(t,e-257,5),l(t,a-1,5),l(t,i-4,4),n=0;n<i;n++)l(t,t.bl_tree[2*V[n]+1],3);x(t,t.dyn_ltree,e-1),x(t,t.dyn_dtree,a-1)}function S(t){var e,a=4093624447;for(e=0;e<=31;e++,a>>>=1)if(1&a&&0!==t.dyn_ltree[2*e])return R;if(0!==t.dyn_ltree[18]||0!==t.dyn_ltree[20]||0!==t.dyn_ltree[26])return C;for(e=32;e<T;e++)if(0!==t.dyn_ltree[2*e])return C;return R}function E(t,e,a,i){l(t,(O<<1)+(i?1:0),3),m(t,e,a,!0)}var A=t("../utils/common"),Z=4,R=0,C=1,N=2,O=0,D=1,I=2,U=29,T=256,F=T+1+U,L=30,H=19,j=2*F+1,K=15,M=16,P=7,Y=256,q=16,G=17,X=18,W=[0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,0],J=[0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13],Q=[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,3,7],V=[16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15],$=new Array(2*(F+2));i($);var tt=new Array(2*L);i(tt);var et=new Array(512);i(et);var at=new Array(256);i(at);var it=new Array(U);i(it);var nt=new Array(L);i(nt);var rt,st,ot,lt=!1;a._tr_init=function(t){lt||(c(),lt=!0),t.l_desc=new r(t.dyn_ltree,rt),t.d_desc=new r(t.dyn_dtree,st),t.bl_desc=new r(t.bl_tree,ot),t.bi_buf=0,t.bi_valid=0,b(t)},a._tr_stored_block=E,a._tr_flush_block=function(t,e,a,i){var n,r,s=0;t.level>0?(t.strm.data_type===N&&(t.strm.data_type=S(t)),k(t,t.l_desc),k(t,t.d_desc),s=z(t),n=t.opt_len+3+7>>>3,(r=t.static_len+3+7>>>3)<=n&&(n=r)):n=r=a+5,a+4<=n&&-1!==e?E(t,e,a,i):t.strategy===Z||r===n?(l(t,(D<<1)+(i?1:0),3),v(t,$,tt)):(l(t,(I<<1)+(i?1:0),3),B(t,t.l_desc.max_code+1,t.d_desc.max_code+1,s+1),v(t,t.dyn_ltree,t.dyn_dtree)),b(t),i&&g(t)},a._tr_tally=function(t,e,a){return t.pending_buf[t.d_buf+2*t.last_lit]=e>>>8&255,t.pending_buf[t.d_buf+2*t.last_lit+1]=255&e,t.pending_buf[t.l_buf+t.last_lit]=255&a,t.last_lit++,0===e?t.dyn_ltree[2*a]++:(t.matches++,e--,t.dyn_ltree[2*(at[a]+T+1)]++,t.dyn_dtree[2*s(e)]++),t.last_lit===t.lit_bufsize-1},a._tr_align=function(t){l(t,D<<1,3),h(t,Y,$),f(t)}},{"../utils/common":3}],15:[function(t,e,a){"use 
strict";e.exports=function(){this.input=null,this.next_in=0,this.avail_in=0,this.total_in=0,this.output=null,this.next_out=0,this.avail_out=0,this.total_out=0,this.msg="",this.state=null,this.data_type=2,this.adler=0}},{}],"/":[function(t,e,a){"use strict";var i={};(0,t("./lib/utils/common").assign)(i,t("./lib/deflate"),t("./lib/inflate"),t("./lib/zlib/constants")),e.exports=i},{"./lib/deflate":1,"./lib/inflate":2,"./lib/utils/common":3,"./lib/zlib/constants":6}]},{},[])("/")});'use strict';tr.exportTo('tr.e.importer',function(){const GZIP_MEMBER_HEADER_ID_SIZE=3;const GZIP_HEADER_ID1=0x1f;const GZIP_HEADER_ID2=0x8b;const GZIP_DEFLATE_COMPRESSION=8;function _stringToUInt8Array(str){const array=new Uint8Array(str.length);for(let i=0;i<str.length;++i){array[i]=str.charCodeAt(i);} return array;} function GzipImporter(model,eventData){this.inflateAsTraceStream=false;if(typeof(eventData)==='string'||eventData instanceof String){eventData=_stringToUInt8Array(eventData);}else if(eventData instanceof ArrayBuffer){eventData=new Uint8Array(eventData);}else if(eventData instanceof tr.b.InMemoryTraceStream){eventData=eventData.data;this.inflateAsTraceStream_=true;}else{throw new Error('Unknown gzip data format');} this.model_=model;this.gzipData_=eventData;} @@ -5251,7 +5251,7 @@ this.model_.samples.push(new tr.model.Sample(startInMs,'V8 PC',node,this.v8_thread_,undefined,1));},processDistortion_(distortionInPicoseconds){},processPlotRange_(start,end){},processV8Version_(major,minor,build,patch,candidate){},importEvents(){const logreader=new 
tr.e.importer.v8.LogReader({'timer-event':{parsers:[null,parseInt,parseInt],processor:this.processTimerEvent_.bind(this)},'shared-library':{parsers:[null,parseInt,parseInt],processor:this.processSharedLibrary_.bind(this)},'timer-event-start':{parsers:[null,parseInt],processor:this.processTimerEventStart_.bind(this)},'timer-event-end':{parsers:[null,parseInt],processor:this.processTimerEventEnd_.bind(this)},'code-creation':{parsers:[null,parseInt,parseInt,parseInt,null,'var-args'],processor:this.processCodeCreateEvent_.bind(this)},'code-move':{parsers:[parseInt,parseInt],processor:this.processCodeMoveEvent_.bind(this)},'code-delete':{parsers:[parseInt],processor:this.processCodeDeleteEvent_.bind(this)},'cpp':{parsers:[parseInt,parseInt,null],processor:this.processCppSymbol_.bind(this)},'tick':{parsers:[parseInt,parseInt,parseInt,parseInt,parseInt,'var-args'],processor:this.processTickEvent_.bind(this)},'distortion':{parsers:[parseInt],processor:this.processDistortion_.bind(this)},'plot-range':{parsers:[parseInt,parseInt],processor:this.processPlotRange_.bind(this)},'v8-version':{parsers:[parseInt,parseInt,parseInt,parseInt,parseInt],processor:this.processV8Version_.bind(this)}});this.v8_timer_thread_=this.model_.getOrCreateProcess(-32).getOrCreateThread(1);this.v8_timer_thread_.name='V8 Timers';this.v8_thread_=this.model_.getOrCreateProcess(-32).getOrCreateThread(2);this.v8_thread_.name='V8';const lines=this.logData_.split('\n');for(let i=0;i<lines.length;i++){logreader.processLogLine(lines[i]);} function addSlices(slices,thread){for(let i=0;i<slices.length;i++){const duration=slices[i].end-slices[i].start;const slice=new tr.model.ThreadSlice('v8',slices[i].name,ColorScheme.getColorIdForGeneralPurposeString(slices[i].name),slices[i].start,{},duration);thread.sliceGroup.pushSlice(slice);addSlices(slices[i].children,thread);}} addSlices(this.v8_stack_timeline_,this.v8_thread_);}};tr.importer.Importer.register(V8LogImporter);return{V8LogImporter,};});'use 
strict';if(tr.isVinn){global.window={};} -!function(a){if("object"==typeof exports&&"undefined"!=typeof module)module.exports=a();else if("function"==typeof define&&define.amd)define([],a);else{var b;"undefined"!=typeof window?b=window:"undefined"!=typeof global?b=global:"undefined"!=typeof self&&(b=self),b.JSZip=a()}}(function(){return function a(b,c,d){function e(g,h){if(!c[g]){if(!b[g]){var i="function"==typeof require&&require;if(!h&&i)return i(g,!0);if(f)return f(g,!0);throw new Error("Cannot find module '"+g+"'")}var j=c[g]={exports:{}};b[g][0].call(j.exports,function(a){var c=b[g][1][a];return e(c?c:a)},j,j.exports,a,b,c,d)}return c[g].exports}for(var f="function"==typeof require&&require,g=0;g<d.length;g++)e(d[g]);return e}({1:[function(a,b,c){"use strict";var d="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/=";c.encode=function(a){for(var b,c,e,f,g,h,i,j="",k=0;k<a.length;)b=a.charCodeAt(k++),c=a.charCodeAt(k++),e=a.charCodeAt(k++),f=b>>2,g=(3&b)<<4|c>>4,h=(15&c)<<2|e>>6,i=63&e,isNaN(c)?h=i=64:isNaN(e)&&(i=64),j=j+d.charAt(f)+d.charAt(g)+d.charAt(h)+d.charAt(i);return j},c.decode=function(a){var b,c,e,f,g,h,i,j="",k=0;for(a=a.replace(/[^A-Za-z0-9\+\/\=]/g,"");k<a.length;)f=d.indexOf(a.charAt(k++)),g=d.indexOf(a.charAt(k++)),h=d.indexOf(a.charAt(k++)),i=d.indexOf(a.charAt(k++)),b=f<<2|g>>4,c=(15&g)<<4|h>>2,e=(3&h)<<6|i,j+=String.fromCharCode(b),64!=h&&(j+=String.fromCharCode(c)),64!=i&&(j+=String.fromCharCode(e));return j}},{}],2:[function(a,b){"use strict";function c(){this.compressedSize=0,this.uncompressedSize=0,this.crc32=0,this.compressionMethod=null,this.compressedContent=null}c.prototype={getContent:function(){return null},getCompressedContent:function(){return null}},b.exports=c},{}],3:[function(a,b,c){"use strict";c.STORE={magic:"\x00\x00",compress:function(a){return a},uncompress:function(a){return a},compressInputType:null,uncompressInputType:null},c.DEFLATE=a("./flate")},{"./flate":8}],4:[function(a,b){"use strict";var 
c=a("./utils"),d=[0,1996959894,3993919788,2567524794,124634137,1886057615,3915621685,2657392035,249268274,2044508324,3772115230,2547177864,162941995,2125561021,3887607047,2428444049,498536548,1789927666,4089016648,2227061214,450548861,1843258603,4107580753,2211677639,325883990,1684777152,4251122042,2321926636,335633487,1661365465,4195302755,2366115317,997073096,1281953886,3579855332,2724688242,1006888145,1258607687,3524101629,2768942443,901097722,1119000684,3686517206,2898065728,853044451,1172266101,3705015759,2882616665,651767980,1373503546,3369554304,3218104598,565507253,1454621731,3485111705,3099436303,671266974,1594198024,3322730930,2970347812,795835527,1483230225,3244367275,3060149565,1994146192,31158534,2563907772,4023717930,1907459465,112637215,2680153253,3904427059,2013776290,251722036,2517215374,3775830040,2137656763,141376813,2439277719,3865271297,1802195444,476864866,2238001368,4066508878,1812370925,453092731,2181625025,4111451223,1706088902,314042704,2344532202,4240017532,1658658271,366619977,2362670323,4224994405,1303535960,984961486,2747007092,3569037538,1256170817,1037604311,2765210733,3554079995,1131014506,879679996,2909243462,3663771856,1141124467,855842277,2852801631,3708648649,1342533948,654459306,3188396048,3373015174,1466479909,544179635,3110523913,3462522015,1591671054,702138776,2966460450,3352799412,1504918807,783551873,3082640443,3233442989,3988292384,2596254646,62317068,1957810842,3939845945,2647816111,81470997,1943803523,3814918930,2489596804,225274430,2053790376,3826175755,2466906013,167816743,2097651377,4027552580,2265490386,503444072,1762050814,4150417245,2154129355,426522225,1852507879,4275313526,2312317920,282753626,1742555852,4189708143,2394877945,397917763,1622183637,3604390888,2714866558,953729732,1340076626,3518719985,2797360999,1068828381,1219638859,3624741850,2936675148,906185462,1090812512,3747672003,2825379669,829329135,1181335161,3412177804,3160834842,628085408,1382605366,3423369109,3138078467,570562233,1426400815,3317316542,2
998733608,733239954,1555261956,3268935591,3050360625,752459403,1541320221,2607071920,3965973030,1969922972,40735498,2617837225,3943577151,1913087877,83908371,2512341634,3803740692,2075208622,213261112,2463272603,3855990285,2094854071,198958881,2262029012,4057260610,1759359992,534414190,2176718541,4139329115,1873836001,414664567,2282248934,4279200368,1711684554,285281116,2405801727,4167216745,1634467795,376229701,2685067896,3608007406,1308918612,956543938,2808555105,3495958263,1231636301,1047427035,2932959818,3654703836,1088359270,936918e3,2847714899,3736837829,1202900863,817233897,3183342108,3401237130,1404277552,615818150,3134207493,3453421203,1423857449,601450431,3009837614,3294710456,1567103746,711928724,3020668471,3272380065,1510334235,755167117];b.exports=function(a,b){if("undefined"==typeof a||!a.length)return 0;var e="string"!==c.getTypeOf(a);"undefined"==typeof b&&(b=0);var f=0,g=0,h=0;b=-1^b;for(var i=0,j=a.length;j>i;i++)h=e?a[i]:a.charCodeAt(i),g=255&(b^h),f=d[g],b=b>>>8^f;return-1^b}},{"./utils":21}],5:[function(a,b){"use strict";function c(){this.data=null,this.length=0,this.index=0}var d=a("./utils");c.prototype={checkOffset:function(a){this.checkIndex(this.index+a)},checkIndex:function(a){if(this.length<a||0>a)throw new Error("End of data reached (data length = "+this.length+", asked index = "+a+"). 
Corrupted zip ?")},setIndex:function(a){this.checkIndex(a),this.index=a},skip:function(a){this.setIndex(this.index+a)},byteAt:function(){},readInt:function(a){var b,c=0;for(this.checkOffset(a),b=this.index+a-1;b>=this.index;b--)c=(c<<8)+this.byteAt(b);return this.index+=a,c},readString:function(a){return d.transformTo("string",this.readData(a))},readData:function(){},lastIndexOfSignature:function(){},readDate:function(){var a=this.readInt(4);return new Date((a>>25&127)+1980,(a>>21&15)-1,a>>16&31,a>>11&31,a>>5&63,(31&a)<<1)}},b.exports=c},{"./utils":21}],6:[function(a,b,c){"use strict";c.base64=!1,c.binary=!1,c.dir=!1,c.createFolders=!1,c.date=null,c.compression=null,c.comment=null},{}],7:[function(a,b,c){"use strict";var d=a("./utils");c.string2binary=function(a){return d.string2binary(a)},c.string2Uint8Array=function(a){return d.transformTo("uint8array",a)},c.uint8Array2String=function(a){return d.transformTo("string",a)},c.string2Blob=function(a){var b=d.transformTo("arraybuffer",a);return d.arrayBuffer2Blob(b)},c.arrayBuffer2Blob=function(a){return d.arrayBuffer2Blob(a)},c.transformTo=function(a,b){return d.transformTo(a,b)},c.getTypeOf=function(a){return d.getTypeOf(a)},c.checkSupport=function(a){return d.checkSupport(a)},c.MAX_VALUE_16BITS=d.MAX_VALUE_16BITS,c.MAX_VALUE_32BITS=d.MAX_VALUE_32BITS,c.pretty=function(a){return d.pretty(a)},c.findCompression=function(a){return d.findCompression(a)},c.isRegExp=function(a){return d.isRegExp(a)}},{"./utils":21}],8:[function(a,b,c){"use strict";var d="undefined"!=typeof Uint8Array&&"undefined"!=typeof Uint16Array&&"undefined"!=typeof Uint32Array,e=a("pako");c.uncompressInputType=d?"uint8array":"array",c.compressInputType=d?"uint8array":"array",c.magic="\b\x00",c.compress=function(a){return e.deflateRaw(a)},c.uncompress=function(a){return e.inflateRaw(a)}},{pako:24}],9:[function(a,b){"use strict";function c(a,b){return this instanceof 
c?(this.files={},this.comment=null,this.root="",a&&this.load(a,b),void(this.clone=function(){var a=new c;for(var b in this)"function"!=typeof this[b]&&(a[b]=this[b]);return a})):new c(a,b)}var d=a("./base64");c.prototype=a("./object"),c.prototype.load=a("./load"),c.support=a("./support"),c.defaults=a("./defaults"),c.utils=a("./deprecatedPublicUtils"),c.base64={encode:function(a){return d.encode(a)},decode:function(a){return d.decode(a)}},c.compressions=a("./compressions"),b.exports=c},{"./base64":1,"./compressions":3,"./defaults":6,"./deprecatedPublicUtils":7,"./load":10,"./object":13,"./support":17}],10:[function(a,b){"use strict";var c=a("./base64"),d=a("./zipEntries");b.exports=function(a,b){var e,f,g,h;for(b=b||{},b.base64&&(a=c.decode(a)),f=new d(a,b),e=f.files,g=0;g<e.length;g++)h=e[g],this.file(h.fileName,h.decompressed,{binary:!0,optimizedBinaryString:!0,date:h.date,dir:h.dir,comment:h.fileComment.length?h.fileComment:null,createFolders:b.createFolders});return f.zipComment.length&&(this.comment=f.zipComment),this}},{"./base64":1,"./zipEntries":22}],11:[function(a,b){(function(a){"use strict";b.exports=function(b,c){return new a(b,c)},b.exports.test=function(b){return a.isBuffer(b)}}).call(this,"undefined"!=typeof Buffer?Buffer:void 0)},{}],12:[function(a,b){"use strict";function c(a){this.data=a,this.length=this.data.length,this.index=0}var d=a("./uint8ArrayReader");c.prototype=new d,c.prototype.readData=function(a){this.checkOffset(a);var b=this.data.slice(this.index,this.index+a);return this.index+=a,b},b.exports=c},{"./uint8ArrayReader":18}],13:[function(a,b){"use strict";var c=a("./support"),d=a("./utils"),e=a("./crc32"),f=a("./signature"),g=a("./defaults"),h=a("./base64"),i=a("./compressions"),j=a("./compressedObject"),k=a("./nodeBuffer"),l=a("./utf8"),m=a("./stringWriter"),n=a("./uint8ArrayWriter"),o=function(a){if(a._data instanceof j&&(a._data=a._data.getContent(),a.options.binary=!0,a.options.base64=!1,"uint8array"===d.getTypeOf(a._data))){var 
b=a._data;a._data=new Uint8Array(b.length),0!==b.length&&a._data.set(b,0)}return a._data},p=function(a){var b=o(a),e=d.getTypeOf(b);return"string"===e?!a.options.binary&&c.nodebuffer?k(b,"utf-8"):a.asBinary():b},q=function(a){var b=o(this);return null===b||"undefined"==typeof b?"":(this.options.base64&&(b=h.decode(b)),b=a&&this.options.binary?A.utf8decode(b):d.transformTo("string",b),a||this.options.binary||(b=d.transformTo("string",A.utf8encode(b))),b)},r=function(a,b,c){this.name=a,this.dir=c.dir,this.date=c.date,this.comment=c.comment,this._data=b,this.options=c,this._initialMetadata={dir:c.dir,date:c.date}};r.prototype={asText:function(){return q.call(this,!0)},asBinary:function(){return q.call(this,!1)},asNodeBuffer:function(){var a=p(this);return d.transformTo("nodebuffer",a)},asUint8Array:function(){var a=p(this);return d.transformTo("uint8array",a)},asArrayBuffer:function(){return this.asUint8Array().buffer}};var s=function(a,b){var c,d="";for(c=0;b>c;c++)d+=String.fromCharCode(255&a),a>>>=8;return d},t=function(){var a,b,c={};for(a=0;a<arguments.length;a++)for(b in arguments[a])arguments[a].hasOwnProperty(b)&&"undefined"==typeof c[b]&&(c[b]=arguments[a][b]);return c},u=function(a){return a=a||{},a.base64!==!0||null!==a.binary&&void 0!==a.binary||(a.binary=!0),a=t(a,g),a.date=a.date||new Date,null!==a.compression&&(a.compression=a.compression.toUpperCase()),a},v=function(a,b,c){var e,f=d.getTypeOf(b);if(c=u(c),c.createFolders&&(e=w(a))&&x.call(this,e,!0),c.dir||null===b||"undefined"==typeof b)c.base64=!1,c.binary=!1,b=null;else if("string"===f)c.binary&&!c.base64&&c.optimizedBinaryString!==!0&&(b=d.string2binary(b));else{if(c.base64=!1,c.binary=!0,!(f||b instanceof j))throw new Error("The data of '"+a+"' is in an unsupported format !");"arraybuffer"===f&&(b=d.transformTo("uint8array",b))}var g=new r(a,b,c);return this.files[a]=g,g},w=function(a){"/"==a.slice(-1)&&(a=a.substring(0,a.length-1));var b=a.lastIndexOf("/");return 
b>0?a.substring(0,b):""},x=function(a,b){return"/"!=a.slice(-1)&&(a+="/"),b="undefined"!=typeof b?b:!1,this.files[a]||v.call(this,a,null,{dir:!0,createFolders:b}),this.files[a]},y=function(a,b){var c,f=new j;return a._data instanceof j?(f.uncompressedSize=a._data.uncompressedSize,f.crc32=a._data.crc32,0===f.uncompressedSize||a.dir?(b=i.STORE,f.compressedContent="",f.crc32=0):a._data.compressionMethod===b.magic?f.compressedContent=a._data.getCompressedContent():(c=a._data.getContent(),f.compressedContent=b.compress(d.transformTo(b.compressInputType,c)))):(c=p(a),(!c||0===c.length||a.dir)&&(b=i.STORE,c=""),f.uncompressedSize=c.length,f.crc32=e(c),f.compressedContent=b.compress(d.transformTo(b.compressInputType,c))),f.compressedSize=f.compressedContent.length,f.compressionMethod=b.magic,f},z=function(a,b,c,g){var h,i,j,k,m=(c.compressedContent,d.transformTo("string",l.utf8encode(b.name))),n=b.comment||"",o=d.transformTo("string",l.utf8encode(n)),p=m.length!==b.name.length,q=o.length!==n.length,r=b.options,t="",u="",v="";j=b._initialMetadata.dir!==b.dir?b.dir:r.dir,k=b._initialMetadata.date!==b.date?b.date:r.date,h=k.getHours(),h<<=6,h|=k.getMinutes(),h<<=5,h|=k.getSeconds()/2,i=k.getFullYear()-1980,i<<=4,i|=k.getMonth()+1,i<<=5,i|=k.getDate(),p&&(u=s(1,1)+s(e(m),4)+m,t+="up"+s(u.length,2)+u),q&&(v=s(1,1)+s(this.crc32(o),4)+o,t+="uc"+s(v.length,2)+v);var w="";w+="\n\x00",w+=p||q?"\x00\b":"\x00\x00",w+=c.compressionMethod,w+=s(h,2),w+=s(i,2),w+=s(c.crc32,4),w+=s(c.compressedSize,4),w+=s(c.uncompressedSize,4),w+=s(m.length,2),w+=s(t.length,2);var x=f.LOCAL_FILE_HEADER+w+m+t,y=f.CENTRAL_FILE_HEADER+"\x00"+w+s(o.length,2)+"\x00\x00\x00\x00"+(j===!0?"\x00\x00\x00":"\x00\x00\x00\x00")+s(g,4)+m+t+o;return{fileRecord:x,dirRecord:y,compressedObject:c}},A={load:function(){throw new Error("Load method is not defined. 
Is the file jszip-load.js included ?")},filter:function(a){var b,c,d,e,f=[];for(b in this.files)this.files.hasOwnProperty(b)&&(d=this.files[b],e=new r(d.name,d._data,t(d.options)),c=b.slice(this.root.length,b.length),b.slice(0,this.root.length)===this.root&&a(c,e)&&f.push(e));return f},file:function(a,b,c){if(1===arguments.length){if(d.isRegExp(a)){var e=a;return this.filter(function(a,b){return!b.dir&&e.test(a)})}return this.filter(function(b,c){return!c.dir&&b===a})[0]||null}return a=this.root+a,v.call(this,a,b,c),this},folder:function(a){if(!a)return this;if(d.isRegExp(a))return this.filter(function(b,c){return c.dir&&a.test(b)});var b=this.root+a,c=x.call(this,b),e=this.clone();return e.root=c.name,e},remove:function(a){a=this.root+a;var b=this.files[a];if(b||("/"!=a.slice(-1)&&(a+="/"),b=this.files[a]),b&&!b.dir)delete this.files[a];else for(var c=this.filter(function(b,c){return c.name.slice(0,a.length)===a}),d=0;d<c.length;d++)delete this.files[c[d].name];return this},generate:function(a){a=t(a||{},{base64:!0,compression:"STORE",type:"base64",comment:null}),d.checkSupport(a.type);var b,c,e=[],g=0,j=0,k=d.transformTo("string",this.utf8encode(a.comment||this.comment||""));for(var l in this.files)if(this.files.hasOwnProperty(l)){var o=this.files[l],p=o.options.compression||a.compression.toUpperCase(),q=i[p];if(!q)throw new Error(p+" is not a valid compression method !");var r=y.call(this,o,q),u=z.call(this,l,o,r,g);g+=u.fileRecord.length+r.compressedSize,j+=u.dirRecord.length,e.push(u)}var v="";v=f.CENTRAL_DIRECTORY_END+"\x00\x00\x00\x00"+s(e.length,2)+s(e.length,2)+s(j,4)+s(g,4)+s(k.length,2)+k;var w=a.type.toLowerCase();for(b="uint8array"===w||"arraybuffer"===w||"blob"===w||"nodebuffer"===w?new n(g+j+v.length):new m(g+j+v.length),c=0;c<e.length;c++)b.append(e[c].fileRecord),b.append(e[c].compressedObject.compressedContent);for(c=0;c<e.length;c++)b.append(e[c].dirRecord);b.append(v);var 
x=b.finalize();switch(a.type.toLowerCase()){case"uint8array":case"arraybuffer":case"nodebuffer":return d.transformTo(a.type.toLowerCase(),x);case"blob":return d.arrayBuffer2Blob(d.transformTo("arraybuffer",x));case"base64":return a.base64?h.encode(x):x;default:return x}},crc32:function(a,b){return e(a,b)},utf8encode:function(a){return d.transformTo("string",l.utf8encode(a))},utf8decode:function(a){return l.utf8decode(a)}};b.exports=A},{"./base64":1,"./compressedObject":2,"./compressions":3,"./crc32":4,"./defaults":6,"./nodeBuffer":11,"./signature":14,"./stringWriter":16,"./support":17,"./uint8ArrayWriter":19,"./utf8":20,"./utils":21}],14:[function(a,b,c){"use strict";c.LOCAL_FILE_HEADER="PK",c.CENTRAL_FILE_HEADER="PK",c.CENTRAL_DIRECTORY_END="PK",c.ZIP64_CENTRAL_DIRECTORY_LOCATOR="PK",c.ZIP64_CENTRAL_DIRECTORY_END="PK",c.DATA_DESCRIPTOR="PK\b"},{}],15:[function(a,b){"use strict";function c(a,b){this.data=a,b||(this.data=e.string2binary(this.data)),this.length=this.data.length,this.index=0}var d=a("./dataReader"),e=a("./utils");c.prototype=new d,c.prototype.byteAt=function(a){return this.data.charCodeAt(a)},c.prototype.lastIndexOfSignature=function(a){return this.data.lastIndexOf(a)},c.prototype.readData=function(a){this.checkOffset(a);var b=this.data.slice(this.index,this.index+a);return this.index+=a,b},b.exports=c},{"./dataReader":5,"./utils":21}],16:[function(a,b){"use strict";var c=a("./utils"),d=function(){this.data=[]};d.prototype={append:function(a){a=c.transformTo("string",a),this.data.push(a)},finalize:function(){return this.data.join("")}},b.exports=d},{"./utils":21}],17:[function(a,b,c){(function(a){"use strict";if(c.base64=!0,c.array=!0,c.string=!0,c.arraybuffer="undefined"!=typeof ArrayBuffer&&"undefined"!=typeof Uint8Array,c.nodebuffer="undefined"!=typeof a,c.uint8array="undefined"!=typeof Uint8Array,"undefined"==typeof ArrayBuffer)c.blob=!1;else{var b=new ArrayBuffer(0);try{c.blob=0===new Blob([b],{type:"application/zip"}).size}catch(d){try{var 
e=window.BlobBuilder||window.WebKitBlobBuilder||window.MozBlobBuilder||window.MSBlobBuilder,f=new e;f.append(b),c.blob=0===f.getBlob("application/zip").size}catch(d){c.blob=!1}}}}).call(this,"undefined"!=typeof Buffer?Buffer:void 0)},{}],18:[function(a,b){"use strict";function c(a){a&&(this.data=a,this.length=this.data.length,this.index=0)}var d=a("./dataReader");c.prototype=new d,c.prototype.byteAt=function(a){return this.data[a]},c.prototype.lastIndexOfSignature=function(a){for(var b=a.charCodeAt(0),c=a.charCodeAt(1),d=a.charCodeAt(2),e=a.charCodeAt(3),f=this.length-4;f>=0;--f)if(this.data[f]===b&&this.data[f+1]===c&&this.data[f+2]===d&&this.data[f+3]===e)return f;return-1},c.prototype.readData=function(a){if(this.checkOffset(a),0===a)return new Uint8Array(0);var b=this.data.subarray(this.index,this.index+a);return this.index+=a,b},b.exports=c},{"./dataReader":5}],19:[function(a,b){"use strict";var c=a("./utils"),d=function(a){this.data=new Uint8Array(a),this.index=0};d.prototype={append:function(a){0!==a.length&&(a=c.transformTo("uint8array",a),this.data.set(a,this.index),this.index+=a.length)},finalize:function(){return this.data}},b.exports=d},{"./utils":21}],20:[function(a,b,c){"use strict";for(var d=a("./utils"),e=a("./support"),f=a("./nodeBuffer"),g=new Array(256),h=0;256>h;h++)g[h]=h>=252?6:h>=248?5:h>=240?4:h>=224?3:h>=192?2:1;g[254]=g[254]=1;var i=function(a){var b,c,d,f,g,h=a.length,i=0;for(f=0;h>f;f++)c=a.charCodeAt(f),55296===(64512&c)&&h>f+1&&(d=a.charCodeAt(f+1),56320===(64512&d)&&(c=65536+(c-55296<<10)+(d-56320),f++)),i+=128>c?1:2048>c?2:65536>c?3:4;for(b=e.uint8array?new Uint8Array(i):new Array(i),g=0,f=0;i>g;f++)c=a.charCodeAt(f),55296===(64512&c)&&h>f+1&&(d=a.charCodeAt(f+1),56320===(64512&d)&&(c=65536+(c-55296<<10)+(d-56320),f++)),128>c?b[g++]=c:2048>c?(b[g++]=192|c>>>6,b[g++]=128|63&c):65536>c?(b[g++]=224|c>>>12,b[g++]=128|c>>>6&63,b[g++]=128|63&c):(b[g++]=240|c>>>18,b[g++]=128|c>>>12&63,b[g++]=128|c>>>6&63,b[g++]=128|63&c);return 
b},j=function(a,b){var c;for(b=b||a.length,b>a.length&&(b=a.length),c=b-1;c>=0&&128===(192&a[c]);)c--;return 0>c?b:0===c?b:c+g[a[c]]>b?c:b},k=function(a){var b,c,e,f,h=a.length,i=new Array(2*h);for(c=0,b=0;h>b;)if(e=a[b++],128>e)i[c++]=e;else if(f=g[e],f>4)i[c++]=65533,b+=f-1;else{for(e&=2===f?31:3===f?15:7;f>1&&h>b;)e=e<<6|63&a[b++],f--;f>1?i[c++]=65533:65536>e?i[c++]=e:(e-=65536,i[c++]=55296|e>>10&1023,i[c++]=56320|1023&e)}return i.length!==c&&(i.subarray?i=i.subarray(0,c):i.length=c),d.applyFromCharCode(i)};c.utf8encode=function(a){return e.nodebuffer?f(a,"utf-8"):i(a)},c.utf8decode=function(a){if(e.nodebuffer)return d.transformTo("nodebuffer",a).toString("utf-8");a=d.transformTo(e.uint8array?"uint8array":"array",a);for(var b=[],c=0,f=a.length,g=65536;f>c;){var h=j(a,Math.min(c+g,f));b.push(e.uint8array?k(a.subarray(c,h)):k(a.slice(c,h))),c=h}return b.join("")}},{"./nodeBuffer":11,"./support":17,"./utils":21}],21:[function(a,b,c){"use strict";function d(a){return a}function e(a,b){for(var c=0;c<a.length;++c)b[c]=255&a.charCodeAt(c);return b}function f(a){var b=65536,d=[],e=a.length,f=c.getTypeOf(a),g=0,h=!0;try{switch(f){case"uint8array":String.fromCharCode.apply(null,new Uint8Array(0));break;case"nodebuffer":String.fromCharCode.apply(null,j(0))}}catch(i){h=!1}if(!h){for(var k="",l=0;l<a.length;l++)k+=String.fromCharCode(a[l]);return k}for(;e>g&&b>1;)try{d.push("array"===f||"nodebuffer"===f?String.fromCharCode.apply(null,a.slice(g,Math.min(g+b,e))):String.fromCharCode.apply(null,a.subarray(g,Math.min(g+b,e)))),g+=b}catch(i){b=Math.floor(b/2)}return d.join("")}function g(a,b){for(var c=0;c<a.length;c++)b[c]=a[c];return b}var h=a("./support"),i=a("./compressions"),j=a("./nodeBuffer");c.string2binary=function(a){for(var b="",c=0;c<a.length;c++)b+=String.fromCharCode(255&a.charCodeAt(c));return b},c.arrayBuffer2Blob=function(a){c.checkSupport("blob");try{return new Blob([a],{type:"application/zip"})}catch(b){try{var 
d=window.BlobBuilder||window.WebKitBlobBuilder||window.MozBlobBuilder||window.MSBlobBuilder,e=new d;return e.append(a),e.getBlob("application/zip")}catch(b){throw new Error("Bug : can't construct the Blob.")}}},c.applyFromCharCode=f;var k={};k.string={string:d,array:function(a){return e(a,new Array(a.length))},arraybuffer:function(a){return k.string.uint8array(a).buffer},uint8array:function(a){return e(a,new Uint8Array(a.length))},nodebuffer:function(a){return e(a,j(a.length))}},k.array={string:f,array:d,arraybuffer:function(a){return new Uint8Array(a).buffer},uint8array:function(a){return new Uint8Array(a)},nodebuffer:function(a){return j(a)}},k.arraybuffer={string:function(a){return f(new Uint8Array(a))},array:function(a){return g(new Uint8Array(a),new Array(a.byteLength))},arraybuffer:d,uint8array:function(a){return new Uint8Array(a)},nodebuffer:function(a){return j(new Uint8Array(a))}},k.uint8array={string:f,array:function(a){return g(a,new Array(a.length))},arraybuffer:function(a){return a.buffer},uint8array:d,nodebuffer:function(a){return j(a)}},k.nodebuffer={string:f,array:function(a){return g(a,new Array(a.length))},arraybuffer:function(a){return k.nodebuffer.uint8array(a).buffer},uint8array:function(a){return g(a,new Uint8Array(a.length))},nodebuffer:d},c.transformTo=function(a,b){if(b||(b=""),!a)return b;c.checkSupport(a);var d=c.getTypeOf(b),e=k[d][a](b);return e},c.getTypeOf=function(a){return"string"==typeof a?"string":"[object Array]"===Object.prototype.toString.call(a)?"array":h.nodebuffer&&j.test(a)?"nodebuffer":h.uint8array&&a instanceof Uint8Array?"uint8array":h.arraybuffer&&a instanceof ArrayBuffer?"arraybuffer":void 0},c.checkSupport=function(a){var b=h[a.toLowerCase()];if(!b)throw new Error(a+" is not supported by this browser")},c.MAX_VALUE_16BITS=65535,c.MAX_VALUE_32BITS=-1,c.pretty=function(a){var b,c,d="";for(c=0;c<(a||"").length;c++)b=a.charCodeAt(c),d+="\\x"+(16>b?"0":"")+b.toString(16).toUpperCase();return 
d},c.findCompression=function(a){for(var b in i)if(i.hasOwnProperty(b)&&i[b].magic===a)return i[b];return null},c.isRegExp=function(a){return"[object RegExp]"===Object.prototype.toString.call(a)}},{"./compressions":3,"./nodeBuffer":11,"./support":17}],22:[function(a,b){"use strict";function c(a,b){this.files=[],this.loadOptions=b,a&&this.load(a)}var d=a("./stringReader"),e=a("./nodeBufferReader"),f=a("./uint8ArrayReader"),g=a("./utils"),h=a("./signature"),i=a("./zipEntry"),j=a("./support"),k=a("./object");c.prototype={checkSignature:function(a){var b=this.reader.readString(4);if(b!==a)throw new Error("Corrupted zip or bug : unexpected signature ("+g.pretty(b)+", expected "+g.pretty(a)+")")},readBlockEndOfCentral:function(){this.diskNumber=this.reader.readInt(2),this.diskWithCentralDirStart=this.reader.readInt(2),this.centralDirRecordsOnThisDisk=this.reader.readInt(2),this.centralDirRecords=this.reader.readInt(2),this.centralDirSize=this.reader.readInt(4),this.centralDirOffset=this.reader.readInt(4),this.zipCommentLength=this.reader.readInt(2),this.zipComment=this.reader.readString(this.zipCommentLength),this.zipComment=k.utf8decode(this.zipComment)},readBlockZip64EndOfCentral:function(){this.zip64EndOfCentralSize=this.reader.readInt(8),this.versionMadeBy=this.reader.readString(2),this.versionNeeded=this.reader.readInt(2),this.diskNumber=this.reader.readInt(4),this.diskWithCentralDirStart=this.reader.readInt(4),this.centralDirRecordsOnThisDisk=this.reader.readInt(8),this.centralDirRecords=this.reader.readInt(8),this.centralDirSize=this.reader.readInt(8),this.centralDirOffset=this.reader.readInt(8),this.zip64ExtensibleData={};for(var 
a,b,c,d=this.zip64EndOfCentralSize-44,e=0;d>e;)a=this.reader.readInt(2),b=this.reader.readInt(4),c=this.reader.readString(b),this.zip64ExtensibleData[a]={id:a,length:b,value:c}},readBlockZip64EndOfCentralLocator:function(){if(this.diskWithZip64CentralDirStart=this.reader.readInt(4),this.relativeOffsetEndOfZip64CentralDir=this.reader.readInt(8),this.disksCount=this.reader.readInt(4),this.disksCount>1)throw new Error("Multi-volumes zip are not supported")},readLocalFiles:function(){var a,b;for(a=0;a<this.files.length;a++)b=this.files[a],this.reader.setIndex(b.localHeaderOffset),this.checkSignature(h.LOCAL_FILE_HEADER),b.readLocalPart(this.reader),b.handleUTF8()},readCentralDir:function(){var a;for(this.reader.setIndex(this.centralDirOffset);this.reader.readString(4)===h.CENTRAL_FILE_HEADER;)a=new i({zip64:this.zip64},this.loadOptions),a.readCentralPart(this.reader),this.files.push(a)},readEndOfCentral:function(){var a=this.reader.lastIndexOfSignature(h.CENTRAL_DIRECTORY_END);if(-1===a)throw new Error("Corrupted zip : can't find end of central directory");if(this.reader.setIndex(a),this.checkSignature(h.CENTRAL_DIRECTORY_END),this.readBlockEndOfCentral(),this.diskNumber===g.MAX_VALUE_16BITS||this.diskWithCentralDirStart===g.MAX_VALUE_16BITS||this.centralDirRecordsOnThisDisk===g.MAX_VALUE_16BITS||this.centralDirRecords===g.MAX_VALUE_16BITS||this.centralDirSize===g.MAX_VALUE_32BITS||this.centralDirOffset===g.MAX_VALUE_32BITS){if(this.zip64=!0,a=this.reader.lastIndexOfSignature(h.ZIP64_CENTRAL_DIRECTORY_LOCATOR),-1===a)throw new Error("Corrupted zip : can't find the ZIP64 end of central directory locator");this.reader.setIndex(a),this.checkSignature(h.ZIP64_CENTRAL_DIRECTORY_LOCATOR),this.readBlockZip64EndOfCentralLocator(),this.reader.setIndex(this.relativeOffsetEndOfZip64CentralDir),this.checkSignature(h.ZIP64_CENTRAL_DIRECTORY_END),this.readBlockZip64EndOfCentral()}},prepareReader:function(a){var 
b=g.getTypeOf(a);this.reader="string"!==b||j.uint8array?"nodebuffer"===b?new e(a):new f(g.transformTo("uint8array",a)):new d(a,this.loadOptions.optimizedBinaryString)},load:function(a){this.prepareReader(a),this.readEndOfCentral(),this.readCentralDir(),this.readLocalFiles()}},b.exports=c},{"./nodeBufferReader":12,"./object":13,"./signature":14,"./stringReader":15,"./support":17,"./uint8ArrayReader":18,"./utils":21,"./zipEntry":23}],23:[function(a,b){"use strict";function c(a,b){this.options=a,this.loadOptions=b}var d=a("./stringReader"),e=a("./utils"),f=a("./compressedObject"),g=a("./object");c.prototype={isEncrypted:function(){return 1===(1&this.bitFlag)},useUTF8:function(){return 2048===(2048&this.bitFlag)},prepareCompressedContent:function(a,b,c){return function(){var d=a.index;a.setIndex(b);var e=a.readData(c);return a.setIndex(d),e}},prepareContent:function(a,b,c,d,f){return function(){var a=e.transformTo(d.uncompressInputType,this.getCompressedContent()),b=d.uncompress(a);if(b.length!==f)throw new Error("Bug : uncompressed data size mismatch");return b}},readLocalPart:function(a){var b,c;if(a.skip(22),this.fileNameLength=a.readInt(2),c=a.readInt(2),this.fileName=a.readString(this.fileNameLength),a.skip(c),-1==this.compressedSize||-1==this.uncompressedSize)throw new Error("Bug or corrupted zip : didn't get enough informations from the central directory (compressedSize == -1 || uncompressedSize == -1)");if(b=e.findCompression(this.compressionMethod),null===b)throw new Error("Corrupted zip : compression "+e.pretty(this.compressionMethod)+" unknown (inner file : "+this.fileName+")");if(this.decompressed=new 
f,this.decompressed.compressedSize=this.compressedSize,this.decompressed.uncompressedSize=this.uncompressedSize,this.decompressed.crc32=this.crc32,this.decompressed.compressionMethod=this.compressionMethod,this.decompressed.getCompressedContent=this.prepareCompressedContent(a,a.index,this.compressedSize,b),this.decompressed.getContent=this.prepareContent(a,a.index,this.compressedSize,b,this.uncompressedSize),this.loadOptions.checkCRC32&&(this.decompressed=e.transformTo("string",this.decompressed.getContent()),g.crc32(this.decompressed)!==this.crc32))throw new Error("Corrupted zip : CRC32 mismatch")},readCentralPart:function(a){if(this.versionMadeBy=a.readString(2),this.versionNeeded=a.readInt(2),this.bitFlag=a.readInt(2),this.compressionMethod=a.readString(2),this.date=a.readDate(),this.crc32=a.readInt(4),this.compressedSize=a.readInt(4),this.uncompressedSize=a.readInt(4),this.fileNameLength=a.readInt(2),this.extraFieldsLength=a.readInt(2),this.fileCommentLength=a.readInt(2),this.diskNumberStart=a.readInt(2),this.internalFileAttributes=a.readInt(2),this.externalFileAttributes=a.readInt(4),this.localHeaderOffset=a.readInt(4),this.isEncrypted())throw new Error("Encrypted zip are not supported");this.fileName=a.readString(this.fileNameLength),this.readExtraFields(a),this.parseZIP64ExtraField(a),this.fileComment=a.readString(this.fileCommentLength),this.dir=16&this.externalFileAttributes?!0:!1},parseZIP64ExtraField:function(){if(this.extraFields[1]){var a=new d(this.extraFields[1].value);this.uncompressedSize===e.MAX_VALUE_32BITS&&(this.uncompressedSize=a.readInt(8)),this.compressedSize===e.MAX_VALUE_32BITS&&(this.compressedSize=a.readInt(8)),this.localHeaderOffset===e.MAX_VALUE_32BITS&&(this.localHeaderOffset=a.readInt(8)),this.diskNumberStart===e.MAX_VALUE_32BITS&&(this.diskNumberStart=a.readInt(4))}},readExtraFields:function(a){var 
b,c,d,e=a.index;for(this.extraFields=this.extraFields||{};a.index<e+this.extraFieldsLength;)b=a.readInt(2),c=a.readInt(2),d=a.readString(c),this.extraFields[b]={id:b,length:c,value:d}},handleUTF8:function(){if(this.useUTF8())this.fileName=g.utf8decode(this.fileName),this.fileComment=g.utf8decode(this.fileComment);else{var a=this.findExtraFieldUnicodePath();null!==a&&(this.fileName=a);var b=this.findExtraFieldUnicodeComment();null!==b&&(this.fileComment=b)}},findExtraFieldUnicodePath:function(){var a=this.extraFields[28789];if(a){var b=new d(a.value);return 1!==b.readInt(1)?null:g.crc32(this.fileName)!==b.readInt(4)?null:g.utf8decode(b.readString(a.length-5))}return null},findExtraFieldUnicodeComment:function(){var a=this.extraFields[25461];if(a){var b=new d(a.value);return 1!==b.readInt(1)?null:g.crc32(this.fileComment)!==b.readInt(4)?null:g.utf8decode(b.readString(a.length-5))}return null}},b.exports=c},{"./compressedObject":2,"./object":13,"./stringReader":15,"./utils":21}],24:[function(a,b){"use strict";var c=a("./lib/utils/common").assign,d=a("./lib/deflate"),e=a("./lib/inflate"),f=a("./lib/zlib/constants"),g={};c(g,d,e,f),b.exports=g},{"./lib/deflate":25,"./lib/inflate":26,"./lib/utils/common":27,"./lib/zlib/constants":30}],25:[function(a,b,c){"use strict";function d(a,b){var c=new s(b);if(c.push(a,!0),c.err)throw c.msg;return c.result}function e(a,b){return b=b||{},b.raw=!0,d(a,b)}function f(a,b){return b=b||{},b.gzip=!0,d(a,b)}var g=a("./zlib/deflate.js"),h=a("./utils/common"),i=a("./utils/strings"),j=a("./zlib/messages"),k=a("./zlib/zstream"),l=0,m=4,n=0,o=1,p=-1,q=0,r=8,s=function(a){this.options=h.assign({level:p,method:r,chunkSize:16384,windowBits:15,memLevel:8,strategy:q,to:""},a||{});var b=this.options;b.raw&&b.windowBits>0?b.windowBits=-b.windowBits:b.gzip&&b.windowBits>0&&b.windowBits<16&&(b.windowBits+=16),this.err=0,this.msg="",this.ended=!1,this.chunks=[],this.strm=new k,this.strm.avail_out=0;var 
c=g.deflateInit2(this.strm,b.level,b.method,b.windowBits,b.memLevel,b.strategy);if(c!==n)throw new Error(j[c]);b.header&&g.deflateSetHeader(this.strm,b.header)};s.prototype.push=function(a,b){var c,d,e=this.strm,f=this.options.chunkSize;if(this.ended)return!1;d=b===~~b?b:b===!0?m:l,e.input="string"==typeof a?i.string2buf(a):a,e.next_in=0,e.avail_in=e.input.length;do{if(0===e.avail_out&&(e.output=new h.Buf8(f),e.next_out=0,e.avail_out=f),c=g.deflate(e,d),c!==o&&c!==n)return this.onEnd(c),this.ended=!0,!1;(0===e.avail_out||0===e.avail_in&&d===m)&&this.onData("string"===this.options.to?i.buf2binstring(h.shrinkBuf(e.output,e.next_out)):h.shrinkBuf(e.output,e.next_out))}while((e.avail_in>0||0===e.avail_out)&&c!==o);return d===m?(c=g.deflateEnd(this.strm),this.onEnd(c),this.ended=!0,c===n):!0},s.prototype.onData=function(a){this.chunks.push(a)},s.prototype.onEnd=function(a){a===n&&(this.result="string"===this.options.to?this.chunks.join(""):h.flattenChunks(this.chunks)),this.chunks=[],this.err=a,this.msg=this.strm.msg},c.Deflate=s,c.deflate=d,c.deflateRaw=e,c.gzip=f},{"./utils/common":27,"./utils/strings":28,"./zlib/deflate.js":32,"./zlib/messages":37,"./zlib/zstream":39}],26:[function(a,b,c){"use strict";function d(a,b){var c=new m(b);if(c.push(a,!0),c.err)throw c.msg;return c.result}function e(a,b){return b=b||{},b.raw=!0,d(a,b)}var f=a("./zlib/inflate.js"),g=a("./utils/common"),h=a("./utils/strings"),i=a("./zlib/constants"),j=a("./zlib/messages"),k=a("./zlib/zstream"),l=a("./zlib/gzheader"),m=function(a){this.options=g.assign({chunkSize:16384,windowBits:0,to:""},a||{});var b=this.options;b.raw&&b.windowBits>=0&&b.windowBits<16&&(b.windowBits=-b.windowBits,0===b.windowBits&&(b.windowBits=-15)),!(b.windowBits>=0&&b.windowBits<16)||a&&a.windowBits||(b.windowBits+=32),b.windowBits>15&&b.windowBits<48&&0===(15&b.windowBits)&&(b.windowBits|=15),this.err=0,this.msg="",this.ended=!1,this.chunks=[],this.strm=new k,this.strm.avail_out=0;var 
c=f.inflateInit2(this.strm,b.windowBits);if(c!==i.Z_OK)throw new Error(j[c]);this.header=new l,f.inflateGetHeader(this.strm,this.header)};m.prototype.push=function(a,b){var c,d,e,j,k,l=this.strm,m=this.options.chunkSize;if(this.ended)return!1;d=b===~~b?b:b===!0?i.Z_FINISH:i.Z_NO_FLUSH,l.input="string"==typeof a?h.binstring2buf(a):a,l.next_in=0,l.avail_in=l.input.length;do{if(0===l.avail_out&&(l.output=new g.Buf8(m),l.next_out=0,l.avail_out=m),c=f.inflate(l,i.Z_NO_FLUSH),c!==i.Z_STREAM_END&&c!==i.Z_OK)return this.onEnd(c),this.ended=!0,!1;l.next_out&&(0===l.avail_out||c===i.Z_STREAM_END||0===l.avail_in&&d===i.Z_FINISH)&&("string"===this.options.to?(e=h.utf8border(l.output,l.next_out),j=l.next_out-e,k=h.buf2string(l.output,e),l.next_out=j,l.avail_out=m-j,j&&g.arraySet(l.output,l.output,e,j,0),this.onData(k)):this.onData(g.shrinkBuf(l.output,l.next_out)))}while(l.avail_in>0&&c!==i.Z_STREAM_END);return c===i.Z_STREAM_END&&(d=i.Z_FINISH),d===i.Z_FINISH?(c=f.inflateEnd(this.strm),this.onEnd(c),this.ended=!0,c===i.Z_OK):!0},m.prototype.onData=function(a){this.chunks.push(a)},m.prototype.onEnd=function(a){a===i.Z_OK&&(this.result="string"===this.options.to?this.chunks.join(""):g.flattenChunks(this.chunks)),this.chunks=[],this.err=a,this.msg=this.strm.msg},c.Inflate=m,c.inflate=d,c.inflateRaw=e,c.ungzip=d},{"./utils/common":27,"./utils/strings":28,"./zlib/constants":30,"./zlib/gzheader":33,"./zlib/inflate.js":35,"./zlib/messages":37,"./zlib/zstream":39}],27:[function(a,b,c){"use strict";var d="undefined"!=typeof Uint8Array&&"undefined"!=typeof Uint16Array&&"undefined"!=typeof Int32Array;c.assign=function(a){for(var b=Array.prototype.slice.call(arguments,1);b.length;){var c=b.shift();if(c){if("object"!=typeof c)throw new TypeError(c+"must be non-object");for(var d in c)c.hasOwnProperty(d)&&(a[d]=c[d])}}return a},c.shrinkBuf=function(a,b){return a.length===b?a:a.subarray?a.subarray(0,b):(a.length=b,a)};var e={arraySet:function(a,b,c,d,e){if(b.subarray&&a.subarray)return void 
a.set(b.subarray(c,c+d),e);for(var f=0;d>f;f++)a[e+f]=b[c+f]},flattenChunks:function(a){var b,c,d,e,f,g;for(d=0,b=0,c=a.length;c>b;b++)d+=a[b].length;for(g=new Uint8Array(d),e=0,b=0,c=a.length;c>b;b++)f=a[b],g.set(f,e),e+=f.length;return g}},f={arraySet:function(a,b,c,d,e){for(var f=0;d>f;f++)a[e+f]=b[c+f]},flattenChunks:function(a){return[].concat.apply([],a)}};c.setTyped=function(a){a?(c.Buf8=Uint8Array,c.Buf16=Uint16Array,c.Buf32=Int32Array,c.assign(c,e)):(c.Buf8=Array,c.Buf16=Array,c.Buf32=Array,c.assign(c,f))},c.setTyped(d)},{}],28:[function(a,b,c){"use strict";function d(a,b){if(65537>b&&(a.subarray&&g||!a.subarray&&f))return String.fromCharCode.apply(null,e.shrinkBuf(a,b));for(var c="",d=0;b>d;d++)c+=String.fromCharCode(a[d]);return c}var e=a("./common"),f=!0,g=!0;try{String.fromCharCode.apply(null,[0])}catch(h){f=!1}try{String.fromCharCode.apply(null,new Uint8Array(1))}catch(h){g=!1}for(var i=new e.Buf8(256),j=0;256>j;j++)i[j]=j>=252?6:j>=248?5:j>=240?4:j>=224?3:j>=192?2:1;i[254]=i[254]=1,c.string2buf=function(a){var b,c,d,f,g,h=a.length,i=0;for(f=0;h>f;f++)c=a.charCodeAt(f),55296===(64512&c)&&h>f+1&&(d=a.charCodeAt(f+1),56320===(64512&d)&&(c=65536+(c-55296<<10)+(d-56320),f++)),i+=128>c?1:2048>c?2:65536>c?3:4;for(b=new e.Buf8(i),g=0,f=0;i>g;f++)c=a.charCodeAt(f),55296===(64512&c)&&h>f+1&&(d=a.charCodeAt(f+1),56320===(64512&d)&&(c=65536+(c-55296<<10)+(d-56320),f++)),128>c?b[g++]=c:2048>c?(b[g++]=192|c>>>6,b[g++]=128|63&c):65536>c?(b[g++]=224|c>>>12,b[g++]=128|c>>>6&63,b[g++]=128|63&c):(b[g++]=240|c>>>18,b[g++]=128|c>>>12&63,b[g++]=128|c>>>6&63,b[g++]=128|63&c);return b},c.buf2binstring=function(a){return d(a,a.length)},c.binstring2buf=function(a){for(var b=new e.Buf8(a.length),c=0,d=b.length;d>c;c++)b[c]=a.charCodeAt(c);return b},c.buf2string=function(a,b){var c,e,f,g,h=b||a.length,j=new Array(2*h);for(e=0,c=0;h>c;)if(f=a[c++],128>f)j[e++]=f;else 
if(g=i[f],g>4)j[e++]=65533,c+=g-1;else{for(f&=2===g?31:3===g?15:7;g>1&&h>c;)f=f<<6|63&a[c++],g--;g>1?j[e++]=65533:65536>f?j[e++]=f:(f-=65536,j[e++]=55296|f>>10&1023,j[e++]=56320|1023&f)}return d(j,e)},c.utf8border=function(a,b){var c;for(b=b||a.length,b>a.length&&(b=a.length),c=b-1;c>=0&&128===(192&a[c]);)c--;return 0>c?b:0===c?b:c+i[a[c]]>b?c:b}},{"./common":27}],29:[function(a,b){"use strict";function c(a,b,c,d){for(var e=65535&a|0,f=a>>>16&65535|0,g=0;0!==c;){g=c>2e3?2e3:c,c-=g;do e=e+b[d++]|0,f=f+e|0;while(--g);e%=65521,f%=65521}return e|f<<16|0}b.exports=c},{}],30:[function(a,b){b.exports={Z_NO_FLUSH:0,Z_PARTIAL_FLUSH:1,Z_SYNC_FLUSH:2,Z_FULL_FLUSH:3,Z_FINISH:4,Z_BLOCK:5,Z_TREES:6,Z_OK:0,Z_STREAM_END:1,Z_NEED_DICT:2,Z_ERRNO:-1,Z_STREAM_ERROR:-2,Z_DATA_ERROR:-3,Z_BUF_ERROR:-5,Z_NO_COMPRESSION:0,Z_BEST_SPEED:1,Z_BEST_COMPRESSION:9,Z_DEFAULT_COMPRESSION:-1,Z_FILTERED:1,Z_HUFFMAN_ONLY:2,Z_RLE:3,Z_FIXED:4,Z_DEFAULT_STRATEGY:0,Z_BINARY:0,Z_TEXT:1,Z_UNKNOWN:2,Z_DEFLATED:8}},{}],31:[function(a,b){"use strict";function c(){for(var a,b=[],c=0;256>c;c++){a=c;for(var d=0;8>d;d++)a=1&a?3988292384^a>>>1:a>>>1;b[c]=a}return b}function d(a,b,c,d){var f=e,g=d+c;a=-1^a;for(var h=d;g>h;h++)a=a>>>8^f[255&(a^b[h])];return-1^a}var e=c();b.exports=d},{}],32:[function(a,b,c){"use strict";function d(a,b){return a.msg=G[b],b}function e(a){return(a<<1)-(a>4?9:0)}function f(a){for(var b=a.length;--b>=0;)a[b]=0}function g(a){var b=a.state,c=b.pending;c>a.avail_out&&(c=a.avail_out),0!==c&&(C.arraySet(a.output,b.pending_buf,b.pending_out,c,a.next_out),a.next_out+=c,b.pending_out+=c,a.total_out+=c,a.avail_out-=c,b.pending-=c,0===b.pending&&(b.pending_out=0))}function h(a,b){D._tr_flush_block(a,a.block_start>=0?a.block_start:-1,a.strstart-a.block_start,b),a.block_start=a.strstart,g(a.strm)}function i(a,b){a.pending_buf[a.pending++]=b}function j(a,b){a.pending_buf[a.pending++]=b>>>8&255,a.pending_buf[a.pending++]=255&b}function k(a,b,c,d){var e=a.avail_in;return 
e>d&&(e=d),0===e?0:(a.avail_in-=e,C.arraySet(b,a.input,a.next_in,e,c),1===a.state.wrap?a.adler=E(a.adler,b,e,c):2===a.state.wrap&&(a.adler=F(a.adler,b,e,c)),a.next_in+=e,a.total_in+=e,e)}function l(a,b){var c,d,e=a.max_chain_length,f=a.strstart,g=a.prev_length,h=a.nice_match,i=a.strstart>a.w_size-jb?a.strstart-(a.w_size-jb):0,j=a.window,k=a.w_mask,l=a.prev,m=a.strstart+ib,n=j[f+g-1],o=j[f+g];a.prev_length>=a.good_match&&(e>>=2),h>a.lookahead&&(h=a.lookahead);do if(c=b,j[c+g]===o&&j[c+g-1]===n&&j[c]===j[f]&&j[++c]===j[f+1]){f+=2,c++;do;while(j[++f]===j[++c]&&j[++f]===j[++c]&&j[++f]===j[++c]&&j[++f]===j[++c]&&j[++f]===j[++c]&&j[++f]===j[++c]&&j[++f]===j[++c]&&j[++f]===j[++c]&&m>f);if(d=ib-(m-f),f=m-ib,d>g){if(a.match_start=b,g=d,d>=h)break;n=j[f+g-1],o=j[f+g]}}while((b=l[b&k])>i&&0!==--e);return g<=a.lookahead?g:a.lookahead}function m(a){var b,c,d,e,f,g=a.w_size;do{if(e=a.window_size-a.lookahead-a.strstart,a.strstart>=g+(g-jb)){C.arraySet(a.window,a.window,g,g,0),a.match_start-=g,a.strstart-=g,a.block_start-=g,c=a.hash_size,b=c;do d=a.head[--b],a.head[b]=d>=g?d-g:0;while(--c);c=g,b=c;do d=a.prev[--b],a.prev[b]=d>=g?d-g:0;while(--c);e+=g}if(0===a.strm.avail_in)break;if(c=k(a.strm,a.window,a.strstart+a.lookahead,e),a.lookahead+=c,a.lookahead+a.insert>=hb)for(f=a.strstart-a.insert,a.ins_h=a.window[f],a.ins_h=(a.ins_h<<a.hash_shift^a.window[f+1])&a.hash_mask;a.insert&&(a.ins_h=(a.ins_h<<a.hash_shift^a.window[f+hb-1])&a.hash_mask,a.prev[f&a.w_mask]=a.head[a.ins_h],a.head[a.ins_h]=f,f++,a.insert--,!(a.lookahead+a.insert<hb)););}while(a.lookahead<jb&&0!==a.strm.avail_in)}function n(a,b){var c=65535;for(c>a.pending_buf_size-5&&(c=a.pending_buf_size-5);;){if(a.lookahead<=1){if(m(a),0===a.lookahead&&b===H)return sb;if(0===a.lookahead)break}a.strstart+=a.lookahead,a.lookahead=0;var d=a.block_start+c;if((0===a.strstart||a.strstart>=d)&&(a.lookahead=a.strstart-d,a.strstart=d,h(a,!1),0===a.strm.avail_out))return 
sb;if(a.strstart-a.block_start>=a.w_size-jb&&(h(a,!1),0===a.strm.avail_out))return sb}return a.insert=0,b===K?(h(a,!0),0===a.strm.avail_out?ub:vb):a.strstart>a.block_start&&(h(a,!1),0===a.strm.avail_out)?sb:sb}function o(a,b){for(var c,d;;){if(a.lookahead<jb){if(m(a),a.lookahead<jb&&b===H)return sb;if(0===a.lookahead)break}if(c=0,a.lookahead>=hb&&(a.ins_h=(a.ins_h<<a.hash_shift^a.window[a.strstart+hb-1])&a.hash_mask,c=a.prev[a.strstart&a.w_mask]=a.head[a.ins_h],a.head[a.ins_h]=a.strstart),0!==c&&a.strstart-c<=a.w_size-jb&&(a.match_length=l(a,c)),a.match_length>=hb)if(d=D._tr_tally(a,a.strstart-a.match_start,a.match_length-hb),a.lookahead-=a.match_length,a.match_length<=a.max_lazy_match&&a.lookahead>=hb){a.match_length--;do a.strstart++,a.ins_h=(a.ins_h<<a.hash_shift^a.window[a.strstart+hb-1])&a.hash_mask,c=a.prev[a.strstart&a.w_mask]=a.head[a.ins_h],a.head[a.ins_h]=a.strstart;while(0!==--a.match_length);a.strstart++}else a.strstart+=a.match_length,a.match_length=0,a.ins_h=a.window[a.strstart],a.ins_h=(a.ins_h<<a.hash_shift^a.window[a.strstart+1])&a.hash_mask;else d=D._tr_tally(a,0,a.window[a.strstart]),a.lookahead--,a.strstart++;if(d&&(h(a,!1),0===a.strm.avail_out))return sb}return a.insert=a.strstart<hb-1?a.strstart:hb-1,b===K?(h(a,!0),0===a.strm.avail_out?ub:vb):a.last_lit&&(h(a,!1),0===a.strm.avail_out)?sb:tb}function p(a,b){for(var c,d,e;;){if(a.lookahead<jb){if(m(a),a.lookahead<jb&&b===H)return 
sb;if(0===a.lookahead)break}if(c=0,a.lookahead>=hb&&(a.ins_h=(a.ins_h<<a.hash_shift^a.window[a.strstart+hb-1])&a.hash_mask,c=a.prev[a.strstart&a.w_mask]=a.head[a.ins_h],a.head[a.ins_h]=a.strstart),a.prev_length=a.match_length,a.prev_match=a.match_start,a.match_length=hb-1,0!==c&&a.prev_length<a.max_lazy_match&&a.strstart-c<=a.w_size-jb&&(a.match_length=l(a,c),a.match_length<=5&&(a.strategy===S||a.match_length===hb&&a.strstart-a.match_start>4096)&&(a.match_length=hb-1)),a.prev_length>=hb&&a.match_length<=a.prev_length){e=a.strstart+a.lookahead-hb,d=D._tr_tally(a,a.strstart-1-a.prev_match,a.prev_length-hb),a.lookahead-=a.prev_length-1,a.prev_length-=2;do++a.strstart<=e&&(a.ins_h=(a.ins_h<<a.hash_shift^a.window[a.strstart+hb-1])&a.hash_mask,c=a.prev[a.strstart&a.w_mask]=a.head[a.ins_h],a.head[a.ins_h]=a.strstart);while(0!==--a.prev_length);if(a.match_available=0,a.match_length=hb-1,a.strstart++,d&&(h(a,!1),0===a.strm.avail_out))return sb}else if(a.match_available){if(d=D._tr_tally(a,0,a.window[a.strstart-1]),d&&h(a,!1),a.strstart++,a.lookahead--,0===a.strm.avail_out)return sb}else a.match_available=1,a.strstart++,a.lookahead--}return a.match_available&&(d=D._tr_tally(a,0,a.window[a.strstart-1]),a.match_available=0),a.insert=a.strstart<hb-1?a.strstart:hb-1,b===K?(h(a,!0),0===a.strm.avail_out?ub:vb):a.last_lit&&(h(a,!1),0===a.strm.avail_out)?sb:tb}function q(a,b){for(var c,d,e,f,g=a.window;;){if(a.lookahead<=ib){if(m(a),a.lookahead<=ib&&b===H)return 
sb;if(0===a.lookahead)break}if(a.match_length=0,a.lookahead>=hb&&a.strstart>0&&(e=a.strstart-1,d=g[e],d===g[++e]&&d===g[++e]&&d===g[++e])){f=a.strstart+ib;do;while(d===g[++e]&&d===g[++e]&&d===g[++e]&&d===g[++e]&&d===g[++e]&&d===g[++e]&&d===g[++e]&&d===g[++e]&&f>e);a.match_length=ib-(f-e),a.match_length>a.lookahead&&(a.match_length=a.lookahead)}if(a.match_length>=hb?(c=D._tr_tally(a,1,a.match_length-hb),a.lookahead-=a.match_length,a.strstart+=a.match_length,a.match_length=0):(c=D._tr_tally(a,0,a.window[a.strstart]),a.lookahead--,a.strstart++),c&&(h(a,!1),0===a.strm.avail_out))return sb}return a.insert=0,b===K?(h(a,!0),0===a.strm.avail_out?ub:vb):a.last_lit&&(h(a,!1),0===a.strm.avail_out)?sb:tb}function r(a,b){for(var c;;){if(0===a.lookahead&&(m(a),0===a.lookahead)){if(b===H)return sb;break}if(a.match_length=0,c=D._tr_tally(a,0,a.window[a.strstart]),a.lookahead--,a.strstart++,c&&(h(a,!1),0===a.strm.avail_out))return sb}return a.insert=0,b===K?(h(a,!0),0===a.strm.avail_out?ub:vb):a.last_lit&&(h(a,!1),0===a.strm.avail_out)?sb:tb}function s(a){a.window_size=2*a.w_size,f(a.head),a.max_lazy_match=B[a.level].max_lazy,a.good_match=B[a.level].good_length,a.nice_match=B[a.level].nice_length,a.max_chain_length=B[a.level].max_chain,a.strstart=0,a.block_start=0,a.lookahead=0,a.insert=0,a.match_length=a.prev_length=hb-1,a.match_available=0,a.ins_h=0}function 
t(){this.strm=null,this.status=0,this.pending_buf=null,this.pending_buf_size=0,this.pending_out=0,this.pending=0,this.wrap=0,this.gzhead=null,this.gzindex=0,this.method=Y,this.last_flush=-1,this.w_size=0,this.w_bits=0,this.w_mask=0,this.window=null,this.window_size=0,this.prev=null,this.head=null,this.ins_h=0,this.hash_size=0,this.hash_bits=0,this.hash_mask=0,this.hash_shift=0,this.block_start=0,this.match_length=0,this.prev_match=0,this.match_available=0,this.strstart=0,this.match_start=0,this.lookahead=0,this.prev_length=0,this.max_chain_length=0,this.max_lazy_match=0,this.level=0,this.strategy=0,this.good_match=0,this.nice_match=0,this.dyn_ltree=new C.Buf16(2*fb),this.dyn_dtree=new C.Buf16(2*(2*db+1)),this.bl_tree=new C.Buf16(2*(2*eb+1)),f(this.dyn_ltree),f(this.dyn_dtree),f(this.bl_tree),this.l_desc=null,this.d_desc=null,this.bl_desc=null,this.bl_count=new C.Buf16(gb+1),this.heap=new C.Buf16(2*cb+1),f(this.heap),this.heap_len=0,this.heap_max=0,this.depth=new C.Buf16(2*cb+1),f(this.depth),this.l_buf=0,this.lit_bufsize=0,this.last_lit=0,this.d_buf=0,this.opt_len=0,this.static_len=0,this.matches=0,this.insert=0,this.bi_buf=0,this.bi_valid=0}function u(a){var b;return a&&a.state?(a.total_in=a.total_out=0,a.data_type=X,b=a.state,b.pending=0,b.pending_out=0,b.wrap<0&&(b.wrap=-b.wrap),b.status=b.wrap?lb:qb,a.adler=2===b.wrap?0:1,b.last_flush=H,D._tr_init(b),M):d(a,O)}function v(a){var b=u(a);return b===M&&s(a.state),b}function w(a,b){return a&&a.state?2!==a.state.wrap?O:(a.state.gzhead=b,M):O}function x(a,b,c,e,f,g){if(!a)return O;var h=1;if(b===R&&(b=6),0>e?(h=0,e=-e):e>15&&(h=2,e-=16),1>f||f>Z||c!==Y||8>e||e>15||0>b||b>9||0>g||g>V)return d(a,O);8===e&&(e=9);var i=new t;return a.state=i,i.strm=a,i.wrap=h,i.gzhead=null,i.w_bits=e,i.w_size=1<<i.w_bits,i.w_mask=i.w_size-1,i.hash_bits=f+7,i.hash_size=1<<i.hash_bits,i.hash_mask=i.hash_size-1,i.hash_shift=~~((i.hash_bits+hb-1)/hb),i.window=new C.Buf8(2*i.w_size),i.head=new C.Buf16(i.hash_size),i.prev=new 
C.Buf16(i.w_size),i.lit_bufsize=1<<f+6,i.pending_buf_size=4*i.lit_bufsize,i.pending_buf=new C.Buf8(i.pending_buf_size),i.d_buf=i.lit_bufsize>>1,i.l_buf=3*i.lit_bufsize,i.level=b,i.strategy=g,i.method=c,v(a)}function y(a,b){return x(a,b,Y,$,_,W)}function z(a,b){var c,h,k,l;if(!a||!a.state||b>L||0>b)return a?d(a,O):O;if(h=a.state,!a.output||!a.input&&0!==a.avail_in||h.status===rb&&b!==K)return d(a,0===a.avail_out?Q:O);if(h.strm=a,c=h.last_flush,h.last_flush=b,h.status===lb)if(2===h.wrap)a.adler=0,i(h,31),i(h,139),i(h,8),h.gzhead?(i(h,(h.gzhead.text?1:0)+(h.gzhead.hcrc?2:0)+(h.gzhead.extra?4:0)+(h.gzhead.name?8:0)+(h.gzhead.comment?16:0)),i(h,255&h.gzhead.time),i(h,h.gzhead.time>>8&255),i(h,h.gzhead.time>>16&255),i(h,h.gzhead.time>>24&255),i(h,9===h.level?2:h.strategy>=T||h.level<2?4:0),i(h,255&h.gzhead.os),h.gzhead.extra&&h.gzhead.extra.length&&(i(h,255&h.gzhead.extra.length),i(h,h.gzhead.extra.length>>8&255)),h.gzhead.hcrc&&(a.adler=F(a.adler,h.pending_buf,h.pending,0)),h.gzindex=0,h.status=mb):(i(h,0),i(h,0),i(h,0),i(h,0),i(h,0),i(h,9===h.level?2:h.strategy>=T||h.level<2?4:0),i(h,wb),h.status=qb);else{var m=Y+(h.w_bits-8<<4)<<8,n=-1;n=h.strategy>=T||h.level<2?0:h.level<6?1:6===h.level?2:3,m|=n<<6,0!==h.strstart&&(m|=kb),m+=31-m%31,h.status=qb,j(h,m),0!==h.strstart&&(j(h,a.adler>>>16),j(h,65535&a.adler)),a.adler=1}if(h.status===mb)if(h.gzhead.extra){for(k=h.pending;h.gzindex<(65535&h.gzhead.extra.length)&&(h.pending!==h.pending_buf_size||(h.gzhead.hcrc&&h.pending>k&&(a.adler=F(a.adler,h.pending_buf,h.pending-k,k)),g(a),k=h.pending,h.pending!==h.pending_buf_size));)i(h,255&h.gzhead.extra[h.gzindex]),h.gzindex++;h.gzhead.hcrc&&h.pending>k&&(a.adler=F(a.adler,h.pending_buf,h.pending-k,k)),h.gzindex===h.gzhead.extra.length&&(h.gzindex=0,h.status=nb)}else 
h.status=nb;if(h.status===nb)if(h.gzhead.name){k=h.pending;do{if(h.pending===h.pending_buf_size&&(h.gzhead.hcrc&&h.pending>k&&(a.adler=F(a.adler,h.pending_buf,h.pending-k,k)),g(a),k=h.pending,h.pending===h.pending_buf_size)){l=1;break}l=h.gzindex<h.gzhead.name.length?255&h.gzhead.name.charCodeAt(h.gzindex++):0,i(h,l)}while(0!==l);h.gzhead.hcrc&&h.pending>k&&(a.adler=F(a.adler,h.pending_buf,h.pending-k,k)),0===l&&(h.gzindex=0,h.status=ob)}else h.status=ob;if(h.status===ob)if(h.gzhead.comment){k=h.pending;do{if(h.pending===h.pending_buf_size&&(h.gzhead.hcrc&&h.pending>k&&(a.adler=F(a.adler,h.pending_buf,h.pending-k,k)),g(a),k=h.pending,h.pending===h.pending_buf_size)){l=1;break}l=h.gzindex<h.gzhead.comment.length?255&h.gzhead.comment.charCodeAt(h.gzindex++):0,i(h,l)}while(0!==l);h.gzhead.hcrc&&h.pending>k&&(a.adler=F(a.adler,h.pending_buf,h.pending-k,k)),0===l&&(h.status=pb)}else h.status=pb;if(h.status===pb&&(h.gzhead.hcrc?(h.pending+2>h.pending_buf_size&&g(a),h.pending+2<=h.pending_buf_size&&(i(h,255&a.adler),i(h,a.adler>>8&255),a.adler=0,h.status=qb)):h.status=qb),0!==h.pending){if(g(a),0===a.avail_out)return h.last_flush=-1,M}else if(0===a.avail_in&&e(b)<=e(c)&&b!==K)return d(a,Q);if(h.status===rb&&0!==a.avail_in)return d(a,Q);if(0!==a.avail_in||0!==h.lookahead||b!==H&&h.status!==rb){var o=h.strategy===T?r(h,b):h.strategy===U?q(h,b):B[h.level].func(h,b);if((o===ub||o===vb)&&(h.status=rb),o===sb||o===ub)return 0===a.avail_out&&(h.last_flush=-1),M;if(o===tb&&(b===I?D._tr_align(h):b!==L&&(D._tr_stored_block(h,0,0,!1),b===J&&(f(h.head),0===h.lookahead&&(h.strstart=0,h.block_start=0,h.insert=0))),g(a),0===a.avail_out))return h.last_flush=-1,M}return b!==K?M:h.wrap<=0?N:(2===h.wrap?(i(h,255&a.adler),i(h,a.adler>>8&255),i(h,a.adler>>16&255),i(h,a.adler>>24&255),i(h,255&a.total_in),i(h,a.total_in>>8&255),i(h,a.total_in>>16&255),i(h,a.total_in>>24&255)):(j(h,a.adler>>>16),j(h,65535&a.adler)),g(a),h.wrap>0&&(h.wrap=-h.wrap),0!==h.pending?M:N)}function A(a){var b;return 
a&&a.state?(b=a.state.status,b!==lb&&b!==mb&&b!==nb&&b!==ob&&b!==pb&&b!==qb&&b!==rb?d(a,O):(a.state=null,b===qb?d(a,P):M)):O}var B,C=a("../utils/common"),D=a("./trees"),E=a("./adler32"),F=a("./crc32"),G=a("./messages"),H=0,I=1,J=3,K=4,L=5,M=0,N=1,O=-2,P=-3,Q=-5,R=-1,S=1,T=2,U=3,V=4,W=0,X=2,Y=8,Z=9,$=15,_=8,ab=29,bb=256,cb=bb+1+ab,db=30,eb=19,fb=2*cb+1,gb=15,hb=3,ib=258,jb=ib+hb+1,kb=32,lb=42,mb=69,nb=73,ob=91,pb=103,qb=113,rb=666,sb=1,tb=2,ub=3,vb=4,wb=3,xb=function(a,b,c,d,e){this.good_length=a,this.max_lazy=b,this.nice_length=c,this.max_chain=d,this.func=e};B=[new xb(0,0,0,0,n),new xb(4,4,8,4,o),new xb(4,5,16,8,o),new xb(4,6,32,32,o),new xb(4,4,16,16,p),new xb(8,16,32,32,p),new xb(8,16,128,128,p),new xb(8,32,128,256,p),new xb(32,128,258,1024,p),new xb(32,258,258,4096,p)],c.deflateInit=y,c.deflateInit2=x,c.deflateReset=v,c.deflateResetKeep=u,c.deflateSetHeader=w,c.deflate=z,c.deflateEnd=A,c.deflateInfo="pako deflate (from Nodeca project)"},{"../utils/common":27,"./adler32":29,"./crc32":31,"./messages":37,"./trees":38}],33:[function(a,b){"use strict";function c(){this.text=0,this.time=0,this.xflags=0,this.os=0,this.extra=null,this.extra_len=0,this.name="",this.comment="",this.hcrc=0,this.done=!1}b.exports=c},{}],34:[function(a,b){"use strict";var c=30,d=12;b.exports=function(a,b){var e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u,v,w,x,y,z,A,B,C;e=a.state,f=a.next_in,B=a.input,g=f+(a.avail_in-5),h=a.next_out,C=a.output,i=h-(b-a.avail_out),j=h+(a.avail_out-257),k=e.dmax,l=e.wsize,m=e.whave,n=e.wnext,o=e.window,p=e.hold,q=e.bits,r=e.lencode,s=e.distcode,t=(1<<e.lenbits)-1,u=(1<<e.distbits)-1;a:do{15>q&&(p+=B[f++]<<q,q+=8,p+=B[f++]<<q,q+=8),v=r[p&t];b:for(;;){if(w=v>>>24,p>>>=w,q-=w,w=v>>>16&255,0===w)C[h++]=65535&v;else{if(!(16&w)){if(0===(64&w)){v=r[(65535&v)+(p&(1<<w)-1)];continue b}if(32&w){e.mode=d;break a}a.msg="invalid literal/length code",e.mode=c;break 
a}x=65535&v,w&=15,w&&(w>q&&(p+=B[f++]<<q,q+=8),x+=p&(1<<w)-1,p>>>=w,q-=w),15>q&&(p+=B[f++]<<q,q+=8,p+=B[f++]<<q,q+=8),v=s[p&u];c:for(;;){if(w=v>>>24,p>>>=w,q-=w,w=v>>>16&255,!(16&w)){if(0===(64&w)){v=s[(65535&v)+(p&(1<<w)-1)];continue c}a.msg="invalid distance code",e.mode=c;break a}if(y=65535&v,w&=15,w>q&&(p+=B[f++]<<q,q+=8,w>q&&(p+=B[f++]<<q,q+=8)),y+=p&(1<<w)-1,y>k){a.msg="invalid distance too far back",e.mode=c;break a}if(p>>>=w,q-=w,w=h-i,y>w){if(w=y-w,w>m&&e.correct){a.msg="invalid distance too far back",e.mode=c;break a}if(z=0,A=o,0===n){if(z+=l-w,x>w){x-=w;do C[h++]=o[z++];while(--w);z=h-y,A=C}}else if(w>n){if(z+=l+n-w,w-=n,x>w){x-=w;do C[h++]=o[z++];while(--w);if(z=0,x>n){w=n,x-=w;do C[h++]=o[z++];while(--w);z=h-y,A=C}}}else if(z+=n-w,x>w){x-=w;do C[h++]=o[z++];while(--w);z=h-y,A=C}for(;x>2;)C[h++]=A[z++],C[h++]=A[z++],C[h++]=A[z++],x-=3;x&&(C[h++]=A[z++],x>1&&(C[h++]=A[z++]))}else{z=h-y;do C[h++]=C[z++],C[h++]=C[z++],C[h++]=C[z++],x-=3;while(x>2);x&&(C[h++]=C[z++],x>1&&(C[h++]=C[z++]))}break}}break}}while(g>f&&j>h);x=q>>3,f-=x,q-=x<<3,p&=(1<<q)-1,a.next_in=f,a.next_out=h,a.avail_in=g>f?5+(g-f):5-(f-g),a.avail_out=j>h?257+(j-h):257-(h-j),e.hold=p,e.bits=q}},{}],35:[function(a,b,c){"use strict";function d(a){return(a>>>24&255)+(a>>>8&65280)+((65280&a)<<8)+((255&a)<<24)}function e(){this.mode=0,this.last=!1,this.wrap=0,this.havedict=!1,this.flags=0,this.dmax=0,this.check=0,this.total=0,this.head=null,this.wbits=0,this.wsize=0,this.whave=0,this.wnext=0,this.window=null,this.hold=0,this.bits=0,this.length=0,this.offset=0,this.extra=0,this.lencode=null,this.distcode=null,this.lenbits=0,this.distbits=0,this.ncode=0,this.nlen=0,this.ndist=0,this.have=0,this.next=null,this.lens=new r.Buf16(320),this.work=new r.Buf16(288),this.lendyn=null,this.distdyn=null,this.correct=0,this.back=0,this.was=0}function f(a){var b;return 
a&&a.state?(b=a.state,a.total_in=a.total_out=b.total=0,a.msg="",b.wrap&&(a.adler=1&b.wrap),b.mode=K,b.last=0,b.havedict=0,b.dmax=32768,b.head=null,b.hold=0,b.bits=0,b.lencode=b.lendyn=new r.Buf32(ob),b.distcode=b.distdyn=new r.Buf32(pb),b.correct=1,b.back=-1,C):F}function g(a){var b;return a&&a.state?(b=a.state,b.wsize=0,b.whave=0,b.wnext=0,f(a)):F}function h(a,b){var c,d;return a&&a.state?(d=a.state,0>b?(c=0,b=-b):(c=(b>>4)+1,48>b&&(b&=15)),b&&(8>b||b>15)?F:(null!==d.window&&d.wbits!==b&&(d.window=null),d.wrap=c,d.wbits=b,g(a))):F}function i(a,b){var c,d;return a?(d=new e,a.state=d,d.window=null,c=h(a,b),c!==C&&(a.state=null),c):F}function j(a){return i(a,rb)}function k(a){if(sb){var b;for(p=new r.Buf32(512),q=new r.Buf32(32),b=0;144>b;)a.lens[b++]=8;for(;256>b;)a.lens[b++]=9;for(;280>b;)a.lens[b++]=7;for(;288>b;)a.lens[b++]=8;for(v(x,a.lens,0,288,p,0,a.work,{bits:9}),b=0;32>b;)a.lens[b++]=5;v(y,a.lens,0,32,q,0,a.work,{bits:5}),sb=!1}a.lencode=p,a.lenbits=9,a.distcode=q,a.distbits=5}function l(a,b,c,d){var e,f=a.state;return null===f.window&&(f.wsize=1<<f.wbits,f.wnext=0,f.whave=0,f.window=new r.Buf8(f.wsize)),d>=f.wsize?(r.arraySet(f.window,b,c-f.wsize,f.wsize,0),f.wnext=0,f.whave=f.wsize):(e=f.wsize-f.wnext,e>d&&(e=d),r.arraySet(f.window,b,c-d,e,f.wnext),d-=e,d?(r.arraySet(f.window,b,c-d,d,0),f.wnext=d,f.whave=f.wsize):(f.wnext+=e,f.wnext===f.wsize&&(f.wnext=0),f.whave<f.wsize&&(f.whave+=e))),0}function m(a,b){var c,e,f,g,h,i,j,m,n,o,p,q,ob,pb,qb,rb,sb,tb,ub,vb,wb,xb,yb,zb,Ab=0,Bb=new r.Buf8(4),Cb=[16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15];if(!a||!a.state||!a.output||!a.input&&0!==a.avail_in)return F;c=a.state,c.mode===V&&(c.mode=W),h=a.next_out,f=a.output,j=a.avail_out,g=a.next_in,e=a.input,i=a.avail_in,m=c.hold,n=c.bits,o=i,p=j,xb=C;a:for(;;)switch(c.mode){case K:if(0===c.wrap){c.mode=W;break}for(;16>n;){if(0===i)break 
a;i--,m+=e[g++]<<n,n+=8}if(2&c.wrap&&35615===m){c.check=0,Bb[0]=255&m,Bb[1]=m>>>8&255,c.check=t(c.check,Bb,2,0),m=0,n=0,c.mode=L;break}if(c.flags=0,c.head&&(c.head.done=!1),!(1&c.wrap)||(((255&m)<<8)+(m>>8))%31){a.msg="incorrect header check",c.mode=lb;break}if((15&m)!==J){a.msg="unknown compression method",c.mode=lb;break}if(m>>>=4,n-=4,wb=(15&m)+8,0===c.wbits)c.wbits=wb;else if(wb>c.wbits){a.msg="invalid window size",c.mode=lb;break}c.dmax=1<<wb,a.adler=c.check=1,c.mode=512&m?T:V,m=0,n=0;break;case L:for(;16>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(c.flags=m,(255&c.flags)!==J){a.msg="unknown compression method",c.mode=lb;break}if(57344&c.flags){a.msg="unknown header flags set",c.mode=lb;break}c.head&&(c.head.text=m>>8&1),512&c.flags&&(Bb[0]=255&m,Bb[1]=m>>>8&255,c.check=t(c.check,Bb,2,0)),m=0,n=0,c.mode=M;case M:for(;32>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}c.head&&(c.head.time=m),512&c.flags&&(Bb[0]=255&m,Bb[1]=m>>>8&255,Bb[2]=m>>>16&255,Bb[3]=m>>>24&255,c.check=t(c.check,Bb,4,0)),m=0,n=0,c.mode=N;case N:for(;16>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}c.head&&(c.head.xflags=255&m,c.head.os=m>>8),512&c.flags&&(Bb[0]=255&m,Bb[1]=m>>>8&255,c.check=t(c.check,Bb,2,0)),m=0,n=0,c.mode=O;case O:if(1024&c.flags){for(;16>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}c.length=m,c.head&&(c.head.extra_len=m),512&c.flags&&(Bb[0]=255&m,Bb[1]=m>>>8&255,c.check=t(c.check,Bb,2,0)),m=0,n=0}else c.head&&(c.head.extra=null);c.mode=P;case P:if(1024&c.flags&&(q=c.length,q>i&&(q=i),q&&(c.head&&(wb=c.head.extra_len-c.length,c.head.extra||(c.head.extra=new Array(c.head.extra_len)),r.arraySet(c.head.extra,e,g,q,wb)),512&c.flags&&(c.check=t(c.check,e,q,g)),i-=q,g+=q,c.length-=q),c.length))break a;c.length=0,c.mode=Q;case Q:if(2048&c.flags){if(0===i)break a;q=0;do wb=e[g+q++],c.head&&wb&&c.length<65536&&(c.head.name+=String.fromCharCode(wb));while(wb&&i>q);if(512&c.flags&&(c.check=t(c.check,e,q,g)),i-=q,g+=q,wb)break a}else c.head&&(c.head.name=null);c.length=0,c.mode=R;case 
R:if(4096&c.flags){if(0===i)break a;q=0;do wb=e[g+q++],c.head&&wb&&c.length<65536&&(c.head.comment+=String.fromCharCode(wb));while(wb&&i>q);if(512&c.flags&&(c.check=t(c.check,e,q,g)),i-=q,g+=q,wb)break a}else c.head&&(c.head.comment=null);c.mode=S;case S:if(512&c.flags){for(;16>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(m!==(65535&c.check)){a.msg="header crc mismatch",c.mode=lb;break}m=0,n=0}c.head&&(c.head.hcrc=c.flags>>9&1,c.head.done=!0),a.adler=c.check=0,c.mode=V;break;case T:for(;32>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}a.adler=c.check=d(m),m=0,n=0,c.mode=U;case U:if(0===c.havedict)return a.next_out=h,a.avail_out=j,a.next_in=g,a.avail_in=i,c.hold=m,c.bits=n,E;a.adler=c.check=1,c.mode=V;case V:if(b===A||b===B)break a;case W:if(c.last){m>>>=7&n,n-=7&n,c.mode=ib;break}for(;3>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}switch(c.last=1&m,m>>>=1,n-=1,3&m){case 0:c.mode=X;break;case 1:if(k(c),c.mode=bb,b===B){m>>>=2,n-=2;break a}break;case 2:c.mode=$;break;case 3:a.msg="invalid block type",c.mode=lb}m>>>=2,n-=2;break;case X:for(m>>>=7&n,n-=7&n;32>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if((65535&m)!==(m>>>16^65535)){a.msg="invalid stored block lengths",c.mode=lb;break}if(c.length=65535&m,m=0,n=0,c.mode=Y,b===B)break a;case Y:c.mode=Z;case Z:if(q=c.length){if(q>i&&(q=i),q>j&&(q=j),0===q)break a;r.arraySet(f,e,g,q,h),i-=q,g+=q,j-=q,h+=q,c.length-=q;break}c.mode=V;break;case $:for(;14>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(c.nlen=(31&m)+257,m>>>=5,n-=5,c.ndist=(31&m)+1,m>>>=5,n-=5,c.ncode=(15&m)+4,m>>>=4,n-=4,c.nlen>286||c.ndist>30){a.msg="too many length or distance symbols",c.mode=lb;break}c.have=0,c.mode=_;case _:for(;c.have<c.ncode;){for(;3>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}c.lens[Cb[c.have++]]=7&m,m>>>=3,n-=3}for(;c.have<19;)c.lens[Cb[c.have++]]=0;if(c.lencode=c.lendyn,c.lenbits=7,yb={bits:c.lenbits},xb=v(w,c.lens,0,19,c.lencode,0,c.work,yb),c.lenbits=yb.bits,xb){a.msg="invalid code lengths set",c.mode=lb;break}c.have=0,c.mode=ab;case 
ab:for(;c.have<c.nlen+c.ndist;){for(;Ab=c.lencode[m&(1<<c.lenbits)-1],qb=Ab>>>24,rb=Ab>>>16&255,sb=65535&Ab,!(n>=qb);){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(16>sb)m>>>=qb,n-=qb,c.lens[c.have++]=sb;else{if(16===sb){for(zb=qb+2;zb>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(m>>>=qb,n-=qb,0===c.have){a.msg="invalid bit length repeat",c.mode=lb;break}wb=c.lens[c.have-1],q=3+(3&m),m>>>=2,n-=2}else if(17===sb){for(zb=qb+3;zb>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}m>>>=qb,n-=qb,wb=0,q=3+(7&m),m>>>=3,n-=3}else{for(zb=qb+7;zb>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}m>>>=qb,n-=qb,wb=0,q=11+(127&m),m>>>=7,n-=7}if(c.have+q>c.nlen+c.ndist){a.msg="invalid bit length repeat",c.mode=lb;break}for(;q--;)c.lens[c.have++]=wb}}if(c.mode===lb)break;if(0===c.lens[256]){a.msg="invalid code -- missing end-of-block",c.mode=lb;break}if(c.lenbits=9,yb={bits:c.lenbits},xb=v(x,c.lens,0,c.nlen,c.lencode,0,c.work,yb),c.lenbits=yb.bits,xb){a.msg="invalid literal/lengths set",c.mode=lb;break}if(c.distbits=6,c.distcode=c.distdyn,yb={bits:c.distbits},xb=v(y,c.lens,c.nlen,c.ndist,c.distcode,0,c.work,yb),c.distbits=yb.bits,xb){a.msg="invalid distances set",c.mode=lb;break}if(c.mode=bb,b===B)break a;case bb:c.mode=cb;case cb:if(i>=6&&j>=258){a.next_out=h,a.avail_out=j,a.next_in=g,a.avail_in=i,c.hold=m,c.bits=n,u(a,p),h=a.next_out,f=a.output,j=a.avail_out,g=a.next_in,e=a.input,i=a.avail_in,m=c.hold,n=c.bits,c.mode===V&&(c.back=-1);break}for(c.back=0;Ab=c.lencode[m&(1<<c.lenbits)-1],qb=Ab>>>24,rb=Ab>>>16&255,sb=65535&Ab,!(n>=qb);){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(rb&&0===(240&rb)){for(tb=qb,ub=rb,vb=sb;Ab=c.lencode[vb+((m&(1<<tb+ub)-1)>>tb)],qb=Ab>>>24,rb=Ab>>>16&255,sb=65535&Ab,!(n>=tb+qb);){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}m>>>=tb,n-=tb,c.back+=tb}if(m>>>=qb,n-=qb,c.back+=qb,c.length=sb,0===rb){c.mode=hb;break}if(32&rb){c.back=-1,c.mode=V;break}if(64&rb){a.msg="invalid literal/length code",c.mode=lb;break}c.extra=15&rb,c.mode=db;case 
db:if(c.extra){for(zb=c.extra;zb>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}c.length+=m&(1<<c.extra)-1,m>>>=c.extra,n-=c.extra,c.back+=c.extra}c.was=c.length,c.mode=eb;case eb:for(;Ab=c.distcode[m&(1<<c.distbits)-1],qb=Ab>>>24,rb=Ab>>>16&255,sb=65535&Ab,!(n>=qb);){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(0===(240&rb)){for(tb=qb,ub=rb,vb=sb;Ab=c.distcode[vb+((m&(1<<tb+ub)-1)>>tb)],qb=Ab>>>24,rb=Ab>>>16&255,sb=65535&Ab,!(n>=tb+qb);){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}m>>>=tb,n-=tb,c.back+=tb}if(m>>>=qb,n-=qb,c.back+=qb,64&rb){a.msg="invalid distance code",c.mode=lb;break}c.offset=sb,c.extra=15&rb,c.mode=fb;case fb:if(c.extra){for(zb=c.extra;zb>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}c.offset+=m&(1<<c.extra)-1,m>>>=c.extra,n-=c.extra,c.back+=c.extra}if(c.offset>c.dmax){a.msg="invalid distance too far back",c.mode=lb;break}c.mode=gb;case gb:if(0===j)break a;if(q=p-j,c.offset>q){if(q=c.offset-q,q>c.whave&&c.correct){a.msg="invalid distance too far back",c.mode=lb;break}q>c.wnext?(q-=c.wnext,ob=c.wsize-q):ob=c.wnext-q,q>c.length&&(q=c.length),pb=c.window}else pb=f,ob=h-c.offset,q=c.length;q>j&&(q=j),j-=q,c.length-=q;do f[h++]=pb[ob++];while(--q);0===c.length&&(c.mode=cb);break;case hb:if(0===j)break a;f[h++]=c.length,j--,c.mode=cb;break;case ib:if(c.wrap){for(;32>n;){if(0===i)break a;i--,m|=e[g++]<<n,n+=8}if(p-=j,a.total_out+=p,c.total+=p,p&&(a.adler=c.check=c.flags?t(c.check,f,p,h-p):s(c.check,f,p,h-p)),p=j,(c.flags?m:d(m))!==c.check){a.msg="incorrect data check",c.mode=lb;break}m=0,n=0}c.mode=jb;case jb:if(c.wrap&&c.flags){for(;32>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(m!==(4294967295&c.total)){a.msg="incorrect length check",c.mode=lb;break}m=0,n=0}c.mode=kb;case kb:xb=D;break a;case lb:xb=G;break a;case mb:return H;case nb:default:return F}return 
a.next_out=h,a.avail_out=j,a.next_in=g,a.avail_in=i,c.hold=m,c.bits=n,(c.wsize||p!==a.avail_out&&c.mode<lb&&(c.mode<ib||b!==z))&&l(a,a.output,a.next_out,p-a.avail_out)?(c.mode=mb,H):(o-=a.avail_in,p-=a.avail_out,a.total_in+=o,a.total_out+=p,c.total+=p,c.wrap&&p&&(a.adler=c.check=c.flags?t(c.check,f,p,a.next_out-p):s(c.check,f,p,a.next_out-p)),a.data_type=c.bits+(c.last?64:0)+(c.mode===V?128:0)+(c.mode===bb||c.mode===Y?256:0),(0===o&&0===p||b===z)&&xb===C&&(xb=I),xb)}function n(a){if(!a||!a.state)return F;var b=a.state;return b.window&&(b.window=null),a.state=null,C}function o(a,b){var c;return a&&a.state?(c=a.state,0===(2&c.wrap)?F:(c.head=b,b.done=!1,C)):F}var p,q,r=a("../utils/common"),s=a("./adler32"),t=a("./crc32"),u=a("./inffast"),v=a("./inftrees"),w=0,x=1,y=2,z=4,A=5,B=6,C=0,D=1,E=2,F=-2,G=-3,H=-4,I=-5,J=8,K=1,L=2,M=3,N=4,O=5,P=6,Q=7,R=8,S=9,T=10,U=11,V=12,W=13,X=14,Y=15,Z=16,$=17,_=18,ab=19,bb=20,cb=21,db=22,eb=23,fb=24,gb=25,hb=26,ib=27,jb=28,kb=29,lb=30,mb=31,nb=32,ob=852,pb=592,qb=15,rb=qb,sb=!0;c.inflateReset=g,c.inflateReset2=h,c.inflateResetKeep=f,c.inflateInit=j,c.inflateInit2=i,c.inflate=m,c.inflateEnd=n,c.inflateGetHeader=o,c.inflateInfo="pako inflate (from Nodeca project)"},{"../utils/common":27,"./adler32":29,"./crc32":31,"./inffast":34,"./inftrees":36}],36:[function(a,b){"use strict";var c=a("../utils/common"),d=15,e=852,f=592,g=0,h=1,i=2,j=[3,4,5,6,7,8,9,10,11,13,15,17,19,23,27,31,35,43,51,59,67,83,99,115,131,163,195,227,258,0,0],k=[16,16,16,16,16,16,16,16,17,17,17,17,18,18,18,18,19,19,19,19,20,20,20,20,21,21,21,21,16,72,78],l=[1,2,3,4,5,7,9,13,17,25,33,49,65,97,129,193,257,385,513,769,1025,1537,2049,3073,4097,6145,8193,12289,16385,24577,0,0],m=[16,16,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,24,24,25,25,26,26,27,27,28,28,29,29,64,64];b.exports=function(a,b,n,o,p,q,r,s){var t,u,v,w,x,y,z,A,B,C=s.bits,D=0,E=0,F=0,G=0,H=0,I=0,J=0,K=0,L=0,M=0,N=null,O=0,P=new c.Buf16(d+1),Q=new 
c.Buf16(d+1),R=null,S=0;for(D=0;d>=D;D++)P[D]=0;for(E=0;o>E;E++)P[b[n+E]]++;for(H=C,G=d;G>=1&&0===P[G];G--);if(H>G&&(H=G),0===G)return p[q++]=20971520,p[q++]=20971520,s.bits=1,0;for(F=1;G>F&&0===P[F];F++);for(F>H&&(H=F),K=1,D=1;d>=D;D++)if(K<<=1,K-=P[D],0>K)return-1;if(K>0&&(a===g||1!==G))return-1;for(Q[1]=0,D=1;d>D;D++)Q[D+1]=Q[D]+P[D];for(E=0;o>E;E++)0!==b[n+E]&&(r[Q[b[n+E]]++]=E);if(a===g?(N=R=r,y=19):a===h?(N=j,O-=257,R=k,S-=257,y=256):(N=l,R=m,y=-1),M=0,E=0,D=F,x=q,I=H,J=0,v=-1,L=1<<H,w=L-1,a===h&&L>e||a===i&&L>f)return 1;for(var T=0;;){T++,z=D-J,r[E]<y?(A=0,B=r[E]):r[E]>y?(A=R[S+r[E]],B=N[O+r[E]]):(A=96,B=0),t=1<<D-J,u=1<<I,F=u;do u-=t,p[x+(M>>J)+u]=z<<24|A<<16|B|0;while(0!==u);for(t=1<<D-1;M&t;)t>>=1;if(0!==t?(M&=t-1,M+=t):M=0,E++,0===--P[D]){if(D===G)break;D=b[n+r[E]]}if(D>H&&(M&w)!==v){for(0===J&&(J=H),x+=F,I=D-J,K=1<<I;G>I+J&&(K-=P[I+J],!(0>=K));)I++,K<<=1;if(L+=1<<I,a===h&&L>e||a===i&&L>f)return 1;v=M&w,p[v]=H<<24|I<<16|x-q|0}}return 0!==M&&(p[x+M]=D-J<<24|64<<16|0),s.bits=H,0}},{"../utils/common":27}],37:[function(a,b){"use strict";b.exports={2:"need dictionary",1:"stream end",0:"","-1":"file error","-2":"stream error","-3":"data error","-4":"insufficient memory","-5":"buffer error","-6":"incompatible version"}},{}],38:[function(a,b,c){"use strict";function d(a){for(var b=a.length;--b>=0;)a[b]=0}function e(a){return 256>a?gb[a]:gb[256+(a>>>7)]}function f(a,b){a.pending_buf[a.pending++]=255&b,a.pending_buf[a.pending++]=b>>>8&255}function g(a,b,c){a.bi_valid>V-c?(a.bi_buf|=b<<a.bi_valid&65535,f(a,a.bi_buf),a.bi_buf=b>>V-a.bi_valid,a.bi_valid+=c-V):(a.bi_buf|=b<<a.bi_valid&65535,a.bi_valid+=c)}function h(a,b,c){g(a,c[2*b],c[2*b+1])}function i(a,b){var c=0;do c|=1&a,a>>>=1,c<<=1;while(--b>0);return c>>>1}function j(a){16===a.bi_valid?(f(a,a.bi_buf),a.bi_buf=0,a.bi_valid=0):a.bi_valid>=8&&(a.pending_buf[a.pending++]=255&a.bi_buf,a.bi_buf>>=8,a.bi_valid-=8)}function k(a,b){var 
c,d,e,f,g,h,i=b.dyn_tree,j=b.max_code,k=b.stat_desc.static_tree,l=b.stat_desc.has_stree,m=b.stat_desc.extra_bits,n=b.stat_desc.extra_base,o=b.stat_desc.max_length,p=0;for(f=0;U>=f;f++)a.bl_count[f]=0;for(i[2*a.heap[a.heap_max]+1]=0,c=a.heap_max+1;T>c;c++)d=a.heap[c],f=i[2*i[2*d+1]+1]+1,f>o&&(f=o,p++),i[2*d+1]=f,d>j||(a.bl_count[f]++,g=0,d>=n&&(g=m[d-n]),h=i[2*d],a.opt_len+=h*(f+g),l&&(a.static_len+=h*(k[2*d+1]+g)));if(0!==p){do{for(f=o-1;0===a.bl_count[f];)f--;a.bl_count[f]--,a.bl_count[f+1]+=2,a.bl_count[o]--,p-=2}while(p>0);for(f=o;0!==f;f--)for(d=a.bl_count[f];0!==d;)e=a.heap[--c],e>j||(i[2*e+1]!==f&&(a.opt_len+=(f-i[2*e+1])*i[2*e],i[2*e+1]=f),d--)}}function l(a,b,c){var d,e,f=new Array(U+1),g=0;for(d=1;U>=d;d++)f[d]=g=g+c[d-1]<<1;for(e=0;b>=e;e++){var h=a[2*e+1];0!==h&&(a[2*e]=i(f[h]++,h))}}function m(){var a,b,c,d,e,f=new Array(U+1);for(c=0,d=0;O-1>d;d++)for(ib[d]=c,a=0;a<1<<_[d];a++)hb[c++]=d;for(hb[c-1]=d,e=0,d=0;16>d;d++)for(jb[d]=e,a=0;a<1<<ab[d];a++)gb[e++]=d;for(e>>=7;R>d;d++)for(jb[d]=e<<7,a=0;a<1<<ab[d]-7;a++)gb[256+e++]=d;for(b=0;U>=b;b++)f[b]=0;for(a=0;143>=a;)eb[2*a+1]=8,a++,f[8]++;for(;255>=a;)eb[2*a+1]=9,a++,f[9]++;for(;279>=a;)eb[2*a+1]=7,a++,f[7]++;for(;287>=a;)eb[2*a+1]=8,a++,f[8]++;for(l(eb,Q+1,f),a=0;R>a;a++)fb[2*a+1]=5,fb[2*a]=i(a,5);kb=new nb(eb,_,P+1,Q,U),lb=new nb(fb,ab,0,R,U),mb=new nb(new Array(0),bb,0,S,W)}function n(a){var b;for(b=0;Q>b;b++)a.dyn_ltree[2*b]=0;for(b=0;R>b;b++)a.dyn_dtree[2*b]=0;for(b=0;S>b;b++)a.bl_tree[2*b]=0;a.dyn_ltree[2*X]=1,a.opt_len=a.static_len=0,a.last_lit=a.matches=0}function o(a){a.bi_valid>8?f(a,a.bi_buf):a.bi_valid>0&&(a.pending_buf[a.pending++]=a.bi_buf),a.bi_buf=0,a.bi_valid=0}function p(a,b,c,d){o(a),d&&(f(a,c),f(a,~c)),E.arraySet(a.pending_buf,a.window,b,c,a.pending),a.pending+=c}function q(a,b,c,d){var e=2*b,f=2*c;return a[e]<a[f]||a[e]===a[f]&&d[b]<=d[c]}function r(a,b,c){for(var 
d=a.heap[c],e=c<<1;e<=a.heap_len&&(e<a.heap_len&&q(b,a.heap[e+1],a.heap[e],a.depth)&&e++,!q(b,d,a.heap[e],a.depth));)a.heap[c]=a.heap[e],c=e,e<<=1;a.heap[c]=d}function s(a,b,c){var d,f,i,j,k=0;if(0!==a.last_lit)do d=a.pending_buf[a.d_buf+2*k]<<8|a.pending_buf[a.d_buf+2*k+1],f=a.pending_buf[a.l_buf+k],k++,0===d?h(a,f,b):(i=hb[f],h(a,i+P+1,b),j=_[i],0!==j&&(f-=ib[i],g(a,f,j)),d--,i=e(d),h(a,i,c),j=ab[i],0!==j&&(d-=jb[i],g(a,d,j)));while(k<a.last_lit);h(a,X,b)}function t(a,b){var c,d,e,f=b.dyn_tree,g=b.stat_desc.static_tree,h=b.stat_desc.has_stree,i=b.stat_desc.elems,j=-1;for(a.heap_len=0,a.heap_max=T,c=0;i>c;c++)0!==f[2*c]?(a.heap[++a.heap_len]=j=c,a.depth[c]=0):f[2*c+1]=0;for(;a.heap_len<2;)e=a.heap[++a.heap_len]=2>j?++j:0,f[2*e]=1,a.depth[e]=0,a.opt_len--,h&&(a.static_len-=g[2*e+1]);for(b.max_code=j,c=a.heap_len>>1;c>=1;c--)r(a,f,c);e=i;do c=a.heap[1],a.heap[1]=a.heap[a.heap_len--],r(a,f,1),d=a.heap[1],a.heap[--a.heap_max]=c,a.heap[--a.heap_max]=d,f[2*e]=f[2*c]+f[2*d],a.depth[e]=(a.depth[c]>=a.depth[d]?a.depth[c]:a.depth[d])+1,f[2*c+1]=f[2*d+1]=e,a.heap[1]=e++,r(a,f,1);while(a.heap_len>=2);a.heap[--a.heap_max]=a.heap[1],k(a,b),l(f,j,a.bl_count)}function u(a,b,c){var d,e,f=-1,g=b[1],h=0,i=7,j=4;for(0===g&&(i=138,j=3),b[2*(c+1)+1]=65535,d=0;c>=d;d++)e=g,g=b[2*(d+1)+1],++h<i&&e===g||(j>h?a.bl_tree[2*e]+=h:0!==e?(e!==f&&a.bl_tree[2*e]++,a.bl_tree[2*Y]++):10>=h?a.bl_tree[2*Z]++:a.bl_tree[2*$]++,h=0,f=e,0===g?(i=138,j=3):e===g?(i=6,j=3):(i=7,j=4))}function v(a,b,c){var d,e,f=-1,i=b[1],j=0,k=7,l=4;for(0===i&&(k=138,l=3),d=0;c>=d;d++)if(e=i,i=b[2*(d+1)+1],!(++j<k&&e===i)){if(l>j){do h(a,e,a.bl_tree);while(0!==--j)}else 0!==e?(e!==f&&(h(a,e,a.bl_tree),j--),h(a,Y,a.bl_tree),g(a,j-3,2)):10>=j?(h(a,Z,a.bl_tree),g(a,j-3,3)):(h(a,$,a.bl_tree),g(a,j-11,7));j=0,f=e,0===i?(k=138,l=3):e===i?(k=6,l=3):(k=7,l=4)}}function w(a){var b;for(u(a,a.dyn_ltree,a.l_desc.max_code),u(a,a.dyn_dtree,a.d_desc.max_code),t(a,a.bl_desc),b=S-1;b>=3&&0===a.bl_tree[2*cb[b]+1];b--);return 
a.opt_len+=3*(b+1)+5+5+4,b}function x(a,b,c,d){var e;for(g(a,b-257,5),g(a,c-1,5),g(a,d-4,4),e=0;d>e;e++)g(a,a.bl_tree[2*cb[e]+1],3);v(a,a.dyn_ltree,b-1),v(a,a.dyn_dtree,c-1)}function y(a){var b,c=4093624447;for(b=0;31>=b;b++,c>>>=1)if(1&c&&0!==a.dyn_ltree[2*b])return G;if(0!==a.dyn_ltree[18]||0!==a.dyn_ltree[20]||0!==a.dyn_ltree[26])return H;for(b=32;P>b;b++)if(0!==a.dyn_ltree[2*b])return H;return G}function z(a){pb||(m(),pb=!0),a.l_desc=new ob(a.dyn_ltree,kb),a.d_desc=new ob(a.dyn_dtree,lb),a.bl_desc=new ob(a.bl_tree,mb),a.bi_buf=0,a.bi_valid=0,n(a)}function A(a,b,c,d){g(a,(J<<1)+(d?1:0),3),p(a,b,c,!0)}function B(a){g(a,K<<1,3),h(a,X,eb),j(a)}function C(a,b,c,d){var e,f,h=0;a.level>0?(a.strm.data_type===I&&(a.strm.data_type=y(a)),t(a,a.l_desc),t(a,a.d_desc),h=w(a),e=a.opt_len+3+7>>>3,f=a.static_len+3+7>>>3,e>=f&&(e=f)):e=f=c+5,e>=c+4&&-1!==b?A(a,b,c,d):a.strategy===F||f===e?(g(a,(K<<1)+(d?1:0),3),s(a,eb,fb)):(g(a,(L<<1)+(d?1:0),3),x(a,a.l_desc.max_code+1,a.d_desc.max_code+1,h+1),s(a,a.dyn_ltree,a.dyn_dtree)),n(a),d&&o(a)}function D(a,b,c){return a.pending_buf[a.d_buf+2*a.last_lit]=b>>>8&255,a.pending_buf[a.d_buf+2*a.last_lit+1]=255&b,a.pending_buf[a.l_buf+a.last_lit]=255&c,a.last_lit++,0===b?a.dyn_ltree[2*c]++:(a.matches++,b--,a.dyn_ltree[2*(hb[c]+P+1)]++,a.dyn_dtree[2*e(b)]++),a.last_lit===a.lit_bufsize-1}var E=a("../utils/common"),F=4,G=0,H=1,I=2,J=0,K=1,L=2,M=3,N=258,O=29,P=256,Q=P+1+O,R=30,S=19,T=2*Q+1,U=15,V=16,W=7,X=256,Y=16,Z=17,$=18,_=[0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,0],ab=[0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13],bb=[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,3,7],cb=[16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15],db=512,eb=new Array(2*(Q+2));d(eb);var fb=new Array(2*R);d(fb);var gb=new Array(db);d(gb);var hb=new Array(N-M+1);d(hb);var ib=new Array(O);d(ib);var jb=new Array(R);d(jb);var 
kb,lb,mb,nb=function(a,b,c,d,e){this.static_tree=a,this.extra_bits=b,this.extra_base=c,this.elems=d,this.max_length=e,this.has_stree=a&&a.length},ob=function(a,b){this.dyn_tree=a,this.max_code=0,this.stat_desc=b},pb=!1;c._tr_init=z,c._tr_stored_block=A,c._tr_flush_block=C,c._tr_tally=D,c._tr_align=B},{"../utils/common":27}],39:[function(a,b){"use strict";function c(){this.input=null,this.next_in=0,this.avail_in=0,this.total_in=0,this.output=null,this.next_out=0,this.avail_out=0,this.total_out=0,this.msg="",this.state=null,this.data_type=2,this.adler=0}b.exports=c},{}]},{},[9])(9)});'use strict';if(tr.isVinn){global.JSZip=global.window.JSZip;global.window=undefined;}else if(tr.isNode){const jsZipAbsPath=HTMLImportsLoader.hrefToAbsolutePath('/jszip.min.js');const jsZipModule=require(jsZipAbsPath);global.JSZip=jsZipModule;}'use strict';tr.exportTo('tr.e.importer',function(){function ZipImporter(model,eventData){if(eventData instanceof ArrayBuffer){eventData=new Uint8Array(eventData);} +!function(a){if("object"==typeof exports&&"undefined"!=typeof module)module.exports=a();else if("function"==typeof define&&define.amd)define([],a);else{var b;"undefined"!=typeof window?b=window:"undefined"!=typeof global?b=global:"undefined"!=typeof self&&(b=self),b.JSZip=a()}}(function(){return function a(b,c,d){function e(g,h){if(!c[g]){if(!b[g]){var i="function"==typeof require&&require;if(!h&&i)return i(g,!0);if(f)return f(g,!0);throw new Error("Cannot find module '"+g+"'")}var j=c[g]={exports:{}};b[g][0].call(j.exports,function(a){var c=b[g][1][a];return e(c?c:a)},j,j.exports,a,b,c,d)}return c[g].exports}for(var f="function"==typeof require&&require,g=0;g<d.length;g++)e(d[g]);return e}({1:[function(a,b,c){"use strict";var d="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/=";c.encode=function(a){for(var 
b,c,e,f,g,h,i,j="",k=0;k<a.length;)b=a.charCodeAt(k++),c=a.charCodeAt(k++),e=a.charCodeAt(k++),f=b>>2,g=(3&b)<<4|c>>4,h=(15&c)<<2|e>>6,i=63&e,isNaN(c)?h=i=64:isNaN(e)&&(i=64),j=j+d.charAt(f)+d.charAt(g)+d.charAt(h)+d.charAt(i);return j},c.decode=function(a){var b,c,e,f,g,h,i,j="",k=0;for(a=a.replace(/[^A-Za-z0-9\+\/\=]/g,"");k<a.length;)f=d.indexOf(a.charAt(k++)),g=d.indexOf(a.charAt(k++)),h=d.indexOf(a.charAt(k++)),i=d.indexOf(a.charAt(k++)),b=f<<2|g>>4,c=(15&g)<<4|h>>2,e=(3&h)<<6|i,j+=String.fromCharCode(b),64!=h&&(j+=String.fromCharCode(c)),64!=i&&(j+=String.fromCharCode(e));return j}},{}],2:[function(a,b){"use strict";function c(){this.compressedSize=0,this.uncompressedSize=0,this.crc32=0,this.compressionMethod=null,this.compressedContent=null}c.prototype={getContent:function(){return null},getCompressedContent:function(){return null}},b.exports=c},{}],3:[function(a,b,c){"use strict";c.STORE={magic:"\x00\x00",compress:function(a){return a},uncompress:function(a){return a},compressInputType:null,uncompressInputType:null},c.DEFLATE=a("./flate")},{"./flate":8}],4:[function(a,b){"use strict";var 
c=a("./utils"),d=[0,1996959894,3993919788,2567524794,124634137,1886057615,3915621685,2657392035,249268274,2044508324,3772115230,2547177864,162941995,2125561021,3887607047,2428444049,498536548,1789927666,4089016648,2227061214,450548861,1843258603,4107580753,2211677639,325883990,1684777152,4251122042,2321926636,335633487,1661365465,4195302755,2366115317,997073096,1281953886,3579855332,2724688242,1006888145,1258607687,3524101629,2768942443,901097722,1119000684,3686517206,2898065728,853044451,1172266101,3705015759,2882616665,651767980,1373503546,3369554304,3218104598,565507253,1454621731,3485111705,3099436303,671266974,1594198024,3322730930,2970347812,795835527,1483230225,3244367275,3060149565,1994146192,31158534,2563907772,4023717930,1907459465,112637215,2680153253,3904427059,2013776290,251722036,2517215374,3775830040,2137656763,141376813,2439277719,3865271297,1802195444,476864866,2238001368,4066508878,1812370925,453092731,2181625025,4111451223,1706088902,314042704,2344532202,4240017532,1658658271,366619977,2362670323,4224994405,1303535960,984961486,2747007092,3569037538,1256170817,1037604311,2765210733,3554079995,1131014506,879679996,2909243462,3663771856,1141124467,855842277,2852801631,3708648649,1342533948,654459306,3188396048,3373015174,1466479909,544179635,3110523913,3462522015,1591671054,702138776,2966460450,3352799412,1504918807,783551873,3082640443,3233442989,3988292384,2596254646,62317068,1957810842,3939845945,2647816111,81470997,1943803523,3814918930,2489596804,225274430,2053790376,3826175755,2466906013,167816743,2097651377,4027552580,2265490386,503444072,1762050814,4150417245,2154129355,426522225,1852507879,4275313526,2312317920,282753626,1742555852,4189708143,2394877945,397917763,1622183637,3604390888,2714866558,953729732,1340076626,3518719985,2797360999,1068828381,1219638859,3624741850,2936675148,906185462,1090812512,3747672003,2825379669,829329135,1181335161,3412177804,3160834842,628085408,1382605366,3423369109,3138078467,570562233,1426400815,3317316542,2
998733608,733239954,1555261956,3268935591,3050360625,752459403,1541320221,2607071920,3965973030,1969922972,40735498,2617837225,3943577151,1913087877,83908371,2512341634,3803740692,2075208622,213261112,2463272603,3855990285,2094854071,198958881,2262029012,4057260610,1759359992,534414190,2176718541,4139329115,1873836001,414664567,2282248934,4279200368,1711684554,285281116,2405801727,4167216745,1634467795,376229701,2685067896,3608007406,1308918612,956543938,2808555105,3495958263,1231636301,1047427035,2932959818,3654703836,1088359270,936918e3,2847714899,3736837829,1202900863,817233897,3183342108,3401237130,1404277552,615818150,3134207493,3453421203,1423857449,601450431,3009837614,3294710456,1567103746,711928724,3020668471,3272380065,1510334235,755167117];b.exports=function(a,b){if("undefined"==typeof a||!a.length)return 0;var e="string"!==c.getTypeOf(a);"undefined"==typeof b&&(b=0);var f=0,g=0,h=0;b=-1^b;for(var i=0,j=a.length;j>i;i++)h=e?a[i]:a.charCodeAt(i),g=255&(b^h),f=d[g],b=b>>>8^f;return-1^b}},{"./utils":21}],5:[function(a,b){"use strict";function c(){this.data=null,this.length=0,this.index=0}var d=a("./utils");c.prototype={checkOffset:function(a){this.checkIndex(this.index+a)},checkIndex:function(a){if(this.length<a||0>a)throw new Error("End of data reached (data length = "+this.length+", asked index = "+a+"). 
Corrupted zip ?")},setIndex:function(a){this.checkIndex(a),this.index=a},skip:function(a){this.setIndex(this.index+a)},byteAt:function(){},readInt:function(a){var b,c=0;for(this.checkOffset(a),b=this.index+a-1;b>=this.index;b--)c=(c<<8)+this.byteAt(b);return this.index+=a,c},readString:function(a){return d.transformTo("string",this.readData(a))},readData:function(){},lastIndexOfSignature:function(){},readDate:function(){var a=this.readInt(4);return new Date((a>>25&127)+1980,(a>>21&15)-1,a>>16&31,a>>11&31,a>>5&63,(31&a)<<1)}},b.exports=c},{"./utils":21}],6:[function(a,b,c){"use strict";c.base64=!1,c.binary=!1,c.dir=!1,c.createFolders=!1,c.date=null,c.compression=null,c.comment=null},{}],7:[function(a,b,c){"use strict";var d=a("./utils");c.string2binary=function(a){return d.string2binary(a)},c.string2Uint8Array=function(a){return d.transformTo("uint8array",a)},c.uint8Array2String=function(a){return d.transformTo("string",a)},c.string2Blob=function(a){var b=d.transformTo("arraybuffer",a);return d.arrayBuffer2Blob(b)},c.arrayBuffer2Blob=function(a){return d.arrayBuffer2Blob(a)},c.transformTo=function(a,b){return d.transformTo(a,b)},c.getTypeOf=function(a){return d.getTypeOf(a)},c.checkSupport=function(a){return d.checkSupport(a)},c.MAX_VALUE_16BITS=d.MAX_VALUE_16BITS,c.MAX_VALUE_32BITS=d.MAX_VALUE_32BITS,c.pretty=function(a){return d.pretty(a)},c.findCompression=function(a){return d.findCompression(a)},c.isRegExp=function(a){return d.isRegExp(a)}},{"./utils":21}],8:[function(a,b,c){"use strict";var d="undefined"!=typeof Uint8Array&&"undefined"!=typeof Uint16Array&&"undefined"!=typeof Uint32Array,e=a("pako");c.uncompressInputType=d?"uint8array":"array",c.compressInputType=d?"uint8array":"array",c.magic="\b\x00",c.compress=function(a){return e.deflateRaw(a)},c.uncompress=function(a){return e.inflateRaw(a)}},{pako:24}],9:[function(a,b){"use strict";function c(a,b){return this instanceof 
c?(this.files={},this.comment=null,this.root="",a&&this.load(a,b),void(this.clone=function(){var a=new c;for(var b in this)"function"!=typeof this[b]&&(a[b]=this[b]);return a})):new c(a,b)}var d=a("./base64");c.prototype=a("./object"),c.prototype.load=a("./load"),c.support=a("./support"),c.defaults=a("./defaults"),c.utils=a("./deprecatedPublicUtils"),c.base64={encode:function(a){return d.encode(a)},decode:function(a){return d.decode(a)}},c.compressions=a("./compressions"),b.exports=c},{"./base64":1,"./compressions":3,"./defaults":6,"./deprecatedPublicUtils":7,"./load":10,"./object":13,"./support":17}],10:[function(a,b){"use strict";var c=a("./base64"),d=a("./zipEntries");b.exports=function(a,b){var e,f,g,h;for(b=b||{},b.base64&&(a=c.decode(a)),f=new d(a,b),e=f.files,g=0;g<e.length;g++)h=e[g],this.file(h.fileName,h.decompressed,{binary:!0,optimizedBinaryString:!0,date:h.date,dir:h.dir,comment:h.fileComment.length?h.fileComment:null,createFolders:b.createFolders});return f.zipComment.length&&(this.comment=f.zipComment),this}},{"./base64":1,"./zipEntries":22}],11:[function(a,b){(function(a){"use strict";b.exports=function(b,c){return new a(b,c)},b.exports.test=function(b){return a.isBuffer(b)}}).call(this,"undefined"!=typeof Buffer?Buffer:void 0)},{}],12:[function(a,b){"use strict";function c(a){this.data=a,this.length=this.data.length,this.index=0}var d=a("./uint8ArrayReader");c.prototype=new d,c.prototype.readData=function(a){this.checkOffset(a);var b=this.data.slice(this.index,this.index+a);return this.index+=a,b},b.exports=c},{"./uint8ArrayReader":18}],13:[function(a,b){"use strict";var c=a("./support"),d=a("./utils"),e=a("./crc32"),f=a("./signature"),g=a("./defaults"),h=a("./base64"),i=a("./compressions"),j=a("./compressedObject"),k=a("./nodeBuffer"),l=a("./utf8"),m=a("./stringWriter"),n=a("./uint8ArrayWriter"),o=function(a){if(a._data instanceof j&&(a._data=a._data.getContent(),a.options.binary=!0,a.options.base64=!1,"uint8array"===d.getTypeOf(a._data))){var 
b=a._data;a._data=new Uint8Array(b.length),0!==b.length&&a._data.set(b,0)}return a._data},p=function(a){var b=o(a),e=d.getTypeOf(b);return"string"===e?!a.options.binary&&c.nodebuffer?k(b,"utf-8"):a.asBinary():b},q=function(a){var b=o(this);return null===b||"undefined"==typeof b?"":(this.options.base64&&(b=h.decode(b)),b=a&&this.options.binary?A.utf8decode(b):d.transformTo("string",b),a||this.options.binary||(b=d.transformTo("string",A.utf8encode(b))),b)},r=function(a,b,c){this.name=a,this.dir=c.dir,this.date=c.date,this.comment=c.comment,this._data=b,this.options=c,this._initialMetadata={dir:c.dir,date:c.date}};r.prototype={asText:function(){return q.call(this,!0)},asBinary:function(){return q.call(this,!1)},asNodeBuffer:function(){var a=p(this);return d.transformTo("nodebuffer",a)},asUint8Array:function(){var a=p(this);return d.transformTo("uint8array",a)},asArrayBuffer:function(){return this.asUint8Array().buffer}};var s=function(a,b){var c,d="";for(c=0;b>c;c++)d+=String.fromCharCode(255&a),a>>>=8;return d},t=function(){var a,b,c={};for(a=0;a<arguments.length;a++)for(b in arguments[a])arguments[a].hasOwnProperty(b)&&"undefined"==typeof c[b]&&(c[b]=arguments[a][b]);return c},u=function(a){return a=a||{},a.base64!==!0||null!==a.binary&&void 0!==a.binary||(a.binary=!0),a=t(a,g),a.date=a.date||new Date,null!==a.compression&&(a.compression=a.compression.toUpperCase()),a},v=function(a,b,c){var e,f=d.getTypeOf(b);if(c=u(c),c.createFolders&&(e=w(a))&&x.call(this,e,!0),c.dir||null===b||"undefined"==typeof b)c.base64=!1,c.binary=!1,b=null;else if("string"===f)c.binary&&!c.base64&&c.optimizedBinaryString!==!0&&(b=d.string2binary(b));else{if(c.base64=!1,c.binary=!0,!(f||b instanceof j))throw new Error("The data of '"+a+"' is in an unsupported format !");"arraybuffer"===f&&(b=d.transformTo("uint8array",b))}var g=new r(a,b,c);return this.files[a]=g,g},w=function(a){"/"==a.slice(-1)&&(a=a.substring(0,a.length-1));var b=a.lastIndexOf("/");return 
b>0?a.substring(0,b):""},x=function(a,b){return"/"!=a.slice(-1)&&(a+="/"),b="undefined"!=typeof b?b:!1,this.files[a]||v.call(this,a,null,{dir:!0,createFolders:b}),this.files[a]},y=function(a,b){var c,f=new j;return a._data instanceof j?(f.uncompressedSize=a._data.uncompressedSize,f.crc32=a._data.crc32,0===f.uncompressedSize||a.dir?(b=i.STORE,f.compressedContent="",f.crc32=0):a._data.compressionMethod===b.magic?f.compressedContent=a._data.getCompressedContent():(c=a._data.getContent(),f.compressedContent=b.compress(d.transformTo(b.compressInputType,c)))):(c=p(a),(!c||0===c.length||a.dir)&&(b=i.STORE,c=""),f.uncompressedSize=c.length,f.crc32=e(c),f.compressedContent=b.compress(d.transformTo(b.compressInputType,c))),f.compressedSize=f.compressedContent.length,f.compressionMethod=b.magic,f},z=function(a,b,c,g){var h,i,j,k,m=(c.compressedContent,d.transformTo("string",l.utf8encode(b.name))),n=b.comment||"",o=d.transformTo("string",l.utf8encode(n)),p=m.length!==b.name.length,q=o.length!==n.length,r=b.options,t="",u="",v="";j=b._initialMetadata.dir!==b.dir?b.dir:r.dir,k=b._initialMetadata.date!==b.date?b.date:r.date,h=k.getHours(),h<<=6,h|=k.getMinutes(),h<<=5,h|=k.getSeconds()/2,i=k.getFullYear()-1980,i<<=4,i|=k.getMonth()+1,i<<=5,i|=k.getDate(),p&&(u=s(1,1)+s(e(m),4)+m,t+="up"+s(u.length,2)+u),q&&(v=s(1,1)+s(this.crc32(o),4)+o,t+="uc"+s(v.length,2)+v);var w="";w+="\n\x00",w+=p||q?"\x00\b":"\x00\x00",w+=c.compressionMethod,w+=s(h,2),w+=s(i,2),w+=s(c.crc32,4),w+=s(c.compressedSize,4),w+=s(c.uncompressedSize,4),w+=s(m.length,2),w+=s(t.length,2);var x=f.LOCAL_FILE_HEADER+w+m+t,y=f.CENTRAL_FILE_HEADER+"\x00"+w+s(o.length,2)+"\x00\x00\x00\x00"+(j===!0?"\x00\x00\x00":"\x00\x00\x00\x00")+s(g,4)+m+t+o;return{fileRecord:x,dirRecord:y,compressedObject:c}},A={load:function(){throw new Error("Load method is not defined. 
Is the file jszip-load.js included ?")},filter:function(a){var b,c,d,e,f=[];for(b in this.files)this.files.hasOwnProperty(b)&&(d=this.files[b],e=new r(d.name,d._data,t(d.options)),c=b.slice(this.root.length,b.length),b.slice(0,this.root.length)===this.root&&a(c,e)&&f.push(e));return f},file:function(a,b,c){if(1===arguments.length){if(d.isRegExp(a)){var e=a;return this.filter(function(a,b){return!b.dir&&e.test(a)})}return this.filter(function(b,c){return!c.dir&&b===a})[0]||null}return a=this.root+a,v.call(this,a,b,c),this},folder:function(a){if(!a)return this;if(d.isRegExp(a))return this.filter(function(b,c){return c.dir&&a.test(b)});var b=this.root+a,c=x.call(this,b),e=this.clone();return e.root=c.name,e},remove:function(a){a=this.root+a;var b=this.files[a];if(b||("/"!=a.slice(-1)&&(a+="/"),b=this.files[a]),b&&!b.dir)delete this.files[a];else for(var c=this.filter(function(b,c){return c.name.slice(0,a.length)===a}),d=0;d<c.length;d++)delete this.files[c[d].name];return this},generate:function(a){a=t(a||{},{base64:!0,compression:"STORE",type:"base64",comment:null}),d.checkSupport(a.type);var b,c,e=[],g=0,j=0,k=d.transformTo("string",this.utf8encode(a.comment||this.comment||""));for(var l in this.files)if(this.files.hasOwnProperty(l)){var o=this.files[l],p=o.options.compression||a.compression.toUpperCase(),q=i[p];if(!q)throw new Error(p+" is not a valid compression method !");var r=y.call(this,o,q),u=z.call(this,l,o,r,g);g+=u.fileRecord.length+r.compressedSize,j+=u.dirRecord.length,e.push(u)}var v="";v=f.CENTRAL_DIRECTORY_END+"\x00\x00\x00\x00"+s(e.length,2)+s(e.length,2)+s(j,4)+s(g,4)+s(k.length,2)+k;var w=a.type.toLowerCase();for(b="uint8array"===w||"arraybuffer"===w||"blob"===w||"nodebuffer"===w?new n(g+j+v.length):new m(g+j+v.length),c=0;c<e.length;c++)b.append(e[c].fileRecord),b.append(e[c].compressedObject.compressedContent);for(c=0;c<e.length;c++)b.append(e[c].dirRecord);b.append(v);var 
x=b.finalize();switch(a.type.toLowerCase()){case"uint8array":case"arraybuffer":case"nodebuffer":return d.transformTo(a.type.toLowerCase(),x);case"blob":return d.arrayBuffer2Blob(d.transformTo("arraybuffer",x));case"base64":return a.base64?h.encode(x):x;default:return x}},crc32:function(a,b){return e(a,b)},utf8encode:function(a){return d.transformTo("string",l.utf8encode(a))},utf8decode:function(a){return l.utf8decode(a)}};b.exports=A},{"./base64":1,"./compressedObject":2,"./compressions":3,"./crc32":4,"./defaults":6,"./nodeBuffer":11,"./signature":14,"./stringWriter":16,"./support":17,"./uint8ArrayWriter":19,"./utf8":20,"./utils":21}],14:[function(a,b,c){"use strict";c.LOCAL_FILE_HEADER="PK",c.CENTRAL_FILE_HEADER="PK",c.CENTRAL_DIRECTORY_END="PK",c.ZIP64_CENTRAL_DIRECTORY_LOCATOR="PK",c.ZIP64_CENTRAL_DIRECTORY_END="PK",c.DATA_DESCRIPTOR="PK\b"},{}],15:[function(a,b){"use strict";function c(a,b){this.data=a,b||(this.data=e.string2binary(this.data)),this.length=this.data.length,this.index=0}var d=a("./dataReader"),e=a("./utils");c.prototype=new d,c.prototype.byteAt=function(a){return this.data.charCodeAt(a)},c.prototype.lastIndexOfSignature=function(a){return this.data.lastIndexOf(a)},c.prototype.readData=function(a){this.checkOffset(a);var b=this.data.slice(this.index,this.index+a);return this.index+=a,b},b.exports=c},{"./dataReader":5,"./utils":21}],16:[function(a,b){"use strict";var c=a("./utils"),d=function(){this.data=[]};d.prototype={append:function(a){a=c.transformTo("string",a),this.data.push(a)},finalize:function(){return this.data.join("")}},b.exports=d},{"./utils":21}],17:[function(a,b,c){(function(a){"use strict";if(c.base64=!0,c.array=!0,c.string=!0,c.arraybuffer="undefined"!=typeof ArrayBuffer&&"undefined"!=typeof Uint8Array,c.nodebuffer="undefined"!=typeof a,c.uint8array="undefined"!=typeof Uint8Array,"undefined"==typeof ArrayBuffer)c.blob=!1;else{var b=new ArrayBuffer(0);try{c.blob=0===new Blob([b],{type:"application/zip"}).size}catch(d){try{var 
e=window.BlobBuilder||window.WebKitBlobBuilder||window.MozBlobBuilder||window.MSBlobBuilder,f=new e;f.append(b),c.blob=0===f.getBlob("application/zip").size}catch(d){c.blob=!1}}}}).call(this,"undefined"!=typeof Buffer?Buffer:void 0)},{}],18:[function(a,b){"use strict";function c(a){a&&(this.data=a,this.length=this.data.length,this.index=0)}var d=a("./dataReader");c.prototype=new d,c.prototype.byteAt=function(a){return this.data[a]},c.prototype.lastIndexOfSignature=function(a){for(var b=a.charCodeAt(0),c=a.charCodeAt(1),d=a.charCodeAt(2),e=a.charCodeAt(3),f=this.length-4;f>=0;--f)if(this.data[f]===b&&this.data[f+1]===c&&this.data[f+2]===d&&this.data[f+3]===e)return f;return-1},c.prototype.readData=function(a){if(this.checkOffset(a),0===a)return new Uint8Array(0);var b=this.data.subarray(this.index,this.index+a);return this.index+=a,b},b.exports=c},{"./dataReader":5}],19:[function(a,b){"use strict";var c=a("./utils"),d=function(a){this.data=new Uint8Array(a),this.index=0};d.prototype={append:function(a){0!==a.length&&(a=c.transformTo("uint8array",a),this.data.set(a,this.index),this.index+=a.length)},finalize:function(){return this.data}},b.exports=d},{"./utils":21}],20:[function(a,b,c){"use strict";for(var d=a("./utils"),e=a("./support"),f=a("./nodeBuffer"),g=new Array(256),h=0;256>h;h++)g[h]=h>=252?6:h>=248?5:h>=240?4:h>=224?3:h>=192?2:1;g[254]=g[254]=1;var i=function(a){var b,c,d,f,g,h=a.length,i=0;for(f=0;h>f;f++)c=a.charCodeAt(f),55296===(64512&c)&&h>f+1&&(d=a.charCodeAt(f+1),56320===(64512&d)&&(c=65536+(c-55296<<10)+(d-56320),f++)),i+=128>c?1:2048>c?2:65536>c?3:4;for(b=e.uint8array?new Uint8Array(i):new Array(i),g=0,f=0;i>g;f++)c=a.charCodeAt(f),55296===(64512&c)&&h>f+1&&(d=a.charCodeAt(f+1),56320===(64512&d)&&(c=65536+(c-55296<<10)+(d-56320),f++)),128>c?b[g++]=c:2048>c?(b[g++]=192|c>>>6,b[g++]=128|63&c):65536>c?(b[g++]=224|c>>>12,b[g++]=128|c>>>6&63,b[g++]=128|63&c):(b[g++]=240|c>>>18,b[g++]=128|c>>>12&63,b[g++]=128|c>>>6&63,b[g++]=128|63&c);return 
b},j=function(a,b){var c;for(b=b||a.length,b>a.length&&(b=a.length),c=b-1;c>=0&&128===(192&a[c]);)c--;return 0>c?b:0===c?b:c+g[a[c]]>b?c:b},k=function(a){var b,c,e,f,h=a.length,i=new Array(2*h);for(c=0,b=0;h>b;)if(e=a[b++],128>e)i[c++]=e;else if(f=g[e],f>4)i[c++]=65533,b+=f-1;else{for(e&=2===f?31:3===f?15:7;f>1&&h>b;)e=e<<6|63&a[b++],f--;f>1?i[c++]=65533:65536>e?i[c++]=e:(e-=65536,i[c++]=55296|e>>10&1023,i[c++]=56320|1023&e)}return i.length!==c&&(i.subarray?i=i.subarray(0,c):i.length=c),d.applyFromCharCode(i)};c.utf8encode=function(a){return e.nodebuffer?f(a,"utf-8"):i(a)},c.utf8decode=function(a){if(e.nodebuffer)return d.transformTo("nodebuffer",a).toString("utf-8");a=d.transformTo(e.uint8array?"uint8array":"array",a);for(var b=[],c=0,f=a.length,g=65536;f>c;){var h=j(a,Math.min(c+g,f));b.push(e.uint8array?k(a.subarray(c,h)):k(a.slice(c,h))),c=h}return b.join("")}},{"./nodeBuffer":11,"./support":17,"./utils":21}],21:[function(a,b,c){"use strict";function d(a){return a}function e(a,b){for(var c=0;c<a.length;++c)b[c]=255&a.charCodeAt(c);return b}function f(a){var b=65536,d=[],e=a.length,f=c.getTypeOf(a),g=0,h=!0;try{switch(f){case"uint8array":String.fromCharCode.apply(null,new Uint8Array(0));break;case"nodebuffer":String.fromCharCode.apply(null,j(0))}}catch(i){h=!1}if(!h){for(var k="",l=0;l<a.length;l++)k+=String.fromCharCode(a[l]);return k}for(;e>g&&b>1;)try{d.push("array"===f||"nodebuffer"===f?String.fromCharCode.apply(null,a.slice(g,Math.min(g+b,e))):String.fromCharCode.apply(null,a.subarray(g,Math.min(g+b,e)))),g+=b}catch(i){b=Math.floor(b/2)}return d.join("")}function g(a,b){for(var c=0;c<a.length;c++)b[c]=a[c];return b}var h=a("./support"),i=a("./compressions"),j=a("./nodeBuffer");c.string2binary=function(a){for(var b="",c=0;c<a.length;c++)b+=String.fromCharCode(255&a.charCodeAt(c));return b},c.arrayBuffer2Blob=function(a){c.checkSupport("blob");try{return new Blob([a],{type:"application/zip"})}catch(b){try{var 
d=window.BlobBuilder||window.WebKitBlobBuilder||window.MozBlobBuilder||window.MSBlobBuilder,e=new d;return e.append(a),e.getBlob("application/zip")}catch(b){throw new Error("Bug : can't construct the Blob.")}}},c.applyFromCharCode=f;var k={};k.string={string:d,array:function(a){return e(a,new Array(a.length))},arraybuffer:function(a){return k.string.uint8array(a).buffer},uint8array:function(a){return e(a,new Uint8Array(a.length))},nodebuffer:function(a){return e(a,j(a.length))}},k.array={string:f,array:d,arraybuffer:function(a){return new Uint8Array(a).buffer},uint8array:function(a){return new Uint8Array(a)},nodebuffer:function(a){return j(a)}},k.arraybuffer={string:function(a){return f(new Uint8Array(a))},array:function(a){return g(new Uint8Array(a),new Array(a.byteLength))},arraybuffer:d,uint8array:function(a){return new Uint8Array(a)},nodebuffer:function(a){return j(new Uint8Array(a))}},k.uint8array={string:f,array:function(a){return g(a,new Array(a.length))},arraybuffer:function(a){return a.buffer},uint8array:d,nodebuffer:function(a){return j(a)}},k.nodebuffer={string:f,array:function(a){return g(a,new Array(a.length))},arraybuffer:function(a){return k.nodebuffer.uint8array(a).buffer},uint8array:function(a){return g(a,new Uint8Array(a.length))},nodebuffer:d},c.transformTo=function(a,b){if(b||(b=""),!a)return b;c.checkSupport(a);var d=c.getTypeOf(b),e=k[d][a](b);return e},c.getTypeOf=function(a){return"string"==typeof a?"string":"[object Array]"===Object.prototype.toString.call(a)?"array":h.nodebuffer&&j.test(a)?"nodebuffer":h.uint8array&&a instanceof Uint8Array?"uint8array":h.arraybuffer&&a instanceof ArrayBuffer?"arraybuffer":void 0},c.checkSupport=function(a){var b=h[a.toLowerCase()];if(!b)throw new Error(a+" is not supported by this browser")},c.MAX_VALUE_16BITS=65535,c.MAX_VALUE_32BITS=-1,c.pretty=function(a){var b,c,d="";for(c=0;c<(a||"").length;c++)b=a.charCodeAt(c),d+="\\x"+(16>b?"0":"")+b.toString(16).toUpperCase();return 
d},c.findCompression=function(a){for(var b in i)if(i.hasOwnProperty(b)&&i[b].magic===a)return i[b];return null},c.isRegExp=function(a){return"[object RegExp]"===Object.prototype.toString.call(a)}},{"./compressions":3,"./nodeBuffer":11,"./support":17}],22:[function(a,b){"use strict";function c(a,b){this.files=[],this.loadOptions=b,a&&this.load(a)}var d=a("./stringReader"),e=a("./nodeBufferReader"),f=a("./uint8ArrayReader"),g=a("./utils"),h=a("./signature"),i=a("./zipEntry"),j=a("./support"),k=a("./object");c.prototype={checkSignature:function(a){var b=this.reader.readString(4);if(b!==a)throw new Error("Corrupted zip or bug : unexpected signature ("+g.pretty(b)+", expected "+g.pretty(a)+")")},readBlockEndOfCentral:function(){this.diskNumber=this.reader.readInt(2),this.diskWithCentralDirStart=this.reader.readInt(2),this.centralDirRecordsOnThisDisk=this.reader.readInt(2),this.centralDirRecords=this.reader.readInt(2),this.centralDirSize=this.reader.readInt(4),this.centralDirOffset=this.reader.readInt(4),this.zipCommentLength=this.reader.readInt(2),this.zipComment=this.reader.readString(this.zipCommentLength),this.zipComment=k.utf8decode(this.zipComment)},readBlockZip64EndOfCentral:function(){this.zip64EndOfCentralSize=this.reader.readInt(8),this.versionMadeBy=this.reader.readString(2),this.versionNeeded=this.reader.readInt(2),this.diskNumber=this.reader.readInt(4),this.diskWithCentralDirStart=this.reader.readInt(4),this.centralDirRecordsOnThisDisk=this.reader.readInt(8),this.centralDirRecords=this.reader.readInt(8),this.centralDirSize=this.reader.readInt(8),this.centralDirOffset=this.reader.readInt(8),this.zip64ExtensibleData={};for(var 
a,b,c,d=this.zip64EndOfCentralSize-44,e=0;d>e;)a=this.reader.readInt(2),b=this.reader.readInt(4),c=this.reader.readString(b),this.zip64ExtensibleData[a]={id:a,length:b,value:c}},readBlockZip64EndOfCentralLocator:function(){if(this.diskWithZip64CentralDirStart=this.reader.readInt(4),this.relativeOffsetEndOfZip64CentralDir=this.reader.readInt(8),this.disksCount=this.reader.readInt(4),this.disksCount>1)throw new Error("Multi-volumes zip are not supported")},readLocalFiles:function(){var a,b;for(a=0;a<this.files.length;a++)b=this.files[a],this.reader.setIndex(b.localHeaderOffset),this.checkSignature(h.LOCAL_FILE_HEADER),b.readLocalPart(this.reader),b.handleUTF8()},readCentralDir:function(){var a;for(this.reader.setIndex(this.centralDirOffset);this.reader.readString(4)===h.CENTRAL_FILE_HEADER;)a=new i({zip64:this.zip64},this.loadOptions),a.readCentralPart(this.reader),this.files.push(a)},readEndOfCentral:function(){var a=this.reader.lastIndexOfSignature(h.CENTRAL_DIRECTORY_END);if(-1===a)throw new Error("Corrupted zip : can't find end of central directory");if(this.reader.setIndex(a),this.checkSignature(h.CENTRAL_DIRECTORY_END),this.readBlockEndOfCentral(),this.diskNumber===g.MAX_VALUE_16BITS||this.diskWithCentralDirStart===g.MAX_VALUE_16BITS||this.centralDirRecordsOnThisDisk===g.MAX_VALUE_16BITS||this.centralDirRecords===g.MAX_VALUE_16BITS||this.centralDirSize===g.MAX_VALUE_32BITS||this.centralDirOffset===g.MAX_VALUE_32BITS){if(this.zip64=!0,a=this.reader.lastIndexOfSignature(h.ZIP64_CENTRAL_DIRECTORY_LOCATOR),-1===a)throw new Error("Corrupted zip : can't find the ZIP64 end of central directory locator");this.reader.setIndex(a),this.checkSignature(h.ZIP64_CENTRAL_DIRECTORY_LOCATOR),this.readBlockZip64EndOfCentralLocator(),this.reader.setIndex(this.relativeOffsetEndOfZip64CentralDir),this.checkSignature(h.ZIP64_CENTRAL_DIRECTORY_END),this.readBlockZip64EndOfCentral()}},prepareReader:function(a){var 
b=g.getTypeOf(a);this.reader="string"!==b||j.uint8array?"nodebuffer"===b?new e(a):new f(g.transformTo("uint8array",a)):new d(a,this.loadOptions.optimizedBinaryString)},load:function(a){this.prepareReader(a),this.readEndOfCentral(),this.readCentralDir(),this.readLocalFiles()}},b.exports=c},{"./nodeBufferReader":12,"./object":13,"./signature":14,"./stringReader":15,"./support":17,"./uint8ArrayReader":18,"./utils":21,"./zipEntry":23}],23:[function(a,b){"use strict";function c(a,b){this.options=a,this.loadOptions=b}var d=a("./stringReader"),e=a("./utils"),f=a("./compressedObject"),g=a("./object");c.prototype={isEncrypted:function(){return 1===(1&this.bitFlag)},useUTF8:function(){return 2048===(2048&this.bitFlag)},prepareCompressedContent:function(a,b,c){return function(){var d=a.index;a.setIndex(b);var e=a.readData(c);return a.setIndex(d),e}},prepareContent:function(a,b,c,d,f){return function(){var a=e.transformTo(d.uncompressInputType,this.getCompressedContent()),b=d.uncompress(a);if(b.length!==f)throw new Error("Bug : uncompressed data size mismatch");return b}},readLocalPart:function(a){var b,c;if(a.skip(22),this.fileNameLength=a.readInt(2),c=a.readInt(2),this.fileName=a.readString(this.fileNameLength),a.skip(c),-1==this.compressedSize||-1==this.uncompressedSize)throw new Error("Bug or corrupted zip : didn't get enough informations from the central directory (compressedSize == -1 || uncompressedSize == -1)");if(b=e.findCompression(this.compressionMethod),null===b)throw new Error("Corrupted zip : compression "+e.pretty(this.compressionMethod)+" unknown (inner file : "+this.fileName+")");if(this.decompressed=new 
f,this.decompressed.compressedSize=this.compressedSize,this.decompressed.uncompressedSize=this.uncompressedSize,this.decompressed.crc32=this.crc32,this.decompressed.compressionMethod=this.compressionMethod,this.decompressed.getCompressedContent=this.prepareCompressedContent(a,a.index,this.compressedSize,b),this.decompressed.getContent=this.prepareContent(a,a.index,this.compressedSize,b,this.uncompressedSize),this.loadOptions.checkCRC32&&(this.decompressed=e.transformTo("string",this.decompressed.getContent()),g.crc32(this.decompressed)!==this.crc32))throw new Error("Corrupted zip : CRC32 mismatch")},readCentralPart:function(a){if(this.versionMadeBy=a.readString(2),this.versionNeeded=a.readInt(2),this.bitFlag=a.readInt(2),this.compressionMethod=a.readString(2),this.date=a.readDate(),this.crc32=a.readInt(4),this.compressedSize=a.readInt(4),this.uncompressedSize=a.readInt(4),this.fileNameLength=a.readInt(2),this.extraFieldsLength=a.readInt(2),this.fileCommentLength=a.readInt(2),this.diskNumberStart=a.readInt(2),this.internalFileAttributes=a.readInt(2),this.externalFileAttributes=a.readInt(4),this.localHeaderOffset=a.readInt(4),this.isEncrypted())throw new Error("Encrypted zip are not supported");this.fileName=a.readString(this.fileNameLength),this.readExtraFields(a),this.parseZIP64ExtraField(a),this.fileComment=a.readString(this.fileCommentLength),this.dir=16&this.externalFileAttributes?!0:!1},parseZIP64ExtraField:function(){if(this.extraFields[1]){var a=new d(this.extraFields[1].value);this.uncompressedSize===e.MAX_VALUE_32BITS&&(this.uncompressedSize=a.readInt(8)),this.compressedSize===e.MAX_VALUE_32BITS&&(this.compressedSize=a.readInt(8)),this.localHeaderOffset===e.MAX_VALUE_32BITS&&(this.localHeaderOffset=a.readInt(8)),this.diskNumberStart===e.MAX_VALUE_32BITS&&(this.diskNumberStart=a.readInt(4))}},readExtraFields:function(a){var 
b,c,d,e=a.index;for(this.extraFields=this.extraFields||{};a.index<e+this.extraFieldsLength;)b=a.readInt(2),c=a.readInt(2),d=a.readString(c),this.extraFields[b]={id:b,length:c,value:d}},handleUTF8:function(){if(this.useUTF8())this.fileName=g.utf8decode(this.fileName),this.fileComment=g.utf8decode(this.fileComment);else{var a=this.findExtraFieldUnicodePath();null!==a&&(this.fileName=a);var b=this.findExtraFieldUnicodeComment();null!==b&&(this.fileComment=b)}},findExtraFieldUnicodePath:function(){var a=this.extraFields[28789];if(a){var b=new d(a.value);return 1!==b.readInt(1)?null:g.crc32(this.fileName)!==b.readInt(4)?null:g.utf8decode(b.readString(a.length-5))}return null},findExtraFieldUnicodeComment:function(){var a=this.extraFields[25461];if(a){var b=new d(a.value);return 1!==b.readInt(1)?null:g.crc32(this.fileComment)!==b.readInt(4)?null:g.utf8decode(b.readString(a.length-5))}return null}},b.exports=c},{"./compressedObject":2,"./object":13,"./stringReader":15,"./utils":21}],24:[function(a,b){"use strict";var c=a("./lib/utils/common").assign,d=a("./lib/deflate"),e=a("./lib/inflate"),f=a("./lib/zlib/constants"),g={};c(g,d,e,f),b.exports=g},{"./lib/deflate":25,"./lib/inflate":26,"./lib/utils/common":27,"./lib/zlib/constants":30}],25:[function(a,b,c){"use strict";function d(a,b){var c=new s(b);if(c.push(a,!0),c.err)throw c.msg;return c.result}function e(a,b){return b=b||{},b.raw=!0,d(a,b)}function f(a,b){return b=b||{},b.gzip=!0,d(a,b)}var g=a("./zlib/deflate.js"),h=a("./utils/common"),i=a("./utils/strings"),j=a("./zlib/messages"),k=a("./zlib/zstream"),l=0,m=4,n=0,o=1,p=-1,q=0,r=8,s=function(a){this.options=h.assign({level:p,method:r,chunkSize:16384,windowBits:15,memLevel:8,strategy:q,to:""},a||{});var b=this.options;b.raw&&b.windowBits>0?b.windowBits=-b.windowBits:b.gzip&&b.windowBits>0&&b.windowBits<16&&(b.windowBits+=16),this.err=0,this.msg="",this.ended=!1,this.chunks=[],this.strm=new k,this.strm.avail_out=0;var 
c=g.deflateInit2(this.strm,b.level,b.method,b.windowBits,b.memLevel,b.strategy);if(c!==n)throw new Error(j[c]);b.header&&g.deflateSetHeader(this.strm,b.header)};s.prototype.push=function(a,b){var c,d,e=this.strm,f=this.options.chunkSize;if(this.ended)return!1;d=b===~~b?b:b===!0?m:l,e.input="string"==typeof a?i.string2buf(a):a,e.next_in=0,e.avail_in=e.input.length;do{if(0===e.avail_out&&(e.output=new h.Buf8(f),e.next_out=0,e.avail_out=f),c=g.deflate(e,d),c!==o&&c!==n)return this.onEnd(c),this.ended=!0,!1;(0===e.avail_out||0===e.avail_in&&d===m)&&this.onData("string"===this.options.to?i.buf2binstring(h.shrinkBuf(e.output,e.next_out)):h.shrinkBuf(e.output,e.next_out))}while((e.avail_in>0||0===e.avail_out)&&c!==o);return d===m?(c=g.deflateEnd(this.strm),this.onEnd(c),this.ended=!0,c===n):!0},s.prototype.onData=function(a){this.chunks.push(a)},s.prototype.onEnd=function(a){a===n&&(this.result="string"===this.options.to?this.chunks.join(""):h.flattenChunks(this.chunks)),this.chunks=[],this.err=a,this.msg=this.strm.msg},c.Deflate=s,c.deflate=d,c.deflateRaw=e,c.gzip=f},{"./utils/common":27,"./utils/strings":28,"./zlib/deflate.js":32,"./zlib/messages":37,"./zlib/zstream":39}],26:[function(a,b,c){"use strict";function d(a,b){var c=new m(b);if(c.push(a,!0),c.err)throw c.msg;return c.result}function e(a,b){return b=b||{},b.raw=!0,d(a,b)}var f=a("./zlib/inflate.js"),g=a("./utils/common"),h=a("./utils/strings"),i=a("./zlib/constants"),j=a("./zlib/messages"),k=a("./zlib/zstream"),l=a("./zlib/gzheader"),m=function(a){this.options=g.assign({chunkSize:16384,windowBits:0,to:""},a||{});var b=this.options;b.raw&&b.windowBits>=0&&b.windowBits<16&&(b.windowBits=-b.windowBits,0===b.windowBits&&(b.windowBits=-15)),!(b.windowBits>=0&&b.windowBits<16)||a&&a.windowBits||(b.windowBits+=32),b.windowBits>15&&b.windowBits<48&&0===(15&b.windowBits)&&(b.windowBits|=15),this.err=0,this.msg="",this.ended=!1,this.chunks=[],this.strm=new k,this.strm.avail_out=0;var 
c=f.inflateInit2(this.strm,b.windowBits);if(c!==i.Z_OK)throw new Error(j[c]);this.header=new l,f.inflateGetHeader(this.strm,this.header)};m.prototype.push=function(a,b){var c,d,e,j,k,l=this.strm,m=this.options.chunkSize;if(this.ended)return!1;d=b===~~b?b:b===!0?i.Z_FINISH:i.Z_NO_FLUSH,l.input="string"==typeof a?h.binstring2buf(a):a,l.next_in=0,l.avail_in=l.input.length;do{if(0===l.avail_out&&(l.output=new g.Buf8(m),l.next_out=0,l.avail_out=m),c=f.inflate(l,i.Z_NO_FLUSH),c!==i.Z_STREAM_END&&c!==i.Z_OK)return this.onEnd(c),this.ended=!0,!1;l.next_out&&(0===l.avail_out||c===i.Z_STREAM_END||0===l.avail_in&&d===i.Z_FINISH)&&("string"===this.options.to?(e=h.utf8border(l.output,l.next_out),j=l.next_out-e,k=h.buf2string(l.output,e),l.next_out=j,l.avail_out=m-j,j&&g.arraySet(l.output,l.output,e,j,0),this.onData(k)):this.onData(g.shrinkBuf(l.output,l.next_out)))}while(l.avail_in>0&&c!==i.Z_STREAM_END);return c===i.Z_STREAM_END&&(d=i.Z_FINISH),d===i.Z_FINISH?(c=f.inflateEnd(this.strm),this.onEnd(c),this.ended=!0,c===i.Z_OK):!0},m.prototype.onData=function(a){this.chunks.push(a)},m.prototype.onEnd=function(a){a===i.Z_OK&&(this.result="string"===this.options.to?this.chunks.join(""):g.flattenChunks(this.chunks)),this.chunks=[],this.err=a,this.msg=this.strm.msg},c.Inflate=m,c.inflate=d,c.inflateRaw=e,c.ungzip=d},{"./utils/common":27,"./utils/strings":28,"./zlib/constants":30,"./zlib/gzheader":33,"./zlib/inflate.js":35,"./zlib/messages":37,"./zlib/zstream":39}],27:[function(a,b,c){"use strict";var d="undefined"!=typeof Uint8Array&&"undefined"!=typeof Uint16Array&&"undefined"!=typeof Int32Array;c.assign=function(a){for(var b=Array.prototype.slice.call(arguments,1);b.length;){var c=b.shift();if(c){if("object"!=typeof c)throw new TypeError(c+"must be non-object");for(var d in c)c.hasOwnProperty(d)&&(a[d]=c[d])}}return a},c.shrinkBuf=function(a,b){return a.length===b?a:a.subarray?a.subarray(0,b):(a.length=b,a)};var e={arraySet:function(a,b,c,d,e){if(b.subarray&&a.subarray)return void 
a.set(b.subarray(c,c+d),e);for(var f=0;d>f;f++)a[e+f]=b[c+f]},flattenChunks:function(a){var b,c,d,e,f,g;for(d=0,b=0,c=a.length;c>b;b++)d+=a[b].length;for(g=new Uint8Array(d),e=0,b=0,c=a.length;c>b;b++)f=a[b],g.set(f,e),e+=f.length;return g}},f={arraySet:function(a,b,c,d,e){for(var f=0;d>f;f++)a[e+f]=b[c+f]},flattenChunks:function(a){return[].concat.apply([],a)}};c.setTyped=function(a){a?(c.Buf8=Uint8Array,c.Buf16=Uint16Array,c.Buf32=Int32Array,c.assign(c,e)):(c.Buf8=Array,c.Buf16=Array,c.Buf32=Array,c.assign(c,f))},c.setTyped(d)},{}],28:[function(a,b,c){"use strict";function d(a,b){if(65537>b&&(a.subarray&&g||!a.subarray&&f))return String.fromCharCode.apply(null,e.shrinkBuf(a,b));for(var c="",d=0;b>d;d++)c+=String.fromCharCode(a[d]);return c}var e=a("./common"),f=!0,g=!0;try{String.fromCharCode.apply(null,[0])}catch(h){f=!1}try{String.fromCharCode.apply(null,new Uint8Array(1))}catch(h){g=!1}for(var i=new e.Buf8(256),j=0;256>j;j++)i[j]=j>=252?6:j>=248?5:j>=240?4:j>=224?3:j>=192?2:1;i[254]=i[254]=1,c.string2buf=function(a){var b,c,d,f,g,h=a.length,i=0;for(f=0;h>f;f++)c=a.charCodeAt(f),55296===(64512&c)&&h>f+1&&(d=a.charCodeAt(f+1),56320===(64512&d)&&(c=65536+(c-55296<<10)+(d-56320),f++)),i+=128>c?1:2048>c?2:65536>c?3:4;for(b=new e.Buf8(i),g=0,f=0;i>g;f++)c=a.charCodeAt(f),55296===(64512&c)&&h>f+1&&(d=a.charCodeAt(f+1),56320===(64512&d)&&(c=65536+(c-55296<<10)+(d-56320),f++)),128>c?b[g++]=c:2048>c?(b[g++]=192|c>>>6,b[g++]=128|63&c):65536>c?(b[g++]=224|c>>>12,b[g++]=128|c>>>6&63,b[g++]=128|63&c):(b[g++]=240|c>>>18,b[g++]=128|c>>>12&63,b[g++]=128|c>>>6&63,b[g++]=128|63&c);return b},c.buf2binstring=function(a){return d(a,a.length)},c.binstring2buf=function(a){for(var b=new e.Buf8(a.length),c=0,d=b.length;d>c;c++)b[c]=a.charCodeAt(c);return b},c.buf2string=function(a,b){var c,e,f,g,h=b||a.length,j=new Array(2*h);for(e=0,c=0;h>c;)if(f=a[c++],128>f)j[e++]=f;else 
if(g=i[f],g>4)j[e++]=65533,c+=g-1;else{for(f&=2===g?31:3===g?15:7;g>1&&h>c;)f=f<<6|63&a[c++],g--;g>1?j[e++]=65533:65536>f?j[e++]=f:(f-=65536,j[e++]=55296|f>>10&1023,j[e++]=56320|1023&f)}return d(j,e)},c.utf8border=function(a,b){var c;for(b=b||a.length,b>a.length&&(b=a.length),c=b-1;c>=0&&128===(192&a[c]);)c--;return 0>c?b:0===c?b:c+i[a[c]]>b?c:b}},{"./common":27}],29:[function(a,b){"use strict";function c(a,b,c,d){for(var e=65535&a|0,f=a>>>16&65535|0,g=0;0!==c;){g=c>2e3?2e3:c,c-=g;do e=e+b[d++]|0,f=f+e|0;while(--g);e%=65521,f%=65521}return e|f<<16|0}b.exports=c},{}],30:[function(a,b){b.exports={Z_NO_FLUSH:0,Z_PARTIAL_FLUSH:1,Z_SYNC_FLUSH:2,Z_FULL_FLUSH:3,Z_FINISH:4,Z_BLOCK:5,Z_TREES:6,Z_OK:0,Z_STREAM_END:1,Z_NEED_DICT:2,Z_ERRNO:-1,Z_STREAM_ERROR:-2,Z_DATA_ERROR:-3,Z_BUF_ERROR:-5,Z_NO_COMPRESSION:0,Z_BEST_SPEED:1,Z_BEST_COMPRESSION:9,Z_DEFAULT_COMPRESSION:-1,Z_FILTERED:1,Z_HUFFMAN_ONLY:2,Z_RLE:3,Z_FIXED:4,Z_DEFAULT_STRATEGY:0,Z_BINARY:0,Z_TEXT:1,Z_UNKNOWN:2,Z_DEFLATED:8}},{}],31:[function(a,b){"use strict";function c(){for(var a,b=[],c=0;256>c;c++){a=c;for(var d=0;8>d;d++)a=1&a?3988292384^a>>>1:a>>>1;b[c]=a}return b}function d(a,b,c,d){var f=e,g=d+c;a=-1^a;for(var h=d;g>h;h++)a=a>>>8^f[255&(a^b[h])];return-1^a}var e=c();b.exports=d},{}],32:[function(a,b,c){"use strict";function d(a,b){return a.msg=G[b],b}function e(a){return(a<<1)-(a>4?9:0)}function f(a){for(var b=a.length;--b>=0;)a[b]=0}function g(a){var b=a.state,c=b.pending;c>a.avail_out&&(c=a.avail_out),0!==c&&(C.arraySet(a.output,b.pending_buf,b.pending_out,c,a.next_out),a.next_out+=c,b.pending_out+=c,a.total_out+=c,a.avail_out-=c,b.pending-=c,0===b.pending&&(b.pending_out=0))}function h(a,b){D._tr_flush_block(a,a.block_start>=0?a.block_start:-1,a.strstart-a.block_start,b),a.block_start=a.strstart,g(a.strm)}function i(a,b){a.pending_buf[a.pending++]=b}function j(a,b){a.pending_buf[a.pending++]=b>>>8&255,a.pending_buf[a.pending++]=255&b}function k(a,b,c,d){var e=a.avail_in;return 
e>d&&(e=d),0===e?0:(a.avail_in-=e,C.arraySet(b,a.input,a.next_in,e,c),1===a.state.wrap?a.adler=E(a.adler,b,e,c):2===a.state.wrap&&(a.adler=F(a.adler,b,e,c)),a.next_in+=e,a.total_in+=e,e)}function l(a,b){var c,d,e=a.max_chain_length,f=a.strstart,g=a.prev_length,h=a.nice_match,i=a.strstart>a.w_size-jb?a.strstart-(a.w_size-jb):0,j=a.window,k=a.w_mask,l=a.prev,m=a.strstart+ib,n=j[f+g-1],o=j[f+g];a.prev_length>=a.good_match&&(e>>=2),h>a.lookahead&&(h=a.lookahead);do if(c=b,j[c+g]===o&&j[c+g-1]===n&&j[c]===j[f]&&j[++c]===j[f+1]){f+=2,c++;do;while(j[++f]===j[++c]&&j[++f]===j[++c]&&j[++f]===j[++c]&&j[++f]===j[++c]&&j[++f]===j[++c]&&j[++f]===j[++c]&&j[++f]===j[++c]&&j[++f]===j[++c]&&m>f);if(d=ib-(m-f),f=m-ib,d>g){if(a.match_start=b,g=d,d>=h)break;n=j[f+g-1],o=j[f+g]}}while((b=l[b&k])>i&&0!==--e);return g<=a.lookahead?g:a.lookahead}function m(a){var b,c,d,e,f,g=a.w_size;do{if(e=a.window_size-a.lookahead-a.strstart,a.strstart>=g+(g-jb)){C.arraySet(a.window,a.window,g,g,0),a.match_start-=g,a.strstart-=g,a.block_start-=g,c=a.hash_size,b=c;do d=a.head[--b],a.head[b]=d>=g?d-g:0;while(--c);c=g,b=c;do d=a.prev[--b],a.prev[b]=d>=g?d-g:0;while(--c);e+=g}if(0===a.strm.avail_in)break;if(c=k(a.strm,a.window,a.strstart+a.lookahead,e),a.lookahead+=c,a.lookahead+a.insert>=hb)for(f=a.strstart-a.insert,a.ins_h=a.window[f],a.ins_h=(a.ins_h<<a.hash_shift^a.window[f+1])&a.hash_mask;a.insert&&(a.ins_h=(a.ins_h<<a.hash_shift^a.window[f+hb-1])&a.hash_mask,a.prev[f&a.w_mask]=a.head[a.ins_h],a.head[a.ins_h]=f,f++,a.insert--,!(a.lookahead+a.insert<hb)););}while(a.lookahead<jb&&0!==a.strm.avail_in)}function n(a,b){var c=65535;for(c>a.pending_buf_size-5&&(c=a.pending_buf_size-5);;){if(a.lookahead<=1){if(m(a),0===a.lookahead&&b===H)return sb;if(0===a.lookahead)break}a.strstart+=a.lookahead,a.lookahead=0;var d=a.block_start+c;if((0===a.strstart||a.strstart>=d)&&(a.lookahead=a.strstart-d,a.strstart=d,h(a,!1),0===a.strm.avail_out))return 
sb;if(a.strstart-a.block_start>=a.w_size-jb&&(h(a,!1),0===a.strm.avail_out))return sb}return a.insert=0,b===K?(h(a,!0),0===a.strm.avail_out?ub:vb):a.strstart>a.block_start&&(h(a,!1),0===a.strm.avail_out)?sb:sb}function o(a,b){for(var c,d;;){if(a.lookahead<jb){if(m(a),a.lookahead<jb&&b===H)return sb;if(0===a.lookahead)break}if(c=0,a.lookahead>=hb&&(a.ins_h=(a.ins_h<<a.hash_shift^a.window[a.strstart+hb-1])&a.hash_mask,c=a.prev[a.strstart&a.w_mask]=a.head[a.ins_h],a.head[a.ins_h]=a.strstart),0!==c&&a.strstart-c<=a.w_size-jb&&(a.match_length=l(a,c)),a.match_length>=hb)if(d=D._tr_tally(a,a.strstart-a.match_start,a.match_length-hb),a.lookahead-=a.match_length,a.match_length<=a.max_lazy_match&&a.lookahead>=hb){a.match_length--;do a.strstart++,a.ins_h=(a.ins_h<<a.hash_shift^a.window[a.strstart+hb-1])&a.hash_mask,c=a.prev[a.strstart&a.w_mask]=a.head[a.ins_h],a.head[a.ins_h]=a.strstart;while(0!==--a.match_length);a.strstart++}else a.strstart+=a.match_length,a.match_length=0,a.ins_h=a.window[a.strstart],a.ins_h=(a.ins_h<<a.hash_shift^a.window[a.strstart+1])&a.hash_mask;else d=D._tr_tally(a,0,a.window[a.strstart]),a.lookahead--,a.strstart++;if(d&&(h(a,!1),0===a.strm.avail_out))return sb}return a.insert=a.strstart<hb-1?a.strstart:hb-1,b===K?(h(a,!0),0===a.strm.avail_out?ub:vb):a.last_lit&&(h(a,!1),0===a.strm.avail_out)?sb:tb}function p(a,b){for(var c,d,e;;){if(a.lookahead<jb){if(m(a),a.lookahead<jb&&b===H)return 
sb;if(0===a.lookahead)break}if(c=0,a.lookahead>=hb&&(a.ins_h=(a.ins_h<<a.hash_shift^a.window[a.strstart+hb-1])&a.hash_mask,c=a.prev[a.strstart&a.w_mask]=a.head[a.ins_h],a.head[a.ins_h]=a.strstart),a.prev_length=a.match_length,a.prev_match=a.match_start,a.match_length=hb-1,0!==c&&a.prev_length<a.max_lazy_match&&a.strstart-c<=a.w_size-jb&&(a.match_length=l(a,c),a.match_length<=5&&(a.strategy===S||a.match_length===hb&&a.strstart-a.match_start>4096)&&(a.match_length=hb-1)),a.prev_length>=hb&&a.match_length<=a.prev_length){e=a.strstart+a.lookahead-hb,d=D._tr_tally(a,a.strstart-1-a.prev_match,a.prev_length-hb),a.lookahead-=a.prev_length-1,a.prev_length-=2;do++a.strstart<=e&&(a.ins_h=(a.ins_h<<a.hash_shift^a.window[a.strstart+hb-1])&a.hash_mask,c=a.prev[a.strstart&a.w_mask]=a.head[a.ins_h],a.head[a.ins_h]=a.strstart);while(0!==--a.prev_length);if(a.match_available=0,a.match_length=hb-1,a.strstart++,d&&(h(a,!1),0===a.strm.avail_out))return sb}else if(a.match_available){if(d=D._tr_tally(a,0,a.window[a.strstart-1]),d&&h(a,!1),a.strstart++,a.lookahead--,0===a.strm.avail_out)return sb}else a.match_available=1,a.strstart++,a.lookahead--}return a.match_available&&(d=D._tr_tally(a,0,a.window[a.strstart-1]),a.match_available=0),a.insert=a.strstart<hb-1?a.strstart:hb-1,b===K?(h(a,!0),0===a.strm.avail_out?ub:vb):a.last_lit&&(h(a,!1),0===a.strm.avail_out)?sb:tb}function q(a,b){for(var c,d,e,f,g=a.window;;){if(a.lookahead<=ib){if(m(a),a.lookahead<=ib&&b===H)return 
sb;if(0===a.lookahead)break}if(a.match_length=0,a.lookahead>=hb&&a.strstart>0&&(e=a.strstart-1,d=g[e],d===g[++e]&&d===g[++e]&&d===g[++e])){f=a.strstart+ib;do;while(d===g[++e]&&d===g[++e]&&d===g[++e]&&d===g[++e]&&d===g[++e]&&d===g[++e]&&d===g[++e]&&d===g[++e]&&f>e);a.match_length=ib-(f-e),a.match_length>a.lookahead&&(a.match_length=a.lookahead)}if(a.match_length>=hb?(c=D._tr_tally(a,1,a.match_length-hb),a.lookahead-=a.match_length,a.strstart+=a.match_length,a.match_length=0):(c=D._tr_tally(a,0,a.window[a.strstart]),a.lookahead--,a.strstart++),c&&(h(a,!1),0===a.strm.avail_out))return sb}return a.insert=0,b===K?(h(a,!0),0===a.strm.avail_out?ub:vb):a.last_lit&&(h(a,!1),0===a.strm.avail_out)?sb:tb}function r(a,b){for(var c;;){if(0===a.lookahead&&(m(a),0===a.lookahead)){if(b===H)return sb;break}if(a.match_length=0,c=D._tr_tally(a,0,a.window[a.strstart]),a.lookahead--,a.strstart++,c&&(h(a,!1),0===a.strm.avail_out))return sb}return a.insert=0,b===K?(h(a,!0),0===a.strm.avail_out?ub:vb):a.last_lit&&(h(a,!1),0===a.strm.avail_out)?sb:tb}function s(a){a.window_size=2*a.w_size,f(a.head),a.max_lazy_match=B[a.level].max_lazy,a.good_match=B[a.level].good_length,a.nice_match=B[a.level].nice_length,a.max_chain_length=B[a.level].max_chain,a.strstart=0,a.block_start=0,a.lookahead=0,a.insert=0,a.match_length=a.prev_length=hb-1,a.match_available=0,a.ins_h=0}function 
t(){this.strm=null,this.status=0,this.pending_buf=null,this.pending_buf_size=0,this.pending_out=0,this.pending=0,this.wrap=0,this.gzhead=null,this.gzindex=0,this.method=Y,this.last_flush=-1,this.w_size=0,this.w_bits=0,this.w_mask=0,this.window=null,this.window_size=0,this.prev=null,this.head=null,this.ins_h=0,this.hash_size=0,this.hash_bits=0,this.hash_mask=0,this.hash_shift=0,this.block_start=0,this.match_length=0,this.prev_match=0,this.match_available=0,this.strstart=0,this.match_start=0,this.lookahead=0,this.prev_length=0,this.max_chain_length=0,this.max_lazy_match=0,this.level=0,this.strategy=0,this.good_match=0,this.nice_match=0,this.dyn_ltree=new C.Buf16(2*fb),this.dyn_dtree=new C.Buf16(2*(2*db+1)),this.bl_tree=new C.Buf16(2*(2*eb+1)),f(this.dyn_ltree),f(this.dyn_dtree),f(this.bl_tree),this.l_desc=null,this.d_desc=null,this.bl_desc=null,this.bl_count=new C.Buf16(gb+1),this.heap=new C.Buf16(2*cb+1),f(this.heap),this.heap_len=0,this.heap_max=0,this.depth=new C.Buf16(2*cb+1),f(this.depth),this.l_buf=0,this.lit_bufsize=0,this.last_lit=0,this.d_buf=0,this.opt_len=0,this.static_len=0,this.matches=0,this.insert=0,this.bi_buf=0,this.bi_valid=0}function u(a){var b;return a&&a.state?(a.total_in=a.total_out=0,a.data_type=X,b=a.state,b.pending=0,b.pending_out=0,b.wrap<0&&(b.wrap=-b.wrap),b.status=b.wrap?lb:qb,a.adler=2===b.wrap?0:1,b.last_flush=H,D._tr_init(b),M):d(a,O)}function v(a){var b=u(a);return b===M&&s(a.state),b}function w(a,b){return a&&a.state?2!==a.state.wrap?O:(a.state.gzhead=b,M):O}function x(a,b,c,e,f,g){if(!a)return O;var h=1;if(b===R&&(b=6),0>e?(h=0,e=-e):e>15&&(h=2,e-=16),1>f||f>Z||c!==Y||8>e||e>15||0>b||b>9||0>g||g>V)return d(a,O);8===e&&(e=9);var i=new t;return a.state=i,i.strm=a,i.wrap=h,i.gzhead=null,i.w_bits=e,i.w_size=1<<i.w_bits,i.w_mask=i.w_size-1,i.hash_bits=f+7,i.hash_size=1<<i.hash_bits,i.hash_mask=i.hash_size-1,i.hash_shift=~~((i.hash_bits+hb-1)/hb),i.window=new C.Buf8(2*i.w_size),i.head=new C.Buf16(i.hash_size),i.prev=new 
C.Buf16(i.w_size),i.lit_bufsize=1<<f+6,i.pending_buf_size=4*i.lit_bufsize,i.pending_buf=new C.Buf8(i.pending_buf_size),i.d_buf=i.lit_bufsize>>1,i.l_buf=3*i.lit_bufsize,i.level=b,i.strategy=g,i.method=c,v(a)}function y(a,b){return x(a,b,Y,$,_,W)}function z(a,b){var c,h,k,l;if(!a||!a.state||b>L||0>b)return a?d(a,O):O;if(h=a.state,!a.output||!a.input&&0!==a.avail_in||h.status===rb&&b!==K)return d(a,0===a.avail_out?Q:O);if(h.strm=a,c=h.last_flush,h.last_flush=b,h.status===lb)if(2===h.wrap)a.adler=0,i(h,31),i(h,139),i(h,8),h.gzhead?(i(h,(h.gzhead.text?1:0)+(h.gzhead.hcrc?2:0)+(h.gzhead.extra?4:0)+(h.gzhead.name?8:0)+(h.gzhead.comment?16:0)),i(h,255&h.gzhead.time),i(h,h.gzhead.time>>8&255),i(h,h.gzhead.time>>16&255),i(h,h.gzhead.time>>24&255),i(h,9===h.level?2:h.strategy>=T||h.level<2?4:0),i(h,255&h.gzhead.os),h.gzhead.extra&&h.gzhead.extra.length&&(i(h,255&h.gzhead.extra.length),i(h,h.gzhead.extra.length>>8&255)),h.gzhead.hcrc&&(a.adler=F(a.adler,h.pending_buf,h.pending,0)),h.gzindex=0,h.status=mb):(i(h,0),i(h,0),i(h,0),i(h,0),i(h,0),i(h,9===h.level?2:h.strategy>=T||h.level<2?4:0),i(h,wb),h.status=qb);else{var m=Y+(h.w_bits-8<<4)<<8,n=-1;n=h.strategy>=T||h.level<2?0:h.level<6?1:6===h.level?2:3,m|=n<<6,0!==h.strstart&&(m|=kb),m+=31-m%31,h.status=qb,j(h,m),0!==h.strstart&&(j(h,a.adler>>>16),j(h,65535&a.adler)),a.adler=1}if(h.status===mb)if(h.gzhead.extra){for(k=h.pending;h.gzindex<(65535&h.gzhead.extra.length)&&(h.pending!==h.pending_buf_size||(h.gzhead.hcrc&&h.pending>k&&(a.adler=F(a.adler,h.pending_buf,h.pending-k,k)),g(a),k=h.pending,h.pending!==h.pending_buf_size));)i(h,255&h.gzhead.extra[h.gzindex]),h.gzindex++;h.gzhead.hcrc&&h.pending>k&&(a.adler=F(a.adler,h.pending_buf,h.pending-k,k)),h.gzindex===h.gzhead.extra.length&&(h.gzindex=0,h.status=nb)}else 
h.status=nb;if(h.status===nb)if(h.gzhead.name){k=h.pending;do{if(h.pending===h.pending_buf_size&&(h.gzhead.hcrc&&h.pending>k&&(a.adler=F(a.adler,h.pending_buf,h.pending-k,k)),g(a),k=h.pending,h.pending===h.pending_buf_size)){l=1;break}l=h.gzindex<h.gzhead.name.length?255&h.gzhead.name.charCodeAt(h.gzindex++):0,i(h,l)}while(0!==l);h.gzhead.hcrc&&h.pending>k&&(a.adler=F(a.adler,h.pending_buf,h.pending-k,k)),0===l&&(h.gzindex=0,h.status=ob)}else h.status=ob;if(h.status===ob)if(h.gzhead.comment){k=h.pending;do{if(h.pending===h.pending_buf_size&&(h.gzhead.hcrc&&h.pending>k&&(a.adler=F(a.adler,h.pending_buf,h.pending-k,k)),g(a),k=h.pending,h.pending===h.pending_buf_size)){l=1;break}l=h.gzindex<h.gzhead.comment.length?255&h.gzhead.comment.charCodeAt(h.gzindex++):0,i(h,l)}while(0!==l);h.gzhead.hcrc&&h.pending>k&&(a.adler=F(a.adler,h.pending_buf,h.pending-k,k)),0===l&&(h.status=pb)}else h.status=pb;if(h.status===pb&&(h.gzhead.hcrc?(h.pending+2>h.pending_buf_size&&g(a),h.pending+2<=h.pending_buf_size&&(i(h,255&a.adler),i(h,a.adler>>8&255),a.adler=0,h.status=qb)):h.status=qb),0!==h.pending){if(g(a),0===a.avail_out)return h.last_flush=-1,M}else if(0===a.avail_in&&e(b)<=e(c)&&b!==K)return d(a,Q);if(h.status===rb&&0!==a.avail_in)return d(a,Q);if(0!==a.avail_in||0!==h.lookahead||b!==H&&h.status!==rb){var o=h.strategy===T?r(h,b):h.strategy===U?q(h,b):B[h.level].func(h,b);if((o===ub||o===vb)&&(h.status=rb),o===sb||o===ub)return 0===a.avail_out&&(h.last_flush=-1),M;if(o===tb&&(b===I?D._tr_align(h):b!==L&&(D._tr_stored_block(h,0,0,!1),b===J&&(f(h.head),0===h.lookahead&&(h.strstart=0,h.block_start=0,h.insert=0))),g(a),0===a.avail_out))return h.last_flush=-1,M}return b!==K?M:h.wrap<=0?N:(2===h.wrap?(i(h,255&a.adler),i(h,a.adler>>8&255),i(h,a.adler>>16&255),i(h,a.adler>>24&255),i(h,255&a.total_in),i(h,a.total_in>>8&255),i(h,a.total_in>>16&255),i(h,a.total_in>>24&255)):(j(h,a.adler>>>16),j(h,65535&a.adler)),g(a),h.wrap>0&&(h.wrap=-h.wrap),0!==h.pending?M:N)}function A(a){var b;return 
a&&a.state?(b=a.state.status,b!==lb&&b!==mb&&b!==nb&&b!==ob&&b!==pb&&b!==qb&&b!==rb?d(a,O):(a.state=null,b===qb?d(a,P):M)):O}var B,C=a("../utils/common"),D=a("./trees"),E=a("./adler32"),F=a("./crc32"),G=a("./messages"),H=0,I=1,J=3,K=4,L=5,M=0,N=1,O=-2,P=-3,Q=-5,R=-1,S=1,T=2,U=3,V=4,W=0,X=2,Y=8,Z=9,$=15,_=8,ab=29,bb=256,cb=bb+1+ab,db=30,eb=19,fb=2*cb+1,gb=15,hb=3,ib=258,jb=ib+hb+1,kb=32,lb=42,mb=69,nb=73,ob=91,pb=103,qb=113,rb=666,sb=1,tb=2,ub=3,vb=4,wb=3,xb=function(a,b,c,d,e){this.good_length=a,this.max_lazy=b,this.nice_length=c,this.max_chain=d,this.func=e};B=[new xb(0,0,0,0,n),new xb(4,4,8,4,o),new xb(4,5,16,8,o),new xb(4,6,32,32,o),new xb(4,4,16,16,p),new xb(8,16,32,32,p),new xb(8,16,128,128,p),new xb(8,32,128,256,p),new xb(32,128,258,1024,p),new xb(32,258,258,4096,p)],c.deflateInit=y,c.deflateInit2=x,c.deflateReset=v,c.deflateResetKeep=u,c.deflateSetHeader=w,c.deflate=z,c.deflateEnd=A,c.deflateInfo="pako deflate (from Nodeca project)"},{"../utils/common":27,"./adler32":29,"./crc32":31,"./messages":37,"./trees":38}],33:[function(a,b){"use strict";function c(){this.text=0,this.time=0,this.xflags=0,this.os=0,this.extra=null,this.extra_len=0,this.name="",this.comment="",this.hcrc=0,this.done=!1}b.exports=c},{}],34:[function(a,b){"use strict";var c=30,d=12;b.exports=function(a,b){var e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u,v,w,x,y,z,A,B,C;e=a.state,f=a.next_in,B=a.input,g=f+(a.avail_in-5),h=a.next_out,C=a.output,i=h-(b-a.avail_out),j=h+(a.avail_out-257),k=e.dmax,l=e.wsize,m=e.whave,n=e.wnext,o=e.window,p=e.hold,q=e.bits,r=e.lencode,s=e.distcode,t=(1<<e.lenbits)-1,u=(1<<e.distbits)-1;a:do{15>q&&(p+=B[f++]<<q,q+=8,p+=B[f++]<<q,q+=8),v=r[p&t];b:for(;;){if(w=v>>>24,p>>>=w,q-=w,w=v>>>16&255,0===w)C[h++]=65535&v;else{if(!(16&w)){if(0===(64&w)){v=r[(65535&v)+(p&(1<<w)-1)];continue b}if(32&w){e.mode=d;break a}a.msg="invalid literal/length code",e.mode=c;break 
a}x=65535&v,w&=15,w&&(w>q&&(p+=B[f++]<<q,q+=8),x+=p&(1<<w)-1,p>>>=w,q-=w),15>q&&(p+=B[f++]<<q,q+=8,p+=B[f++]<<q,q+=8),v=s[p&u];c:for(;;){if(w=v>>>24,p>>>=w,q-=w,w=v>>>16&255,!(16&w)){if(0===(64&w)){v=s[(65535&v)+(p&(1<<w)-1)];continue c}a.msg="invalid distance code",e.mode=c;break a}if(y=65535&v,w&=15,w>q&&(p+=B[f++]<<q,q+=8,w>q&&(p+=B[f++]<<q,q+=8)),y+=p&(1<<w)-1,y>k){a.msg="invalid distance too far back",e.mode=c;break a}if(p>>>=w,q-=w,w=h-i,y>w){if(w=y-w,w>m&&e.sane){a.msg="invalid distance too far back",e.mode=c;break a}if(z=0,A=o,0===n){if(z+=l-w,x>w){x-=w;do C[h++]=o[z++];while(--w);z=h-y,A=C}}else if(w>n){if(z+=l+n-w,w-=n,x>w){x-=w;do C[h++]=o[z++];while(--w);if(z=0,x>n){w=n,x-=w;do C[h++]=o[z++];while(--w);z=h-y,A=C}}}else if(z+=n-w,x>w){x-=w;do C[h++]=o[z++];while(--w);z=h-y,A=C}for(;x>2;)C[h++]=A[z++],C[h++]=A[z++],C[h++]=A[z++],x-=3;x&&(C[h++]=A[z++],x>1&&(C[h++]=A[z++]))}else{z=h-y;do C[h++]=C[z++],C[h++]=C[z++],C[h++]=C[z++],x-=3;while(x>2);x&&(C[h++]=C[z++],x>1&&(C[h++]=C[z++]))}break}}break}}while(g>f&&j>h);x=q>>3,f-=x,q-=x<<3,p&=(1<<q)-1,a.next_in=f,a.next_out=h,a.avail_in=g>f?5+(g-f):5-(f-g),a.avail_out=j>h?257+(j-h):257-(h-j),e.hold=p,e.bits=q}},{}],35:[function(a,b,c){"use strict";function d(a){return(a>>>24&255)+(a>>>8&65280)+((65280&a)<<8)+((255&a)<<24)}function e(){this.mode=0,this.last=!1,this.wrap=0,this.havedict=!1,this.flags=0,this.dmax=0,this.check=0,this.total=0,this.head=null,this.wbits=0,this.wsize=0,this.whave=0,this.wnext=0,this.window=null,this.hold=0,this.bits=0,this.length=0,this.offset=0,this.extra=0,this.lencode=null,this.distcode=null,this.lenbits=0,this.distbits=0,this.ncode=0,this.nlen=0,this.ndist=0,this.have=0,this.next=null,this.lens=new r.Buf16(320),this.work=new r.Buf16(288),this.lendyn=null,this.distdyn=null,this.sane=0,this.back=0,this.was=0}function f(a){var b;return 
a&&a.state?(b=a.state,a.total_in=a.total_out=b.total=0,a.msg="",b.wrap&&(a.adler=1&b.wrap),b.mode=K,b.last=0,b.havedict=0,b.dmax=32768,b.head=null,b.hold=0,b.bits=0,b.lencode=b.lendyn=new r.Buf32(ob),b.distcode=b.distdyn=new r.Buf32(pb),b.sane=1,b.back=-1,C):F}function g(a){var b;return a&&a.state?(b=a.state,b.wsize=0,b.whave=0,b.wnext=0,f(a)):F}function h(a,b){var c,d;return a&&a.state?(d=a.state,0>b?(c=0,b=-b):(c=(b>>4)+1,48>b&&(b&=15)),b&&(8>b||b>15)?F:(null!==d.window&&d.wbits!==b&&(d.window=null),d.wrap=c,d.wbits=b,g(a))):F}function i(a,b){var c,d;return a?(d=new e,a.state=d,d.window=null,c=h(a,b),c!==C&&(a.state=null),c):F}function j(a){return i(a,rb)}function k(a){if(sb){var b;for(p=new r.Buf32(512),q=new r.Buf32(32),b=0;144>b;)a.lens[b++]=8;for(;256>b;)a.lens[b++]=9;for(;280>b;)a.lens[b++]=7;for(;288>b;)a.lens[b++]=8;for(v(x,a.lens,0,288,p,0,a.work,{bits:9}),b=0;32>b;)a.lens[b++]=5;v(y,a.lens,0,32,q,0,a.work,{bits:5}),sb=!1}a.lencode=p,a.lenbits=9,a.distcode=q,a.distbits=5}function l(a,b,c,d){var e,f=a.state;return null===f.window&&(f.wsize=1<<f.wbits,f.wnext=0,f.whave=0,f.window=new r.Buf8(f.wsize)),d>=f.wsize?(r.arraySet(f.window,b,c-f.wsize,f.wsize,0),f.wnext=0,f.whave=f.wsize):(e=f.wsize-f.wnext,e>d&&(e=d),r.arraySet(f.window,b,c-d,e,f.wnext),d-=e,d?(r.arraySet(f.window,b,c-d,d,0),f.wnext=d,f.whave=f.wsize):(f.wnext+=e,f.wnext===f.wsize&&(f.wnext=0),f.whave<f.wsize&&(f.whave+=e))),0}function m(a,b){var c,e,f,g,h,i,j,m,n,o,p,q,ob,pb,qb,rb,sb,tb,ub,vb,wb,xb,yb,zb,Ab=0,Bb=new r.Buf8(4),Cb=[16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15];if(!a||!a.state||!a.output||!a.input&&0!==a.avail_in)return F;c=a.state,c.mode===V&&(c.mode=W),h=a.next_out,f=a.output,j=a.avail_out,g=a.next_in,e=a.input,i=a.avail_in,m=c.hold,n=c.bits,o=i,p=j,xb=C;a:for(;;)switch(c.mode){case K:if(0===c.wrap){c.mode=W;break}for(;16>n;){if(0===i)break 
a;i--,m+=e[g++]<<n,n+=8}if(2&c.wrap&&35615===m){c.check=0,Bb[0]=255&m,Bb[1]=m>>>8&255,c.check=t(c.check,Bb,2,0),m=0,n=0,c.mode=L;break}if(c.flags=0,c.head&&(c.head.done=!1),!(1&c.wrap)||(((255&m)<<8)+(m>>8))%31){a.msg="incorrect header check",c.mode=lb;break}if((15&m)!==J){a.msg="unknown compression method",c.mode=lb;break}if(m>>>=4,n-=4,wb=(15&m)+8,0===c.wbits)c.wbits=wb;else if(wb>c.wbits){a.msg="invalid window size",c.mode=lb;break}c.dmax=1<<wb,a.adler=c.check=1,c.mode=512&m?T:V,m=0,n=0;break;case L:for(;16>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(c.flags=m,(255&c.flags)!==J){a.msg="unknown compression method",c.mode=lb;break}if(57344&c.flags){a.msg="unknown header flags set",c.mode=lb;break}c.head&&(c.head.text=m>>8&1),512&c.flags&&(Bb[0]=255&m,Bb[1]=m>>>8&255,c.check=t(c.check,Bb,2,0)),m=0,n=0,c.mode=M;case M:for(;32>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}c.head&&(c.head.time=m),512&c.flags&&(Bb[0]=255&m,Bb[1]=m>>>8&255,Bb[2]=m>>>16&255,Bb[3]=m>>>24&255,c.check=t(c.check,Bb,4,0)),m=0,n=0,c.mode=N;case N:for(;16>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}c.head&&(c.head.xflags=255&m,c.head.os=m>>8),512&c.flags&&(Bb[0]=255&m,Bb[1]=m>>>8&255,c.check=t(c.check,Bb,2,0)),m=0,n=0,c.mode=O;case O:if(1024&c.flags){for(;16>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}c.length=m,c.head&&(c.head.extra_len=m),512&c.flags&&(Bb[0]=255&m,Bb[1]=m>>>8&255,c.check=t(c.check,Bb,2,0)),m=0,n=0}else c.head&&(c.head.extra=null);c.mode=P;case P:if(1024&c.flags&&(q=c.length,q>i&&(q=i),q&&(c.head&&(wb=c.head.extra_len-c.length,c.head.extra||(c.head.extra=new Array(c.head.extra_len)),r.arraySet(c.head.extra,e,g,q,wb)),512&c.flags&&(c.check=t(c.check,e,q,g)),i-=q,g+=q,c.length-=q),c.length))break a;c.length=0,c.mode=Q;case Q:if(2048&c.flags){if(0===i)break a;q=0;do wb=e[g+q++],c.head&&wb&&c.length<65536&&(c.head.name+=String.fromCharCode(wb));while(wb&&i>q);if(512&c.flags&&(c.check=t(c.check,e,q,g)),i-=q,g+=q,wb)break a}else c.head&&(c.head.name=null);c.length=0,c.mode=R;case 
R:if(4096&c.flags){if(0===i)break a;q=0;do wb=e[g+q++],c.head&&wb&&c.length<65536&&(c.head.comment+=String.fromCharCode(wb));while(wb&&i>q);if(512&c.flags&&(c.check=t(c.check,e,q,g)),i-=q,g+=q,wb)break a}else c.head&&(c.head.comment=null);c.mode=S;case S:if(512&c.flags){for(;16>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(m!==(65535&c.check)){a.msg="header crc mismatch",c.mode=lb;break}m=0,n=0}c.head&&(c.head.hcrc=c.flags>>9&1,c.head.done=!0),a.adler=c.check=0,c.mode=V;break;case T:for(;32>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}a.adler=c.check=d(m),m=0,n=0,c.mode=U;case U:if(0===c.havedict)return a.next_out=h,a.avail_out=j,a.next_in=g,a.avail_in=i,c.hold=m,c.bits=n,E;a.adler=c.check=1,c.mode=V;case V:if(b===A||b===B)break a;case W:if(c.last){m>>>=7&n,n-=7&n,c.mode=ib;break}for(;3>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}switch(c.last=1&m,m>>>=1,n-=1,3&m){case 0:c.mode=X;break;case 1:if(k(c),c.mode=bb,b===B){m>>>=2,n-=2;break a}break;case 2:c.mode=$;break;case 3:a.msg="invalid block type",c.mode=lb}m>>>=2,n-=2;break;case X:for(m>>>=7&n,n-=7&n;32>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if((65535&m)!==(m>>>16^65535)){a.msg="invalid stored block lengths",c.mode=lb;break}if(c.length=65535&m,m=0,n=0,c.mode=Y,b===B)break a;case Y:c.mode=Z;case Z:if(q=c.length){if(q>i&&(q=i),q>j&&(q=j),0===q)break a;r.arraySet(f,e,g,q,h),i-=q,g+=q,j-=q,h+=q,c.length-=q;break}c.mode=V;break;case $:for(;14>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(c.nlen=(31&m)+257,m>>>=5,n-=5,c.ndist=(31&m)+1,m>>>=5,n-=5,c.ncode=(15&m)+4,m>>>=4,n-=4,c.nlen>286||c.ndist>30){a.msg="too many length or distance symbols",c.mode=lb;break}c.have=0,c.mode=_;case _:for(;c.have<c.ncode;){for(;3>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}c.lens[Cb[c.have++]]=7&m,m>>>=3,n-=3}for(;c.have<19;)c.lens[Cb[c.have++]]=0;if(c.lencode=c.lendyn,c.lenbits=7,yb={bits:c.lenbits},xb=v(w,c.lens,0,19,c.lencode,0,c.work,yb),c.lenbits=yb.bits,xb){a.msg="invalid code lengths set",c.mode=lb;break}c.have=0,c.mode=ab;case 
ab:for(;c.have<c.nlen+c.ndist;){for(;Ab=c.lencode[m&(1<<c.lenbits)-1],qb=Ab>>>24,rb=Ab>>>16&255,sb=65535&Ab,!(n>=qb);){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(16>sb)m>>>=qb,n-=qb,c.lens[c.have++]=sb;else{if(16===sb){for(zb=qb+2;zb>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(m>>>=qb,n-=qb,0===c.have){a.msg="invalid bit length repeat",c.mode=lb;break}wb=c.lens[c.have-1],q=3+(3&m),m>>>=2,n-=2}else if(17===sb){for(zb=qb+3;zb>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}m>>>=qb,n-=qb,wb=0,q=3+(7&m),m>>>=3,n-=3}else{for(zb=qb+7;zb>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}m>>>=qb,n-=qb,wb=0,q=11+(127&m),m>>>=7,n-=7}if(c.have+q>c.nlen+c.ndist){a.msg="invalid bit length repeat",c.mode=lb;break}for(;q--;)c.lens[c.have++]=wb}}if(c.mode===lb)break;if(0===c.lens[256]){a.msg="invalid code -- missing end-of-block",c.mode=lb;break}if(c.lenbits=9,yb={bits:c.lenbits},xb=v(x,c.lens,0,c.nlen,c.lencode,0,c.work,yb),c.lenbits=yb.bits,xb){a.msg="invalid literal/lengths set",c.mode=lb;break}if(c.distbits=6,c.distcode=c.distdyn,yb={bits:c.distbits},xb=v(y,c.lens,c.nlen,c.ndist,c.distcode,0,c.work,yb),c.distbits=yb.bits,xb){a.msg="invalid distances set",c.mode=lb;break}if(c.mode=bb,b===B)break a;case bb:c.mode=cb;case cb:if(i>=6&&j>=258){a.next_out=h,a.avail_out=j,a.next_in=g,a.avail_in=i,c.hold=m,c.bits=n,u(a,p),h=a.next_out,f=a.output,j=a.avail_out,g=a.next_in,e=a.input,i=a.avail_in,m=c.hold,n=c.bits,c.mode===V&&(c.back=-1);break}for(c.back=0;Ab=c.lencode[m&(1<<c.lenbits)-1],qb=Ab>>>24,rb=Ab>>>16&255,sb=65535&Ab,!(n>=qb);){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(rb&&0===(240&rb)){for(tb=qb,ub=rb,vb=sb;Ab=c.lencode[vb+((m&(1<<tb+ub)-1)>>tb)],qb=Ab>>>24,rb=Ab>>>16&255,sb=65535&Ab,!(n>=tb+qb);){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}m>>>=tb,n-=tb,c.back+=tb}if(m>>>=qb,n-=qb,c.back+=qb,c.length=sb,0===rb){c.mode=hb;break}if(32&rb){c.back=-1,c.mode=V;break}if(64&rb){a.msg="invalid literal/length code",c.mode=lb;break}c.extra=15&rb,c.mode=db;case 
db:if(c.extra){for(zb=c.extra;zb>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}c.length+=m&(1<<c.extra)-1,m>>>=c.extra,n-=c.extra,c.back+=c.extra}c.was=c.length,c.mode=eb;case eb:for(;Ab=c.distcode[m&(1<<c.distbits)-1],qb=Ab>>>24,rb=Ab>>>16&255,sb=65535&Ab,!(n>=qb);){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(0===(240&rb)){for(tb=qb,ub=rb,vb=sb;Ab=c.distcode[vb+((m&(1<<tb+ub)-1)>>tb)],qb=Ab>>>24,rb=Ab>>>16&255,sb=65535&Ab,!(n>=tb+qb);){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}m>>>=tb,n-=tb,c.back+=tb}if(m>>>=qb,n-=qb,c.back+=qb,64&rb){a.msg="invalid distance code",c.mode=lb;break}c.offset=sb,c.extra=15&rb,c.mode=fb;case fb:if(c.extra){for(zb=c.extra;zb>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}c.offset+=m&(1<<c.extra)-1,m>>>=c.extra,n-=c.extra,c.back+=c.extra}if(c.offset>c.dmax){a.msg="invalid distance too far back",c.mode=lb;break}c.mode=gb;case gb:if(0===j)break a;if(q=p-j,c.offset>q){if(q=c.offset-q,q>c.whave&&c.sane){a.msg="invalid distance too far back",c.mode=lb;break}q>c.wnext?(q-=c.wnext,ob=c.wsize-q):ob=c.wnext-q,q>c.length&&(q=c.length),pb=c.window}else pb=f,ob=h-c.offset,q=c.length;q>j&&(q=j),j-=q,c.length-=q;do f[h++]=pb[ob++];while(--q);0===c.length&&(c.mode=cb);break;case hb:if(0===j)break a;f[h++]=c.length,j--,c.mode=cb;break;case ib:if(c.wrap){for(;32>n;){if(0===i)break a;i--,m|=e[g++]<<n,n+=8}if(p-=j,a.total_out+=p,c.total+=p,p&&(a.adler=c.check=c.flags?t(c.check,f,p,h-p):s(c.check,f,p,h-p)),p=j,(c.flags?m:d(m))!==c.check){a.msg="incorrect data check",c.mode=lb;break}m=0,n=0}c.mode=jb;case jb:if(c.wrap&&c.flags){for(;32>n;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(m!==(4294967295&c.total)){a.msg="incorrect length check",c.mode=lb;break}m=0,n=0}c.mode=kb;case kb:xb=D;break a;case lb:xb=G;break a;case mb:return H;case nb:default:return F}return 
a.next_out=h,a.avail_out=j,a.next_in=g,a.avail_in=i,c.hold=m,c.bits=n,(c.wsize||p!==a.avail_out&&c.mode<lb&&(c.mode<ib||b!==z))&&l(a,a.output,a.next_out,p-a.avail_out)?(c.mode=mb,H):(o-=a.avail_in,p-=a.avail_out,a.total_in+=o,a.total_out+=p,c.total+=p,c.wrap&&p&&(a.adler=c.check=c.flags?t(c.check,f,p,a.next_out-p):s(c.check,f,p,a.next_out-p)),a.data_type=c.bits+(c.last?64:0)+(c.mode===V?128:0)+(c.mode===bb||c.mode===Y?256:0),(0===o&&0===p||b===z)&&xb===C&&(xb=I),xb)}function n(a){if(!a||!a.state)return F;var b=a.state;return b.window&&(b.window=null),a.state=null,C}function o(a,b){var c;return a&&a.state?(c=a.state,0===(2&c.wrap)?F:(c.head=b,b.done=!1,C)):F}var p,q,r=a("../utils/common"),s=a("./adler32"),t=a("./crc32"),u=a("./inffast"),v=a("./inftrees"),w=0,x=1,y=2,z=4,A=5,B=6,C=0,D=1,E=2,F=-2,G=-3,H=-4,I=-5,J=8,K=1,L=2,M=3,N=4,O=5,P=6,Q=7,R=8,S=9,T=10,U=11,V=12,W=13,X=14,Y=15,Z=16,$=17,_=18,ab=19,bb=20,cb=21,db=22,eb=23,fb=24,gb=25,hb=26,ib=27,jb=28,kb=29,lb=30,mb=31,nb=32,ob=852,pb=592,qb=15,rb=qb,sb=!0;c.inflateReset=g,c.inflateReset2=h,c.inflateResetKeep=f,c.inflateInit=j,c.inflateInit2=i,c.inflate=m,c.inflateEnd=n,c.inflateGetHeader=o,c.inflateInfo="pako inflate (from Nodeca project)"},{"../utils/common":27,"./adler32":29,"./crc32":31,"./inffast":34,"./inftrees":36}],36:[function(a,b){"use strict";var c=a("../utils/common"),d=15,e=852,f=592,g=0,h=1,i=2,j=[3,4,5,6,7,8,9,10,11,13,15,17,19,23,27,31,35,43,51,59,67,83,99,115,131,163,195,227,258,0,0],k=[16,16,16,16,16,16,16,16,17,17,17,17,18,18,18,18,19,19,19,19,20,20,20,20,21,21,21,21,16,72,78],l=[1,2,3,4,5,7,9,13,17,25,33,49,65,97,129,193,257,385,513,769,1025,1537,2049,3073,4097,6145,8193,12289,16385,24577,0,0],m=[16,16,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,24,24,25,25,26,26,27,27,28,28,29,29,64,64];b.exports=function(a,b,n,o,p,q,r,s){var t,u,v,w,x,y,z,A,B,C=s.bits,D=0,E=0,F=0,G=0,H=0,I=0,J=0,K=0,L=0,M=0,N=null,O=0,P=new c.Buf16(d+1),Q=new 
c.Buf16(d+1),R=null,S=0;for(D=0;d>=D;D++)P[D]=0;for(E=0;o>E;E++)P[b[n+E]]++;for(H=C,G=d;G>=1&&0===P[G];G--);if(H>G&&(H=G),0===G)return p[q++]=20971520,p[q++]=20971520,s.bits=1,0;for(F=1;G>F&&0===P[F];F++);for(F>H&&(H=F),K=1,D=1;d>=D;D++)if(K<<=1,K-=P[D],0>K)return-1;if(K>0&&(a===g||1!==G))return-1;for(Q[1]=0,D=1;d>D;D++)Q[D+1]=Q[D]+P[D];for(E=0;o>E;E++)0!==b[n+E]&&(r[Q[b[n+E]]++]=E);if(a===g?(N=R=r,y=19):a===h?(N=j,O-=257,R=k,S-=257,y=256):(N=l,R=m,y=-1),M=0,E=0,D=F,x=q,I=H,J=0,v=-1,L=1<<H,w=L-1,a===h&&L>e||a===i&&L>f)return 1;for(var T=0;;){T++,z=D-J,r[E]<y?(A=0,B=r[E]):r[E]>y?(A=R[S+r[E]],B=N[O+r[E]]):(A=96,B=0),t=1<<D-J,u=1<<I,F=u;do u-=t,p[x+(M>>J)+u]=z<<24|A<<16|B|0;while(0!==u);for(t=1<<D-1;M&t;)t>>=1;if(0!==t?(M&=t-1,M+=t):M=0,E++,0===--P[D]){if(D===G)break;D=b[n+r[E]]}if(D>H&&(M&w)!==v){for(0===J&&(J=H),x+=F,I=D-J,K=1<<I;G>I+J&&(K-=P[I+J],!(0>=K));)I++,K<<=1;if(L+=1<<I,a===h&&L>e||a===i&&L>f)return 1;v=M&w,p[v]=H<<24|I<<16|x-q|0}}return 0!==M&&(p[x+M]=D-J<<24|64<<16|0),s.bits=H,0}},{"../utils/common":27}],37:[function(a,b){"use strict";b.exports={2:"need dictionary",1:"stream end",0:"","-1":"file error","-2":"stream error","-3":"data error","-4":"insufficient memory","-5":"buffer error","-6":"incompatible version"}},{}],38:[function(a,b,c){"use strict";function d(a){for(var b=a.length;--b>=0;)a[b]=0}function e(a){return 256>a?gb[a]:gb[256+(a>>>7)]}function f(a,b){a.pending_buf[a.pending++]=255&b,a.pending_buf[a.pending++]=b>>>8&255}function g(a,b,c){a.bi_valid>V-c?(a.bi_buf|=b<<a.bi_valid&65535,f(a,a.bi_buf),a.bi_buf=b>>V-a.bi_valid,a.bi_valid+=c-V):(a.bi_buf|=b<<a.bi_valid&65535,a.bi_valid+=c)}function h(a,b,c){g(a,c[2*b],c[2*b+1])}function i(a,b){var c=0;do c|=1&a,a>>>=1,c<<=1;while(--b>0);return c>>>1}function j(a){16===a.bi_valid?(f(a,a.bi_buf),a.bi_buf=0,a.bi_valid=0):a.bi_valid>=8&&(a.pending_buf[a.pending++]=255&a.bi_buf,a.bi_buf>>=8,a.bi_valid-=8)}function k(a,b){var 
c,d,e,f,g,h,i=b.dyn_tree,j=b.max_code,k=b.stat_desc.static_tree,l=b.stat_desc.has_stree,m=b.stat_desc.extra_bits,n=b.stat_desc.extra_base,o=b.stat_desc.max_length,p=0;for(f=0;U>=f;f++)a.bl_count[f]=0;for(i[2*a.heap[a.heap_max]+1]=0,c=a.heap_max+1;T>c;c++)d=a.heap[c],f=i[2*i[2*d+1]+1]+1,f>o&&(f=o,p++),i[2*d+1]=f,d>j||(a.bl_count[f]++,g=0,d>=n&&(g=m[d-n]),h=i[2*d],a.opt_len+=h*(f+g),l&&(a.static_len+=h*(k[2*d+1]+g)));if(0!==p){do{for(f=o-1;0===a.bl_count[f];)f--;a.bl_count[f]--,a.bl_count[f+1]+=2,a.bl_count[o]--,p-=2}while(p>0);for(f=o;0!==f;f--)for(d=a.bl_count[f];0!==d;)e=a.heap[--c],e>j||(i[2*e+1]!==f&&(a.opt_len+=(f-i[2*e+1])*i[2*e],i[2*e+1]=f),d--)}}function l(a,b,c){var d,e,f=new Array(U+1),g=0;for(d=1;U>=d;d++)f[d]=g=g+c[d-1]<<1;for(e=0;b>=e;e++){var h=a[2*e+1];0!==h&&(a[2*e]=i(f[h]++,h))}}function m(){var a,b,c,d,e,f=new Array(U+1);for(c=0,d=0;O-1>d;d++)for(ib[d]=c,a=0;a<1<<_[d];a++)hb[c++]=d;for(hb[c-1]=d,e=0,d=0;16>d;d++)for(jb[d]=e,a=0;a<1<<ab[d];a++)gb[e++]=d;for(e>>=7;R>d;d++)for(jb[d]=e<<7,a=0;a<1<<ab[d]-7;a++)gb[256+e++]=d;for(b=0;U>=b;b++)f[b]=0;for(a=0;143>=a;)eb[2*a+1]=8,a++,f[8]++;for(;255>=a;)eb[2*a+1]=9,a++,f[9]++;for(;279>=a;)eb[2*a+1]=7,a++,f[7]++;for(;287>=a;)eb[2*a+1]=8,a++,f[8]++;for(l(eb,Q+1,f),a=0;R>a;a++)fb[2*a+1]=5,fb[2*a]=i(a,5);kb=new nb(eb,_,P+1,Q,U),lb=new nb(fb,ab,0,R,U),mb=new nb(new Array(0),bb,0,S,W)}function n(a){var b;for(b=0;Q>b;b++)a.dyn_ltree[2*b]=0;for(b=0;R>b;b++)a.dyn_dtree[2*b]=0;for(b=0;S>b;b++)a.bl_tree[2*b]=0;a.dyn_ltree[2*X]=1,a.opt_len=a.static_len=0,a.last_lit=a.matches=0}function o(a){a.bi_valid>8?f(a,a.bi_buf):a.bi_valid>0&&(a.pending_buf[a.pending++]=a.bi_buf),a.bi_buf=0,a.bi_valid=0}function p(a,b,c,d){o(a),d&&(f(a,c),f(a,~c)),E.arraySet(a.pending_buf,a.window,b,c,a.pending),a.pending+=c}function q(a,b,c,d){var e=2*b,f=2*c;return a[e]<a[f]||a[e]===a[f]&&d[b]<=d[c]}function r(a,b,c){for(var 
d=a.heap[c],e=c<<1;e<=a.heap_len&&(e<a.heap_len&&q(b,a.heap[e+1],a.heap[e],a.depth)&&e++,!q(b,d,a.heap[e],a.depth));)a.heap[c]=a.heap[e],c=e,e<<=1;a.heap[c]=d}function s(a,b,c){var d,f,i,j,k=0;if(0!==a.last_lit)do d=a.pending_buf[a.d_buf+2*k]<<8|a.pending_buf[a.d_buf+2*k+1],f=a.pending_buf[a.l_buf+k],k++,0===d?h(a,f,b):(i=hb[f],h(a,i+P+1,b),j=_[i],0!==j&&(f-=ib[i],g(a,f,j)),d--,i=e(d),h(a,i,c),j=ab[i],0!==j&&(d-=jb[i],g(a,d,j)));while(k<a.last_lit);h(a,X,b)}function t(a,b){var c,d,e,f=b.dyn_tree,g=b.stat_desc.static_tree,h=b.stat_desc.has_stree,i=b.stat_desc.elems,j=-1;for(a.heap_len=0,a.heap_max=T,c=0;i>c;c++)0!==f[2*c]?(a.heap[++a.heap_len]=j=c,a.depth[c]=0):f[2*c+1]=0;for(;a.heap_len<2;)e=a.heap[++a.heap_len]=2>j?++j:0,f[2*e]=1,a.depth[e]=0,a.opt_len--,h&&(a.static_len-=g[2*e+1]);for(b.max_code=j,c=a.heap_len>>1;c>=1;c--)r(a,f,c);e=i;do c=a.heap[1],a.heap[1]=a.heap[a.heap_len--],r(a,f,1),d=a.heap[1],a.heap[--a.heap_max]=c,a.heap[--a.heap_max]=d,f[2*e]=f[2*c]+f[2*d],a.depth[e]=(a.depth[c]>=a.depth[d]?a.depth[c]:a.depth[d])+1,f[2*c+1]=f[2*d+1]=e,a.heap[1]=e++,r(a,f,1);while(a.heap_len>=2);a.heap[--a.heap_max]=a.heap[1],k(a,b),l(f,j,a.bl_count)}function u(a,b,c){var d,e,f=-1,g=b[1],h=0,i=7,j=4;for(0===g&&(i=138,j=3),b[2*(c+1)+1]=65535,d=0;c>=d;d++)e=g,g=b[2*(d+1)+1],++h<i&&e===g||(j>h?a.bl_tree[2*e]+=h:0!==e?(e!==f&&a.bl_tree[2*e]++,a.bl_tree[2*Y]++):10>=h?a.bl_tree[2*Z]++:a.bl_tree[2*$]++,h=0,f=e,0===g?(i=138,j=3):e===g?(i=6,j=3):(i=7,j=4))}function v(a,b,c){var d,e,f=-1,i=b[1],j=0,k=7,l=4;for(0===i&&(k=138,l=3),d=0;c>=d;d++)if(e=i,i=b[2*(d+1)+1],!(++j<k&&e===i)){if(l>j){do h(a,e,a.bl_tree);while(0!==--j)}else 0!==e?(e!==f&&(h(a,e,a.bl_tree),j--),h(a,Y,a.bl_tree),g(a,j-3,2)):10>=j?(h(a,Z,a.bl_tree),g(a,j-3,3)):(h(a,$,a.bl_tree),g(a,j-11,7));j=0,f=e,0===i?(k=138,l=3):e===i?(k=6,l=3):(k=7,l=4)}}function w(a){var b;for(u(a,a.dyn_ltree,a.l_desc.max_code),u(a,a.dyn_dtree,a.d_desc.max_code),t(a,a.bl_desc),b=S-1;b>=3&&0===a.bl_tree[2*cb[b]+1];b--);return 
a.opt_len+=3*(b+1)+5+5+4,b}function x(a,b,c,d){var e;for(g(a,b-257,5),g(a,c-1,5),g(a,d-4,4),e=0;d>e;e++)g(a,a.bl_tree[2*cb[e]+1],3);v(a,a.dyn_ltree,b-1),v(a,a.dyn_dtree,c-1)}function y(a){var b,c=4093624447;for(b=0;31>=b;b++,c>>>=1)if(1&c&&0!==a.dyn_ltree[2*b])return G;if(0!==a.dyn_ltree[18]||0!==a.dyn_ltree[20]||0!==a.dyn_ltree[26])return H;for(b=32;P>b;b++)if(0!==a.dyn_ltree[2*b])return H;return G}function z(a){pb||(m(),pb=!0),a.l_desc=new ob(a.dyn_ltree,kb),a.d_desc=new ob(a.dyn_dtree,lb),a.bl_desc=new ob(a.bl_tree,mb),a.bi_buf=0,a.bi_valid=0,n(a)}function A(a,b,c,d){g(a,(J<<1)+(d?1:0),3),p(a,b,c,!0)}function B(a){g(a,K<<1,3),h(a,X,eb),j(a)}function C(a,b,c,d){var e,f,h=0;a.level>0?(a.strm.data_type===I&&(a.strm.data_type=y(a)),t(a,a.l_desc),t(a,a.d_desc),h=w(a),e=a.opt_len+3+7>>>3,f=a.static_len+3+7>>>3,e>=f&&(e=f)):e=f=c+5,e>=c+4&&-1!==b?A(a,b,c,d):a.strategy===F||f===e?(g(a,(K<<1)+(d?1:0),3),s(a,eb,fb)):(g(a,(L<<1)+(d?1:0),3),x(a,a.l_desc.max_code+1,a.d_desc.max_code+1,h+1),s(a,a.dyn_ltree,a.dyn_dtree)),n(a),d&&o(a)}function D(a,b,c){return a.pending_buf[a.d_buf+2*a.last_lit]=b>>>8&255,a.pending_buf[a.d_buf+2*a.last_lit+1]=255&b,a.pending_buf[a.l_buf+a.last_lit]=255&c,a.last_lit++,0===b?a.dyn_ltree[2*c]++:(a.matches++,b--,a.dyn_ltree[2*(hb[c]+P+1)]++,a.dyn_dtree[2*e(b)]++),a.last_lit===a.lit_bufsize-1}var E=a("../utils/common"),F=4,G=0,H=1,I=2,J=0,K=1,L=2,M=3,N=258,O=29,P=256,Q=P+1+O,R=30,S=19,T=2*Q+1,U=15,V=16,W=7,X=256,Y=16,Z=17,$=18,_=[0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,0],ab=[0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13],bb=[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,3,7],cb=[16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15],db=512,eb=new Array(2*(Q+2));d(eb);var fb=new Array(2*R);d(fb);var gb=new Array(db);d(gb);var hb=new Array(N-M+1);d(hb);var ib=new Array(O);d(ib);var jb=new Array(R);d(jb);var 
kb,lb,mb,nb=function(a,b,c,d,e){this.static_tree=a,this.extra_bits=b,this.extra_base=c,this.elems=d,this.max_length=e,this.has_stree=a&&a.length},ob=function(a,b){this.dyn_tree=a,this.max_code=0,this.stat_desc=b},pb=!1;c._tr_init=z,c._tr_stored_block=A,c._tr_flush_block=C,c._tr_tally=D,c._tr_align=B},{"../utils/common":27}],39:[function(a,b){"use strict";function c(){this.input=null,this.next_in=0,this.avail_in=0,this.total_in=0,this.output=null,this.next_out=0,this.avail_out=0,this.total_out=0,this.msg="",this.state=null,this.data_type=2,this.adler=0}b.exports=c},{}]},{},[9])(9)});'use strict';if(tr.isVinn){global.JSZip=global.window.JSZip;global.window=undefined;}else if(tr.isNode){const jsZipAbsPath=HTMLImportsLoader.hrefToAbsolutePath('/jszip.min.js');const jsZipModule=require(jsZipAbsPath);global.JSZip=jsZipModule;}'use strict';tr.exportTo('tr.e.importer',function(){function ZipImporter(model,eventData){if(eventData instanceof ArrayBuffer){eventData=new Uint8Array(eventData);} this.model_=model;this.eventData_=eventData;} ZipImporter.canImport=function(eventData){let header;if(eventData instanceof ArrayBuffer){header=new Uint8Array(eventData.slice(0,2));}else if(typeof(eventData)==='string'||eventData instanceof String){header=[eventData.charCodeAt(0),eventData.charCodeAt(1)];}else{return false;} return header[0]==='P'.charCodeAt(0)&&header[1]==='K'.charCodeAt(0);};ZipImporter.prototype={__proto__:tr.importer.Importer.prototype,get importerName(){return'ZipImporter';},isTraceDataContainer(){return true;},extractSubtraces(){const zip=new JSZip(this.eventData_);const subtraces=[];for(const idx in zip.files){subtraces.push(zip.files[idx].asBinary());} @@ -5549,7 +5549,7 @@ XMarkerAnnotationView.prototype={__proto__:tr.ui.annotations.AnnotationView.prototype,draw(ctx){const dt=this.viewport_.currentDisplayTransform;const 
viewX=dt.xWorldToView(this.annotation_.timestamp);ctx.beginPath();tr.ui.b.drawLine(ctx,viewX,0,viewX,ctx.canvas.height);ctx.strokeStyle=this.annotation_.strokeStyle;ctx.stroke();}};return{XMarkerAnnotationView,};});'use strict';tr.exportTo('tr.model',function(){function XMarkerAnnotation(timestamp){tr.model.Annotation.apply(this,arguments);this.timestamp=timestamp;this.strokeStyle='rgba(0, 0, 255, 0.5)';} XMarkerAnnotation.fromDict=function(dict){return new XMarkerAnnotation(dict.args.timestamp);};XMarkerAnnotation.prototype={__proto__:tr.model.Annotation.prototype,toDict(){return{typeName:'xmarker',args:{timestamp:this.timestamp}};},createView_(viewport){return new tr.ui.annotations.XMarkerAnnotationView(viewport,this);}};tr.model.Annotation.register(XMarkerAnnotation,{typeName:'xmarker'});return{XMarkerAnnotation,};});'use strict';tr.exportTo('tr.e.importer',function(){const Base64=tr.b.Base64;const deepCopy=tr.b.deepCopy;const ColorScheme=tr.b.ColorScheme;const HeapDumpTraceEventImporter=tr.e.importer.HeapDumpTraceEventImporter;const LegacyHeapDumpTraceEventImporter=tr.e.importer.LegacyHeapDumpTraceEventImporter;const StreamingEventExpander=tr.e.importer.StreamingEventExpander;const ProfilingDictionaryReader=tr.e.importer.ProfilingDictionaryReader;function getEventColor(event,opt_customName){if(event.cname){return ColorScheme.getColorIdForReservedName(event.cname);}else if(opt_customName||event.name){return ColorScheme.getColorIdForGeneralPurposeString(opt_customName||event.name);}} function isLegacyChromeClockSyncEvent(event){return event.name!==undefined&&event.name.startsWith(LEGACY_CHROME_CLOCK_SYNC_EVENT_NAME_PREFIX)&&((event.ph==='S')||(event.ph==='F'));} -const PRODUCER='producer';const CONSUMER='consumer';const STEP='step';const BACKGROUND=tr.model.ContainerMemoryDump.LevelOfDetail.BACKGROUND;const LIGHT=tr.model.ContainerMemoryDump.LevelOfDetail.LIGHT;const DETAILED=tr.model.ContainerMemoryDump.LevelOfDetail.DETAILED;const 
MEMORY_DUMP_LEVEL_OF_DETAIL_ORDER=[undefined,BACKGROUND,LIGHT,DETAILED];const GLOBAL_MEMORY_ALLOCATOR_DUMP_PREFIX='global/';const LEGACY_CHROME_CLOCK_SYNC_EVENT_NAME_PREFIX='ClockSyncEvent.';const BYTE_STAT_NAME_MAP={'pc':'privateCleanResident','pd':'privateDirtyResident','sc':'sharedCleanResident','sd':'sharedDirtyResident','pss':'proportionalResident','sw':'swapped'};const WEAK_MEMORY_ALLOCATOR_DUMP_FLAG=1<<0;const OBJECT_TYPE_NAME_PATTERNS=[{prefix:'const char *WOW::getStringWithTypeName() [T = ',suffix:']'},{prefix:'const char* WOW::getStringWithTypeName() [with T = ',suffix:']'},{prefix:'const char *__cdecl WOW::getStringWithTypeName<',suffix:'>(void)'}];const SUBTRACE_FIELDS=new Set(['powerTraceAsString','systemTraceEvents',]);const NON_METADATA_FIELDS=new Set(['displayTimeUnit','samples','stackFrames','traceAnnotations','traceEvents',...SUBTRACE_FIELDS]);function TraceEventImporter(model,eventData){this.hasEvents_=undefined;this.importPriority=1;this.model_=model;this.events_=undefined;this.sampleEvents_=undefined;this.stackFrameEvents_=undefined;this.stackFrameTree_=new tr.model.ProfileTree();this.subtraces_=[];this.eventsWereFromString_=false;this.softwareMeasuredCpuCount_=undefined;this.allAsyncEvents_=[];this.allFlowEvents_=[];this.allObjectEvents_=[];this.contextProcessorPerThread={};this.traceEventSampleStackFramesByName_={};this.v8ProcessCodeMaps_={};this.v8ProcessRootStackFrame_={};this.v8SamplingData_=[];this.profileTrees_=new Map();this.profileInfo_=new Map();this.legacyChromeClockSyncStartEvent_=undefined;this.legacyChromeClockSyncFinishEvent_=undefined;this.allMemoryDumpEvents_={};this.heapProfileExpander=new ProfilingDictionaryReader();this.objectTypeNameMap_={};this.clockDomainId_=tr.model.ClockDomainId.UNKNOWN_CHROME_LEGACY;this.toModelTime_=undefined;if(typeof(eventData)==='string'||eventData instanceof 
String){eventData=eventData.trim();if(eventData[0]==='['){eventData=eventData.replace(/\s*,\s*$/,'');if(eventData[eventData.length-1]!==']'){eventData=eventData+']';}} +const PRODUCER='producer';const CONSUMER='consumer';const STEP='step';const BACKGROUND=tr.model.ContainerMemoryDump.LevelOfDetail.BACKGROUND;const LIGHT=tr.model.ContainerMemoryDump.LevelOfDetail.LIGHT;const DETAILED=tr.model.ContainerMemoryDump.LevelOfDetail.DETAILED;const MEMORY_DUMP_LEVEL_OF_DETAIL_ORDER=[undefined,BACKGROUND,LIGHT,DETAILED];const GLOBAL_MEMORY_ALLOCATOR_DUMP_PREFIX='global/';const LEGACY_CHROME_CLOCK_SYNC_EVENT_NAME_PREFIX='ClockSyncEvent.';const BYTE_STAT_NAME_MAP={'pc':'privateCleanResident','pd':'privateDirtyResident','sc':'sharedCleanResident','sd':'sharedDirtyResident','pss':'proportionalResident','sw':'swapped'};const WEAK_MEMORY_ALLOCATOR_DUMP_FLAG=1<<0;const OBJECT_TYPE_NAME_PATTERNS=[{prefix:'const char *WTF::getStringWithTypeName() [T = ',suffix:']'},{prefix:'const char* WTF::getStringWithTypeName() [with T = ',suffix:']'},{prefix:'const char *__cdecl WTF::getStringWithTypeName<',suffix:'>(void)'}];const SUBTRACE_FIELDS=new Set(['powerTraceAsString','systemTraceEvents',]);const NON_METADATA_FIELDS=new Set(['displayTimeUnit','samples','stackFrames','traceAnnotations','traceEvents',...SUBTRACE_FIELDS]);function TraceEventImporter(model,eventData){this.hasEvents_=undefined;this.importPriority=1;this.model_=model;this.events_=undefined;this.sampleEvents_=undefined;this.stackFrameEvents_=undefined;this.stackFrameTree_=new tr.model.ProfileTree();this.subtraces_=[];this.eventsWereFromString_=false;this.softwareMeasuredCpuCount_=undefined;this.allAsyncEvents_=[];this.allFlowEvents_=[];this.allObjectEvents_=[];this.contextProcessorPerThread={};this.traceEventSampleStackFramesByName_={};this.v8ProcessCodeMaps_={};this.v8ProcessRootStackFrame_={};this.v8SamplingData_=[];this.profileTrees_=new Map();this.profileInfo_=new 
Map();this.legacyChromeClockSyncStartEvent_=undefined;this.legacyChromeClockSyncFinishEvent_=undefined;this.allMemoryDumpEvents_={};this.heapProfileExpander=new ProfilingDictionaryReader();this.objectTypeNameMap_={};this.clockDomainId_=tr.model.ClockDomainId.UNKNOWN_CHROME_LEGACY;this.toModelTime_=undefined;if(typeof(eventData)==='string'||eventData instanceof String){eventData=eventData.trim();if(eventData[0]==='['){eventData=eventData.replace(/\s*,\s*$/,'');if(eventData[eventData.length-1]!==']'){eventData=eventData+']';}} this.events_=JSON.parse(eventData);this.eventsWereFromString_=true;}else{this.events_=eventData;} if(this.events_.traceEvents){const container=this.events_;this.events_=this.events_.traceEvents;for(const subtraceField of SUBTRACE_FIELDS){if(container[subtraceField]){this.storeSubtrace_(container[subtraceField]);}} this.storeSamples_(container.samples);this.storeStackFrames_(container.stackFrames);this.storeDisplayTimeUnit_(container.displayTimeUnit);this.storeTraceAnnotations_(container.traceAnnotations);this.storeMetadata_(container);}else if(this.events_ instanceof tr.b.TraceStream){const parser=oboe().node('{cat ph}',function(e){return oboe.drop;}).node('!.powerTraceAsString',this.storeSubtrace_.bind(this)).node('!.systemTraceEvents',this.storeSubtrace_.bind(this)).node('!.samples',this.storeSamples_.bind(this)).node('!.stackFrames',this.storeStackFrames_.bind(this)).node('!.displayTimeUnit',this.storeDisplayTimeUnit_.bind(this)).node('!.traceAnnotations',this.storeTraceAnnotations_.bind(this)).done(this.storeMetadata_.bind(this));this.events_.rewind();while(this.events_.hasData){parser.write(this.events_.readNumBytes());} @@ -6970,7 +6970,7 @@ get length(){return this._diagnostics.length;}*[Symbol.iterator](){for(const diagnostic of this._diagnostics)yield diagnostic;} asDictInto_(d){d.diagnostics=this._diagnostics.map(d=>d.asDictOrReference());} static fromDict(d){return new UnmergeableDiagnosticSet(d.diagnostics.map(d=>((typeof 
d==='string')?new tr.v.d.DiagnosticRef(d):tr.v.d.Diagnostic.fromDict(d))));}} -tr.v.d.Diagnostic.register(UnmergeableDiagnosticSet,{elementName:'tr-v-ui-unmergeable-diagnostic-set-span'});return{UnmergeableDiagnosticSet,};});'use strict';tr.exportTo('tr.v.d',function(){const RESERVED_INFOS={ANGLE_REVISIONS:{name:'angleRevisions',type:tr.v.d.GenericSet},ARCHITECTURES:{name:'architectures',type:tr.v.d.GenericSet},BENCHMARKS:{name:'benchmarks',type:tr.v.d.GenericSet},BENCHMARK_START:{name:'benchmarkStart',type:tr.v.d.DateRange},BENCHMARK_DESCRIPTIONS:{name:'benchmarkDescriptions',type:tr.v.d.GenericSet},BOTS:{name:'bots',type:tr.v.d.GenericSet},BUG_COMPONENTS:{name:'bugComponents',type:tr.v.d.GenericSet},BUILDS:{name:'builds',type:tr.v.d.GenericSet},CATAPULT_REVISIONS:{name:'catapultRevisions',type:tr.v.d.GenericSet},CHROMIUM_COMMIT_POSITIONS:{name:'chromiumCommitPositions',type:tr.v.d.GenericSet},CHROMIUM_REVISIONS:{name:'chromiumRevisions',type:tr.v.d.GenericSet},DEVICE_IDS:{name:'deviceIds',type:tr.v.d.GenericSet},GPUS:{name:'gpus',type:tr.v.d.GenericSet},GROUPING_PATH:{name:'groupingPath',type:tr.v.d.GroupingPath},IS_REFERENCE_BUILD:{name:'isReferenceBuild',type:tr.v.d.GenericSet},LABELS:{name:'labels',type:tr.v.d.GenericSet},LOG_URLS:{name:'logUrls',type:tr.v.d.GenericSet},PRIMARYS:{name:'primarys',type:tr.v.d.GenericSet},MEMORY_AMOUNTS:{name:'memoryAmounts',type:tr.v.d.GenericSet},MERGED_FROM:{name:'mergedFrom',type:tr.v.d.RelatedHistogramMap},MERGED_TO:{name:'mergedTo',type:tr.v.d.RelatedHistogramMap},OS_NAMES:{name:'osNames',type:tr.v.d.GenericSet},OS_VERSIONS:{name:'osVersions',type:tr.v.d.GenericSet},OWNERS:{name:'owners',type:tr.v.d.GenericSet},PRODUCT_VERSIONS:{name:'productVersions',type:tr.v.d.GenericSet},RELATED_NAMES:{name:'relatedNames',type:tr.v.d.GenericSet},SKIA_REVISIONS:{name:'skiaRevisions',type:tr.v.d.GenericSet},STORIES:{name:'stories',type:tr.v.d.GenericSet},STORYSET_REPEATS:{name:'storysetRepeats',type:tr.v.d.GenericSet},STORY_TAGS:{name:'sto
ryTags',type:tr.v.d.GenericSet},TAG_MAP:{name:'tagmap',type:tr.v.d.TagMap},TRACE_START:{name:'traceStart',type:tr.v.d.DateRange},TRACE_URLS:{name:'traceUrls',type:tr.v.d.GenericSet},V8_COMMIT_POSITIONS:{name:'v8CommitPositions',type:tr.v.d.DateRange},V8_REVISIONS:{name:'v8Revisions',type:tr.v.d.GenericSet},WEBRTC_REVISIONS:{name:'webrtcRevisions',type:tr.v.d.GenericSet},};const RESERVED_NAMES={};const RESERVED_NAMES_TO_TYPES=new Map();for(const[codename,info]of Object.entries(RESERVED_INFOS)){RESERVED_NAMES[codename]=info.name;if(RESERVED_NAMES_TO_TYPES.has(info.name)){throw new Error(`Duplicate reserved name "${info.name}"`);} +tr.v.d.Diagnostic.register(UnmergeableDiagnosticSet,{elementName:'tr-v-ui-unmergeable-diagnostic-set-span'});return{UnmergeableDiagnosticSet,};});'use strict';tr.exportTo('tr.v.d',function(){const RESERVED_INFOS={ANGLE_REVISIONS:{name:'angleRevisions',type:tr.v.d.GenericSet},ARCHITECTURES:{name:'architectures',type:tr.v.d.GenericSet},BENCHMARKS:{name:'benchmarks',type:tr.v.d.GenericSet},BENCHMARK_START:{name:'benchmarkStart',type:tr.v.d.DateRange},BENCHMARK_DESCRIPTIONS:{name:'benchmarkDescriptions',type:tr.v.d.GenericSet},BOTS:{name:'bots',type:tr.v.d.GenericSet},BUG_COMPONENTS:{name:'bugComponents',type:tr.v.d.GenericSet},BUILDS:{name:'builds',type:tr.v.d.GenericSet},CATAPULT_REVISIONS:{name:'catapultRevisions',type:tr.v.d.GenericSet},CHROMIUM_COMMIT_POSITIONS:{name:'chromiumCommitPositions',type:tr.v.d.GenericSet},CHROMIUM_REVISIONS:{name:'chromiumRevisions',type:tr.v.d.GenericSet},DEVICE_IDS:{name:'deviceIds',type:tr.v.d.GenericSet},GPUS:{name:'gpus',type:tr.v.d.GenericSet},GROUPING_PATH:{name:'groupingPath',type:tr.v.d.GroupingPath},IS_REFERENCE_BUILD:{name:'isReferenceBuild',type:tr.v.d.GenericSet},LABELS:{name:'labels',type:tr.v.d.GenericSet},LOG_URLS:{name:'logUrls',type:tr.v.d.GenericSet},MASTERS:{name:'masters',type:tr.v.d.GenericSet},MEMORY_AMOUNTS:{name:'memoryAmounts',type:tr.v.d.GenericSet},MERGED_FROM:{name:'mergedFrom',type:t
r.v.d.RelatedHistogramMap},MERGED_TO:{name:'mergedTo',type:tr.v.d.RelatedHistogramMap},OS_NAMES:{name:'osNames',type:tr.v.d.GenericSet},OS_VERSIONS:{name:'osVersions',type:tr.v.d.GenericSet},OWNERS:{name:'owners',type:tr.v.d.GenericSet},PRODUCT_VERSIONS:{name:'productVersions',type:tr.v.d.GenericSet},RELATED_NAMES:{name:'relatedNames',type:tr.v.d.GenericSet},SKIA_REVISIONS:{name:'skiaRevisions',type:tr.v.d.GenericSet},STORIES:{name:'stories',type:tr.v.d.GenericSet},STORYSET_REPEATS:{name:'storysetRepeats',type:tr.v.d.GenericSet},STORY_TAGS:{name:'storyTags',type:tr.v.d.GenericSet},TAG_MAP:{name:'tagmap',type:tr.v.d.TagMap},TRACE_START:{name:'traceStart',type:tr.v.d.DateRange},TRACE_URLS:{name:'traceUrls',type:tr.v.d.GenericSet},V8_COMMIT_POSITIONS:{name:'v8CommitPositions',type:tr.v.d.DateRange},V8_REVISIONS:{name:'v8Revisions',type:tr.v.d.GenericSet},WEBRTC_REVISIONS:{name:'webrtcRevisions',type:tr.v.d.GenericSet},};const RESERVED_NAMES={};const RESERVED_NAMES_TO_TYPES=new Map();for(const[codename,info]of Object.entries(RESERVED_INFOS)){RESERVED_NAMES[codename]=info.name;if(RESERVED_NAMES_TO_TYPES.has(info.name)){throw new Error(`Duplicate reserved name "${info.name}"`);} RESERVED_NAMES_TO_TYPES.set(info.name,info.type);} const RESERVED_NAMES_SET=new Set(Object.values(RESERVED_NAMES));return{RESERVED_INFOS,RESERVED_NAMES,RESERVED_NAMES_SET,RESERVED_NAMES_TO_TYPES,};});'use strict';tr.exportTo('tr.v.d',function(){class DiagnosticMap extends Map{constructor(opt_allowReservedNames){super();if(opt_allowReservedNames===undefined){opt_allowReservedNames=true;} this.allowReservedNames_=opt_allowReservedNames;} @@ -7244,10 +7244,10 @@ if(dict.callback===undefined){throw new Error('callback must be given');} this.eventType_=dict.eventType;this.keyCodes_=[];if(dict.keyCode){this.pushKeyCode_(dict.keyCode);}else if(dict.keyCodes){dict.keyCodes.forEach(this.pushKeyCode_,this);} 
this.useCapture_=!!dict.useCapture;this.callback_=dict.callback;this.thisArg_=dict.thisArg!==undefined?dict.thisArg:undefined;this.helpText_=dict.helpText!==undefined?dict.helpText:undefined;} -HotKey.prototype={get eventType(){return this.eventType_;},get keyCodes(){return this.keyCodes_;},get helpText(){return this.helpText_;},call(e){this.callback_.call(this.thisArg_,e);},pushKeyCode_(keyCode){this.keyCodes_.push(keyCode);}};return{HotKey,};});'use strict';Polymer({is:'tv-ui-b-hotkey-controller',created(){this.isAttached_=false;this.globalMode_=false;this.coupledToParentController_=undefined;this.curHost_=undefined;this.childControllers_=[];this.bubblingKeyDownHotKeys_={};this.capturingKeyDownHotKeys_={};this.bubblingKeyPressHotKeys_={};this.capturingKeyPressHotKeys_={};this.onBubblingKeyDown_=this.onKey_.bind(this,false);this.onCapturingKeyDown_=this.onKey_.bind(this,true);this.onBubblingKeyPress_=this.onKey_.bind(this,false);this.onCapturingKeyPress_=this.onKey_.bind(this,true);},attached(){this.isAttached_=true;const host=this.findHost_();if(host.__hotkeyController){throw new Error('Multiple hotkey controllers attached to this host');} +HotKey.prototype={get eventType(){return this.eventType_;},get keyCodes(){return this.keyCodes_;},get helpText(){return this.helpText_;},call(e){this.callback_.call(this.thisArg_,e);},pushKeyCode_(keyCode){this.keyCodes_.push(keyCode);}};return{HotKey,};});'use 
strict';Polymer({is:'tv-ui-b-hotkey-controller',created(){this.isAttached_=false;this.globalMode_=false;this.slavedToParentController_=undefined;this.curHost_=undefined;this.childControllers_=[];this.bubblingKeyDownHotKeys_={};this.capturingKeyDownHotKeys_={};this.bubblingKeyPressHotKeys_={};this.capturingKeyPressHotKeys_={};this.onBubblingKeyDown_=this.onKey_.bind(this,false);this.onCapturingKeyDown_=this.onKey_.bind(this,true);this.onBubblingKeyPress_=this.onKey_.bind(this,false);this.onCapturingKeyPress_=this.onKey_.bind(this,true);},attached(){this.isAttached_=true;const host=this.findHost_();if(host.__hotkeyController){throw new Error('Multiple hotkey controllers attached to this host');} host.__hotkeyController=this;this.curHost_=host;let parentElement;if(host.parentElement){parentElement=host.parentElement;}else{parentElement=Polymer.dom(host).parentNode.host;} -const parentController=tr.b.getHotkeyControllerForElement(parentElement);if(parentController){this.coupledToParentController_=parentController;parentController.addChildController_(this);return;} -host.addEventListener('keydown',this.onBubblingKeyDown_,false);host.addEventListener('keydown',this.onCapturingKeyDown_,true);host.addEventListener('keypress',this.onBubblingKeyPress_,false);host.addEventListener('keypress',this.onCapturingKeyPress_,true);},detached(){this.isAttached_=false;const host=this.curHost_;if(!host)return;delete host.__hotkeyController;this.curHost_=undefined;if(this.coupledToParentController_){this.coupledToParentController_.removeChildController_(this);this.coupledToParentController_=undefined;return;} +const parentController=tr.b.getHotkeyControllerForElement(parentElement);if(parentController){this.slavedToParentController_=parentController;parentController.addChildController_(this);return;} 
+host.addEventListener('keydown',this.onBubblingKeyDown_,false);host.addEventListener('keydown',this.onCapturingKeyDown_,true);host.addEventListener('keypress',this.onBubblingKeyPress_,false);host.addEventListener('keypress',this.onCapturingKeyPress_,true);},detached(){this.isAttached_=false;const host=this.curHost_;if(!host)return;delete host.__hotkeyController;this.curHost_=undefined;if(this.slavedToParentController_){this.slavedToParentController_.removeChildController_(this);this.slavedToParentController_=undefined;return;} host.removeEventListener('keydown',this.onBubblingKeyDown_,false);host.removeEventListener('keydown',this.onCapturingKeyDown_,true);host.removeEventListener('keypress',this.onBubblingKeyPress_,false);host.removeEventListener('keypress',this.onCapturingKeyPress_,true);},addChildController_(controller){const i=this.childControllers_.indexOf(controller);if(i!==-1){throw new Error('Controller already registered');} this.childControllers_.push(controller);},removeChildController_(controller){const i=this.childControllers_.indexOf(controller);if(i===-1){throw new Error('Controller not registered');} this.childControllers_.splice(i,1);return controller;},getKeyMapForEventType_(eventType,useCapture){if(eventType==='keydown'){if(!useCapture){return this.bubblingKeyDownHotKeys_;} @@ -7262,7 +7262,7 @@ keyMap[keyCode]=hotKey;} for(let i=0;i<hotKey.keyCodes.length;i++){const keyCode=hotKey.keyCodes[i];delete keyMap[keyCode];} return hotKey;},get globalMode(){return this.globalMode_;},set globalMode(globalMode){const wasAttached=this.isAttached_;if(wasAttached){this.detached();} -this.globalMode_=!!globalMode;if(wasAttached){this.attached();}},get topmostConroller_(){if(this.coupledToParentController_){return this.coupledToParentController_.topmostConroller_;} +this.globalMode_=!!globalMode;if(wasAttached){this.attached();}},get topmostConroller_(){if(this.slavedToParentController_){return this.slavedToParentController_.topmostConroller_;} return 
this;},childRequestsGeneralFocus(child){const topmost=this.topmostConroller_;if(topmost.curHost_){if(topmost.curHost_.hasAttribute('tabIndex')){topmost.curHost_.focus();}else{if(document.activeElement){document.activeElement.blur();}}}else{if(document.activeElement){document.activeElement.blur();}}},childRequestsBlur(child){child.blur();const topmost=this.topmostConroller_;if(topmost.curHost_){topmost.curHost_.focus();}},findHost_(){if(this.globalMode_)return document.body;if(this.parentElement)return this.parentElement;if(!Polymer.dom(this).parentNode)return this.host;let node=this.parentNode;while(Polymer.dom(node).parentNode)node=Polymer.dom(node).parentNode;return node.host;},appendMatchingHotKeysTo_(matchedHotKeys,useCapture,e){const localKeyMap=this.getKeyMapForEventType_(e.type,useCapture);const localHotKey=localKeyMap[e.keyCode];if(localHotKey){matchedHotKeys.push(localHotKey);} for(let i=0;i<this.childControllers_.length;i++){const controller=this.childControllers_[i];controller.appendMatchingHotKeysTo_(matchedHotKeys,useCapture,e);}},onKey_(useCapture,e){if(!useCapture&&e.path[0].tagName==='INPUT')return;let sortedControllers;const matchedHotKeys=[];this.appendMatchingHotKeysTo_(matchedHotKeys,useCapture,e);if(matchedHotKeys.length===0)return false;if(matchedHotKeys.length>1){throw new Error('More than one hotKey is currently unsupported');} const hotKey=matchedHotKeys[0];let prevented=0;prevented|=hotKey.call(e);return!prevented&&e.defaultPrevented;}});'use strict';tr.exportTo('tr.b',function(){function getHotkeyControllerForElement(refElement){let curElement=refElement;while(curElement){if(curElement.tagName==='tv-ui-b-hotkey-controller'){return curElement;} @@ -7614,7 +7614,7 @@ const ans={supported:false};for(const proc of Object.values(m.processes)){proc.objects.iterObjectInstances(function(instance){if(instance instanceof BlameContextInstance){ans.supported=true;}});} if(!ans.supported){ans.reason='No frame data available';} return ans;},get 
currentRangeOfInterest(){if(this.rangeOfInterest_.isEmpty){return this.model_.bounds;} -return this.rangeOfInterest_;},get rangeOfInterest(){return this.rangeOfInterest_;},set rangeOfInterest(rangeOfInterest){this.rangeOfInterest_=rangeOfInterest;this.updateContents_();},get selection(){},set selection(_){},get textLabel(){return'Frame Data';},get model(){return this.model_;},set model(model){this.model_=model;this.updateContents_();}});tr.ui.side_panel.SidePanelRegistry.register(function(){return document.createElement('tr-ui-e-s-frame-data-side-panel');});});'use strict';Polymer({is:'tr-ui-b-chart-legend-key',ready(){this.$.checkbox.addEventListener('change',this.onCheckboxChange_.bind(this));},onCheckboxChange_(){tr.b.dispatchSimpleEvent(this,tr.ui.b.DataSeriesEnableChangeEventType,true,false,{key:Polymer.dom(this).textContent,enabled:this.enabled});},set textContent(t){Polymer.dom(this.$.label).textContent=t;Polymer.dom(this.$.link).textContent=t;this.updateContents_();},set width(w){w-=20;this.$.link.style.width=w+'px';this.$.label.style.width=w+'px';},get textContent(){return Polymer.dom(this.$.label).textContent;},set optional(optional){this.$.checkbox.style.visibility=optional?'visible':'hidden';},get optional(){return this.$.checkbox.style.visibility==='visible';},set enabled(enabled){this.$.checkbox.checked=enabled?'checked':'';},get enabled(){return this.$.checkbox.checked;},set color(c){this.$.label.style.color=c;this.$.link.color=c;},set target(target){this.$.link.setSelectionAndContent(target,Polymer.dom(this.$.label).textContent);this.updateContents_();},get target(){return this.$.link.selection;},set title(title){this.$.link.title=title;},updateContents_(){this.$.link.style.display=this.target?'':'none';this.$.label.style.display=this.target?'none':'';this.$.label.htmlFor=this.optional?'checkbox':'';}});'use strict';(function(window){window.define=function(x){window.d3=x;};window.define.amd=true;})(this);!function(){function n(n){return 
null!=n&&!isNaN(n)}function t(n){return n.length}function e(n){for(var t=1;n*t%1;)t*=10;return t}function r(n,t){try{for(var e in t)Object.defineProperty(n.prototype,e,{value:t[e],enumerable:!1})}catch(r){n.prototype=t}}function u(){}function i(n){return aa+n in this}function o(n){return n=aa+n,n in this&&delete this[n]}function a(){var n=[];return this.forEach(function(t){n.push(t)}),n}function c(){var n=0;for(var t in this)t.charCodeAt(0)===ca&&++n;return n}function s(){for(var n in this)if(n.charCodeAt(0)===ca)return!1;return!0}function l(){}function f(n,t,e){return function(){var r=e.apply(t,arguments);return r===t?n:r}}function h(n,t){if(t in n)return t;t=t.charAt(0).toUpperCase()+t.substring(1);for(var e=0,r=sa.length;r>e;++e){var u=sa[e]+t;if(u in n)return u}}function g(){}function p(){}function v(n){function t(){for(var t,r=e,u=-1,i=r.length;++u<i;)(t=r[u].on)&&t.apply(this,arguments);return n}var e=[],r=new u;return t.on=function(t,u){var i,o=r.get(t);return arguments.length<2?o&&o.on:(o&&(o.on=null,e=e.slice(0,i=e.indexOf(o)).concat(e.slice(i+1)),r.remove(t)),u&&e.push(r.set(t,{on:u})),n)},t}function d(){Xo.event.preventDefault()}function m(){for(var n,t=Xo.event;n=t.sourceEvent;)t=n;return t}function y(n){for(var t=new p,e=0,r=arguments.length;++e<r;)t[arguments[e]]=v(t);return t.of=function(e,r){return function(u){try{var i=u.sourceEvent=Xo.event;u.target=n,Xo.event=u,t[u.type].apply(e,r)}finally{Xo.event=i}}},t}function x(n){return fa(n,da),n}function M(n){return"function"==typeof n?n:function(){return ha(n,this)}}function _(n){return"function"==typeof n?n:function(){return ga(n,this)}}function b(n,t){function e(){this.removeAttribute(n)}function r(){this.removeAttributeNS(n.space,n.local)}function u(){this.setAttribute(n,t)}function i(){this.setAttributeNS(n.space,n.local,t)}function o(){var e=t.apply(this,arguments);null==e?this.removeAttribute(n):this.setAttribute(n,e)}function a(){var 
e=t.apply(this,arguments);null==e?this.removeAttributeNS(n.space,n.local):this.setAttributeNS(n.space,n.local,e)}return n=Xo.ns.qualify(n),null==t?n.local?r:e:"function"==typeof t?n.local?a:o:n.local?i:u}function w(n){return n.trim().replace(/\s+/g," ")}function S(n){return new RegExp("(?:^|\\s+)"+Xo.requote(n)+"(?:\\s+|$)","g")}function k(n){return n.trim().split(/^|\s+/)}function E(n,t){function e(){for(var e=-1;++e<u;)n[e](this,t)}function r(){for(var e=-1,r=t.apply(this,arguments);++e<u;)n[e](this,r)}n=k(n).map(A);var u=n.length;return"function"==typeof t?r:e}function A(n){var t=S(n);return function(e,r){if(u=e.classList)return r?u.add(n):u.remove(n);var u=e.getAttribute("class")||"";r?(t.lastIndex=0,t.test(u)||e.setAttribute("class",w(u+" "+n))):e.setAttribute("class",w(u.replace(t," ")))}}function C(n,t,e){function r(){this.style.removeProperty(n)}function u(){this.style.setProperty(n,t,e)}function i(){var r=t.apply(this,arguments);null==r?this.style.removeProperty(n):this.style.setProperty(n,r,e)}return null==t?r:"function"==typeof t?i:u}function N(n,t){function e(){delete this[n]}function r(){this[n]=t}function u(){var e=t.apply(this,arguments);null==e?delete this[n]:this[n]=e}return null==t?e:"function"==typeof t?u:r}function L(n){return"function"==typeof n?n:(n=Xo.ns.qualify(n)).local?function(){return this.ownerDocument.createElementNS(n.space,n.local)}:function(){return this.ownerDocument.createElementNS(this.namespaceURI,n)}}function T(n){return{__data__:n}}function q(n){return function(){return va(this,n)}}function z(n){return arguments.length||(n=Xo.ascending),function(t,e){return t&&e?n(t.__data__,e.__data__):!t-!e}}function R(n,t){for(var e=0,r=n.length;r>e;e++)for(var u,i=n[e],o=0,a=i.length;a>o;o++)(u=i[o])&&t(u,o,e);return n}function D(n){return fa(n,ya),n}function P(n){var t,e;return function(r,u,i){var o,a=n[i].update,c=a.length;for(i!=e&&(e=i,t=0),u>=t&&(t=u+1);!(o=a[t])&&++t<c;);return o}}function U(){var 
n=this.__transition__;n&&++n.active}function j(n,t,e){function r(){var t=this[o];t&&(this.removeEventListener(n,t,t.$),delete this[o])}function u(){var u=c(t,Bo(arguments));r.call(this),this.addEventListener(n,this[o]=u,u.$=e),u._=t}function i(){var t,e=new RegExp("^__on([^.]+)"+Xo.requote(n)+"$");for(var r in this)if(t=r.match(e)){var u=this[r];this.removeEventListener(t[1],u,u.$),delete this[r]}}var o="__on"+n,a=n.indexOf("."),c=H;a>0&&(n=n.substring(0,a));var s=Ma.get(n);return s&&(n=s,c=F),a?t?u:r:t?g:i}function H(n,t){return function(e){var r=Xo.event;Xo.event=e,t[0]=this.__data__;try{n.apply(this,t)}finally{Xo.event=r}}}function F(n,t){var e=H(n,t);return function(n){var t=this,r=n.relatedTarget;r&&(r===t||8&r.compareDocumentPosition(t))||e.call(t,n)}}function O(){var n=".dragsuppress-"+ ++ba,t="click"+n,e=Xo.select(Go).on("touchmove"+n,d).on("dragstart"+n,d).on("selectstart"+n,d);if(_a){var r=Jo.style,u=r[_a];r[_a]="none"}return function(i){function o(){e.on(t,null)}e.on(n,null),_a&&(r[_a]=u),i&&(e.on(t,function(){d(),o()},!0),setTimeout(o,0))}}function Y(n,t){t.changedTouches&&(t=t.changedTouches[0]);var e=n.ownerSVGElement||n;if(e.createSVGPoint){var r=e.createSVGPoint();if(0>wa&&(Go.scrollX||Go.scrollY)){e=Xo.select("body").append("svg").style({position:"absolute",top:0,left:0,margin:0,padding:0,border:"none"},"important");var u=e[0][0].getScreenCTM();wa=!(u.f||u.e),e.remove()}return wa?(r.x=t.pageX,r.y=t.pageY):(r.x=t.clientX,r.y=t.clientY),r=r.matrixTransform(n.getScreenCTM().inverse()),[r.x,r.y]}var i=n.getBoundingClientRect();return[t.clientX-i.left-n.clientLeft,t.clientY-i.top-n.clientTop]}function I(n){return n>0?1:0>n?-1:0}function Z(n,t,e){return(t[0]-n[0])*(e[1]-n[1])-(t[1]-n[1])*(e[0]-n[0])}function V(n){return n>1?0:-1>n?Sa:Math.acos(n)}function X(n){return n>1?Ea:-1>n?-Ea:Math.asin(n)}function $(n){return((n=Math.exp(n))-1/n)/2}function B(n){return((n=Math.exp(n))+1/n)/2}function W(n){return((n=Math.exp(2*n))-1)/(n+1)}function 
J(n){return(n=Math.sin(n/2))*n}function G(){}function K(n,t,e){return new Q(n,t,e)}function Q(n,t,e){this.h=n,this.s=t,this.l=e}function nt(n,t,e){function r(n){return n>360?n-=360:0>n&&(n+=360),60>n?i+(o-i)*n/60:180>n?o:240>n?i+(o-i)*(240-n)/60:i}function u(n){return Math.round(255*r(n))}var i,o;return n=isNaN(n)?0:(n%=360)<0?n+360:n,t=isNaN(t)?0:0>t?0:t>1?1:t,e=0>e?0:e>1?1:e,o=.5>=e?e*(1+t):e+t-e*t,i=2*e-o,gt(u(n+120),u(n),u(n-120))}function tt(n,t,e){return new et(n,t,e)}function et(n,t,e){this.h=n,this.c=t,this.l=e}function rt(n,t,e){return isNaN(n)&&(n=0),isNaN(t)&&(t=0),ut(e,Math.cos(n*=Na)*t,Math.sin(n)*t)}function ut(n,t,e){return new it(n,t,e)}function it(n,t,e){this.l=n,this.a=t,this.b=e}function ot(n,t,e){var r=(n+16)/116,u=r+t/500,i=r-e/200;return u=ct(u)*Fa,r=ct(r)*Oa,i=ct(i)*Ya,gt(lt(3.2404542*u-1.5371385*r-.4985314*i),lt(-.969266*u+1.8760108*r+.041556*i),lt(.0556434*u-.2040259*r+1.0572252*i))}function at(n,t,e){return n>0?tt(Math.atan2(e,t)*La,Math.sqrt(t*t+e*e),n):tt(0/0,0/0,n)}function ct(n){return n>.206893034?n*n*n:(n-4/29)/7.787037}function st(n){return n>.008856?Math.pow(n,1/3):7.787037*n+4/29}function lt(n){return Math.round(255*(.00304>=n?12.92*n:1.055*Math.pow(n,1/2.4)-.055))}function ft(n){return gt(n>>16,255&n>>8,255&n)}function ht(n){return ft(n)+""}function gt(n,t,e){return new pt(n,t,e)}function pt(n,t,e){this.r=n,this.g=t,this.b=e}function vt(n){return 16>n?"0"+Math.max(0,n).toString(16):Math.min(255,n).toString(16)}function dt(n,t,e){var r,u,i,o,a=0,c=0,s=0;if(u=/([a-z]+)\((.*)\)/i.exec(n))switch(i=u[2].split(","),u[1]){case"hsl":return e(parseFloat(i[0]),parseFloat(i[1])/100,parseFloat(i[2])/100);case"rgb":return t(Mt(i[0]),Mt(i[1]),Mt(i[2]))}return(o=Va.get(n))?t(o.r,o.g,o.b):(null!=n&&"#"===n.charAt(0)&&(r=parseInt(n.substring(1),16),isNaN(r)||(4===n.length?(a=(3840&r)>>4,a=a>>4|a,c=240&r,c=c>>4|c,s=15&r,s=s<<4|s):7===n.length&&(a=(16711680&r)>>16,c=(65280&r)>>8,s=255&r))),t(a,c,s))}function mt(n,t,e){var 
r,u,i=Math.min(n/=255,t/=255,e/=255),o=Math.max(n,t,e),a=o-i,c=(o+i)/2;return a?(u=.5>c?a/(o+i):a/(2-o-i),r=n==o?(t-e)/a+(e>t?6:0):t==o?(e-n)/a+2:(n-t)/a+4,r*=60):(r=0/0,u=c>0&&1>c?0:r),K(r,u,c)}function yt(n,t,e){n=xt(n),t=xt(t),e=xt(e);var r=st((.4124564*n+.3575761*t+.1804375*e)/Fa),u=st((.2126729*n+.7151522*t+.072175*e)/Oa),i=st((.0193339*n+.119192*t+.9503041*e)/Ya);return ut(116*u-16,500*(r-u),200*(u-i))}function xt(n){return(n/=255)<=.04045?n/12.92:Math.pow((n+.055)/1.055,2.4)}function Mt(n){var t=parseFloat(n);return"%"===n.charAt(n.length-1)?Math.round(2.55*t):t}function _t(n){return"function"==typeof n?n:function(){return n}}function bt(n){return n}function wt(n){return function(t,e,r){return 2===arguments.length&&"function"==typeof e&&(r=e,e=null),St(t,e,n,r)}}function St(n,t,e,r){function u(){var n,t=c.status;if(!t&&c.responseText||t>=200&&300>t||304===t){try{n=e.call(i,c)}catch(r){return o.error.call(i,r),void 0}o.load.call(i,n)}else o.error.call(i,c)}var i={},o=Xo.dispatch("beforesend","progress","load","error"),a={},c=new XMLHttpRequest,s=null;return!Go.XDomainRequest||"withCredentials"in c||!/^(http(s)?:)?\/\//.test(n)||(c=new XDomainRequest),"onload"in c?c.onload=c.onerror=u:c.onreadystatechange=function(){c.readyState>3&&u()},c.onprogress=function(n){var t=Xo.event;Xo.event=n;try{o.progress.call(i,c)}finally{Xo.event=t}},i.header=function(n,t){return n=(n+"").toLowerCase(),arguments.length<2?a[n]:(null==t?delete a[n]:a[n]=t+"",i)},i.mimeType=function(n){return arguments.length?(t=null==n?null:n+"",i):t},i.responseType=function(n){return arguments.length?(s=n,i):s},i.response=function(n){return e=n,i},["get","post"].forEach(function(n){i[n]=function(){return i.send.apply(i,[n].concat(Bo(arguments)))}}),i.send=function(e,r,u){if(2===arguments.length&&"function"==typeof r&&(u=r,r=null),c.open(e,n,!0),null==t||"accept"in a||(a.accept=t+",*/*"),c.setRequestHeader)for(var l in a)c.setRequestHeader(l,a[l]);return 
null!=t&&c.overrideMimeType&&c.overrideMimeType(t),null!=s&&(c.responseType=s),null!=u&&i.on("error",u).on("load",function(n){u(null,n)}),o.beforesend.call(i,c),c.send(null==r?null:r),i},i.abort=function(){return c.abort(),i},Xo.rebind(i,o,"on"),null==r?i:i.get(kt(r))}function kt(n){return 1===n.length?function(t,e){n(null==t?e:null)}:n}function Et(){var n=At(),t=Ct()-n;t>24?(isFinite(t)&&(clearTimeout(Wa),Wa=setTimeout(Et,t)),Ba=0):(Ba=1,Ga(Et))}function At(){var n=Date.now();for(Ja=Xa;Ja;)n>=Ja.t&&(Ja.f=Ja.c(n-Ja.t)),Ja=Ja.n;return n}function Ct(){for(var n,t=Xa,e=1/0;t;)t.f?t=n?n.n=t.n:Xa=t.n:(t.t<e&&(e=t.t),t=(n=t).n);return $a=n,e}function Nt(n,t){return t-(n?Math.ceil(Math.log(n)/Math.LN10):1)}function Lt(n,t){var e=Math.pow(10,3*oa(8-t));return{scale:t>8?function(n){return n/e}:function(n){return n*e},symbol:n}}function Tt(n){var t=n.decimal,e=n.thousands,r=n.grouping,u=n.currency,i=r?function(n){for(var t=n.length,u=[],i=0,o=r[0];t>0&&o>0;)u.push(n.substring(t-=o,t+o)),o=r[i=(i+1)%r.length];return u.reverse().join(e)}:bt;return function(n){var e=Qa.exec(n),r=e[1]||" ",o=e[2]||">",a=e[3]||"",c=e[4]||"",s=e[5],l=+e[6],f=e[7],h=e[8],g=e[9],p=1,v="",d="",m=!1;switch(h&&(h=+h.substring(1)),(s||"0"===r&&"="===o)&&(s=r="0",o="=",f&&(l-=Math.floor((l-1)/4))),g){case"n":f=!0,g="g";break;case"%":p=100,d="%",g="f";break;case"p":p=100,d="%",g="r";break;case"b":case"o":case"x":case"X":"#"===c&&(v="0"+g.toLowerCase());case"c":case"d":m=!0,h=0;break;case"s":p=-1,g="r"}"$"===c&&(v=u[0],d=u[1]),"r"!=g||h||(g="g"),null!=h&&("g"==g?h=Math.max(1,Math.min(21,h)):("e"==g||"f"==g)&&(h=Math.max(0,Math.min(20,h)))),g=nc.get(g)||qt;var y=s&&f;return function(n){var e=d;if(m&&n%1)return"";var u=0>n||0===n&&0>1/n?(n=-n,"-"):a;if(0>p){var c=Xo.formatPrefix(n,h);n=c.scale(n),e=c.symbol+d}else n*=p;n=g(n,h);var x=n.lastIndexOf("."),M=0>x?n:n.substring(0,x),_=0>x?"":t+n.substring(x+1);!s&&f&&(M=i(M));var b=v.length+M.length+_.length+(y?0:u.length),w=l>b?new 
Array(b=l-b+1).join(r):"";return y&&(M=i(w+M)),u+=v,n=M+_,("<"===o?u+n+w:">"===o?w+u+n:"^"===o?w.substring(0,b>>=1)+u+n+w.substring(b):u+(y?n:w+n))+e}}}function qt(n){return n+""}function zt(){this._=new Date(arguments.length>1?Date.UTC.apply(this,arguments):arguments[0])}function Rt(n,t,e){function r(t){var e=n(t),r=i(e,1);return r-t>t-e?e:r}function u(e){return t(e=n(new ec(e-1)),1),e}function i(n,e){return t(n=new ec(+n),e),n}function o(n,r,i){var o=u(n),a=[];if(i>1)for(;r>o;)e(o)%i||a.push(new Date(+o)),t(o,1);else for(;r>o;)a.push(new Date(+o)),t(o,1);return a}function a(n,t,e){try{ec=zt;var r=new zt;return r._=n,o(r,t,e)}finally{ec=Date}}n.floor=n,n.round=r,n.ceil=u,n.offset=i,n.range=o;var c=n.utc=Dt(n);return c.floor=c,c.round=Dt(r),c.ceil=Dt(u),c.offset=Dt(i),c.range=a,n}function Dt(n){return function(t,e){try{ec=zt;var r=new zt;return r._=t,n(r,e)._}finally{ec=Date}}}function Pt(n){function t(n){function t(t){for(var e,u,i,o=[],a=-1,c=0;++a<r;)37===n.charCodeAt(a)&&(o.push(n.substring(c,a)),null!=(u=uc[e=n.charAt(++a)])&&(e=n.charAt(++a)),(i=C[e])&&(e=i(t,null==u?"e"===e?" 
":"0":u)),o.push(e),c=a+1);return o.push(n.substring(c,a)),o.join("")}var r=n.length;return t.parse=function(t){var r={y:1900,m:0,d:1,H:0,M:0,S:0,L:0,Z:null},u=e(r,n,t,0);if(u!=t.length)return null;"p"in r&&(r.H=r.H%12+12*r.p);var i=null!=r.Z&&ec!==zt,o=new(i?zt:ec);return"j"in r?o.setFullYear(r.y,0,r.j):"w"in r&&("W"in r||"U"in r)?(o.setFullYear(r.y,0,1),o.setFullYear(r.y,0,"W"in r?(r.w+6)%7+7*r.W-(o.getDay()+5)%7:r.w+7*r.U-(o.getDay()+6)%7)):o.setFullYear(r.y,r.m,r.d),o.setHours(r.H+Math.floor(r.Z/100),r.M+r.Z%100,r.S,r.L),i?o._:o},t.toString=function(){return n},t}function e(n,t,e,r){for(var u,i,o,a=0,c=t.length,s=e.length;c>a;){if(r>=s)return-1;if(u=t.charCodeAt(a++),37===u){if(o=t.charAt(a++),i=N[o in uc?t.charAt(a++):o],!i||(r=i(n,e,r))<0)return-1}else if(u!=e.charCodeAt(r++))return-1}return r}function r(n,t,e){b.lastIndex=0;var r=b.exec(t.substring(e));return r?(n.w=w.get(r[0].toLowerCase()),e+r[0].length):-1}function u(n,t,e){M.lastIndex=0;var r=M.exec(t.substring(e));return r?(n.w=_.get(r[0].toLowerCase()),e+r[0].length):-1}function i(n,t,e){E.lastIndex=0;var r=E.exec(t.substring(e));return r?(n.m=A.get(r[0].toLowerCase()),e+r[0].length):-1}function o(n,t,e){S.lastIndex=0;var r=S.exec(t.substring(e));return r?(n.m=k.get(r[0].toLowerCase()),e+r[0].length):-1}function a(n,t,r){return e(n,C.c.toString(),t,r)}function c(n,t,r){return e(n,C.x.toString(),t,r)}function s(n,t,r){return e(n,C.X.toString(),t,r)}function l(n,t,e){var r=x.get(t.substring(e,e+=2).toLowerCase());return null==r?-1:(n.p=r,e)}var f=n.dateTime,h=n.date,g=n.time,p=n.periods,v=n.days,d=n.shortDays,m=n.months,y=n.shortMonths;t.utc=function(n){function e(n){try{ec=zt;var t=new ec;return t._=n,r(t)}finally{ec=Date}}var r=t(n);return e.parse=function(n){try{ec=zt;var t=r.parse(n);return t&&t._}finally{ec=Date}},e.toString=r.toString,e},t.multi=t.utc.multi=ee;var x=Xo.map(),M=jt(v),_=Ht(v),b=jt(d),w=Ht(d),S=jt(m),k=Ht(m),E=jt(y),A=Ht(y);p.forEach(function(n,t){x.set(n.toLowerCase(),t)});var 
C={a:function(n){return d[n.getDay()]},A:function(n){return v[n.getDay()]},b:function(n){return y[n.getMonth()]},B:function(n){return m[n.getMonth()]},c:t(f),d:function(n,t){return Ut(n.getDate(),t,2)},e:function(n,t){return Ut(n.getDate(),t,2)},H:function(n,t){return Ut(n.getHours(),t,2)},I:function(n,t){return Ut(n.getHours()%12||12,t,2)},j:function(n,t){return Ut(1+tc.dayOfYear(n),t,3)},L:function(n,t){return Ut(n.getMilliseconds(),t,3)},m:function(n,t){return Ut(n.getMonth()+1,t,2)},M:function(n,t){return Ut(n.getMinutes(),t,2)},p:function(n){return p[+(n.getHours()>=12)]},S:function(n,t){return Ut(n.getSeconds(),t,2)},U:function(n,t){return Ut(tc.sundayOfYear(n),t,2)},w:function(n){return n.getDay()},W:function(n,t){return Ut(tc.mondayOfYear(n),t,2)},x:t(h),X:t(g),y:function(n,t){return Ut(n.getFullYear()%100,t,2)},Y:function(n,t){return Ut(n.getFullYear()%1e4,t,4)},Z:ne,"%":function(){return"%"}},N={a:r,A:u,b:i,B:o,c:a,d:Bt,e:Bt,H:Jt,I:Jt,j:Wt,L:Qt,m:$t,M:Gt,p:l,S:Kt,U:Ot,w:Ft,W:Yt,x:c,X:s,y:Zt,Y:It,Z:Vt,"%":te};return t}function Ut(n,t,e){var r=0>n?"-":"",u=(r?-n:n)+"",i=u.length;return r+(e>i?new Array(e-i+1).join(t)+u:u)}function jt(n){return new RegExp("^(?:"+n.map(Xo.requote).join("|")+")","i")}function Ht(n){for(var t=new u,e=-1,r=n.length;++e<r;)t.set(n[e].toLowerCase(),e);return t}function Ft(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+1));return r?(n.w=+r[0],e+r[0].length):-1}function Ot(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e));return r?(n.U=+r[0],e+r[0].length):-1}function Yt(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e));return r?(n.W=+r[0],e+r[0].length):-1}function It(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+4));return r?(n.y=+r[0],e+r[0].length):-1}function Zt(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+2));return r?(n.y=Xt(+r[0]),e+r[0].length):-1}function Vt(n,t,e){return/^[+-]\d{4}$/.test(t=t.substring(e,e+5))?(n.Z=+t,e+5):-1}function Xt(n){return n+(n>68?1900:2e3)}function 
$t(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+2));return r?(n.m=r[0]-1,e+r[0].length):-1}function Bt(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+2));return r?(n.d=+r[0],e+r[0].length):-1}function Wt(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+3));return r?(n.j=+r[0],e+r[0].length):-1}function Jt(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+2));return r?(n.H=+r[0],e+r[0].length):-1}function Gt(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+2));return r?(n.M=+r[0],e+r[0].length):-1}function Kt(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+2));return r?(n.S=+r[0],e+r[0].length):-1}function Qt(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+3));return r?(n.L=+r[0],e+r[0].length):-1}function ne(n){var t=n.getTimezoneOffset(),e=t>0?"-":"+",r=~~(oa(t)/60),u=oa(t)%60;return e+Ut(r,"0",2)+Ut(u,"0",2)}function te(n,t,e){oc.lastIndex=0;var r=oc.exec(t.substring(e,e+1));return r?e+r[0].length:-1}function ee(n){for(var t=n.length,e=-1;++e<t;)n[e][0]=this(n[e][0]);return function(t){for(var e=0,r=n[e];!r[1](t);)r=n[++e];return r[0](t)}}function re(){}function ue(n,t,e){var r=e.s=n+t,u=r-n,i=r-u;e.t=n-i+(t-u)}function ie(n,t){n&&lc.hasOwnProperty(n.type)&&lc[n.type](n,t)}function oe(n,t,e){var r,u=-1,i=n.length-e;for(t.lineStart();++u<i;)r=n[u],t.point(r[0],r[1],r[2]);t.lineEnd()}function ae(n,t){var e=-1,r=n.length;for(t.polygonStart();++e<r;)oe(n[e],t,1);t.polygonEnd()}function ce(){function n(n,t){n*=Na,t=t*Na/2+Sa/4;var e=n-r,o=e>=0?1:-1,a=o*e,c=Math.cos(t),s=Math.sin(t),l=i*s,f=u*c+l*Math.cos(a),h=l*o*Math.sin(a);hc.add(Math.atan2(h,f)),r=n,u=c,i=s}var t,e,r,u,i;gc.point=function(o,a){gc.point=n,r=(t=o)*Na,u=Math.cos(a=(e=a)*Na/2+Sa/4),i=Math.sin(a)},gc.lineEnd=function(){n(t,e)}}function se(n){var t=n[0],e=n[1],r=Math.cos(e);return[r*Math.cos(t),r*Math.sin(t),Math.sin(e)]}function le(n,t){return n[0]*t[0]+n[1]*t[1]+n[2]*t[2]}function fe(n,t){return[n[1]*t[2]-n[2]*t[1],n[2]*t[0]-n[0]*t[2],n[0]*t[1]-n[1]*t[0]]}function 
they(n,t){n[0]+=t[0],n[1]+=t[1],n[2]+=t[2]}function ge(n,t){return[n[0]*t,n[1]*t,n[2]*t]}function pe(n){var t=Math.sqrt(n[0]*n[0]+n[1]*n[1]+n[2]*n[2]);n[0]/=t,n[1]/=t,n[2]/=t}function ve(n){return[Math.atan2(n[1],n[0]),X(n[2])]}function de(n,t){return oa(n[0]-t[0])<Aa&&oa(n[1]-t[1])<Aa}function me(n,t){n*=Na;var e=Math.cos(t*=Na);ye(e*Math.cos(n),e*Math.sin(n),Math.sin(t))}function ye(n,t,e){++pc,dc+=(n-dc)/pc,mc+=(t-mc)/pc,yc+=(e-yc)/pc}function xe(){function n(n,u){n*=Na;var i=Math.cos(u*=Na),o=i*Math.cos(n),a=i*Math.sin(n),c=Math.sin(u),s=Math.atan2(Math.sqrt((s=e*c-r*a)*s+(s=r*o-t*c)*s+(s=t*a-e*o)*s),t*o+e*a+r*c);vc+=s,xc+=s*(t+(t=o)),Mc+=s*(e+(e=a)),_c+=s*(r+(r=c)),ye(t,e,r)}var t,e,r;kc.point=function(u,i){u*=Na;var o=Math.cos(i*=Na);t=o*Math.cos(u),e=o*Math.sin(u),r=Math.sin(i),kc.point=n,ye(t,e,r)}}function Me(){kc.point=me}function _e(){function n(n,t){n*=Na;var e=Math.cos(t*=Na),o=e*Math.cos(n),a=e*Math.sin(n),c=Math.sin(t),s=u*c-i*a,l=i*o-r*c,f=r*a-u*o,h=Math.sqrt(s*s+l*l+f*f),g=r*o+u*a+i*c,p=h&&-V(g)/h,v=Math.atan2(h,g);bc+=p*s,wc+=p*l,Sc+=p*f,vc+=v,xc+=v*(r+(r=o)),Mc+=v*(u+(u=a)),_c+=v*(i+(i=c)),ye(r,u,i)}var t,e,r,u,i;kc.point=function(o,a){t=o,e=a,kc.point=n,o*=Na;var c=Math.cos(a*=Na);r=c*Math.cos(o),u=c*Math.sin(o),i=Math.sin(a),ye(r,u,i)},kc.lineEnd=function(){n(t,e),kc.lineEnd=Me,kc.point=me}}function be(){return!0}function we(n,t,e,r,u){var i=[],o=[];if(n.forEach(function(n){if(!((t=n.length-1)<=0)){var t,e=n[0],r=n[t];if(de(e,r)){u.lineStart();for(var a=0;t>a;++a)u.point((e=n[a])[0],e[1]);return u.lineEnd(),void 0}var c=new ke(e,n,null,!0),s=new ke(e,null,c,!1);c.o=s,i.push(c),o.push(s),c=new ke(r,n,null,!1),s=new ke(r,null,c,!0),c.o=s,i.push(c),o.push(s)}}),o.sort(t),Se(i),Se(o),i.length){for(var a=0,c=e,s=o.length;s>a;++a)o[a].e=c=!c;for(var l,f,h=i[0];;){for(var g=h,p=!0;g.v;)if((g=g.n)===h)return;l=g.z,u.lineStart();do{if(g.v=g.o.v=!0,g.e){if(p)for(var a=0,s=l.length;s>a;++a)u.point((f=l[a])[0],f[1]);else 
r(g.x,g.n.x,1,u);g=g.n}else{if(p){l=g.p.z;for(var a=l.length-1;a>=0;--a)u.point((f=l[a])[0],f[1])}else r(g.x,g.p.x,-1,u);g=g.p}g=g.o,l=g.z,p=!p}while(!g.v);u.lineEnd()}}}function Se(n){if(t=n.length){for(var t,e,r=0,u=n[0];++r<t;)u.n=e=n[r],e.p=u,u=e;u.n=e=n[0],e.p=u}}function ke(n,t,e,r){this.x=n,this.z=t,this.o=e,this.e=r,this.v=!1,this.n=this.p=null}function Ee(n,t,e,r){return function(u,i){function o(t,e){var r=u(t,e);n(t=r[0],e=r[1])&&i.point(t,e)}function a(n,t){var e=u(n,t);d.point(e[0],e[1])}function c(){y.point=a,d.lineStart()}function s(){y.point=o,d.lineEnd()}function l(n,t){v.push([n,t]);var e=u(n,t);M.point(e[0],e[1])}function f(){M.lineStart(),v=[]}function h(){l(v[0][0],v[0][1]),M.lineEnd();var n,t=M.clean(),e=x.buffer(),r=e.length;if(v.pop(),p.push(v),v=null,r){if(1&t){n=e[0];var u,r=n.length-1,o=-1;for(i.lineStart();++o<r;)i.point((u=n[o])[0],u[1]);return i.lineEnd(),void 0}r>1&&2&t&&e.push(e.pop().concat(e.shift())),g.push(e.filter(Ae))}}var g,p,v,d=t(i),m=u.invert(r[0],r[1]),y={point:o,lineStart:c,lineEnd:s,polygonStart:function(){y.point=l,y.lineStart=f,y.lineEnd=h,g=[],p=[],i.polygonStart()},polygonEnd:function(){y.point=o,y.lineStart=c,y.lineEnd=s,g=Xo.merge(g);var n=Le(m,p);g.length?we(g,Ne,n,e,i):n&&(i.lineStart(),e(null,null,1,i),i.lineEnd()),i.polygonEnd(),g=p=null},sphere:function(){i.polygonStart(),i.lineStart(),e(null,null,1,i),i.lineEnd(),i.polygonEnd()}},x=Ce(),M=t(x);return y}}function Ae(n){return n.length>1}function Ce(){var n,t=[];return{lineStart:function(){t.push(n=[])},point:function(t,e){n.push([t,e])},lineEnd:g,buffer:function(){var e=t;return t=[],n=null,e},rejoin:function(){t.length>1&&t.push(t.pop().concat(t.shift()))}}}function Ne(n,t){return((n=n.x)[0]<0?n[1]-Ea-Aa:Ea-n[1])-((t=t.x)[0]<0?t[1]-Ea-Aa:Ea-t[1])}function Le(n,t){var e=n[0],r=n[1],u=[Math.sin(e),-Math.cos(e),0],i=0,o=0;hc.reset();for(var a=0,c=t.length;c>a;++a){var s=t[a],l=s.length;if(l)for(var 
f=s[0],h=f[0],g=f[1]/2+Sa/4,p=Math.sin(g),v=Math.cos(g),d=1;;){d===l&&(d=0),n=s[d];var m=n[0],y=n[1]/2+Sa/4,x=Math.sin(y),M=Math.cos(y),_=m-h,b=_>=0?1:-1,w=b*_,S=w>Sa,k=p*x;if(hc.add(Math.atan2(k*b*Math.sin(w),v*M+k*Math.cos(w))),i+=S?_+b*ka:_,S^h>=e^m>=e){var E=fe(se(f),se(n));pe(E);var A=fe(u,E);pe(A);var C=(S^_>=0?-1:1)*X(A[2]);(r>C||r===C&&(E[0]||E[1]))&&(o+=S^_>=0?1:-1)}if(!d++)break;h=m,p=x,v=M,f=n}}return(-Aa>i||Aa>i&&0>hc)^1&o}function Te(n){var t,e=0/0,r=0/0,u=0/0;return{lineStart:function(){n.lineStart(),t=1},point:function(i,o){var a=i>0?Sa:-Sa,c=oa(i-e);oa(c-Sa)<Aa?(n.point(e,r=(r+o)/2>0?Ea:-Ea),n.point(u,r),n.lineEnd(),n.lineStart(),n.point(a,r),n.point(i,r),t=0):u!==a&&c>=Sa&&(oa(e-u)<Aa&&(e-=u*Aa),oa(i-a)<Aa&&(i-=a*Aa),r=qe(e,r,i,o),n.point(u,r),n.lineEnd(),n.lineStart(),n.point(a,r),t=0),n.point(e=i,r=o),u=a},lineEnd:function(){n.lineEnd(),e=r=0/0},clean:function(){return 2-t}}}function qe(n,t,e,r){var u,i,o=Math.sin(n-e);return oa(o)>Aa?Math.atan((Math.sin(t)*(i=Math.cos(r))*Math.sin(e)-Math.sin(r)*(u=Math.cos(t))*Math.sin(n))/(u*i*o)):(t+r)/2}function ze(n,t,e,r){var u;if(null==n)u=e*Ea,r.point(-Sa,u),r.point(0,u),r.point(Sa,u),r.point(Sa,0),r.point(Sa,-u),r.point(0,-u),r.point(-Sa,-u),r.point(-Sa,0),r.point(-Sa,u);else if(oa(n[0]-t[0])>Aa){var i=n[0]<t[0]?Sa:-Sa;u=e*i/2,r.point(-i,u),r.point(0,u),r.point(i,u)}else r.point(t[0],t[1])}function Re(n){function t(n,t){return Math.cos(n)*Math.cos(t)>i}function e(n){var e,i,c,s,l;return{lineStart:function(){s=c=!1,l=1},point:function(f,h){var g,p=[f,h],v=t(f,h),d=o?v?0:u(f,h):v?u(f+(0>f?Sa:-Sa),h):0;if(!e&&(s=c=v)&&n.lineStart(),v!==c&&(g=r(e,p),(de(e,g)||de(p,g))&&(p[0]+=Aa,p[1]+=Aa,v=t(p[0],p[1]))),v!==c)l=0,v?(n.lineStart(),g=r(p,e),n.point(g[0],g[1])):(g=r(e,p),n.point(g[0],g[1]),n.lineEnd()),e=g;else if(a&&e&&o^v){var 
m;d&i||!(m=r(p,e,!0))||(l=0,o?(n.lineStart(),n.point(m[0][0],m[0][1]),n.point(m[1][0],m[1][1]),n.lineEnd()):(n.point(m[1][0],m[1][1]),n.lineEnd(),n.lineStart(),n.point(m[0][0],m[0][1])))}!v||e&&de(e,p)||n.point(p[0],p[1]),e=p,c=v,i=d},lineEnd:function(){c&&n.lineEnd(),e=null},clean:function(){return l|(s&&c)<<1}}}function r(n,t,e){var r=se(n),u=se(t),o=[1,0,0],a=fe(r,u),c=le(a,a),s=a[0],l=c-s*s;if(!l)return!e&&n;var f=i*c/l,h=-i*s/l,g=fe(o,a),p=ge(o,f),v=ge(a,h);they(p,v);var d=g,m=le(p,d),y=le(d,d),x=m*m-y*(le(p,p)-1);if(!(0>x)){var M=Math.sqrt(x),_=ge(d,(-m-M)/y);if(they(_,p),_=ve(_),!e)return _;var b,w=n[0],S=t[0],k=n[1],E=t[1];w>S&&(b=w,w=S,S=b);var A=S-w,C=oa(A-Sa)<Aa,N=C||Aa>A;if(!C&&k>E&&(b=k,k=E,E=b),N?C?k+E>0^_[1]<(oa(_[0]-w)<Aa?k:E):k<=_[1]&&_[1]<=E:A>Sa^(w<=_[0]&&_[0]<=S)){var L=ge(d,(-m+M)/y);return they(L,p),[_,ve(L)]}}}function u(t,e){var r=o?n:Sa-n,u=0;return-r>t?u|=1:t>r&&(u|=2),-r>e?u|=4:e>r&&(u|=8),u}var i=Math.cos(n),o=i>0,a=oa(i)>Aa,c=cr(n,6*Na);return Ee(t,e,c,o?[0,-n]:[-Sa,n-Sa])}function De(n,t,e,r){return function(u){var i,o=u.a,a=u.b,c=o.x,s=o.y,l=a.x,f=a.y,h=0,g=1,p=l-c,v=f-s;if(i=n-c,p||!(i>0)){if(i/=p,0>p){if(h>i)return;g>i&&(g=i)}else if(p>0){if(i>g)return;i>h&&(h=i)}if(i=e-c,p||!(0>i)){if(i/=p,0>p){if(i>g)return;i>h&&(h=i)}else if(p>0){if(h>i)return;g>i&&(g=i)}if(i=t-s,v||!(i>0)){if(i/=v,0>v){if(h>i)return;g>i&&(g=i)}else if(v>0){if(i>g)return;i>h&&(h=i)}if(i=r-s,v||!(0>i)){if(i/=v,0>v){if(i>g)return;i>h&&(h=i)}else if(v>0){if(h>i)return;g>i&&(g=i)}return h>0&&(u.a={x:c+h*p,y:s+h*v}),1>g&&(u.b={x:c+g*p,y:s+g*v}),u}}}}}}function Pe(n,t,e,r){function u(r,u){return oa(r[0]-n)<Aa?u>0?0:3:oa(r[0]-e)<Aa?u>0?2:1:oa(r[1]-t)<Aa?u>0?1:0:u>0?3:2}function i(n,t){return o(n.x,t.x)}function o(n,t){var e=u(n,1),r=u(t,1);return e!==r?e-r:0===e?t[1]-n[1]:1===e?n[0]-t[0]:2===e?n[1]-t[1]:t[0]-n[0]}return function(a){function c(n){for(var t=0,e=d.length,r=n[1],u=0;e>u;++u)for(var 
i,o=1,a=d[u],c=a.length,s=a[0];c>o;++o)i=a[o],s[1]<=r?i[1]>r&&Z(s,i,n)>0&&++t:i[1]<=r&&Z(s,i,n)<0&&--t,s=i;return 0!==t}function s(i,a,c,s){var l=0,f=0;if(null==i||(l=u(i,c))!==(f=u(a,c))||o(i,a)<0^c>0){do s.point(0===l||3===l?n:e,l>1?r:t);while((l=(l+c+4)%4)!==f)}else s.point(a[0],a[1])}function l(u,i){return u>=n&&e>=u&&i>=t&&r>=i}function f(n,t){l(n,t)&&a.point(n,t)}function h(){N.point=p,d&&d.push(m=[]),S=!0,w=!1,_=b=0/0}function g(){v&&(p(y,x),M&&w&&A.rejoin(),v.push(A.buffer())),N.point=f,w&&a.lineEnd()}function p(n,t){n=Math.max(-Ac,Math.min(Ac,n)),t=Math.max(-Ac,Math.min(Ac,t));var e=l(n,t);if(d&&m.push([n,t]),S)y=n,x=t,M=e,S=!1,e&&(a.lineStart(),a.point(n,t));else if(e&&w)a.point(n,t);else{var r={a:{x:_,y:b},b:{x:n,y:t}};C(r)?(w||(a.lineStart(),a.point(r.a.x,r.a.y)),a.point(r.b.x,r.b.y),e||a.lineEnd(),k=!1):e&&(a.lineStart(),a.point(n,t),k=!1)}_=n,b=t,w=e}var v,d,m,y,x,M,_,b,w,S,k,E=a,A=Ce(),C=De(n,t,e,r),N={point:f,lineStart:h,lineEnd:g,polygonStart:function(){a=A,v=[],d=[],k=!0},polygonEnd:function(){a=E,v=Xo.merge(v);var t=c([n,r]),e=k&&t,u=v.length;(e||u)&&(a.polygonStart(),e&&(a.lineStart(),s(null,null,1,a),a.lineEnd()),u&&we(v,i,t,s,a),a.polygonEnd()),v=d=m=null}};return N}}function Ue(n,t){function e(e,r){return e=n(e,r),t(e[0],e[1])}return n.invert&&t.invert&&(e.invert=function(e,r){return e=t.invert(e,r),e&&n.invert(e[0],e[1])}),e}function je(n){var t=0,e=Sa/3,r=nr(n),u=r(t,e);return u.parallels=function(n){return arguments.length?r(t=n[0]*Sa/180,e=n[1]*Sa/180):[180*(t/Sa),180*(e/Sa)]},u}function They(n,t){function e(n,t){var e=Math.sqrt(i-2*u*Math.sin(t))/u;return[e*Math.sin(n*=u),o-e*Math.cos(n)]}var r=Math.sin(n),u=(r+Math.sin(t))/2,i=1+r*(2*u-r),o=Math.sqrt(i)/u;return e.invert=function(n,t){var e=o-t;return[Math.atan2(n,e)/u,X((i-(n*n+e*e)*u*u)/(2*u))]},e}function Fe(){function n(n,t){Nc+=u*n-r*t,r=n,u=t}var t,e,r,u;Rc.point=function(i,o){Rc.point=n,t=r=i,e=u=o},Rc.lineEnd=function(){n(t,e)}}function 
Oe(n,t){Lc>n&&(Lc=n),n>qc&&(qc=n),Tc>t&&(Tc=t),t>zc&&(zc=t)}function Ye(){function n(n,t){o.push("M",n,",",t,i)}function t(n,t){o.push("M",n,",",t),a.point=e}function e(n,t){o.push("L",n,",",t)}function r(){a.point=n}function u(){o.push("Z")}var i=Ie(4.5),o=[],a={point:n,lineStart:function(){a.point=t},lineEnd:r,polygonStart:function(){a.lineEnd=u},polygonEnd:function(){a.lineEnd=r,a.point=n},pointRadius:function(n){return i=Ie(n),a},result:function(){if(o.length){var n=o.join("");return o=[],n}}};return a}function Ie(n){return"m0,"+n+"a"+n+","+n+" 0 1,1 0,"+-2*n+"a"+n+","+n+" 0 1,1 0,"+2*n+"z"}function Ze(n,t){dc+=n,mc+=t,++yc}function Ve(){function n(n,r){var u=n-t,i=r-e,o=Math.sqrt(u*u+i*i);xc+=o*(t+n)/2,Mc+=o*(e+r)/2,_c+=o,Ze(t=n,e=r)}var t,e;Pc.point=function(r,u){Pc.point=n,Ze(t=r,e=u)}}function Xe(){Pc.point=Ze}function $e(){function n(n,t){var e=n-r,i=t-u,o=Math.sqrt(e*e+i*i);xc+=o*(r+n)/2,Mc+=o*(u+t)/2,_c+=o,o=u*n-r*t,bc+=o*(r+n),wc+=o*(u+t),Sc+=3*o,Ze(r=n,u=t)}var t,e,r,u;Pc.point=function(i,o){Pc.point=n,Ze(t=r=i,e=u=o)},Pc.lineEnd=function(){n(t,e)}}function Be(n){function t(t,e){n.moveTo(t,e),n.arc(t,e,o,0,ka)}function e(t,e){n.moveTo(t,e),a.point=r}function r(t,e){n.lineTo(t,e)}function u(){a.point=t}function i(){n.closePath()}var o=4.5,a={point:t,lineStart:function(){a.point=e},lineEnd:u,polygonStart:function(){a.lineEnd=i},polygonEnd:function(){a.lineEnd=u,a.point=t},pointRadius:function(n){return o=n,a},result:g};return a}function We(n){function t(n){return(a?r:e)(n)}function e(t){return Ke(t,function(e,r){e=n(e,r),t.point(e[0],e[1])})}function r(t){function e(e,r){e=n(e,r),t.point(e[0],e[1])}function r(){x=0/0,S.point=i,t.lineStart()}function i(e,r){var i=se([e,r]),o=n(e,r);u(x,M,y,_,b,w,x=o[0],M=o[1],y=e,_=i[0],b=i[1],w=i[2],a,t),t.point(x,M)}function o(){S.point=e,t.lineEnd()}function c(){r(),S.point=s,S.lineEnd=l}function s(n,t){i(f=n,h=t),g=x,p=M,v=_,d=b,m=w,S.point=i}function l(){u(x,M,y,_,b,w,g,p,f,v,d,m,a,t),S.lineEnd=o,o()}var 
f,h,g,p,v,d,m,y,x,M,_,b,w,S={point:e,lineStart:r,lineEnd:o,polygonStart:function(){t.polygonStart(),S.lineStart=c},polygonEnd:function(){t.polygonEnd(),S.lineStart=r}};return S}function u(t,e,r,a,c,s,l,f,h,g,p,v,d,m){var y=l-t,x=f-e,M=y*y+x*x;if(M>4*i&&d--){var _=a+g,b=c+p,w=s+v,S=Math.sqrt(_*_+b*b+w*w),k=Math.asin(w/=S),E=oa(oa(w)-1)<Aa||oa(r-h)<Aa?(r+h)/2:Math.atan2(b,_),A=n(E,k),C=A[0],N=A[1],L=C-t,T=N-e,q=x*L-y*T;(q*q/M>i||oa((y*L+x*T)/M-.5)>.3||o>a*g+c*p+s*v)&&(u(t,e,r,a,c,s,C,N,E,_/=S,b/=S,w,d,m),m.point(C,N),u(C,N,E,_,b,w,l,f,h,g,p,v,d,m))}}var i=.5,o=Math.cos(30*Na),a=16;return t.precision=function(n){return arguments.length?(a=(i=n*n)>0&&16,t):Math.sqrt(i)},t}function Je(n){var t=We(function(t,e){return n([t*La,e*La])});return function(n){return tr(t(n))}}function Ge(n){this.stream=n}function Ke(n,t){return{point:t,sphere:function(){n.sphere()},lineStart:function(){n.lineStart()},lineEnd:function(){n.lineEnd()},polygonStart:function(){n.polygonStart()},polygonEnd:function(){n.polygonEnd()}}}function Qe(n){return nr(function(){return n})()}function nr(n){function t(n){return n=a(n[0]*Na,n[1]*Na),[n[0]*h+c,s-n[1]*h]}function e(n){return n=a.invert((n[0]-c)/h,(s-n[1])/h),n&&[n[0]*La,n[1]*La]}function r(){a=Ue(o=ur(m,y,x),i);var n=i(v,d);return c=g-n[0]*h,s=p+n[1]*h,u()}function u(){return l&&(l.valid=!1,l=null),t}var i,o,a,c,s,l,f=We(function(n,t){return n=i(n,t),[n[0]*h+c,s-n[1]*h]}),h=150,g=480,p=250,v=0,d=0,m=0,y=0,x=0,M=Ec,_=bt,b=null,w=null;return t.stream=function(n){return l&&(l.valid=!1),l=tr(M(o,f(_(n)))),l.valid=!0,l},t.clipAngle=function(n){return arguments.length?(M=null==n?(b=n,Ec):Re((b=+n)*Na),u()):b},t.clipExtent=function(n){return arguments.length?(w=n,_=n?Pe(n[0][0],n[0][1],n[1][0],n[1][1]):bt,u()):w},t.scale=function(n){return arguments.length?(h=+n,r()):h},t.translate=function(n){return arguments.length?(g=+n[0],p=+n[1],r()):[g,p]},t.center=function(n){return 
arguments.length?(v=n[0]%360*Na,d=n[1]%360*Na,r()):[v*La,d*La]},t.rotate=function(n){return arguments.length?(m=n[0]%360*Na,y=n[1]%360*Na,x=n.length>2?n[2]%360*Na:0,r()):[m*La,y*La,x*La]},Xo.rebind(t,f,"precision"),function(){return i=n.apply(this,arguments),t.invert=i.invert&&e,r()}}function tr(n){return Ke(n,function(t,e){n.point(t*Na,e*Na)})}function er(n,t){return[n,t]}function rr(n,t){return[n>Sa?n-ka:-Sa>n?n+ka:n,t]}function ur(n,t,e){return n?t||e?Ue(or(n),ar(t,e)):or(n):t||e?ar(t,e):rr}function ir(n){return function(t,e){return t+=n,[t>Sa?t-ka:-Sa>t?t+ka:t,e]}}function or(n){var t=ir(n);return t.invert=ir(-n),t}function ar(n,t){function e(n,t){var e=Math.cos(t),a=Math.cos(n)*e,c=Math.sin(n)*e,s=Math.sin(t),l=s*r+a*u;return[Math.atan2(c*i-l*o,a*r-s*u),X(l*i+c*o)]}var r=Math.cos(n),u=Math.sin(n),i=Math.cos(t),o=Math.sin(t);return e.invert=function(n,t){var e=Math.cos(t),a=Math.cos(n)*e,c=Math.sin(n)*e,s=Math.sin(t),l=s*i-c*o;return[Math.atan2(c*i+s*o,a*r+l*u),X(l*r-a*u)]},e}function cr(n,t){var e=Math.cos(n),r=Math.sin(n);return function(u,i,o,a){var c=o*t;null!=u?(u=sr(e,u),i=sr(e,i),(o>0?i>u:u>i)&&(u+=o*ka)):(u=n+o*ka,i=n-.5*c);for(var s,l=u;o>0?l>i:i>l;l-=c)a.point((s=ve([e,-r*Math.cos(l),-r*Math.sin(l)]))[0],s[1])}}function sr(n,t){var e=se(t);e[0]-=n,pe(e);var r=V(-e[1]);return((-e[2]<0?-r:r)+2*Math.PI-Aa)%(2*Math.PI)}function lr(n,t,e){var r=Xo.range(n,t-Aa,e).concat(t);return function(n){return r.map(function(t){return[n,t]})}}function fr(n,t,e){var r=Xo.range(n,t-Aa,e).concat(t);return function(n){return r.map(function(t){return[t,n]})}}function hr(n){return n.source}function gr(n){return n.target}function pr(n,t,e,r){var u=Math.cos(t),i=Math.sin(t),o=Math.cos(r),a=Math.sin(r),c=u*Math.cos(n),s=u*Math.sin(n),l=o*Math.cos(e),f=o*Math.sin(e),h=2*Math.asin(Math.sqrt(J(r-t)+u*o*J(e-n))),g=1/Math.sin(h),p=h?function(n){var 
t=Math.sin(n*=h)*g,e=Math.sin(h-n)*g,r=e*c+t*l,u=e*s+t*f,o=e*i+t*a;return[Math.atan2(u,r)*La,Math.atan2(o,Math.sqrt(r*r+u*u))*La]}:function(){return[n*La,t*La]};return p.distance=h,p}function vr(){function n(n,u){var i=Math.sin(u*=Na),o=Math.cos(u),a=oa((n*=Na)-t),c=Math.cos(a);Uc+=Math.atan2(Math.sqrt((a=o*Math.sin(a))*a+(a=r*i-e*o*c)*a),e*i+r*o*c),t=n,e=i,r=o}var t,e,r;jc.point=function(u,i){t=u*Na,e=Math.sin(i*=Na),r=Math.cos(i),jc.point=n},jc.lineEnd=function(){jc.point=jc.lineEnd=g}}function dr(n,t){function e(t,e){var r=Math.cos(t),u=Math.cos(e),i=n(r*u);return[i*u*Math.sin(t),i*Math.sin(e)]}return e.invert=function(n,e){var r=Math.sqrt(n*n+e*e),u=t(r),i=Math.sin(u),o=Math.cos(u);return[Math.atan2(n*i,r*o),Math.asin(r&&e*i/r)]},e}function mr(n,t){function e(n,t){var e=oa(oa(t)-Ea)<Aa?0:o/Math.pow(u(t),i);return[e*Math.sin(i*n),o-e*Math.cos(i*n)]}var r=Math.cos(n),u=function(n){return Math.tan(Sa/4+n/2)},i=n===t?Math.sin(n):Math.log(r/Math.cos(t))/Math.log(u(t)/u(n)),o=r*Math.pow(u(n),i)/i;return i?(e.invert=function(n,t){var e=o-t,r=I(i)*Math.sqrt(n*n+e*e);return[Math.atan2(n,e)/i,2*Math.atan(Math.pow(o/r,1/i))-Ea]},e):xr}function yr(n,t){function e(n,t){var e=i-t;return[e*Math.sin(u*n),i-e*Math.cos(u*n)]}var r=Math.cos(n),u=n===t?Math.sin(n):(r-Math.cos(t))/(t-n),i=r/u+n;return oa(u)<Aa?er:(e.invert=function(n,t){var e=i-t;return[Math.atan2(n,e)/u,i-I(u)*Math.sqrt(n*n+e*e)]},e)}function xr(n,t){return[n,Math.log(Math.tan(Sa/4+t/2))]}function Mr(n){var t,e=Qe(n),r=e.scale,u=e.translate,i=e.clipExtent;return e.scale=function(){var n=r.apply(e,arguments);return n===e?t?e.clipExtent(null):e:n},e.translate=function(){var n=u.apply(e,arguments);return n===e?t?e.clipExtent(null):e:n},e.clipExtent=function(n){var o=i.apply(e,arguments);if(o===e){if(t=null==n){var a=Sa*r(),c=u();i([[c[0]-a,c[1]-a],[c[0]+a,c[1]+a]])}}else t&&(o=null);return o},e.clipExtent(null)}function _r(n,t){return[Math.log(Math.tan(Sa/4+t/2)),-n]}function br(n){return n[0]}function wr(n){return 
n[1]}function Sr(n){for(var t=n.length,e=[0,1],r=2,u=2;t>u;u++){for(;r>1&&Z(n[e[r-2]],n[e[r-1]],n[u])<=0;)--r;e[r++]=u}return e.slice(0,r)}function kr(n,t){return n[0]-t[0]||n[1]-t[1]}function Er(n,t,e){return(e[0]-t[0])*(n[1]-t[1])<(e[1]-t[1])*(n[0]-t[0])}function Ar(n,t,e,r){var u=n[0],i=e[0],o=t[0]-u,a=r[0]-i,c=n[1],s=e[1],l=t[1]-c,f=r[1]-s,h=(a*(c-s)-f*(u-i))/(f*o-a*l);return[u+h*o,c+h*l]}function Cr(n){var t=n[0],e=n[n.length-1];return!(t[0]-e[0]||t[1]-e[1])}function Nr(){Jr(this),this.edge=this.site=this.circle=null}function Lr(n){var t=Jc.pop()||new Nr;return t.site=n,t}function Tr(n){Or(n),$c.remove(n),Jc.push(n),Jr(n)}function qr(n){var t=n.circle,e=t.x,r=t.cy,u={x:e,y:r},i=n.P,o=n.N,a=[n];Tr(n);for(var c=i;c.circle&&oa(e-c.circle.x)<Aa&&oa(r-c.circle.cy)<Aa;)i=c.P,a.unshift(c),Tr(c),c=i;a.unshift(c),Or(c);for(var s=o;s.circle&&oa(e-s.circle.x)<Aa&&oa(r-s.circle.cy)<Aa;)o=s.N,a.push(s),Tr(s),s=o;a.push(s),Or(s);var l,f=a.length;for(l=1;f>l;++l)s=a[l],c=a[l-1],$r(s.edge,c.site,s.site,u);c=a[0],s=a[f-1],s.edge=Vr(c.site,s.site,null,u),Fr(c),Fr(s)}function zr(n){for(var t,e,r,u,i=n.x,o=n.y,a=$c._;a;)if(r=Rr(a,o)-i,r>Aa)a=a.L;else{if(u=i-Dr(a,o),!(u>Aa)){r>-Aa?(t=a.P,e=a):u>-Aa?(t=a,e=a.N):t=e=a;break}if(!a.R){t=a;break}a=a.R}var c=Lr(n);if($c.insert(t,c),t||e){if(t===e)return Or(t),e=Lr(t.site),$c.insert(c,e),c.edge=e.edge=Vr(t.site,c.site),Fr(t),Fr(e),void 0;if(!e)return c.edge=Vr(t.site,c.site),void 0;Or(t),Or(e);var s=t.site,l=s.x,f=s.y,h=n.x-l,g=n.y-f,p=e.site,v=p.x-l,d=p.y-f,m=2*(h*d-g*v),y=h*h+g*g,x=v*v+d*d,M={x:(d*y-g*x)/m+l,y:(h*x-v*y)/m+f};$r(e.edge,s,p,M),c.edge=Vr(s,n,null,M),e.edge=Vr(n,p,null,M),Fr(t),Fr(e)}}function Rr(n,t){var e=n.site,r=e.x,u=e.y,i=u-t;if(!i)return r;var o=n.P;if(!o)return-1/0;e=o.site;var a=e.x,c=e.y,s=c-t;if(!s)return a;var l=a-r,f=1/i-1/s,h=l/s;return f?(-h+Math.sqrt(h*h-2*f*(l*l/(-2*s)-c+s/2+u-i/2)))/f+r:(r+a)/2}function Dr(n,t){var e=n.N;if(e)return Rr(e,t);var r=n.site;return r.y===t?r.x:1/0}function 
Pr(n){this.site=n,this.edges=[]}function Ur(n){for(var t,e,r,u,i,o,a,c,s,l,f=n[0][0],h=n[1][0],g=n[0][1],p=n[1][1],v=Xc,d=v.length;d--;)if(i=v[d],i&&i.prepare())for(a=i.edges,c=a.length,o=0;c>o;)l=a[o].end(),r=l.x,u=l.y,s=a[++o%c].start(),t=s.x,e=s.y,(oa(r-t)>Aa||oa(u-e)>Aa)&&(a.splice(o,0,new Br(Xr(i.site,l,oa(r-f)<Aa&&p-u>Aa?{x:f,y:oa(t-f)<Aa?e:p}:oa(u-p)<Aa&&h-r>Aa?{x:oa(e-p)<Aa?t:h,y:p}:oa(r-h)<Aa&&u-g>Aa?{x:h,y:oa(t-h)<Aa?e:g}:oa(u-g)<Aa&&r-f>Aa?{x:oa(e-g)<Aa?t:f,y:g}:null),i.site,null)),++c)}function jr(n,t){return t.angle-n.angle}function Hr(){Jr(this),this.x=this.y=this.arc=this.site=this.cy=null}function Fr(n){var t=n.P,e=n.N;if(t&&e){var r=t.site,u=n.site,i=e.site;if(r!==i){var o=u.x,a=u.y,c=r.x-o,s=r.y-a,l=i.x-o,f=i.y-a,h=2*(c*f-s*l);if(!(h>=-Ca)){var g=c*c+s*s,p=l*l+f*f,v=(f*g-s*p)/h,d=(c*p-l*g)/h,f=d+a,m=Gc.pop()||new Hr;m.arc=n,m.site=u,m.x=v+o,m.y=f+Math.sqrt(v*v+d*d),m.cy=f,n.circle=m;for(var y=null,x=Wc._;x;)if(m.y<x.y||m.y===x.y&&m.x<=x.x){if(!x.L){y=x.P;break}x=x.L}else{if(!x.R){y=x;break}x=x.R}Wc.insert(y,m),y||(Bc=m)}}}}function Or(n){var t=n.circle;t&&(t.P||(Bc=t.N),Wc.remove(t),Gc.push(t),Jr(t),n.circle=null)}function Yr(n){for(var t,e=Vc,r=De(n[0][0],n[0][1],n[1][0],n[1][1]),u=e.length;u--;)t=e[u],(!Ir(t,n)||!r(t)||oa(t.a.x-t.b.x)<Aa&&oa(t.a.y-t.b.y)<Aa)&&(t.a=t.b=null,e.splice(u,1))}function Ir(n,t){var e=n.b;if(e)return!0;var r,u,i=n.a,o=t[0][0],a=t[1][0],c=t[0][1],s=t[1][1],l=n.l,f=n.r,h=l.x,g=l.y,p=f.x,v=f.y,d=(h+p)/2,m=(g+v)/2;if(v===g){if(o>d||d>=a)return;if(h>p){if(i){if(i.y>=s)return}else i={x:d,y:c};e={x:d,y:s}}else{if(i){if(i.y<c)return}else i={x:d,y:s};e={x:d,y:c}}}else if(r=(h-p)/(v-g),u=m-r*d,-1>r||r>1)if(h>p){if(i){if(i.y>=s)return}else i={x:(c-u)/r,y:c};e={x:(s-u)/r,y:s}}else{if(i){if(i.y<c)return}else i={x:(s-u)/r,y:s};e={x:(c-u)/r,y:c}}else if(v>g){if(i){if(i.x>=a)return}else i={x:o,y:r*o+u};e={x:a,y:r*a+u}}else{if(i){if(i.x<o)return}else i={x:a,y:r*a+u};e={x:o,y:r*o+u}}return n.a=i,n.b=e,!0}function 
Zr(n,t){this.l=n,this.r=t,this.a=this.b=null}function Vr(n,t,e,r){var u=new Zr(n,t);return Vc.push(u),e&&$r(u,n,t,e),r&&$r(u,t,n,r),Xc[n.i].edges.push(new Br(u,n,t)),Xc[t.i].edges.push(new Br(u,t,n)),u}function Xr(n,t,e){var r=new Zr(n,null);return r.a=t,r.b=e,Vc.push(r),r}function $r(n,t,e,r){n.a||n.b?n.l===e?n.b=r:n.a=r:(n.a=r,n.l=t,n.r=e)}function Br(n,t,e){var r=n.a,u=n.b;this.edge=n,this.site=t,this.angle=e?Math.atan2(e.y-t.y,e.x-t.x):n.l===t?Math.atan2(u.x-r.x,r.y-u.y):Math.atan2(r.x-u.x,u.y-r.y)}function Wr(){this._=null}function Jr(n){n.U=n.C=n.L=n.R=n.P=n.N=null}function Gr(n,t){var e=t,r=t.R,u=e.U;u?u.L===e?u.L=r:u.R=r:n._=r,r.U=u,e.U=r,e.R=r.L,e.R&&(e.R.U=e),r.L=e}function Kr(n,t){var e=t,r=t.L,u=e.U;u?u.L===e?u.L=r:u.R=r:n._=r,r.U=u,e.U=r,e.L=r.R,e.L&&(e.L.U=e),r.R=e}function Qr(n){for(;n.L;)n=n.L;return n}function nu(n,t){var e,r,u,i=n.sort(tu).pop();for(Vc=[],Xc=new Array(n.length),$c=new Wr,Wc=new Wr;;)if(u=Bc,i&&(!u||i.y<u.y||i.y===u.y&&i.x<u.x))(i.x!==e||i.y!==r)&&(Xc[i.i]=new Pr(i),zr(i),e=i.x,r=i.y),i=n.pop();else{if(!u)break;qr(u.arc)}t&&(Yr(t),Ur(t));var o={cells:Xc,edges:Vc};return $c=Wc=Vc=Xc=null,o}function tu(n,t){return t.y-n.y||t.x-n.x}function eu(n,t,e){return(n.x-e.x)*(t.y-n.y)-(n.x-t.x)*(e.y-n.y)}function ru(n){return n.x}function uu(n){return n.y}function iu(){return{leaf:!0,nodes:[],point:null,x:null,y:null}}function ou(n,t,e,r,u,i){if(!n(t,e,r,u,i)){var o=.5*(e+u),a=.5*(r+i),c=t.nodes;c[0]&&ou(n,c[0],e,r,o,a),c[1]&&ou(n,c[1],o,r,u,a),c[2]&&ou(n,c[2],e,a,o,i),c[3]&&ou(n,c[3],o,a,u,i)}}function au(n,t){n=Xo.rgb(n),t=Xo.rgb(t);var e=n.r,r=n.g,u=n.b,i=t.r-e,o=t.g-r,a=t.b-u;return function(n){return"#"+vt(Math.round(e+i*n))+vt(Math.round(r+o*n))+vt(Math.round(u+a*n))}}function cu(n,t){var e,r={},u={};for(e in n)e in t?r[e]=fu(n[e],t[e]):u[e]=n[e];for(e in t)e in n||(u[e]=t[e]);return function(n){for(e in r)u[e]=r[e](n);return u}}function su(n,t){return t-=n=+n,function(e){return n+t*e}}function lu(n,t){var 
e,r,u,i,o,a=0,c=0,s=[],l=[];for(n+="",t+="",Qc.lastIndex=0,r=0;e=Qc.exec(t);++r)e.index&&s.push(t.substring(a,c=e.index)),l.push({i:s.length,x:e[0]}),s.push(null),a=Qc.lastIndex;for(a<t.length&&s.push(t.substring(a)),r=0,i=l.length;(e=Qc.exec(n))&&i>r;++r)if(o=l[r],o.x==e[0]){if(o.i)if(null==s[o.i+1])for(s[o.i-1]+=o.x,s.splice(o.i,1),u=r+1;i>u;++u)l[u].i--;else for(s[o.i-1]+=o.x+s[o.i+1],s.splice(o.i,2),u=r+1;i>u;++u)l[u].i-=2;else if(null==s[o.i+1])s[o.i]=o.x;else for(s[o.i]=o.x+s[o.i+1],s.splice(o.i+1,1),u=r+1;i>u;++u)l[u].i--;l.splice(r,1),i--,r--}else o.x=su(parseFloat(e[0]),parseFloat(o.x));for(;i>r;)o=l.pop(),null==s[o.i+1]?s[o.i]=o.x:(s[o.i]=o.x+s[o.i+1],s.splice(o.i+1,1)),i--;return 1===s.length?null==s[0]?(o=l[0].x,function(n){return o(n)+""}):function(){return t}:function(n){for(r=0;i>r;++r)s[(o=l[r]).i]=o.x(n);return s.join("")}}function fu(n,t){for(var e,r=Xo.interpolators.length;--r>=0&&!(e=Xo.interpolators[r](n,t)););return e}function hu(n,t){var e,r=[],u=[],i=n.length,o=t.length,a=Math.min(n.length,t.length);for(e=0;a>e;++e)r.push(fu(n[e],t[e]));for(;i>e;++e)u[e]=n[e];for(;o>e;++e)u[e]=t[e];return function(n){for(e=0;a>e;++e)u[e]=r[e](n);return u}}function gu(n){return function(t){return 0>=t?0:t>=1?1:n(t)}}function pu(n){return function(t){return 1-n(1-t)}}function vu(n){return function(t){return.5*(.5>t?n(2*t):2-n(2-2*t))}}function du(n){return n*n}function mu(n){return n*n*n}function yu(n){if(0>=n)return 0;if(n>=1)return 1;var t=n*n,e=t*n;return 4*(.5>n?e:3*(n-t)+e-.75)}function xu(n){return function(t){return Math.pow(t,n)}}function Mu(n){return 1-Math.cos(n*Ea)}function _u(n){return Math.pow(2,10*(n-1))}function bu(n){return 1-Math.sqrt(1-n*n)}function wu(n,t){var e;return arguments.length<2&&(t=.45),arguments.length?e=t/ka*Math.asin(1/n):(n=1,e=t/4),function(r){return 1+n*Math.pow(2,-10*r)*Math.sin((r-e)*ka/t)}}function Su(n){return n||(n=1.70158),function(t){return t*t*((n+1)*t-n)}}function ku(n){return 
1/2.75>n?7.5625*n*n:2/2.75>n?7.5625*(n-=1.5/2.75)*n+.75:2.5/2.75>n?7.5625*(n-=2.25/2.75)*n+.9375:7.5625*(n-=2.625/2.75)*n+.984375}function Eu(n,t){n=Xo.hcl(n),t=Xo.hcl(t);var e=n.h,r=n.c,u=n.l,i=t.h-e,o=t.c-r,a=t.l-u;return isNaN(o)&&(o=0,r=isNaN(r)?t.c:r),isNaN(i)?(i=0,e=isNaN(e)?t.h:e):i>180?i-=360:-180>i&&(i+=360),function(n){return rt(e+i*n,r+o*n,u+a*n)+""}}function Au(n,t){n=Xo.hsl(n),t=Xo.hsl(t);var e=n.h,r=n.s,u=n.l,i=t.h-e,o=t.s-r,a=t.l-u;return isNaN(o)&&(o=0,r=isNaN(r)?t.s:r),isNaN(i)?(i=0,e=isNaN(e)?t.h:e):i>180?i-=360:-180>i&&(i+=360),function(n){return nt(e+i*n,r+o*n,u+a*n)+""}}function Cu(n,t){n=Xo.lab(n),t=Xo.lab(t);var e=n.l,r=n.a,u=n.b,i=t.l-e,o=t.a-r,a=t.b-u;return function(n){return ot(e+i*n,r+o*n,u+a*n)+""}}function Nu(n,t){return t-=n,function(e){return Math.round(n+t*e)}}function Lu(n){var t=[n.a,n.b],e=[n.c,n.d],r=qu(t),u=Tu(t,e),i=qu(zu(e,t,-u))||0;t[0]*e[1]<e[0]*t[1]&&(t[0]*=-1,t[1]*=-1,r*=-1,u*=-1),this.rotate=(r?Math.atan2(t[1],t[0]):Math.atan2(-e[0],e[1]))*La,this.translate=[n.e,n.f],this.scale=[r,i],this.skew=i?Math.atan2(u,i)*La:0}function Tu(n,t){return n[0]*t[0]+n[1]*t[1]}function qu(n){var t=Math.sqrt(Tu(n,n));return t&&(n[0]/=t,n[1]/=t),t}function zu(n,t,e){return n[0]+=e*t[0],n[1]+=e*t[1],n}function Ru(n,t){var e,r=[],u=[],i=Xo.transform(n),o=Xo.transform(t),a=i.translate,c=o.translate,s=i.rotate,l=o.rotate,f=i.skew,h=o.skew,g=i.scale,p=o.scale;return 
a[0]!=c[0]||a[1]!=c[1]?(r.push("translate(",null,",",null,")"),u.push({i:1,x:su(a[0],c[0])},{i:3,x:su(a[1],c[1])})):c[0]||c[1]?r.push("translate("+c+")"):r.push(""),s!=l?(s-l>180?l+=360:l-s>180&&(s+=360),u.push({i:r.push(r.pop()+"rotate(",null,")")-2,x:su(s,l)})):l&&r.push(r.pop()+"rotate("+l+")"),f!=h?u.push({i:r.push(r.pop()+"skewX(",null,")")-2,x:su(f,h)}):h&&r.push(r.pop()+"skewX("+h+")"),g[0]!=p[0]||g[1]!=p[1]?(e=r.push(r.pop()+"scale(",null,",",null,")"),u.push({i:e-4,x:su(g[0],p[0])},{i:e-2,x:su(g[1],p[1])})):(1!=p[0]||1!=p[1])&&r.push(r.pop()+"scale("+p+")"),e=u.length,function(n){for(var t,i=-1;++i<e;)r[(t=u[i]).i]=t.x(n);return r.join("")}}function Du(n,t){return t=t-(n=+n)?1/(t-n):0,function(e){return(e-n)*t}}function Pu(n,t){return t=t-(n=+n)?1/(t-n):0,function(e){return Math.max(0,Math.min(1,(e-n)*t))}}function Uu(n){for(var t=n.source,e=n.target,r=Hu(t,e),u=[t];t!==r;)t=t.parent,u.push(t);for(var i=u.length;e!==r;)u.splice(i,0,e),e=e.parent;return u}function ju(n){for(var t=[],e=n.parent;null!=e;)t.push(n),n=e,e=e.parent;return t.push(n),t}function Hu(n,t){if(n===t)return n;for(var e=ju(n),r=ju(t),u=e.pop(),i=r.pop(),o=null;u===i;)o=u,u=e.pop(),i=r.pop();return o}function Fu(n){n.fixed|=2}function Ou(n){n.fixed&=-7}function Yu(n){n.fixed|=4,n.px=n.x,n.py=n.y}function Iu(n){n.fixed&=-5}function Zu(n,t,e){var r=0,u=0;if(n.charge=0,!n.leaf)for(var i,o=n.nodes,a=o.length,c=-1;++c<a;)i=o[c],null!=i&&(Zu(i,t,e),n.charge+=i.charge,r+=i.charge*i.cx,u+=i.charge*i.cy);if(n.point){n.leaf||(n.point.x+=Math.random()-.5,n.point.y+=Math.random()-.5);var s=t*e[n.point.index];n.charge+=n.pointCharge=s,r+=s*n.point.x,u+=s*n.point.y}n.cx=r/n.charge,n.cy=u/n.charge}function Vu(n,t){return Xo.rebind(n,t,"sort","children","value"),n.nodes=n,n.links=Wu,n}function Xu(n){return n.children}function $u(n){return n.value}function Bu(n,t){return t.value-n.value}function Wu(n){return 
Xo.merge(n.map(function(n){return(n.children||[]).map(function(t){return{source:n,target:t}})}))}function Ju(n){return n.x}function Gu(n){return n.y}function Ku(n,t,e){n.y0=t,n.y=e}function Qu(n){return Xo.range(n.length)}function ni(n){for(var t=-1,e=n[0].length,r=[];++t<e;)r[t]=0;return r}function ti(n){for(var t,e=1,r=0,u=n[0][1],i=n.length;i>e;++e)(t=n[e][1])>u&&(r=e,u=t);return r}function ei(n){return n.reduce(ri,0)}function ri(n,t){return n+t[1]}function ui(n,t){return ii(n,Math.ceil(Math.log(t.length)/Math.LN2+1))}function ii(n,t){for(var e=-1,r=+n[0],u=(n[1]-r)/t,i=[];++e<=t;)i[e]=u*e+r;return i}function oi(n){return[Xo.min(n),Xo.max(n)]}function ai(n,t){return n.parent==t.parent?1:2}function ci(n){var t=n.children;return t&&t.length?t[0]:n._tree.thread}function si(n){var t,e=n.children;return e&&(t=e.length)?e[t-1]:n._tree.thread}function li(n,t){var e=n.children;if(e&&(u=e.length))for(var r,u,i=-1;++i<u;)t(r=li(e[i],t),n)>0&&(n=r);return n}function fi(n,t){return n.x-t.x}function hi(n,t){return t.x-n.x}function gi(n,t){return n.depth-t.depth}function pi(n,t){function e(n,r){var u=n.children;if(u&&(o=u.length))for(var i,o,a=null,c=-1;++c<o;)i=u[c],e(i,a),a=i;t(n,r)}e(n,null)}function vi(n){for(var t,e=0,r=0,u=n.children,i=u.length;--i>=0;)t=u[i]._tree,t.prelim+=e,t.mod+=e,e+=t.shift+(r+=t.change)}function di(n,t,e){n=n._tree,t=t._tree;var r=e/(t.number-n.number);n.change+=r,t.change-=r,t.shift+=e,t.prelim+=e,t.mod+=e}function mi(n,t,e){return n._tree.ancestor.parent==t.parent?n._tree.ancestor:e}function yi(n,t){return n.value-t.value}function xi(n,t){var e=n._pack_next;n._pack_next=t,t._pack_prev=n,t._pack_next=e,e._pack_prev=t}function Mi(n,t){n._pack_next=t,t._pack_prev=n}function _i(n,t){var e=t.x-n.x,r=t.y-n.y,u=n.r+t.r;return.999*u*u>e*e+r*r}function bi(n){function t(n){l=Math.min(n.x-n.r,l),f=Math.max(n.x+n.r,f),h=Math.min(n.y-n.r,h),g=Math.max(n.y+n.r,g)}if((e=n.children)&&(s=e.length)){var 
e,r,u,i,o,a,c,s,l=1/0,f=-1/0,h=1/0,g=-1/0;if(e.forEach(wi),r=e[0],r.x=-r.r,r.y=0,t(r),s>1&&(u=e[1],u.x=u.r,u.y=0,t(u),s>2))for(i=e[2],Ei(r,u,i),t(i),xi(r,i),r._pack_prev=i,xi(i,u),u=r._pack_next,o=3;s>o;o++){Ei(r,u,i=e[o]);var p=0,v=1,d=1;for(a=u._pack_next;a!==u;a=a._pack_next,v++)if(_i(a,i)){p=1;break}if(1==p)for(c=r._pack_prev;c!==a._pack_prev&&!_i(c,i);c=c._pack_prev,d++);p?(d>v||v==d&&u.r<r.r?Mi(r,u=a):Mi(r=c,u),o--):(xi(r,i),u=i,t(i))}var m=(l+f)/2,y=(h+g)/2,x=0;for(o=0;s>o;o++)i=e[o],i.x-=m,i.y-=y,x=Math.max(x,i.r+Math.sqrt(i.x*i.x+i.y*i.y));n.r=x,e.forEach(Si)}}function wi(n){n._pack_next=n._pack_prev=n}function Si(n){delete n._pack_next,delete n._pack_prev}function ki(n,t,e,r){var u=n.children;if(n.x=t+=r*n.x,n.y=e+=r*n.y,n.r*=r,u)for(var i=-1,o=u.length;++i<o;)ki(u[i],t,e,r)}function Ei(n,t,e){var r=n.r+e.r,u=t.x-n.x,i=t.y-n.y;if(r&&(u||i)){var o=t.r+e.r,a=u*u+i*i;o*=o,r*=r;var c=.5+(r-o)/(2*a),s=Math.sqrt(Math.max(0,2*o*(r+a)-(r-=a)*r-o*o))/(2*a);e.x=n.x+c*u+s*i,e.y=n.y+c*i-s*u}else e.x=n.x+r,e.y=n.y}function Ai(n){return 1+Xo.max(n,function(n){return n.y})}function Ci(n){return n.reduce(function(n,t){return n+t.x},0)/n.length}function Ni(n){var t=n.children;return t&&t.length?Ni(t[0]):n}function Li(n){var t,e=n.children;return e&&(t=e.length)?Li(e[t-1]):n}function Ti(n){return{x:n.x,y:n.y,dx:n.dx,dy:n.dy}}function qi(n,t){var e=n.x+t[3],r=n.y+t[0],u=n.dx-t[1]-t[3],i=n.dy-t[0]-t[2];return 0>u&&(e+=u/2,u=0),0>i&&(r+=i/2,i=0),{x:e,y:r,dx:u,dy:i}}function zi(n){var t=n[0],e=n[n.length-1];return e>t?[t,e]:[e,t]}function Ri(n){return n.rangeExtent?n.rangeExtent():zi(n.range())}function Di(n,t,e,r){var u=e(n[0],n[1]),i=r(t[0],t[1]);return function(n){return i(u(n))}}function Pi(n,t){var e,r=0,u=n.length-1,i=n[r],o=n[u];return i>o&&(e=r,r=u,u=e,e=i,i=o,o=e),n[r]=t.floor(i),n[u]=t.ceil(o),n}function Ui(n){return n?{floor:function(t){return Math.floor(t/n)*n},ceil:function(t){return Math.ceil(t/n)*n}}:ls}function ji(n,t,e,r){var 
u=[],i=[],o=0,a=Math.min(n.length,t.length)-1;for(n[a]<n[0]&&(n=n.slice().reverse(),t=t.slice().reverse());++o<=a;)u.push(e(n[o-1],n[o])),i.push(r(t[o-1],t[o]));return function(t){var e=Xo.bisect(n,t,1,a)-1;return i[e](u[e](t))}}function Hi(n,t,e,r){function u(){var u=Math.min(n.length,t.length)>2?ji:Di,c=r?Pu:Du;return o=u(n,t,c,e),a=u(t,n,c,fu),i}function i(n){return o(n)}var o,a;return i.invert=function(n){return a(n)},i.domain=function(t){return arguments.length?(n=t.map(Number),u()):n},i.range=function(n){return arguments.length?(t=n,u()):t},i.rangeRound=function(n){return i.range(n).interpolate(Nu)},i.clamp=function(n){return arguments.length?(r=n,u()):r},i.interpolate=function(n){return arguments.length?(e=n,u()):e},i.ticks=function(t){return Ii(n,t)},i.tickFormat=function(t,e){return Zi(n,t,e)},i.nice=function(t){return Oi(n,t),u()},i.copy=function(){return Hi(n,t,e,r)},u()}function Fi(n,t){return Xo.rebind(n,t,"range","rangeRound","interpolate","clamp")}function Oi(n,t){return Pi(n,Ui(Yi(n,t)[2]))}function Yi(n,t){null==t&&(t=10);var e=zi(n),r=e[1]-e[0],u=Math.pow(10,Math.floor(Math.log(r/t)/Math.LN10)),i=t/r*u;return.15>=i?u*=10:.35>=i?u*=5:.75>=i&&(u*=2),e[0]=Math.ceil(e[0]/u)*u,e[1]=Math.floor(e[1]/u)*u+.5*u,e[2]=u,e}function Ii(n,t){return Xo.range.apply(Xo,Yi(n,t))}function Zi(n,t,e){var r=Yi(n,t);return Xo.format(e?e.replace(Qa,function(n,t,e,u,i,o,a,c,s,l){return[t,e,u,i,o,a,c,s||"."+Xi(l,r),l].join("")}):",."+Vi(r[2])+"f")}function Vi(n){return-Math.floor(Math.log(n)/Math.LN10+.01)}function Xi(n,t){var e=Vi(t[2]);return n in fs?Math.abs(e-Vi(Math.max(Math.abs(t[0]),Math.abs(t[1]))))+ +("e"!==n):e-2*("%"===n)}function $i(n,t,e,r){function u(n){return(e?Math.log(0>n?0:n):-Math.log(n>0?0:-n))/Math.log(t)}function i(n){return e?Math.pow(t,n):-Math.pow(t,-n)}function o(t){return n(u(t))}return o.invert=function(t){return i(n.invert(t))},o.domain=function(t){return 
arguments.length?(e=t[0]>=0,n.domain((r=t.map(Number)).map(u)),o):r},o.base=function(e){return arguments.length?(t=+e,n.domain(r.map(u)),o):t},o.nice=function(){var t=Pi(r.map(u),e?Math:gs);return n.domain(t),r=t.map(i),o},o.ticks=function(){var n=zi(r),o=[],a=n[0],c=n[1],s=Math.floor(u(a)),l=Math.ceil(u(c)),f=t%1?2:t;if(isFinite(l-s)){if(e){for(;l>s;s++)for(var h=1;f>h;h++)o.push(i(s)*h);o.push(i(s))}else for(o.push(i(s));s++<l;)for(var h=f-1;h>0;h--)o.push(i(s)*h);for(s=0;o[s]<a;s++);for(l=o.length;o[l-1]>c;l--);o=o.slice(s,l)}return o},o.tickFormat=function(n,t){if(!arguments.length)return hs;arguments.length<2?t=hs:"function"!=typeof t&&(t=Xo.format(t));var r,a=Math.max(.1,n/o.ticks().length),c=e?(r=1e-12,Math.ceil):(r=-1e-12,Math.floor);return function(n){return n/i(c(u(n)+r))<=a?t(n):""}},o.copy=function(){return $i(n.copy(),t,e,r)},Fi(o,n)}function Bi(n,t,e){function r(t){return n(u(t))}var u=Wi(t),i=Wi(1/t);return r.invert=function(t){return i(n.invert(t))},r.domain=function(t){return arguments.length?(n.domain((e=t.map(Number)).map(u)),r):e},r.ticks=function(n){return Ii(e,n)},r.tickFormat=function(n,t){return Zi(e,n,t)},r.nice=function(n){return r.domain(Oi(e,n))},r.exponent=function(o){return arguments.length?(u=Wi(t=o),i=Wi(1/t),n.domain(e.map(u)),r):t},r.copy=function(){return Bi(n.copy(),t,e)},Fi(r,n)}function Wi(n){return function(t){return 0>t?-Math.pow(-t,n):Math.pow(t,n)}}function Ji(n,t){function e(e){return o[((i.get(e)||"range"===t.t&&i.set(e,n.push(e)))-1)%o.length]}function r(t,e){return Xo.range(n.length).map(function(n){return t+e*n})}var i,o,a;return e.domain=function(r){if(!arguments.length)return n;n=[],i=new u;for(var o,a=-1,c=r.length;++a<c;)i.has(o=r[a])||i.set(o,n.push(o));return e[t.t].apply(e,t.a)},e.range=function(n){return arguments.length?(o=n,a=0,t={t:"range",a:arguments},e):o},e.rangePoints=function(u,i){arguments.length<2&&(i=0);var c=u[0],s=u[1],l=(s-c)/(Math.max(1,n.length-1)+i);return 
o=r(n.length<2?(c+s)/2:c+l*i/2,l),a=0,t={t:"rangePoints",a:arguments},e},e.rangeBands=function(u,i,c){arguments.length<2&&(i=0),arguments.length<3&&(c=i);var s=u[1]<u[0],l=u[s-0],f=u[1-s],h=(f-l)/(n.length-i+2*c);return o=r(l+h*c,h),s&&o.reverse(),a=h*(1-i),t={t:"rangeBands",a:arguments},e},e.rangeRoundBands=function(u,i,c){arguments.length<2&&(i=0),arguments.length<3&&(c=i);var s=u[1]<u[0],l=u[s-0],f=u[1-s],h=Math.floor((f-l)/(n.length-i+2*c)),g=f-l-(n.length-i)*h;return o=r(l+Math.round(g/2),h),s&&o.reverse(),a=Math.round(h*(1-i)),t={t:"rangeRoundBands",a:arguments},e},e.rangeBand=function(){return a},e.rangeExtent=function(){return zi(t.a[0])},e.copy=function(){return Ji(n,t)},e.domain(n)}function Gi(n,t){function e(){var e=0,i=t.length;for(u=[];++e<i;)u[e-1]=Xo.quantile(n,e/i);return r}function r(n){return isNaN(n=+n)?void 0:t[Xo.bisect(u,n)]}var u;return r.domain=function(t){return arguments.length?(n=t.filter(function(n){return!isNaN(n)}).sort(Xo.ascending),e()):n},r.range=function(n){return arguments.length?(t=n,e()):t},r.quantiles=function(){return u},r.invertExtent=function(e){return e=t.indexOf(e),0>e?[0/0,0/0]:[e>0?u[e-1]:n[0],e<u.length?u[e]:n[n.length-1]]},r.copy=function(){return Gi(n,t)},e()}function Ki(n,t,e){function r(t){return e[Math.max(0,Math.min(o,Math.floor(i*(t-n))))]}function u(){return i=e.length/(t-n),o=e.length-1,r}var i,o;return r.domain=function(e){return arguments.length?(n=+e[0],t=+e[e.length-1],u()):[n,t]},r.range=function(n){return arguments.length?(e=n,u()):e},r.invertExtent=function(t){return t=e.indexOf(t),t=0>t?0/0:t/i+n,[t,t+1/i]},r.copy=function(){return Ki(n,t,e)},u()}function Qi(n,t){function e(e){return e>=e?t[Xo.bisect(n,e)]:void 0}return e.domain=function(t){return arguments.length?(n=t,e):n},e.range=function(n){return arguments.length?(t=n,e):t},e.invertExtent=function(e){return e=t.indexOf(e),[n[e-1],n[e]]},e.copy=function(){return Qi(n,t)},e}function no(n){function t(n){return+n}return 
t.invert=t,t.domain=t.range=function(e){return arguments.length?(n=e.map(t),t):n},t.ticks=function(t){return Ii(n,t)},t.tickFormat=function(t,e){return Zi(n,t,e)},t.copy=function(){return no(n)},t}function to(n){return n.innerRadius}function eo(n){return n.outerRadius}function ro(n){return n.startAngle}function uo(n){return n.endAngle}function io(n){function t(t){function o(){s.push("M",i(n(l),a))}for(var c,s=[],l=[],f=-1,h=t.length,g=_t(e),p=_t(r);++f<h;)u.call(this,c=t[f],f)?l.push([+g.call(this,c,f),+p.call(this,c,f)]):l.length&&(o(),l=[]);return l.length&&o(),s.length?s.join(""):null}var e=br,r=wr,u=be,i=oo,o=i.key,a=.7;return t.x=function(n){return arguments.length?(e=n,t):e},t.y=function(n){return arguments.length?(r=n,t):r},t.defined=function(n){return arguments.length?(u=n,t):u},t.interpolate=function(n){return arguments.length?(o="function"==typeof n?i=n:(i=Ms.get(n)||oo).key,t):o},t.tension=function(n){return arguments.length?(a=n,t):a},t}function oo(n){return n.join("L")}function ao(n){return oo(n)+"Z"}function co(n){for(var t=0,e=n.length,r=n[0],u=[r[0],",",r[1]];++t<e;)u.push("H",(r[0]+(r=n[t])[0])/2,"V",r[1]);return e>1&&u.push("H",r[0]),u.join("")}function so(n){for(var t=0,e=n.length,r=n[0],u=[r[0],",",r[1]];++t<e;)u.push("V",(r=n[t])[1],"H",r[0]);return u.join("")}function lo(n){for(var t=0,e=n.length,r=n[0],u=[r[0],",",r[1]];++t<e;)u.push("H",(r=n[t])[0],"V",r[1]);return u.join("")}function fo(n,t){return n.length<4?oo(n):n[1]+po(n.slice(1,n.length-1),vo(n,t))}function ho(n,t){return n.length<3?oo(n):n[0]+po((n.push(n[0]),n),vo([n[n.length-2]].concat(n,[n[1]]),t))}function go(n,t){return n.length<3?oo(n):n[0]+po(n,vo(n,t))}function po(n,t){if(t.length<1||n.length!=t.length&&n.length!=t.length+2)return oo(n);var 
e=n.length!=t.length,r="",u=n[0],i=n[1],o=t[0],a=o,c=1;if(e&&(r+="Q"+(i[0]-2*o[0]/3)+","+(i[1]-2*o[1]/3)+","+i[0]+","+i[1],u=n[1],c=2),t.length>1){a=t[1],i=n[c],c++,r+="C"+(u[0]+o[0])+","+(u[1]+o[1])+","+(i[0]-a[0])+","+(i[1]-a[1])+","+i[0]+","+i[1];for(var s=2;s<t.length;s++,c++)i=n[c],a=t[s],r+="S"+(i[0]-a[0])+","+(i[1]-a[1])+","+i[0]+","+i[1]}if(e){var l=n[c];r+="Q"+(i[0]+2*a[0]/3)+","+(i[1]+2*a[1]/3)+","+l[0]+","+l[1]}return r}function vo(n,t){for(var e,r=[],u=(1-t)/2,i=n[0],o=n[1],a=1,c=n.length;++a<c;)e=i,i=o,o=n[a],r.push([u*(o[0]-e[0]),u*(o[1]-e[1])]);return r}function mo(n){if(n.length<3)return oo(n);var t=1,e=n.length,r=n[0],u=r[0],i=r[1],o=[u,u,u,(r=n[1])[0]],a=[i,i,i,r[1]],c=[u,",",i,"L",_o(ws,o),",",_o(ws,a)];for(n.push(n[e-1]);++t<=e;)r=n[t],o.shift(),o.push(r[0]),a.shift(),a.push(r[1]),bo(c,o,a);return n.pop(),c.push("L",r),c.join("")}function yo(n){if(n.length<4)return oo(n);for(var t,e=[],r=-1,u=n.length,i=[0],o=[0];++r<3;)t=n[r],i.push(t[0]),o.push(t[1]);for(e.push(_o(ws,i)+","+_o(ws,o)),--r;++r<u;)t=n[r],i.shift(),i.push(t[0]),o.shift(),o.push(t[1]),bo(e,i,o);return e.join("")}function xo(n){for(var t,e,r=-1,u=n.length,i=u+4,o=[],a=[];++r<4;)e=n[r%u],o.push(e[0]),a.push(e[1]);for(t=[_o(ws,o),",",_o(ws,a)],--r;++r<i;)e=n[r%u],o.shift(),o.push(e[0]),a.shift(),a.push(e[1]),bo(t,o,a);return t.join("")}function Mo(n,t){var e=n.length-1;if(e)for(var r,u,i=n[0][0],o=n[0][1],a=n[e][0]-i,c=n[e][1]-o,s=-1;++s<=e;)r=n[s],u=s/e,r[0]=t*r[0]+(1-t)*(i+u*a),r[1]=t*r[1]+(1-t)*(o+u*c);return mo(n)}function _o(n,t){return n[0]*t[0]+n[1]*t[1]+n[2]*t[2]+n[3]*t[3]}function bo(n,t,e){n.push("C",_o(_s,t),",",_o(_s,e),",",_o(bs,t),",",_o(bs,e),",",_o(ws,t),",",_o(ws,e))}function wo(n,t){return(t[1]-n[1])/(t[0]-n[0])}function So(n){for(var t=0,e=n.length-1,r=[],u=n[0],i=n[1],o=r[0]=wo(u,i);++t<e;)r[t]=(o+(o=wo(u=i,i=n[t+1])))/2;return r[t]=o,r}function ko(n){for(var 
t,e,r,u,i=[],o=So(n),a=-1,c=n.length-1;++a<c;)t=wo(n[a],n[a+1]),oa(t)<Aa?o[a]=o[a+1]=0:(e=o[a]/t,r=o[a+1]/t,u=e*e+r*r,u>9&&(u=3*t/Math.sqrt(u),o[a]=u*e,o[a+1]=u*r));for(a=-1;++a<=c;)u=(n[Math.min(c,a+1)][0]-n[Math.max(0,a-1)][0])/(6*(1+o[a]*o[a])),i.push([u||0,o[a]*u||0]);return i}function Eo(n){return n.length<3?oo(n):n[0]+po(n,ko(n))}function Ao(n){for(var t,e,r,u=-1,i=n.length;++u<i;)t=n[u],e=t[0],r=t[1]+ys,t[0]=e*Math.cos(r),t[1]=e*Math.sin(r);return n}function Co(n){function t(t){function c(){v.push("M",a(n(m),f),l,s(n(d.reverse()),f),"Z")}for(var h,g,p,v=[],d=[],m=[],y=-1,x=t.length,M=_t(e),_=_t(u),b=e===r?function(){return g}:_t(r),w=u===i?function(){return p}:_t(i);++y<x;)o.call(this,h=t[y],y)?(d.push([g=+M.call(this,h,y),p=+_.call(this,h,y)]),m.push([+b.call(this,h,y),+w.call(this,h,y)])):d.length&&(c(),d=[],m=[]);return d.length&&c(),v.length?v.join(""):null}var e=br,r=br,u=0,i=wr,o=be,a=oo,c=a.key,s=a,l="L",f=.7;return t.x=function(n){return arguments.length?(e=r=n,t):r},t.x0=function(n){return arguments.length?(e=n,t):e},t.x1=function(n){return arguments.length?(r=n,t):r},t.y=function(n){return arguments.length?(u=i=n,t):i},t.y0=function(n){return arguments.length?(u=n,t):u},t.y1=function(n){return arguments.length?(i=n,t):i},t.defined=function(n){return arguments.length?(o=n,t):o},t.interpolate=function(n){return arguments.length?(c="function"==typeof n?a=n:(a=Ms.get(n)||oo).key,s=a.reverse||a,l=a.closed?"M":"L",t):c},t.tension=function(n){return arguments.length?(f=n,t):f},t}function No(n){return n.radius}function Lo(n){return[n.x,n.y]}function To(n){return function(){var t=n.apply(this,arguments),e=t[0],r=t[1]+ys;return[e*Math.cos(r),e*Math.sin(r)]}}function qo(){return 64}function zo(){return"circle"}function Ro(n){var t=Math.sqrt(n/Sa);return"M0,"+t+"A"+t+","+t+" 0 1,1 0,"+-t+"A"+t+","+t+" 0 1,1 0,"+t+"Z"}function Do(n,t){return fa(n,Ns),n.id=t,n}function Po(n,t,e,r){var u=n.id;return R(n,"function"==typeof 
e?function(n,i,o){n.__transition__[u].tween.set(t,r(e.call(n,n.__data__,i,o)))}:(e=r(e),function(n){n.__transition__[u].tween.set(t,e)}))}function Uo(n){return null==n&&(n=""),function(){this.textContent=n}}function jo(n,t,e,r){var i=n.__transition__||(n.__transition__={active:0,count:0}),o=i[e];if(!o){var a=r.time;o=i[e]={tween:new u,time:a,ease:r.ease,delay:r.delay,duration:r.duration},++i.count,Xo.timer(function(r){function u(r){return i.active>e?s():(i.active=e,o.event&&o.event.start.call(n,l,t),o.tween.forEach(function(e,r){(r=r.call(n,l,t))&&v.push(r)}),Xo.timer(function(){return p.c=c(r||1)?be:c,1},0,a),void 0)}function c(r){if(i.active!==e)return s();for(var u=r/g,a=f(u),c=v.length;c>0;)v[--c].call(n,a);return u>=1?(o.event&&o.event.end.call(n,l,t),s()):void 0}function s(){return--i.count?delete i[e]:delete n.__transition__,1}var l=n.__data__,f=o.ease,h=o.delay,g=o.duration,p=Ja,v=[];return p.t=h+a,r>=h?u(r-h):(p.c=u,void 0)},0,a)}}function Ho(n,t){n.attr("transform",function(n){return"translate("+t(n)+",0)"})}function Fo(n,t){n.attr("transform",function(n){return"translate(0,"+t(n)+")"})}function Oo(n){return n.toISOString()}function Yo(n,t,e){function r(t){return n(t)}function u(n,e){var r=n[1]-n[0],u=r/e,i=Xo.bisect(js,u);return i==js.length?[t.year,Yi(n.map(function(n){return n/31536e6}),e)[2]]:i?t[u/js[i-1]<js[i]/u?i-1:i]:[Os,Yi(n,e)[2]]}return r.invert=function(t){return Io(n.invert(t))},r.domain=function(t){return arguments.length?(n.domain(t),r):n.domain().map(Io)},r.nice=function(n,t){function e(e){return!isNaN(e)&&!n.range(e,Io(+e+1),t).length}var i=r.domain(),o=zi(i),a=null==n?u(o,10):"number"==typeof n&&u(o,n);return a&&(n=a[0],t=a[1]),r.domain(Pi(i,t>1?{floor:function(t){for(;e(t=n.floor(t));)t=Io(t-1);return t},ceil:function(t){for(;e(t=n.ceil(t));)t=Io(+t+1);return t}}:n))},r.ticks=function(n,t){var e=zi(r.domain()),i=null==n?u(e,10):"number"==typeof n?u(e,n):!n.range&&[{range:n},t];return 
i&&(n=i[0],t=i[1]),n.range(e[0],Io(+e[1]+1),1>t?1:t)},r.tickFormat=function(){return e},r.copy=function(){return Yo(n.copy(),t,e)},Fi(r,n)}function Io(n){return new Date(n)}function Zo(n){return JSON.parse(n.responseText)}function Vo(n){var t=Wo.createRange();return t.selectNode(Wo.body),t.createContextualFragment(n.responseText)}var Xo={version:"3.4.3"};Date.now||(Date.now=function(){return+new Date});var $o=[].slice,Bo=function(n){return $o.call(n)},Wo=document,Jo=Wo.documentElement,Go=window;try{Bo(Jo.childNodes)[0].nodeType}catch(Ko){Bo=function(n){for(var t=n.length,e=new Array(t);t--;)e[t]=n[t];return e}}try{Wo.createElement("div").style.setProperty("opacity",0,"")}catch(Qo){var na=Go.Element.prototype,ta=na.setAttribute,ea=na.setAttributeNS,ra=Go.CSSStyleDeclaration.prototype,ua=ra.setProperty;na.setAttribute=function(n,t){ta.call(this,n,t+"")},na.setAttributeNS=function(n,t,e){ea.call(this,n,t,e+"")},ra.setProperty=function(n,t,e){ua.call(this,n,t+"",e)}}Xo.ascending=function(n,t){return t>n?-1:n>t?1:n>=t?0:0/0},Xo.descending=function(n,t){return n>t?-1:t>n?1:t>=n?0:0/0},Xo.min=function(n,t){var e,r,u=-1,i=n.length;if(1===arguments.length){for(;++u<i&&!(null!=(e=n[u])&&e>=e);)e=void 0;for(;++u<i;)null!=(r=n[u])&&e>r&&(e=r)}else{for(;++u<i&&!(null!=(e=t.call(n,n[u],u))&&e>=e);)e=void 0;for(;++u<i;)null!=(r=t.call(n,n[u],u))&&e>r&&(e=r)}return e},Xo.max=function(n,t){var e,r,u=-1,i=n.length;if(1===arguments.length){for(;++u<i&&!(null!=(e=n[u])&&e>=e);)e=void 0;for(;++u<i;)null!=(r=n[u])&&r>e&&(e=r)}else{for(;++u<i&&!(null!=(e=t.call(n,n[u],u))&&e>=e);)e=void 0;for(;++u<i;)null!=(r=t.call(n,n[u],u))&&r>e&&(e=r)}return e},Xo.extent=function(n,t){var e,r,u,i=-1,o=n.length;if(1===arguments.length){for(;++i<o&&!(null!=(e=u=n[i])&&e>=e);)e=u=void 0;for(;++i<o;)null!=(r=n[i])&&(e>r&&(e=r),r>u&&(u=r))}else{for(;++i<o&&!(null!=(e=u=t.call(n,n[i],i))&&e>=e);)e=void 0;for(;++i<o;)null!=(r=t.call(n,n[i],i))&&(e>r&&(e=r),r>u&&(u=r))}return[e,u]},Xo.sum=function(n,t){var 
e,r=0,u=n.length,i=-1;if(1===arguments.length)for(;++i<u;)isNaN(e=+n[i])||(r+=e);else for(;++i<u;)isNaN(e=+t.call(n,n[i],i))||(r+=e);return r},Xo.mean=function(t,e){var r,u=t.length,i=0,o=-1,a=0;if(1===arguments.length)for(;++o<u;)n(r=t[o])&&(i+=(r-i)/++a);else for(;++o<u;)n(r=e.call(t,t[o],o))&&(i+=(r-i)/++a);return a?i:void 0},Xo.quantile=function(n,t){var e=(n.length-1)*t+1,r=Math.floor(e),u=+n[r-1],i=e-r;return i?u+i*(n[r]-u):u},Xo.median=function(t,e){return arguments.length>1&&(t=t.map(e)),t=t.filter(n),t.length?Xo.quantile(t.sort(Xo.ascending),.5):void 0},Xo.bisector=function(n){return{left:function(t,e,r,u){for(arguments.length<3&&(r=0),arguments.length<4&&(u=t.length);u>r;){var i=r+u>>>1;n.call(t,t[i],i)<e?r=i+1:u=i}return r},right:function(t,e,r,u){for(arguments.length<3&&(r=0),arguments.length<4&&(u=t.length);u>r;){var i=r+u>>>1;e<n.call(t,t[i],i)?u=i:r=i+1}return r}}};var ia=Xo.bisector(function(n){return n});Xo.bisectLeft=ia.left,Xo.bisect=Xo.bisectRight=ia.right,Xo.shuffle=function(n){for(var t,e,r=n.length;r;)e=0|Math.random()*r--,t=n[r],n[r]=n[e],n[e]=t;return n},Xo.permute=function(n,t){for(var e=t.length,r=new Array(e);e--;)r[e]=n[t[e]];return r},Xo.pairs=function(n){for(var t,e=0,r=n.length-1,u=n[0],i=new Array(0>r?0:r);r>e;)i[e]=[t=u,u=n[++e]];return i},Xo.zip=function(){if(!(u=arguments.length))return[];for(var n=-1,e=Xo.min(arguments,t),r=new Array(e);++n<e;)for(var u,i=-1,o=r[n]=new Array(u);++i<u;)o[i]=arguments[i][n];return r},Xo.transpose=function(n){return Xo.zip.apply(Xo,n)},Xo.keys=function(n){var t=[];for(var e in n)t.push(e);return t},Xo.values=function(n){var t=[];for(var e in n)t.push(n[e]);return t},Xo.entries=function(n){var t=[];for(var e in n)t.push({key:e,value:n[e]});return t},Xo.merge=function(n){for(var t,e,r,u=n.length,i=-1,o=0;++i<u;)o+=n[i].length;for(e=new Array(o);--u>=0;)for(r=n[u],t=r.length;--t>=0;)e[--o]=r[t];return e};var 
oa=Math.abs;Xo.range=function(n,t,r){if(arguments.length<3&&(r=1,arguments.length<2&&(t=n,n=0)),1/0===(t-n)/r)throw new Error("infinite range");var u,i=[],o=e(oa(r)),a=-1;if(n*=o,t*=o,r*=o,0>r)for(;(u=n+r*++a)>t;)i.push(u/o);else for(;(u=n+r*++a)<t;)i.push(u/o);return i},Xo.map=function(n){var t=new u;if(n instanceof u)n.forEach(function(n,e){t.set(n,e)});else for(var e in n)t.set(e,n[e]);return t},r(u,{has:i,get:function(n){return this[aa+n]},set:function(n,t){return this[aa+n]=t},remove:o,keys:a,values:function(){var n=[];return this.forEach(function(t,e){n.push(e)}),n},entries:function(){var n=[];return this.forEach(function(t,e){n.push({key:t,value:e})}),n},size:c,empty:s,forEach:function(n){for(var t in this)t.charCodeAt(0)===ca&&n.call(this,t.substring(1),this[t])}});var aa="\x00",ca=aa.charCodeAt(0);Xo.nest=function(){function n(t,a,c){if(c>=o.length)return r?r.call(i,a):e?a.sort(e):a;for(var s,l,f,h,g=-1,p=a.length,v=o[c++],d=new u;++g<p;)(h=d.get(s=v(l=a[g])))?h.push(l):d.set(s,[l]);return t?(l=t(),f=function(e,r){l.set(e,n(t,r,c))}):(l={},f=function(e,r){l[e]=n(t,r,c)}),d.forEach(f),l}function t(n,e){if(e>=o.length)return n;var r=[],u=a[e++];return n.forEach(function(n,u){r.push({key:n,values:t(u,e)})}),u?r.sort(function(n,t){return u(n.key,t.key)}):r}var e,r,i={},o=[],a=[];return i.map=function(t,e){return n(e,t,0)},i.entries=function(e){return t(n(Xo.map,e,0),0)},i.key=function(n){return o.push(n),i},i.sortKeys=function(n){return a[o.length-1]=n,i},i.sortValues=function(n){return e=n,i},i.rollup=function(n){return r=n,i},i},Xo.set=function(n){var t=new l;if(n)for(var e=0,r=n.length;r>e;++e)t.add(n[e]);return t},r(l,{has:i,add:function(n){return this[aa+n]=!0,n},remove:function(n){return n=aa+n,n in this&&delete this[n]},values:a,size:c,empty:s,forEach:function(n){for(var t in this)t.charCodeAt(0)===ca&&n.call(this,t.substring(1))}}),Xo.behavior={},Xo.rebind=function(n,t){for(var e,r=1,u=arguments.length;++r<u;)n[e=arguments[r]]=f(n,t,t[e]);return n};var 
sa=["webkit","ms","moz","Moz","o","O"];Xo.dispatch=function(){for(var n=new p,t=-1,e=arguments.length;++t<e;)n[arguments[t]]=v(n);return n},p.prototype.on=function(n,t){var e=n.indexOf("."),r="";if(e>=0&&(r=n.substring(e+1),n=n.substring(0,e)),n)return arguments.length<2?this[n].on(r):this[n].on(r,t);if(2===arguments.length){if(null==t)for(n in this)this.hasOwnProperty(n)&&this[n].on(r,null);return this}},Xo.event=null,Xo.requote=function(n){return n.replace(la,"\\$&")};var la=/[\\\^\$\*\+\?\|\[\]\(\)\.\{\}]/g,fa={}.__proto__?function(n,t){n.__proto__=t}:function(n,t){for(var e in t)n[e]=t[e]},ha=function(n,t){return t.querySelector(n)},ga=function(n,t){return t.querySelectorAll(n)},pa=Jo[h(Jo,"matchesSelector")],va=function(n,t){return pa.call(n,t)};"function"==typeof Sizzle&&(ha=function(n,t){return Sizzle(n,t)[0]||null},ga=Sizzle,va=Sizzle.matchesSelector),Xo.selection=function(){return xa};var da=Xo.selection.prototype=[];da.select=function(n){var t,e,r,u,i=[];n=M(n);for(var o=-1,a=this.length;++o<a;){i.push(t=[]),t.parentNode=(r=this[o]).parentNode;for(var c=-1,s=r.length;++c<s;)(u=r[c])?(t.push(e=n.call(u,u.__data__,c,o)),e&&"__data__"in u&&(e.__data__=u.__data__)):t.push(null)}return x(i)},da.selectAll=function(n){var t,e,r=[];n=_(n);for(var u=-1,i=this.length;++u<i;)for(var o=this[u],a=-1,c=o.length;++a<c;)(e=o[a])&&(r.push(t=Bo(n.call(e,e.__data__,a,u))),t.parentNode=e);return x(r)};var ma={svg:"http://www.w3.org/2000/svg",xhtml:"http://www.w3.org/1999/xhtml",xlink:"http://www.w3.org/1999/xlink",xml:"http://www.w3.org/XML/1998/namespace",xmlns:"http://www.w3.org/2000/xmlns/"};Xo.ns={prefix:ma,qualify:function(n){var t=n.indexOf(":"),e=n;return t>=0&&(e=n.substring(0,t),n=n.substring(t+1)),ma.hasOwnProperty(e)?{space:ma[e],local:n}:n}},da.attr=function(n,t){if(arguments.length<2){if("string"==typeof n){var e=this.node();return n=Xo.ns.qualify(n),n.local?e.getAttributeNS(n.space,n.local):e.getAttribute(n)}for(t in n)this.each(b(t,n[t]));return this}return 
this.each(b(n,t))},da.classed=function(n,t){if(arguments.length<2){if("string"==typeof n){var e=this.node(),r=(n=k(n)).length,u=-1;if(t=e.classList){for(;++u<r;)if(!t.contains(n[u]))return!1}else for(t=e.getAttribute("class");++u<r;)if(!S(n[u]).test(t))return!1;return!0}for(t in n)this.each(E(t,n[t]));return this}return this.each(E(n,t))},da.style=function(n,t,e){var r=arguments.length;if(3>r){if("string"!=typeof n){2>r&&(t="");for(e in n)this.each(C(e,n[e],t));return this}if(2>r)return Go.getComputedStyle(this.node(),null).getPropertyValue(n);e=""}return this.each(C(n,t,e))},da.property=function(n,t){if(arguments.length<2){if("string"==typeof n)return this.node()[n];for(t in n)this.each(N(t,n[t]));return this}return this.each(N(n,t))},da.text=function(n){return arguments.length?this.each("function"==typeof n?function(){var t=n.apply(this,arguments);this.textContent=null==t?"":t}:null==n?function(){this.textContent=""}:function(){this.textContent=n}):this.node().textContent},da.html=function(n){return arguments.length?this.each("function"==typeof n?function(){var t=n.apply(this,arguments);this.innerHTML=null==t?"":t}:null==n?function(){this.innerHTML=""}:function(){this.innerHTML=n}):this.node().innerHTML},da.append=function(n){return n=L(n),this.select(function(){return this.appendChild(n.apply(this,arguments))})},da.insert=function(n,t){return n=L(n),t=M(t),this.select(function(){return this.insertBefore(n.apply(this,arguments),t.apply(this,arguments)||null)})},da.remove=function(){return this.each(function(){var n=this.parentNode;n&&n.removeChild(this)})},da.data=function(n,t){function e(n,e){var r,i,o,a=n.length,f=e.length,h=Math.min(a,f),g=new Array(f),p=new Array(f),v=new Array(a);if(t){var d,m=new u,y=new 
u,x=[];for(r=-1;++r<a;)d=t.call(i=n[r],i.__data__,r),m.has(d)?v[r]=i:m.set(d,i),x.push(d);for(r=-1;++r<f;)d=t.call(e,o=e[r],r),(i=m.get(d))?(g[r]=i,i.__data__=o):y.has(d)||(p[r]=T(o)),y.set(d,o),m.remove(d);for(r=-1;++r<a;)m.has(x[r])&&(v[r]=n[r])}else{for(r=-1;++r<h;)i=n[r],o=e[r],i?(i.__data__=o,g[r]=i):p[r]=T(o);for(;f>r;++r)p[r]=T(e[r]);for(;a>r;++r)v[r]=n[r]}p.update=g,p.parentNode=g.parentNode=v.parentNode=n.parentNode,c.push(p),s.push(g),l.push(v)}var r,i,o=-1,a=this.length;if(!arguments.length){for(n=new Array(a=(r=this[0]).length);++o<a;)(i=r[o])&&(n[o]=i.__data__);return n}var c=D([]),s=x([]),l=x([]);if("function"==typeof n)for(;++o<a;)e(r=this[o],n.call(r,r.parentNode.__data__,o));else for(;++o<a;)e(r=this[o],n);return s.enter=function(){return c},s.exit=function(){return l},s},da.datum=function(n){return arguments.length?this.property("__data__",n):this.property("__data__")},da.filter=function(n){var t,e,r,u=[];"function"!=typeof n&&(n=q(n));for(var i=0,o=this.length;o>i;i++){u.push(t=[]),t.parentNode=(e=this[i]).parentNode;for(var a=0,c=e.length;c>a;a++)(r=e[a])&&n.call(r,r.__data__,a,i)&&t.push(r)}return x(u)},da.order=function(){for(var n=-1,t=this.length;++n<t;)for(var e,r=this[n],u=r.length-1,i=r[u];--u>=0;)(e=r[u])&&(i&&i!==e.nextSibling&&i.parentNode.insertBefore(e,i),i=e);return this},da.sort=function(n){n=z.apply(this,arguments);for(var t=-1,e=this.length;++t<e;)this[t].sort(n);return this.order()},da.each=function(n){return R(this,function(t,e,r){n.call(t,t.__data__,e,r)})},da.call=function(n){var t=Bo(arguments);return n.apply(t[0]=this,t),this},da.empty=function(){return!this.node()},da.node=function(){for(var n=0,t=this.length;t>n;n++)for(var e=this[n],r=0,u=e.length;u>r;r++){var i=e[r];if(i)return i}return null},da.size=function(){var n=0;return this.each(function(){++n}),n};var 
ya=[];Xo.selection.enter=D,Xo.selection.enter.prototype=ya,ya.append=da.append,ya.empty=da.empty,ya.node=da.node,ya.call=da.call,ya.size=da.size,ya.select=function(n){for(var t,e,r,u,i,o=[],a=-1,c=this.length;++a<c;){r=(u=this[a]).update,o.push(t=[]),t.parentNode=u.parentNode;for(var s=-1,l=u.length;++s<l;)(i=u[s])?(t.push(r[s]=e=n.call(u.parentNode,i.__data__,s,a)),e.__data__=i.__data__):t.push(null)}return x(o)},ya.insert=function(n,t){return arguments.length<2&&(t=P(this)),da.insert.call(this,n,t)},da.transition=function(){for(var n,t,e=ks||++Ls,r=[],u=Es||{time:Date.now(),ease:yu,delay:0,duration:250},i=-1,o=this.length;++i<o;){r.push(n=[]);for(var a=this[i],c=-1,s=a.length;++c<s;)(t=a[c])&&jo(t,c,e,u),n.push(t)}return Do(r,e)},da.interrupt=function(){return this.each(U)},Xo.select=function(n){var t=["string"==typeof n?ha(n,Wo):n];return t.parentNode=Jo,x([t])},Xo.selectAll=function(n){var t=Bo("string"==typeof n?ga(n,Wo):n);return t.parentNode=Jo,x([t])};var xa=Xo.select(Jo);da.on=function(n,t,e){var r=arguments.length;if(3>r){if("string"!=typeof n){2>r&&(t=!1);for(e in n)this.each(j(e,n[e],t));return this}if(2>r)return(r=this.node()["__on"+n])&&r._;e=!1}return this.each(j(n,t,e))};var Ma=Xo.map({mouseenter:"mouseover",mouseleave:"mouseout"});Ma.forEach(function(n){"on"+n in Wo&&Ma.remove(n)});var _a="onselectstart"in Wo?null:h(Jo.style,"userSelect"),ba=0;Xo.mouse=function(n){return Y(n,m())};var wa=/WebKit/.test(Go.navigator.userAgent)?-1:0;Xo.touches=function(n,t){return arguments.length<2&&(t=m().touches),t?Bo(t).map(function(t){var e=Y(n,t);return e.identifier=t.identifier,e}):[]},Xo.behavior.drag=function(){function n(){this.on("mousedown.drag",o).on("touchstart.drag",a)}function t(){return Xo.event.changedTouches[0].identifier}function e(n,t){return Xo.touches(n).filter(function(n){return n.identifier===t})[0]}function r(n,t,e,r){return function(){function o(){var 
n=t(l,g),e=n[0]-v[0],r=n[1]-v[1];d|=e|r,v=n,f({type:"drag",x:n[0]+c[0],y:n[1]+c[1],dx:e,dy:r})}function a(){m.on(e+"."+p,null).on(r+"."+p,null),y(d&&Xo.event.target===h),f({type:"dragend"})}var c,s=this,l=s.parentNode,f=u.of(s,arguments),h=Xo.event.target,g=n(),p=null==g?"drag":"drag-"+g,v=t(l,g),d=0,m=Xo.select(Go).on(e+"."+p,o).on(r+"."+p,a),y=O();i?(c=i.apply(s,arguments),c=[c.x-v[0],c.y-v[1]]):c=[0,0],f({type:"dragstart"})}}var u=y(n,"drag","dragstart","dragend"),i=null,o=r(g,Xo.mouse,"mousemove","mouseup"),a=r(t,e,"touchmove","touchend");return n.origin=function(t){return arguments.length?(i=t,n):i},Xo.rebind(n,u,"on")};var Sa=Math.PI,ka=2*Sa,Ea=Sa/2,Aa=1e-6,Ca=Aa*Aa,Na=Sa/180,La=180/Sa,Ta=Math.SQRT2,qa=2,za=4;Xo.interpolateZoom=function(n,t){function e(n){var t=n*y;if(m){var e=B(v),o=i/(qa*h)*(e*W(Ta*t+v)-$(v));return[r+o*s,u+o*l,i*e/B(Ta*t+v)]}return[r+n*s,u+n*l,i*Math.exp(Ta*t)]}var r=n[0],u=n[1],i=n[2],o=t[0],a=t[1],c=t[2],s=o-r,l=a-u,f=s*s+l*l,h=Math.sqrt(f),g=(c*c-i*i+za*f)/(2*i*qa*h),p=(c*c-i*i-za*f)/(2*c*qa*h),v=Math.log(Math.sqrt(g*g+1)-g),d=Math.log(Math.sqrt(p*p+1)-p),m=d-v,y=(m||Math.log(c/i))/Ta;return e.duration=1e3*y,e},Xo.behavior.zoom=function(){function n(n){n.on(A,s).on(Pa+".zoom",f).on(C,h).on("dblclick.zoom",g).on(L,l)}function t(n){return[(n[0]-S.x)/S.k,(n[1]-S.y)/S.k]}function e(n){return[n[0]*S.k+S.x,n[1]*S.k+S.y]}function r(n){S.k=Math.max(E[0],Math.min(E[1],n))}function u(n,t){t=e(t),S.x+=n[0]-t[0],S.y+=n[1]-t[1]}function i(){_&&_.domain(M.range().map(function(n){return(n-S.x)/S.k}).map(M.invert)),w&&w.domain(b.range().map(function(n){return(n-S.y)/S.k}).map(b.invert))}function o(n){n({type:"zoomstart"})}function a(n){i(),n({type:"zoom",scale:S.k,translate:[S.x,S.y]})}function c(n){n({type:"zoomend"})}function s(){function n(){l=1,u(Xo.mouse(r),g),a(i)}function e(){f.on(C,Go===r?h:null).on(N,null),p(l&&Xo.event.target===s),c(i)}var 
r=this,i=T.of(r,arguments),s=Xo.event.target,l=0,f=Xo.select(Go).on(C,n).on(N,e),g=t(Xo.mouse(r)),p=O();U.call(r),o(i)}function l(){function n(){var n=Xo.touches(g);return h=S.k,n.forEach(function(n){n.identifier in v&&(v[n.identifier]=t(n))}),n}function e(){for(var t=Xo.event.changedTouches,e=0,i=t.length;i>e;++e)v[t[e].identifier]=null;var o=n(),c=Date.now();if(1===o.length){if(500>c-x){var s=o[0],l=v[s.identifier];r(2*S.k),u(s,l),d(),a(p)}x=c}else if(o.length>1){var s=o[0],f=o[1],h=s[0]-f[0],g=s[1]-f[1];m=h*h+g*g}}function i(){for(var n,t,e,i,o=Xo.touches(g),c=0,s=o.length;s>c;++c,i=null)if(e=o[c],i=v[e.identifier]){if(t)break;n=e,t=i}if(i){var l=(l=e[0]-n[0])*l+(l=e[1]-n[1])*l,f=m&&Math.sqrt(l/m);n=[(n[0]+e[0])/2,(n[1]+e[1])/2],t=[(t[0]+i[0])/2,(t[1]+i[1])/2],r(f*h)}x=null,u(n,t),a(p)}function f(){if(Xo.event.touches.length){for(var t=Xo.event.changedTouches,e=0,r=t.length;r>e;++e)delete v[t[e].identifier];for(var u in v)return void n()}b.on(M,null).on(_,null),w.on(A,s).on(L,l),k(),c(p)}var h,g=this,p=T.of(g,arguments),v={},m=0,y=Xo.event.changedTouches[0].identifier,M="touchmove.zoom-"+y,_="touchend.zoom-"+y,b=Xo.select(Go).on(M,i).on(_,f),w=Xo.select(g).on(A,null).on(L,e),k=O();U.call(g),e(),o(p)}function f(){var n=T.of(this,arguments);m?clearTimeout(m):(U.call(this),o(n)),m=setTimeout(function(){m=null,c(n)},50),d();var e=v||Xo.mouse(this);p||(p=t(e)),r(Math.pow(2,.002*Ra())*S.k),u(e,p),a(n)}function h(){p=null}function g(){var n=T.of(this,arguments),e=Xo.mouse(this),i=t(e),s=Math.log(S.k)/Math.LN2;o(n),r(Math.pow(2,Xo.event.shiftKey?Math.ceil(s)-1:Math.floor(s)+1)),u(e,i),a(n),c(n)}var p,v,m,x,M,_,b,w,S={x:0,y:0,k:1},k=[960,500],E=Da,A="mousedown.zoom",C="mousemove.zoom",N="mouseup.zoom",L="touchstart.zoom",T=y(n,"zoomstart","zoom","zoomend");return n.event=function(n){n.each(function(){var n=T.of(this,arguments),t=S;ks?Xo.select(this).transition().each("start.zoom",function(){S=this.__chart__||{x:0,y:0,k:1},o(n)}).tween("zoom:zoom",function(){var 
e=k[0],r=k[1],u=e/2,i=r/2,o=Xo.interpolateZoom([(u-S.x)/S.k,(i-S.y)/S.k,e/S.k],[(u-t.x)/t.k,(i-t.y)/t.k,e/t.k]);return function(t){var r=o(t),c=e/r[2];this.__chart__=S={x:u-r[0]*c,y:i-r[1]*c,k:c},a(n)}}).each("end.zoom",function(){c(n)}):(this.__chart__=S,o(n),a(n),c(n))})},n.translate=function(t){return arguments.length?(S={x:+t[0],y:+t[1],k:S.k},i(),n):[S.x,S.y]},n.scale=function(t){return arguments.length?(S={x:S.x,y:S.y,k:+t},i(),n):S.k},n.scaleExtent=function(t){return arguments.length?(E=null==t?Da:[+t[0],+t[1]],n):E},n.center=function(t){return arguments.length?(v=t&&[+t[0],+t[1]],n):v},n.size=function(t){return arguments.length?(k=t&&[+t[0],+t[1]],n):k},n.x=function(t){return arguments.length?(_=t,M=t.copy(),S={x:0,y:0,k:1},n):_},n.y=function(t){return arguments.length?(w=t,b=t.copy(),S={x:0,y:0,k:1},n):w},Xo.rebind(n,T,"on")};var Ra,Da=[0,1/0],Pa="onwheel"in Wo?(Ra=function(){return-Xo.event.deltaY*(Xo.event.deltaMode?120:1)},"wheel"):"onmousewheel"in Wo?(Ra=function(){return Xo.event.wheelDelta},"mousewheel"):(Ra=function(){return-Xo.event.detail},"MozMousePixelScroll");G.prototype.toString=function(){return this.rgb()+""},Xo.hsl=function(n,t,e){return 1===arguments.length?n instanceof Q?K(n.h,n.s,n.l):dt(""+n,mt,K):K(+n,+t,+e)};var Ua=Q.prototype=new G;Ua.brighter=function(n){return n=Math.pow(.7,arguments.length?n:1),K(this.h,this.s,this.l/n)},Ua.darker=function(n){return n=Math.pow(.7,arguments.length?n:1),K(this.h,this.s,n*this.l)},Ua.rgb=function(){return nt(this.h,this.s,this.l)},Xo.hcl=function(n,t,e){return 1===arguments.length?n instanceof et?tt(n.h,n.c,n.l):n instanceof it?at(n.l,n.a,n.b):at((n=yt((n=Xo.rgb(n)).r,n.g,n.b)).l,n.a,n.b):tt(+n,+t,+e)};var ja=et.prototype=new G;ja.brighter=function(n){return tt(this.h,this.c,Math.min(100,this.l+Ha*(arguments.length?n:1)))},ja.darker=function(n){return tt(this.h,this.c,Math.max(0,this.l-Ha*(arguments.length?n:1)))},ja.rgb=function(){return rt(this.h,this.c,this.l).rgb()},Xo.lab=function(n,t,e){return 
1===arguments.length?n instanceof it?ut(n.l,n.a,n.b):n instanceof et?rt(n.l,n.c,n.h):yt((n=Xo.rgb(n)).r,n.g,n.b):ut(+n,+t,+e)};var Ha=18,Fa=.95047,Oa=1,Ya=1.08883,Ia=it.prototype=new G;Ia.brighter=function(n){return ut(Math.min(100,this.l+Ha*(arguments.length?n:1)),this.a,this.b)},Ia.darker=function(n){return ut(Math.max(0,this.l-Ha*(arguments.length?n:1)),this.a,this.b)},Ia.rgb=function(){return ot(this.l,this.a,this.b)},Xo.rgb=function(n,t,e){return 1===arguments.length?n instanceof pt?gt(n.r,n.g,n.b):dt(""+n,gt,nt):gt(~~n,~~t,~~e)};var Za=pt.prototype=new G;Za.brighter=function(n){n=Math.pow(.7,arguments.length?n:1);var t=this.r,e=this.g,r=this.b,u=30;return t||e||r?(t&&u>t&&(t=u),e&&u>e&&(e=u),r&&u>r&&(r=u),gt(Math.min(255,~~(t/n)),Math.min(255,~~(e/n)),Math.min(255,~~(r/n)))):gt(u,u,u)},Za.darker=function(n){return n=Math.pow(.7,arguments.length?n:1),gt(~~(n*this.r),~~(n*this.g),~~(n*this.b))},Za.hsl=function(){return mt(this.r,this.g,this.b)},Za.toString=function(){return"#"+vt(this.r)+vt(this.g)+vt(this.b)};var 
Va=Xo.map({aliceblue:15792383,antiquewhite:16444375,aqua:65535,aquamarine:8388564,azure:15794175,beige:16119260,bisque:16770244,black:0,blanchedalmond:16772045,blue:255,blueviolet:9055202,brown:10824234,burlywood:14596231,cadetblue:6266528,chartreuse:8388352,chocolate:13789470,coral:16744272,cornflowerblue:6591981,cornsilk:16775388,crimson:14423100,cyan:65535,darkblue:139,darkcyan:35723,darkgoldenrod:12092939,darkgray:11119017,darkgreen:25600,darkgrey:11119017,darkkhaki:12433259,darkmagenta:9109643,darkolivegreen:5597999,darkorange:16747520,darkorchid:10040012,darkred:9109504,darksalmon:15308410,darkseagreen:9419919,darkslateblue:4734347,darkslategray:3100495,darkslategrey:3100495,darkturquoise:52945,darkviolet:9699539,deeppink:16716947,deepskyblue:49151,dimgray:6908265,dimgrey:6908265,dodgerblue:2003199,firebrick:11674146,floralwhite:16775920,forestgreen:2263842,fuchsia:16711935,gainsboro:14474460,ghostwhite:16316671,gold:16766720,goldenrod:14329120,gray:8421504,green:32768,greenyellow:11403055,grey:8421504,honeydew:15794160,hotpink:16738740,indianred:13458524,indigo:4915330,ivory:16777200,khaki:15787660,lavender:15132410,lavenderblush:16773365,lawngreen:8190976,lemonchiffon:16775885,lightblue:11393254,lightcoral:15761536,lightcyan:14745599,lightgoldenrodyellow:16448210,lightgray:13882323,lightgreen:9498256,lightgrey:13882323,lightpink:16758465,lightsalmon:16752762,lightseagreen:2142890,lightskyblue:8900346,lightslategray:7833753,lightslategrey:7833753,lightsteelblue:11584734,lightyellow:16777184,lime:65280,limegreen:3329330,linen:16445670,magenta:16711935,maroon:8388608,mediumaquamarine:6737322,mediumblue:205,mediumorchid:12211667,mediumpurple:9662683,mediumseagreen:3978097,mediumslateblue:8087790,mediumspringgreen:64154,mediumturquoise:4772300,mediumvioletred:13047173,midnightblue:1644912,mintcream:16121850,mistyrose:16770273,moccasin:16770229,navajowhite:16768685,navy:128,oldlace:16643558,olive:8421376,olivedrab:7048739,orange:16753920,orangered:16729344,orchid:
14315734,palegoldenrod:15657130,palegreen:10025880,paleturquoise:11529966,palevioletred:14381203,papayawhip:16773077,peachpuff:16767673,peru:13468991,pink:16761035,plum:14524637,powderblue:11591910,purple:8388736,red:16711680,rosybrown:12357519,royalblue:4286945,saddlebrown:9127187,salmon:16416882,sandybrown:16032864,seagreen:3050327,seashell:16774638,sienna:10506797,silver:12632256,skyblue:8900331,slateblue:6970061,slategray:7372944,slategrey:7372944,snow:16775930,springgreen:65407,steelblue:4620980,tan:13808780,teal:32896,thistle:14204888,tomato:16737095,turquoise:4251856,violet:15631086,wheat:16113331,white:16777215,whitesmoke:16119285,yellow:16776960,yellowgreen:10145074});Va.forEach(function(n,t){Va.set(n,ft(t))}),Xo.functor=_t,Xo.xhr=wt(bt),Xo.dsv=function(n,t){function e(n,e,i){arguments.length<3&&(i=e,e=null);var o=St(n,t,null==e?r:u(e),i);return o.row=function(n){return arguments.length?o.response(null==(e=n)?r:u(n)):e},o}function r(n){return e.parse(n.responseText)}function u(n){return function(t){return e.parse(t.responseText,n)}}function i(t){return t.map(o).join(n)}function o(n){return a.test(n)?'"'+n.replace(/\"/g,'""')+'"':n}var a=new RegExp('["'+n+"\n]"),c=n.charCodeAt(0);return e.parse=function(n,t){var r;return e.parseRows(n,function(n,e){if(r)return r(n,e-1);var u=new Function("d","return {"+n.map(function(n,t){return JSON.stringify(n)+": d["+t+"]"}).join(",")+"}");r=t?function(n,e){return t(u(n),e)}:u})},e.parseRows=function(n,t){function e(){if(l>=s)return o;if(u)return u=!1,i;var t=l;if(34===n.charCodeAt(t)){for(var e=t;e++<s;)if(34===n.charCodeAt(e)){if(34!==n.charCodeAt(e+1))break;++e}l=e+2;var r=n.charCodeAt(e+1);return 13===r?(u=!0,10===n.charCodeAt(e+2)&&++l):10===r&&(u=!0),n.substring(t+1,e).replace(/""/g,'"')}for(;s>l;){var r=n.charCodeAt(l++),a=1;if(10===r)u=!0;else if(13===r)u=!0,10===n.charCodeAt(l)&&(++l,++a);else if(r!==c)continue;return n.substring(t,l-a)}return n.substring(t)}for(var 
// NOTE(review): minified d3 v3 — continuation of the dsv parser (parseRows/format/formatRows), construction of d3.csv and d3.tsv (the tsv delimiter character here should be a literal TAB — verify it was not mangled to a space by tooling), the timer flush, SI-prefix formatting, the d3.format specifier regex (Qa) and conversion-type table (nc), then tc = d3.time with a Date wrapper (zt) that maps local-time accessors onto UTC for the "utc" interval variants, and the year/day time intervals.
r,u,i={},o={},a=[],s=n.length,l=0,f=0;(r=e())!==o;){for(var h=[];r!==i&&r!==o;)h.push(r),r=e();(!t||(h=t(h,f++)))&&a.push(h)}return a},e.format=function(t){if(Array.isArray(t[0]))return e.formatRows(t);var r=new l,u=[];return t.forEach(function(n){for(var t in n)r.has(t)||u.push(r.add(t))}),[u.map(o).join(n)].concat(t.map(function(t){return u.map(function(n){return o(t[n])}).join(n)})).join("\n")},e.formatRows=function(n){return n.map(i).join("\n")},e},Xo.csv=Xo.dsv(",","text/csv"),Xo.tsv=Xo.dsv("	","text/tab-separated-values");var Xa,$a,Ba,Wa,Ja,Ga=Go[h(Go,"requestAnimationFrame")]||function(n){setTimeout(n,17)};Xo.timer=function(n,t,e){var r=arguments.length;2>r&&(t=0),3>r&&(e=Date.now());var u=e+t,i={c:n,t:u,f:!1,n:null};$a?$a.n=i:Xa=i,$a=i,Ba||(Wa=clearTimeout(Wa),Ba=1,Ga(Et))},Xo.timer.flush=function(){At(),Ct()},Xo.round=function(n,t){return t?Math.round(n*(t=Math.pow(10,t)))/t:Math.round(n)};var Ka=["y","z","a","f","p","n","\xb5","m","","k","M","G","T","P","E","Z","Y"].map(Lt);Xo.formatPrefix=function(n,t){var e=0;return n&&(0>n&&(n*=-1),t&&(n=Xo.round(n,Nt(n,t))),e=1+Math.floor(1e-12+Math.log(n)/Math.LN10),e=Math.max(-24,Math.min(24,3*Math.floor((0>=e?e+1:e-1)/3)))),Ka[8+e/3]};var Qa=/(?:([^{])?([<>=^]))?([+\- ])?([$#])?(0)?(\d+)?(,)?(\.-?\d+)?([a-z%])?/i,nc=Xo.map({b:function(n){return n.toString(2)},c:function(n){return String.fromCharCode(n)},o:function(n){return n.toString(8)},x:function(n){return n.toString(16)},X:function(n){return n.toString(16).toUpperCase()},g:function(n,t){return n.toPrecision(t)},e:function(n,t){return n.toExponential(t)},f:function(n,t){return n.toFixed(t)},r:function(n,t){return(n=Xo.round(n,Nt(n,t))).toFixed(Math.max(0,Math.min(20,Nt(n*(1+1e-15),t))))}}),tc=Xo.time={},ec=Date;zt.prototype={getDate:function(){return this._.getUTCDate()},getDay:function(){return this._.getUTCDay()},getFullYear:function(){return this._.getUTCFullYear()},getHours:function(){return this._.getUTCHours()},getMilliseconds:function(){return 
this._.getUTCMilliseconds()},getMinutes:function(){return this._.getUTCMinutes()},getMonth:function(){return this._.getUTCMonth()},getSeconds:function(){return this._.getUTCSeconds()},getTime:function(){return this._.getTime()},getTimezoneOffset:function(){return 0},valueOf:function(){return this._.valueOf()},setDate:function(){rc.setUTCDate.apply(this._,arguments)},setDay:function(){rc.setUTCDay.apply(this._,arguments)},setFullYear:function(){rc.setUTCFullYear.apply(this._,arguments)},setHours:function(){rc.setUTCHours.apply(this._,arguments)},setMilliseconds:function(){rc.setUTCMilliseconds.apply(this._,arguments)},setMinutes:function(){rc.setUTCMinutes.apply(this._,arguments)},setMonth:function(){rc.setUTCMonth.apply(this._,arguments)},setSeconds:function(){rc.setUTCSeconds.apply(this._,arguments)},setTime:function(){rc.setTime.apply(this._,arguments)}};var rc=Date.prototype;tc.year=Rt(function(n){return n=tc.day(n),n.setMonth(0,1),n},function(n,t){n.setFullYear(n.getFullYear()+t)},function(n){return n.getFullYear()}),tc.years=tc.year.range,tc.years.utc=tc.year.utc.range,tc.day=Rt(function(n){var t=new ec(2e3,0);return t.setFullYear(n.getFullYear(),n.getMonth(),n.getDate()),t},function(n,t){n.setDate(n.getDate()+t)},function(n){return n.getDate()-1}),tc.days=tc.day.range,tc.days.utc=tc.day.utc.range,tc.dayOfYear=function(n){var t=tc.year(n);return Math.floor((n-t-6e4*(n.getTimezoneOffset()-t.getTimezoneOffset()))/864e5)},["sunday","monday","tuesday","wednesday","thursday","friday","saturday"].forEach(function(n,t){t=7-t;var e=tc[n]=Rt(function(n){return(n=tc.day(n)).setDate(n.getDate()-(n.getDay()+t)%7),n},function(n,t){n.setDate(n.getDate()+7*Math.floor(t))},function(n){var e=tc.year(n).getDay();return Math.floor((tc.dayOfYear(n)+(e+t)%7)/7)-(e!==t)});tc[n+"s"]=e.range,tc[n+"s"].utc=e.utc.range,tc[n+"OfYear"]=function(n){var e=tc.year(n).getDay();return 
// NOTE(review): minified d3 v3 — week interval aliases (tc.week = tc.sunday), d3.locale with the default en-US locale (ac) feeding d3.format, the start of the d3.geo namespace: a Kahan-style compensated adder (re.prototype), the GeoJSON stream dispatcher d3.geo.stream with its Feature/geometry dispatch tables (sc, lc), spherical d3.geo.area, and d3.geo.bounds (antimeridian-aware bounding-box computation) plus d3.geo.centroid.
Math.floor((tc.dayOfYear(n)+(e+t)%7)/7)}}),tc.week=tc.sunday,tc.weeks=tc.sunday.range,tc.weeks.utc=tc.sunday.utc.range,tc.weekOfYear=tc.sundayOfYear;var uc={"-":"",_:" ",0:"0"},ic=/^\s*\d+/,oc=/^%/;Xo.locale=function(n){return{numberFormat:Tt(n),timeFormat:Pt(n)}};var ac=Xo.locale({decimal:".",thousands:",",grouping:[3],currency:["$",""],dateTime:"%a %b %e %X %Y",date:"%m/%d/%Y",time:"%H:%M:%S",periods:["AM","PM"],days:["Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"],shortDays:["Sun","Mon","Tue","Wed","Thu","Fri","Sat"],months:["January","February","March","April","May","June","July","August","September","October","November","December"],shortMonths:["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"]});Xo.format=ac.numberFormat,Xo.geo={},re.prototype={s:0,t:0,add:function(n){ue(n,this.t,cc),ue(cc.s,this.s,this),this.s?this.t+=cc.t:this.s=cc.t},reset:function(){this.s=this.t=0},valueOf:function(){return this.s}};var cc=new re;Xo.geo.stream=function(n,t){n&&sc.hasOwnProperty(n.type)?sc[n.type](n,t):ie(n,t)};var sc={Feature:function(n,t){ie(n.geometry,t)},FeatureCollection:function(n,t){for(var e=n.features,r=-1,u=e.length;++r<u;)ie(e[r].geometry,t)}},lc={Sphere:function(n,t){t.sphere()},Point:function(n,t){n=n.coordinates,t.point(n[0],n[1],n[2])},MultiPoint:function(n,t){for(var e=n.coordinates,r=-1,u=e.length;++r<u;)n=e[r],t.point(n[0],n[1],n[2])},LineString:function(n,t){oe(n.coordinates,t,0)},MultiLineString:function(n,t){for(var e=n.coordinates,r=-1,u=e.length;++r<u;)oe(e[r],t,0)},Polygon:function(n,t){ae(n.coordinates,t)},MultiPolygon:function(n,t){for(var e=n.coordinates,r=-1,u=e.length;++r<u;)ae(e[r],t)},GeometryCollection:function(n,t){for(var e=n.geometries,r=-1,u=e.length;++r<u;)ie(e[r],t)}};Xo.geo.area=function(n){return fc=0,Xo.geo.stream(n,gc),fc};var fc,hc=new re,gc={sphere:function(){fc+=4*Sa},point:g,lineStart:g,lineEnd:g,polygonStart:function(){hc.reset(),gc.lineStart=ce},polygonEnd:function(){var 
n=2*hc;fc+=0>n?4*Sa+n:n,gc.lineStart=gc.lineEnd=gc.point=g}};Xo.geo.bounds=function(){function n(n,t){x.push(M=[l=n,h=n]),f>t&&(f=t),t>g&&(g=t)}function t(t,e){var r=se([t*Na,e*Na]);if(m){var u=fe(m,r),i=[u[1],-u[0],0],o=fe(i,u);pe(o),o=ve(o);var c=t-p,s=c>0?1:-1,v=o[0]*La*s,d=oa(c)>180;if(d^(v>s*p&&s*t>v)){var y=o[1]*La;y>g&&(g=y)}else if(v=(v+360)%360-180,d^(v>s*p&&s*t>v)){var y=-o[1]*La;f>y&&(f=y)}else f>e&&(f=e),e>g&&(g=e);d?p>t?a(l,t)>a(l,h)&&(h=t):a(t,h)>a(l,h)&&(l=t):h>=l?(l>t&&(l=t),t>h&&(h=t)):t>p?a(l,t)>a(l,h)&&(h=t):a(t,h)>a(l,h)&&(l=t)}else n(t,e);m=r,p=t}function e(){_.point=t}function r(){M[0]=l,M[1]=h,_.point=n,m=null}function u(n,e){if(m){var r=n-p;y+=oa(r)>180?r+(r>0?360:-360):r}else v=n,d=e;gc.point(n,e),t(n,e)}function i(){gc.lineStart()}function o(){u(v,d),gc.lineEnd(),oa(y)>Aa&&(l=-(h=180)),M[0]=l,M[1]=h,m=null}function a(n,t){return(t-=n)<0?t+360:t}function c(n,t){return n[0]-t[0]}function s(n,t){return t[0]<=t[1]?t[0]<=n&&n<=t[1]:n<t[0]||t[1]<n}var l,f,h,g,p,v,d,m,y,x,M,_={point:n,lineStart:e,lineEnd:r,polygonStart:function(){_.point=u,_.lineStart=i,_.lineEnd=o,y=0,gc.polygonStart()},polygonEnd:function(){gc.polygonEnd(),_.point=n,_.lineStart=e,_.lineEnd=r,0>hc?(l=-(h=180),f=-(g=90)):y>Aa?g=90:-Aa>y&&(f=-90),M[0]=l,M[1]=h}};return function(n){g=h=-(l=f=1/0),x=[],Xo.geo.stream(n,_);var t=x.length;if(t){x.sort(c);for(var e,r=1,u=x[0],i=[u];t>r;++r)e=x[r],s(e[0],u)||s(e[1],u)?(a(u[0],e[1])>a(u[0],u[1])&&(u[1]=e[1]),a(e[0],u[1])>a(u[0],u[1])&&(u[0]=e[0])):i.push(u=e);for(var o,e,p=-1/0,t=i.length-1,r=0,u=i[t];t>=r;u=e,++r)e=i[r],(o=a(u[1],e[0]))>p&&(p=o,l=e[0],h=u[1])}return x=M=null,1/0===l||1/0===f?[[0/0,0/0],[0/0,0/0]]:[[l,f],[h,g]]}}(),Xo.geo.centroid=function(n){pc=vc=dc=mc=yc=xc=Mc=_c=bc=wc=Sc=0,Xo.geo.stream(n,kc);var t=bc,e=wc,r=Sc,u=t*t+e*e+r*r;return Ca>u&&(t=xc,e=Mc,r=_c,Aa>vc&&(t=dc,e=mc,r=yc),u=t*t+e*e+r*r,Ca>u)?[0/0,0/0]:[Math.atan2(e,t)*La,X(r/Math.sqrt(u))*La]};var 
pc,vc,dc,mc,yc,xc,Mc,_c,bc,wc,Sc,kc={sphere:g,point:me,lineStart:xe,lineEnd:Me,polygonStart:function(){kc.lineStart=_e},polygonEnd:function(){kc.lineStart=xe}},Ec=Ee(be,Te,ze,[-Sa,-Sa/2]),Ac=1e9;Xo.geo.clipExtent=function(){var n,t,e,r,u,i,o={stream:function(n){return u&&(u.valid=!1),u=i(n),u.valid=!0,u},extent:function(a){return arguments.length?(i=Pe(n=+a[0][0],t=+a[0][1],e=+a[1][0],r=+a[1][1]),u&&(u.valid=!1,u=null),o):[[n,t],[e,r]]}};return o.extent([[0,0],[960,500]])},(Xo.geo.conicEqualArea=function(){return je(They)}).raw=They,Xo.geo.albers=function(){return Xo.geo.conicEqualArea().rotate([96,0]).center([-.6,38.7]).parallels([29.5,45.5]).scale(1070)},Xo.geo.albersUsa=function(){function n(n){var i=n[0],o=n[1];return t=null,e(i,o),t||(r(i,o),t)||u(i,o),t}var t,e,r,u,i=Xo.geo.albers(),o=Xo.geo.conicEqualArea().rotate([154,0]).center([-2,58.5]).parallels([55,65]),a=Xo.geo.conicEqualArea().rotate([157,0]).center([-3,19.9]).parallels([8,18]),c={point:function(n,e){t=[n,e]}};return n.invert=function(n){var t=i.scale(),e=i.translate(),r=(n[0]-e[0])/t,u=(n[1]-e[1])/t;return(u>=.12&&.234>u&&r>=-.425&&-.214>r?o:u>=.166&&.234>u&&r>=-.214&&-.115>r?a:i).invert(n)},n.stream=function(n){var t=i.stream(n),e=o.stream(n),r=a.stream(n);return{point:function(n,u){t.point(n,u),e.point(n,u),r.point(n,u)},sphere:function(){t.sphere(),e.sphere(),r.sphere()},lineStart:function(){t.lineStart(),e.lineStart(),r.lineStart()},lineEnd:function(){t.lineEnd(),e.lineEnd(),r.lineEnd()},polygonStart:function(){t.polygonStart(),e.polygonStart(),r.polygonStart()},polygonEnd:function(){t.polygonEnd(),e.polygonEnd(),r.polygonEnd()}}},n.precision=function(t){return arguments.length?(i.precision(t),o.precision(t),a.precision(t),n):i.precision()},n.scale=function(t){return arguments.length?(i.scale(t),o.scale(.35*t),a.scale(t),n.translate(i.translate())):i.scale()},n.translate=function(t){if(!arguments.length)return i.translate();var s=i.scale(),l=+t[0],f=+t[1];return 
// NOTE(review): minified d3 v3 — tail of albersUsa translate (sets the Alaska/Hawaii insets' clip extents), then d3.geo.path (area/centroid/bounds/projection/context/pointRadius accessors), d3.geo.transform, d3.geo.projection and projectionMutator, equirectangular, d3.geo.rotation, d3.geo.circle, spherical d3.geo.distance, d3.geo.graticule, d3.geo.greatArc, d3.geo.interpolate, d3.geo.length, and azimuthalEqualArea.
e=i.translate(t).clipExtent([[l-.455*s,f-.238*s],[l+.455*s,f+.238*s]]).stream(c).point,r=o.translate([l-.307*s,f+.201*s]).clipExtent([[l-.425*s+Aa,f+.12*s+Aa],[l-.214*s-Aa,f+.234*s-Aa]]).stream(c).point,u=a.translate([l-.205*s,f+.212*s]).clipExtent([[l-.214*s+Aa,f+.166*s+Aa],[l-.115*s-Aa,f+.234*s-Aa]]).stream(c).point,n},n.scale(1070)};var Cc,Nc,Lc,Tc,qc,zc,Rc={point:g,lineStart:g,lineEnd:g,polygonStart:function(){Nc=0,Rc.lineStart=Fe},polygonEnd:function(){Rc.lineStart=Rc.lineEnd=Rc.point=g,Cc+=oa(Nc/2)}},Dc={point:Oe,lineStart:g,lineEnd:g,polygonStart:g,polygonEnd:g},Pc={point:Ze,lineStart:Ve,lineEnd:Xe,polygonStart:function(){Pc.lineStart=$e},polygonEnd:function(){Pc.point=Ze,Pc.lineStart=Ve,Pc.lineEnd=Xe}};Xo.geo.path=function(){function n(n){return n&&("function"==typeof a&&i.pointRadius(+a.apply(this,arguments)),o&&o.valid||(o=u(i)),Xo.geo.stream(n,o)),i.result()}function t(){return o=null,n}var e,r,u,i,o,a=4.5;return n.area=function(n){return Cc=0,Xo.geo.stream(n,u(Rc)),Cc},n.centroid=function(n){return dc=mc=yc=xc=Mc=_c=bc=wc=Sc=0,Xo.geo.stream(n,u(Pc)),Sc?[bc/Sc,wc/Sc]:_c?[xc/_c,Mc/_c]:yc?[dc/yc,mc/yc]:[0/0,0/0]},n.bounds=function(n){return qc=zc=-(Lc=Tc=1/0),Xo.geo.stream(n,u(Dc)),[[Lc,Tc],[qc,zc]]},n.projection=function(n){return arguments.length?(u=(e=n)?n.stream||Je(n):bt,t()):e},n.context=function(n){return arguments.length?(i=null==(r=n)?new Ye:new Be(n),"function"!=typeof a&&i.pointRadius(a),t()):r},n.pointRadius=function(t){return arguments.length?(a="function"==typeof t?t:(i.pointRadius(+t),+t),n):a},n.projection(Xo.geo.albersUsa()).context(null)},Xo.geo.transform=function(n){return{stream:function(t){var e=new Ge(t);for(var r in n)e[r]=n[r];return 
e}}},Ge.prototype={point:function(n,t){this.stream.point(n,t)},sphere:function(){this.stream.sphere()},lineStart:function(){this.stream.lineStart()},lineEnd:function(){this.stream.lineEnd()},polygonStart:function(){this.stream.polygonStart()},polygonEnd:function(){this.stream.polygonEnd()}},Xo.geo.projection=Qe,Xo.geo.projectionMutator=nr,(Xo.geo.equirectangular=function(){return Qe(er)}).raw=er.invert=er,Xo.geo.rotation=function(n){function t(t){return t=n(t[0]*Na,t[1]*Na),t[0]*=La,t[1]*=La,t}return n=ur(n[0]%360*Na,n[1]*Na,n.length>2?n[2]*Na:0),t.invert=function(t){return t=n.invert(t[0]*Na,t[1]*Na),t[0]*=La,t[1]*=La,t},t},rr.invert=er,Xo.geo.circle=function(){function n(){var n="function"==typeof r?r.apply(this,arguments):r,t=ur(-n[0]*Na,-n[1]*Na,0).invert,u=[];return e(null,null,1,{point:function(n,e){u.push(n=t(n,e)),n[0]*=La,n[1]*=La}}),{type:"Polygon",coordinates:[u]}}var t,e,r=[0,0],u=6;return n.origin=function(t){return arguments.length?(r=t,n):r},n.angle=function(r){return arguments.length?(e=cr((t=+r)*Na,u*Na),n):t},n.precision=function(r){return arguments.length?(e=cr(t*Na,(u=+r)*Na),n):u},n.angle(90)},Xo.geo.distance=function(n,t){var e,r=(t[0]-n[0])*Na,u=n[1]*Na,i=t[1]*Na,o=Math.sin(r),a=Math.cos(r),c=Math.sin(u),s=Math.cos(u),l=Math.sin(i),f=Math.cos(i);return Math.atan2(Math.sqrt((e=f*o)*e+(e=s*l-c*f*a)*e),c*l+s*f*a)},Xo.geo.graticule=function(){function n(){return{type:"MultiLineString",coordinates:t()}}function t(){return Xo.range(Math.ceil(i/d)*d,u,d).map(h).concat(Xo.range(Math.ceil(s/m)*m,c,m).map(g)).concat(Xo.range(Math.ceil(r/p)*p,e,p).filter(function(n){return oa(n%d)>Aa}).map(l)).concat(Xo.range(Math.ceil(a/v)*v,o,v).filter(function(n){return oa(n%m)>Aa}).map(f))}var e,r,u,i,o,a,c,s,l,f,h,g,p=10,v=p,d=90,m=360,y=2.5;return n.lines=function(){return 
t().map(function(n){return{type:"LineString",coordinates:n}})},n.outline=function(){return{type:"Polygon",coordinates:[h(i).concat(g(c).slice(1),h(u).reverse().slice(1),g(s).reverse().slice(1))]}},n.extent=function(t){return arguments.length?n.majorExtent(t).minorExtent(t):n.minorExtent()},n.majorExtent=function(t){return arguments.length?(i=+t[0][0],u=+t[1][0],s=+t[0][1],c=+t[1][1],i>u&&(t=i,i=u,u=t),s>c&&(t=s,s=c,c=t),n.precision(y)):[[i,s],[u,c]]},n.minorExtent=function(t){return arguments.length?(r=+t[0][0],e=+t[1][0],a=+t[0][1],o=+t[1][1],r>e&&(t=r,r=e,e=t),a>o&&(t=a,a=o,o=t),n.precision(y)):[[r,a],[e,o]]},n.step=function(t){return arguments.length?n.majorStep(t).minorStep(t):n.minorStep()},n.majorStep=function(t){return arguments.length?(d=+t[0],m=+t[1],n):[d,m]},n.minorStep=function(t){return arguments.length?(p=+t[0],v=+t[1],n):[p,v]},n.precision=function(t){return arguments.length?(y=+t,l=lr(a,o,90),f=fr(r,e,y),h=lr(s,c,90),g=fr(i,u,y),n):y},n.majorExtent([[-180,-90+Aa],[180,90-Aa]]).minorExtent([[-180,-80-Aa],[180,80+Aa]])},Xo.geo.greatArc=function(){function n(){return{type:"LineString",coordinates:[t||r.apply(this,arguments),e||u.apply(this,arguments)]}}var t,e,r=hr,u=gr;return n.distance=function(){return Xo.geo.distance(t||r.apply(this,arguments),e||u.apply(this,arguments))},n.source=function(e){return arguments.length?(r=e,t="function"==typeof e?null:e,n):r},n.target=function(t){return arguments.length?(u=t,e="function"==typeof t?null:t,n):u},n.precision=function(){return arguments.length?n:0},n},Xo.geo.interpolate=function(n,t){return pr(n[0]*Na,n[1]*Na,t[0]*Na,t[1]*Na)},Xo.geo.length=function(n){return Uc=0,Xo.geo.stream(n,jc),Uc};var Uc,jc={sphere:g,point:g,lineStart:vr,lineEnd:g,polygonStart:g,polygonEnd:g},Hc=dr(function(n){return Math.sqrt(2/(1+n))},function(n){return 2*Math.asin(n/2)});(Xo.geo.azimuthalEqualArea=function(){return Qe(Hc)}).raw=Hc;var Fc=dr(function(n){var t=Math.acos(n);return 
// NOTE(review): minified d3 v3 — remaining projections (azimuthalEquidistant, conicConformal, conicEquidistant, gnomonic, mercator, orthographic, stereographic, transverseMercator), then d3.geom: convex hull (upper/lower chain merge), d3.geom.polygon with area/centroid/clip (Sutherland-Hodgman style clipping), and the start of the Voronoi machinery — half-edge helpers and a red-black tree (Wr.prototype insert/remove) used by the Fortune sweep.
t&&t/Math.sin(t)},bt);(Xo.geo.azimuthalEquidistant=function(){return Qe(Fc)}).raw=Fc,(Xo.geo.conicConformal=function(){return je(mr)}).raw=mr,(Xo.geo.conicEquidistant=function(){return je(yr)}).raw=yr;var Oc=dr(function(n){return 1/n},Math.atan);(Xo.geo.gnomonic=function(){return Qe(Oc)}).raw=Oc,xr.invert=function(n,t){return[n,2*Math.atan(Math.exp(t))-Ea]},(Xo.geo.mercator=function(){return Mr(xr)}).raw=xr;var Yc=dr(function(){return 1},Math.asin);(Xo.geo.orthographic=function(){return Qe(Yc)}).raw=Yc;var Ic=dr(function(n){return 1/(1+n)},function(n){return 2*Math.atan(n)});(Xo.geo.stereographic=function(){return Qe(Ic)}).raw=Ic,_r.invert=function(n,t){return[-t,2*Math.atan(Math.exp(n))-Ea]},(Xo.geo.transverseMercator=function(){var n=Mr(_r),t=n.center,e=n.rotate;return n.center=function(n){return n?t([-n[1],n[0]]):(n=t(),[-n[1],n[0]])},n.rotate=function(n){return n?e([n[0],n[1],n.length>2?n[2]+90:90]):(n=e(),[n[0],n[1],n[2]-90])},n.rotate([0,0])}).raw=_r,Xo.geom={},Xo.geom.hull=function(n){function t(n){if(n.length<3)return[];var t,u=_t(e),i=_t(r),o=n.length,a=[],c=[];for(t=0;o>t;t++)a.push([+u.call(this,n[t],t),+i.call(this,n[t],t),t]);for(a.sort(kr),t=0;o>t;t++)c.push([a[t][0],-a[t][1]]);var s=Sr(a),l=Sr(c),f=l[0]===s[0],h=l[l.length-1]===s[s.length-1],g=[];for(t=s.length-1;t>=0;--t)g.push(n[a[s[t]][2]]);for(t=+f;t<l.length-h;++t)g.push(n[a[l[t]][2]]);return g}var e=br,r=wr;return arguments.length?t(n):(t.x=function(n){return arguments.length?(e=n,t):e},t.y=function(n){return arguments.length?(r=n,t):r},t)},Xo.geom.polygon=function(n){return fa(n,Zc),n};var Zc=Xo.geom.polygon.prototype=[];Zc.area=function(){for(var n,t=-1,e=this.length,r=this[e-1],u=0;++t<e;)n=r,r=this[t],u+=n[1]*r[0]-n[0]*r[1];return.5*u},Zc.centroid=function(n){var t,e,r=-1,u=this.length,i=0,o=0,a=this[u-1];for(arguments.length||(n=-1/(6*this.area()));++r<u;)t=a,a=this[r],e=t[0]*a[1]-a[0]*t[1],i+=(t[0]+a[0])*e,o+=(t[1]+a[1])*e;return[i*n,o*n]},Zc.clip=function(n){for(var 
t,e,r,u,i,o,a=Cr(n),c=-1,s=this.length-Cr(this),l=this[s-1];++c<s;){for(t=n.slice(),n.length=0,u=this[c],i=t[(r=t.length-a)-1],e=-1;++e<r;)o=t[e],Er(o,l,u)?(Er(i,l,u)||n.push(Ar(i,o,l,u)),n.push(o)):Er(i,l,u)&&n.push(Ar(i,o,l,u)),i=o;a&&n.push(n[0]),l=u}return n};var Vc,Xc,$c,Bc,Wc,Jc=[],Gc=[];Pr.prototype.prepare=function(){for(var n,t=this.edges,e=t.length;e--;)n=t[e].edge,n.b&&n.a||t.splice(e,1);return t.sort(jr),t.length},Br.prototype={start:function(){return this.edge.l===this.site?this.edge.a:this.edge.b},end:function(){return this.edge.l===this.site?this.edge.b:this.edge.a}},Wr.prototype={insert:function(n,t){var e,r,u;if(n){if(t.P=n,t.N=n.N,n.N&&(n.N.P=t),n.N=t,n.R){for(n=n.R;n.L;)n=n.L;n.L=t}else n.R=t;e=n}else this._?(n=Qr(this._),t.P=null,t.N=n,n.P=n.L=t,e=n):(t.P=t.N=null,this._=t,e=null);for(t.L=t.R=null,t.U=e,t.C=!0,n=t;e&&e.C;)r=e.U,e===r.L?(u=r.R,u&&u.C?(e.C=u.C=!1,r.C=!0,n=r):(n===e.R&&(Gr(this,e),n=e,e=n.U),e.C=!1,r.C=!0,Kr(this,r))):(u=r.L,u&&u.C?(e.C=u.C=!1,r.C=!0,n=r):(n===e.L&&(Kr(this,e),n=e,e=n.U),e.C=!1,r.C=!0,Gr(this,r))),e=n.U;this._.C=!1},remove:function(n){n.N&&(n.N.P=n.P),n.P&&(n.P.N=n.N),n.N=n.P=null;var t,e,r,u=n.U,i=n.L,o=n.R;if(e=i?o?Qr(o):i:o,u?u.L===n?u.L=e:u.R=e:this._=e,i&&o?(r=e.C,e.C=n.C,e.L=i,i.U=e,e!==o?(u=e.U,e.U=n.U,n=e.R,u.L=n,e.R=o,o.U=e):(e.U=u,u=e,n=e.R)):(r=n.C,n=e),n&&(n.U=u),!r){if(n&&n.C)return n.C=!1,void 0;do{if(n===this._)break;if(n===u.L){if(t=u.R,t.C&&(t.C=!1,u.C=!0,Gr(this,u),t=u.R),t.L&&t.L.C||t.R&&t.R.C){t.R&&t.R.C||(t.L.C=!1,t.C=!0,Kr(this,t),t=u.R),t.C=u.C,u.C=t.R.C=!1,Gr(this,u),n=this._;break}}else if(t=u.L,t.C&&(t.C=!1,u.C=!0,Kr(this,u),t=u.L),t.L&&t.L.C||t.R&&t.R.C){t.L&&t.L.C||(t.R.C=!1,t.C=!0,Gr(this,t),t=u.L),t.C=u.C,u.C=t.L.C=!1,Kr(this,u),n=this._;break}t.C=!0,n=u,u=u.U}while(!n.C);n&&(n.C=!1)}}},Xo.geom.voronoi=function(n){function t(n){var t=new Array(n.length),r=a[0][0],u=a[0][1],i=a[1][0],o=a[1][1];return nu(e(n),a).cells.forEach(function(e,a){var 
// NOTE(review): minified d3 v3 — d3.geom.voronoi accessors (cells/links/triangles, x/y, clipExtent/size; coordinates are snapped to an epsilon grid before the sweep), d3.geom.delaunay (derived from voronoi triangles), d3.geom.quadtree (recursive insert with coincident-point splitting), the interpolator suite (interpolateRgb/Object/Number/String/Array, d3.interpolate dispatch), the easing registry (d3.ease with in/out/in-out/out-in modes), and d3.transform which parses an SVG transform string via a detached <g> element.
c=e.edges,s=e.site,l=t[a]=c.length?c.map(function(n){var t=n.start();return[t.x,t.y]}):s.x>=r&&s.x<=i&&s.y>=u&&s.y<=o?[[r,o],[i,o],[i,u],[r,u]]:[];l.point=n[a]}),t}function e(n){return n.map(function(n,t){return{x:Math.round(i(n,t)/Aa)*Aa,y:Math.round(o(n,t)/Aa)*Aa,i:t}})}var r=br,u=wr,i=r,o=u,a=Kc;return n?t(n):(t.links=function(n){return nu(e(n)).edges.filter(function(n){return n.l&&n.r}).map(function(t){return{source:n[t.l.i],target:n[t.r.i]}})},t.triangles=function(n){var t=[];return nu(e(n)).cells.forEach(function(e,r){for(var u,i,o=e.site,a=e.edges.sort(jr),c=-1,s=a.length,l=a[s-1].edge,f=l.l===o?l.r:l.l;++c<s;)u=l,i=f,l=a[c].edge,f=l.l===o?l.r:l.l,r<i.i&&r<f.i&&eu(o,i,f)<0&&t.push([n[r],n[i.i],n[f.i]])}),t},t.x=function(n){return arguments.length?(i=_t(r=n),t):r},t.y=function(n){return arguments.length?(o=_t(u=n),t):u},t.clipExtent=function(n){return arguments.length?(a=null==n?Kc:n,t):a===Kc?null:a},t.size=function(n){return arguments.length?t.clipExtent(n&&[[0,0],n]):a===Kc?null:a&&a[1]},t)};var Kc=[[-1e6,-1e6],[1e6,1e6]];Xo.geom.delaunay=function(n){return Xo.geom.voronoi().triangles(n)},Xo.geom.quadtree=function(n,t,e,r,u){function i(n){function i(n,t,e,r,u,i,o,a){if(!isNaN(e)&&!isNaN(r))if(n.leaf){var c=n.x,l=n.y;if(null!=c)if(oa(c-e)+oa(l-r)<.01)s(n,t,e,r,u,i,o,a);else{var f=n.point;n.x=n.y=n.point=null,s(n,f,c,l,u,i,o,a),s(n,t,e,r,u,i,o,a)}else n.x=e,n.y=r,n.point=t}else s(n,t,e,r,u,i,o,a)}function s(n,t,e,r,u,o,a,c){var s=.5*(u+a),l=.5*(o+c),f=e>=s,h=r>=l,g=(h<<1)+f;n.leaf=!1,n=n.nodes[g]||(n.nodes[g]=iu()),f?u=s:a=s,h?o=l:c=l,i(n,t,e,r,u,o,a,c)}var l,f,h,g,p,v,d,m,y,x=_t(a),M=_t(c);if(null!=t)v=t,d=e,m=r,y=u;else if(m=y=-(v=d=1/0),f=[],h=[],p=n.length,o)for(g=0;p>g;++g)l=n[g],l.x<v&&(v=l.x),l.y<d&&(d=l.y),l.x>m&&(m=l.x),l.y>y&&(y=l.y),f.push(l.x),h.push(l.y);else for(g=0;p>g;++g){var _=+x(l=n[g],g),b=+M(l,g);v>_&&(v=_),d>b&&(d=b),_>m&&(m=_),b>y&&(y=b),f.push(_),h.push(b)}var w=m-v,S=y-d;w>S?y=d+w:m=v+S;var 
k=iu();if(k.add=function(n){i(k,n,+x(n,++g),+M(n,g),v,d,m,y)},k.visit=function(n){ou(n,k,v,d,m,y)},g=-1,null==t){for(;++g<p;)i(k,n[g],f[g],h[g],v,d,m,y);--g}else n.forEach(k.add);return f=h=n=l=null,k}var o,a=br,c=wr;return(o=arguments.length)?(a=ru,c=uu,3===o&&(u=e,r=t,e=t=0),i(n)):(i.x=function(n){return arguments.length?(a=n,i):a},i.y=function(n){return arguments.length?(c=n,i):c},i.extent=function(n){return arguments.length?(null==n?t=e=r=u=null:(t=+n[0][0],e=+n[0][1],r=+n[1][0],u=+n[1][1]),i):null==t?null:[[t,e],[r,u]]},i.size=function(n){return arguments.length?(null==n?t=e=r=u=null:(t=e=0,r=+n[0],u=+n[1]),i):null==t?null:[r-t,u-e]},i)},Xo.interpolateRgb=au,Xo.interpolateObject=cu,Xo.interpolateNumber=su,Xo.interpolateString=lu;var Qc=/[-+]?(?:\d+\.?\d*|\.?\d+)(?:[eE][-+]?\d+)?/g;Xo.interpolate=fu,Xo.interpolators=[function(n,t){var e=typeof t;return("string"===e?Va.has(t)||/^(#|rgb\(|hsl\()/.test(t)?au:lu:t instanceof G?au:"object"===e?Array.isArray(t)?hu:cu:su)(n,t)}],Xo.interpolateArray=hu;var ns=function(){return bt},ts=Xo.map({linear:ns,poly:xu,quad:function(){return du},cubic:function(){return mu},sin:function(){return Mu},exp:function(){return _u},circle:function(){return bu},elastic:wu,back:Su,bounce:function(){return ku}}),es=Xo.map({"in":bt,out:pu,"in-out":vu,"out-in":function(n){return vu(pu(n))}});Xo.ease=function(n){var t=n.indexOf("-"),e=t>=0?n.substring(0,t):n,r=t>=0?n.substring(t+1):"in";return e=ts.get(e)||ns,r=es.get(r)||bt,gu(r(e.apply(null,$o.call(arguments,1))))},Xo.interpolateHcl=Eu,Xo.interpolateHsl=Au,Xo.interpolateLab=Cu,Xo.interpolateRound=Nu,Xo.transform=function(n){var t=Wo.createElementNS(Xo.ns.prefix.svg,"g");return(Xo.transform=function(n){if(null!=n){t.setAttribute("transform",n);var e=t.transform.baseVal.consolidate()}return new Lu(e?e.matrix:rs)})(n)},Lu.prototype.toString=function(){return"translate("+this.translate+")rotate("+this.rotate+")skewX("+this.skew+")scale("+this.scale+")"};var 
// NOTE(review): minified d3 v3 — identity matrix constant (rs), d3.interpolateTransform, then the layout namespace: d3.layout.bundle, d3.layout.chord (matrix -> chord/group geometry with sortGroups/sortSubgroups/sortChords), d3.layout.force (velocity-Verlet-style tick with link forces, gravity, and Barnes-Hut-style charge approximation over a quadtree; start() resolves numeric link endpoints and initializes positions), d3.layout.hierarchy, and the start of d3.layout.partition.
rs={a:1,b:0,c:0,d:1,e:0,f:0};Xo.interpolateTransform=Ru,Xo.layout={},Xo.layout.bundle=function(){return function(n){for(var t=[],e=-1,r=n.length;++e<r;)t.push(Uu(n[e]));return t}},Xo.layout.chord=function(){function n(){var n,s,f,h,g,p={},v=[],d=Xo.range(i),m=[];for(e=[],r=[],n=0,h=-1;++h<i;){for(s=0,g=-1;++g<i;)s+=u[h][g];v.push(s),m.push(Xo.range(i)),n+=s}for(o&&d.sort(function(n,t){return o(v[n],v[t])}),a&&m.forEach(function(n,t){n.sort(function(n,e){return a(u[t][n],u[t][e])})}),n=(ka-l*i)/n,s=0,h=-1;++h<i;){for(f=s,g=-1;++g<i;){var y=d[h],x=m[y][g],M=u[y][x],_=s,b=s+=M*n;p[y+"-"+x]={index:y,subindex:x,startAngle:_,endAngle:b,value:M}}r[y]={index:y,startAngle:f,endAngle:s,value:(s-f)/n},s+=l}for(h=-1;++h<i;)for(g=h-1;++g<i;){var w=p[h+"-"+g],S=p[g+"-"+h];(w.value||S.value)&&e.push(w.value<S.value?{source:S,target:w}:{source:w,target:S})}c&&t()}function t(){e.sort(function(n,t){return c((n.source.value+n.target.value)/2,(t.source.value+t.target.value)/2)})}var e,r,u,i,o,a,c,s={},l=0;return s.matrix=function(n){return arguments.length?(i=(u=n)&&u.length,e=r=null,s):u},s.padding=function(n){return arguments.length?(l=n,e=r=null,s):l},s.sortGroups=function(n){return arguments.length?(o=n,e=r=null,s):o},s.sortSubgroups=function(n){return arguments.length?(a=n,e=null,s):a},s.sortChords=function(n){return arguments.length?(c=n,e&&t(),s):c},s.chords=function(){return e||n(),e},s.groups=function(){return r||n(),r},s},Xo.layout.force=function(){function n(n){return function(t,e,r,u){if(t.point!==n){var i=t.cx-n.x,o=t.cy-n.y,a=u-e,c=i*i+o*o;if(c>a*a/d){if(p>c){var s=t.charge/c;n.px-=i*s,n.py-=o*s}return!0}if(t.point&&c&&p>c){var s=t.pointCharge/c;n.px-=i*s,n.py-=o*s}}return!t.charge}}function t(n){n.px=Xo.event.x,n.py=Xo.event.y,a.resume()}var e,r,u,i,o,a={},c=Xo.dispatch("start","tick","end"),s=[1,1],l=.9,f=us,h=is,g=-30,p=os,v=.1,d=.64,m=[],y=[];return a.tick=function(){if((r*=.99)<.005)return c.end({type:"end",alpha:r=0}),!0;var 
t,e,a,f,h,p,d,x,M,_=m.length,b=y.length;for(e=0;b>e;++e)a=y[e],f=a.source,h=a.target,x=h.x-f.x,M=h.y-f.y,(p=x*x+M*M)&&(p=r*i[e]*((p=Math.sqrt(p))-u[e])/p,x*=p,M*=p,h.x-=x*(d=f.weight/(h.weight+f.weight)),h.y-=M*d,f.x+=x*(d=1-d),f.y+=M*d);if((d=r*v)&&(x=s[0]/2,M=s[1]/2,e=-1,d))for(;++e<_;)a=m[e],a.x+=(x-a.x)*d,a.y+=(M-a.y)*d;if(g)for(Zu(t=Xo.geom.quadtree(m),r,o),e=-1;++e<_;)(a=m[e]).fixed||t.visit(n(a));for(e=-1;++e<_;)a=m[e],a.fixed?(a.x=a.px,a.y=a.py):(a.x-=(a.px-(a.px=a.x))*l,a.y-=(a.py-(a.py=a.y))*l);c.tick({type:"tick",alpha:r})},a.nodes=function(n){return arguments.length?(m=n,a):m},a.links=function(n){return arguments.length?(y=n,a):y},a.size=function(n){return arguments.length?(s=n,a):s},a.linkDistance=function(n){return arguments.length?(f="function"==typeof n?n:+n,a):f},a.distance=a.linkDistance,a.linkStrength=function(n){return arguments.length?(h="function"==typeof n?n:+n,a):h},a.friction=function(n){return arguments.length?(l=+n,a):l},a.charge=function(n){return arguments.length?(g="function"==typeof n?n:+n,a):g},a.chargeDistance=function(n){return arguments.length?(p=n*n,a):Math.sqrt(p)},a.gravity=function(n){return arguments.length?(v=+n,a):v},a.theta=function(n){return arguments.length?(d=n*n,a):Math.sqrt(d)},a.alpha=function(n){return arguments.length?(n=+n,r?r=n>0?n:0:n>0&&(c.start({type:"start",alpha:r=n}),Xo.timer(a.tick)),a):r},a.start=function(){function n(n,r){if(!e){for(e=new Array(c),a=0;c>a;++a)e[a]=[];for(a=0;s>a;++a){var u=y[a];e[u.source.index].push(u.target),e[u.target.index].push(u.source)}}for(var i,o=e[t],a=-1,s=o.length;++a<s;)if(!isNaN(i=o[a][n]))return i;return Math.random()*r}var t,e,r,c=m.length,l=y.length,p=s[0],v=s[1];for(t=0;c>t;++t)(r=m[t]).index=t,r.weight=0;for(t=0;l>t;++t)r=y[t],"number"==typeof r.source&&(r.source=m[r.source]),"number"==typeof 
r.target&&(r.target=m[r.target]),++r.source.weight,++r.target.weight;for(t=0;c>t;++t)r=m[t],isNaN(r.x)&&(r.x=n("x",p)),isNaN(r.y)&&(r.y=n("y",v)),isNaN(r.px)&&(r.px=r.x),isNaN(r.py)&&(r.py=r.y);if(u=[],"function"==typeof f)for(t=0;l>t;++t)u[t]=+f.call(this,y[t],t);else for(t=0;l>t;++t)u[t]=f;if(i=[],"function"==typeof h)for(t=0;l>t;++t)i[t]=+h.call(this,y[t],t);else for(t=0;l>t;++t)i[t]=h;if(o=[],"function"==typeof g)for(t=0;c>t;++t)o[t]=+g.call(this,m[t],t);else for(t=0;c>t;++t)o[t]=g;return a.resume()},a.resume=function(){return a.alpha(.1)},a.stop=function(){return a.alpha(0)},a.drag=function(){return e||(e=Xo.behavior.drag().origin(bt).on("dragstart.force",Fu).on("drag.force",t).on("dragend.force",Ou)),arguments.length?(this.on("mouseover.force",Yu).on("mouseout.force",Iu).call(e),void 0):e},Xo.rebind(a,c,"on")};var us=20,is=1,os=1/0;Xo.layout.hierarchy=function(){function n(t,o,a){var c=u.call(e,t,o);if(t.depth=o,a.push(t),c&&(s=c.length)){for(var s,l,f=-1,h=t.children=new Array(s),g=0,p=o+1;++f<s;)l=h[f]=n(c[f],p,a),l.parent=t,g+=l.value;r&&h.sort(r),i&&(t.value=g)}else delete t.children,i&&(t.value=+i.call(e,t,o)||0);return t}function t(n,r){var u=n.children,o=0;if(u&&(a=u.length))for(var a,c=-1,s=r+1;++c<a;)o+=t(u[c],s);else i&&(o=+i.call(e,n,r)||0);return i&&(n.value=o),o}function e(t){var e=[];return n(t,0,e),e}var r=Bu,u=Xu,i=$u;return e.sort=function(n){return arguments.length?(r=n,e):r},e.children=function(n){return arguments.length?(u=n,e):u},e.value=function(n){return arguments.length?(i=n,e):i},e.revalue=function(n){return t(n,0),n},e},Xo.layout.partition=function(){function n(t,e,r,u){var i=t.children;if(t.x=e,t.y=t.depth*u,t.dx=r,t.dy=u,i&&(o=i.length)){var o,a,c,s=-1;for(r=t.value?r/t.value:0;++s<o;)n(a=i[s],e,c=a.value*r,u),e+=c}function t(n){var e=n.children,r=0;if(e&&(u=e.length))for(var u,i=-1;++i<u;)r=Math.max(r,t(e[i]));return 1+r}function e(e,i){var o=r.call(this,e,i);return n(o[0],0,u[0],u[1]/t(o[0])),o}var 
// NOTE(review): minified d3 v3 — tail of d3.layout.partition, d3.layout.pie (value/sort/startAngle/endAngle), d3.layout.stack with its order map (cs: "inside-out"/reverse/default) and offset map (ss: silhouette/wiggle/expand/zero), d3.layout.histogram (frequency vs. density via the p=t?1:1/h weight), and the start of d3.layout.tree.
r=Xo.layout.hierarchy(),u=[1,1];return e.size=function(n){return arguments.length?(u=n,e):u},Vu(e,r)},Xo.layout.pie=function(){function n(i){var o=i.map(function(e,r){return+t.call(n,e,r)}),a=+("function"==typeof r?r.apply(this,arguments):r),c=(("function"==typeof u?u.apply(this,arguments):u)-a)/Xo.sum(o),s=Xo.range(i.length);null!=e&&s.sort(e===as?function(n,t){return o[t]-o[n]}:function(n,t){return e(i[n],i[t])});var l=[];return s.forEach(function(n){var t;l[n]={data:i[n],value:t=o[n],startAngle:a,endAngle:a+=t*c}}),l}var t=Number,e=as,r=0,u=ka;return n.value=function(e){return arguments.length?(t=e,n):t},n.sort=function(t){return arguments.length?(e=t,n):e},n.startAngle=function(t){return arguments.length?(r=t,n):r},n.endAngle=function(t){return arguments.length?(u=t,n):u},n};var as={};Xo.layout.stack=function(){function n(a,c){var s=a.map(function(e,r){return t.call(n,e,r)}),l=s.map(function(t){return t.map(function(t,e){return[i.call(n,t,e),o.call(n,t,e)]})}),f=e.call(n,l,c);s=Xo.permute(s,f),l=Xo.permute(l,f);var h,g,p,v=r.call(n,l,c),d=s.length,m=s[0].length;for(g=0;m>g;++g)for(u.call(n,s[0][g],p=v[g],l[0][g][1]),h=1;d>h;++h)u.call(n,s[h][g],p+=l[h-1][g][1],l[h][g][1]);return a}var t=bt,e=Qu,r=ni,u=Ku,i=Ju,o=Gu;return n.values=function(e){return arguments.length?(t=e,n):t},n.order=function(t){return arguments.length?(e="function"==typeof t?t:cs.get(t)||Qu,n):e},n.offset=function(t){return arguments.length?(r="function"==typeof t?t:ss.get(t)||ni,n):r},n.x=function(t){return arguments.length?(i=t,n):i},n.y=function(t){return arguments.length?(o=t,n):o},n.out=function(t){return arguments.length?(u=t,n):u},n};var cs=Xo.map({"inside-out":function(n){var t,e,r=n.length,u=n.map(ti),i=n.map(ei),o=Xo.range(r).sort(function(n,t){return u[n]-u[t]}),a=0,c=0,s=[],l=[];for(t=0;r>t;++t)e=o[t],c>a?(a+=i[e],s.push(e)):(c+=i[e],l.push(e));return l.reverse().concat(s)},reverse:function(n){return Xo.range(n.length).reverse()},"default":Qu}),ss=Xo.map({silhouette:function(n){var 
t,e,r,u=n.length,i=n[0].length,o=[],a=0,c=[];for(e=0;i>e;++e){for(t=0,r=0;u>t;t++)r+=n[t][e][1];r>a&&(a=r),o.push(r)}for(e=0;i>e;++e)c[e]=(a-o[e])/2;return c},wiggle:function(n){var t,e,r,u,i,o,a,c,s,l=n.length,f=n[0],h=f.length,g=[];for(g[0]=c=s=0,e=1;h>e;++e){for(t=0,u=0;l>t;++t)u+=n[t][e][1];for(t=0,i=0,a=f[e][0]-f[e-1][0];l>t;++t){for(r=0,o=(n[t][e][1]-n[t][e-1][1])/(2*a);t>r;++r)o+=(n[r][e][1]-n[r][e-1][1])/a;i+=o*n[t][e][1]}g[e]=c-=u?i/u*a:0,s>c&&(s=c)}for(e=0;h>e;++e)g[e]-=s;return g},expand:function(n){var t,e,r,u=n.length,i=n[0].length,o=1/u,a=[];for(e=0;i>e;++e){for(t=0,r=0;u>t;t++)r+=n[t][e][1];if(r)for(t=0;u>t;t++)n[t][e][1]/=r;else for(t=0;u>t;t++)n[t][e][1]=o}for(e=0;i>e;++e)a[e]=0;return a},zero:ni});Xo.layout.histogram=function(){function n(n,i){for(var o,a,c=[],s=n.map(e,this),l=r.call(this,s,i),f=u.call(this,l,s,i),i=-1,h=s.length,g=f.length-1,p=t?1:1/h;++i<g;)o=c[i]=[],o.dx=f[i+1]-(o.x=f[i]),o.y=0;if(g>0)for(i=-1;++i<h;)a=s[i],a>=l[0]&&a<=l[1]&&(o=c[Xo.bisect(f,a,1,g)-1],o.y+=p,o.push(n[i]));return c}var t=!0,e=Number,r=oi,u=ui;return n.value=function(t){return arguments.length?(e=t,n):e},n.range=function(t){return arguments.length?(r=_t(t),n):r},n.bins=function(t){return arguments.length?(u="number"==typeof t?function(n){return ii(n,t)}:_t(t),n):u},n.frequency=function(e){return arguments.length?(t=!!e,n):t},n},Xo.layout.tree=function(){function n(n,i){function o(n,t){var r=n.children,u=n._tree;if(r&&(i=r.length)){for(var i,a,s,l=r[0],f=l,h=-1;++h<i;)s=r[h],o(s,a),f=c(s,a,f),a=s;vi(n);var g=.5*(l._tree.prelim+s._tree.prelim);t?(u.prelim=t._tree.prelim+e(n,t),u.mod=u.prelim-g):u.prelim=g}else t&&(u.prelim=t._tree.prelim+e(n,t))}function a(n,t){n.x=n._tree.prelim+t;var e=n.children;if(e&&(r=e.length)){var r,u=-1;for(t+=n._tree.mod;++u<r;)a(e[u],t)}}function c(n,t,r){if(t){for(var 
u,i=n,o=n,a=t,c=n.parent.children[0],s=i._tree.mod,l=o._tree.mod,f=a._tree.mod,h=c._tree.mod;a=si(a),i=ci(i),a&&i;)c=ci(c),o=si(o),o._tree.ancestor=n,u=a._tree.prelim+f-i._tree.prelim-s+e(a,i),u>0&&(di(mi(a,n,r),n,u),s+=u,l+=u),f+=a._tree.mod,s+=i._tree.mod,h+=c._tree.mod,l+=o._tree.mod;a&&!si(o)&&(o._tree.thread=a,o._tree.mod+=f-l),i&&!ci(c)&&(c._tree.thread=i,c._tree.mod+=s-h,r=n)}return r}var s=t.call(this,n,i),l=s[0];pi(l,function(n,t){n._tree={ancestor:n,prelim:0,mod:0,change:0,shift:0,number:t?t._tree.number+1:0}}),o(l),a(l,-l._tree.prelim);var f=li(l,hi),h=li(l,fi),g=li(l,gi),p=f.x-e(f,h)/2,v=h.x+e(h,f)/2,d=g.depth||1;return pi(l,u?function(n){n.x*=r[0],n.y=n.depth*r[1],delete n._tree}:function(n){n.x=(n.x-p)/(v-p)*r[0],n.y=n.depth/d*r[1],delete n._tree}),s}var t=Xo.layout.hierarchy().sort(null).value(null),e=ai,r=[1,1],u=!1;return n.separation=function(t){return arguments.length?(e=t,n):e},n.size=function(t){return arguments.length?(u=null==(r=t),n):u?null:r},n.nodeSize=function(t){return arguments.length?(u=null!=(r=t),n):u?r:null},Vu(n,t)},Xo.layout.pack=function(){function n(n,i){var o=e.call(this,n,i),a=o[0],c=u[0],s=u[1],l=null==t?Math.sqrt:"function"==typeof t?t:function(){return t};if(a.x=a.y=0,pi(a,function(n){n.r=+l(n.value)}),pi(a,bi),r){var f=r*(t?1:Math.max(2*a.r/c,2*a.r/s))/2;pi(a,function(n){n.r+=f}),pi(a,bi),pi(a,function(n){n.r-=f})}return ki(a,c/2,s/2,t?1:1/Math.max(2*a.r/c,2*a.r/s)),o}var t,e=Xo.layout.hierarchy().sort(yi),r=0,u=[1,1];return n.size=function(t){return arguments.length?(u=t,n):u},n.radius=function(e){return arguments.length?(t=null==e||"function"==typeof e?e:+e,n):t},n.padding=function(t){return arguments.length?(r=+t,n):r},Vu(n,e)},Xo.layout.cluster=function(){function n(n,i){var o,a=t.call(this,n,i),c=a[0],s=0;pi(c,function(n){var t=n.children;t&&t.length?(n.x=Ci(t),n.y=Ai(t)):(n.x=o?s+=e(n,o):0,n.y=0,o=n)});var l=Ni(c),f=Li(c),h=l.x-e(l,f)/2,g=f.x+e(f,l)/2;return 
pi(c,u?function(n){n.x=(n.x-c.x)*r[0],n.y=(c.y-n.y)*r[1]}:function(n){n.x=(n.x-h)/(g-h)*r[0],n.y=(1-(c.y?n.y/c.y:1))*r[1]}),a}var t=Xo.layout.hierarchy().sort(null).value(null),e=ai,r=[1,1],u=!1;return n.separation=function(t){return arguments.length?(e=t,n):e},n.size=function(t){return arguments.length?(u=null==(r=t),n):u?null:r},n.nodeSize=function(t){return arguments.length?(u=null!=(r=t),n):u?r:null},Vu(n,t)},Xo.layout.treemap=function(){function n(n,t){for(var e,r,u=-1,i=n.length;++u<i;)r=(e=n[u]).value*(0>t?0:t),e.area=isNaN(r)||0>=r?0:r}function t(e){var i=e.children;if(i&&i.length){var o,a,c,s=f(e),l=[],h=i.slice(),p=1/0,v="slice"===g?s.dx:"dice"===g?s.dy:"slice-dice"===g?1&e.depth?s.dy:s.dx:Math.min(s.dx,s.dy);for(n(h,s.dx*s.dy/e.value),l.area=0;(c=h.length)>0;)l.push(o=h[c-1]),l.area+=o.area,"squarify"!==g||(a=r(l,v))<=p?(h.pop(),p=a):(l.area-=l.pop().area,u(l,v,s,!1),v=Math.min(s.dx,s.dy),l.length=l.area=0,p=1/0);l.length&&(u(l,v,s,!0),l.length=l.area=0),i.forEach(t)}}function e(t){var r=t.children;if(r&&r.length){var i,o=f(t),a=r.slice(),c=[];for(n(a,o.dx*o.dy/t.value),c.area=0;i=a.pop();)c.push(i),c.area+=i.area,null!=i.z&&(u(c,i.z?o.dx:o.dy,o,!a.length),c.length=c.area=0);r.forEach(e)}}function r(n,t){for(var e,r=n.area,u=0,i=1/0,o=-1,a=n.length;++o<a;)(e=n[o].area)&&(i>e&&(i=e),e>u&&(u=e));return r*=r,t*=t,r?Math.max(t*u*p/r,r/(t*i*p)):1/0}function u(n,t,e,r){var u,i=-1,o=n.length,a=e.x,s=e.y,l=t?c(n.area/t):0;if(t==e.dx){for((r||l>e.dy)&&(l=e.dy);++i<o;)u=n[i],u.x=a,u.y=s,u.dy=l,a+=u.dx=Math.min(e.x+e.dx-a,l?c(u.area/l):0);u.z=!0,u.dx+=e.x+e.dx-a,e.y+=l,e.dy-=l}else{for((r||l>e.dx)&&(l=e.dx);++i<o;)u=n[i],u.x=a,u.y=s,u.dx=l,s+=u.dy=Math.min(e.y+e.dy-s,l?c(u.area/l):0);u.z=!1,u.dy+=e.y+e.dy-s,e.x+=l,e.dx-=l}}function i(r){var u=o||a(r),i=u[0];return i.x=0,i.y=0,i.dx=s[0],i.dy=s[1],o&&a.revalue(i),n([i],i.dx*i.dy/i.value),(o?e:t)(i),h&&(o=u),u}var o,a=Xo.layout.hierarchy(),c=Math.round,s=[1,1],l=null,f=Ti,h=!1,g="squarify",p=.5*(1+Math.sqrt(5));return 
i.size=function(n){return arguments.length?(s=n,i):s},i.padding=function(n){function t(t){var e=n.call(i,t,t.depth);return null==e?Ti(t):qi(t,"number"==typeof e?[e,e,e,e]:e)}function e(t){return qi(t,n)}if(!arguments.length)return l;var r;return f=null==(l=n)?Ti:"function"==(r=typeof n)?t:"number"===r?(n=[n,n,n,n],e):e,i},i.round=function(n){return arguments.length?(c=n?Math.round:Number,i):c!=Number},i.sticky=function(n){return arguments.length?(h=n,o=null,i):h},i.ratio=function(n){return arguments.length?(p=n,i):p},i.mode=function(n){return arguments.length?(g=n+"",i):g},Vu(i,a)},Xo.random={normal:function(n,t){var e=arguments.length;return 2>e&&(t=1),1>e&&(n=0),function(){var e,r,u;do e=2*Math.random()-1,r=2*Math.random()-1,u=e*e+r*r;while(!u||u>1);return n+t*e*Math.sqrt(-2*Math.log(u)/u)}},logNormal:function(){var n=Xo.random.normal.apply(Xo,arguments);return function(){return Math.exp(n())}},bates:function(n){var t=Xo.random.irwinHall(n);return function(){return t()/n}},irwinHall:function(n){return function(){for(var t=0,e=0;n>e;e++)t+=Math.random();return t}}},Xo.scale={};var ls={floor:bt,ceil:bt};Xo.scale.linear=function(){return Hi([0,1],[0,1],fu,!1)};var fs={s:1,g:1,p:1,r:1,e:1};Xo.scale.log=function(){return $i(Xo.scale.linear().domain([0,1]),10,!0,[1,10])};var hs=Xo.format(".0e"),gs={floor:function(n){return-Math.ceil(-n)},ceil:function(n){return-Math.floor(-n)}};Xo.scale.pow=function(){return Bi(Xo.scale.linear(),1,[0,1])},Xo.scale.sqrt=function(){return Xo.scale.pow().exponent(.5)},Xo.scale.ordinal=function(){return Ji([],{t:"range",a:[[]]})},Xo.scale.category10=function(){return Xo.scale.ordinal().range(ps)},Xo.scale.category20=function(){return Xo.scale.ordinal().range(vs)},Xo.scale.category20b=function(){return Xo.scale.ordinal().range(ds)},Xo.scale.category20c=function(){return Xo.scale.ordinal().range(ms)};var 
ps=[2062260,16744206,2924588,14034728,9725885,9197131,14907330,8355711,12369186,1556175].map(ht),vs=[2062260,11454440,16744206,16759672,2924588,10018698,14034728,16750742,9725885,12955861,9197131,12885140,14907330,16234194,8355711,13092807,12369186,14408589,1556175,10410725].map(ht),ds=[3750777,5395619,7040719,10264286,6519097,9216594,11915115,13556636,9202993,12426809,15186514,15190932,8666169,11356490,14049643,15177372,8077683,10834324,13528509,14589654].map(ht),ms=[3244733,7057110,10406625,13032431,15095053,16616764,16625259,16634018,3253076,7652470,10607003,13101504,7695281,10394312,12369372,14342891,6513507,9868950,12434877,14277081].map(ht);Xo.scale.quantile=function(){return Gi([],[])},Xo.scale.quantize=function(){return Ki(0,1,[0,1])},Xo.scale.threshold=function(){return Qi([.5],[0,1])},Xo.scale.identity=function(){return no([0,1])},Xo.svg={},Xo.svg.arc=function(){function n(){var n=t.apply(this,arguments),i=e.apply(this,arguments),o=r.apply(this,arguments)+ys,a=u.apply(this,arguments)+ys,c=(o>a&&(c=o,o=a,a=c),a-o),s=Sa>c?"0":"1",l=Math.cos(o),f=Math.sin(o),h=Math.cos(a),g=Math.sin(a);return c>=xs?n?"M0,"+i+"A"+i+","+i+" 0 1,1 0,"+-i+"A"+i+","+i+" 0 1,1 0,"+i+"M0,"+n+"A"+n+","+n+" 0 1,0 0,"+-n+"A"+n+","+n+" 0 1,0 0,"+n+"Z":"M0,"+i+"A"+i+","+i+" 0 1,1 0,"+-i+"A"+i+","+i+" 0 1,1 0,"+i+"Z":n?"M"+i*l+","+i*f+"A"+i+","+i+" 0 "+s+",1 "+i*h+","+i*g+"L"+n*h+","+n*g+"A"+n+","+n+" 0 "+s+",0 "+n*l+","+n*f+"Z":"M"+i*l+","+i*f+"A"+i+","+i+" 0 "+s+",1 "+i*h+","+i*g+"L0,0"+"Z"}var t=to,e=eo,r=ro,u=uo;return n.innerRadius=function(e){return arguments.length?(t=_t(e),n):t},n.outerRadius=function(t){return arguments.length?(e=_t(t),n):e},n.startAngle=function(t){return arguments.length?(r=_t(t),n):r},n.endAngle=function(t){return arguments.length?(u=_t(t),n):u},n.centroid=function(){var n=(t.apply(this,arguments)+e.apply(this,arguments))/2,i=(r.apply(this,arguments)+u.apply(this,arguments))/2+ys;return[Math.cos(i)*n,Math.sin(i)*n]},n};var 
ys=-Ea,xs=ka-Aa;Xo.svg.line=function(){return io(bt)};var Ms=Xo.map({linear:oo,"linear-closed":ao,step:co,"step-before":so,"step-after":lo,basis:mo,"basis-open":yo,"basis-closed":xo,bundle:Mo,cardinal:go,"cardinal-open":fo,"cardinal-closed":ho,monotone:Eo});Ms.forEach(function(n,t){t.key=n,t.closed=/-closed$/.test(n)});var _s=[0,2/3,1/3,0],bs=[0,1/3,2/3,0],ws=[0,1/6,2/3,1/6];Xo.svg.line.radial=function(){var n=io(Ao);return n.radius=n.x,delete n.x,n.angle=n.y,delete n.y,n},so.reverse=lo,lo.reverse=so,Xo.svg.area=function(){return Co(bt)},Xo.svg.area.radial=function(){var n=Co(Ao);return n.radius=n.x,delete n.x,n.innerRadius=n.x0,delete n.x0,n.outerRadius=n.x1,delete n.x1,n.angle=n.y,delete n.y,n.startAngle=n.y0,delete n.y0,n.endAngle=n.y1,delete n.y1,n},Xo.svg.chord=function(){function n(n,a){var c=t(this,i,n,a),s=t(this,o,n,a);return"M"+c.p0+r(c.r,c.p1,c.a1-c.a0)+(e(c,s)?u(c.r,c.p1,c.r,c.p0):u(c.r,c.p1,s.r,s.p0)+r(s.r,s.p1,s.a1-s.a0)+u(s.r,s.p1,c.r,c.p0))+"Z"}function t(n,t,e,r){var u=t.call(n,e,r),i=a.call(n,u,r),o=c.call(n,u,r)+ys,l=s.call(n,u,r)+ys;return{r:i,a0:o,a1:l,p0:[i*Math.cos(o),i*Math.sin(o)],p1:[i*Math.cos(l),i*Math.sin(l)]}}function e(n,t){return n.a0==t.a0&&n.a1==t.a1}function r(n,t,e){return"A"+n+","+n+" 0 "+ +(e>Sa)+",1 "+t}function u(n,t,e,r){return"Q 0,0 "+r}var i=hr,o=gr,a=No,c=ro,s=uo;return n.radius=function(t){return arguments.length?(a=_t(t),n):a},n.source=function(t){return arguments.length?(i=_t(t),n):i},n.target=function(t){return arguments.length?(o=_t(t),n):o},n.startAngle=function(t){return arguments.length?(c=_t(t),n):c},n.endAngle=function(t){return arguments.length?(s=_t(t),n):s},n},Xo.svg.diagonal=function(){function n(n,u){var i=t.call(this,n,u),o=e.call(this,n,u),a=(i.y+o.y)/2,c=[i,{x:i.x,y:a},{x:o.x,y:a},o];return c=c.map(r),"M"+c[0]+"C"+c[1]+" "+c[2]+" "+c[3]}var t=hr,e=gr,r=Lo;return n.source=function(e){return arguments.length?(t=_t(e),n):t},n.target=function(t){return 
arguments.length?(e=_t(t),n):e},n.projection=function(t){return arguments.length?(r=t,n):r},n},Xo.svg.diagonal.radial=function(){var n=Xo.svg.diagonal(),t=Lo,e=n.projection;return n.projection=function(n){return arguments.length?e(To(t=n)):t},n},Xo.svg.symbol=function(){function n(n,r){return(Ss.get(t.call(this,n,r))||Ro)(e.call(this,n,r))}var t=zo,e=qo;return n.type=function(e){return arguments.length?(t=_t(e),n):t},n.size=function(t){return arguments.length?(e=_t(t),n):e},n};var Ss=Xo.map({circle:Ro,cross:function(n){var t=Math.sqrt(n/5)/2;return"M"+-3*t+","+-t+"H"+-t+"V"+-3*t+"H"+t+"V"+-t+"H"+3*t+"V"+t+"H"+t+"V"+3*t+"H"+-t+"V"+t+"H"+-3*t+"Z"},diamond:function(n){var t=Math.sqrt(n/(2*Cs)),e=t*Cs;return"M0,"+-t+"L"+e+",0"+" 0,"+t+" "+-e+",0"+"Z"},square:function(n){var t=Math.sqrt(n)/2;return"M"+-t+","+-t+"L"+t+","+-t+" "+t+","+t+" "+-t+","+t+"Z"},"triangle-down":function(n){var t=Math.sqrt(n/As),e=t*As/2;return"M0,"+e+"L"+t+","+-e+" "+-t+","+-e+"Z"},"triangle-up":function(n){var t=Math.sqrt(n/As),e=t*As/2;return"M0,"+-e+"L"+t+","+e+" "+-t+","+e+"Z"}});Xo.svg.symbolTypes=Ss.keys();var ks,Es,As=Math.sqrt(3),Cs=Math.tan(30*Na),Ns=[],Ls=0;Ns.call=da.call,Ns.empty=da.empty,Ns.node=da.node,Ns.size=da.size,Xo.transition=function(n){return arguments.length?ks?n.transition():n:xa.transition()},Xo.transition.prototype=Ns,Ns.select=function(n){var t,e,r,u=this.id,i=[];n=M(n);for(var o=-1,a=this.length;++o<a;){i.push(t=[]);for(var c=this[o],s=-1,l=c.length;++s<l;)(r=c[s])&&(e=n.call(r,r.__data__,s,o))?("__data__"in r&&(e.__data__=r.__data__),jo(e,s,u,r.__transition__[u]),t.push(e)):t.push(null)}return Do(i,u)},Ns.selectAll=function(n){var t,e,r,u,i,o=this.id,a=[];n=_(n);for(var c=-1,s=this.length;++c<s;)for(var l=this[c],f=-1,h=l.length;++f<h;)if(r=l[f]){i=r.__transition__[o],e=n.call(r,r.__data__,f,c),a.push(t=[]);for(var g=-1,p=e.length;++g<p;)(u=e[g])&&jo(u,g,o,i),t.push(u)}return Do(a,o)},Ns.filter=function(n){var t,e,r,u=[];"function"!=typeof n&&(n=q(n));for(var 
i=0,o=this.length;o>i;i++){u.push(t=[]);for(var e=this[i],a=0,c=e.length;c>a;a++)(r=e[a])&&n.call(r,r.__data__,a,i)&&t.push(r)}return Do(u,this.id)},Ns.tween=function(n,t){var e=this.id;return arguments.length<2?this.node().__transition__[e].tween.get(n):R(this,null==t?function(t){t.__transition__[e].tween.remove(n)}:function(r){r.__transition__[e].tween.set(n,t)})},Ns.attr=function(n,t){function e(){this.removeAttribute(a)}function r(){this.removeAttributeNS(a.space,a.local)}function u(n){return null==n?e:(n+="",function(){var t,e=this.getAttribute(a);return e!==n&&(t=o(e,n),function(n){this.setAttribute(a,t(n))})})}function i(n){return null==n?r:(n+="",function(){var t,e=this.getAttributeNS(a.space,a.local);return e!==n&&(t=o(e,n),function(n){this.setAttributeNS(a.space,a.local,t(n))})})}if(arguments.length<2){for(t in n)this.attr(t,n[t]);return this}var o="transform"==n?Ru:fu,a=Xo.ns.qualify(n);return Po(this,"attr."+n,t,a.local?i:u)},Ns.attrTween=function(n,t){function e(n,e){var r=t.call(this,n,e,this.getAttribute(u));return r&&function(n){this.setAttribute(u,r(n))}}function r(n,e){var r=t.call(this,n,e,this.getAttributeNS(u.space,u.local));return r&&function(n){this.setAttributeNS(u.space,u.local,r(n))}}var u=Xo.ns.qualify(n);return this.tween("attr."+n,u.local?r:e)},Ns.style=function(n,t,e){function r(){this.style.removeProperty(n)}function u(t){return null==t?r:(t+="",function(){var r,u=Go.getComputedStyle(this,null).getPropertyValue(n);return u!==t&&(r=fu(u,t),function(t){this.style.setProperty(n,r(t),e)})})}var i=arguments.length;if(3>i){if("string"!=typeof n){2>i&&(t="");for(e in n)this.style(e,n[e],t);return this}e=""}return Po(this,"style."+n,t,u)},Ns.styleTween=function(n,t,e){function r(r,u){var i=t.call(this,r,u,Go.getComputedStyle(this,null).getPropertyValue(n));return i&&function(t){this.style.setProperty(n,i(t),e)}}return arguments.length<3&&(e=""),this.tween("style."+n,r)},Ns.text=function(n){return 
Po(this,"text",n,Uo)},Ns.remove=function(){return this.each("end.transition",function(){var n;this.__transition__.count<2&&(n=this.parentNode)&&n.removeChild(this)})},Ns.ease=function(n){var t=this.id;return arguments.length<1?this.node().__transition__[t].ease:("function"!=typeof n&&(n=Xo.ease.apply(Xo,arguments)),R(this,function(e){e.__transition__[t].ease=n}))},Ns.delay=function(n){var t=this.id;return R(this,"function"==typeof n?function(e,r,u){e.__transition__[t].delay=+n.call(e,e.__data__,r,u)}:(n=+n,function(e){e.__transition__[t].delay=n}))},Ns.duration=function(n){var t=this.id;return R(this,"function"==typeof n?function(e,r,u){e.__transition__[t].duration=Math.max(1,n.call(e,e.__data__,r,u))}:(n=Math.max(1,n),function(e){e.__transition__[t].duration=n}))},Ns.each=function(n,t){var e=this.id;if(arguments.length<2){var r=Es,u=ks;ks=e,R(this,function(t,r,u){Es=t.__transition__[e],n.call(t,t.__data__,r,u)}),Es=r,ks=u}else R(this,function(r){var u=r.__transition__[e];(u.event||(u.event=Xo.dispatch("start","end"))).on(n,t)});return this},Ns.transition=function(){for(var n,t,e,r,u=this.id,i=++Ls,o=[],a=0,c=this.length;c>a;a++){o.push(n=[]);for(var t=this[a],s=0,l=t.length;l>s;s++)(e=t[s])&&(r=Object.create(e.__transition__[u]),r.delay+=r.duration,jo(e,s,i,r)),n.push(e)}return Do(o,i)},Xo.svg.axis=function(){function n(n){n.each(function(){var n,s=Xo.select(this),l=this.__chart__||e,f=this.__chart__=e.copy(),h=null==c?f.ticks?f.ticks.apply(f,a):f.domain():c,g=null==t?f.tickFormat?f.tickFormat.apply(f,a):bt:t,p=s.selectAll(".tick").data(h,f),v=p.enter().insert("g",".domain").attr("class","tick").style("opacity",Aa),d=Xo.transition(p.exit()).style("opacity",Aa).remove(),m=Xo.transition(p).style("opacity",1),y=Ri(f),x=s.selectAll(".domain").data([0]),M=(x.enter().append("path").attr("class","domain"),Xo.transition(x));v.append("line"),v.append("text");var 
_=v.select("line"),b=m.select("line"),w=p.select("text").text(g),S=v.select("text"),k=m.select("text");switch(r){case"bottom":n=Ho,_.attr("y2",u),S.attr("y",Math.max(u,0)+o),b.attr("x2",0).attr("y2",u),k.attr("x",0).attr("y",Math.max(u,0)+o),w.attr("dy",".71em").style("text-anchor","middle"),M.attr("d","M"+y[0]+","+i+"V0H"+y[1]+"V"+i);break;case"top":n=Ho,_.attr("y2",-u),S.attr("y",-(Math.max(u,0)+o)),b.attr("x2",0).attr("y2",-u),k.attr("x",0).attr("y",-(Math.max(u,0)+o)),w.attr("dy","0em").style("text-anchor","middle"),M.attr("d","M"+y[0]+","+-i+"V0H"+y[1]+"V"+-i);break;case"left":n=Fo,_.attr("x2",-u),S.attr("x",-(Math.max(u,0)+o)),b.attr("x2",-u).attr("y2",0),k.attr("x",-(Math.max(u,0)+o)).attr("y",0),w.attr("dy",".32em").style("text-anchor","end"),M.attr("d","M"+-i+","+y[0]+"H0V"+y[1]+"H"+-i);break;case"right":n=Fo,_.attr("x2",u),S.attr("x",Math.max(u,0)+o),b.attr("x2",u).attr("y2",0),k.attr("x",Math.max(u,0)+o).attr("y",0),w.attr("dy",".32em").style("text-anchor","start"),M.attr("d","M"+i+","+y[0]+"H0V"+y[1]+"H"+i)}if(f.rangeBand){var E=f,A=E.rangeBand()/2;l=f=function(n){return E(n)+A}}else l.rangeBand?l=f:d.call(n,f);v.call(n,l),m.call(n,f)})}var t,e=Xo.scale.linear(),r=Ts,u=6,i=6,o=3,a=[10],c=null;return n.scale=function(t){return arguments.length?(e=t,n):e},n.orient=function(t){return arguments.length?(r=t in qs?t+"":Ts,n):r},n.ticks=function(){return arguments.length?(a=arguments,n):a},n.tickValues=function(t){return arguments.length?(c=t,n):c},n.tickFormat=function(e){return arguments.length?(t=e,n):t},n.tickSize=function(t){var e=arguments.length;return e?(u=+t,i=+arguments[e-1],n):u},n.innerTickSize=function(t){return arguments.length?(u=+t,n):u},n.outerTickSize=function(t){return arguments.length?(i=+t,n):i},n.tickPadding=function(t){return arguments.length?(o=+t,n):o},n.tickSubdivide=function(){return arguments.length&&n},n};var Ts="bottom",qs={top:1,right:1,bottom:1,left:1};Xo.svg.brush=function(){function n(i){i.each(function(){var 
i=Xo.select(this).style("pointer-events","all").style("-webkit-tap-highlight-color","rgba(0,0,0,0)").on("mousedown.brush",u).on("touchstart.brush",u),o=i.selectAll(".background").data([0]);o.enter().append("rect").attr("class","background").style("visibility","hidden").style("cursor","crosshair"),i.selectAll(".extent").data([0]).enter().append("rect").attr("class","extent").style("cursor","move");var a=i.selectAll(".resize").data(p,bt);a.exit().remove(),a.enter().append("g").attr("class",function(n){return"resize "+n}).style("cursor",function(n){return zs[n]}).append("rect").attr("x",function(n){return/[ew]$/.test(n)?-3:null}).attr("y",function(n){return/^[ns]/.test(n)?-3:null}).attr("width",6).attr("height",6).style("visibility","hidden"),a.style("display",n.empty()?"none":null);var l,f=Xo.transition(i),h=Xo.transition(o);c&&(l=Ri(c),h.attr("x",l[0]).attr("width",l[1]-l[0]),e(f)),s&&(l=Ri(s),h.attr("y",l[0]).attr("height",l[1]-l[0]),r(f)),t(f)})}function t(n){n.selectAll(".resize").attr("transform",function(n){return"translate("+l[+/e$/.test(n)]+","+f[+/^s/.test(n)]+")"})}function e(n){n.select(".extent").attr("x",l[0]),n.selectAll(".extent,.n>rect,.s>rect").attr("width",l[1]-l[0])}function r(n){n.select(".extent").attr("y",f[0]),n.selectAll(".extent,.e>rect,.w>rect").attr("height",f[1]-f[0])}function u(){function u(){32==Xo.event.keyCode&&(C||(x=null,L[0]-=l[1],L[1]-=f[1],C=2),d())}function p(){32==Xo.event.keyCode&&2==C&&(L[0]+=l[1],L[1]+=f[1],C=0,d())}function v(){var n=Xo.mouse(_),u=!1;M&&(n[0]+=M[0],n[1]+=M[1]),C||(Xo.event.altKey?(x||(x=[(l[0]+l[1])/2,(f[0]+f[1])/2]),L[0]=l[+(n[0]<x[0])],L[1]=f[+(n[1]<x[1])]):x=null),E&&m(n,c,0)&&(e(S),u=!0),A&&m(n,s,1)&&(r(S),u=!0),u&&(t(S),w({type:"brush",mode:C?"move":"resize"}))}function m(n,t,e){var r,u,a=Ri(t),c=a[0],s=a[1],p=L[e],v=e?f:l,d=v[1]-v[0];return 
C&&(c-=p,s-=d+p),r=(e?g:h)?Math.max(c,Math.min(s,n[e])):n[e],C?u=(r+=p)+d:(x&&(p=Math.max(c,Math.min(s,2*x[e]-r))),r>p?(u=r,r=p):u=p),v[0]!=r||v[1]!=u?(e?o=null:i=null,v[0]=r,v[1]=u,!0):void 0}function y(){v(),S.style("pointer-events","all").selectAll(".resize").style("display",n.empty()?"none":null),Xo.select("body").style("cursor",null),T.on("mousemove.brush",null).on("mouseup.brush",null).on("touchmove.brush",null).on("touchend.brush",null).on("keydown.brush",null).on("keyup.brush",null),N(),w({type:"brushend"})}var x,M,_=this,b=Xo.select(Xo.event.target),w=a.of(_,arguments),S=Xo.select(_),k=b.datum(),E=!/^(n|s)$/.test(k)&&c,A=!/^(e|w)$/.test(k)&&s,C=b.classed("extent"),N=O(),L=Xo.mouse(_),T=Xo.select(Go).on("keydown.brush",u).on("keyup.brush",p);if(Xo.event.changedTouches?T.on("touchmove.brush",v).on("touchend.brush",y):T.on("mousemove.brush",v).on("mouseup.brush",y),S.interrupt().selectAll("*").interrupt(),C)L[0]=l[0]-L[0],L[1]=f[0]-L[1];else if(k){var q=+/w$/.test(k),z=+/^n/.test(k);M=[l[1-q]-L[0],f[1-z]-L[1]],L[0]=l[q],L[1]=f[z]}else Xo.event.altKey&&(x=L.slice());S.style("pointer-events","none").selectAll(".resize").style("display",null),Xo.select("body").style("cursor",b.style("cursor")),w({type:"brushstart"}),v()}var i,o,a=y(n,"brushstart","brush","brushend"),c=null,s=null,l=[0,0],f=[0,0],h=!0,g=!0,p=Rs[0];return n.event=function(n){n.each(function(){var n=a.of(this,arguments),t={x:l,y:f,i:i,j:o},e=this.__chart__||t;this.__chart__=t,ks?Xo.select(this).transition().each("start.brush",function(){i=e.i,o=e.j,l=e.x,f=e.y,n({type:"brushstart"})}).tween("brush:brush",function(){var e=hu(l,t.x),r=hu(f,t.y);return i=o=null,function(u){l=t.x=e(u),f=t.y=r(u),n({type:"brush",mode:"resize"})}}).each("end.brush",function(){i=t.i,o=t.j,n({type:"brush",mode:"resize"}),n({type:"brushend"})}):(n({type:"brushstart"}),n({type:"brush",mode:"resize"}),n({type:"brushend"}))})},n.x=function(t){return arguments.length?(c=t,p=Rs[!c<<1|!s],n):c},n.y=function(t){return 
arguments.length?(s=t,p=Rs[!c<<1|!s],n):s},n.clamp=function(t){return arguments.length?(c&&s?(h=!!t[0],g=!!t[1]):c?h=!!t:s&&(g=!!t),n):c&&s?[h,g]:c?h:s?g:null},n.extent=function(t){var e,r,u,a,h;return arguments.length?(c&&(e=t[0],r=t[1],s&&(e=e[0],r=r[0]),i=[e,r],c.invert&&(e=c(e),r=c(r)),e>r&&(h=e,e=r,r=h),(e!=l[0]||r!=l[1])&&(l=[e,r])),s&&(u=t[0],a=t[1],c&&(u=u[1],a=a[1]),o=[u,a],s.invert&&(u=s(u),a=s(a)),u>a&&(h=u,u=a,a=h),(u!=f[0]||a!=f[1])&&(f=[u,a])),n):(c&&(i?(e=i[0],r=i[1]):(e=l[0],r=l[1],c.invert&&(e=c.invert(e),r=c.invert(r)),e>r&&(h=e,e=r,r=h))),s&&(o?(u=o[0],a=o[1]):(u=f[0],a=f[1],s.invert&&(u=s.invert(u),a=s.invert(a)),u>a&&(h=u,u=a,a=h))),c&&s?[[e,u],[r,a]]:c?[e,r]:s&&[u,a])},n.clear=function(){return n.empty()||(l=[0,0],f=[0,0],i=o=null),n},n.empty=function(){return!!c&&l[0]==l[1]||!!s&&f[0]==f[1]},Xo.rebind(n,a,"on")};var zs={n:"ns-resize",e:"ew-resize",s:"ns-resize",w:"ew-resize",nw:"nwse-resize",ne:"nesw-resize",se:"nwse-resize",sw:"nesw-resize"},Rs=[["n","e","s","w","nw","ne","se","sw"],["e","w"],["n","s"],[]],Ds=tc.format=ac.timeFormat,Ps=Ds.utc,Us=Ps("%Y-%m-%dT%H:%M:%S.%LZ");Ds.iso=Date.prototype.toISOString&&+new Date("2000-01-01T00:00:00.000Z")?Oo:Us,Oo.parse=function(n){var t=new Date(n);return isNaN(t)?null:t},Oo.toString=Us.toString,tc.second=Rt(function(n){return new ec(1e3*Math.floor(n/1e3))},function(n,t){n.setTime(n.getTime()+1e3*Math.floor(t))},function(n){return n.getSeconds()}),tc.seconds=tc.second.range,tc.seconds.utc=tc.second.utc.range,tc.minute=Rt(function(n){return new ec(6e4*Math.floor(n/6e4))},function(n,t){n.setTime(n.getTime()+6e4*Math.floor(t))},function(n){return n.getMinutes()}),tc.minutes=tc.minute.range,tc.minutes.utc=tc.minute.utc.range,tc.hour=Rt(function(n){var t=n.getTimezoneOffset()/60;return new ec(36e5*(Math.floor(n/36e5-t)+t))},function(n,t){n.setTime(n.getTime()+36e5*Math.floor(t))},function(n){return n.getHours()}),tc.hours=tc.hour.range,tc.hours.utc=tc.hour.utc.range,tc.month=Rt(function(n){return 
n=tc.day(n),n.setDate(1),n},function(n,t){n.setMonth(n.getMonth()+t)},function(n){return n.getMonth()}),tc.months=tc.month.range,tc.months.utc=tc.month.utc.range;var js=[1e3,5e3,15e3,3e4,6e4,3e5,9e5,18e5,36e5,108e5,216e5,432e5,864e5,1728e5,6048e5,2592e6,7776e6,31536e6],Hs=[[tc.second,1],[tc.second,5],[tc.second,15],[tc.second,30],[tc.minute,1],[tc.minute,5],[tc.minute,15],[tc.minute,30],[tc.hour,1],[tc.hour,3],[tc.hour,6],[tc.hour,12],[tc.day,1],[tc.day,2],[tc.week,1],[tc.month,1],[tc.month,3],[tc.year,1]],Fs=Ds.multi([[".%L",function(n){return n.getMilliseconds()}],[":%S",function(n){return n.getSeconds()}],["%I:%M",function(n){return n.getMinutes()}],["%I %p",function(n){return n.getHours()}],["%a %d",function(n){return n.getDay()&&1!=n.getDate()}],["%b %d",function(n){return 1!=n.getDate()}],["%B",function(n){return n.getMonth()}],["%Y",be]]),Os={range:function(n,t,e){return Xo.range(Math.ceil(n/e)*e,+t,e).map(Io)},floor:bt,ceil:bt};Hs.year=tc.year,tc.scale=function(){return Yo(Xo.scale.linear(),Hs,Fs)};var Ys=Hs.map(function(n){return[n[0].utc,n[1]]}),Is=Ps.multi([[".%L",function(n){return n.getUTCMilliseconds()}],[":%S",function(n){return n.getUTCSeconds()}],["%I:%M",function(n){return n.getUTCMinutes()}],["%I %p",function(n){return n.getUTCHours()}],["%a %d",function(n){return n.getUTCDay()&&1!=n.getUTCDate()}],["%b %d",function(n){return 1!=n.getUTCDate()}],["%B",function(n){return n.getUTCMonth()}],["%Y",be]]);Ys.year=tc.year.utc,tc.scale.utc=function(){return Yo(Xo.scale.linear(),Ys,Is)},Xo.text=wt(function(n){return n.responseText}),Xo.json=function(n,t){return St(n,"application/json",Zo,t)},Xo.html=function(n,t){return St(n,"text/html",Vo,t)},Xo.xml=wt(function(n){return n.responseXML}),"function"==typeof define&&define.amd?define(Xo):"object"==typeof module&&module.exports?module.exports=Xo:this.d3=Xo}();'use strict';(function(window){window.define=undefined;}).call(this,this);'use strict';tr.exportTo('tr.ui.b',function(){const 
DataSeriesEnableChangeEventType='data-series-enabled-change';const THIS_DOC=document.currentScript.ownerDocument;const svgNS='http://www.w3.org/2000/svg';const ColorScheme=tr.b.ColorScheme;function getColorOfKey(key,selected){let id=ColorScheme.getColorIdForGeneralPurposeString(key);if(selected){id+=ColorScheme.properties.brightenedOffsets[0];} +return this.rangeOfInterest_;},get rangeOfInterest(){return this.rangeOfInterest_;},set rangeOfInterest(rangeOfInterest){this.rangeOfInterest_=rangeOfInterest;this.updateContents_();},get selection(){},set selection(_){},get textLabel(){return'Frame Data';},get model(){return this.model_;},set model(model){this.model_=model;this.updateContents_();}});tr.ui.side_panel.SidePanelRegistry.register(function(){return document.createElement('tr-ui-e-s-frame-data-side-panel');});});'use strict';Polymer({is:'tr-ui-b-chart-legend-key',ready(){this.$.checkbox.addEventListener('change',this.onCheckboxChange_.bind(this));},onCheckboxChange_(){tr.b.dispatchSimpleEvent(this,tr.ui.b.DataSeriesEnableChangeEventType,true,false,{key:Polymer.dom(this).textContent,enabled:this.enabled});},set textContent(t){Polymer.dom(this.$.label).textContent=t;Polymer.dom(this.$.link).textContent=t;this.updateContents_();},set width(w){w-=20;this.$.link.style.width=w+'px';this.$.label.style.width=w+'px';},get textContent(){return Polymer.dom(this.$.label).textContent;},set optional(optional){this.$.checkbox.style.visibility=optional?'visible':'hidden';},get optional(){return this.$.checkbox.style.visibility==='visible';},set enabled(enabled){this.$.checkbox.checked=enabled?'checked':'';},get enabled(){return this.$.checkbox.checked;},set color(c){this.$.label.style.color=c;this.$.link.color=c;},set target(target){this.$.link.setSelectionAndContent(target,Polymer.dom(this.$.label).textContent);this.updateContents_();},get target(){return this.$.link.selection;},set 
title(title){this.$.link.title=title;},updateContents_(){this.$.link.style.display=this.target?'':'none';this.$.label.style.display=this.target?'none':'';this.$.label.htmlFor=this.optional?'checkbox':'';}});'use strict';(function(window){window.define=function(x){window.d3=x;};window.define.amd=true;})(this);!function(){function n(n){return null!=n&&!isNaN(n)}function t(n){return n.length}function e(n){for(var t=1;n*t%1;)t*=10;return t}function r(n,t){try{for(var e in t)Object.defineProperty(n.prototype,e,{value:t[e],enumerable:!1})}catch(r){n.prototype=t}}function u(){}function i(n){return aa+n in this}function o(n){return n=aa+n,n in this&&delete this[n]}function a(){var n=[];return this.forEach(function(t){n.push(t)}),n}function c(){var n=0;for(var t in this)t.charCodeAt(0)===ca&&++n;return n}function s(){for(var n in this)if(n.charCodeAt(0)===ca)return!1;return!0}function l(){}function f(n,t,e){return function(){var r=e.apply(t,arguments);return r===t?n:r}}function h(n,t){if(t in n)return t;t=t.charAt(0).toUpperCase()+t.substring(1);for(var e=0,r=sa.length;r>e;++e){var u=sa[e]+t;if(u in n)return u}}function g(){}function p(){}function v(n){function t(){for(var t,r=e,u=-1,i=r.length;++u<i;)(t=r[u].on)&&t.apply(this,arguments);return n}var e=[],r=new u;return t.on=function(t,u){var i,o=r.get(t);return arguments.length<2?o&&o.on:(o&&(o.on=null,e=e.slice(0,i=e.indexOf(o)).concat(e.slice(i+1)),r.remove(t)),u&&e.push(r.set(t,{on:u})),n)},t}function d(){Xo.event.preventDefault()}function m(){for(var n,t=Xo.event;n=t.sourceEvent;)t=n;return t}function y(n){for(var t=new p,e=0,r=arguments.length;++e<r;)t[arguments[e]]=v(t);return t.of=function(e,r){return function(u){try{var i=u.sourceEvent=Xo.event;u.target=n,Xo.event=u,t[u.type].apply(e,r)}finally{Xo.event=i}}},t}function x(n){return fa(n,da),n}function M(n){return"function"==typeof n?n:function(){return ha(n,this)}}function _(n){return"function"==typeof n?n:function(){return ga(n,this)}}function b(n,t){function 
e(){this.removeAttribute(n)}function r(){this.removeAttributeNS(n.space,n.local)}function u(){this.setAttribute(n,t)}function i(){this.setAttributeNS(n.space,n.local,t)}function o(){var e=t.apply(this,arguments);null==e?this.removeAttribute(n):this.setAttribute(n,e)}function a(){var e=t.apply(this,arguments);null==e?this.removeAttributeNS(n.space,n.local):this.setAttributeNS(n.space,n.local,e)}return n=Xo.ns.qualify(n),null==t?n.local?r:e:"function"==typeof t?n.local?a:o:n.local?i:u}function w(n){return n.trim().replace(/\s+/g," ")}function S(n){return new RegExp("(?:^|\\s+)"+Xo.requote(n)+"(?:\\s+|$)","g")}function k(n){return n.trim().split(/^|\s+/)}function E(n,t){function e(){for(var e=-1;++e<u;)n[e](this,t)}function r(){for(var e=-1,r=t.apply(this,arguments);++e<u;)n[e](this,r)}n=k(n).map(A);var u=n.length;return"function"==typeof t?r:e}function A(n){var t=S(n);return function(e,r){if(u=e.classList)return r?u.add(n):u.remove(n);var u=e.getAttribute("class")||"";r?(t.lastIndex=0,t.test(u)||e.setAttribute("class",w(u+" "+n))):e.setAttribute("class",w(u.replace(t," ")))}}function C(n,t,e){function r(){this.style.removeProperty(n)}function u(){this.style.setProperty(n,t,e)}function i(){var r=t.apply(this,arguments);null==r?this.style.removeProperty(n):this.style.setProperty(n,r,e)}return null==t?r:"function"==typeof t?i:u}function N(n,t){function e(){delete this[n]}function r(){this[n]=t}function u(){var e=t.apply(this,arguments);null==e?delete this[n]:this[n]=e}return null==t?e:"function"==typeof t?u:r}function L(n){return"function"==typeof n?n:(n=Xo.ns.qualify(n)).local?function(){return this.ownerDocument.createElementNS(n.space,n.local)}:function(){return this.ownerDocument.createElementNS(this.namespaceURI,n)}}function T(n){return{__data__:n}}function q(n){return function(){return va(this,n)}}function z(n){return arguments.length||(n=Xo.ascending),function(t,e){return t&&e?n(t.__data__,e.__data__):!t-!e}}function R(n,t){for(var e=0,r=n.length;r>e;e++)for(var 
u,i=n[e],o=0,a=i.length;a>o;o++)(u=i[o])&&t(u,o,e);return n}function D(n){return fa(n,ya),n}function P(n){var t,e;return function(r,u,i){var o,a=n[i].update,c=a.length;for(i!=e&&(e=i,t=0),u>=t&&(t=u+1);!(o=a[t])&&++t<c;);return o}}function U(){var n=this.__transition__;n&&++n.active}function j(n,t,e){function r(){var t=this[o];t&&(this.removeEventListener(n,t,t.$),delete this[o])}function u(){var u=c(t,Bo(arguments));r.call(this),this.addEventListener(n,this[o]=u,u.$=e),u._=t}function i(){var t,e=new RegExp("^__on([^.]+)"+Xo.requote(n)+"$");for(var r in this)if(t=r.match(e)){var u=this[r];this.removeEventListener(t[1],u,u.$),delete this[r]}}var o="__on"+n,a=n.indexOf("."),c=H;a>0&&(n=n.substring(0,a));var s=Ma.get(n);return s&&(n=s,c=F),a?t?u:r:t?g:i}function H(n,t){return function(e){var r=Xo.event;Xo.event=e,t[0]=this.__data__;try{n.apply(this,t)}finally{Xo.event=r}}}function F(n,t){var e=H(n,t);return function(n){var t=this,r=n.relatedTarget;r&&(r===t||8&r.compareDocumentPosition(t))||e.call(t,n)}}function O(){var n=".dragsuppress-"+ ++ba,t="click"+n,e=Xo.select(Go).on("touchmove"+n,d).on("dragstart"+n,d).on("selectstart"+n,d);if(_a){var r=Jo.style,u=r[_a];r[_a]="none"}return function(i){function o(){e.on(t,null)}e.on(n,null),_a&&(r[_a]=u),i&&(e.on(t,function(){d(),o()},!0),setTimeout(o,0))}}function Y(n,t){t.changedTouches&&(t=t.changedTouches[0]);var e=n.ownerSVGElement||n;if(e.createSVGPoint){var r=e.createSVGPoint();if(0>wa&&(Go.scrollX||Go.scrollY)){e=Xo.select("body").append("svg").style({position:"absolute",top:0,left:0,margin:0,padding:0,border:"none"},"important");var u=e[0][0].getScreenCTM();wa=!(u.f||u.e),e.remove()}return wa?(r.x=t.pageX,r.y=t.pageY):(r.x=t.clientX,r.y=t.clientY),r=r.matrixTransform(n.getScreenCTM().inverse()),[r.x,r.y]}var i=n.getBoundingClientRect();return[t.clientX-i.left-n.clientLeft,t.clientY-i.top-n.clientTop]}function I(n){return n>0?1:0>n?-1:0}function Z(n,t,e){return(t[0]-n[0])*(e[1]-n[1])-(t[1]-n[1])*(e[0]-n[0])}function 
V(n){return n>1?0:-1>n?Sa:Math.acos(n)}function X(n){return n>1?Ea:-1>n?-Ea:Math.asin(n)}function $(n){return((n=Math.exp(n))-1/n)/2}function B(n){return((n=Math.exp(n))+1/n)/2}function W(n){return((n=Math.exp(2*n))-1)/(n+1)}function J(n){return(n=Math.sin(n/2))*n}function G(){}function K(n,t,e){return new Q(n,t,e)}function Q(n,t,e){this.h=n,this.s=t,this.l=e}function nt(n,t,e){function r(n){return n>360?n-=360:0>n&&(n+=360),60>n?i+(o-i)*n/60:180>n?o:240>n?i+(o-i)*(240-n)/60:i}function u(n){return Math.round(255*r(n))}var i,o;return n=isNaN(n)?0:(n%=360)<0?n+360:n,t=isNaN(t)?0:0>t?0:t>1?1:t,e=0>e?0:e>1?1:e,o=.5>=e?e*(1+t):e+t-e*t,i=2*e-o,gt(u(n+120),u(n),u(n-120))}function tt(n,t,e){return new et(n,t,e)}function et(n,t,e){this.h=n,this.c=t,this.l=e}function rt(n,t,e){return isNaN(n)&&(n=0),isNaN(t)&&(t=0),ut(e,Math.cos(n*=Na)*t,Math.sin(n)*t)}function ut(n,t,e){return new it(n,t,e)}function it(n,t,e){this.l=n,this.a=t,this.b=e}function ot(n,t,e){var r=(n+16)/116,u=r+t/500,i=r-e/200;return u=ct(u)*Fa,r=ct(r)*Oa,i=ct(i)*Ya,gt(lt(3.2404542*u-1.5371385*r-.4985314*i),lt(-.969266*u+1.8760108*r+.041556*i),lt(.0556434*u-.2040259*r+1.0572252*i))}function at(n,t,e){return n>0?tt(Math.atan2(e,t)*La,Math.sqrt(t*t+e*e),n):tt(0/0,0/0,n)}function ct(n){return n>.206893034?n*n*n:(n-4/29)/7.787037}function st(n){return n>.008856?Math.pow(n,1/3):7.787037*n+4/29}function lt(n){return Math.round(255*(.00304>=n?12.92*n:1.055*Math.pow(n,1/2.4)-.055))}function ft(n){return gt(n>>16,255&n>>8,255&n)}function ht(n){return ft(n)+""}function gt(n,t,e){return new pt(n,t,e)}function pt(n,t,e){this.r=n,this.g=t,this.b=e}function vt(n){return 16>n?"0"+Math.max(0,n).toString(16):Math.min(255,n).toString(16)}function dt(n,t,e){var r,u,i,o,a=0,c=0,s=0;if(u=/([a-z]+)\((.*)\)/i.exec(n))switch(i=u[2].split(","),u[1]){case"hsl":return e(parseFloat(i[0]),parseFloat(i[1])/100,parseFloat(i[2])/100);case"rgb":return 
t(Mt(i[0]),Mt(i[1]),Mt(i[2]))}return(o=Va.get(n))?t(o.r,o.g,o.b):(null!=n&&"#"===n.charAt(0)&&(r=parseInt(n.substring(1),16),isNaN(r)||(4===n.length?(a=(3840&r)>>4,a=a>>4|a,c=240&r,c=c>>4|c,s=15&r,s=s<<4|s):7===n.length&&(a=(16711680&r)>>16,c=(65280&r)>>8,s=255&r))),t(a,c,s))}function mt(n,t,e){var r,u,i=Math.min(n/=255,t/=255,e/=255),o=Math.max(n,t,e),a=o-i,c=(o+i)/2;return a?(u=.5>c?a/(o+i):a/(2-o-i),r=n==o?(t-e)/a+(e>t?6:0):t==o?(e-n)/a+2:(n-t)/a+4,r*=60):(r=0/0,u=c>0&&1>c?0:r),K(r,u,c)}function yt(n,t,e){n=xt(n),t=xt(t),e=xt(e);var r=st((.4124564*n+.3575761*t+.1804375*e)/Fa),u=st((.2126729*n+.7151522*t+.072175*e)/Oa),i=st((.0193339*n+.119192*t+.9503041*e)/Ya);return ut(116*u-16,500*(r-u),200*(u-i))}function xt(n){return(n/=255)<=.04045?n/12.92:Math.pow((n+.055)/1.055,2.4)}function Mt(n){var t=parseFloat(n);return"%"===n.charAt(n.length-1)?Math.round(2.55*t):t}function _t(n){return"function"==typeof n?n:function(){return n}}function bt(n){return n}function wt(n){return function(t,e,r){return 2===arguments.length&&"function"==typeof e&&(r=e,e=null),St(t,e,n,r)}}function St(n,t,e,r){function u(){var n,t=c.status;if(!t&&c.responseText||t>=200&&300>t||304===t){try{n=e.call(i,c)}catch(r){return o.error.call(i,r),void 0}o.load.call(i,n)}else o.error.call(i,c)}var i={},o=Xo.dispatch("beforesend","progress","load","error"),a={},c=new XMLHttpRequest,s=null;return!Go.XDomainRequest||"withCredentials"in c||!/^(http(s)?:)?\/\//.test(n)||(c=new XDomainRequest),"onload"in c?c.onload=c.onerror=u:c.onreadystatechange=function(){c.readyState>3&&u()},c.onprogress=function(n){var t=Xo.event;Xo.event=n;try{o.progress.call(i,c)}finally{Xo.event=t}},i.header=function(n,t){return n=(n+"").toLowerCase(),arguments.length<2?a[n]:(null==t?delete a[n]:a[n]=t+"",i)},i.mimeType=function(n){return arguments.length?(t=null==n?null:n+"",i):t},i.responseType=function(n){return arguments.length?(s=n,i):s},i.response=function(n){return 
e=n,i},["get","post"].forEach(function(n){i[n]=function(){return i.send.apply(i,[n].concat(Bo(arguments)))}}),i.send=function(e,r,u){if(2===arguments.length&&"function"==typeof r&&(u=r,r=null),c.open(e,n,!0),null==t||"accept"in a||(a.accept=t+",*/*"),c.setRequestHeader)for(var l in a)c.setRequestHeader(l,a[l]);return null!=t&&c.overrideMimeType&&c.overrideMimeType(t),null!=s&&(c.responseType=s),null!=u&&i.on("error",u).on("load",function(n){u(null,n)}),o.beforesend.call(i,c),c.send(null==r?null:r),i},i.abort=function(){return c.abort(),i},Xo.rebind(i,o,"on"),null==r?i:i.get(kt(r))}function kt(n){return 1===n.length?function(t,e){n(null==t?e:null)}:n}function Et(){var n=At(),t=Ct()-n;t>24?(isFinite(t)&&(clearTimeout(Wa),Wa=setTimeout(Et,t)),Ba=0):(Ba=1,Ga(Et))}function At(){var n=Date.now();for(Ja=Xa;Ja;)n>=Ja.t&&(Ja.f=Ja.c(n-Ja.t)),Ja=Ja.n;return n}function Ct(){for(var n,t=Xa,e=1/0;t;)t.f?t=n?n.n=t.n:Xa=t.n:(t.t<e&&(e=t.t),t=(n=t).n);return $a=n,e}function Nt(n,t){return t-(n?Math.ceil(Math.log(n)/Math.LN10):1)}function Lt(n,t){var e=Math.pow(10,3*oa(8-t));return{scale:t>8?function(n){return n/e}:function(n){return n*e},symbol:n}}function Tt(n){var t=n.decimal,e=n.thousands,r=n.grouping,u=n.currency,i=r?function(n){for(var t=n.length,u=[],i=0,o=r[0];t>0&&o>0;)u.push(n.substring(t-=o,t+o)),o=r[i=(i+1)%r.length];return u.reverse().join(e)}:bt;return function(n){var e=Qa.exec(n),r=e[1]||" ",o=e[2]||">",a=e[3]||"",c=e[4]||"",s=e[5],l=+e[6],f=e[7],h=e[8],g=e[9],p=1,v="",d="",m=!1;switch(h&&(h=+h.substring(1)),(s||"0"===r&&"="===o)&&(s=r="0",o="=",f&&(l-=Math.floor((l-1)/4))),g){case"n":f=!0,g="g";break;case"%":p=100,d="%",g="f";break;case"p":p=100,d="%",g="r";break;case"b":case"o":case"x":case"X":"#"===c&&(v="0"+g.toLowerCase());case"c":case"d":m=!0,h=0;break;case"s":p=-1,g="r"}"$"===c&&(v=u[0],d=u[1]),"r"!=g||h||(g="g"),null!=h&&("g"==g?h=Math.max(1,Math.min(21,h)):("e"==g||"f"==g)&&(h=Math.max(0,Math.min(20,h)))),g=nc.get(g)||qt;var y=s&&f;return function(n){var 
e=d;if(m&&n%1)return"";var u=0>n||0===n&&0>1/n?(n=-n,"-"):a;if(0>p){var c=Xo.formatPrefix(n,h);n=c.scale(n),e=c.symbol+d}else n*=p;n=g(n,h);var x=n.lastIndexOf("."),M=0>x?n:n.substring(0,x),_=0>x?"":t+n.substring(x+1);!s&&f&&(M=i(M));var b=v.length+M.length+_.length+(y?0:u.length),w=l>b?new Array(b=l-b+1).join(r):"";return y&&(M=i(w+M)),u+=v,n=M+_,("<"===o?u+n+w:">"===o?w+u+n:"^"===o?w.substring(0,b>>=1)+u+n+w.substring(b):u+(y?n:w+n))+e}}}function qt(n){return n+""}function zt(){this._=new Date(arguments.length>1?Date.UTC.apply(this,arguments):arguments[0])}function Rt(n,t,e){function r(t){var e=n(t),r=i(e,1);return r-t>t-e?e:r}function u(e){return t(e=n(new ec(e-1)),1),e}function i(n,e){return t(n=new ec(+n),e),n}function o(n,r,i){var o=u(n),a=[];if(i>1)for(;r>o;)e(o)%i||a.push(new Date(+o)),t(o,1);else for(;r>o;)a.push(new Date(+o)),t(o,1);return a}function a(n,t,e){try{ec=zt;var r=new zt;return r._=n,o(r,t,e)}finally{ec=Date}}n.floor=n,n.round=r,n.ceil=u,n.offset=i,n.range=o;var c=n.utc=Dt(n);return c.floor=c,c.round=Dt(r),c.ceil=Dt(u),c.offset=Dt(i),c.range=a,n}function Dt(n){return function(t,e){try{ec=zt;var r=new zt;return r._=t,n(r,e)._}finally{ec=Date}}}function Pt(n){function t(n){function t(t){for(var e,u,i,o=[],a=-1,c=0;++a<r;)37===n.charCodeAt(a)&&(o.push(n.substring(c,a)),null!=(u=uc[e=n.charAt(++a)])&&(e=n.charAt(++a)),(i=C[e])&&(e=i(t,null==u?"e"===e?" 
":"0":u)),o.push(e),c=a+1);return o.push(n.substring(c,a)),o.join("")}var r=n.length;return t.parse=function(t){var r={y:1900,m:0,d:1,H:0,M:0,S:0,L:0,Z:null},u=e(r,n,t,0);if(u!=t.length)return null;"p"in r&&(r.H=r.H%12+12*r.p);var i=null!=r.Z&&ec!==zt,o=new(i?zt:ec);return"j"in r?o.setFullYear(r.y,0,r.j):"w"in r&&("W"in r||"U"in r)?(o.setFullYear(r.y,0,1),o.setFullYear(r.y,0,"W"in r?(r.w+6)%7+7*r.W-(o.getDay()+5)%7:r.w+7*r.U-(o.getDay()+6)%7)):o.setFullYear(r.y,r.m,r.d),o.setHours(r.H+Math.floor(r.Z/100),r.M+r.Z%100,r.S,r.L),i?o._:o},t.toString=function(){return n},t}function e(n,t,e,r){for(var u,i,o,a=0,c=t.length,s=e.length;c>a;){if(r>=s)return-1;if(u=t.charCodeAt(a++),37===u){if(o=t.charAt(a++),i=N[o in uc?t.charAt(a++):o],!i||(r=i(n,e,r))<0)return-1}else if(u!=e.charCodeAt(r++))return-1}return r}function r(n,t,e){b.lastIndex=0;var r=b.exec(t.substring(e));return r?(n.w=w.get(r[0].toLowerCase()),e+r[0].length):-1}function u(n,t,e){M.lastIndex=0;var r=M.exec(t.substring(e));return r?(n.w=_.get(r[0].toLowerCase()),e+r[0].length):-1}function i(n,t,e){E.lastIndex=0;var r=E.exec(t.substring(e));return r?(n.m=A.get(r[0].toLowerCase()),e+r[0].length):-1}function o(n,t,e){S.lastIndex=0;var r=S.exec(t.substring(e));return r?(n.m=k.get(r[0].toLowerCase()),e+r[0].length):-1}function a(n,t,r){return e(n,C.c.toString(),t,r)}function c(n,t,r){return e(n,C.x.toString(),t,r)}function s(n,t,r){return e(n,C.X.toString(),t,r)}function l(n,t,e){var r=x.get(t.substring(e,e+=2).toLowerCase());return null==r?-1:(n.p=r,e)}var f=n.dateTime,h=n.date,g=n.time,p=n.periods,v=n.days,d=n.shortDays,m=n.months,y=n.shortMonths;t.utc=function(n){function e(n){try{ec=zt;var t=new ec;return t._=n,r(t)}finally{ec=Date}}var r=t(n);return e.parse=function(n){try{ec=zt;var t=r.parse(n);return t&&t._}finally{ec=Date}},e.toString=r.toString,e},t.multi=t.utc.multi=ee;var x=Xo.map(),M=jt(v),_=Ht(v),b=jt(d),w=Ht(d),S=jt(m),k=Ht(m),E=jt(y),A=Ht(y);p.forEach(function(n,t){x.set(n.toLowerCase(),t)});var 
C={a:function(n){return d[n.getDay()]},A:function(n){return v[n.getDay()]},b:function(n){return y[n.getMonth()]},B:function(n){return m[n.getMonth()]},c:t(f),d:function(n,t){return Ut(n.getDate(),t,2)},e:function(n,t){return Ut(n.getDate(),t,2)},H:function(n,t){return Ut(n.getHours(),t,2)},I:function(n,t){return Ut(n.getHours()%12||12,t,2)},j:function(n,t){return Ut(1+tc.dayOfYear(n),t,3)},L:function(n,t){return Ut(n.getMilliseconds(),t,3)},m:function(n,t){return Ut(n.getMonth()+1,t,2)},M:function(n,t){return Ut(n.getMinutes(),t,2)},p:function(n){return p[+(n.getHours()>=12)]},S:function(n,t){return Ut(n.getSeconds(),t,2)},U:function(n,t){return Ut(tc.sundayOfYear(n),t,2)},w:function(n){return n.getDay()},W:function(n,t){return Ut(tc.mondayOfYear(n),t,2)},x:t(h),X:t(g),y:function(n,t){return Ut(n.getFullYear()%100,t,2)},Y:function(n,t){return Ut(n.getFullYear()%1e4,t,4)},Z:ne,"%":function(){return"%"}},N={a:r,A:u,b:i,B:o,c:a,d:Bt,e:Bt,H:Jt,I:Jt,j:Wt,L:Qt,m:$t,M:Gt,p:l,S:Kt,U:Ot,w:Ft,W:Yt,x:c,X:s,y:Zt,Y:It,Z:Vt,"%":te};return t}function Ut(n,t,e){var r=0>n?"-":"",u=(r?-n:n)+"",i=u.length;return r+(e>i?new Array(e-i+1).join(t)+u:u)}function jt(n){return new RegExp("^(?:"+n.map(Xo.requote).join("|")+")","i")}function Ht(n){for(var t=new u,e=-1,r=n.length;++e<r;)t.set(n[e].toLowerCase(),e);return t}function Ft(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+1));return r?(n.w=+r[0],e+r[0].length):-1}function Ot(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e));return r?(n.U=+r[0],e+r[0].length):-1}function Yt(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e));return r?(n.W=+r[0],e+r[0].length):-1}function It(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+4));return r?(n.y=+r[0],e+r[0].length):-1}function Zt(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+2));return r?(n.y=Xt(+r[0]),e+r[0].length):-1}function Vt(n,t,e){return/^[+-]\d{4}$/.test(t=t.substring(e,e+5))?(n.Z=+t,e+5):-1}function Xt(n){return n+(n>68?1900:2e3)}function 
$t(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+2));return r?(n.m=r[0]-1,e+r[0].length):-1}function Bt(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+2));return r?(n.d=+r[0],e+r[0].length):-1}function Wt(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+3));return r?(n.j=+r[0],e+r[0].length):-1}function Jt(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+2));return r?(n.H=+r[0],e+r[0].length):-1}function Gt(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+2));return r?(n.M=+r[0],e+r[0].length):-1}function Kt(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+2));return r?(n.S=+r[0],e+r[0].length):-1}function Qt(n,t,e){ic.lastIndex=0;var r=ic.exec(t.substring(e,e+3));return r?(n.L=+r[0],e+r[0].length):-1}function ne(n){var t=n.getTimezoneOffset(),e=t>0?"-":"+",r=~~(oa(t)/60),u=oa(t)%60;return e+Ut(r,"0",2)+Ut(u,"0",2)}function te(n,t,e){oc.lastIndex=0;var r=oc.exec(t.substring(e,e+1));return r?e+r[0].length:-1}function ee(n){for(var t=n.length,e=-1;++e<t;)n[e][0]=this(n[e][0]);return function(t){for(var e=0,r=n[e];!r[1](t);)r=n[++e];return r[0](t)}}function re(){}function ue(n,t,e){var r=e.s=n+t,u=r-n,i=r-u;e.t=n-i+(t-u)}function ie(n,t){n&&lc.hasOwnProperty(n.type)&&lc[n.type](n,t)}function oe(n,t,e){var r,u=-1,i=n.length-e;for(t.lineStart();++u<i;)r=n[u],t.point(r[0],r[1],r[2]);t.lineEnd()}function ae(n,t){var e=-1,r=n.length;for(t.polygonStart();++e<r;)oe(n[e],t,1);t.polygonEnd()}function ce(){function n(n,t){n*=Na,t=t*Na/2+Sa/4;var e=n-r,o=e>=0?1:-1,a=o*e,c=Math.cos(t),s=Math.sin(t),l=i*s,f=u*c+l*Math.cos(a),h=l*o*Math.sin(a);hc.add(Math.atan2(h,f)),r=n,u=c,i=s}var t,e,r,u,i;gc.point=function(o,a){gc.point=n,r=(t=o)*Na,u=Math.cos(a=(e=a)*Na/2+Sa/4),i=Math.sin(a)},gc.lineEnd=function(){n(t,e)}}function se(n){var t=n[0],e=n[1],r=Math.cos(e);return[r*Math.cos(t),r*Math.sin(t),Math.sin(e)]}function le(n,t){return n[0]*t[0]+n[1]*t[1]+n[2]*t[2]}function fe(n,t){return[n[1]*t[2]-n[2]*t[1],n[2]*t[0]-n[0]*t[2],n[0]*t[1]-n[1]*t[0]]}function 
he(n,t){n[0]+=t[0],n[1]+=t[1],n[2]+=t[2]}function ge(n,t){return[n[0]*t,n[1]*t,n[2]*t]}function pe(n){var t=Math.sqrt(n[0]*n[0]+n[1]*n[1]+n[2]*n[2]);n[0]/=t,n[1]/=t,n[2]/=t}function ve(n){return[Math.atan2(n[1],n[0]),X(n[2])]}function de(n,t){return oa(n[0]-t[0])<Aa&&oa(n[1]-t[1])<Aa}function me(n,t){n*=Na;var e=Math.cos(t*=Na);ye(e*Math.cos(n),e*Math.sin(n),Math.sin(t))}function ye(n,t,e){++pc,dc+=(n-dc)/pc,mc+=(t-mc)/pc,yc+=(e-yc)/pc}function xe(){function n(n,u){n*=Na;var i=Math.cos(u*=Na),o=i*Math.cos(n),a=i*Math.sin(n),c=Math.sin(u),s=Math.atan2(Math.sqrt((s=e*c-r*a)*s+(s=r*o-t*c)*s+(s=t*a-e*o)*s),t*o+e*a+r*c);vc+=s,xc+=s*(t+(t=o)),Mc+=s*(e+(e=a)),_c+=s*(r+(r=c)),ye(t,e,r)}var t,e,r;kc.point=function(u,i){u*=Na;var o=Math.cos(i*=Na);t=o*Math.cos(u),e=o*Math.sin(u),r=Math.sin(i),kc.point=n,ye(t,e,r)}}function Me(){kc.point=me}function _e(){function n(n,t){n*=Na;var e=Math.cos(t*=Na),o=e*Math.cos(n),a=e*Math.sin(n),c=Math.sin(t),s=u*c-i*a,l=i*o-r*c,f=r*a-u*o,h=Math.sqrt(s*s+l*l+f*f),g=r*o+u*a+i*c,p=h&&-V(g)/h,v=Math.atan2(h,g);bc+=p*s,wc+=p*l,Sc+=p*f,vc+=v,xc+=v*(r+(r=o)),Mc+=v*(u+(u=a)),_c+=v*(i+(i=c)),ye(r,u,i)}var t,e,r,u,i;kc.point=function(o,a){t=o,e=a,kc.point=n,o*=Na;var c=Math.cos(a*=Na);r=c*Math.cos(o),u=c*Math.sin(o),i=Math.sin(a),ye(r,u,i)},kc.lineEnd=function(){n(t,e),kc.lineEnd=Me,kc.point=me}}function be(){return!0}function we(n,t,e,r,u){var i=[],o=[];if(n.forEach(function(n){if(!((t=n.length-1)<=0)){var t,e=n[0],r=n[t];if(de(e,r)){u.lineStart();for(var a=0;t>a;++a)u.point((e=n[a])[0],e[1]);return u.lineEnd(),void 0}var c=new ke(e,n,null,!0),s=new ke(e,null,c,!1);c.o=s,i.push(c),o.push(s),c=new ke(r,n,null,!1),s=new ke(r,null,c,!0),c.o=s,i.push(c),o.push(s)}}),o.sort(t),Se(i),Se(o),i.length){for(var a=0,c=e,s=o.length;s>a;++a)o[a].e=c=!c;for(var l,f,h=i[0];;){for(var g=h,p=!0;g.v;)if((g=g.n)===h)return;l=g.z,u.lineStart();do{if(g.v=g.o.v=!0,g.e){if(p)for(var a=0,s=l.length;s>a;++a)u.point((f=l[a])[0],f[1]);else 
r(g.x,g.n.x,1,u);g=g.n}else{if(p){l=g.p.z;for(var a=l.length-1;a>=0;--a)u.point((f=l[a])[0],f[1])}else r(g.x,g.p.x,-1,u);g=g.p}g=g.o,l=g.z,p=!p}while(!g.v);u.lineEnd()}}}function Se(n){if(t=n.length){for(var t,e,r=0,u=n[0];++r<t;)u.n=e=n[r],e.p=u,u=e;u.n=e=n[0],e.p=u}}function ke(n,t,e,r){this.x=n,this.z=t,this.o=e,this.e=r,this.v=!1,this.n=this.p=null}function Ee(n,t,e,r){return function(u,i){function o(t,e){var r=u(t,e);n(t=r[0],e=r[1])&&i.point(t,e)}function a(n,t){var e=u(n,t);d.point(e[0],e[1])}function c(){y.point=a,d.lineStart()}function s(){y.point=o,d.lineEnd()}function l(n,t){v.push([n,t]);var e=u(n,t);M.point(e[0],e[1])}function f(){M.lineStart(),v=[]}function h(){l(v[0][0],v[0][1]),M.lineEnd();var n,t=M.clean(),e=x.buffer(),r=e.length;if(v.pop(),p.push(v),v=null,r){if(1&t){n=e[0];var u,r=n.length-1,o=-1;for(i.lineStart();++o<r;)i.point((u=n[o])[0],u[1]);return i.lineEnd(),void 0}r>1&&2&t&&e.push(e.pop().concat(e.shift())),g.push(e.filter(Ae))}}var g,p,v,d=t(i),m=u.invert(r[0],r[1]),y={point:o,lineStart:c,lineEnd:s,polygonStart:function(){y.point=l,y.lineStart=f,y.lineEnd=h,g=[],p=[],i.polygonStart()},polygonEnd:function(){y.point=o,y.lineStart=c,y.lineEnd=s,g=Xo.merge(g);var n=Le(m,p);g.length?we(g,Ne,n,e,i):n&&(i.lineStart(),e(null,null,1,i),i.lineEnd()),i.polygonEnd(),g=p=null},sphere:function(){i.polygonStart(),i.lineStart(),e(null,null,1,i),i.lineEnd(),i.polygonEnd()}},x=Ce(),M=t(x);return y}}function Ae(n){return n.length>1}function Ce(){var n,t=[];return{lineStart:function(){t.push(n=[])},point:function(t,e){n.push([t,e])},lineEnd:g,buffer:function(){var e=t;return t=[],n=null,e},rejoin:function(){t.length>1&&t.push(t.pop().concat(t.shift()))}}}function Ne(n,t){return((n=n.x)[0]<0?n[1]-Ea-Aa:Ea-n[1])-((t=t.x)[0]<0?t[1]-Ea-Aa:Ea-t[1])}function Le(n,t){var e=n[0],r=n[1],u=[Math.sin(e),-Math.cos(e),0],i=0,o=0;hc.reset();for(var a=0,c=t.length;c>a;++a){var s=t[a],l=s.length;if(l)for(var 
f=s[0],h=f[0],g=f[1]/2+Sa/4,p=Math.sin(g),v=Math.cos(g),d=1;;){d===l&&(d=0),n=s[d];var m=n[0],y=n[1]/2+Sa/4,x=Math.sin(y),M=Math.cos(y),_=m-h,b=_>=0?1:-1,w=b*_,S=w>Sa,k=p*x;if(hc.add(Math.atan2(k*b*Math.sin(w),v*M+k*Math.cos(w))),i+=S?_+b*ka:_,S^h>=e^m>=e){var E=fe(se(f),se(n));pe(E);var A=fe(u,E);pe(A);var C=(S^_>=0?-1:1)*X(A[2]);(r>C||r===C&&(E[0]||E[1]))&&(o+=S^_>=0?1:-1)}if(!d++)break;h=m,p=x,v=M,f=n}}return(-Aa>i||Aa>i&&0>hc)^1&o}function Te(n){var t,e=0/0,r=0/0,u=0/0;return{lineStart:function(){n.lineStart(),t=1},point:function(i,o){var a=i>0?Sa:-Sa,c=oa(i-e);oa(c-Sa)<Aa?(n.point(e,r=(r+o)/2>0?Ea:-Ea),n.point(u,r),n.lineEnd(),n.lineStart(),n.point(a,r),n.point(i,r),t=0):u!==a&&c>=Sa&&(oa(e-u)<Aa&&(e-=u*Aa),oa(i-a)<Aa&&(i-=a*Aa),r=qe(e,r,i,o),n.point(u,r),n.lineEnd(),n.lineStart(),n.point(a,r),t=0),n.point(e=i,r=o),u=a},lineEnd:function(){n.lineEnd(),e=r=0/0},clean:function(){return 2-t}}}function qe(n,t,e,r){var u,i,o=Math.sin(n-e);return oa(o)>Aa?Math.atan((Math.sin(t)*(i=Math.cos(r))*Math.sin(e)-Math.sin(r)*(u=Math.cos(t))*Math.sin(n))/(u*i*o)):(t+r)/2}function ze(n,t,e,r){var u;if(null==n)u=e*Ea,r.point(-Sa,u),r.point(0,u),r.point(Sa,u),r.point(Sa,0),r.point(Sa,-u),r.point(0,-u),r.point(-Sa,-u),r.point(-Sa,0),r.point(-Sa,u);else if(oa(n[0]-t[0])>Aa){var i=n[0]<t[0]?Sa:-Sa;u=e*i/2,r.point(-i,u),r.point(0,u),r.point(i,u)}else r.point(t[0],t[1])}function Re(n){function t(n,t){return Math.cos(n)*Math.cos(t)>i}function e(n){var e,i,c,s,l;return{lineStart:function(){s=c=!1,l=1},point:function(f,h){var g,p=[f,h],v=t(f,h),d=o?v?0:u(f,h):v?u(f+(0>f?Sa:-Sa),h):0;if(!e&&(s=c=v)&&n.lineStart(),v!==c&&(g=r(e,p),(de(e,g)||de(p,g))&&(p[0]+=Aa,p[1]+=Aa,v=t(p[0],p[1]))),v!==c)l=0,v?(n.lineStart(),g=r(p,e),n.point(g[0],g[1])):(g=r(e,p),n.point(g[0],g[1]),n.lineEnd()),e=g;else if(a&&e&&o^v){var 
m;d&i||!(m=r(p,e,!0))||(l=0,o?(n.lineStart(),n.point(m[0][0],m[0][1]),n.point(m[1][0],m[1][1]),n.lineEnd()):(n.point(m[1][0],m[1][1]),n.lineEnd(),n.lineStart(),n.point(m[0][0],m[0][1])))}!v||e&&de(e,p)||n.point(p[0],p[1]),e=p,c=v,i=d},lineEnd:function(){c&&n.lineEnd(),e=null},clean:function(){return l|(s&&c)<<1}}}function r(n,t,e){var r=se(n),u=se(t),o=[1,0,0],a=fe(r,u),c=le(a,a),s=a[0],l=c-s*s;if(!l)return!e&&n;var f=i*c/l,h=-i*s/l,g=fe(o,a),p=ge(o,f),v=ge(a,h);he(p,v);var d=g,m=le(p,d),y=le(d,d),x=m*m-y*(le(p,p)-1);if(!(0>x)){var M=Math.sqrt(x),_=ge(d,(-m-M)/y);if(he(_,p),_=ve(_),!e)return _;var b,w=n[0],S=t[0],k=n[1],E=t[1];w>S&&(b=w,w=S,S=b);var A=S-w,C=oa(A-Sa)<Aa,N=C||Aa>A;if(!C&&k>E&&(b=k,k=E,E=b),N?C?k+E>0^_[1]<(oa(_[0]-w)<Aa?k:E):k<=_[1]&&_[1]<=E:A>Sa^(w<=_[0]&&_[0]<=S)){var L=ge(d,(-m+M)/y);return he(L,p),[_,ve(L)]}}}function u(t,e){var r=o?n:Sa-n,u=0;return-r>t?u|=1:t>r&&(u|=2),-r>e?u|=4:e>r&&(u|=8),u}var i=Math.cos(n),o=i>0,a=oa(i)>Aa,c=cr(n,6*Na);return Ee(t,e,c,o?[0,-n]:[-Sa,n-Sa])}function De(n,t,e,r){return function(u){var i,o=u.a,a=u.b,c=o.x,s=o.y,l=a.x,f=a.y,h=0,g=1,p=l-c,v=f-s;if(i=n-c,p||!(i>0)){if(i/=p,0>p){if(h>i)return;g>i&&(g=i)}else if(p>0){if(i>g)return;i>h&&(h=i)}if(i=e-c,p||!(0>i)){if(i/=p,0>p){if(i>g)return;i>h&&(h=i)}else if(p>0){if(h>i)return;g>i&&(g=i)}if(i=t-s,v||!(i>0)){if(i/=v,0>v){if(h>i)return;g>i&&(g=i)}else if(v>0){if(i>g)return;i>h&&(h=i)}if(i=r-s,v||!(0>i)){if(i/=v,0>v){if(i>g)return;i>h&&(h=i)}else if(v>0){if(h>i)return;g>i&&(g=i)}return h>0&&(u.a={x:c+h*p,y:s+h*v}),1>g&&(u.b={x:c+g*p,y:s+g*v}),u}}}}}}function Pe(n,t,e,r){function u(r,u){return oa(r[0]-n)<Aa?u>0?0:3:oa(r[0]-e)<Aa?u>0?2:1:oa(r[1]-t)<Aa?u>0?1:0:u>0?3:2}function i(n,t){return o(n.x,t.x)}function o(n,t){var e=u(n,1),r=u(t,1);return e!==r?e-r:0===e?t[1]-n[1]:1===e?n[0]-t[0]:2===e?n[1]-t[1]:t[0]-n[0]}return function(a){function c(n){for(var t=0,e=d.length,r=n[1],u=0;e>u;++u)for(var 
i,o=1,a=d[u],c=a.length,s=a[0];c>o;++o)i=a[o],s[1]<=r?i[1]>r&&Z(s,i,n)>0&&++t:i[1]<=r&&Z(s,i,n)<0&&--t,s=i;return 0!==t}function s(i,a,c,s){var l=0,f=0;if(null==i||(l=u(i,c))!==(f=u(a,c))||o(i,a)<0^c>0){do s.point(0===l||3===l?n:e,l>1?r:t);while((l=(l+c+4)%4)!==f)}else s.point(a[0],a[1])}function l(u,i){return u>=n&&e>=u&&i>=t&&r>=i}function f(n,t){l(n,t)&&a.point(n,t)}function h(){N.point=p,d&&d.push(m=[]),S=!0,w=!1,_=b=0/0}function g(){v&&(p(y,x),M&&w&&A.rejoin(),v.push(A.buffer())),N.point=f,w&&a.lineEnd()}function p(n,t){n=Math.max(-Ac,Math.min(Ac,n)),t=Math.max(-Ac,Math.min(Ac,t));var e=l(n,t);if(d&&m.push([n,t]),S)y=n,x=t,M=e,S=!1,e&&(a.lineStart(),a.point(n,t));else if(e&&w)a.point(n,t);else{var r={a:{x:_,y:b},b:{x:n,y:t}};C(r)?(w||(a.lineStart(),a.point(r.a.x,r.a.y)),a.point(r.b.x,r.b.y),e||a.lineEnd(),k=!1):e&&(a.lineStart(),a.point(n,t),k=!1)}_=n,b=t,w=e}var v,d,m,y,x,M,_,b,w,S,k,E=a,A=Ce(),C=De(n,t,e,r),N={point:f,lineStart:h,lineEnd:g,polygonStart:function(){a=A,v=[],d=[],k=!0},polygonEnd:function(){a=E,v=Xo.merge(v);var t=c([n,r]),e=k&&t,u=v.length;(e||u)&&(a.polygonStart(),e&&(a.lineStart(),s(null,null,1,a),a.lineEnd()),u&&we(v,i,t,s,a),a.polygonEnd()),v=d=m=null}};return N}}function Ue(n,t){function e(e,r){return e=n(e,r),t(e[0],e[1])}return n.invert&&t.invert&&(e.invert=function(e,r){return e=t.invert(e,r),e&&n.invert(e[0],e[1])}),e}function je(n){var t=0,e=Sa/3,r=nr(n),u=r(t,e);return u.parallels=function(n){return arguments.length?r(t=n[0]*Sa/180,e=n[1]*Sa/180):[180*(t/Sa),180*(e/Sa)]},u}function He(n,t){function e(n,t){var e=Math.sqrt(i-2*u*Math.sin(t))/u;return[e*Math.sin(n*=u),o-e*Math.cos(n)]}var r=Math.sin(n),u=(r+Math.sin(t))/2,i=1+r*(2*u-r),o=Math.sqrt(i)/u;return e.invert=function(n,t){var e=o-t;return[Math.atan2(n,e)/u,X((i-(n*n+e*e)*u*u)/(2*u))]},e}function Fe(){function n(n,t){Nc+=u*n-r*t,r=n,u=t}var t,e,r,u;Rc.point=function(i,o){Rc.point=n,t=r=i,e=u=o},Rc.lineEnd=function(){n(t,e)}}function 
Oe(n,t){Lc>n&&(Lc=n),n>qc&&(qc=n),Tc>t&&(Tc=t),t>zc&&(zc=t)}function Ye(){function n(n,t){o.push("M",n,",",t,i)}function t(n,t){o.push("M",n,",",t),a.point=e}function e(n,t){o.push("L",n,",",t)}function r(){a.point=n}function u(){o.push("Z")}var i=Ie(4.5),o=[],a={point:n,lineStart:function(){a.point=t},lineEnd:r,polygonStart:function(){a.lineEnd=u},polygonEnd:function(){a.lineEnd=r,a.point=n},pointRadius:function(n){return i=Ie(n),a},result:function(){if(o.length){var n=o.join("");return o=[],n}}};return a}function Ie(n){return"m0,"+n+"a"+n+","+n+" 0 1,1 0,"+-2*n+"a"+n+","+n+" 0 1,1 0,"+2*n+"z"}function Ze(n,t){dc+=n,mc+=t,++yc}function Ve(){function n(n,r){var u=n-t,i=r-e,o=Math.sqrt(u*u+i*i);xc+=o*(t+n)/2,Mc+=o*(e+r)/2,_c+=o,Ze(t=n,e=r)}var t,e;Pc.point=function(r,u){Pc.point=n,Ze(t=r,e=u)}}function Xe(){Pc.point=Ze}function $e(){function n(n,t){var e=n-r,i=t-u,o=Math.sqrt(e*e+i*i);xc+=o*(r+n)/2,Mc+=o*(u+t)/2,_c+=o,o=u*n-r*t,bc+=o*(r+n),wc+=o*(u+t),Sc+=3*o,Ze(r=n,u=t)}var t,e,r,u;Pc.point=function(i,o){Pc.point=n,Ze(t=r=i,e=u=o)},Pc.lineEnd=function(){n(t,e)}}function Be(n){function t(t,e){n.moveTo(t,e),n.arc(t,e,o,0,ka)}function e(t,e){n.moveTo(t,e),a.point=r}function r(t,e){n.lineTo(t,e)}function u(){a.point=t}function i(){n.closePath()}var o=4.5,a={point:t,lineStart:function(){a.point=e},lineEnd:u,polygonStart:function(){a.lineEnd=i},polygonEnd:function(){a.lineEnd=u,a.point=t},pointRadius:function(n){return o=n,a},result:g};return a}function We(n){function t(n){return(a?r:e)(n)}function e(t){return Ke(t,function(e,r){e=n(e,r),t.point(e[0],e[1])})}function r(t){function e(e,r){e=n(e,r),t.point(e[0],e[1])}function r(){x=0/0,S.point=i,t.lineStart()}function i(e,r){var i=se([e,r]),o=n(e,r);u(x,M,y,_,b,w,x=o[0],M=o[1],y=e,_=i[0],b=i[1],w=i[2],a,t),t.point(x,M)}function o(){S.point=e,t.lineEnd()}function c(){r(),S.point=s,S.lineEnd=l}function s(n,t){i(f=n,h=t),g=x,p=M,v=_,d=b,m=w,S.point=i}function l(){u(x,M,y,_,b,w,g,p,f,v,d,m,a,t),S.lineEnd=o,o()}var 
f,h,g,p,v,d,m,y,x,M,_,b,w,S={point:e,lineStart:r,lineEnd:o,polygonStart:function(){t.polygonStart(),S.lineStart=c},polygonEnd:function(){t.polygonEnd(),S.lineStart=r}};return S}function u(t,e,r,a,c,s,l,f,h,g,p,v,d,m){var y=l-t,x=f-e,M=y*y+x*x;if(M>4*i&&d--){var _=a+g,b=c+p,w=s+v,S=Math.sqrt(_*_+b*b+w*w),k=Math.asin(w/=S),E=oa(oa(w)-1)<Aa||oa(r-h)<Aa?(r+h)/2:Math.atan2(b,_),A=n(E,k),C=A[0],N=A[1],L=C-t,T=N-e,q=x*L-y*T;(q*q/M>i||oa((y*L+x*T)/M-.5)>.3||o>a*g+c*p+s*v)&&(u(t,e,r,a,c,s,C,N,E,_/=S,b/=S,w,d,m),m.point(C,N),u(C,N,E,_,b,w,l,f,h,g,p,v,d,m))}}var i=.5,o=Math.cos(30*Na),a=16;return t.precision=function(n){return arguments.length?(a=(i=n*n)>0&&16,t):Math.sqrt(i)},t}function Je(n){var t=We(function(t,e){return n([t*La,e*La])});return function(n){return tr(t(n))}}function Ge(n){this.stream=n}function Ke(n,t){return{point:t,sphere:function(){n.sphere()},lineStart:function(){n.lineStart()},lineEnd:function(){n.lineEnd()},polygonStart:function(){n.polygonStart()},polygonEnd:function(){n.polygonEnd()}}}function Qe(n){return nr(function(){return n})()}function nr(n){function t(n){return n=a(n[0]*Na,n[1]*Na),[n[0]*h+c,s-n[1]*h]}function e(n){return n=a.invert((n[0]-c)/h,(s-n[1])/h),n&&[n[0]*La,n[1]*La]}function r(){a=Ue(o=ur(m,y,x),i);var n=i(v,d);return c=g-n[0]*h,s=p+n[1]*h,u()}function u(){return l&&(l.valid=!1,l=null),t}var i,o,a,c,s,l,f=We(function(n,t){return n=i(n,t),[n[0]*h+c,s-n[1]*h]}),h=150,g=480,p=250,v=0,d=0,m=0,y=0,x=0,M=Ec,_=bt,b=null,w=null;return t.stream=function(n){return l&&(l.valid=!1),l=tr(M(o,f(_(n)))),l.valid=!0,l},t.clipAngle=function(n){return arguments.length?(M=null==n?(b=n,Ec):Re((b=+n)*Na),u()):b},t.clipExtent=function(n){return arguments.length?(w=n,_=n?Pe(n[0][0],n[0][1],n[1][0],n[1][1]):bt,u()):w},t.scale=function(n){return arguments.length?(h=+n,r()):h},t.translate=function(n){return arguments.length?(g=+n[0],p=+n[1],r()):[g,p]},t.center=function(n){return 
arguments.length?(v=n[0]%360*Na,d=n[1]%360*Na,r()):[v*La,d*La]},t.rotate=function(n){return arguments.length?(m=n[0]%360*Na,y=n[1]%360*Na,x=n.length>2?n[2]%360*Na:0,r()):[m*La,y*La,x*La]},Xo.rebind(t,f,"precision"),function(){return i=n.apply(this,arguments),t.invert=i.invert&&e,r()}}function tr(n){return Ke(n,function(t,e){n.point(t*Na,e*Na)})}function er(n,t){return[n,t]}function rr(n,t){return[n>Sa?n-ka:-Sa>n?n+ka:n,t]}function ur(n,t,e){return n?t||e?Ue(or(n),ar(t,e)):or(n):t||e?ar(t,e):rr}function ir(n){return function(t,e){return t+=n,[t>Sa?t-ka:-Sa>t?t+ka:t,e]}}function or(n){var t=ir(n);return t.invert=ir(-n),t}function ar(n,t){function e(n,t){var e=Math.cos(t),a=Math.cos(n)*e,c=Math.sin(n)*e,s=Math.sin(t),l=s*r+a*u;return[Math.atan2(c*i-l*o,a*r-s*u),X(l*i+c*o)]}var r=Math.cos(n),u=Math.sin(n),i=Math.cos(t),o=Math.sin(t);return e.invert=function(n,t){var e=Math.cos(t),a=Math.cos(n)*e,c=Math.sin(n)*e,s=Math.sin(t),l=s*i-c*o;return[Math.atan2(c*i+s*o,a*r+l*u),X(l*r-a*u)]},e}function cr(n,t){var e=Math.cos(n),r=Math.sin(n);return function(u,i,o,a){var c=o*t;null!=u?(u=sr(e,u),i=sr(e,i),(o>0?i>u:u>i)&&(u+=o*ka)):(u=n+o*ka,i=n-.5*c);for(var s,l=u;o>0?l>i:i>l;l-=c)a.point((s=ve([e,-r*Math.cos(l),-r*Math.sin(l)]))[0],s[1])}}function sr(n,t){var e=se(t);e[0]-=n,pe(e);var r=V(-e[1]);return((-e[2]<0?-r:r)+2*Math.PI-Aa)%(2*Math.PI)}function lr(n,t,e){var r=Xo.range(n,t-Aa,e).concat(t);return function(n){return r.map(function(t){return[n,t]})}}function fr(n,t,e){var r=Xo.range(n,t-Aa,e).concat(t);return function(n){return r.map(function(t){return[t,n]})}}function hr(n){return n.source}function gr(n){return n.target}function pr(n,t,e,r){var u=Math.cos(t),i=Math.sin(t),o=Math.cos(r),a=Math.sin(r),c=u*Math.cos(n),s=u*Math.sin(n),l=o*Math.cos(e),f=o*Math.sin(e),h=2*Math.asin(Math.sqrt(J(r-t)+u*o*J(e-n))),g=1/Math.sin(h),p=h?function(n){var 
t=Math.sin(n*=h)*g,e=Math.sin(h-n)*g,r=e*c+t*l,u=e*s+t*f,o=e*i+t*a;return[Math.atan2(u,r)*La,Math.atan2(o,Math.sqrt(r*r+u*u))*La]}:function(){return[n*La,t*La]};return p.distance=h,p}function vr(){function n(n,u){var i=Math.sin(u*=Na),o=Math.cos(u),a=oa((n*=Na)-t),c=Math.cos(a);Uc+=Math.atan2(Math.sqrt((a=o*Math.sin(a))*a+(a=r*i-e*o*c)*a),e*i+r*o*c),t=n,e=i,r=o}var t,e,r;jc.point=function(u,i){t=u*Na,e=Math.sin(i*=Na),r=Math.cos(i),jc.point=n},jc.lineEnd=function(){jc.point=jc.lineEnd=g}}function dr(n,t){function e(t,e){var r=Math.cos(t),u=Math.cos(e),i=n(r*u);return[i*u*Math.sin(t),i*Math.sin(e)]}return e.invert=function(n,e){var r=Math.sqrt(n*n+e*e),u=t(r),i=Math.sin(u),o=Math.cos(u);return[Math.atan2(n*i,r*o),Math.asin(r&&e*i/r)]},e}function mr(n,t){function e(n,t){var e=oa(oa(t)-Ea)<Aa?0:o/Math.pow(u(t),i);return[e*Math.sin(i*n),o-e*Math.cos(i*n)]}var r=Math.cos(n),u=function(n){return Math.tan(Sa/4+n/2)},i=n===t?Math.sin(n):Math.log(r/Math.cos(t))/Math.log(u(t)/u(n)),o=r*Math.pow(u(n),i)/i;return i?(e.invert=function(n,t){var e=o-t,r=I(i)*Math.sqrt(n*n+e*e);return[Math.atan2(n,e)/i,2*Math.atan(Math.pow(o/r,1/i))-Ea]},e):xr}function yr(n,t){function e(n,t){var e=i-t;return[e*Math.sin(u*n),i-e*Math.cos(u*n)]}var r=Math.cos(n),u=n===t?Math.sin(n):(r-Math.cos(t))/(t-n),i=r/u+n;return oa(u)<Aa?er:(e.invert=function(n,t){var e=i-t;return[Math.atan2(n,e)/u,i-I(u)*Math.sqrt(n*n+e*e)]},e)}function xr(n,t){return[n,Math.log(Math.tan(Sa/4+t/2))]}function Mr(n){var t,e=Qe(n),r=e.scale,u=e.translate,i=e.clipExtent;return e.scale=function(){var n=r.apply(e,arguments);return n===e?t?e.clipExtent(null):e:n},e.translate=function(){var n=u.apply(e,arguments);return n===e?t?e.clipExtent(null):e:n},e.clipExtent=function(n){var o=i.apply(e,arguments);if(o===e){if(t=null==n){var a=Sa*r(),c=u();i([[c[0]-a,c[1]-a],[c[0]+a,c[1]+a]])}}else t&&(o=null);return o},e.clipExtent(null)}function _r(n,t){return[Math.log(Math.tan(Sa/4+t/2)),-n]}function br(n){return n[0]}function wr(n){return 
n[1]}function Sr(n){for(var t=n.length,e=[0,1],r=2,u=2;t>u;u++){for(;r>1&&Z(n[e[r-2]],n[e[r-1]],n[u])<=0;)--r;e[r++]=u}return e.slice(0,r)}function kr(n,t){return n[0]-t[0]||n[1]-t[1]}function Er(n,t,e){return(e[0]-t[0])*(n[1]-t[1])<(e[1]-t[1])*(n[0]-t[0])}function Ar(n,t,e,r){var u=n[0],i=e[0],o=t[0]-u,a=r[0]-i,c=n[1],s=e[1],l=t[1]-c,f=r[1]-s,h=(a*(c-s)-f*(u-i))/(f*o-a*l);return[u+h*o,c+h*l]}function Cr(n){var t=n[0],e=n[n.length-1];return!(t[0]-e[0]||t[1]-e[1])}function Nr(){Jr(this),this.edge=this.site=this.circle=null}function Lr(n){var t=Jc.pop()||new Nr;return t.site=n,t}function Tr(n){Or(n),$c.remove(n),Jc.push(n),Jr(n)}function qr(n){var t=n.circle,e=t.x,r=t.cy,u={x:e,y:r},i=n.P,o=n.N,a=[n];Tr(n);for(var c=i;c.circle&&oa(e-c.circle.x)<Aa&&oa(r-c.circle.cy)<Aa;)i=c.P,a.unshift(c),Tr(c),c=i;a.unshift(c),Or(c);for(var s=o;s.circle&&oa(e-s.circle.x)<Aa&&oa(r-s.circle.cy)<Aa;)o=s.N,a.push(s),Tr(s),s=o;a.push(s),Or(s);var l,f=a.length;for(l=1;f>l;++l)s=a[l],c=a[l-1],$r(s.edge,c.site,s.site,u);c=a[0],s=a[f-1],s.edge=Vr(c.site,s.site,null,u),Fr(c),Fr(s)}function zr(n){for(var t,e,r,u,i=n.x,o=n.y,a=$c._;a;)if(r=Rr(a,o)-i,r>Aa)a=a.L;else{if(u=i-Dr(a,o),!(u>Aa)){r>-Aa?(t=a.P,e=a):u>-Aa?(t=a,e=a.N):t=e=a;break}if(!a.R){t=a;break}a=a.R}var c=Lr(n);if($c.insert(t,c),t||e){if(t===e)return Or(t),e=Lr(t.site),$c.insert(c,e),c.edge=e.edge=Vr(t.site,c.site),Fr(t),Fr(e),void 0;if(!e)return c.edge=Vr(t.site,c.site),void 0;Or(t),Or(e);var s=t.site,l=s.x,f=s.y,h=n.x-l,g=n.y-f,p=e.site,v=p.x-l,d=p.y-f,m=2*(h*d-g*v),y=h*h+g*g,x=v*v+d*d,M={x:(d*y-g*x)/m+l,y:(h*x-v*y)/m+f};$r(e.edge,s,p,M),c.edge=Vr(s,n,null,M),e.edge=Vr(n,p,null,M),Fr(t),Fr(e)}}function Rr(n,t){var e=n.site,r=e.x,u=e.y,i=u-t;if(!i)return r;var o=n.P;if(!o)return-1/0;e=o.site;var a=e.x,c=e.y,s=c-t;if(!s)return a;var l=a-r,f=1/i-1/s,h=l/s;return f?(-h+Math.sqrt(h*h-2*f*(l*l/(-2*s)-c+s/2+u-i/2)))/f+r:(r+a)/2}function Dr(n,t){var e=n.N;if(e)return Rr(e,t);var r=n.site;return r.y===t?r.x:1/0}function 
Pr(n){this.site=n,this.edges=[]}function Ur(n){for(var t,e,r,u,i,o,a,c,s,l,f=n[0][0],h=n[1][0],g=n[0][1],p=n[1][1],v=Xc,d=v.length;d--;)if(i=v[d],i&&i.prepare())for(a=i.edges,c=a.length,o=0;c>o;)l=a[o].end(),r=l.x,u=l.y,s=a[++o%c].start(),t=s.x,e=s.y,(oa(r-t)>Aa||oa(u-e)>Aa)&&(a.splice(o,0,new Br(Xr(i.site,l,oa(r-f)<Aa&&p-u>Aa?{x:f,y:oa(t-f)<Aa?e:p}:oa(u-p)<Aa&&h-r>Aa?{x:oa(e-p)<Aa?t:h,y:p}:oa(r-h)<Aa&&u-g>Aa?{x:h,y:oa(t-h)<Aa?e:g}:oa(u-g)<Aa&&r-f>Aa?{x:oa(e-g)<Aa?t:f,y:g}:null),i.site,null)),++c)}function jr(n,t){return t.angle-n.angle}function Hr(){Jr(this),this.x=this.y=this.arc=this.site=this.cy=null}function Fr(n){var t=n.P,e=n.N;if(t&&e){var r=t.site,u=n.site,i=e.site;if(r!==i){var o=u.x,a=u.y,c=r.x-o,s=r.y-a,l=i.x-o,f=i.y-a,h=2*(c*f-s*l);if(!(h>=-Ca)){var g=c*c+s*s,p=l*l+f*f,v=(f*g-s*p)/h,d=(c*p-l*g)/h,f=d+a,m=Gc.pop()||new Hr;m.arc=n,m.site=u,m.x=v+o,m.y=f+Math.sqrt(v*v+d*d),m.cy=f,n.circle=m;for(var y=null,x=Wc._;x;)if(m.y<x.y||m.y===x.y&&m.x<=x.x){if(!x.L){y=x.P;break}x=x.L}else{if(!x.R){y=x;break}x=x.R}Wc.insert(y,m),y||(Bc=m)}}}}function Or(n){var t=n.circle;t&&(t.P||(Bc=t.N),Wc.remove(t),Gc.push(t),Jr(t),n.circle=null)}function Yr(n){for(var t,e=Vc,r=De(n[0][0],n[0][1],n[1][0],n[1][1]),u=e.length;u--;)t=e[u],(!Ir(t,n)||!r(t)||oa(t.a.x-t.b.x)<Aa&&oa(t.a.y-t.b.y)<Aa)&&(t.a=t.b=null,e.splice(u,1))}function Ir(n,t){var e=n.b;if(e)return!0;var r,u,i=n.a,o=t[0][0],a=t[1][0],c=t[0][1],s=t[1][1],l=n.l,f=n.r,h=l.x,g=l.y,p=f.x,v=f.y,d=(h+p)/2,m=(g+v)/2;if(v===g){if(o>d||d>=a)return;if(h>p){if(i){if(i.y>=s)return}else i={x:d,y:c};e={x:d,y:s}}else{if(i){if(i.y<c)return}else i={x:d,y:s};e={x:d,y:c}}}else if(r=(h-p)/(v-g),u=m-r*d,-1>r||r>1)if(h>p){if(i){if(i.y>=s)return}else i={x:(c-u)/r,y:c};e={x:(s-u)/r,y:s}}else{if(i){if(i.y<c)return}else i={x:(s-u)/r,y:s};e={x:(c-u)/r,y:c}}else if(v>g){if(i){if(i.x>=a)return}else i={x:o,y:r*o+u};e={x:a,y:r*a+u}}else{if(i){if(i.x<o)return}else i={x:a,y:r*a+u};e={x:o,y:r*o+u}}return n.a=i,n.b=e,!0}function 
Zr(n,t){this.l=n,this.r=t,this.a=this.b=null}function Vr(n,t,e,r){var u=new Zr(n,t);return Vc.push(u),e&&$r(u,n,t,e),r&&$r(u,t,n,r),Xc[n.i].edges.push(new Br(u,n,t)),Xc[t.i].edges.push(new Br(u,t,n)),u}function Xr(n,t,e){var r=new Zr(n,null);return r.a=t,r.b=e,Vc.push(r),r}function $r(n,t,e,r){n.a||n.b?n.l===e?n.b=r:n.a=r:(n.a=r,n.l=t,n.r=e)}function Br(n,t,e){var r=n.a,u=n.b;this.edge=n,this.site=t,this.angle=e?Math.atan2(e.y-t.y,e.x-t.x):n.l===t?Math.atan2(u.x-r.x,r.y-u.y):Math.atan2(r.x-u.x,u.y-r.y)}function Wr(){this._=null}function Jr(n){n.U=n.C=n.L=n.R=n.P=n.N=null}function Gr(n,t){var e=t,r=t.R,u=e.U;u?u.L===e?u.L=r:u.R=r:n._=r,r.U=u,e.U=r,e.R=r.L,e.R&&(e.R.U=e),r.L=e}function Kr(n,t){var e=t,r=t.L,u=e.U;u?u.L===e?u.L=r:u.R=r:n._=r,r.U=u,e.U=r,e.L=r.R,e.L&&(e.L.U=e),r.R=e}function Qr(n){for(;n.L;)n=n.L;return n}function nu(n,t){var e,r,u,i=n.sort(tu).pop();for(Vc=[],Xc=new Array(n.length),$c=new Wr,Wc=new Wr;;)if(u=Bc,i&&(!u||i.y<u.y||i.y===u.y&&i.x<u.x))(i.x!==e||i.y!==r)&&(Xc[i.i]=new Pr(i),zr(i),e=i.x,r=i.y),i=n.pop();else{if(!u)break;qr(u.arc)}t&&(Yr(t),Ur(t));var o={cells:Xc,edges:Vc};return $c=Wc=Vc=Xc=null,o}function tu(n,t){return t.y-n.y||t.x-n.x}function eu(n,t,e){return(n.x-e.x)*(t.y-n.y)-(n.x-t.x)*(e.y-n.y)}function ru(n){return n.x}function uu(n){return n.y}function iu(){return{leaf:!0,nodes:[],point:null,x:null,y:null}}function ou(n,t,e,r,u,i){if(!n(t,e,r,u,i)){var o=.5*(e+u),a=.5*(r+i),c=t.nodes;c[0]&&ou(n,c[0],e,r,o,a),c[1]&&ou(n,c[1],o,r,u,a),c[2]&&ou(n,c[2],e,a,o,i),c[3]&&ou(n,c[3],o,a,u,i)}}function au(n,t){n=Xo.rgb(n),t=Xo.rgb(t);var e=n.r,r=n.g,u=n.b,i=t.r-e,o=t.g-r,a=t.b-u;return function(n){return"#"+vt(Math.round(e+i*n))+vt(Math.round(r+o*n))+vt(Math.round(u+a*n))}}function cu(n,t){var e,r={},u={};for(e in n)e in t?r[e]=fu(n[e],t[e]):u[e]=n[e];for(e in t)e in n||(u[e]=t[e]);return function(n){for(e in r)u[e]=r[e](n);return u}}function su(n,t){return t-=n=+n,function(e){return n+t*e}}function lu(n,t){var 
e,r,u,i,o,a=0,c=0,s=[],l=[];for(n+="",t+="",Qc.lastIndex=0,r=0;e=Qc.exec(t);++r)e.index&&s.push(t.substring(a,c=e.index)),l.push({i:s.length,x:e[0]}),s.push(null),a=Qc.lastIndex;for(a<t.length&&s.push(t.substring(a)),r=0,i=l.length;(e=Qc.exec(n))&&i>r;++r)if(o=l[r],o.x==e[0]){if(o.i)if(null==s[o.i+1])for(s[o.i-1]+=o.x,s.splice(o.i,1),u=r+1;i>u;++u)l[u].i--;else for(s[o.i-1]+=o.x+s[o.i+1],s.splice(o.i,2),u=r+1;i>u;++u)l[u].i-=2;else if(null==s[o.i+1])s[o.i]=o.x;else for(s[o.i]=o.x+s[o.i+1],s.splice(o.i+1,1),u=r+1;i>u;++u)l[u].i--;l.splice(r,1),i--,r--}else o.x=su(parseFloat(e[0]),parseFloat(o.x));for(;i>r;)o=l.pop(),null==s[o.i+1]?s[o.i]=o.x:(s[o.i]=o.x+s[o.i+1],s.splice(o.i+1,1)),i--;return 1===s.length?null==s[0]?(o=l[0].x,function(n){return o(n)+""}):function(){return t}:function(n){for(r=0;i>r;++r)s[(o=l[r]).i]=o.x(n);return s.join("")}}function fu(n,t){for(var e,r=Xo.interpolators.length;--r>=0&&!(e=Xo.interpolators[r](n,t)););return e}function hu(n,t){var e,r=[],u=[],i=n.length,o=t.length,a=Math.min(n.length,t.length);for(e=0;a>e;++e)r.push(fu(n[e],t[e]));for(;i>e;++e)u[e]=n[e];for(;o>e;++e)u[e]=t[e];return function(n){for(e=0;a>e;++e)u[e]=r[e](n);return u}}function gu(n){return function(t){return 0>=t?0:t>=1?1:n(t)}}function pu(n){return function(t){return 1-n(1-t)}}function vu(n){return function(t){return.5*(.5>t?n(2*t):2-n(2-2*t))}}function du(n){return n*n}function mu(n){return n*n*n}function yu(n){if(0>=n)return 0;if(n>=1)return 1;var t=n*n,e=t*n;return 4*(.5>n?e:3*(n-t)+e-.75)}function xu(n){return function(t){return Math.pow(t,n)}}function Mu(n){return 1-Math.cos(n*Ea)}function _u(n){return Math.pow(2,10*(n-1))}function bu(n){return 1-Math.sqrt(1-n*n)}function wu(n,t){var e;return arguments.length<2&&(t=.45),arguments.length?e=t/ka*Math.asin(1/n):(n=1,e=t/4),function(r){return 1+n*Math.pow(2,-10*r)*Math.sin((r-e)*ka/t)}}function Su(n){return n||(n=1.70158),function(t){return t*t*((n+1)*t-n)}}function ku(n){return 
1/2.75>n?7.5625*n*n:2/2.75>n?7.5625*(n-=1.5/2.75)*n+.75:2.5/2.75>n?7.5625*(n-=2.25/2.75)*n+.9375:7.5625*(n-=2.625/2.75)*n+.984375}function Eu(n,t){n=Xo.hcl(n),t=Xo.hcl(t);var e=n.h,r=n.c,u=n.l,i=t.h-e,o=t.c-r,a=t.l-u;return isNaN(o)&&(o=0,r=isNaN(r)?t.c:r),isNaN(i)?(i=0,e=isNaN(e)?t.h:e):i>180?i-=360:-180>i&&(i+=360),function(n){return rt(e+i*n,r+o*n,u+a*n)+""}}function Au(n,t){n=Xo.hsl(n),t=Xo.hsl(t);var e=n.h,r=n.s,u=n.l,i=t.h-e,o=t.s-r,a=t.l-u;return isNaN(o)&&(o=0,r=isNaN(r)?t.s:r),isNaN(i)?(i=0,e=isNaN(e)?t.h:e):i>180?i-=360:-180>i&&(i+=360),function(n){return nt(e+i*n,r+o*n,u+a*n)+""}}function Cu(n,t){n=Xo.lab(n),t=Xo.lab(t);var e=n.l,r=n.a,u=n.b,i=t.l-e,o=t.a-r,a=t.b-u;return function(n){return ot(e+i*n,r+o*n,u+a*n)+""}}function Nu(n,t){return t-=n,function(e){return Math.round(n+t*e)}}function Lu(n){var t=[n.a,n.b],e=[n.c,n.d],r=qu(t),u=Tu(t,e),i=qu(zu(e,t,-u))||0;t[0]*e[1]<e[0]*t[1]&&(t[0]*=-1,t[1]*=-1,r*=-1,u*=-1),this.rotate=(r?Math.atan2(t[1],t[0]):Math.atan2(-e[0],e[1]))*La,this.translate=[n.e,n.f],this.scale=[r,i],this.skew=i?Math.atan2(u,i)*La:0}function Tu(n,t){return n[0]*t[0]+n[1]*t[1]}function qu(n){var t=Math.sqrt(Tu(n,n));return t&&(n[0]/=t,n[1]/=t),t}function zu(n,t,e){return n[0]+=e*t[0],n[1]+=e*t[1],n}function Ru(n,t){var e,r=[],u=[],i=Xo.transform(n),o=Xo.transform(t),a=i.translate,c=o.translate,s=i.rotate,l=o.rotate,f=i.skew,h=o.skew,g=i.scale,p=o.scale;return 
a[0]!=c[0]||a[1]!=c[1]?(r.push("translate(",null,",",null,")"),u.push({i:1,x:su(a[0],c[0])},{i:3,x:su(a[1],c[1])})):c[0]||c[1]?r.push("translate("+c+")"):r.push(""),s!=l?(s-l>180?l+=360:l-s>180&&(s+=360),u.push({i:r.push(r.pop()+"rotate(",null,")")-2,x:su(s,l)})):l&&r.push(r.pop()+"rotate("+l+")"),f!=h?u.push({i:r.push(r.pop()+"skewX(",null,")")-2,x:su(f,h)}):h&&r.push(r.pop()+"skewX("+h+")"),g[0]!=p[0]||g[1]!=p[1]?(e=r.push(r.pop()+"scale(",null,",",null,")"),u.push({i:e-4,x:su(g[0],p[0])},{i:e-2,x:su(g[1],p[1])})):(1!=p[0]||1!=p[1])&&r.push(r.pop()+"scale("+p+")"),e=u.length,function(n){for(var t,i=-1;++i<e;)r[(t=u[i]).i]=t.x(n);return r.join("")}}function Du(n,t){return t=t-(n=+n)?1/(t-n):0,function(e){return(e-n)*t}}function Pu(n,t){return t=t-(n=+n)?1/(t-n):0,function(e){return Math.max(0,Math.min(1,(e-n)*t))}}function Uu(n){for(var t=n.source,e=n.target,r=Hu(t,e),u=[t];t!==r;)t=t.parent,u.push(t);for(var i=u.length;e!==r;)u.splice(i,0,e),e=e.parent;return u}function ju(n){for(var t=[],e=n.parent;null!=e;)t.push(n),n=e,e=e.parent;return t.push(n),t}function Hu(n,t){if(n===t)return n;for(var e=ju(n),r=ju(t),u=e.pop(),i=r.pop(),o=null;u===i;)o=u,u=e.pop(),i=r.pop();return o}function Fu(n){n.fixed|=2}function Ou(n){n.fixed&=-7}function Yu(n){n.fixed|=4,n.px=n.x,n.py=n.y}function Iu(n){n.fixed&=-5}function Zu(n,t,e){var r=0,u=0;if(n.charge=0,!n.leaf)for(var i,o=n.nodes,a=o.length,c=-1;++c<a;)i=o[c],null!=i&&(Zu(i,t,e),n.charge+=i.charge,r+=i.charge*i.cx,u+=i.charge*i.cy);if(n.point){n.leaf||(n.point.x+=Math.random()-.5,n.point.y+=Math.random()-.5);var s=t*e[n.point.index];n.charge+=n.pointCharge=s,r+=s*n.point.x,u+=s*n.point.y}n.cx=r/n.charge,n.cy=u/n.charge}function Vu(n,t){return Xo.rebind(n,t,"sort","children","value"),n.nodes=n,n.links=Wu,n}function Xu(n){return n.children}function $u(n){return n.value}function Bu(n,t){return t.value-n.value}function Wu(n){return 
Xo.merge(n.map(function(n){return(n.children||[]).map(function(t){return{source:n,target:t}})}))}function Ju(n){return n.x}function Gu(n){return n.y}function Ku(n,t,e){n.y0=t,n.y=e}function Qu(n){return Xo.range(n.length)}function ni(n){for(var t=-1,e=n[0].length,r=[];++t<e;)r[t]=0;return r}function ti(n){for(var t,e=1,r=0,u=n[0][1],i=n.length;i>e;++e)(t=n[e][1])>u&&(r=e,u=t);return r}function ei(n){return n.reduce(ri,0)}function ri(n,t){return n+t[1]}function ui(n,t){return ii(n,Math.ceil(Math.log(t.length)/Math.LN2+1))}function ii(n,t){for(var e=-1,r=+n[0],u=(n[1]-r)/t,i=[];++e<=t;)i[e]=u*e+r;return i}function oi(n){return[Xo.min(n),Xo.max(n)]}function ai(n,t){return n.parent==t.parent?1:2}function ci(n){var t=n.children;return t&&t.length?t[0]:n._tree.thread}function si(n){var t,e=n.children;return e&&(t=e.length)?e[t-1]:n._tree.thread}function li(n,t){var e=n.children;if(e&&(u=e.length))for(var r,u,i=-1;++i<u;)t(r=li(e[i],t),n)>0&&(n=r);return n}function fi(n,t){return n.x-t.x}function hi(n,t){return t.x-n.x}function gi(n,t){return n.depth-t.depth}function pi(n,t){function e(n,r){var u=n.children;if(u&&(o=u.length))for(var i,o,a=null,c=-1;++c<o;)i=u[c],e(i,a),a=i;t(n,r)}e(n,null)}function vi(n){for(var t,e=0,r=0,u=n.children,i=u.length;--i>=0;)t=u[i]._tree,t.prelim+=e,t.mod+=e,e+=t.shift+(r+=t.change)}function di(n,t,e){n=n._tree,t=t._tree;var r=e/(t.number-n.number);n.change+=r,t.change-=r,t.shift+=e,t.prelim+=e,t.mod+=e}function mi(n,t,e){return n._tree.ancestor.parent==t.parent?n._tree.ancestor:e}function yi(n,t){return n.value-t.value}function xi(n,t){var e=n._pack_next;n._pack_next=t,t._pack_prev=n,t._pack_next=e,e._pack_prev=t}function Mi(n,t){n._pack_next=t,t._pack_prev=n}function _i(n,t){var e=t.x-n.x,r=t.y-n.y,u=n.r+t.r;return.999*u*u>e*e+r*r}function bi(n){function t(n){l=Math.min(n.x-n.r,l),f=Math.max(n.x+n.r,f),h=Math.min(n.y-n.r,h),g=Math.max(n.y+n.r,g)}if((e=n.children)&&(s=e.length)){var 
e,r,u,i,o,a,c,s,l=1/0,f=-1/0,h=1/0,g=-1/0;if(e.forEach(wi),r=e[0],r.x=-r.r,r.y=0,t(r),s>1&&(u=e[1],u.x=u.r,u.y=0,t(u),s>2))for(i=e[2],Ei(r,u,i),t(i),xi(r,i),r._pack_prev=i,xi(i,u),u=r._pack_next,o=3;s>o;o++){Ei(r,u,i=e[o]);var p=0,v=1,d=1;for(a=u._pack_next;a!==u;a=a._pack_next,v++)if(_i(a,i)){p=1;break}if(1==p)for(c=r._pack_prev;c!==a._pack_prev&&!_i(c,i);c=c._pack_prev,d++);p?(d>v||v==d&&u.r<r.r?Mi(r,u=a):Mi(r=c,u),o--):(xi(r,i),u=i,t(i))}var m=(l+f)/2,y=(h+g)/2,x=0;for(o=0;s>o;o++)i=e[o],i.x-=m,i.y-=y,x=Math.max(x,i.r+Math.sqrt(i.x*i.x+i.y*i.y));n.r=x,e.forEach(Si)}}function wi(n){n._pack_next=n._pack_prev=n}function Si(n){delete n._pack_next,delete n._pack_prev}function ki(n,t,e,r){var u=n.children;if(n.x=t+=r*n.x,n.y=e+=r*n.y,n.r*=r,u)for(var i=-1,o=u.length;++i<o;)ki(u[i],t,e,r)}function Ei(n,t,e){var r=n.r+e.r,u=t.x-n.x,i=t.y-n.y;if(r&&(u||i)){var o=t.r+e.r,a=u*u+i*i;o*=o,r*=r;var c=.5+(r-o)/(2*a),s=Math.sqrt(Math.max(0,2*o*(r+a)-(r-=a)*r-o*o))/(2*a);e.x=n.x+c*u+s*i,e.y=n.y+c*i-s*u}else e.x=n.x+r,e.y=n.y}function Ai(n){return 1+Xo.max(n,function(n){return n.y})}function Ci(n){return n.reduce(function(n,t){return n+t.x},0)/n.length}function Ni(n){var t=n.children;return t&&t.length?Ni(t[0]):n}function Li(n){var t,e=n.children;return e&&(t=e.length)?Li(e[t-1]):n}function Ti(n){return{x:n.x,y:n.y,dx:n.dx,dy:n.dy}}function qi(n,t){var e=n.x+t[3],r=n.y+t[0],u=n.dx-t[1]-t[3],i=n.dy-t[0]-t[2];return 0>u&&(e+=u/2,u=0),0>i&&(r+=i/2,i=0),{x:e,y:r,dx:u,dy:i}}function zi(n){var t=n[0],e=n[n.length-1];return e>t?[t,e]:[e,t]}function Ri(n){return n.rangeExtent?n.rangeExtent():zi(n.range())}function Di(n,t,e,r){var u=e(n[0],n[1]),i=r(t[0],t[1]);return function(n){return i(u(n))}}function Pi(n,t){var e,r=0,u=n.length-1,i=n[r],o=n[u];return i>o&&(e=r,r=u,u=e,e=i,i=o,o=e),n[r]=t.floor(i),n[u]=t.ceil(o),n}function Ui(n){return n?{floor:function(t){return Math.floor(t/n)*n},ceil:function(t){return Math.ceil(t/n)*n}}:ls}function ji(n,t,e,r){var 
u=[],i=[],o=0,a=Math.min(n.length,t.length)-1;for(n[a]<n[0]&&(n=n.slice().reverse(),t=t.slice().reverse());++o<=a;)u.push(e(n[o-1],n[o])),i.push(r(t[o-1],t[o]));return function(t){var e=Xo.bisect(n,t,1,a)-1;return i[e](u[e](t))}}function Hi(n,t,e,r){function u(){var u=Math.min(n.length,t.length)>2?ji:Di,c=r?Pu:Du;return o=u(n,t,c,e),a=u(t,n,c,fu),i}function i(n){return o(n)}var o,a;return i.invert=function(n){return a(n)},i.domain=function(t){return arguments.length?(n=t.map(Number),u()):n},i.range=function(n){return arguments.length?(t=n,u()):t},i.rangeRound=function(n){return i.range(n).interpolate(Nu)},i.clamp=function(n){return arguments.length?(r=n,u()):r},i.interpolate=function(n){return arguments.length?(e=n,u()):e},i.ticks=function(t){return Ii(n,t)},i.tickFormat=function(t,e){return Zi(n,t,e)},i.nice=function(t){return Oi(n,t),u()},i.copy=function(){return Hi(n,t,e,r)},u()}function Fi(n,t){return Xo.rebind(n,t,"range","rangeRound","interpolate","clamp")}function Oi(n,t){return Pi(n,Ui(Yi(n,t)[2]))}function Yi(n,t){null==t&&(t=10);var e=zi(n),r=e[1]-e[0],u=Math.pow(10,Math.floor(Math.log(r/t)/Math.LN10)),i=t/r*u;return.15>=i?u*=10:.35>=i?u*=5:.75>=i&&(u*=2),e[0]=Math.ceil(e[0]/u)*u,e[1]=Math.floor(e[1]/u)*u+.5*u,e[2]=u,e}function Ii(n,t){return Xo.range.apply(Xo,Yi(n,t))}function Zi(n,t,e){var r=Yi(n,t);return Xo.format(e?e.replace(Qa,function(n,t,e,u,i,o,a,c,s,l){return[t,e,u,i,o,a,c,s||"."+Xi(l,r),l].join("")}):",."+Vi(r[2])+"f")}function Vi(n){return-Math.floor(Math.log(n)/Math.LN10+.01)}function Xi(n,t){var e=Vi(t[2]);return n in fs?Math.abs(e-Vi(Math.max(Math.abs(t[0]),Math.abs(t[1]))))+ +("e"!==n):e-2*("%"===n)}function $i(n,t,e,r){function u(n){return(e?Math.log(0>n?0:n):-Math.log(n>0?0:-n))/Math.log(t)}function i(n){return e?Math.pow(t,n):-Math.pow(t,-n)}function o(t){return n(u(t))}return o.invert=function(t){return i(n.invert(t))},o.domain=function(t){return 
arguments.length?(e=t[0]>=0,n.domain((r=t.map(Number)).map(u)),o):r},o.base=function(e){return arguments.length?(t=+e,n.domain(r.map(u)),o):t},o.nice=function(){var t=Pi(r.map(u),e?Math:gs);return n.domain(t),r=t.map(i),o},o.ticks=function(){var n=zi(r),o=[],a=n[0],c=n[1],s=Math.floor(u(a)),l=Math.ceil(u(c)),f=t%1?2:t;if(isFinite(l-s)){if(e){for(;l>s;s++)for(var h=1;f>h;h++)o.push(i(s)*h);o.push(i(s))}else for(o.push(i(s));s++<l;)for(var h=f-1;h>0;h--)o.push(i(s)*h);for(s=0;o[s]<a;s++);for(l=o.length;o[l-1]>c;l--);o=o.slice(s,l)}return o},o.tickFormat=function(n,t){if(!arguments.length)return hs;arguments.length<2?t=hs:"function"!=typeof t&&(t=Xo.format(t));var r,a=Math.max(.1,n/o.ticks().length),c=e?(r=1e-12,Math.ceil):(r=-1e-12,Math.floor);return function(n){return n/i(c(u(n)+r))<=a?t(n):""}},o.copy=function(){return $i(n.copy(),t,e,r)},Fi(o,n)}function Bi(n,t,e){function r(t){return n(u(t))}var u=Wi(t),i=Wi(1/t);return r.invert=function(t){return i(n.invert(t))},r.domain=function(t){return arguments.length?(n.domain((e=t.map(Number)).map(u)),r):e},r.ticks=function(n){return Ii(e,n)},r.tickFormat=function(n,t){return Zi(e,n,t)},r.nice=function(n){return r.domain(Oi(e,n))},r.exponent=function(o){return arguments.length?(u=Wi(t=o),i=Wi(1/t),n.domain(e.map(u)),r):t},r.copy=function(){return Bi(n.copy(),t,e)},Fi(r,n)}function Wi(n){return function(t){return 0>t?-Math.pow(-t,n):Math.pow(t,n)}}function Ji(n,t){function e(e){return o[((i.get(e)||"range"===t.t&&i.set(e,n.push(e)))-1)%o.length]}function r(t,e){return Xo.range(n.length).map(function(n){return t+e*n})}var i,o,a;return e.domain=function(r){if(!arguments.length)return n;n=[],i=new u;for(var o,a=-1,c=r.length;++a<c;)i.has(o=r[a])||i.set(o,n.push(o));return e[t.t].apply(e,t.a)},e.range=function(n){return arguments.length?(o=n,a=0,t={t:"range",a:arguments},e):o},e.rangePoints=function(u,i){arguments.length<2&&(i=0);var c=u[0],s=u[1],l=(s-c)/(Math.max(1,n.length-1)+i);return 
o=r(n.length<2?(c+s)/2:c+l*i/2,l),a=0,t={t:"rangePoints",a:arguments},e},e.rangeBands=function(u,i,c){arguments.length<2&&(i=0),arguments.length<3&&(c=i);var s=u[1]<u[0],l=u[s-0],f=u[1-s],h=(f-l)/(n.length-i+2*c);return o=r(l+h*c,h),s&&o.reverse(),a=h*(1-i),t={t:"rangeBands",a:arguments},e},e.rangeRoundBands=function(u,i,c){arguments.length<2&&(i=0),arguments.length<3&&(c=i);var s=u[1]<u[0],l=u[s-0],f=u[1-s],h=Math.floor((f-l)/(n.length-i+2*c)),g=f-l-(n.length-i)*h;return o=r(l+Math.round(g/2),h),s&&o.reverse(),a=Math.round(h*(1-i)),t={t:"rangeRoundBands",a:arguments},e},e.rangeBand=function(){return a},e.rangeExtent=function(){return zi(t.a[0])},e.copy=function(){return Ji(n,t)},e.domain(n)}function Gi(n,t){function e(){var e=0,i=t.length;for(u=[];++e<i;)u[e-1]=Xo.quantile(n,e/i);return r}function r(n){return isNaN(n=+n)?void 0:t[Xo.bisect(u,n)]}var u;return r.domain=function(t){return arguments.length?(n=t.filter(function(n){return!isNaN(n)}).sort(Xo.ascending),e()):n},r.range=function(n){return arguments.length?(t=n,e()):t},r.quantiles=function(){return u},r.invertExtent=function(e){return e=t.indexOf(e),0>e?[0/0,0/0]:[e>0?u[e-1]:n[0],e<u.length?u[e]:n[n.length-1]]},r.copy=function(){return Gi(n,t)},e()}function Ki(n,t,e){function r(t){return e[Math.max(0,Math.min(o,Math.floor(i*(t-n))))]}function u(){return i=e.length/(t-n),o=e.length-1,r}var i,o;return r.domain=function(e){return arguments.length?(n=+e[0],t=+e[e.length-1],u()):[n,t]},r.range=function(n){return arguments.length?(e=n,u()):e},r.invertExtent=function(t){return t=e.indexOf(t),t=0>t?0/0:t/i+n,[t,t+1/i]},r.copy=function(){return Ki(n,t,e)},u()}function Qi(n,t){function e(e){return e>=e?t[Xo.bisect(n,e)]:void 0}return e.domain=function(t){return arguments.length?(n=t,e):n},e.range=function(n){return arguments.length?(t=n,e):t},e.invertExtent=function(e){return e=t.indexOf(e),[n[e-1],n[e]]},e.copy=function(){return Qi(n,t)},e}function no(n){function t(n){return+n}return 
t.invert=t,t.domain=t.range=function(e){return arguments.length?(n=e.map(t),t):n},t.ticks=function(t){return Ii(n,t)},t.tickFormat=function(t,e){return Zi(n,t,e)},t.copy=function(){return no(n)},t}function to(n){return n.innerRadius}function eo(n){return n.outerRadius}function ro(n){return n.startAngle}function uo(n){return n.endAngle}function io(n){function t(t){function o(){s.push("M",i(n(l),a))}for(var c,s=[],l=[],f=-1,h=t.length,g=_t(e),p=_t(r);++f<h;)u.call(this,c=t[f],f)?l.push([+g.call(this,c,f),+p.call(this,c,f)]):l.length&&(o(),l=[]);return l.length&&o(),s.length?s.join(""):null}var e=br,r=wr,u=be,i=oo,o=i.key,a=.7;return t.x=function(n){return arguments.length?(e=n,t):e},t.y=function(n){return arguments.length?(r=n,t):r},t.defined=function(n){return arguments.length?(u=n,t):u},t.interpolate=function(n){return arguments.length?(o="function"==typeof n?i=n:(i=Ms.get(n)||oo).key,t):o},t.tension=function(n){return arguments.length?(a=n,t):a},t}function oo(n){return n.join("L")}function ao(n){return oo(n)+"Z"}function co(n){for(var t=0,e=n.length,r=n[0],u=[r[0],",",r[1]];++t<e;)u.push("H",(r[0]+(r=n[t])[0])/2,"V",r[1]);return e>1&&u.push("H",r[0]),u.join("")}function so(n){for(var t=0,e=n.length,r=n[0],u=[r[0],",",r[1]];++t<e;)u.push("V",(r=n[t])[1],"H",r[0]);return u.join("")}function lo(n){for(var t=0,e=n.length,r=n[0],u=[r[0],",",r[1]];++t<e;)u.push("H",(r=n[t])[0],"V",r[1]);return u.join("")}function fo(n,t){return n.length<4?oo(n):n[1]+po(n.slice(1,n.length-1),vo(n,t))}function ho(n,t){return n.length<3?oo(n):n[0]+po((n.push(n[0]),n),vo([n[n.length-2]].concat(n,[n[1]]),t))}function go(n,t){return n.length<3?oo(n):n[0]+po(n,vo(n,t))}function po(n,t){if(t.length<1||n.length!=t.length&&n.length!=t.length+2)return oo(n);var 
e=n.length!=t.length,r="",u=n[0],i=n[1],o=t[0],a=o,c=1;if(e&&(r+="Q"+(i[0]-2*o[0]/3)+","+(i[1]-2*o[1]/3)+","+i[0]+","+i[1],u=n[1],c=2),t.length>1){a=t[1],i=n[c],c++,r+="C"+(u[0]+o[0])+","+(u[1]+o[1])+","+(i[0]-a[0])+","+(i[1]-a[1])+","+i[0]+","+i[1];for(var s=2;s<t.length;s++,c++)i=n[c],a=t[s],r+="S"+(i[0]-a[0])+","+(i[1]-a[1])+","+i[0]+","+i[1]}if(e){var l=n[c];r+="Q"+(i[0]+2*a[0]/3)+","+(i[1]+2*a[1]/3)+","+l[0]+","+l[1]}return r}function vo(n,t){for(var e,r=[],u=(1-t)/2,i=n[0],o=n[1],a=1,c=n.length;++a<c;)e=i,i=o,o=n[a],r.push([u*(o[0]-e[0]),u*(o[1]-e[1])]);return r}function mo(n){if(n.length<3)return oo(n);var t=1,e=n.length,r=n[0],u=r[0],i=r[1],o=[u,u,u,(r=n[1])[0]],a=[i,i,i,r[1]],c=[u,",",i,"L",_o(ws,o),",",_o(ws,a)];for(n.push(n[e-1]);++t<=e;)r=n[t],o.shift(),o.push(r[0]),a.shift(),a.push(r[1]),bo(c,o,a);return n.pop(),c.push("L",r),c.join("")}function yo(n){if(n.length<4)return oo(n);for(var t,e=[],r=-1,u=n.length,i=[0],o=[0];++r<3;)t=n[r],i.push(t[0]),o.push(t[1]);for(e.push(_o(ws,i)+","+_o(ws,o)),--r;++r<u;)t=n[r],i.shift(),i.push(t[0]),o.shift(),o.push(t[1]),bo(e,i,o);return e.join("")}function xo(n){for(var t,e,r=-1,u=n.length,i=u+4,o=[],a=[];++r<4;)e=n[r%u],o.push(e[0]),a.push(e[1]);for(t=[_o(ws,o),",",_o(ws,a)],--r;++r<i;)e=n[r%u],o.shift(),o.push(e[0]),a.shift(),a.push(e[1]),bo(t,o,a);return t.join("")}function Mo(n,t){var e=n.length-1;if(e)for(var r,u,i=n[0][0],o=n[0][1],a=n[e][0]-i,c=n[e][1]-o,s=-1;++s<=e;)r=n[s],u=s/e,r[0]=t*r[0]+(1-t)*(i+u*a),r[1]=t*r[1]+(1-t)*(o+u*c);return mo(n)}function _o(n,t){return n[0]*t[0]+n[1]*t[1]+n[2]*t[2]+n[3]*t[3]}function bo(n,t,e){n.push("C",_o(_s,t),",",_o(_s,e),",",_o(bs,t),",",_o(bs,e),",",_o(ws,t),",",_o(ws,e))}function wo(n,t){return(t[1]-n[1])/(t[0]-n[0])}function So(n){for(var t=0,e=n.length-1,r=[],u=n[0],i=n[1],o=r[0]=wo(u,i);++t<e;)r[t]=(o+(o=wo(u=i,i=n[t+1])))/2;return r[t]=o,r}function ko(n){for(var 
t,e,r,u,i=[],o=So(n),a=-1,c=n.length-1;++a<c;)t=wo(n[a],n[a+1]),oa(t)<Aa?o[a]=o[a+1]=0:(e=o[a]/t,r=o[a+1]/t,u=e*e+r*r,u>9&&(u=3*t/Math.sqrt(u),o[a]=u*e,o[a+1]=u*r));for(a=-1;++a<=c;)u=(n[Math.min(c,a+1)][0]-n[Math.max(0,a-1)][0])/(6*(1+o[a]*o[a])),i.push([u||0,o[a]*u||0]);return i}function Eo(n){return n.length<3?oo(n):n[0]+po(n,ko(n))}function Ao(n){for(var t,e,r,u=-1,i=n.length;++u<i;)t=n[u],e=t[0],r=t[1]+ys,t[0]=e*Math.cos(r),t[1]=e*Math.sin(r);return n}function Co(n){function t(t){function c(){v.push("M",a(n(m),f),l,s(n(d.reverse()),f),"Z")}for(var h,g,p,v=[],d=[],m=[],y=-1,x=t.length,M=_t(e),_=_t(u),b=e===r?function(){return g}:_t(r),w=u===i?function(){return p}:_t(i);++y<x;)o.call(this,h=t[y],y)?(d.push([g=+M.call(this,h,y),p=+_.call(this,h,y)]),m.push([+b.call(this,h,y),+w.call(this,h,y)])):d.length&&(c(),d=[],m=[]);return d.length&&c(),v.length?v.join(""):null}var e=br,r=br,u=0,i=wr,o=be,a=oo,c=a.key,s=a,l="L",f=.7;return t.x=function(n){return arguments.length?(e=r=n,t):r},t.x0=function(n){return arguments.length?(e=n,t):e},t.x1=function(n){return arguments.length?(r=n,t):r},t.y=function(n){return arguments.length?(u=i=n,t):i},t.y0=function(n){return arguments.length?(u=n,t):u},t.y1=function(n){return arguments.length?(i=n,t):i},t.defined=function(n){return arguments.length?(o=n,t):o},t.interpolate=function(n){return arguments.length?(c="function"==typeof n?a=n:(a=Ms.get(n)||oo).key,s=a.reverse||a,l=a.closed?"M":"L",t):c},t.tension=function(n){return arguments.length?(f=n,t):f},t}function No(n){return n.radius}function Lo(n){return[n.x,n.y]}function To(n){return function(){var t=n.apply(this,arguments),e=t[0],r=t[1]+ys;return[e*Math.cos(r),e*Math.sin(r)]}}function qo(){return 64}function zo(){return"circle"}function Ro(n){var t=Math.sqrt(n/Sa);return"M0,"+t+"A"+t+","+t+" 0 1,1 0,"+-t+"A"+t+","+t+" 0 1,1 0,"+t+"Z"}function Do(n,t){return fa(n,Ns),n.id=t,n}function Po(n,t,e,r){var u=n.id;return R(n,"function"==typeof 
e?function(n,i,o){n.__transition__[u].tween.set(t,r(e.call(n,n.__data__,i,o)))}:(e=r(e),function(n){n.__transition__[u].tween.set(t,e)}))}function Uo(n){return null==n&&(n=""),function(){this.textContent=n}}function jo(n,t,e,r){var i=n.__transition__||(n.__transition__={active:0,count:0}),o=i[e];if(!o){var a=r.time;o=i[e]={tween:new u,time:a,ease:r.ease,delay:r.delay,duration:r.duration},++i.count,Xo.timer(function(r){function u(r){return i.active>e?s():(i.active=e,o.event&&o.event.start.call(n,l,t),o.tween.forEach(function(e,r){(r=r.call(n,l,t))&&v.push(r)}),Xo.timer(function(){return p.c=c(r||1)?be:c,1},0,a),void 0)}function c(r){if(i.active!==e)return s();for(var u=r/g,a=f(u),c=v.length;c>0;)v[--c].call(n,a);return u>=1?(o.event&&o.event.end.call(n,l,t),s()):void 0}function s(){return--i.count?delete i[e]:delete n.__transition__,1}var l=n.__data__,f=o.ease,h=o.delay,g=o.duration,p=Ja,v=[];return p.t=h+a,r>=h?u(r-h):(p.c=u,void 0)},0,a)}}function Ho(n,t){n.attr("transform",function(n){return"translate("+t(n)+",0)"})}function Fo(n,t){n.attr("transform",function(n){return"translate(0,"+t(n)+")"})}function Oo(n){return n.toISOString()}function Yo(n,t,e){function r(t){return n(t)}function u(n,e){var r=n[1]-n[0],u=r/e,i=Xo.bisect(js,u);return i==js.length?[t.year,Yi(n.map(function(n){return n/31536e6}),e)[2]]:i?t[u/js[i-1]<js[i]/u?i-1:i]:[Os,Yi(n,e)[2]]}return r.invert=function(t){return Io(n.invert(t))},r.domain=function(t){return arguments.length?(n.domain(t),r):n.domain().map(Io)},r.nice=function(n,t){function e(e){return!isNaN(e)&&!n.range(e,Io(+e+1),t).length}var i=r.domain(),o=zi(i),a=null==n?u(o,10):"number"==typeof n&&u(o,n);return a&&(n=a[0],t=a[1]),r.domain(Pi(i,t>1?{floor:function(t){for(;e(t=n.floor(t));)t=Io(t-1);return t},ceil:function(t){for(;e(t=n.ceil(t));)t=Io(+t+1);return t}}:n))},r.ticks=function(n,t){var e=zi(r.domain()),i=null==n?u(e,10):"number"==typeof n?u(e,n):!n.range&&[{range:n},t];return 
i&&(n=i[0],t=i[1]),n.range(e[0],Io(+e[1]+1),1>t?1:t)},r.tickFormat=function(){return e},r.copy=function(){return Yo(n.copy(),t,e)},Fi(r,n)}function Io(n){return new Date(n)}function Zo(n){return JSON.parse(n.responseText)}function Vo(n){var t=Wo.createRange();return t.selectNode(Wo.body),t.createContextualFragment(n.responseText)}var Xo={version:"3.4.3"};Date.now||(Date.now=function(){return+new Date});var $o=[].slice,Bo=function(n){return $o.call(n)},Wo=document,Jo=Wo.documentElement,Go=window;try{Bo(Jo.childNodes)[0].nodeType}catch(Ko){Bo=function(n){for(var t=n.length,e=new Array(t);t--;)e[t]=n[t];return e}}try{Wo.createElement("div").style.setProperty("opacity",0,"")}catch(Qo){var na=Go.Element.prototype,ta=na.setAttribute,ea=na.setAttributeNS,ra=Go.CSSStyleDeclaration.prototype,ua=ra.setProperty;na.setAttribute=function(n,t){ta.call(this,n,t+"")},na.setAttributeNS=function(n,t,e){ea.call(this,n,t,e+"")},ra.setProperty=function(n,t,e){ua.call(this,n,t+"",e)}}Xo.ascending=function(n,t){return t>n?-1:n>t?1:n>=t?0:0/0},Xo.descending=function(n,t){return n>t?-1:t>n?1:t>=n?0:0/0},Xo.min=function(n,t){var e,r,u=-1,i=n.length;if(1===arguments.length){for(;++u<i&&!(null!=(e=n[u])&&e>=e);)e=void 0;for(;++u<i;)null!=(r=n[u])&&e>r&&(e=r)}else{for(;++u<i&&!(null!=(e=t.call(n,n[u],u))&&e>=e);)e=void 0;for(;++u<i;)null!=(r=t.call(n,n[u],u))&&e>r&&(e=r)}return e},Xo.max=function(n,t){var e,r,u=-1,i=n.length;if(1===arguments.length){for(;++u<i&&!(null!=(e=n[u])&&e>=e);)e=void 0;for(;++u<i;)null!=(r=n[u])&&r>e&&(e=r)}else{for(;++u<i&&!(null!=(e=t.call(n,n[u],u))&&e>=e);)e=void 0;for(;++u<i;)null!=(r=t.call(n,n[u],u))&&r>e&&(e=r)}return e},Xo.extent=function(n,t){var e,r,u,i=-1,o=n.length;if(1===arguments.length){for(;++i<o&&!(null!=(e=u=n[i])&&e>=e);)e=u=void 0;for(;++i<o;)null!=(r=n[i])&&(e>r&&(e=r),r>u&&(u=r))}else{for(;++i<o&&!(null!=(e=u=t.call(n,n[i],i))&&e>=e);)e=void 0;for(;++i<o;)null!=(r=t.call(n,n[i],i))&&(e>r&&(e=r),r>u&&(u=r))}return[e,u]},Xo.sum=function(n,t){var 
e,r=0,u=n.length,i=-1;if(1===arguments.length)for(;++i<u;)isNaN(e=+n[i])||(r+=e);else for(;++i<u;)isNaN(e=+t.call(n,n[i],i))||(r+=e);return r},Xo.mean=function(t,e){var r,u=t.length,i=0,o=-1,a=0;if(1===arguments.length)for(;++o<u;)n(r=t[o])&&(i+=(r-i)/++a);else for(;++o<u;)n(r=e.call(t,t[o],o))&&(i+=(r-i)/++a);return a?i:void 0},Xo.quantile=function(n,t){var e=(n.length-1)*t+1,r=Math.floor(e),u=+n[r-1],i=e-r;return i?u+i*(n[r]-u):u},Xo.median=function(t,e){return arguments.length>1&&(t=t.map(e)),t=t.filter(n),t.length?Xo.quantile(t.sort(Xo.ascending),.5):void 0},Xo.bisector=function(n){return{left:function(t,e,r,u){for(arguments.length<3&&(r=0),arguments.length<4&&(u=t.length);u>r;){var i=r+u>>>1;n.call(t,t[i],i)<e?r=i+1:u=i}return r},right:function(t,e,r,u){for(arguments.length<3&&(r=0),arguments.length<4&&(u=t.length);u>r;){var i=r+u>>>1;e<n.call(t,t[i],i)?u=i:r=i+1}return r}}};var ia=Xo.bisector(function(n){return n});Xo.bisectLeft=ia.left,Xo.bisect=Xo.bisectRight=ia.right,Xo.shuffle=function(n){for(var t,e,r=n.length;r;)e=0|Math.random()*r--,t=n[r],n[r]=n[e],n[e]=t;return n},Xo.permute=function(n,t){for(var e=t.length,r=new Array(e);e--;)r[e]=n[t[e]];return r},Xo.pairs=function(n){for(var t,e=0,r=n.length-1,u=n[0],i=new Array(0>r?0:r);r>e;)i[e]=[t=u,u=n[++e]];return i},Xo.zip=function(){if(!(u=arguments.length))return[];for(var n=-1,e=Xo.min(arguments,t),r=new Array(e);++n<e;)for(var u,i=-1,o=r[n]=new Array(u);++i<u;)o[i]=arguments[i][n];return r},Xo.transpose=function(n){return Xo.zip.apply(Xo,n)},Xo.keys=function(n){var t=[];for(var e in n)t.push(e);return t},Xo.values=function(n){var t=[];for(var e in n)t.push(n[e]);return t},Xo.entries=function(n){var t=[];for(var e in n)t.push({key:e,value:n[e]});return t},Xo.merge=function(n){for(var t,e,r,u=n.length,i=-1,o=0;++i<u;)o+=n[i].length;for(e=new Array(o);--u>=0;)for(r=n[u],t=r.length;--t>=0;)e[--o]=r[t];return e};var 
oa=Math.abs;Xo.range=function(n,t,r){if(arguments.length<3&&(r=1,arguments.length<2&&(t=n,n=0)),1/0===(t-n)/r)throw new Error("infinite range");var u,i=[],o=e(oa(r)),a=-1;if(n*=o,t*=o,r*=o,0>r)for(;(u=n+r*++a)>t;)i.push(u/o);else for(;(u=n+r*++a)<t;)i.push(u/o);return i},Xo.map=function(n){var t=new u;if(n instanceof u)n.forEach(function(n,e){t.set(n,e)});else for(var e in n)t.set(e,n[e]);return t},r(u,{has:i,get:function(n){return this[aa+n]},set:function(n,t){return this[aa+n]=t},remove:o,keys:a,values:function(){var n=[];return this.forEach(function(t,e){n.push(e)}),n},entries:function(){var n=[];return this.forEach(function(t,e){n.push({key:t,value:e})}),n},size:c,empty:s,forEach:function(n){for(var t in this)t.charCodeAt(0)===ca&&n.call(this,t.substring(1),this[t])}});var aa="\x00",ca=aa.charCodeAt(0);Xo.nest=function(){function n(t,a,c){if(c>=o.length)return r?r.call(i,a):e?a.sort(e):a;for(var s,l,f,h,g=-1,p=a.length,v=o[c++],d=new u;++g<p;)(h=d.get(s=v(l=a[g])))?h.push(l):d.set(s,[l]);return t?(l=t(),f=function(e,r){l.set(e,n(t,r,c))}):(l={},f=function(e,r){l[e]=n(t,r,c)}),d.forEach(f),l}function t(n,e){if(e>=o.length)return n;var r=[],u=a[e++];return n.forEach(function(n,u){r.push({key:n,values:t(u,e)})}),u?r.sort(function(n,t){return u(n.key,t.key)}):r}var e,r,i={},o=[],a=[];return i.map=function(t,e){return n(e,t,0)},i.entries=function(e){return t(n(Xo.map,e,0),0)},i.key=function(n){return o.push(n),i},i.sortKeys=function(n){return a[o.length-1]=n,i},i.sortValues=function(n){return e=n,i},i.rollup=function(n){return r=n,i},i},Xo.set=function(n){var t=new l;if(n)for(var e=0,r=n.length;r>e;++e)t.add(n[e]);return t},r(l,{has:i,add:function(n){return this[aa+n]=!0,n},remove:function(n){return n=aa+n,n in this&&delete this[n]},values:a,size:c,empty:s,forEach:function(n){for(var t in this)t.charCodeAt(0)===ca&&n.call(this,t.substring(1))}}),Xo.behavior={},Xo.rebind=function(n,t){for(var e,r=1,u=arguments.length;++r<u;)n[e=arguments[r]]=f(n,t,t[e]);return n};var 
sa=["webkit","ms","moz","Moz","o","O"];Xo.dispatch=function(){for(var n=new p,t=-1,e=arguments.length;++t<e;)n[arguments[t]]=v(n);return n},p.prototype.on=function(n,t){var e=n.indexOf("."),r="";if(e>=0&&(r=n.substring(e+1),n=n.substring(0,e)),n)return arguments.length<2?this[n].on(r):this[n].on(r,t);if(2===arguments.length){if(null==t)for(n in this)this.hasOwnProperty(n)&&this[n].on(r,null);return this}},Xo.event=null,Xo.requote=function(n){return n.replace(la,"\\$&")};var la=/[\\\^\$\*\+\?\|\[\]\(\)\.\{\}]/g,fa={}.__proto__?function(n,t){n.__proto__=t}:function(n,t){for(var e in t)n[e]=t[e]},ha=function(n,t){return t.querySelector(n)},ga=function(n,t){return t.querySelectorAll(n)},pa=Jo[h(Jo,"matchesSelector")],va=function(n,t){return pa.call(n,t)};"function"==typeof Sizzle&&(ha=function(n,t){return Sizzle(n,t)[0]||null},ga=Sizzle,va=Sizzle.matchesSelector),Xo.selection=function(){return xa};var da=Xo.selection.prototype=[];da.select=function(n){var t,e,r,u,i=[];n=M(n);for(var o=-1,a=this.length;++o<a;){i.push(t=[]),t.parentNode=(r=this[o]).parentNode;for(var c=-1,s=r.length;++c<s;)(u=r[c])?(t.push(e=n.call(u,u.__data__,c,o)),e&&"__data__"in u&&(e.__data__=u.__data__)):t.push(null)}return x(i)},da.selectAll=function(n){var t,e,r=[];n=_(n);for(var u=-1,i=this.length;++u<i;)for(var o=this[u],a=-1,c=o.length;++a<c;)(e=o[a])&&(r.push(t=Bo(n.call(e,e.__data__,a,u))),t.parentNode=e);return x(r)};var ma={svg:"http://www.w3.org/2000/svg",xhtml:"http://www.w3.org/1999/xhtml",xlink:"http://www.w3.org/1999/xlink",xml:"http://www.w3.org/XML/1998/namespace",xmlns:"http://www.w3.org/2000/xmlns/"};Xo.ns={prefix:ma,qualify:function(n){var t=n.indexOf(":"),e=n;return t>=0&&(e=n.substring(0,t),n=n.substring(t+1)),ma.hasOwnProperty(e)?{space:ma[e],local:n}:n}},da.attr=function(n,t){if(arguments.length<2){if("string"==typeof n){var e=this.node();return n=Xo.ns.qualify(n),n.local?e.getAttributeNS(n.space,n.local):e.getAttribute(n)}for(t in n)this.each(b(t,n[t]));return this}return 
this.each(b(n,t))},da.classed=function(n,t){if(arguments.length<2){if("string"==typeof n){var e=this.node(),r=(n=k(n)).length,u=-1;if(t=e.classList){for(;++u<r;)if(!t.contains(n[u]))return!1}else for(t=e.getAttribute("class");++u<r;)if(!S(n[u]).test(t))return!1;return!0}for(t in n)this.each(E(t,n[t]));return this}return this.each(E(n,t))},da.style=function(n,t,e){var r=arguments.length;if(3>r){if("string"!=typeof n){2>r&&(t="");for(e in n)this.each(C(e,n[e],t));return this}if(2>r)return Go.getComputedStyle(this.node(),null).getPropertyValue(n);e=""}return this.each(C(n,t,e))},da.property=function(n,t){if(arguments.length<2){if("string"==typeof n)return this.node()[n];for(t in n)this.each(N(t,n[t]));return this}return this.each(N(n,t))},da.text=function(n){return arguments.length?this.each("function"==typeof n?function(){var t=n.apply(this,arguments);this.textContent=null==t?"":t}:null==n?function(){this.textContent=""}:function(){this.textContent=n}):this.node().textContent},da.html=function(n){return arguments.length?this.each("function"==typeof n?function(){var t=n.apply(this,arguments);this.innerHTML=null==t?"":t}:null==n?function(){this.innerHTML=""}:function(){this.innerHTML=n}):this.node().innerHTML},da.append=function(n){return n=L(n),this.select(function(){return this.appendChild(n.apply(this,arguments))})},da.insert=function(n,t){return n=L(n),t=M(t),this.select(function(){return this.insertBefore(n.apply(this,arguments),t.apply(this,arguments)||null)})},da.remove=function(){return this.each(function(){var n=this.parentNode;n&&n.removeChild(this)})},da.data=function(n,t){function e(n,e){var r,i,o,a=n.length,f=e.length,h=Math.min(a,f),g=new Array(f),p=new Array(f),v=new Array(a);if(t){var d,m=new u,y=new 
u,x=[];for(r=-1;++r<a;)d=t.call(i=n[r],i.__data__,r),m.has(d)?v[r]=i:m.set(d,i),x.push(d);for(r=-1;++r<f;)d=t.call(e,o=e[r],r),(i=m.get(d))?(g[r]=i,i.__data__=o):y.has(d)||(p[r]=T(o)),y.set(d,o),m.remove(d);for(r=-1;++r<a;)m.has(x[r])&&(v[r]=n[r])}else{for(r=-1;++r<h;)i=n[r],o=e[r],i?(i.__data__=o,g[r]=i):p[r]=T(o);for(;f>r;++r)p[r]=T(e[r]);for(;a>r;++r)v[r]=n[r]}p.update=g,p.parentNode=g.parentNode=v.parentNode=n.parentNode,c.push(p),s.push(g),l.push(v)}var r,i,o=-1,a=this.length;if(!arguments.length){for(n=new Array(a=(r=this[0]).length);++o<a;)(i=r[o])&&(n[o]=i.__data__);return n}var c=D([]),s=x([]),l=x([]);if("function"==typeof n)for(;++o<a;)e(r=this[o],n.call(r,r.parentNode.__data__,o));else for(;++o<a;)e(r=this[o],n);return s.enter=function(){return c},s.exit=function(){return l},s},da.datum=function(n){return arguments.length?this.property("__data__",n):this.property("__data__")},da.filter=function(n){var t,e,r,u=[];"function"!=typeof n&&(n=q(n));for(var i=0,o=this.length;o>i;i++){u.push(t=[]),t.parentNode=(e=this[i]).parentNode;for(var a=0,c=e.length;c>a;a++)(r=e[a])&&n.call(r,r.__data__,a,i)&&t.push(r)}return x(u)},da.order=function(){for(var n=-1,t=this.length;++n<t;)for(var e,r=this[n],u=r.length-1,i=r[u];--u>=0;)(e=r[u])&&(i&&i!==e.nextSibling&&i.parentNode.insertBefore(e,i),i=e);return this},da.sort=function(n){n=z.apply(this,arguments);for(var t=-1,e=this.length;++t<e;)this[t].sort(n);return this.order()},da.each=function(n){return R(this,function(t,e,r){n.call(t,t.__data__,e,r)})},da.call=function(n){var t=Bo(arguments);return n.apply(t[0]=this,t),this},da.empty=function(){return!this.node()},da.node=function(){for(var n=0,t=this.length;t>n;n++)for(var e=this[n],r=0,u=e.length;u>r;r++){var i=e[r];if(i)return i}return null},da.size=function(){var n=0;return this.each(function(){++n}),n};var 
ya=[];Xo.selection.enter=D,Xo.selection.enter.prototype=ya,ya.append=da.append,ya.empty=da.empty,ya.node=da.node,ya.call=da.call,ya.size=da.size,ya.select=function(n){for(var t,e,r,u,i,o=[],a=-1,c=this.length;++a<c;){r=(u=this[a]).update,o.push(t=[]),t.parentNode=u.parentNode;for(var s=-1,l=u.length;++s<l;)(i=u[s])?(t.push(r[s]=e=n.call(u.parentNode,i.__data__,s,a)),e.__data__=i.__data__):t.push(null)}return x(o)},ya.insert=function(n,t){return arguments.length<2&&(t=P(this)),da.insert.call(this,n,t)},da.transition=function(){for(var n,t,e=ks||++Ls,r=[],u=Es||{time:Date.now(),ease:yu,delay:0,duration:250},i=-1,o=this.length;++i<o;){r.push(n=[]);for(var a=this[i],c=-1,s=a.length;++c<s;)(t=a[c])&&jo(t,c,e,u),n.push(t)}return Do(r,e)},da.interrupt=function(){return this.each(U)},Xo.select=function(n){var t=["string"==typeof n?ha(n,Wo):n];return t.parentNode=Jo,x([t])},Xo.selectAll=function(n){var t=Bo("string"==typeof n?ga(n,Wo):n);return t.parentNode=Jo,x([t])};var xa=Xo.select(Jo);da.on=function(n,t,e){var r=arguments.length;if(3>r){if("string"!=typeof n){2>r&&(t=!1);for(e in n)this.each(j(e,n[e],t));return this}if(2>r)return(r=this.node()["__on"+n])&&r._;e=!1}return this.each(j(n,t,e))};var Ma=Xo.map({mouseenter:"mouseover",mouseleave:"mouseout"});Ma.forEach(function(n){"on"+n in Wo&&Ma.remove(n)});var _a="onselectstart"in Wo?null:h(Jo.style,"userSelect"),ba=0;Xo.mouse=function(n){return Y(n,m())};var wa=/WebKit/.test(Go.navigator.userAgent)?-1:0;Xo.touches=function(n,t){return arguments.length<2&&(t=m().touches),t?Bo(t).map(function(t){var e=Y(n,t);return e.identifier=t.identifier,e}):[]},Xo.behavior.drag=function(){function n(){this.on("mousedown.drag",o).on("touchstart.drag",a)}function t(){return Xo.event.changedTouches[0].identifier}function e(n,t){return Xo.touches(n).filter(function(n){return n.identifier===t})[0]}function r(n,t,e,r){return function(){function o(){var 
n=t(l,g),e=n[0]-v[0],r=n[1]-v[1];d|=e|r,v=n,f({type:"drag",x:n[0]+c[0],y:n[1]+c[1],dx:e,dy:r})}function a(){m.on(e+"."+p,null).on(r+"."+p,null),y(d&&Xo.event.target===h),f({type:"dragend"})}var c,s=this,l=s.parentNode,f=u.of(s,arguments),h=Xo.event.target,g=n(),p=null==g?"drag":"drag-"+g,v=t(l,g),d=0,m=Xo.select(Go).on(e+"."+p,o).on(r+"."+p,a),y=O();i?(c=i.apply(s,arguments),c=[c.x-v[0],c.y-v[1]]):c=[0,0],f({type:"dragstart"})}}var u=y(n,"drag","dragstart","dragend"),i=null,o=r(g,Xo.mouse,"mousemove","mouseup"),a=r(t,e,"touchmove","touchend");return n.origin=function(t){return arguments.length?(i=t,n):i},Xo.rebind(n,u,"on")};var Sa=Math.PI,ka=2*Sa,Ea=Sa/2,Aa=1e-6,Ca=Aa*Aa,Na=Sa/180,La=180/Sa,Ta=Math.SQRT2,qa=2,za=4;Xo.interpolateZoom=function(n,t){function e(n){var t=n*y;if(m){var e=B(v),o=i/(qa*h)*(e*W(Ta*t+v)-$(v));return[r+o*s,u+o*l,i*e/B(Ta*t+v)]}return[r+n*s,u+n*l,i*Math.exp(Ta*t)]}var r=n[0],u=n[1],i=n[2],o=t[0],a=t[1],c=t[2],s=o-r,l=a-u,f=s*s+l*l,h=Math.sqrt(f),g=(c*c-i*i+za*f)/(2*i*qa*h),p=(c*c-i*i-za*f)/(2*c*qa*h),v=Math.log(Math.sqrt(g*g+1)-g),d=Math.log(Math.sqrt(p*p+1)-p),m=d-v,y=(m||Math.log(c/i))/Ta;return e.duration=1e3*y,e},Xo.behavior.zoom=function(){function n(n){n.on(A,s).on(Pa+".zoom",f).on(C,h).on("dblclick.zoom",g).on(L,l)}function t(n){return[(n[0]-S.x)/S.k,(n[1]-S.y)/S.k]}function e(n){return[n[0]*S.k+S.x,n[1]*S.k+S.y]}function r(n){S.k=Math.max(E[0],Math.min(E[1],n))}function u(n,t){t=e(t),S.x+=n[0]-t[0],S.y+=n[1]-t[1]}function i(){_&&_.domain(M.range().map(function(n){return(n-S.x)/S.k}).map(M.invert)),w&&w.domain(b.range().map(function(n){return(n-S.y)/S.k}).map(b.invert))}function o(n){n({type:"zoomstart"})}function a(n){i(),n({type:"zoom",scale:S.k,translate:[S.x,S.y]})}function c(n){n({type:"zoomend"})}function s(){function n(){l=1,u(Xo.mouse(r),g),a(i)}function e(){f.on(C,Go===r?h:null).on(N,null),p(l&&Xo.event.target===s),c(i)}var 
r=this,i=T.of(r,arguments),s=Xo.event.target,l=0,f=Xo.select(Go).on(C,n).on(N,e),g=t(Xo.mouse(r)),p=O();U.call(r),o(i)}function l(){function n(){var n=Xo.touches(g);return h=S.k,n.forEach(function(n){n.identifier in v&&(v[n.identifier]=t(n))}),n}function e(){for(var t=Xo.event.changedTouches,e=0,i=t.length;i>e;++e)v[t[e].identifier]=null;var o=n(),c=Date.now();if(1===o.length){if(500>c-x){var s=o[0],l=v[s.identifier];r(2*S.k),u(s,l),d(),a(p)}x=c}else if(o.length>1){var s=o[0],f=o[1],h=s[0]-f[0],g=s[1]-f[1];m=h*h+g*g}}function i(){for(var n,t,e,i,o=Xo.touches(g),c=0,s=o.length;s>c;++c,i=null)if(e=o[c],i=v[e.identifier]){if(t)break;n=e,t=i}if(i){var l=(l=e[0]-n[0])*l+(l=e[1]-n[1])*l,f=m&&Math.sqrt(l/m);n=[(n[0]+e[0])/2,(n[1]+e[1])/2],t=[(t[0]+i[0])/2,(t[1]+i[1])/2],r(f*h)}x=null,u(n,t),a(p)}function f(){if(Xo.event.touches.length){for(var t=Xo.event.changedTouches,e=0,r=t.length;r>e;++e)delete v[t[e].identifier];for(var u in v)return void n()}b.on(M,null).on(_,null),w.on(A,s).on(L,l),k(),c(p)}var h,g=this,p=T.of(g,arguments),v={},m=0,y=Xo.event.changedTouches[0].identifier,M="touchmove.zoom-"+y,_="touchend.zoom-"+y,b=Xo.select(Go).on(M,i).on(_,f),w=Xo.select(g).on(A,null).on(L,e),k=O();U.call(g),e(),o(p)}function f(){var n=T.of(this,arguments);m?clearTimeout(m):(U.call(this),o(n)),m=setTimeout(function(){m=null,c(n)},50),d();var e=v||Xo.mouse(this);p||(p=t(e)),r(Math.pow(2,.002*Ra())*S.k),u(e,p),a(n)}function h(){p=null}function g(){var n=T.of(this,arguments),e=Xo.mouse(this),i=t(e),s=Math.log(S.k)/Math.LN2;o(n),r(Math.pow(2,Xo.event.shiftKey?Math.ceil(s)-1:Math.floor(s)+1)),u(e,i),a(n),c(n)}var p,v,m,x,M,_,b,w,S={x:0,y:0,k:1},k=[960,500],E=Da,A="mousedown.zoom",C="mousemove.zoom",N="mouseup.zoom",L="touchstart.zoom",T=y(n,"zoomstart","zoom","zoomend");return n.event=function(n){n.each(function(){var n=T.of(this,arguments),t=S;ks?Xo.select(this).transition().each("start.zoom",function(){S=this.__chart__||{x:0,y:0,k:1},o(n)}).tween("zoom:zoom",function(){var 
e=k[0],r=k[1],u=e/2,i=r/2,o=Xo.interpolateZoom([(u-S.x)/S.k,(i-S.y)/S.k,e/S.k],[(u-t.x)/t.k,(i-t.y)/t.k,e/t.k]);return function(t){var r=o(t),c=e/r[2];this.__chart__=S={x:u-r[0]*c,y:i-r[1]*c,k:c},a(n)}}).each("end.zoom",function(){c(n)}):(this.__chart__=S,o(n),a(n),c(n))})},n.translate=function(t){return arguments.length?(S={x:+t[0],y:+t[1],k:S.k},i(),n):[S.x,S.y]},n.scale=function(t){return arguments.length?(S={x:S.x,y:S.y,k:+t},i(),n):S.k},n.scaleExtent=function(t){return arguments.length?(E=null==t?Da:[+t[0],+t[1]],n):E},n.center=function(t){return arguments.length?(v=t&&[+t[0],+t[1]],n):v},n.size=function(t){return arguments.length?(k=t&&[+t[0],+t[1]],n):k},n.x=function(t){return arguments.length?(_=t,M=t.copy(),S={x:0,y:0,k:1},n):_},n.y=function(t){return arguments.length?(w=t,b=t.copy(),S={x:0,y:0,k:1},n):w},Xo.rebind(n,T,"on")};var Ra,Da=[0,1/0],Pa="onwheel"in Wo?(Ra=function(){return-Xo.event.deltaY*(Xo.event.deltaMode?120:1)},"wheel"):"onmousewheel"in Wo?(Ra=function(){return Xo.event.wheelDelta},"mousewheel"):(Ra=function(){return-Xo.event.detail},"MozMousePixelScroll");G.prototype.toString=function(){return this.rgb()+""},Xo.hsl=function(n,t,e){return 1===arguments.length?n instanceof Q?K(n.h,n.s,n.l):dt(""+n,mt,K):K(+n,+t,+e)};var Ua=Q.prototype=new G;Ua.brighter=function(n){return n=Math.pow(.7,arguments.length?n:1),K(this.h,this.s,this.l/n)},Ua.darker=function(n){return n=Math.pow(.7,arguments.length?n:1),K(this.h,this.s,n*this.l)},Ua.rgb=function(){return nt(this.h,this.s,this.l)},Xo.hcl=function(n,t,e){return 1===arguments.length?n instanceof et?tt(n.h,n.c,n.l):n instanceof it?at(n.l,n.a,n.b):at((n=yt((n=Xo.rgb(n)).r,n.g,n.b)).l,n.a,n.b):tt(+n,+t,+e)};var ja=et.prototype=new G;ja.brighter=function(n){return tt(this.h,this.c,Math.min(100,this.l+Ha*(arguments.length?n:1)))},ja.darker=function(n){return tt(this.h,this.c,Math.max(0,this.l-Ha*(arguments.length?n:1)))},ja.rgb=function(){return rt(this.h,this.c,this.l).rgb()},Xo.lab=function(n,t,e){return 
1===arguments.length?n instanceof it?ut(n.l,n.a,n.b):n instanceof et?rt(n.l,n.c,n.h):yt((n=Xo.rgb(n)).r,n.g,n.b):ut(+n,+t,+e)};var Ha=18,Fa=.95047,Oa=1,Ya=1.08883,Ia=it.prototype=new G;Ia.brighter=function(n){return ut(Math.min(100,this.l+Ha*(arguments.length?n:1)),this.a,this.b)},Ia.darker=function(n){return ut(Math.max(0,this.l-Ha*(arguments.length?n:1)),this.a,this.b)},Ia.rgb=function(){return ot(this.l,this.a,this.b)},Xo.rgb=function(n,t,e){return 1===arguments.length?n instanceof pt?gt(n.r,n.g,n.b):dt(""+n,gt,nt):gt(~~n,~~t,~~e)};var Za=pt.prototype=new G;Za.brighter=function(n){n=Math.pow(.7,arguments.length?n:1);var t=this.r,e=this.g,r=this.b,u=30;return t||e||r?(t&&u>t&&(t=u),e&&u>e&&(e=u),r&&u>r&&(r=u),gt(Math.min(255,~~(t/n)),Math.min(255,~~(e/n)),Math.min(255,~~(r/n)))):gt(u,u,u)},Za.darker=function(n){return n=Math.pow(.7,arguments.length?n:1),gt(~~(n*this.r),~~(n*this.g),~~(n*this.b))},Za.hsl=function(){return mt(this.r,this.g,this.b)},Za.toString=function(){return"#"+vt(this.r)+vt(this.g)+vt(this.b)};var 
Va=Xo.map({aliceblue:15792383,antiquewhite:16444375,aqua:65535,aquamarine:8388564,azure:15794175,beige:16119260,bisque:16770244,black:0,blanchedalmond:16772045,blue:255,blueviolet:9055202,brown:10824234,burlywood:14596231,cadetblue:6266528,chartreuse:8388352,chocolate:13789470,coral:16744272,cornflowerblue:6591981,cornsilk:16775388,crimson:14423100,cyan:65535,darkblue:139,darkcyan:35723,darkgoldenrod:12092939,darkgray:11119017,darkgreen:25600,darkgrey:11119017,darkkhaki:12433259,darkmagenta:9109643,darkolivegreen:5597999,darkorange:16747520,darkorchid:10040012,darkred:9109504,darksalmon:15308410,darkseagreen:9419919,darkslateblue:4734347,darkslategray:3100495,darkslategrey:3100495,darkturquoise:52945,darkviolet:9699539,deeppink:16716947,deepskyblue:49151,dimgray:6908265,dimgrey:6908265,dodgerblue:2003199,firebrick:11674146,floralwhite:16775920,forestgreen:2263842,fuchsia:16711935,gainsboro:14474460,ghostwhite:16316671,gold:16766720,goldenrod:14329120,gray:8421504,green:32768,greenyellow:11403055,grey:8421504,honeydew:15794160,hotpink:16738740,indianred:13458524,indigo:4915330,ivory:16777200,khaki:15787660,lavender:15132410,lavenderblush:16773365,lawngreen:8190976,lemonchiffon:16775885,lightblue:11393254,lightcoral:15761536,lightcyan:14745599,lightgoldenrodyellow:16448210,lightgray:13882323,lightgreen:9498256,lightgrey:13882323,lightpink:16758465,lightsalmon:16752762,lightseagreen:2142890,lightskyblue:8900346,lightslategray:7833753,lightslategrey:7833753,lightsteelblue:11584734,lightyellow:16777184,lime:65280,limegreen:3329330,linen:16445670,magenta:16711935,maroon:8388608,mediumaquamarine:6737322,mediumblue:205,mediumorchid:12211667,mediumpurple:9662683,mediumseagreen:3978097,mediumslateblue:8087790,mediumspringgreen:64154,mediumturquoise:4772300,mediumvioletred:13047173,midnightblue:1644912,mintcream:16121850,mistyrose:16770273,moccasin:16770229,navajowhite:16768685,navy:128,oldlace:16643558,olive:8421376,olivedrab:7048739,orange:16753920,orangered:16729344,orchid:
14315734,palegoldenrod:15657130,palegreen:10025880,paleturquoise:11529966,palevioletred:14381203,papayawhip:16773077,peachpuff:16767673,peru:13468991,pink:16761035,plum:14524637,powderblue:11591910,purple:8388736,red:16711680,rosybrown:12357519,royalblue:4286945,saddlebrown:9127187,salmon:16416882,sandybrown:16032864,seagreen:3050327,seashell:16774638,sienna:10506797,silver:12632256,skyblue:8900331,slateblue:6970061,slategray:7372944,slategrey:7372944,snow:16775930,springgreen:65407,steelblue:4620980,tan:13808780,teal:32896,thistle:14204888,tomato:16737095,turquoise:4251856,violet:15631086,wheat:16113331,white:16777215,whitesmoke:16119285,yellow:16776960,yellowgreen:10145074});Va.forEach(function(n,t){Va.set(n,ft(t))}),Xo.functor=_t,Xo.xhr=wt(bt),Xo.dsv=function(n,t){function e(n,e,i){arguments.length<3&&(i=e,e=null);var o=St(n,t,null==e?r:u(e),i);return o.row=function(n){return arguments.length?o.response(null==(e=n)?r:u(n)):e},o}function r(n){return e.parse(n.responseText)}function u(n){return function(t){return e.parse(t.responseText,n)}}function i(t){return t.map(o).join(n)}function o(n){return a.test(n)?'"'+n.replace(/\"/g,'""')+'"':n}var a=new RegExp('["'+n+"\n]"),c=n.charCodeAt(0);return e.parse=function(n,t){var r;return e.parseRows(n,function(n,e){if(r)return r(n,e-1);var u=new Function("d","return {"+n.map(function(n,t){return JSON.stringify(n)+": d["+t+"]"}).join(",")+"}");r=t?function(n,e){return t(u(n),e)}:u})},e.parseRows=function(n,t){function e(){if(l>=s)return o;if(u)return u=!1,i;var t=l;if(34===n.charCodeAt(t)){for(var e=t;e++<s;)if(34===n.charCodeAt(e)){if(34!==n.charCodeAt(e+1))break;++e}l=e+2;var r=n.charCodeAt(e+1);return 13===r?(u=!0,10===n.charCodeAt(e+2)&&++l):10===r&&(u=!0),n.substring(t+1,e).replace(/""/g,'"')}for(;s>l;){var r=n.charCodeAt(l++),a=1;if(10===r)u=!0;else if(13===r)u=!0,10===n.charCodeAt(l)&&(++l,++a);else if(r!==c)continue;return n.substring(t,l-a)}return n.substring(t)}for(var 
r,u,i={},o={},a=[],s=n.length,l=0,f=0;(r=e())!==o;){for(var h=[];r!==i&&r!==o;)h.push(r),r=e();(!t||(h=t(h,f++)))&&a.push(h)}return a},e.format=function(t){if(Array.isArray(t[0]))return e.formatRows(t);var r=new l,u=[];return t.forEach(function(n){for(var t in n)r.has(t)||u.push(r.add(t))}),[u.map(o).join(n)].concat(t.map(function(t){return u.map(function(n){return o(t[n])}).join(n)})).join("\n")},e.formatRows=function(n){return n.map(i).join("\n")},e},Xo.csv=Xo.dsv(",","text/csv"),Xo.tsv=Xo.dsv(" ","text/tab-separated-values");var Xa,$a,Ba,Wa,Ja,Ga=Go[h(Go,"requestAnimationFrame")]||function(n){setTimeout(n,17)};Xo.timer=function(n,t,e){var r=arguments.length;2>r&&(t=0),3>r&&(e=Date.now());var u=e+t,i={c:n,t:u,f:!1,n:null};$a?$a.n=i:Xa=i,$a=i,Ba||(Wa=clearTimeout(Wa),Ba=1,Ga(Et))},Xo.timer.flush=function(){At(),Ct()},Xo.round=function(n,t){return t?Math.round(n*(t=Math.pow(10,t)))/t:Math.round(n)};var Ka=["y","z","a","f","p","n","\xb5","m","","k","M","G","T","P","E","Z","Y"].map(Lt);Xo.formatPrefix=function(n,t){var e=0;return n&&(0>n&&(n*=-1),t&&(n=Xo.round(n,Nt(n,t))),e=1+Math.floor(1e-12+Math.log(n)/Math.LN10),e=Math.max(-24,Math.min(24,3*Math.floor((0>=e?e+1:e-1)/3)))),Ka[8+e/3]};var Qa=/(?:([^{])?([<>=^]))?([+\- ])?([$#])?(0)?(\d+)?(,)?(\.-?\d+)?([a-z%])?/i,nc=Xo.map({b:function(n){return n.toString(2)},c:function(n){return String.fromCharCode(n)},o:function(n){return n.toString(8)},x:function(n){return n.toString(16)},X:function(n){return n.toString(16).toUpperCase()},g:function(n,t){return n.toPrecision(t)},e:function(n,t){return n.toExponential(t)},f:function(n,t){return n.toFixed(t)},r:function(n,t){return(n=Xo.round(n,Nt(n,t))).toFixed(Math.max(0,Math.min(20,Nt(n*(1+1e-15),t))))}}),tc=Xo.time={},ec=Date;zt.prototype={getDate:function(){return this._.getUTCDate()},getDay:function(){return this._.getUTCDay()},getFullYear:function(){return this._.getUTCFullYear()},getHours:function(){return this._.getUTCHours()},getMilliseconds:function(){return 
this._.getUTCMilliseconds()},getMinutes:function(){return this._.getUTCMinutes()},getMonth:function(){return this._.getUTCMonth()},getSeconds:function(){return this._.getUTCSeconds()},getTime:function(){return this._.getTime()},getTimezoneOffset:function(){return 0},valueOf:function(){return this._.valueOf()},setDate:function(){rc.setUTCDate.apply(this._,arguments)},setDay:function(){rc.setUTCDay.apply(this._,arguments)},setFullYear:function(){rc.setUTCFullYear.apply(this._,arguments)},setHours:function(){rc.setUTCHours.apply(this._,arguments)},setMilliseconds:function(){rc.setUTCMilliseconds.apply(this._,arguments)},setMinutes:function(){rc.setUTCMinutes.apply(this._,arguments)},setMonth:function(){rc.setUTCMonth.apply(this._,arguments)},setSeconds:function(){rc.setUTCSeconds.apply(this._,arguments)},setTime:function(){rc.setTime.apply(this._,arguments)}};var rc=Date.prototype;tc.year=Rt(function(n){return n=tc.day(n),n.setMonth(0,1),n},function(n,t){n.setFullYear(n.getFullYear()+t)},function(n){return n.getFullYear()}),tc.years=tc.year.range,tc.years.utc=tc.year.utc.range,tc.day=Rt(function(n){var t=new ec(2e3,0);return t.setFullYear(n.getFullYear(),n.getMonth(),n.getDate()),t},function(n,t){n.setDate(n.getDate()+t)},function(n){return n.getDate()-1}),tc.days=tc.day.range,tc.days.utc=tc.day.utc.range,tc.dayOfYear=function(n){var t=tc.year(n);return Math.floor((n-t-6e4*(n.getTimezoneOffset()-t.getTimezoneOffset()))/864e5)},["sunday","monday","tuesday","wednesday","thursday","friday","saturday"].forEach(function(n,t){t=7-t;var e=tc[n]=Rt(function(n){return(n=tc.day(n)).setDate(n.getDate()-(n.getDay()+t)%7),n},function(n,t){n.setDate(n.getDate()+7*Math.floor(t))},function(n){var e=tc.year(n).getDay();return Math.floor((tc.dayOfYear(n)+(e+t)%7)/7)-(e!==t)});tc[n+"s"]=e.range,tc[n+"s"].utc=e.utc.range,tc[n+"OfYear"]=function(n){var e=tc.year(n).getDay();return 
Math.floor((tc.dayOfYear(n)+(e+t)%7)/7)}}),tc.week=tc.sunday,tc.weeks=tc.sunday.range,tc.weeks.utc=tc.sunday.utc.range,tc.weekOfYear=tc.sundayOfYear;var uc={"-":"",_:" ",0:"0"},ic=/^\s*\d+/,oc=/^%/;Xo.locale=function(n){return{numberFormat:Tt(n),timeFormat:Pt(n)}};var ac=Xo.locale({decimal:".",thousands:",",grouping:[3],currency:["$",""],dateTime:"%a %b %e %X %Y",date:"%m/%d/%Y",time:"%H:%M:%S",periods:["AM","PM"],days:["Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"],shortDays:["Sun","Mon","Tue","Wed","Thu","Fri","Sat"],months:["January","February","March","April","May","June","July","August","September","October","November","December"],shortMonths:["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"]});Xo.format=ac.numberFormat,Xo.geo={},re.prototype={s:0,t:0,add:function(n){ue(n,this.t,cc),ue(cc.s,this.s,this),this.s?this.t+=cc.t:this.s=cc.t},reset:function(){this.s=this.t=0},valueOf:function(){return this.s}};var cc=new re;Xo.geo.stream=function(n,t){n&&sc.hasOwnProperty(n.type)?sc[n.type](n,t):ie(n,t)};var sc={Feature:function(n,t){ie(n.geometry,t)},FeatureCollection:function(n,t){for(var e=n.features,r=-1,u=e.length;++r<u;)ie(e[r].geometry,t)}},lc={Sphere:function(n,t){t.sphere()},Point:function(n,t){n=n.coordinates,t.point(n[0],n[1],n[2])},MultiPoint:function(n,t){for(var e=n.coordinates,r=-1,u=e.length;++r<u;)n=e[r],t.point(n[0],n[1],n[2])},LineString:function(n,t){oe(n.coordinates,t,0)},MultiLineString:function(n,t){for(var e=n.coordinates,r=-1,u=e.length;++r<u;)oe(e[r],t,0)},Polygon:function(n,t){ae(n.coordinates,t)},MultiPolygon:function(n,t){for(var e=n.coordinates,r=-1,u=e.length;++r<u;)ae(e[r],t)},GeometryCollection:function(n,t){for(var e=n.geometries,r=-1,u=e.length;++r<u;)ie(e[r],t)}};Xo.geo.area=function(n){return fc=0,Xo.geo.stream(n,gc),fc};var fc,hc=new re,gc={sphere:function(){fc+=4*Sa},point:g,lineStart:g,lineEnd:g,polygonStart:function(){hc.reset(),gc.lineStart=ce},polygonEnd:function(){var 
n=2*hc;fc+=0>n?4*Sa+n:n,gc.lineStart=gc.lineEnd=gc.point=g}};Xo.geo.bounds=function(){function n(n,t){x.push(M=[l=n,h=n]),f>t&&(f=t),t>g&&(g=t)}function t(t,e){var r=se([t*Na,e*Na]);if(m){var u=fe(m,r),i=[u[1],-u[0],0],o=fe(i,u);pe(o),o=ve(o);var c=t-p,s=c>0?1:-1,v=o[0]*La*s,d=oa(c)>180;if(d^(v>s*p&&s*t>v)){var y=o[1]*La;y>g&&(g=y)}else if(v=(v+360)%360-180,d^(v>s*p&&s*t>v)){var y=-o[1]*La;f>y&&(f=y)}else f>e&&(f=e),e>g&&(g=e);d?p>t?a(l,t)>a(l,h)&&(h=t):a(t,h)>a(l,h)&&(l=t):h>=l?(l>t&&(l=t),t>h&&(h=t)):t>p?a(l,t)>a(l,h)&&(h=t):a(t,h)>a(l,h)&&(l=t)}else n(t,e);m=r,p=t}function e(){_.point=t}function r(){M[0]=l,M[1]=h,_.point=n,m=null}function u(n,e){if(m){var r=n-p;y+=oa(r)>180?r+(r>0?360:-360):r}else v=n,d=e;gc.point(n,e),t(n,e)}function i(){gc.lineStart()}function o(){u(v,d),gc.lineEnd(),oa(y)>Aa&&(l=-(h=180)),M[0]=l,M[1]=h,m=null}function a(n,t){return(t-=n)<0?t+360:t}function c(n,t){return n[0]-t[0]}function s(n,t){return t[0]<=t[1]?t[0]<=n&&n<=t[1]:n<t[0]||t[1]<n}var l,f,h,g,p,v,d,m,y,x,M,_={point:n,lineStart:e,lineEnd:r,polygonStart:function(){_.point=u,_.lineStart=i,_.lineEnd=o,y=0,gc.polygonStart()},polygonEnd:function(){gc.polygonEnd(),_.point=n,_.lineStart=e,_.lineEnd=r,0>hc?(l=-(h=180),f=-(g=90)):y>Aa?g=90:-Aa>y&&(f=-90),M[0]=l,M[1]=h}};return function(n){g=h=-(l=f=1/0),x=[],Xo.geo.stream(n,_);var t=x.length;if(t){x.sort(c);for(var e,r=1,u=x[0],i=[u];t>r;++r)e=x[r],s(e[0],u)||s(e[1],u)?(a(u[0],e[1])>a(u[0],u[1])&&(u[1]=e[1]),a(e[0],u[1])>a(u[0],u[1])&&(u[0]=e[0])):i.push(u=e);for(var o,e,p=-1/0,t=i.length-1,r=0,u=i[t];t>=r;u=e,++r)e=i[r],(o=a(u[1],e[0]))>p&&(p=o,l=e[0],h=u[1])}return x=M=null,1/0===l||1/0===f?[[0/0,0/0],[0/0,0/0]]:[[l,f],[h,g]]}}(),Xo.geo.centroid=function(n){pc=vc=dc=mc=yc=xc=Mc=_c=bc=wc=Sc=0,Xo.geo.stream(n,kc);var t=bc,e=wc,r=Sc,u=t*t+e*e+r*r;return Ca>u&&(t=xc,e=Mc,r=_c,Aa>vc&&(t=dc,e=mc,r=yc),u=t*t+e*e+r*r,Ca>u)?[0/0,0/0]:[Math.atan2(e,t)*La,X(r/Math.sqrt(u))*La]};var 
pc,vc,dc,mc,yc,xc,Mc,_c,bc,wc,Sc,kc={sphere:g,point:me,lineStart:xe,lineEnd:Me,polygonStart:function(){kc.lineStart=_e},polygonEnd:function(){kc.lineStart=xe}},Ec=Ee(be,Te,ze,[-Sa,-Sa/2]),Ac=1e9;Xo.geo.clipExtent=function(){var n,t,e,r,u,i,o={stream:function(n){return u&&(u.valid=!1),u=i(n),u.valid=!0,u},extent:function(a){return arguments.length?(i=Pe(n=+a[0][0],t=+a[0][1],e=+a[1][0],r=+a[1][1]),u&&(u.valid=!1,u=null),o):[[n,t],[e,r]]}};return o.extent([[0,0],[960,500]])},(Xo.geo.conicEqualArea=function(){return je(He)}).raw=He,Xo.geo.albers=function(){return Xo.geo.conicEqualArea().rotate([96,0]).center([-.6,38.7]).parallels([29.5,45.5]).scale(1070)},Xo.geo.albersUsa=function(){function n(n){var i=n[0],o=n[1];return t=null,e(i,o),t||(r(i,o),t)||u(i,o),t}var t,e,r,u,i=Xo.geo.albers(),o=Xo.geo.conicEqualArea().rotate([154,0]).center([-2,58.5]).parallels([55,65]),a=Xo.geo.conicEqualArea().rotate([157,0]).center([-3,19.9]).parallels([8,18]),c={point:function(n,e){t=[n,e]}};return n.invert=function(n){var t=i.scale(),e=i.translate(),r=(n[0]-e[0])/t,u=(n[1]-e[1])/t;return(u>=.12&&.234>u&&r>=-.425&&-.214>r?o:u>=.166&&.234>u&&r>=-.214&&-.115>r?a:i).invert(n)},n.stream=function(n){var t=i.stream(n),e=o.stream(n),r=a.stream(n);return{point:function(n,u){t.point(n,u),e.point(n,u),r.point(n,u)},sphere:function(){t.sphere(),e.sphere(),r.sphere()},lineStart:function(){t.lineStart(),e.lineStart(),r.lineStart()},lineEnd:function(){t.lineEnd(),e.lineEnd(),r.lineEnd()},polygonStart:function(){t.polygonStart(),e.polygonStart(),r.polygonStart()},polygonEnd:function(){t.polygonEnd(),e.polygonEnd(),r.polygonEnd()}}},n.precision=function(t){return arguments.length?(i.precision(t),o.precision(t),a.precision(t),n):i.precision()},n.scale=function(t){return arguments.length?(i.scale(t),o.scale(.35*t),a.scale(t),n.translate(i.translate())):i.scale()},n.translate=function(t){if(!arguments.length)return i.translate();var s=i.scale(),l=+t[0],f=+t[1];return 
e=i.translate(t).clipExtent([[l-.455*s,f-.238*s],[l+.455*s,f+.238*s]]).stream(c).point,r=o.translate([l-.307*s,f+.201*s]).clipExtent([[l-.425*s+Aa,f+.12*s+Aa],[l-.214*s-Aa,f+.234*s-Aa]]).stream(c).point,u=a.translate([l-.205*s,f+.212*s]).clipExtent([[l-.214*s+Aa,f+.166*s+Aa],[l-.115*s-Aa,f+.234*s-Aa]]).stream(c).point,n},n.scale(1070)};var Cc,Nc,Lc,Tc,qc,zc,Rc={point:g,lineStart:g,lineEnd:g,polygonStart:function(){Nc=0,Rc.lineStart=Fe},polygonEnd:function(){Rc.lineStart=Rc.lineEnd=Rc.point=g,Cc+=oa(Nc/2)}},Dc={point:Oe,lineStart:g,lineEnd:g,polygonStart:g,polygonEnd:g},Pc={point:Ze,lineStart:Ve,lineEnd:Xe,polygonStart:function(){Pc.lineStart=$e},polygonEnd:function(){Pc.point=Ze,Pc.lineStart=Ve,Pc.lineEnd=Xe}};Xo.geo.path=function(){function n(n){return n&&("function"==typeof a&&i.pointRadius(+a.apply(this,arguments)),o&&o.valid||(o=u(i)),Xo.geo.stream(n,o)),i.result()}function t(){return o=null,n}var e,r,u,i,o,a=4.5;return n.area=function(n){return Cc=0,Xo.geo.stream(n,u(Rc)),Cc},n.centroid=function(n){return dc=mc=yc=xc=Mc=_c=bc=wc=Sc=0,Xo.geo.stream(n,u(Pc)),Sc?[bc/Sc,wc/Sc]:_c?[xc/_c,Mc/_c]:yc?[dc/yc,mc/yc]:[0/0,0/0]},n.bounds=function(n){return qc=zc=-(Lc=Tc=1/0),Xo.geo.stream(n,u(Dc)),[[Lc,Tc],[qc,zc]]},n.projection=function(n){return arguments.length?(u=(e=n)?n.stream||Je(n):bt,t()):e},n.context=function(n){return arguments.length?(i=null==(r=n)?new Ye:new Be(n),"function"!=typeof a&&i.pointRadius(a),t()):r},n.pointRadius=function(t){return arguments.length?(a="function"==typeof t?t:(i.pointRadius(+t),+t),n):a},n.projection(Xo.geo.albersUsa()).context(null)},Xo.geo.transform=function(n){return{stream:function(t){var e=new Ge(t);for(var r in n)e[r]=n[r];return 
e}}},Ge.prototype={point:function(n,t){this.stream.point(n,t)},sphere:function(){this.stream.sphere()},lineStart:function(){this.stream.lineStart()},lineEnd:function(){this.stream.lineEnd()},polygonStart:function(){this.stream.polygonStart()},polygonEnd:function(){this.stream.polygonEnd()}},Xo.geo.projection=Qe,Xo.geo.projectionMutator=nr,(Xo.geo.equirectangular=function(){return Qe(er)}).raw=er.invert=er,Xo.geo.rotation=function(n){function t(t){return t=n(t[0]*Na,t[1]*Na),t[0]*=La,t[1]*=La,t}return n=ur(n[0]%360*Na,n[1]*Na,n.length>2?n[2]*Na:0),t.invert=function(t){return t=n.invert(t[0]*Na,t[1]*Na),t[0]*=La,t[1]*=La,t},t},rr.invert=er,Xo.geo.circle=function(){function n(){var n="function"==typeof r?r.apply(this,arguments):r,t=ur(-n[0]*Na,-n[1]*Na,0).invert,u=[];return e(null,null,1,{point:function(n,e){u.push(n=t(n,e)),n[0]*=La,n[1]*=La}}),{type:"Polygon",coordinates:[u]}}var t,e,r=[0,0],u=6;return n.origin=function(t){return arguments.length?(r=t,n):r},n.angle=function(r){return arguments.length?(e=cr((t=+r)*Na,u*Na),n):t},n.precision=function(r){return arguments.length?(e=cr(t*Na,(u=+r)*Na),n):u},n.angle(90)},Xo.geo.distance=function(n,t){var e,r=(t[0]-n[0])*Na,u=n[1]*Na,i=t[1]*Na,o=Math.sin(r),a=Math.cos(r),c=Math.sin(u),s=Math.cos(u),l=Math.sin(i),f=Math.cos(i);return Math.atan2(Math.sqrt((e=f*o)*e+(e=s*l-c*f*a)*e),c*l+s*f*a)},Xo.geo.graticule=function(){function n(){return{type:"MultiLineString",coordinates:t()}}function t(){return Xo.range(Math.ceil(i/d)*d,u,d).map(h).concat(Xo.range(Math.ceil(s/m)*m,c,m).map(g)).concat(Xo.range(Math.ceil(r/p)*p,e,p).filter(function(n){return oa(n%d)>Aa}).map(l)).concat(Xo.range(Math.ceil(a/v)*v,o,v).filter(function(n){return oa(n%m)>Aa}).map(f))}var e,r,u,i,o,a,c,s,l,f,h,g,p=10,v=p,d=90,m=360,y=2.5;return n.lines=function(){return 
t().map(function(n){return{type:"LineString",coordinates:n}})},n.outline=function(){return{type:"Polygon",coordinates:[h(i).concat(g(c).slice(1),h(u).reverse().slice(1),g(s).reverse().slice(1))]}},n.extent=function(t){return arguments.length?n.majorExtent(t).minorExtent(t):n.minorExtent()},n.majorExtent=function(t){return arguments.length?(i=+t[0][0],u=+t[1][0],s=+t[0][1],c=+t[1][1],i>u&&(t=i,i=u,u=t),s>c&&(t=s,s=c,c=t),n.precision(y)):[[i,s],[u,c]]},n.minorExtent=function(t){return arguments.length?(r=+t[0][0],e=+t[1][0],a=+t[0][1],o=+t[1][1],r>e&&(t=r,r=e,e=t),a>o&&(t=a,a=o,o=t),n.precision(y)):[[r,a],[e,o]]},n.step=function(t){return arguments.length?n.majorStep(t).minorStep(t):n.minorStep()},n.majorStep=function(t){return arguments.length?(d=+t[0],m=+t[1],n):[d,m]},n.minorStep=function(t){return arguments.length?(p=+t[0],v=+t[1],n):[p,v]},n.precision=function(t){return arguments.length?(y=+t,l=lr(a,o,90),f=fr(r,e,y),h=lr(s,c,90),g=fr(i,u,y),n):y},n.majorExtent([[-180,-90+Aa],[180,90-Aa]]).minorExtent([[-180,-80-Aa],[180,80+Aa]])},Xo.geo.greatArc=function(){function n(){return{type:"LineString",coordinates:[t||r.apply(this,arguments),e||u.apply(this,arguments)]}}var t,e,r=hr,u=gr;return n.distance=function(){return Xo.geo.distance(t||r.apply(this,arguments),e||u.apply(this,arguments))},n.source=function(e){return arguments.length?(r=e,t="function"==typeof e?null:e,n):r},n.target=function(t){return arguments.length?(u=t,e="function"==typeof t?null:t,n):u},n.precision=function(){return arguments.length?n:0},n},Xo.geo.interpolate=function(n,t){return pr(n[0]*Na,n[1]*Na,t[0]*Na,t[1]*Na)},Xo.geo.length=function(n){return Uc=0,Xo.geo.stream(n,jc),Uc};var Uc,jc={sphere:g,point:g,lineStart:vr,lineEnd:g,polygonStart:g,polygonEnd:g},Hc=dr(function(n){return Math.sqrt(2/(1+n))},function(n){return 2*Math.asin(n/2)});(Xo.geo.azimuthalEqualArea=function(){return Qe(Hc)}).raw=Hc;var Fc=dr(function(n){var t=Math.acos(n);return 
t&&t/Math.sin(t)},bt);(Xo.geo.azimuthalEquidistant=function(){return Qe(Fc)}).raw=Fc,(Xo.geo.conicConformal=function(){return je(mr)}).raw=mr,(Xo.geo.conicEquidistant=function(){return je(yr)}).raw=yr;var Oc=dr(function(n){return 1/n},Math.atan);(Xo.geo.gnomonic=function(){return Qe(Oc)}).raw=Oc,xr.invert=function(n,t){return[n,2*Math.atan(Math.exp(t))-Ea]},(Xo.geo.mercator=function(){return Mr(xr)}).raw=xr;var Yc=dr(function(){return 1},Math.asin);(Xo.geo.orthographic=function(){return Qe(Yc)}).raw=Yc;var Ic=dr(function(n){return 1/(1+n)},function(n){return 2*Math.atan(n)});(Xo.geo.stereographic=function(){return Qe(Ic)}).raw=Ic,_r.invert=function(n,t){return[-t,2*Math.atan(Math.exp(n))-Ea]},(Xo.geo.transverseMercator=function(){var n=Mr(_r),t=n.center,e=n.rotate;return n.center=function(n){return n?t([-n[1],n[0]]):(n=t(),[-n[1],n[0]])},n.rotate=function(n){return n?e([n[0],n[1],n.length>2?n[2]+90:90]):(n=e(),[n[0],n[1],n[2]-90])},n.rotate([0,0])}).raw=_r,Xo.geom={},Xo.geom.hull=function(n){function t(n){if(n.length<3)return[];var t,u=_t(e),i=_t(r),o=n.length,a=[],c=[];for(t=0;o>t;t++)a.push([+u.call(this,n[t],t),+i.call(this,n[t],t),t]);for(a.sort(kr),t=0;o>t;t++)c.push([a[t][0],-a[t][1]]);var s=Sr(a),l=Sr(c),f=l[0]===s[0],h=l[l.length-1]===s[s.length-1],g=[];for(t=s.length-1;t>=0;--t)g.push(n[a[s[t]][2]]);for(t=+f;t<l.length-h;++t)g.push(n[a[l[t]][2]]);return g}var e=br,r=wr;return arguments.length?t(n):(t.x=function(n){return arguments.length?(e=n,t):e},t.y=function(n){return arguments.length?(r=n,t):r},t)},Xo.geom.polygon=function(n){return fa(n,Zc),n};var Zc=Xo.geom.polygon.prototype=[];Zc.area=function(){for(var n,t=-1,e=this.length,r=this[e-1],u=0;++t<e;)n=r,r=this[t],u+=n[1]*r[0]-n[0]*r[1];return.5*u},Zc.centroid=function(n){var t,e,r=-1,u=this.length,i=0,o=0,a=this[u-1];for(arguments.length||(n=-1/(6*this.area()));++r<u;)t=a,a=this[r],e=t[0]*a[1]-a[0]*t[1],i+=(t[0]+a[0])*e,o+=(t[1]+a[1])*e;return[i*n,o*n]},Zc.clip=function(n){for(var 
t,e,r,u,i,o,a=Cr(n),c=-1,s=this.length-Cr(this),l=this[s-1];++c<s;){for(t=n.slice(),n.length=0,u=this[c],i=t[(r=t.length-a)-1],e=-1;++e<r;)o=t[e],Er(o,l,u)?(Er(i,l,u)||n.push(Ar(i,o,l,u)),n.push(o)):Er(i,l,u)&&n.push(Ar(i,o,l,u)),i=o;a&&n.push(n[0]),l=u}return n};var Vc,Xc,$c,Bc,Wc,Jc=[],Gc=[];Pr.prototype.prepare=function(){for(var n,t=this.edges,e=t.length;e--;)n=t[e].edge,n.b&&n.a||t.splice(e,1);return t.sort(jr),t.length},Br.prototype={start:function(){return this.edge.l===this.site?this.edge.a:this.edge.b},end:function(){return this.edge.l===this.site?this.edge.b:this.edge.a}},Wr.prototype={insert:function(n,t){var e,r,u;if(n){if(t.P=n,t.N=n.N,n.N&&(n.N.P=t),n.N=t,n.R){for(n=n.R;n.L;)n=n.L;n.L=t}else n.R=t;e=n}else this._?(n=Qr(this._),t.P=null,t.N=n,n.P=n.L=t,e=n):(t.P=t.N=null,this._=t,e=null);for(t.L=t.R=null,t.U=e,t.C=!0,n=t;e&&e.C;)r=e.U,e===r.L?(u=r.R,u&&u.C?(e.C=u.C=!1,r.C=!0,n=r):(n===e.R&&(Gr(this,e),n=e,e=n.U),e.C=!1,r.C=!0,Kr(this,r))):(u=r.L,u&&u.C?(e.C=u.C=!1,r.C=!0,n=r):(n===e.L&&(Kr(this,e),n=e,e=n.U),e.C=!1,r.C=!0,Gr(this,r))),e=n.U;this._.C=!1},remove:function(n){n.N&&(n.N.P=n.P),n.P&&(n.P.N=n.N),n.N=n.P=null;var t,e,r,u=n.U,i=n.L,o=n.R;if(e=i?o?Qr(o):i:o,u?u.L===n?u.L=e:u.R=e:this._=e,i&&o?(r=e.C,e.C=n.C,e.L=i,i.U=e,e!==o?(u=e.U,e.U=n.U,n=e.R,u.L=n,e.R=o,o.U=e):(e.U=u,u=e,n=e.R)):(r=n.C,n=e),n&&(n.U=u),!r){if(n&&n.C)return n.C=!1,void 0;do{if(n===this._)break;if(n===u.L){if(t=u.R,t.C&&(t.C=!1,u.C=!0,Gr(this,u),t=u.R),t.L&&t.L.C||t.R&&t.R.C){t.R&&t.R.C||(t.L.C=!1,t.C=!0,Kr(this,t),t=u.R),t.C=u.C,u.C=t.R.C=!1,Gr(this,u),n=this._;break}}else if(t=u.L,t.C&&(t.C=!1,u.C=!0,Kr(this,u),t=u.L),t.L&&t.L.C||t.R&&t.R.C){t.L&&t.L.C||(t.R.C=!1,t.C=!0,Gr(this,t),t=u.L),t.C=u.C,u.C=t.L.C=!1,Kr(this,u),n=this._;break}t.C=!0,n=u,u=u.U}while(!n.C);n&&(n.C=!1)}}},Xo.geom.voronoi=function(n){function t(n){var t=new Array(n.length),r=a[0][0],u=a[0][1],i=a[1][0],o=a[1][1];return nu(e(n),a).cells.forEach(function(e,a){var 
c=e.edges,s=e.site,l=t[a]=c.length?c.map(function(n){var t=n.start();return[t.x,t.y]}):s.x>=r&&s.x<=i&&s.y>=u&&s.y<=o?[[r,o],[i,o],[i,u],[r,u]]:[];l.point=n[a]}),t}function e(n){return n.map(function(n,t){return{x:Math.round(i(n,t)/Aa)*Aa,y:Math.round(o(n,t)/Aa)*Aa,i:t}})}var r=br,u=wr,i=r,o=u,a=Kc;return n?t(n):(t.links=function(n){return nu(e(n)).edges.filter(function(n){return n.l&&n.r}).map(function(t){return{source:n[t.l.i],target:n[t.r.i]}})},t.triangles=function(n){var t=[];return nu(e(n)).cells.forEach(function(e,r){for(var u,i,o=e.site,a=e.edges.sort(jr),c=-1,s=a.length,l=a[s-1].edge,f=l.l===o?l.r:l.l;++c<s;)u=l,i=f,l=a[c].edge,f=l.l===o?l.r:l.l,r<i.i&&r<f.i&&eu(o,i,f)<0&&t.push([n[r],n[i.i],n[f.i]])}),t},t.x=function(n){return arguments.length?(i=_t(r=n),t):r},t.y=function(n){return arguments.length?(o=_t(u=n),t):u},t.clipExtent=function(n){return arguments.length?(a=null==n?Kc:n,t):a===Kc?null:a},t.size=function(n){return arguments.length?t.clipExtent(n&&[[0,0],n]):a===Kc?null:a&&a[1]},t)};var Kc=[[-1e6,-1e6],[1e6,1e6]];Xo.geom.delaunay=function(n){return Xo.geom.voronoi().triangles(n)},Xo.geom.quadtree=function(n,t,e,r,u){function i(n){function i(n,t,e,r,u,i,o,a){if(!isNaN(e)&&!isNaN(r))if(n.leaf){var c=n.x,l=n.y;if(null!=c)if(oa(c-e)+oa(l-r)<.01)s(n,t,e,r,u,i,o,a);else{var f=n.point;n.x=n.y=n.point=null,s(n,f,c,l,u,i,o,a),s(n,t,e,r,u,i,o,a)}else n.x=e,n.y=r,n.point=t}else s(n,t,e,r,u,i,o,a)}function s(n,t,e,r,u,o,a,c){var s=.5*(u+a),l=.5*(o+c),f=e>=s,h=r>=l,g=(h<<1)+f;n.leaf=!1,n=n.nodes[g]||(n.nodes[g]=iu()),f?u=s:a=s,h?o=l:c=l,i(n,t,e,r,u,o,a,c)}var l,f,h,g,p,v,d,m,y,x=_t(a),M=_t(c);if(null!=t)v=t,d=e,m=r,y=u;else if(m=y=-(v=d=1/0),f=[],h=[],p=n.length,o)for(g=0;p>g;++g)l=n[g],l.x<v&&(v=l.x),l.y<d&&(d=l.y),l.x>m&&(m=l.x),l.y>y&&(y=l.y),f.push(l.x),h.push(l.y);else for(g=0;p>g;++g){var _=+x(l=n[g],g),b=+M(l,g);v>_&&(v=_),d>b&&(d=b),_>m&&(m=_),b>y&&(y=b),f.push(_),h.push(b)}var w=m-v,S=y-d;w>S?y=d+w:m=v+S;var 
k=iu();if(k.add=function(n){i(k,n,+x(n,++g),+M(n,g),v,d,m,y)},k.visit=function(n){ou(n,k,v,d,m,y)},g=-1,null==t){for(;++g<p;)i(k,n[g],f[g],h[g],v,d,m,y);--g}else n.forEach(k.add);return f=h=n=l=null,k}var o,a=br,c=wr;return(o=arguments.length)?(a=ru,c=uu,3===o&&(u=e,r=t,e=t=0),i(n)):(i.x=function(n){return arguments.length?(a=n,i):a},i.y=function(n){return arguments.length?(c=n,i):c},i.extent=function(n){return arguments.length?(null==n?t=e=r=u=null:(t=+n[0][0],e=+n[0][1],r=+n[1][0],u=+n[1][1]),i):null==t?null:[[t,e],[r,u]]},i.size=function(n){return arguments.length?(null==n?t=e=r=u=null:(t=e=0,r=+n[0],u=+n[1]),i):null==t?null:[r-t,u-e]},i)},Xo.interpolateRgb=au,Xo.interpolateObject=cu,Xo.interpolateNumber=su,Xo.interpolateString=lu;var Qc=/[-+]?(?:\d+\.?\d*|\.?\d+)(?:[eE][-+]?\d+)?/g;Xo.interpolate=fu,Xo.interpolators=[function(n,t){var e=typeof t;return("string"===e?Va.has(t)||/^(#|rgb\(|hsl\()/.test(t)?au:lu:t instanceof G?au:"object"===e?Array.isArray(t)?hu:cu:su)(n,t)}],Xo.interpolateArray=hu;var ns=function(){return bt},ts=Xo.map({linear:ns,poly:xu,quad:function(){return du},cubic:function(){return mu},sin:function(){return Mu},exp:function(){return _u},circle:function(){return bu},elastic:wu,back:Su,bounce:function(){return ku}}),es=Xo.map({"in":bt,out:pu,"in-out":vu,"out-in":function(n){return vu(pu(n))}});Xo.ease=function(n){var t=n.indexOf("-"),e=t>=0?n.substring(0,t):n,r=t>=0?n.substring(t+1):"in";return e=ts.get(e)||ns,r=es.get(r)||bt,gu(r(e.apply(null,$o.call(arguments,1))))},Xo.interpolateHcl=Eu,Xo.interpolateHsl=Au,Xo.interpolateLab=Cu,Xo.interpolateRound=Nu,Xo.transform=function(n){var t=Wo.createElementNS(Xo.ns.prefix.svg,"g");return(Xo.transform=function(n){if(null!=n){t.setAttribute("transform",n);var e=t.transform.baseVal.consolidate()}return new Lu(e?e.matrix:rs)})(n)},Lu.prototype.toString=function(){return"translate("+this.translate+")rotate("+this.rotate+")skewX("+this.skew+")scale("+this.scale+")"};var 
rs={a:1,b:0,c:0,d:1,e:0,f:0};Xo.interpolateTransform=Ru,Xo.layout={},Xo.layout.bundle=function(){return function(n){for(var t=[],e=-1,r=n.length;++e<r;)t.push(Uu(n[e]));return t}},Xo.layout.chord=function(){function n(){var n,s,f,h,g,p={},v=[],d=Xo.range(i),m=[];for(e=[],r=[],n=0,h=-1;++h<i;){for(s=0,g=-1;++g<i;)s+=u[h][g];v.push(s),m.push(Xo.range(i)),n+=s}for(o&&d.sort(function(n,t){return o(v[n],v[t])}),a&&m.forEach(function(n,t){n.sort(function(n,e){return a(u[t][n],u[t][e])})}),n=(ka-l*i)/n,s=0,h=-1;++h<i;){for(f=s,g=-1;++g<i;){var y=d[h],x=m[y][g],M=u[y][x],_=s,b=s+=M*n;p[y+"-"+x]={index:y,subindex:x,startAngle:_,endAngle:b,value:M}}r[y]={index:y,startAngle:f,endAngle:s,value:(s-f)/n},s+=l}for(h=-1;++h<i;)for(g=h-1;++g<i;){var w=p[h+"-"+g],S=p[g+"-"+h];(w.value||S.value)&&e.push(w.value<S.value?{source:S,target:w}:{source:w,target:S})}c&&t()}function t(){e.sort(function(n,t){return c((n.source.value+n.target.value)/2,(t.source.value+t.target.value)/2)})}var e,r,u,i,o,a,c,s={},l=0;return s.matrix=function(n){return arguments.length?(i=(u=n)&&u.length,e=r=null,s):u},s.padding=function(n){return arguments.length?(l=n,e=r=null,s):l},s.sortGroups=function(n){return arguments.length?(o=n,e=r=null,s):o},s.sortSubgroups=function(n){return arguments.length?(a=n,e=null,s):a},s.sortChords=function(n){return arguments.length?(c=n,e&&t(),s):c},s.chords=function(){return e||n(),e},s.groups=function(){return r||n(),r},s},Xo.layout.force=function(){function n(n){return function(t,e,r,u){if(t.point!==n){var i=t.cx-n.x,o=t.cy-n.y,a=u-e,c=i*i+o*o;if(c>a*a/d){if(p>c){var s=t.charge/c;n.px-=i*s,n.py-=o*s}return!0}if(t.point&&c&&p>c){var s=t.pointCharge/c;n.px-=i*s,n.py-=o*s}}return!t.charge}}function t(n){n.px=Xo.event.x,n.py=Xo.event.y,a.resume()}var e,r,u,i,o,a={},c=Xo.dispatch("start","tick","end"),s=[1,1],l=.9,f=us,h=is,g=-30,p=os,v=.1,d=.64,m=[],y=[];return a.tick=function(){if((r*=.99)<.005)return c.end({type:"end",alpha:r=0}),!0;var 
t,e,a,f,h,p,d,x,M,_=m.length,b=y.length;for(e=0;b>e;++e)a=y[e],f=a.source,h=a.target,x=h.x-f.x,M=h.y-f.y,(p=x*x+M*M)&&(p=r*i[e]*((p=Math.sqrt(p))-u[e])/p,x*=p,M*=p,h.x-=x*(d=f.weight/(h.weight+f.weight)),h.y-=M*d,f.x+=x*(d=1-d),f.y+=M*d);if((d=r*v)&&(x=s[0]/2,M=s[1]/2,e=-1,d))for(;++e<_;)a=m[e],a.x+=(x-a.x)*d,a.y+=(M-a.y)*d;if(g)for(Zu(t=Xo.geom.quadtree(m),r,o),e=-1;++e<_;)(a=m[e]).fixed||t.visit(n(a));for(e=-1;++e<_;)a=m[e],a.fixed?(a.x=a.px,a.y=a.py):(a.x-=(a.px-(a.px=a.x))*l,a.y-=(a.py-(a.py=a.y))*l);c.tick({type:"tick",alpha:r})},a.nodes=function(n){return arguments.length?(m=n,a):m},a.links=function(n){return arguments.length?(y=n,a):y},a.size=function(n){return arguments.length?(s=n,a):s},a.linkDistance=function(n){return arguments.length?(f="function"==typeof n?n:+n,a):f},a.distance=a.linkDistance,a.linkStrength=function(n){return arguments.length?(h="function"==typeof n?n:+n,a):h},a.friction=function(n){return arguments.length?(l=+n,a):l},a.charge=function(n){return arguments.length?(g="function"==typeof n?n:+n,a):g},a.chargeDistance=function(n){return arguments.length?(p=n*n,a):Math.sqrt(p)},a.gravity=function(n){return arguments.length?(v=+n,a):v},a.theta=function(n){return arguments.length?(d=n*n,a):Math.sqrt(d)},a.alpha=function(n){return arguments.length?(n=+n,r?r=n>0?n:0:n>0&&(c.start({type:"start",alpha:r=n}),Xo.timer(a.tick)),a):r},a.start=function(){function n(n,r){if(!e){for(e=new Array(c),a=0;c>a;++a)e[a]=[];for(a=0;s>a;++a){var u=y[a];e[u.source.index].push(u.target),e[u.target.index].push(u.source)}}for(var i,o=e[t],a=-1,s=o.length;++a<s;)if(!isNaN(i=o[a][n]))return i;return Math.random()*r}var t,e,r,c=m.length,l=y.length,p=s[0],v=s[1];for(t=0;c>t;++t)(r=m[t]).index=t,r.weight=0;for(t=0;l>t;++t)r=y[t],"number"==typeof r.source&&(r.source=m[r.source]),"number"==typeof 
r.target&&(r.target=m[r.target]),++r.source.weight,++r.target.weight;for(t=0;c>t;++t)r=m[t],isNaN(r.x)&&(r.x=n("x",p)),isNaN(r.y)&&(r.y=n("y",v)),isNaN(r.px)&&(r.px=r.x),isNaN(r.py)&&(r.py=r.y);if(u=[],"function"==typeof f)for(t=0;l>t;++t)u[t]=+f.call(this,y[t],t);else for(t=0;l>t;++t)u[t]=f;if(i=[],"function"==typeof h)for(t=0;l>t;++t)i[t]=+h.call(this,y[t],t);else for(t=0;l>t;++t)i[t]=h;if(o=[],"function"==typeof g)for(t=0;c>t;++t)o[t]=+g.call(this,m[t],t);else for(t=0;c>t;++t)o[t]=g;return a.resume()},a.resume=function(){return a.alpha(.1)},a.stop=function(){return a.alpha(0)},a.drag=function(){return e||(e=Xo.behavior.drag().origin(bt).on("dragstart.force",Fu).on("drag.force",t).on("dragend.force",Ou)),arguments.length?(this.on("mouseover.force",Yu).on("mouseout.force",Iu).call(e),void 0):e},Xo.rebind(a,c,"on")};var us=20,is=1,os=1/0;Xo.layout.hierarchy=function(){function n(t,o,a){var c=u.call(e,t,o);if(t.depth=o,a.push(t),c&&(s=c.length)){for(var s,l,f=-1,h=t.children=new Array(s),g=0,p=o+1;++f<s;)l=h[f]=n(c[f],p,a),l.parent=t,g+=l.value;r&&h.sort(r),i&&(t.value=g)}else delete t.children,i&&(t.value=+i.call(e,t,o)||0);return t}function t(n,r){var u=n.children,o=0;if(u&&(a=u.length))for(var a,c=-1,s=r+1;++c<a;)o+=t(u[c],s);else i&&(o=+i.call(e,n,r)||0);return i&&(n.value=o),o}function e(t){var e=[];return n(t,0,e),e}var r=Bu,u=Xu,i=$u;return e.sort=function(n){return arguments.length?(r=n,e):r},e.children=function(n){return arguments.length?(u=n,e):u},e.value=function(n){return arguments.length?(i=n,e):i},e.revalue=function(n){return t(n,0),n},e},Xo.layout.partition=function(){function n(t,e,r,u){var i=t.children;if(t.x=e,t.y=t.depth*u,t.dx=r,t.dy=u,i&&(o=i.length)){var o,a,c,s=-1;for(r=t.value?r/t.value:0;++s<o;)n(a=i[s],e,c=a.value*r,u),e+=c}}function t(n){var e=n.children,r=0;if(e&&(u=e.length))for(var u,i=-1;++i<u;)r=Math.max(r,t(e[i]));return 1+r}function e(e,i){var o=r.call(this,e,i);return n(o[0],0,u[0],u[1]/t(o[0])),o}var 
r=Xo.layout.hierarchy(),u=[1,1];return e.size=function(n){return arguments.length?(u=n,e):u},Vu(e,r)},Xo.layout.pie=function(){function n(i){var o=i.map(function(e,r){return+t.call(n,e,r)}),a=+("function"==typeof r?r.apply(this,arguments):r),c=(("function"==typeof u?u.apply(this,arguments):u)-a)/Xo.sum(o),s=Xo.range(i.length);null!=e&&s.sort(e===as?function(n,t){return o[t]-o[n]}:function(n,t){return e(i[n],i[t])});var l=[];return s.forEach(function(n){var t;l[n]={data:i[n],value:t=o[n],startAngle:a,endAngle:a+=t*c}}),l}var t=Number,e=as,r=0,u=ka;return n.value=function(e){return arguments.length?(t=e,n):t},n.sort=function(t){return arguments.length?(e=t,n):e},n.startAngle=function(t){return arguments.length?(r=t,n):r},n.endAngle=function(t){return arguments.length?(u=t,n):u},n};var as={};Xo.layout.stack=function(){function n(a,c){var s=a.map(function(e,r){return t.call(n,e,r)}),l=s.map(function(t){return t.map(function(t,e){return[i.call(n,t,e),o.call(n,t,e)]})}),f=e.call(n,l,c);s=Xo.permute(s,f),l=Xo.permute(l,f);var h,g,p,v=r.call(n,l,c),d=s.length,m=s[0].length;for(g=0;m>g;++g)for(u.call(n,s[0][g],p=v[g],l[0][g][1]),h=1;d>h;++h)u.call(n,s[h][g],p+=l[h-1][g][1],l[h][g][1]);return a}var t=bt,e=Qu,r=ni,u=Ku,i=Ju,o=Gu;return n.values=function(e){return arguments.length?(t=e,n):t},n.order=function(t){return arguments.length?(e="function"==typeof t?t:cs.get(t)||Qu,n):e},n.offset=function(t){return arguments.length?(r="function"==typeof t?t:ss.get(t)||ni,n):r},n.x=function(t){return arguments.length?(i=t,n):i},n.y=function(t){return arguments.length?(o=t,n):o},n.out=function(t){return arguments.length?(u=t,n):u},n};var cs=Xo.map({"inside-out":function(n){var t,e,r=n.length,u=n.map(ti),i=n.map(ei),o=Xo.range(r).sort(function(n,t){return u[n]-u[t]}),a=0,c=0,s=[],l=[];for(t=0;r>t;++t)e=o[t],c>a?(a+=i[e],s.push(e)):(c+=i[e],l.push(e));return l.reverse().concat(s)},reverse:function(n){return Xo.range(n.length).reverse()},"default":Qu}),ss=Xo.map({silhouette:function(n){var 
t,e,r,u=n.length,i=n[0].length,o=[],a=0,c=[];for(e=0;i>e;++e){for(t=0,r=0;u>t;t++)r+=n[t][e][1];r>a&&(a=r),o.push(r)}for(e=0;i>e;++e)c[e]=(a-o[e])/2;return c},wiggle:function(n){var t,e,r,u,i,o,a,c,s,l=n.length,f=n[0],h=f.length,g=[];for(g[0]=c=s=0,e=1;h>e;++e){for(t=0,u=0;l>t;++t)u+=n[t][e][1];for(t=0,i=0,a=f[e][0]-f[e-1][0];l>t;++t){for(r=0,o=(n[t][e][1]-n[t][e-1][1])/(2*a);t>r;++r)o+=(n[r][e][1]-n[r][e-1][1])/a;i+=o*n[t][e][1]}g[e]=c-=u?i/u*a:0,s>c&&(s=c)}for(e=0;h>e;++e)g[e]-=s;return g},expand:function(n){var t,e,r,u=n.length,i=n[0].length,o=1/u,a=[];for(e=0;i>e;++e){for(t=0,r=0;u>t;t++)r+=n[t][e][1];if(r)for(t=0;u>t;t++)n[t][e][1]/=r;else for(t=0;u>t;t++)n[t][e][1]=o}for(e=0;i>e;++e)a[e]=0;return a},zero:ni});Xo.layout.histogram=function(){function n(n,i){for(var o,a,c=[],s=n.map(e,this),l=r.call(this,s,i),f=u.call(this,l,s,i),i=-1,h=s.length,g=f.length-1,p=t?1:1/h;++i<g;)o=c[i]=[],o.dx=f[i+1]-(o.x=f[i]),o.y=0;if(g>0)for(i=-1;++i<h;)a=s[i],a>=l[0]&&a<=l[1]&&(o=c[Xo.bisect(f,a,1,g)-1],o.y+=p,o.push(n[i]));return c}var t=!0,e=Number,r=oi,u=ui;return n.value=function(t){return arguments.length?(e=t,n):e},n.range=function(t){return arguments.length?(r=_t(t),n):r},n.bins=function(t){return arguments.length?(u="number"==typeof t?function(n){return ii(n,t)}:_t(t),n):u},n.frequency=function(e){return arguments.length?(t=!!e,n):t},n},Xo.layout.tree=function(){function n(n,i){function o(n,t){var r=n.children,u=n._tree;if(r&&(i=r.length)){for(var i,a,s,l=r[0],f=l,h=-1;++h<i;)s=r[h],o(s,a),f=c(s,a,f),a=s;vi(n);var g=.5*(l._tree.prelim+s._tree.prelim);t?(u.prelim=t._tree.prelim+e(n,t),u.mod=u.prelim-g):u.prelim=g}else t&&(u.prelim=t._tree.prelim+e(n,t))}function a(n,t){n.x=n._tree.prelim+t;var e=n.children;if(e&&(r=e.length)){var r,u=-1;for(t+=n._tree.mod;++u<r;)a(e[u],t)}}function c(n,t,r){if(t){for(var 
u,i=n,o=n,a=t,c=n.parent.children[0],s=i._tree.mod,l=o._tree.mod,f=a._tree.mod,h=c._tree.mod;a=si(a),i=ci(i),a&&i;)c=ci(c),o=si(o),o._tree.ancestor=n,u=a._tree.prelim+f-i._tree.prelim-s+e(a,i),u>0&&(di(mi(a,n,r),n,u),s+=u,l+=u),f+=a._tree.mod,s+=i._tree.mod,h+=c._tree.mod,l+=o._tree.mod;a&&!si(o)&&(o._tree.thread=a,o._tree.mod+=f-l),i&&!ci(c)&&(c._tree.thread=i,c._tree.mod+=s-h,r=n)}return r}var s=t.call(this,n,i),l=s[0];pi(l,function(n,t){n._tree={ancestor:n,prelim:0,mod:0,change:0,shift:0,number:t?t._tree.number+1:0}}),o(l),a(l,-l._tree.prelim);var f=li(l,hi),h=li(l,fi),g=li(l,gi),p=f.x-e(f,h)/2,v=h.x+e(h,f)/2,d=g.depth||1;return pi(l,u?function(n){n.x*=r[0],n.y=n.depth*r[1],delete n._tree}:function(n){n.x=(n.x-p)/(v-p)*r[0],n.y=n.depth/d*r[1],delete n._tree}),s}var t=Xo.layout.hierarchy().sort(null).value(null),e=ai,r=[1,1],u=!1;return n.separation=function(t){return arguments.length?(e=t,n):e},n.size=function(t){return arguments.length?(u=null==(r=t),n):u?null:r},n.nodeSize=function(t){return arguments.length?(u=null!=(r=t),n):u?r:null},Vu(n,t)},Xo.layout.pack=function(){function n(n,i){var o=e.call(this,n,i),a=o[0],c=u[0],s=u[1],l=null==t?Math.sqrt:"function"==typeof t?t:function(){return t};if(a.x=a.y=0,pi(a,function(n){n.r=+l(n.value)}),pi(a,bi),r){var f=r*(t?1:Math.max(2*a.r/c,2*a.r/s))/2;pi(a,function(n){n.r+=f}),pi(a,bi),pi(a,function(n){n.r-=f})}return ki(a,c/2,s/2,t?1:1/Math.max(2*a.r/c,2*a.r/s)),o}var t,e=Xo.layout.hierarchy().sort(yi),r=0,u=[1,1];return n.size=function(t){return arguments.length?(u=t,n):u},n.radius=function(e){return arguments.length?(t=null==e||"function"==typeof e?e:+e,n):t},n.padding=function(t){return arguments.length?(r=+t,n):r},Vu(n,e)},Xo.layout.cluster=function(){function n(n,i){var o,a=t.call(this,n,i),c=a[0],s=0;pi(c,function(n){var t=n.children;t&&t.length?(n.x=Ci(t),n.y=Ai(t)):(n.x=o?s+=e(n,o):0,n.y=0,o=n)});var l=Ni(c),f=Li(c),h=l.x-e(l,f)/2,g=f.x+e(f,l)/2;return 
pi(c,u?function(n){n.x=(n.x-c.x)*r[0],n.y=(c.y-n.y)*r[1]}:function(n){n.x=(n.x-h)/(g-h)*r[0],n.y=(1-(c.y?n.y/c.y:1))*r[1]}),a}var t=Xo.layout.hierarchy().sort(null).value(null),e=ai,r=[1,1],u=!1;return n.separation=function(t){return arguments.length?(e=t,n):e},n.size=function(t){return arguments.length?(u=null==(r=t),n):u?null:r},n.nodeSize=function(t){return arguments.length?(u=null!=(r=t),n):u?r:null},Vu(n,t)},Xo.layout.treemap=function(){function n(n,t){for(var e,r,u=-1,i=n.length;++u<i;)r=(e=n[u]).value*(0>t?0:t),e.area=isNaN(r)||0>=r?0:r}function t(e){var i=e.children;if(i&&i.length){var o,a,c,s=f(e),l=[],h=i.slice(),p=1/0,v="slice"===g?s.dx:"dice"===g?s.dy:"slice-dice"===g?1&e.depth?s.dy:s.dx:Math.min(s.dx,s.dy);for(n(h,s.dx*s.dy/e.value),l.area=0;(c=h.length)>0;)l.push(o=h[c-1]),l.area+=o.area,"squarify"!==g||(a=r(l,v))<=p?(h.pop(),p=a):(l.area-=l.pop().area,u(l,v,s,!1),v=Math.min(s.dx,s.dy),l.length=l.area=0,p=1/0);l.length&&(u(l,v,s,!0),l.length=l.area=0),i.forEach(t)}}function e(t){var r=t.children;if(r&&r.length){var i,o=f(t),a=r.slice(),c=[];for(n(a,o.dx*o.dy/t.value),c.area=0;i=a.pop();)c.push(i),c.area+=i.area,null!=i.z&&(u(c,i.z?o.dx:o.dy,o,!a.length),c.length=c.area=0);r.forEach(e)}}function r(n,t){for(var e,r=n.area,u=0,i=1/0,o=-1,a=n.length;++o<a;)(e=n[o].area)&&(i>e&&(i=e),e>u&&(u=e));return r*=r,t*=t,r?Math.max(t*u*p/r,r/(t*i*p)):1/0}function u(n,t,e,r){var u,i=-1,o=n.length,a=e.x,s=e.y,l=t?c(n.area/t):0;if(t==e.dx){for((r||l>e.dy)&&(l=e.dy);++i<o;)u=n[i],u.x=a,u.y=s,u.dy=l,a+=u.dx=Math.min(e.x+e.dx-a,l?c(u.area/l):0);u.z=!0,u.dx+=e.x+e.dx-a,e.y+=l,e.dy-=l}else{for((r||l>e.dx)&&(l=e.dx);++i<o;)u=n[i],u.x=a,u.y=s,u.dx=l,s+=u.dy=Math.min(e.y+e.dy-s,l?c(u.area/l):0);u.z=!1,u.dy+=e.y+e.dy-s,e.x+=l,e.dx-=l}}function i(r){var u=o||a(r),i=u[0];return i.x=0,i.y=0,i.dx=s[0],i.dy=s[1],o&&a.revalue(i),n([i],i.dx*i.dy/i.value),(o?e:t)(i),h&&(o=u),u}var o,a=Xo.layout.hierarchy(),c=Math.round,s=[1,1],l=null,f=Ti,h=!1,g="squarify",p=.5*(1+Math.sqrt(5));return 
i.size=function(n){return arguments.length?(s=n,i):s},i.padding=function(n){function t(t){var e=n.call(i,t,t.depth);return null==e?Ti(t):qi(t,"number"==typeof e?[e,e,e,e]:e)}function e(t){return qi(t,n)}if(!arguments.length)return l;var r;return f=null==(l=n)?Ti:"function"==(r=typeof n)?t:"number"===r?(n=[n,n,n,n],e):e,i},i.round=function(n){return arguments.length?(c=n?Math.round:Number,i):c!=Number},i.sticky=function(n){return arguments.length?(h=n,o=null,i):h},i.ratio=function(n){return arguments.length?(p=n,i):p},i.mode=function(n){return arguments.length?(g=n+"",i):g},Vu(i,a)},Xo.random={normal:function(n,t){var e=arguments.length;return 2>e&&(t=1),1>e&&(n=0),function(){var e,r,u;do e=2*Math.random()-1,r=2*Math.random()-1,u=e*e+r*r;while(!u||u>1);return n+t*e*Math.sqrt(-2*Math.log(u)/u)}},logNormal:function(){var n=Xo.random.normal.apply(Xo,arguments);return function(){return Math.exp(n())}},bates:function(n){var t=Xo.random.irwinHall(n);return function(){return t()/n}},irwinHall:function(n){return function(){for(var t=0,e=0;n>e;e++)t+=Math.random();return t}}},Xo.scale={};var ls={floor:bt,ceil:bt};Xo.scale.linear=function(){return Hi([0,1],[0,1],fu,!1)};var fs={s:1,g:1,p:1,r:1,e:1};Xo.scale.log=function(){return $i(Xo.scale.linear().domain([0,1]),10,!0,[1,10])};var hs=Xo.format(".0e"),gs={floor:function(n){return-Math.ceil(-n)},ceil:function(n){return-Math.floor(-n)}};Xo.scale.pow=function(){return Bi(Xo.scale.linear(),1,[0,1])},Xo.scale.sqrt=function(){return Xo.scale.pow().exponent(.5)},Xo.scale.ordinal=function(){return Ji([],{t:"range",a:[[]]})},Xo.scale.category10=function(){return Xo.scale.ordinal().range(ps)},Xo.scale.category20=function(){return Xo.scale.ordinal().range(vs)},Xo.scale.category20b=function(){return Xo.scale.ordinal().range(ds)},Xo.scale.category20c=function(){return Xo.scale.ordinal().range(ms)};var 
ps=[2062260,16744206,2924588,14034728,9725885,9197131,14907330,8355711,12369186,1556175].map(ht),vs=[2062260,11454440,16744206,16759672,2924588,10018698,14034728,16750742,9725885,12955861,9197131,12885140,14907330,16234194,8355711,13092807,12369186,14408589,1556175,10410725].map(ht),ds=[3750777,5395619,7040719,10264286,6519097,9216594,11915115,13556636,9202993,12426809,15186514,15190932,8666169,11356490,14049643,15177372,8077683,10834324,13528509,14589654].map(ht),ms=[3244733,7057110,10406625,13032431,15095053,16616764,16625259,16634018,3253076,7652470,10607003,13101504,7695281,10394312,12369372,14342891,6513507,9868950,12434877,14277081].map(ht);Xo.scale.quantile=function(){return Gi([],[])},Xo.scale.quantize=function(){return Ki(0,1,[0,1])},Xo.scale.threshold=function(){return Qi([.5],[0,1])},Xo.scale.identity=function(){return no([0,1])},Xo.svg={},Xo.svg.arc=function(){function n(){var n=t.apply(this,arguments),i=e.apply(this,arguments),o=r.apply(this,arguments)+ys,a=u.apply(this,arguments)+ys,c=(o>a&&(c=o,o=a,a=c),a-o),s=Sa>c?"0":"1",l=Math.cos(o),f=Math.sin(o),h=Math.cos(a),g=Math.sin(a);return c>=xs?n?"M0,"+i+"A"+i+","+i+" 0 1,1 0,"+-i+"A"+i+","+i+" 0 1,1 0,"+i+"M0,"+n+"A"+n+","+n+" 0 1,0 0,"+-n+"A"+n+","+n+" 0 1,0 0,"+n+"Z":"M0,"+i+"A"+i+","+i+" 0 1,1 0,"+-i+"A"+i+","+i+" 0 1,1 0,"+i+"Z":n?"M"+i*l+","+i*f+"A"+i+","+i+" 0 "+s+",1 "+i*h+","+i*g+"L"+n*h+","+n*g+"A"+n+","+n+" 0 "+s+",0 "+n*l+","+n*f+"Z":"M"+i*l+","+i*f+"A"+i+","+i+" 0 "+s+",1 "+i*h+","+i*g+"L0,0"+"Z"}var t=to,e=eo,r=ro,u=uo;return n.innerRadius=function(e){return arguments.length?(t=_t(e),n):t},n.outerRadius=function(t){return arguments.length?(e=_t(t),n):e},n.startAngle=function(t){return arguments.length?(r=_t(t),n):r},n.endAngle=function(t){return arguments.length?(u=_t(t),n):u},n.centroid=function(){var n=(t.apply(this,arguments)+e.apply(this,arguments))/2,i=(r.apply(this,arguments)+u.apply(this,arguments))/2+ys;return[Math.cos(i)*n,Math.sin(i)*n]},n};var 
ys=-Ea,xs=ka-Aa;Xo.svg.line=function(){return io(bt)};var Ms=Xo.map({linear:oo,"linear-closed":ao,step:co,"step-before":so,"step-after":lo,basis:mo,"basis-open":yo,"basis-closed":xo,bundle:Mo,cardinal:go,"cardinal-open":fo,"cardinal-closed":ho,monotone:Eo});Ms.forEach(function(n,t){t.key=n,t.closed=/-closed$/.test(n)});var _s=[0,2/3,1/3,0],bs=[0,1/3,2/3,0],ws=[0,1/6,2/3,1/6];Xo.svg.line.radial=function(){var n=io(Ao);return n.radius=n.x,delete n.x,n.angle=n.y,delete n.y,n},so.reverse=lo,lo.reverse=so,Xo.svg.area=function(){return Co(bt)},Xo.svg.area.radial=function(){var n=Co(Ao);return n.radius=n.x,delete n.x,n.innerRadius=n.x0,delete n.x0,n.outerRadius=n.x1,delete n.x1,n.angle=n.y,delete n.y,n.startAngle=n.y0,delete n.y0,n.endAngle=n.y1,delete n.y1,n},Xo.svg.chord=function(){function n(n,a){var c=t(this,i,n,a),s=t(this,o,n,a);return"M"+c.p0+r(c.r,c.p1,c.a1-c.a0)+(e(c,s)?u(c.r,c.p1,c.r,c.p0):u(c.r,c.p1,s.r,s.p0)+r(s.r,s.p1,s.a1-s.a0)+u(s.r,s.p1,c.r,c.p0))+"Z"}function t(n,t,e,r){var u=t.call(n,e,r),i=a.call(n,u,r),o=c.call(n,u,r)+ys,l=s.call(n,u,r)+ys;return{r:i,a0:o,a1:l,p0:[i*Math.cos(o),i*Math.sin(o)],p1:[i*Math.cos(l),i*Math.sin(l)]}}function e(n,t){return n.a0==t.a0&&n.a1==t.a1}function r(n,t,e){return"A"+n+","+n+" 0 "+ +(e>Sa)+",1 "+t}function u(n,t,e,r){return"Q 0,0 "+r}var i=hr,o=gr,a=No,c=ro,s=uo;return n.radius=function(t){return arguments.length?(a=_t(t),n):a},n.source=function(t){return arguments.length?(i=_t(t),n):i},n.target=function(t){return arguments.length?(o=_t(t),n):o},n.startAngle=function(t){return arguments.length?(c=_t(t),n):c},n.endAngle=function(t){return arguments.length?(s=_t(t),n):s},n},Xo.svg.diagonal=function(){function n(n,u){var i=t.call(this,n,u),o=e.call(this,n,u),a=(i.y+o.y)/2,c=[i,{x:i.x,y:a},{x:o.x,y:a},o];return c=c.map(r),"M"+c[0]+"C"+c[1]+" "+c[2]+" "+c[3]}var t=hr,e=gr,r=Lo;return n.source=function(e){return arguments.length?(t=_t(e),n):t},n.target=function(t){return 
arguments.length?(e=_t(t),n):e},n.projection=function(t){return arguments.length?(r=t,n):r},n},Xo.svg.diagonal.radial=function(){var n=Xo.svg.diagonal(),t=Lo,e=n.projection;return n.projection=function(n){return arguments.length?e(To(t=n)):t},n},Xo.svg.symbol=function(){function n(n,r){return(Ss.get(t.call(this,n,r))||Ro)(e.call(this,n,r))}var t=zo,e=qo;return n.type=function(e){return arguments.length?(t=_t(e),n):t},n.size=function(t){return arguments.length?(e=_t(t),n):e},n};var Ss=Xo.map({circle:Ro,cross:function(n){var t=Math.sqrt(n/5)/2;return"M"+-3*t+","+-t+"H"+-t+"V"+-3*t+"H"+t+"V"+-t+"H"+3*t+"V"+t+"H"+t+"V"+3*t+"H"+-t+"V"+t+"H"+-3*t+"Z"},diamond:function(n){var t=Math.sqrt(n/(2*Cs)),e=t*Cs;return"M0,"+-t+"L"+e+",0"+" 0,"+t+" "+-e+",0"+"Z"},square:function(n){var t=Math.sqrt(n)/2;return"M"+-t+","+-t+"L"+t+","+-t+" "+t+","+t+" "+-t+","+t+"Z"},"triangle-down":function(n){var t=Math.sqrt(n/As),e=t*As/2;return"M0,"+e+"L"+t+","+-e+" "+-t+","+-e+"Z"},"triangle-up":function(n){var t=Math.sqrt(n/As),e=t*As/2;return"M0,"+-e+"L"+t+","+e+" "+-t+","+e+"Z"}});Xo.svg.symbolTypes=Ss.keys();var ks,Es,As=Math.sqrt(3),Cs=Math.tan(30*Na),Ns=[],Ls=0;Ns.call=da.call,Ns.empty=da.empty,Ns.node=da.node,Ns.size=da.size,Xo.transition=function(n){return arguments.length?ks?n.transition():n:xa.transition()},Xo.transition.prototype=Ns,Ns.select=function(n){var t,e,r,u=this.id,i=[];n=M(n);for(var o=-1,a=this.length;++o<a;){i.push(t=[]);for(var c=this[o],s=-1,l=c.length;++s<l;)(r=c[s])&&(e=n.call(r,r.__data__,s,o))?("__data__"in r&&(e.__data__=r.__data__),jo(e,s,u,r.__transition__[u]),t.push(e)):t.push(null)}return Do(i,u)},Ns.selectAll=function(n){var t,e,r,u,i,o=this.id,a=[];n=_(n);for(var c=-1,s=this.length;++c<s;)for(var l=this[c],f=-1,h=l.length;++f<h;)if(r=l[f]){i=r.__transition__[o],e=n.call(r,r.__data__,f,c),a.push(t=[]);for(var g=-1,p=e.length;++g<p;)(u=e[g])&&jo(u,g,o,i),t.push(u)}return Do(a,o)},Ns.filter=function(n){var t,e,r,u=[];"function"!=typeof n&&(n=q(n));for(var 
i=0,o=this.length;o>i;i++){u.push(t=[]);for(var e=this[i],a=0,c=e.length;c>a;a++)(r=e[a])&&n.call(r,r.__data__,a,i)&&t.push(r)}return Do(u,this.id)},Ns.tween=function(n,t){var e=this.id;return arguments.length<2?this.node().__transition__[e].tween.get(n):R(this,null==t?function(t){t.__transition__[e].tween.remove(n)}:function(r){r.__transition__[e].tween.set(n,t)})},Ns.attr=function(n,t){function e(){this.removeAttribute(a)}function r(){this.removeAttributeNS(a.space,a.local)}function u(n){return null==n?e:(n+="",function(){var t,e=this.getAttribute(a);return e!==n&&(t=o(e,n),function(n){this.setAttribute(a,t(n))})})}function i(n){return null==n?r:(n+="",function(){var t,e=this.getAttributeNS(a.space,a.local);return e!==n&&(t=o(e,n),function(n){this.setAttributeNS(a.space,a.local,t(n))})})}if(arguments.length<2){for(t in n)this.attr(t,n[t]);return this}var o="transform"==n?Ru:fu,a=Xo.ns.qualify(n);return Po(this,"attr."+n,t,a.local?i:u)},Ns.attrTween=function(n,t){function e(n,e){var r=t.call(this,n,e,this.getAttribute(u));return r&&function(n){this.setAttribute(u,r(n))}}function r(n,e){var r=t.call(this,n,e,this.getAttributeNS(u.space,u.local));return r&&function(n){this.setAttributeNS(u.space,u.local,r(n))}}var u=Xo.ns.qualify(n);return this.tween("attr."+n,u.local?r:e)},Ns.style=function(n,t,e){function r(){this.style.removeProperty(n)}function u(t){return null==t?r:(t+="",function(){var r,u=Go.getComputedStyle(this,null).getPropertyValue(n);return u!==t&&(r=fu(u,t),function(t){this.style.setProperty(n,r(t),e)})})}var i=arguments.length;if(3>i){if("string"!=typeof n){2>i&&(t="");for(e in n)this.style(e,n[e],t);return this}e=""}return Po(this,"style."+n,t,u)},Ns.styleTween=function(n,t,e){function r(r,u){var i=t.call(this,r,u,Go.getComputedStyle(this,null).getPropertyValue(n));return i&&function(t){this.style.setProperty(n,i(t),e)}}return arguments.length<3&&(e=""),this.tween("style."+n,r)},Ns.text=function(n){return 
Po(this,"text",n,Uo)},Ns.remove=function(){return this.each("end.transition",function(){var n;this.__transition__.count<2&&(n=this.parentNode)&&n.removeChild(this)})},Ns.ease=function(n){var t=this.id;return arguments.length<1?this.node().__transition__[t].ease:("function"!=typeof n&&(n=Xo.ease.apply(Xo,arguments)),R(this,function(e){e.__transition__[t].ease=n}))},Ns.delay=function(n){var t=this.id;return R(this,"function"==typeof n?function(e,r,u){e.__transition__[t].delay=+n.call(e,e.__data__,r,u)}:(n=+n,function(e){e.__transition__[t].delay=n}))},Ns.duration=function(n){var t=this.id;return R(this,"function"==typeof n?function(e,r,u){e.__transition__[t].duration=Math.max(1,n.call(e,e.__data__,r,u))}:(n=Math.max(1,n),function(e){e.__transition__[t].duration=n}))},Ns.each=function(n,t){var e=this.id;if(arguments.length<2){var r=Es,u=ks;ks=e,R(this,function(t,r,u){Es=t.__transition__[e],n.call(t,t.__data__,r,u)}),Es=r,ks=u}else R(this,function(r){var u=r.__transition__[e];(u.event||(u.event=Xo.dispatch("start","end"))).on(n,t)});return this},Ns.transition=function(){for(var n,t,e,r,u=this.id,i=++Ls,o=[],a=0,c=this.length;c>a;a++){o.push(n=[]);for(var t=this[a],s=0,l=t.length;l>s;s++)(e=t[s])&&(r=Object.create(e.__transition__[u]),r.delay+=r.duration,jo(e,s,i,r)),n.push(e)}return Do(o,i)},Xo.svg.axis=function(){function n(n){n.each(function(){var n,s=Xo.select(this),l=this.__chart__||e,f=this.__chart__=e.copy(),h=null==c?f.ticks?f.ticks.apply(f,a):f.domain():c,g=null==t?f.tickFormat?f.tickFormat.apply(f,a):bt:t,p=s.selectAll(".tick").data(h,f),v=p.enter().insert("g",".domain").attr("class","tick").style("opacity",Aa),d=Xo.transition(p.exit()).style("opacity",Aa).remove(),m=Xo.transition(p).style("opacity",1),y=Ri(f),x=s.selectAll(".domain").data([0]),M=(x.enter().append("path").attr("class","domain"),Xo.transition(x));v.append("line"),v.append("text");var 
_=v.select("line"),b=m.select("line"),w=p.select("text").text(g),S=v.select("text"),k=m.select("text");switch(r){case"bottom":n=Ho,_.attr("y2",u),S.attr("y",Math.max(u,0)+o),b.attr("x2",0).attr("y2",u),k.attr("x",0).attr("y",Math.max(u,0)+o),w.attr("dy",".71em").style("text-anchor","middle"),M.attr("d","M"+y[0]+","+i+"V0H"+y[1]+"V"+i);break;case"top":n=Ho,_.attr("y2",-u),S.attr("y",-(Math.max(u,0)+o)),b.attr("x2",0).attr("y2",-u),k.attr("x",0).attr("y",-(Math.max(u,0)+o)),w.attr("dy","0em").style("text-anchor","middle"),M.attr("d","M"+y[0]+","+-i+"V0H"+y[1]+"V"+-i);break;case"left":n=Fo,_.attr("x2",-u),S.attr("x",-(Math.max(u,0)+o)),b.attr("x2",-u).attr("y2",0),k.attr("x",-(Math.max(u,0)+o)).attr("y",0),w.attr("dy",".32em").style("text-anchor","end"),M.attr("d","M"+-i+","+y[0]+"H0V"+y[1]+"H"+-i);break;case"right":n=Fo,_.attr("x2",u),S.attr("x",Math.max(u,0)+o),b.attr("x2",u).attr("y2",0),k.attr("x",Math.max(u,0)+o).attr("y",0),w.attr("dy",".32em").style("text-anchor","start"),M.attr("d","M"+i+","+y[0]+"H0V"+y[1]+"H"+i)}if(f.rangeBand){var E=f,A=E.rangeBand()/2;l=f=function(n){return E(n)+A}}else l.rangeBand?l=f:d.call(n,f);v.call(n,l),m.call(n,f)})}var t,e=Xo.scale.linear(),r=Ts,u=6,i=6,o=3,a=[10],c=null;return n.scale=function(t){return arguments.length?(e=t,n):e},n.orient=function(t){return arguments.length?(r=t in qs?t+"":Ts,n):r},n.ticks=function(){return arguments.length?(a=arguments,n):a},n.tickValues=function(t){return arguments.length?(c=t,n):c},n.tickFormat=function(e){return arguments.length?(t=e,n):t},n.tickSize=function(t){var e=arguments.length;return e?(u=+t,i=+arguments[e-1],n):u},n.innerTickSize=function(t){return arguments.length?(u=+t,n):u},n.outerTickSize=function(t){return arguments.length?(i=+t,n):i},n.tickPadding=function(t){return arguments.length?(o=+t,n):o},n.tickSubdivide=function(){return arguments.length&&n},n};var Ts="bottom",qs={top:1,right:1,bottom:1,left:1};Xo.svg.brush=function(){function n(i){i.each(function(){var 
i=Xo.select(this).style("pointer-events","all").style("-webkit-tap-highlight-color","rgba(0,0,0,0)").on("mousedown.brush",u).on("touchstart.brush",u),o=i.selectAll(".background").data([0]);o.enter().append("rect").attr("class","background").style("visibility","hidden").style("cursor","crosshair"),i.selectAll(".extent").data([0]).enter().append("rect").attr("class","extent").style("cursor","move");var a=i.selectAll(".resize").data(p,bt);a.exit().remove(),a.enter().append("g").attr("class",function(n){return"resize "+n}).style("cursor",function(n){return zs[n]}).append("rect").attr("x",function(n){return/[ew]$/.test(n)?-3:null}).attr("y",function(n){return/^[ns]/.test(n)?-3:null}).attr("width",6).attr("height",6).style("visibility","hidden"),a.style("display",n.empty()?"none":null);var l,f=Xo.transition(i),h=Xo.transition(o);c&&(l=Ri(c),h.attr("x",l[0]).attr("width",l[1]-l[0]),e(f)),s&&(l=Ri(s),h.attr("y",l[0]).attr("height",l[1]-l[0]),r(f)),t(f)})}function t(n){n.selectAll(".resize").attr("transform",function(n){return"translate("+l[+/e$/.test(n)]+","+f[+/^s/.test(n)]+")"})}function e(n){n.select(".extent").attr("x",l[0]),n.selectAll(".extent,.n>rect,.s>rect").attr("width",l[1]-l[0])}function r(n){n.select(".extent").attr("y",f[0]),n.selectAll(".extent,.e>rect,.w>rect").attr("height",f[1]-f[0])}function u(){function u(){32==Xo.event.keyCode&&(C||(x=null,L[0]-=l[1],L[1]-=f[1],C=2),d())}function p(){32==Xo.event.keyCode&&2==C&&(L[0]+=l[1],L[1]+=f[1],C=0,d())}function v(){var n=Xo.mouse(_),u=!1;M&&(n[0]+=M[0],n[1]+=M[1]),C||(Xo.event.altKey?(x||(x=[(l[0]+l[1])/2,(f[0]+f[1])/2]),L[0]=l[+(n[0]<x[0])],L[1]=f[+(n[1]<x[1])]):x=null),E&&m(n,c,0)&&(e(S),u=!0),A&&m(n,s,1)&&(r(S),u=!0),u&&(t(S),w({type:"brush",mode:C?"move":"resize"}))}function m(n,t,e){var r,u,a=Ri(t),c=a[0],s=a[1],p=L[e],v=e?f:l,d=v[1]-v[0];return 
C&&(c-=p,s-=d+p),r=(e?g:h)?Math.max(c,Math.min(s,n[e])):n[e],C?u=(r+=p)+d:(x&&(p=Math.max(c,Math.min(s,2*x[e]-r))),r>p?(u=r,r=p):u=p),v[0]!=r||v[1]!=u?(e?o=null:i=null,v[0]=r,v[1]=u,!0):void 0}function y(){v(),S.style("pointer-events","all").selectAll(".resize").style("display",n.empty()?"none":null),Xo.select("body").style("cursor",null),T.on("mousemove.brush",null).on("mouseup.brush",null).on("touchmove.brush",null).on("touchend.brush",null).on("keydown.brush",null).on("keyup.brush",null),N(),w({type:"brushend"})}var x,M,_=this,b=Xo.select(Xo.event.target),w=a.of(_,arguments),S=Xo.select(_),k=b.datum(),E=!/^(n|s)$/.test(k)&&c,A=!/^(e|w)$/.test(k)&&s,C=b.classed("extent"),N=O(),L=Xo.mouse(_),T=Xo.select(Go).on("keydown.brush",u).on("keyup.brush",p);if(Xo.event.changedTouches?T.on("touchmove.brush",v).on("touchend.brush",y):T.on("mousemove.brush",v).on("mouseup.brush",y),S.interrupt().selectAll("*").interrupt(),C)L[0]=l[0]-L[0],L[1]=f[0]-L[1];else if(k){var q=+/w$/.test(k),z=+/^n/.test(k);M=[l[1-q]-L[0],f[1-z]-L[1]],L[0]=l[q],L[1]=f[z]}else Xo.event.altKey&&(x=L.slice());S.style("pointer-events","none").selectAll(".resize").style("display",null),Xo.select("body").style("cursor",b.style("cursor")),w({type:"brushstart"}),v()}var i,o,a=y(n,"brushstart","brush","brushend"),c=null,s=null,l=[0,0],f=[0,0],h=!0,g=!0,p=Rs[0];return n.event=function(n){n.each(function(){var n=a.of(this,arguments),t={x:l,y:f,i:i,j:o},e=this.__chart__||t;this.__chart__=t,ks?Xo.select(this).transition().each("start.brush",function(){i=e.i,o=e.j,l=e.x,f=e.y,n({type:"brushstart"})}).tween("brush:brush",function(){var e=hu(l,t.x),r=hu(f,t.y);return i=o=null,function(u){l=t.x=e(u),f=t.y=r(u),n({type:"brush",mode:"resize"})}}).each("end.brush",function(){i=t.i,o=t.j,n({type:"brush",mode:"resize"}),n({type:"brushend"})}):(n({type:"brushstart"}),n({type:"brush",mode:"resize"}),n({type:"brushend"}))})},n.x=function(t){return arguments.length?(c=t,p=Rs[!c<<1|!s],n):c},n.y=function(t){return 
arguments.length?(s=t,p=Rs[!c<<1|!s],n):s},n.clamp=function(t){return arguments.length?(c&&s?(h=!!t[0],g=!!t[1]):c?h=!!t:s&&(g=!!t),n):c&&s?[h,g]:c?h:s?g:null},n.extent=function(t){var e,r,u,a,h;return arguments.length?(c&&(e=t[0],r=t[1],s&&(e=e[0],r=r[0]),i=[e,r],c.invert&&(e=c(e),r=c(r)),e>r&&(h=e,e=r,r=h),(e!=l[0]||r!=l[1])&&(l=[e,r])),s&&(u=t[0],a=t[1],c&&(u=u[1],a=a[1]),o=[u,a],s.invert&&(u=s(u),a=s(a)),u>a&&(h=u,u=a,a=h),(u!=f[0]||a!=f[1])&&(f=[u,a])),n):(c&&(i?(e=i[0],r=i[1]):(e=l[0],r=l[1],c.invert&&(e=c.invert(e),r=c.invert(r)),e>r&&(h=e,e=r,r=h))),s&&(o?(u=o[0],a=o[1]):(u=f[0],a=f[1],s.invert&&(u=s.invert(u),a=s.invert(a)),u>a&&(h=u,u=a,a=h))),c&&s?[[e,u],[r,a]]:c?[e,r]:s&&[u,a])},n.clear=function(){return n.empty()||(l=[0,0],f=[0,0],i=o=null),n},n.empty=function(){return!!c&&l[0]==l[1]||!!s&&f[0]==f[1]},Xo.rebind(n,a,"on")};var zs={n:"ns-resize",e:"ew-resize",s:"ns-resize",w:"ew-resize",nw:"nwse-resize",ne:"nesw-resize",se:"nwse-resize",sw:"nesw-resize"},Rs=[["n","e","s","w","nw","ne","se","sw"],["e","w"],["n","s"],[]],Ds=tc.format=ac.timeFormat,Ps=Ds.utc,Us=Ps("%Y-%m-%dT%H:%M:%S.%LZ");Ds.iso=Date.prototype.toISOString&&+new Date("2000-01-01T00:00:00.000Z")?Oo:Us,Oo.parse=function(n){var t=new Date(n);return isNaN(t)?null:t},Oo.toString=Us.toString,tc.second=Rt(function(n){return new ec(1e3*Math.floor(n/1e3))},function(n,t){n.setTime(n.getTime()+1e3*Math.floor(t))},function(n){return n.getSeconds()}),tc.seconds=tc.second.range,tc.seconds.utc=tc.second.utc.range,tc.minute=Rt(function(n){return new ec(6e4*Math.floor(n/6e4))},function(n,t){n.setTime(n.getTime()+6e4*Math.floor(t))},function(n){return n.getMinutes()}),tc.minutes=tc.minute.range,tc.minutes.utc=tc.minute.utc.range,tc.hour=Rt(function(n){var t=n.getTimezoneOffset()/60;return new ec(36e5*(Math.floor(n/36e5-t)+t))},function(n,t){n.setTime(n.getTime()+36e5*Math.floor(t))},function(n){return n.getHours()}),tc.hours=tc.hour.range,tc.hours.utc=tc.hour.utc.range,tc.month=Rt(function(n){return 
n=tc.day(n),n.setDate(1),n},function(n,t){n.setMonth(n.getMonth()+t)},function(n){return n.getMonth()}),tc.months=tc.month.range,tc.months.utc=tc.month.utc.range;var js=[1e3,5e3,15e3,3e4,6e4,3e5,9e5,18e5,36e5,108e5,216e5,432e5,864e5,1728e5,6048e5,2592e6,7776e6,31536e6],Hs=[[tc.second,1],[tc.second,5],[tc.second,15],[tc.second,30],[tc.minute,1],[tc.minute,5],[tc.minute,15],[tc.minute,30],[tc.hour,1],[tc.hour,3],[tc.hour,6],[tc.hour,12],[tc.day,1],[tc.day,2],[tc.week,1],[tc.month,1],[tc.month,3],[tc.year,1]],Fs=Ds.multi([[".%L",function(n){return n.getMilliseconds()}],[":%S",function(n){return n.getSeconds()}],["%I:%M",function(n){return n.getMinutes()}],["%I %p",function(n){return n.getHours()}],["%a %d",function(n){return n.getDay()&&1!=n.getDate()}],["%b %d",function(n){return 1!=n.getDate()}],["%B",function(n){return n.getMonth()}],["%Y",be]]),Os={range:function(n,t,e){return Xo.range(Math.ceil(n/e)*e,+t,e).map(Io)},floor:bt,ceil:bt};Hs.year=tc.year,tc.scale=function(){return Yo(Xo.scale.linear(),Hs,Fs)};var Ys=Hs.map(function(n){return[n[0].utc,n[1]]}),Is=Ps.multi([[".%L",function(n){return n.getUTCMilliseconds()}],[":%S",function(n){return n.getUTCSeconds()}],["%I:%M",function(n){return n.getUTCMinutes()}],["%I %p",function(n){return n.getUTCHours()}],["%a %d",function(n){return n.getUTCDay()&&1!=n.getUTCDate()}],["%b %d",function(n){return 1!=n.getUTCDate()}],["%B",function(n){return n.getUTCMonth()}],["%Y",be]]);Ys.year=tc.year.utc,tc.scale.utc=function(){return Yo(Xo.scale.linear(),Ys,Is)},Xo.text=wt(function(n){return n.responseText}),Xo.json=function(n,t){return St(n,"application/json",Zo,t)},Xo.html=function(n,t){return St(n,"text/html",Vo,t)},Xo.xml=wt(function(n){return n.responseXML}),"function"==typeof define&&define.amd?define(Xo):"object"==typeof module&&module.exports?module.exports=Xo:this.d3=Xo}();'use strict';(function(window){window.define=undefined;}).call(this,this);'use strict';tr.exportTo('tr.ui.b',function(){const 
DataSeriesEnableChangeEventType='data-series-enabled-change';const THIS_DOC=document.currentScript.ownerDocument;const svgNS='http://www.w3.org/2000/svg';const ColorScheme=tr.b.ColorScheme;function getColorOfKey(key,selected){let id=ColorScheme.getColorIdForGeneralPurposeString(key);if(selected){id+=ColorScheme.properties.brightenedOffsets[0];} return ColorScheme.colorsAsStrings[id];} function getSVGTextSize(parentNode,text,opt_callback,opt_this){const textNode=document.createElementNS('http://www.w3.org/2000/svg','text');textNode.setAttributeNS(null,'x',0);textNode.setAttributeNS(null,'y',0);textNode.setAttributeNS(null,'fill','black');textNode.appendChild(document.createTextNode(text));parentNode.appendChild(textNode);if(opt_callback){opt_callback.call(opt_this||parentNode,textNode);} const width=textNode.getComputedTextLength();const height=textNode.getBBox().height;parentNode.removeChild(textNode);return{width,height};} @@ -8068,7 +8068,7 @@ return snapshot;} function findAllEvents(rendererHelper,category,title){const targetEvents=[];for(const ev of rendererHelper.process.getDescendantEvents()){if(!hasCategoryAndName(ev,category,title))continue;targetEvents.push(ev);} return targetEvents;} -const URL_EXCLUSION=['','about:blank','data:text/html,pluginplaceholderdata','chrome-error://chromewebdata/'];function shouldIgnoreURL(url){return URL_EXCLUSION.includes(url);} +const URL_BLACKLIST=['','about:blank','data:text/html,pluginplaceholderdata','chrome-error://chromewebdata/'];function shouldIgnoreURL(url){return URL_BLACKLIST.includes(url);} function collectTimeToEvent(category,eventName,rendererHelper,frameToNavStartEvents){const targetEvents=findAllEvents(rendererHelper,category,eventName);const samples=[];for(const ev of targetEvents){if(rendererHelper.isTelemetryInternalEvent(ev))continue;const frameIdRef=ev.args.frame;const 
snapshot=findFrameLoaderSnapshotAt(rendererHelper,frameIdRef,ev.start);if(snapshot===undefined||!snapshot.args.isLoadingMainFrame)continue;const url=snapshot.args.documentLoaderURL;if(shouldIgnoreURL(url))continue;const navigationStartEvent=EventFinderUtils.findLastEventStartingOnOrBeforeTimestamp(frameToNavStartEvents.get(frameIdRef)||[],ev.start);if(navigationStartEvent===undefined)continue;const navStartToEventRange=tr.b.math.Range.fromExplicitRange(navigationStartEvent.start,ev.start);const networkEvents=getNetworkEventsInRange(rendererHelper.process,navStartToEventRange);const breakdownTree=tr.metrics.sh.generateWallClockTimeBreakdownTree(rendererHelper.mainThread,networkEvents,navStartToEventRange);samples.push({value:navStartToEventRange.duration,breakdownTree,diagnostics:{breakdown:createBreakdownDiagnostic(breakdownTree),url:new tr.v.d.GenericSet([url]),Start:new RelatedEventSet(navigationStartEvent),End:new RelatedEventSet(ev)}});} return samples;} function addFirstMeaningfulPaintSample(samples,rendererHelper,navigationStart,fmpMarkerEvent,url){const navStartToFMPRange=tr.b.math.Range.fromExplicitRange(navigationStart.start,fmpMarkerEvent.start);const networkEvents=getNetworkEventsInRange(rendererHelper.process,navStartToFMPRange);const timeToFirstMeaningfulPaint=navStartToFMPRange.duration;const breakdownTree=tr.metrics.sh.generateWallClockTimeBreakdownTree(rendererHelper.mainThread,networkEvents,navStartToFMPRange);samples.push({value:timeToFirstMeaningfulPaint,breakdownTree,diagnostics:{breakdown:createBreakdownDiagnostic(breakdownTree),start:new RelatedEventSet(navigationStart),end:new RelatedEventSet(fmpMarkerEvent),infos:new tr.v.d.GenericSet([{url,pid:rendererHelper.pid,start:navigationStart.start,fmp:fmpMarkerEvent.start,}]),}});} @@ -8083,7 +8083,7 @@ function addSamplesToHistogram(samples,histogram,histograms){for(const sample of 
samples){histogram.addSample(sample.value,sample.diagnostics);if(histogram.name!=='timeToFirstContentfulPaint')continue;if(!sample.breakdownTree)continue;for(const[category,breakdown]of Object.entries(sample.breakdownTree)){const relatedName=`${histogram.name}:${category}`;let relatedHist=histograms.getHistogramsNamed(relatedName)[0];if(!relatedHist){relatedHist=histograms.createHistogram(relatedName,histogram.unit,[],{binBoundaries:LOADING_METRIC_BOUNDARIES,summaryOptions:{count:false,max:false,min:false,sum:false,},});let relatedNames=histogram.diagnostics.get('breakdown');if(!relatedNames){relatedNames=new tr.v.d.RelatedNameMap();histogram.diagnostics.set('breakdown',relatedNames);} relatedNames.set(category,relatedName);} relatedHist.addSample(breakdown.total,{breakdown:tr.v.d.Breakdown.fromEntries(Object.entries(breakdown.events)),});}}} -function loadingMetric(histograms,model){const firstPaintHistogram=histograms.createHistogram('timeToFirstPaint',timeDurationInMs_smallerIsBetter,[],{binBoundaries:LOADING_METRIC_BOUNDARIES,description:'time to first paint',summaryOptions:SUMMARY_OPTIONS,});const firstContentfulPaintHistogram=histograms.createHistogram('timeToFirstContentfulPaint',timeDurationInMs_smallerIsBetter,[],{binBoundaries:LOADING_METRIC_BOUNDARIES,description:'time to first contentful paint',summaryOptions:SUMMARY_OPTIONS,});const onLoadHistogram=histograms.createHistogram('timeToOnload',timeDurationInMs_smallerIsBetter,[],{binBoundaries:LOADING_METRIC_BOUNDARIES,description:'time to onload. 
'+'This is temporary metric used for PCv1/v2 correctness checking',summaryOptions:SUMMARY_OPTIONS,});const firstMeaningfulPaintHistogram=histograms.createHistogram('timeToFirstMeaningfulPaint',timeDurationInMs_smallerIsBetter,[],{binBoundaries:LOADING_METRIC_BOUNDARIES,description:'time to first meaningful paint',summaryOptions:SUMMARY_OPTIONS,});const firstMeaningfulPaintCpuTimeHistogram=histograms.createHistogram('cpuTimeToFirstMeaningfulPaint',timeDurationInMs_smallerIsBetter,[],{binBoundaries:LOADING_METRIC_BOUNDARIES,description:'CPU time to first meaningful paint',summaryOptions:SUMMARY_OPTIONS,});const timeToInteractiveHistogram=histograms.createHistogram('timeToInteractive',timeDurationInMs_smallerIsBetter,[],{binBoundaries:TIME_TO_INTERACTIVE_BOUNDARIES,description:'Time to Interactive',summaryOptions:SUMMARY_OPTIONS,});const timeToFirstCpuIdleHistogram=histograms.createHistogram('timeToFirstCpuIdle',timeDurationInMs_smallerIsBetter,[],{binBoundaries:TIME_TO_INTERACTIVE_BOUNDARIES,description:'Time to First CPU Idle',summaryOptions:SUMMARY_OPTIONS,});const chromeHelper=model.getOrCreateHelper(tr.model.helpers.ChromeModelHelper);for(const pid in chromeHelper.rendererHelpers){const rendererHelper=chromeHelper.rendererHelpers[pid];if(rendererHelper.isChromeTracingUI)continue;const 
samplesSet=collectLoadingMetricsForRenderer(rendererHelper);addSamplesToHistogram(samplesSet.firstPaintSamples,firstPaintHistogram,histograms);addSamplesToHistogram(samplesSet.firstContentfulPaintSamples,firstContentfulPaintHistogram,histograms);addSamplesToHistogram(samplesSet.onLoadSamples,onLoadHistogram,histograms);addSamplesToHistogram(samplesSet.firstMeaningfulPaintSamples,firstMeaningfulPaintHistogram,histograms);addSamplesToHistogram(samplesSet.firstMeaningfulPaintCpuTimeSamples,firstMeaningfulPaintCpuTimeHistogram,histograms);addSamplesToHistogram(samplesSet.interactiveSamples,timeToInteractiveHistogram,histograms);addSamplesToHistogram(samplesSet.firstCpuIdleSamples,timeToFirstCpuIdleHistogram,histograms);}} +function loadingMetric(histograms,model){const firstPaintHistogram=histograms.createHistogram('timeToFirstPaint',timeDurationInMs_smallerIsBetter,[],{binBoundaries:LOADING_METRIC_BOUNDARIES,description:'time to first paint',summaryOptions:SUMMARY_OPTIONS,});const firstContentfulPaintHistogram=histograms.createHistogram('timeToFirstContentfulPaint',timeDurationInMs_smallerIsBetter,[],{binBoundaries:LOADING_METRIC_BOUNDARIES,description:'time to first contentful paint',summaryOptions:SUMMARY_OPTIONS,});const onLoadHistogram=histograms.createHistogram('timeToOnload',timeDurationInMs_smallerIsBetter,[],{binBoundaries:LOADING_METRIC_BOUNDARIES,description:'time to onload. 
'+'This is temporary metric used for PCv1/v2 sanity checking',summaryOptions:SUMMARY_OPTIONS,});const firstMeaningfulPaintHistogram=histograms.createHistogram('timeToFirstMeaningfulPaint',timeDurationInMs_smallerIsBetter,[],{binBoundaries:LOADING_METRIC_BOUNDARIES,description:'time to first meaningful paint',summaryOptions:SUMMARY_OPTIONS,});const firstMeaningfulPaintCpuTimeHistogram=histograms.createHistogram('cpuTimeToFirstMeaningfulPaint',timeDurationInMs_smallerIsBetter,[],{binBoundaries:LOADING_METRIC_BOUNDARIES,description:'CPU time to first meaningful paint',summaryOptions:SUMMARY_OPTIONS,});const timeToInteractiveHistogram=histograms.createHistogram('timeToInteractive',timeDurationInMs_smallerIsBetter,[],{binBoundaries:TIME_TO_INTERACTIVE_BOUNDARIES,description:'Time to Interactive',summaryOptions:SUMMARY_OPTIONS,});const timeToFirstCpuIdleHistogram=histograms.createHistogram('timeToFirstCpuIdle',timeDurationInMs_smallerIsBetter,[],{binBoundaries:TIME_TO_INTERACTIVE_BOUNDARIES,description:'Time to First CPU Idle',summaryOptions:SUMMARY_OPTIONS,});const chromeHelper=model.getOrCreateHelper(tr.model.helpers.ChromeModelHelper);for(const pid in chromeHelper.rendererHelpers){const rendererHelper=chromeHelper.rendererHelpers[pid];if(rendererHelper.isChromeTracingUI)continue;const 
samplesSet=collectLoadingMetricsForRenderer(rendererHelper);addSamplesToHistogram(samplesSet.firstPaintSamples,firstPaintHistogram,histograms);addSamplesToHistogram(samplesSet.firstContentfulPaintSamples,firstContentfulPaintHistogram,histograms);addSamplesToHistogram(samplesSet.onLoadSamples,onLoadHistogram,histograms);addSamplesToHistogram(samplesSet.firstMeaningfulPaintSamples,firstMeaningfulPaintHistogram,histograms);addSamplesToHistogram(samplesSet.firstMeaningfulPaintCpuTimeSamples,firstMeaningfulPaintCpuTimeHistogram,histograms);addSamplesToHistogram(samplesSet.interactiveSamples,timeToInteractiveHistogram,histograms);addSamplesToHistogram(samplesSet.firstCpuIdleSamples,timeToFirstCpuIdleHistogram,histograms);}} tr.metrics.MetricRegistry.register(loadingMetric);return{loadingMetric,getNetworkEventsInRange,collectLoadingMetricsForRenderer,};});'use strict';tr.exportTo('tr.metrics',function(){const SPA_NAVIGATION_START_TO_FIRST_PAINT_DURATION_BIN_BOUNDARY=tr.v.HistogramBinBoundaries.createExponential(1,1000,50);function spaNavigationMetric(histograms,model){const histogram=new tr.v.Histogram('spaNavigationStartToFpDuration',tr.b.Unit.byName.timeDurationInMs_smallerIsBetter,SPA_NAVIGATION_START_TO_FIRST_PAINT_DURATION_BIN_BOUNDARY);histogram.description='Latency between the input event causing'+' a SPA navigation and the first paint event after it';histogram.customizeSummaryOptions({count:false,sum:false,});const modelHelper=model.getOrCreateHelper(tr.model.helpers.ChromeModelHelper);if(!modelHelper){return;} const rendererHelpers=modelHelper.rendererHelpers;if(!rendererHelpers){return;} const browserHelper=modelHelper.browserHelper;for(const rendererHelper of Object.values(rendererHelpers)){const spaNavigations=tr.metrics.findSpaNavigationsOnRenderer(rendererHelper,browserHelper);for(const spaNav of spaNavigations){let beginTs=0;if(spaNav.navStartCandidates.inputLatencyAsyncSlice){const 
beginData=spaNav.navStartCandidates.inputLatencyAsyncSlice.args.data;beginTs=model.convertTimestampToModelTime('traceEventClock',beginData.INPUT_EVENT_LATENCY_BEGIN_RWH_COMPONENT.time);}else{beginTs=spaNav.navStartCandidates.goToIndexSlice.start;} @@ -8117,7 +8117,7 @@ const benchmarks=hist.diagnostics.get(tr.v.d.RESERVED_NAMES.BENCHMARKS);const start=hist.diagnostics.get(tr.v.d.RESERVED_NAMES.BENCHMARK_START);if(benchmarks===undefined){if(start===undefined)return'Value';return start.toString();} const benchmarksStr=Array.from(benchmarks).join('\n');if(start===undefined)return benchmarksStr;return benchmarksStr+'\n'+start.toString();});class GenericSetGrouping extends HistogramGrouping{constructor(name){super(name,undefined);this.callback_=this.compute_.bind(this);} compute_(hist){const diag=hist.diagnostics.get(this.key);if(diag===undefined)return'';const parts=Array.from(diag);parts.sort();return parts.join(',');}} -GenericSetGrouping.NAMES=[tr.v.d.RESERVED_NAMES.ARCHITECTURES,tr.v.d.RESERVED_NAMES.BENCHMARKS,tr.v.d.RESERVED_NAMES.BOTS,tr.v.d.RESERVED_NAMES.BUILDS,tr.v.d.RESERVED_NAMES.DEVICE_IDS,tr.v.d.RESERVED_NAMES.PRIMARYS,tr.v.d.RESERVED_NAMES.MEMORY_AMOUNTS,tr.v.d.RESERVED_NAMES.OS_NAMES,tr.v.d.RESERVED_NAMES.OS_VERSIONS,tr.v.d.RESERVED_NAMES.PRODUCT_VERSIONS,tr.v.d.RESERVED_NAMES.STORIES,tr.v.d.RESERVED_NAMES.STORYSET_REPEATS,tr.v.d.RESERVED_NAMES.STORY_TAGS,];for(const name of GenericSetGrouping.NAMES){new GenericSetGrouping(name);} +GenericSetGrouping.NAMES=[tr.v.d.RESERVED_NAMES.ARCHITECTURES,tr.v.d.RESERVED_NAMES.BENCHMARKS,tr.v.d.RESERVED_NAMES.BOTS,tr.v.d.RESERVED_NAMES.BUILDS,tr.v.d.RESERVED_NAMES.DEVICE_IDS,tr.v.d.RESERVED_NAMES.MASTERS,tr.v.d.RESERVED_NAMES.MEMORY_AMOUNTS,tr.v.d.RESERVED_NAMES.OS_NAMES,tr.v.d.RESERVED_NAMES.OS_VERSIONS,tr.v.d.RESERVED_NAMES.PRODUCT_VERSIONS,tr.v.d.RESERVED_NAMES.STORIES,tr.v.d.RESERVED_NAMES.STORYSET_REPEATS,tr.v.d.RESERVED_NAMES.STORY_TAGS,];for(const name of GenericSetGrouping.NAMES){new 
GenericSetGrouping(name);} class DateRangeGrouping extends HistogramGrouping{constructor(name){super(name,undefined);this.callback_=this.compute_.bind(this);} compute_(hist){const diag=hist.diagnostics.get(this.key);if(diag===undefined)return'';return diag.toString();}} DateRangeGrouping.NAMES=[tr.v.d.RESERVED_NAMES.BENCHMARK_START,tr.v.d.RESERVED_NAMES.TRACE_START,];for(const name of DateRangeGrouping.NAMES){new DateRangeGrouping(name);} @@ -10018,10 +10018,10 @@ root 504 2 0 0 rescuer_thread 0 S [sb-1] 5 root 505 2 0 0 ngd_slim_rx_msgq_thread 0 S [ngd_rx_thread1] 5 root 506 2 0 0 irq_thread 0 S [irq/308-mnh-rea] 5 -root 507 2 0 0 ngd_notify_partners 0 S [ngd_notify_sl1] 5 +root 507 2 0 0 ngd_notify_slaves 0 S [ngd_notify_sl1] 5 root 508 2 0 0 rescuer_thread 0 S [sb-3] 5 root 509 2 0 0 ngd_slim_rx_msgq_thread 0 S [ngd_rx_thread3] 5 -root 510 2 0 0 ngd_notify_partners 0 S [ngd_notify_sl3] 5 +root 510 2 0 0 ngd_notify_slaves 0 S [ngd_notify_sl3] 5 root 511 2 0 0 rescuer_thread 0 S [tbn_event_proce] 5 root 512 2 0 0 irq_thread 0 S [irq/78-qpnp_fla] 5 root 513 2 0 0 irq_thread 0 S [irq/77-qpnp_fla] 5 @@ -10052,7 +10052,7 @@ root 546 2 0 0 irq_thread 0 S [irq/381-fts] 5 logd 555 1 30408 13716 SyS_rt_sigsuspend 0 S logd logd system 556 1 15876 3680 do_wait 0 S qseecomd qseecomd -system 557 1 17460 3660 binder_ioctl 0 S [email protected] [email protected] +system 557 1 17460 3660 binder_ioctl 0 S [email protected] [email protected] root 559 2 0 0 kthread_worker_fn 0 S [sugov:0] 5 root 560 2 0 0 kthread_worker_fn 0 S [sugov:4] 5 root 565 2 0 0 kauditd_thread 0 S [kauditd] 5 @@ -10065,7 +10065,7 @@ system 588 1 11188 2556 binder_ioctl 0 S vndservicemanager vndservicemanager root 590 1 12516 3088 binder_ioctl 0 S [email protected] [email protected] system 591 1 13968 3292 binder_ioctl 0 S [email protected] [email protected] -hsm 592 1 2127108 6116 binder_ioctl 0 S [email protected] [email protected] +hsm 592 1 2127108 6116 binder_ioctl 0 S [email protected] [email 
protected] hsm 593 1 16388 2880 binder_ioctl 0 S citadeld citadeld system 595 1 20052 4528 do_sys_poll 0 S sscrpcd sscrpcd system 598 1 2142956 14468 binder_ioctl 0 S [email protected] [email protected] @@ -10728,7 +10728,7 @@ logd 555 573 logd.klogd logd 555 574 logd.auditd system 556 556 qseecomd -system 557 557 [email protected] +system 557 557 [email protected] system 557 602 HwBinder:557_1 root 559 559 sugov:0 root 560 560 sugov:4 @@ -10757,7 +10757,7 @@ system 588 588 vndservicemanag root 590 590 [email protected] system 591 591 [email protected] -hsm 592 592 [email protected] +hsm 592 592 [email protected] hsm 593 593 citadeld hsm 593 603 citadeld hsm 593 604 Binder:593_1 @@ -12770,10 +12770,10 @@ root 504 2 0 0 rescuer_thread 0 S [sb-1] 5 root 505 2 0 0 ngd_slim_rx_msgq_thread 0 S [ngd_rx_thread1] 5 root 506 2 0 0 irq_thread 0 S [irq/308-mnh-rea] 5 -root 507 2 0 0 ngd_notify_partners 0 S [ngd_notify_sl1] 5 +root 507 2 0 0 ngd_notify_slaves 0 S [ngd_notify_sl1] 5 root 508 2 0 0 rescuer_thread 0 S [sb-3] 5 root 509 2 0 0 ngd_slim_rx_msgq_thread 0 S [ngd_rx_thread3] 5 -root 510 2 0 0 ngd_notify_partners 0 S [ngd_notify_sl3] 5 +root 510 2 0 0 ngd_notify_slaves 0 S [ngd_notify_sl3] 5 root 511 2 0 0 rescuer_thread 0 S [tbn_event_proce] 5 root 512 2 0 0 irq_thread 0 S [irq/78-qpnp_fla] 5 root 513 2 0 0 irq_thread 0 S [irq/77-qpnp_fla] 5 @@ -12804,7 +12804,7 @@ root 546 2 0 0 irq_thread 0 S [irq/381-fts] 5 logd 555 1 30408 13708 SyS_rt_sigsuspend 0 S logd logd system 556 1 15876 3680 do_wait 0 S qseecomd qseecomd -system 557 1 17460 3664 binder_ioctl 0 S [email protected] [email protected] +system 557 1 17460 3664 binder_ioctl 0 S [email protected] [email protected] root 559 2 0 0 kthread_worker_fn 0 S [sugov:0] 5 root 560 2 0 0 kthread_worker_fn 0 S [sugov:4] 5 root 565 2 0 0 kauditd_thread 0 S [kauditd] 5 @@ -12817,7 +12817,7 @@ system 588 1 11188 2556 binder_ioctl 0 S vndservicemanager vndservicemanager root 590 1 12516 3092 binder_ioctl 0 S [email protected] 
[email protected] system 591 1 13968 3296 binder_ioctl 0 S [email protected] [email protected] -hsm 592 1 2127108 6120 binder_ioctl 0 S [email protected] [email protected] +hsm 592 1 2127108 6120 binder_ioctl 0 S [email protected] [email protected] hsm 593 1 16388 2880 binder_ioctl 0 S citadeld citadeld system 595 1 20052 4528 do_sys_poll 0 S sscrpcd sscrpcd system 598 1 2142956 14468 binder_ioctl 0 S [email protected] [email protected] @@ -13480,7 +13480,7 @@ logd 555 573 logd.klogd logd 555 574 logd.auditd system 556 556 qseecomd -system 557 557 [email protected] +system 557 557 [email protected] system 557 602 HwBinder:557_1 root 559 559 sugov:0 root 560 560 sugov:4 @@ -13509,7 +13509,7 @@ system 588 588 vndservicemanag root 590 590 [email protected] system 591 591 [email protected] -hsm 592 592 [email protected] +hsm 592 592 [email protected] hsm 593 593 citadeld hsm 593 603 citadeld hsm 593 604 Binder:593_1
diff --git a/tools/systrace_parser/parser/tracker.py b/tools/systrace_parser/parser/tracker.py index 79097f3..887f9bb 100644 --- a/tools/systrace_parser/parser/tracker.py +++ b/tools/systrace_parser/parser/tracker.py
@@ -98,7 +98,7 @@ if switch: # End previous item self.handle_mark(time, "E") - # Push a placeholder item that will get popped by the 'real' end of the + # Push a dummy item that will get popped by the 'real' end of the # previous item. self.mytree.push_dummy(time) m = self.matcher.search(mark) @@ -122,7 +122,7 @@ elif mark[0] == "E": try: node = self.mytree.pop(time) - if node.is_dummy(): # Placeholder item + if node.is_dummy(): # Dummy item pass else: if node.layer == LAYER_APPLICATION and node.phase in [PHASE_WARMUP, PHASE_BENCHMARK]:
diff --git a/tools/test_generator/README.md b/tools/test_generator/README.md index b4ec495..0297dce 100644 --- a/tools/test_generator/README.md +++ b/tools/test_generator/README.md
@@ -196,7 +196,7 @@ #### AllOutputsAsInternalCoverter -Add a placeholder ADD operation after each model output to make it as an internal operand. Will skip if the model does not have any output tensor that is compatible with the ADD operation or if the model has more than one operation. +Add a dummy ADD operation after each model output to make it as an internal operand. Will skip if the model does not have any output tensor that is compatible with the ADD operation or if the model has more than one operation. #### Add variation to example @@ -230,13 +230,18 @@ - **AllTensorsAsInputsConverter:** Convert all constant tensors in the model to model inputs. Will skip if the model does not have any constant tensor, or if the model has more than one operations. If not explicitly disabled, this variation will be automatically applied to all tests. -- **AllInputsAsInternalCoverter:** Add a placeholder ADD operation before each model input to make it as an internal operand. Will skip if the model does not have any input tensor that is compatible to the ADD operation, or if the model has more than one operations. If not explicitly disabled, this variation will be automatically applied to all tests. +- **AllInputsAsInternalCoverter:** Add a dummy ADD operation before each model input to make it as an internal operand. Will skip if the model does not have any input tensor that is compatible to the ADD operation, or if the model has more than one operations. If not explicitly disabled, this variation will be automatically applied to all tests. + +- **DynamicOutputShapeConverter:** Convert the model to enable dynamic output shape test. If not explicitly disabled, this variation will be automatically applied to all tests introduce in HAL version 1.2 or later. You can opt-out by invoking the corresponding methods on examples. ```Python # Disable AllTensorsAsInputsConverter and AllInputsAsInternalCoverter. example.DisableLifeTimeVariation() + +# Disable DynamicOutputShapeConverter. 
+example.DisableDynamicOutputShapeVariation() ``` You may also specify a certain operand to be input/const-only that `AllInputsAsInternalCoverter` will skip converting this operand.
diff --git a/tools/test_generator/test_generator.py b/tools/test_generator/test_generator.py index 6cc6eba..92dfad7 100755 --- a/tools/test_generator/test_generator.py +++ b/tools/test_generator/test_generator.py
@@ -222,9 +222,27 @@ def IsScalar(self): return not self.type.startswith("TENSOR_") + def GetElementByteSize(self): + cppTypeString = self.GetCppTypeString() + if cppTypeString in ["uint8_t", "int8_t", "bool8"]: + return 1 + elif cppTypeString in ["int16_t", "uint16_t", "_Float16"]: + return 2 + else: + return 4 + + def GetByteSize(self): + return self.GetElementByteSize() * self.GetNumberOfElements() + + def GetDimensionsString(self): + return "{" + GetJointStr(self.dimensions) + "}" + def GetSignatureTuple(self): return (self.type, self.dimensions, self.scale, self.zeroPoint) + def ToUnspecifiedDim(self): + return Type.GetType(self.type, [0] * len(self.dimensions), self.scale, self.zeroPoint) + # To track implicitly convertible parameter types class ImplicitParameter(): @staticmethod @@ -249,6 +267,17 @@ bshape[self.channelDim] = len(self.scales) return np.array(self.scales).reshape(bshape) + def GetConstructor(self): + return "SymmPerChannelQuantParams({%s},%d)" % ( + ", ".join(str(x) + "f" for x in self.scales), self.channelDim) + + def GetVtsSetter(self): + return "channelQuant" + + def GetVtsConstructor(self): + return "SymmPerChannelQuantParams{.scales={%s}, .channelDim=%d}" % ( + ", ".join(str(x) + "f" for x in self.scales), self.channelDim) + # An operand that can be fed into operations. Also, an operand is always # declared before operations. 
@@ -291,6 +320,10 @@ else: return "{%s}"%(GetJointStr(self.value, method=lambda x: str(int(x)))) + def ToUnspecifiedDim(self): + self.dimensions = self.type.dimensions + self.type = self.type.ToUnspecifiedDim() + def ConvertTo(self, DerivedClass, name=None): assert issubclass(DerivedClass, Operand) name = self.name if name is None else name @@ -462,6 +495,12 @@ self.referenced_models = None Model.models.append(self) + def WithSuffix(self, *args): + self.createFunctionName = GlobalVariable("CreateModel", self.name, *args) + self.createTestFunctionName = GlobalVariable("createTestModel", self.name, *args) + self.isIgnoredFunctionName = GlobalVariable("is_ignored", self.name, *args) + return self + def AddOperand(self, operand): if operand not in self.operands: self.operands.append(operand) @@ -984,7 +1023,7 @@ op.type.type in ["TENSOR_FLOAT32", "TENSOR_QUANT8_ASYMM", "TENSOR_FLOAT16", "TENSOR_QUANT8_ASYMM_SIGNED"]) -# Add a placeholder ADD operation before each model input to make it as an internal operand. +# Add a dummy ADD operation before each model input to make it as an internal operand. class AllInputsAsInternalCoverter(ModelVariation): supportsSubgraphs = True @@ -1004,19 +1043,18 @@ if not modelInputs: raise SkipVariation - # Make every input an output of a placeholder operation: input_new ADD placeholder = input. + # Make every input an output of a dummy operation: input_new ADD dummy = input. for op in modelInputs: newInput = op.ConvertTo(Input, name=op.name + "_new") - placeholderParam = Parameter("placeholder", - (op.type.type, [1], op.type.scale, op.type.zeroPoint), - [op.type.zeroPoint]) - model.Operation("ADD", newInput, placeholderParam, 0).To(op) + dummyParam = Parameter("dummy", (op.type.type, [1], op.type.scale, op.type.zeroPoint), + [op.type.zeroPoint]) + model.Operation("ADD", newInput, dummyParam, 0).To(op) # Convert to internal operands. 
model.UpdateEquivalentOperands([op.ConvertTo(Internal) for op in modelInputs]) return model -# Add a placeholder ADD operation after each model output to make it as an internal operand. +# Add a dummy ADD operation after each model output to make it as an internal operand. class AllOutputsAsInternalCoverter(ModelVariation): supportsSubgraphs = True @@ -1036,13 +1074,12 @@ if not modelOutputs: raise SkipVariation - # Make every output an input of a placeholder operation: output ADD placeholder = output_new. + # Make every output an input of a dummy operation: output ADD dummy = output_new. for op in modelOutputs: newOutput = op.ConvertTo(Output, name=op.name + "_new") - placeholderParam = Parameter("placeholder", - (op.type.type, [1], op.type.scale, op.type.zeroPoint), - [op.type.zeroPoint]) - model.Operation("ADD", op, placeholderParam, 0).To(newOutput) + dummyParam = Parameter("dummy", (op.type.type, [1], op.type.scale, op.type.zeroPoint), + [op.type.zeroPoint]) + model.Operation("ADD", op, dummyParam, 0).To(newOutput) # Convert to internal operands. model.UpdateEquivalentOperands([op.ConvertTo(Internal) for op in modelOutputs]) @@ -1058,6 +1095,7 @@ self.name = name self.expectedMultinomialDistributionTolerance = 0 self.expectFailure = False + self.testDynamicOutputShape = True self.testLifeTimeVariation = True self.feedDicts = [] for feedDict in args: @@ -1229,7 +1267,7 @@ *varNames) if str(self.testName) in Example.versionOverrides: self.model.IntroducedIn(Example.versionOverrides[str(self.testName)]) - self.model.Compile() + self.model.WithSuffix(*varNames).Compile() # Dump files if DumpExample is not None and example_fd is not None: @@ -1255,6 +1293,10 @@ self.expectFailure = True return self + def DisableDynamicOutputShapeVariation(self): + self.testDynamicOutputShape = False + return self + def DisableLifeTimeVariation(self): self.testLifeTimeVariation = False return self
diff --git a/tools/test_generator/test_harness/Android.bp b/tools/test_generator/test_harness/Android.bp index 04442cd..7b8c0b4 100644 --- a/tools/test_generator/test_harness/Android.bp +++ b/tools/test_generator/test_harness/Android.bp
@@ -14,18 +14,13 @@ // limitations under the License. // -package { - default_applicable_licenses: ["Android-Apache-2.0"], -} - cc_library_static { name: "libneuralnetworks_generated_test_harness", srcs: [ "TestHarness.cpp", ], + host_supported: false, defaults: ["neuralnetworks_defaults"], - host_supported: true, - vendor_available: true, export_include_dirs: ["include"], static_libs: [ "libbase", @@ -34,26 +29,6 @@ ], } -cc_library_static { - name: "neuralnetworks_test_utils", - defaults: ["neuralnetworks_utils_defaults"], - srcs: [ - "Assertions.cpp", - "TestUtils.cpp", - ], - local_include_dirs: ["include/nnapi"], - export_include_dirs: ["include"], - static_libs: [ - "libneuralnetworks_generated_test_harness", - "neuralnetworks_types", - ], - header_libs: [ - "libbase_headers", - "libcutils_headers", - "libutils_headers", - ], -} - // Deprecated. cc_library_headers { name: "libneuralnetworks_generated_test_harness_headers", @@ -68,9 +43,3 @@ "TestHarness.cpp", ], } - -cc_library_headers { - name: "libneuralnetworks_generated_test_harness_headers_for_cts", - export_include_dirs: ["include"], - sdk_version: "current", -}
diff --git a/tools/test_generator/test_harness/Assertions.cpp b/tools/test_generator/test_harness/Assertions.cpp deleted file mode 100644 index cb1f60f..0000000 --- a/tools/test_generator/test_harness/Assertions.cpp +++ /dev/null
@@ -1,193 +0,0 @@ -/* - * Copyright (C) 2020 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include <nnapi/OperandTypes.h> -#include <nnapi/OperationTypes.h> -#include <nnapi/Types.h> - -#include <type_traits> - -#include "TestHarness.h" - -namespace { - -#define COMPARE_ENUMS_TYPES(lhsType, rhsType) \ - static_assert(std::is_same_v<std::underlying_type_t<::test_helper::lhsType>, \ - std::underlying_type_t<::android::nn::rhsType>>, \ - "::test_helper::" #lhsType \ - " does not have the same underlying type as ::android::nn::" #rhsType) - -COMPARE_ENUMS_TYPES(TestOperandType, OperandType); -COMPARE_ENUMS_TYPES(TestOperationType, OperationType); -COMPARE_ENUMS_TYPES(TestOperandLifeTime, Operand::LifeTime); - -#undef COMPARE_ENUMS_TYPES - -#define COMPARE_ENUMS_FULL(symbol, lhsType, rhsType) \ - static_assert(static_cast<std::underlying_type_t<::test_helper::lhsType>>( \ - ::test_helper::lhsType::symbol) == \ - static_cast<std::underlying_type_t<::android::nn::rhsType>>( \ - ::android::nn::rhsType::symbol), \ - "::test_helper::" #lhsType "::" #symbol \ - " does not match ::android::nn::" #rhsType "::" #symbol) - -#define COMPARE_ENUMS(symbol) COMPARE_ENUMS_FULL(symbol, TestOperandType, OperandType) - -COMPARE_ENUMS(FLOAT32); -COMPARE_ENUMS(INT32); -COMPARE_ENUMS(UINT32); -COMPARE_ENUMS(TENSOR_FLOAT32); -COMPARE_ENUMS(TENSOR_INT32); -COMPARE_ENUMS(TENSOR_QUANT8_ASYMM); -COMPARE_ENUMS(BOOL); 
-COMPARE_ENUMS(TENSOR_QUANT16_SYMM); -COMPARE_ENUMS(TENSOR_FLOAT16); -COMPARE_ENUMS(TENSOR_BOOL8); -COMPARE_ENUMS(FLOAT16); -COMPARE_ENUMS(TENSOR_QUANT8_SYMM_PER_CHANNEL); -COMPARE_ENUMS(TENSOR_QUANT16_ASYMM); -COMPARE_ENUMS(TENSOR_QUANT8_SYMM); -COMPARE_ENUMS(TENSOR_QUANT8_ASYMM_SIGNED); -COMPARE_ENUMS(SUBGRAPH); -// COMPARE_ENUMS(OEM); -// COMPARE_ENUMS(TENSOR_OEM_BYTE); - -#undef COMPARE_ENUMS - -#define COMPARE_ENUMS(symbol) COMPARE_ENUMS_FULL(symbol, TestOperationType, OperationType) - -COMPARE_ENUMS(ADD); -COMPARE_ENUMS(AVERAGE_POOL_2D); -COMPARE_ENUMS(CONCATENATION); -COMPARE_ENUMS(CONV_2D); -COMPARE_ENUMS(DEPTHWISE_CONV_2D); -COMPARE_ENUMS(DEPTH_TO_SPACE); -COMPARE_ENUMS(DEQUANTIZE); -COMPARE_ENUMS(EMBEDDING_LOOKUP); -COMPARE_ENUMS(FLOOR); -COMPARE_ENUMS(FULLY_CONNECTED); -COMPARE_ENUMS(HASHTABLE_LOOKUP); -COMPARE_ENUMS(L2_NORMALIZATION); -COMPARE_ENUMS(L2_POOL_2D); -COMPARE_ENUMS(LOCAL_RESPONSE_NORMALIZATION); -COMPARE_ENUMS(LOGISTIC); -COMPARE_ENUMS(LSH_PROJECTION); -COMPARE_ENUMS(LSTM); -COMPARE_ENUMS(MAX_POOL_2D); -COMPARE_ENUMS(MUL); -COMPARE_ENUMS(RELU); -COMPARE_ENUMS(RELU1); -COMPARE_ENUMS(RELU6); -COMPARE_ENUMS(RESHAPE); -COMPARE_ENUMS(RESIZE_BILINEAR); -COMPARE_ENUMS(RNN); -COMPARE_ENUMS(SOFTMAX); -COMPARE_ENUMS(SPACE_TO_DEPTH); -COMPARE_ENUMS(SVDF); -COMPARE_ENUMS(TANH); -COMPARE_ENUMS(BATCH_TO_SPACE_ND); -COMPARE_ENUMS(DIV); -COMPARE_ENUMS(MEAN); -COMPARE_ENUMS(PAD); -COMPARE_ENUMS(SPACE_TO_BATCH_ND); -COMPARE_ENUMS(SQUEEZE); -COMPARE_ENUMS(STRIDED_SLICE); -COMPARE_ENUMS(SUB); -COMPARE_ENUMS(TRANSPOSE); -COMPARE_ENUMS(ABS); -COMPARE_ENUMS(ARGMAX); -COMPARE_ENUMS(ARGMIN); -COMPARE_ENUMS(AXIS_ALIGNED_BBOX_TRANSFORM); -COMPARE_ENUMS(BIDIRECTIONAL_SEQUENCE_LSTM); -COMPARE_ENUMS(BIDIRECTIONAL_SEQUENCE_RNN); -COMPARE_ENUMS(BOX_WITH_NMS_LIMIT); -COMPARE_ENUMS(CAST); -COMPARE_ENUMS(CHANNEL_SHUFFLE); -COMPARE_ENUMS(DETECTION_POSTPROCESSING); -COMPARE_ENUMS(EQUAL); -COMPARE_ENUMS(EXP); -COMPARE_ENUMS(EXPAND_DIMS); -COMPARE_ENUMS(GATHER); 
-COMPARE_ENUMS(GENERATE_PROPOSALS); -COMPARE_ENUMS(GREATER); -COMPARE_ENUMS(GREATER_EQUAL); -COMPARE_ENUMS(GROUPED_CONV_2D); -COMPARE_ENUMS(HEATMAP_MAX_KEYPOINT); -COMPARE_ENUMS(INSTANCE_NORMALIZATION); -COMPARE_ENUMS(LESS); -COMPARE_ENUMS(LESS_EQUAL); -COMPARE_ENUMS(LOG); -COMPARE_ENUMS(LOGICAL_AND); -COMPARE_ENUMS(LOGICAL_NOT); -COMPARE_ENUMS(LOGICAL_OR); -COMPARE_ENUMS(LOG_SOFTMAX); -COMPARE_ENUMS(MAXIMUM); -COMPARE_ENUMS(MINIMUM); -COMPARE_ENUMS(NEG); -COMPARE_ENUMS(NOT_EQUAL); -COMPARE_ENUMS(PAD_V2); -COMPARE_ENUMS(POW); -COMPARE_ENUMS(PRELU); -COMPARE_ENUMS(QUANTIZE); -COMPARE_ENUMS(QUANTIZED_16BIT_LSTM); -COMPARE_ENUMS(RANDOM_MULTINOMIAL); -COMPARE_ENUMS(REDUCE_ALL); -COMPARE_ENUMS(REDUCE_ANY); -COMPARE_ENUMS(REDUCE_MAX); -COMPARE_ENUMS(REDUCE_MIN); -COMPARE_ENUMS(REDUCE_PROD); -COMPARE_ENUMS(REDUCE_SUM); -COMPARE_ENUMS(ROI_ALIGN); -COMPARE_ENUMS(ROI_POOLING); -COMPARE_ENUMS(RSQRT); -COMPARE_ENUMS(SELECT); -COMPARE_ENUMS(SIN); -COMPARE_ENUMS(SLICE); -COMPARE_ENUMS(SPLIT); -COMPARE_ENUMS(SQRT); -COMPARE_ENUMS(TILE); -COMPARE_ENUMS(TOPK_V2); -COMPARE_ENUMS(TRANSPOSE_CONV_2D); -COMPARE_ENUMS(UNIDIRECTIONAL_SEQUENCE_LSTM); -COMPARE_ENUMS(UNIDIRECTIONAL_SEQUENCE_RNN); -COMPARE_ENUMS(RESIZE_NEAREST_NEIGHBOR); -COMPARE_ENUMS(QUANTIZED_LSTM); -COMPARE_ENUMS(IF); -COMPARE_ENUMS(WHILE); -COMPARE_ENUMS(ELU); -COMPARE_ENUMS(HARD_SWISH); -COMPARE_ENUMS(FILL); -COMPARE_ENUMS(RANK); -// COMPARE_ENUMS(OEM_OPERATION); - -#undef COMPARE_ENUMS - -#define COMPARE_ENUMS(symbol) COMPARE_ENUMS_FULL(symbol, TestOperandLifeTime, Operand::LifeTime) - -COMPARE_ENUMS(TEMPORARY_VARIABLE); -COMPARE_ENUMS(SUBGRAPH_INPUT); -COMPARE_ENUMS(SUBGRAPH_OUTPUT); -COMPARE_ENUMS(CONSTANT_COPY); -COMPARE_ENUMS(CONSTANT_REFERENCE); -COMPARE_ENUMS(NO_VALUE); -COMPARE_ENUMS(SUBGRAPH); -// COMPARE_ENUMS(POINTER); - -#undef COMPARE_ENUMS - -#undef COMPARE_ENUMS_FULL - -} // namespace
diff --git a/tools/test_generator/test_harness/TestHarness.cpp b/tools/test_generator/test_harness/TestHarness.cpp index 7614c65..56e1414 100644 --- a/tools/test_generator/test_harness/TestHarness.cpp +++ b/tools/test_generator/test_harness/TestHarness.cpp
@@ -17,7 +17,7 @@ #include "TestHarness.h" #include <android-base/logging.h> -#include <gmock/gmock.h> +#include <gmock/gmock-matchers.h> #include <gtest/gtest.h> #include <algorithm> @@ -558,12 +558,12 @@ } // namespace -std::ostream& operator<<(std::ostream& os, const TestOperandType& type) { - return os << kOperandTypeNames[static_cast<int>(type)]; +const char* toString(TestOperandType type) { + return kOperandTypeNames[static_cast<int>(type)]; } -std::ostream& operator<<(std::ostream& os, const TestOperationType& type) { - return os << kOperationTypeNames[static_cast<int>(type)]; +const char* toString(TestOperationType type) { + return kOperationTypeNames[static_cast<int>(type)]; } // Dump a test buffer. @@ -605,7 +605,7 @@ void SpecDumper::dumpTestOperand(const TestOperand& operand, uint32_t index) { mOs << "op" << index << " = " << getOperandClassInSpecFile(operand.lifetime) << "(\"op" << index - << "\", [\"" << operand.type << "\", [" + << "\", [\"" << toString(operand.type) << "\", [" << join(", ", operand.dimensions, defaultToStringFunc<uint32_t>) << "]"; if (operand.scale != 0.0f || operand.zeroPoint != 0) { mOs << ", float.fromhex(" << toHexFloatString(operand.scale) << "), " << operand.zeroPoint; @@ -635,7 +635,7 @@ void SpecDumper::dumpTestOperation(const TestOperation& operation) { auto toOperandName = [](uint32_t index) { return "op" + std::to_string(index); }; - mOs << "model = model.Operation(\"" << operation.type << "\", " + mOs << "model = model.Operation(\"" << toString(operation.type) << "\", " << join(", ", operation.inputs, toOperandName) << ").To(" << join(", ", operation.outputs, toOperandName) << ")\n"; }
diff --git a/tools/test_generator/test_harness/TestUtils.cpp b/tools/test_generator/test_harness/TestUtils.cpp deleted file mode 100644 index 5f47439..0000000 --- a/tools/test_generator/test_harness/TestUtils.cpp +++ /dev/null
@@ -1,195 +0,0 @@ -/* - * Copyright (C) 2020 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include <android-base/logging.h> -#include <nnapi/OperandTypes.h> -#include <nnapi/OperationTypes.h> -#include <nnapi/SharedMemory.h> -#include <nnapi/Types.h> - -#include <algorithm> -#include <iterator> -#include <limits> -#include <memory> -#include <utility> -#include <vector> - -#include "TestHarness.h" - -namespace android::nn::test { -namespace { - -using ::test_helper::TestBuffer; -using ::test_helper::TestModel; -using ::test_helper::TestOperand; -using ::test_helper::TestOperandLifeTime; -using ::test_helper::TestOperandType; -using ::test_helper::TestOperation; -using ::test_helper::TestSubgraph; - -Operand createOperand(const TestOperand& operand, Model::OperandValues* operandValues, - ConstantMemoryBuilder* memoryBuilder) { - CHECK(operandValues != nullptr); - CHECK(memoryBuilder != nullptr); - - const OperandType type = static_cast<OperandType>(operand.type); - const Operand::LifeTime lifetime = static_cast<Operand::LifeTime>(operand.lifetime); - - DataLocation location; - switch (operand.lifetime) { - case TestOperandLifeTime::TEMPORARY_VARIABLE: - case TestOperandLifeTime::SUBGRAPH_INPUT: - case TestOperandLifeTime::SUBGRAPH_OUTPUT: - case TestOperandLifeTime::NO_VALUE: - break; - case TestOperandLifeTime::CONSTANT_COPY: - location = operandValues->append(operand.data.get<uint8_t>(), operand.data.size()); - 
break; - case TestOperandLifeTime::CONSTANT_REFERENCE: - location = memoryBuilder->append(operand.data.get<void>(), operand.data.size()); - break; - case TestOperandLifeTime::SUBGRAPH: - location = {.offset = *operand.data.get<uint32_t>()}; - break; - } - - Operand::ExtraParams extraParams; - if (operand.type == TestOperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL) { - extraParams = - Operand::SymmPerChannelQuantParams{.scales = operand.channelQuant.scales, - .channelDim = operand.channelQuant.channelDim}; - } - - return { - .type = type, - .dimensions = operand.dimensions, - .scale = operand.scale, - .zeroPoint = operand.zeroPoint, - .lifetime = lifetime, - .location = location, - .extraParams = std::move(extraParams), - }; -} - -Model::Subgraph createSubgraph(const TestSubgraph& testSubgraph, - Model::OperandValues* operandValues, - ConstantMemoryBuilder* memoryBuilder) { - // Operands. - std::vector<Operand> operands; - operands.reserve(testSubgraph.operands.size()); - std::transform(testSubgraph.operands.begin(), testSubgraph.operands.end(), - std::back_inserter(operands), - [operandValues, memoryBuilder](const TestOperand& operand) { - return createOperand(operand, operandValues, memoryBuilder); - }); - - // Operations. 
- std::vector<Operation> operations; - operations.reserve(testSubgraph.operations.size()); - std::transform(testSubgraph.operations.begin(), testSubgraph.operations.end(), - std::back_inserter(operations), [](const TestOperation& op) -> Operation { - return {.type = static_cast<OperationType>(op.type), - .inputs = op.inputs, - .outputs = op.outputs}; - }); - - return {.operands = std::move(operands), - .operations = std::move(operations), - .inputIndexes = testSubgraph.inputIndexes, - .outputIndexes = testSubgraph.outputIndexes}; -} - -} // namespace - -Model createModel(const TestModel& testModel) { - Model::OperandValues operandValues; - ConstantMemoryBuilder memoryBuilder(0); - - Model::Subgraph mainSubgraph = createSubgraph(testModel.main, &operandValues, &memoryBuilder); - std::vector<Model::Subgraph> refSubgraphs; - refSubgraphs.reserve(testModel.referenced.size()); - std::transform(testModel.referenced.begin(), testModel.referenced.end(), - std::back_inserter(refSubgraphs), - [&operandValues, &memoryBuilder](const TestSubgraph& testSubgraph) { - return createSubgraph(testSubgraph, &operandValues, &memoryBuilder); - }); - - // Shared memory. - std::vector<SharedMemory> pools; - if (!memoryBuilder.empty()) { - pools.push_back(memoryBuilder.finish().value()); - } - - return {.main = std::move(mainSubgraph), - .referenced = std::move(refSubgraphs), - .operandValues = std::move(operandValues), - .pools = std::move(pools), - .relaxComputationFloat32toFloat16 = testModel.isRelaxed}; -} - -Request createRequest(const TestModel& testModel) { - constexpr uint32_t kInputPoolIndex = 0; - constexpr uint32_t kOutputPoolIndex = 1; - - // Model inputs. 
- std::vector<Request::Argument> inputs; - inputs.reserve(testModel.main.inputIndexes.size()); - ConstantMemoryBuilder inputBuilder(kInputPoolIndex); - for (uint32_t operandIndex : testModel.main.inputIndexes) { - const auto& op = testModel.main.operands[operandIndex]; - Request::Argument requestArgument; - if (op.data.size() == 0) { - // Omitted input. - requestArgument = {.lifetime = Request::Argument::LifeTime::NO_VALUE}; - } else { - const DataLocation location = inputBuilder.append(op.data.get<void>(), op.data.size()); - requestArgument = {.lifetime = Request::Argument::LifeTime::POOL, - .location = location, - .dimensions = op.dimensions}; - } - inputs.push_back(std::move(requestArgument)); - } - - // Model outputs. - std::vector<Request::Argument> outputs; - outputs.reserve(testModel.main.outputIndexes.size()); - MutableMemoryBuilder outputBuilder(kOutputPoolIndex); - for (uint32_t operandIndex : testModel.main.outputIndexes) { - const auto& op = testModel.main.operands[operandIndex]; - - // In the case of zero-sized output, we should at least provide a one-byte buffer. - // This is because zero-sized tensors are only supported internally to the driver, or - // reported in output shapes. It is illegal for the client to pre-specify a zero-sized - // tensor as model output. Otherwise, we will have two semantic conflicts: - // - "Zero dimension" conflicts with "unspecified dimension". - // - "Omitted operand buffer" conflicts with "zero-sized operand buffer". - size_t bufferSize = std::max<size_t>(op.data.size(), 1); - - const DataLocation location = outputBuilder.append(bufferSize); - outputs.push_back({.lifetime = Request::Argument::LifeTime::POOL, - .location = location, - .dimensions = op.dimensions}); - } - - // Model pools. 
- auto inputMemory = inputBuilder.finish().value(); - auto outputMemory = outputBuilder.finish().value(); - std::vector<Request::MemoryPool> pools = {std::move(inputMemory), std::move(outputMemory)}; - - return {.inputs = std::move(inputs), .outputs = std::move(outputs), .pools = std::move(pools)}; -} - -} // namespace android::nn::test
diff --git a/tools/test_generator/test_harness/include/TestHarness.h b/tools/test_generator/test_harness/include/TestHarness.h index be4f562..ef2dbc8 100644 --- a/tools/test_generator/test_harness/include/TestHarness.h +++ b/tools/test_generator/test_harness/include/TestHarness.h
@@ -226,12 +226,13 @@ // Factory method for creating a randomized buffer with "size" number of // bytes. - static TestBuffer createRandom(size_t size, std::default_random_engine* gen) { - static_assert(kAlignment % sizeof(uint32_t) == 0); + template <typename T> + static TestBuffer createFromRng(size_t size, std::default_random_engine* gen) { + static_assert(kAlignment % sizeof(T) == 0); TestBuffer testBuffer(size); - std::uniform_int_distribution<uint32_t> dist{}; - const size_t count = testBuffer.alignedSize() / sizeof(uint32_t); - std::generate_n(testBuffer.getMutable<uint32_t>(), count, [&] { return dist(*gen); }); + std::uniform_int_distribution<T> dist{}; + const size_t adjustedSize = testBuffer.alignedSize() / sizeof(T); + std::generate_n(testBuffer.getMutable<T>(), adjustedSize, [&] { return dist(*gen); }); return testBuffer; } @@ -428,7 +429,7 @@ return instance; } - // Registers a TestModel to the manager. Returns a placeholder integer for global variable + // Registers a TestModel to the manager. Returns a dummy integer for global variable // initialization. int add(std::string name, const TestModel& testModel) { mTestModels.emplace(std::move(name), &testModel); @@ -508,8 +509,8 @@ TestModel convertQuant8AsymmOperandsToSigned(const TestModel& testModel); -std::ostream& operator<<(std::ostream& os, const TestOperandType& type); -std::ostream& operator<<(std::ostream& os, const TestOperationType& type); +const char* toString(TestOperandType type); +const char* toString(TestOperationType type); // Dump a test model in the format of a spec file for debugging and visualization purpose. class SpecDumper {
diff --git a/tools/test_generator/test_harness/include/nnapi/TestUtils.h b/tools/test_generator/test_harness/include/nnapi/TestUtils.h deleted file mode 100644 index 0e7a2f8..0000000 --- a/tools/test_generator/test_harness/include/nnapi/TestUtils.h +++ /dev/null
@@ -1,32 +0,0 @@ -/* - * Copyright (C) 2020 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef ANDROID_FRAMEWORKS_ML_NN_TOOLS_TEST_GENERATOR_TEST_HARNESS_TEST_UTILS_H -#define ANDROID_FRAMEWORKS_ML_NN_TOOLS_TEST_GENERATOR_TEST_HARNESS_TEST_UTILS_H - -#include <nnapi/Types.h> - -#include "TestHarness.h" - -namespace android::nn::test { - -Model createModel(const ::test_helper::TestModel& testModel); - -Request createRequest(const ::test_helper::TestModel& testModel); - -} // namespace android::nn::test - -#endif // ANDROID_FRAMEWORKS_ML_NN_TOOLS_TEST_GENERATOR_TEST_HARNESS_TEST_UTILS_H
diff --git a/tools/test_generator/test_harness/include_legacy/TestHarness.h b/tools/test_generator/test_harness/include_legacy/TestHarness.h index 863c0b1..f4211f7 100644 --- a/tools/test_generator/test_harness/include_legacy/TestHarness.h +++ b/tools/test_generator/test_harness/include_legacy/TestHarness.h
@@ -20,7 +20,7 @@ #ifndef ANDROID_FRAMEWORKS_ML_NN_TOOLS_TEST_GENERATOR_TEST_HARNESS_LEGACY_TEST_HARNESS_H #define ANDROID_FRAMEWORKS_ML_NN_TOOLS_TEST_GENERATOR_TEST_HARNESS_LEGACY_TEST_HARNESS_H -#include <gmock/gmock.h> +#include <gmock/gmock-matchers.h> #include <gtest/gtest.h> #include <cmath>